//
// Generated by NVIDIA NVVM Compiler
// Compiler built on Wed Jul 10 12:41:20 2013 (1373485280)
// Cuda compilation tools, release 5.5, V5.5.0
//

.version 3.2
.target sm_30
.address_size 64

	.file	1 "D:/singlebarrel/releases/2014.03/shared/adobe/Iridas/IRIDASLIB/GPU/ShadingMask.cu", 1399785249, 8796
	.file	2 "d:\\singlebarrel\\releases\\2014.03\\shared\\adobe\\mediacore\\external\\3rdparty\\nvidia\\cuda\\win\\include\\device_functions.h", 1399785281, 191626
.global .texref texture0_RECT;
.global .texref texture1_RECT;
.global .texref inMaskImage;
.global .align 1 .b8 $str[11] = {95, 95, 67, 85, 68, 65, 95, 70, 84, 90, 0};
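// ($str above decodes to "__CUDA_FTZ" -- apparently the flush-to-zero
//  marker nvcc embeds, consistent with the .ftz modifiers used throughout
//  this module.)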

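//
// ShadingMaskKernel -- hedged reading of the PTX below; parameter roles are
// inferred from the load order, not from the original ShadingMask.cu:
//   param_0, param_1 : float endpoints; column x maps to the position
//                      pos = (param_0 - param_1) / width * x + param_1
//   param_2, param_3 : width, height (bounds guard)
//   param_4          : output float buffer, one value per pixel
//   param_5          : mask data -- per-row start indices followed by
//                      10-float records (40 bytes each)
// Each pixel scans its row's records, evaluates a pow()-shaped soft-edge
// coverage for records around pos, keeps the maximum, and stores the result
// clamped to [0,1]; rows with no records store 0.
//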
.visible .entry ShadingMaskKernel(
	.param .f32 ShadingMaskKernel_param_0,
	.param .f32 ShadingMaskKernel_param_1,
	.param .u32 ShadingMaskKernel_param_2,
	.param .u32 ShadingMaskKernel_param_3,
	.param .u64 ShadingMaskKernel_param_4,
	.param .u64 ShadingMaskKernel_param_5
)
{
	.reg .pred 	%p<23>;
	.reg .s16 	%rs<15>;
	.reg .s32 	%r<40>;
	.reg .f32 	%f<83>;
	.reg .s64 	%rd<33>;


	ld.param.f32 	%f19, [ShadingMaskKernel_param_0];
	ld.param.f32 	%f20, [ShadingMaskKernel_param_1];
	ld.param.u32 	%r12, [ShadingMaskKernel_param_2];
	ld.param.u32 	%r13, [ShadingMaskKernel_param_3];
	ld.param.u64 	%rd10, [ShadingMaskKernel_param_4];
	ld.param.u64 	%rd11, [ShadingMaskKernel_param_5];
	cvta.to.global.u64 	%rd1, %rd11;
	.loc 1 25 1
	mov.u32 	%r14, %ntid.x;
	mov.u32 	%r15, %ctaid.x;
	mov.u32 	%r16, %tid.x;
	mad.lo.s32 	%r1, %r14, %r15, %r16;
	mov.u32 	%r17, %ntid.y;
	mov.u32 	%r18, %ctaid.y;
	mov.u32 	%r19, %tid.y;
	mad.lo.s32 	%r2, %r17, %r18, %r19;
	.loc 1 25 1
	setp.lt.s32	%p1, %r1, 0;
	setp.ge.s32	%p2, %r1, %r12;
	or.pred  	%p3, %p1, %p2;
	.loc 1 25 1
	setp.lt.s32	%p4, %r2, 0;
	or.pred  	%p5, %p3, %p4;
	.loc 1 25 1
	setp.ge.s32	%p6, %r2, %r13;
	or.pred  	%p7, %p5, %p6;
	.loc 1 25 1
	@%p7 bra 	BB0_21;

	mul.wide.s32 	%rd12, %r2, 4;
	add.s64 	%rd13, %rd1, %rd12;
	.loc 1 25 1
	ld.global.u32 	%r3, [%rd13];
	ld.global.u32 	%r20, [%rd13+4];
	sub.s32 	%r4, %r20, %r3;
	.loc 1 25 1
	setp.ne.s32	%p8, %r20, %r3;
	.loc 1 25 1
	mov.f32 	%f82, 0f00000000;
	.loc 1 25 1
	@%p8 bra 	BB0_2;
	bra.uni 	BB0_20;

BB0_2:
	.loc 1 25 1
	sub.ftz.f32 	%f21, %f19, %f20;
	cvt.rn.f32.u32	%f22, %r12;
	.loc 2 3606 10
	div.approx.ftz.f32 	%f23, %f21, %f22;
	.loc 1 25 69
	cvt.rn.f32.s32	%f24, %r1;
	fma.rn.ftz.f32 	%f1, %f23, %f24, %f20;
	.loc 1 25 1
	mul.lo.s32 	%r23, %r3, 10;
	cvt.u64.u32	%rd14, %r23;
	cvt.u64.u32	%rd15, %r13;
	.loc 1 25 1
	add.s64 	%rd16, %rd15, %rd14;
	shl.b64 	%rd17, %rd16, 2;
	add.s64 	%rd18, %rd17, %rd1;
	add.s64 	%rd31, %rd18, 20;
	mov.u32 	%r39, 0;
	mov.u16 	%rs14, 0;
	mov.u32 	%r38, %r39;

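// BB0_3..BB0_5 (hedged): first pass -- advance %r39 past records that
// appear to end left of pos; a record with non-zero flag words toggles the
// override flag in %rs14.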
BB0_3:
	.loc 1 25 1
	ld.global.f32 	%f25, [%rd31+4];
	setp.geu.ftz.f32	%p9, %f25, %f1;
	@%p9 bra 	BB0_5;

	.loc 1 25 1
	ld.global.u32 	%r24, [%rd31+20];
	ld.global.u32 	%r25, [%rd31];
	or.b32  	%r26, %r24, %r25;
	setp.eq.s32	%p10, %r26, 0;
	.loc 1 25 1
	and.b16  	%rs9, %rs14, 255;
	setp.eq.s16	%p11, %rs9, 0;
	selp.u16	%rs10, 1, 0, %p11;
	.loc 1 25 1
	add.s32 	%r39, %r38, 1;
	.loc 1 25 1
	selp.b16	%rs14, %rs14, %rs10, %p10;

BB0_5:
	.loc 1 25 1
	add.s64 	%rd31, %rd31, 40;
	.loc 1 25 1
	add.s32 	%r38, %r38, 1;
	.loc 1 25 1
	setp.lt.u32	%p12, %r38, %r4;
	@%p12 bra 	BB0_3;

	.loc 1 25 1
	setp.ne.s32	%p13, %r39, %r4;
	@%p13 bra 	BB0_7;
	bra.uni 	BB0_20;

BB0_7:
	.loc 1 25 1
	setp.lt.u32	%p14, %r39, %r4;
	@%p14 bra 	BB0_9;

	mov.f32 	%f81, 0f00000000;
	bra.uni 	BB0_19;

BB0_9:
	.loc 1 25 1
	mul.wide.u32 	%rd24, %r39, 10;
	shl.b64 	%rd25, %rd24, 2;
	add.s64 	%rd27, %rd18, %rd25;
	add.s64 	%rd32, %rd27, 20;
	mov.f32 	%f81, 0f00000000;

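// BB0_10..BB0_18 (hedged): second pass -- for each record reaching pos,
// blend its two per-edge attribute sets by the normalized distances to the
// record's edges, shape the result with a pow() falloff built from lg2/ex2
// (exponent 2*c + 1, c itself a blended attribute), and keep the running
// maximum in %f81.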
BB0_10:
	.loc 1 25 1
	ld.global.f32 	%f3, [%rd32+-16];
	.loc 1 25 1
	setp.gt.ftz.f32	%p15, %f3, %f1;
	@%p15 bra 	BB0_18;

	.loc 1 25 1
	ld.global.u32 	%r28, [%rd32];
	setp.eq.s32	%p16, %r28, 0;
	.loc 1 25 1
	and.b16  	%rs11, %rs14, 255;
	setp.eq.s16	%p17, %rs11, 0;
	selp.u16	%rs12, 1, 0, %p17;
	.loc 1 25 1
	sub.ftz.f32 	%f28, %f1, %f3;
	.loc 2 2750 10
	abs.ftz.f32 	%f29, %f28;
	.loc 1 25 1
	ld.global.f32 	%f30, [%rd32+4];
	sub.ftz.f32 	%f31, %f1, %f30;
	.loc 2 2750 10
	abs.ftz.f32 	%f32, %f31;
	.loc 1 25 1
	add.ftz.f32 	%f33, %f29, %f32;
	rcp.approx.ftz.f32 	%f34, %f33;
	.loc 1 25 1
	setp.gtu.ftz.f32	%p18, %f33, 0f3A83126F;
	selp.f32	%f35, %f29, 0f3F800000, %p18;
	selp.f32	%f36, %f32, 0f3F800000, %p18;
	selp.f32	%f37, %f34, 0f3F000000, %p18;
	.loc 1 25 1
	ld.global.f32 	%f38, [%rd32+-12];
	ld.global.f32 	%f39, [%rd32+8];
	mul.ftz.f32 	%f40, %f39, %f35;
	fma.rn.ftz.f32 	%f41, %f38, %f36, %f40;
	mul.ftz.f32 	%f4, %f41, %f37;
	ld.global.f32 	%f42, [%rd32+-8];
	ld.global.f32 	%f43, [%rd32+12];
	mul.ftz.f32 	%f44, %f43, %f35;
	fma.rn.ftz.f32 	%f45, %f42, %f36, %f44;
	mul.ftz.f32 	%f46, %f45, %f37;
	ld.global.f32 	%f47, [%rd32+-4];
	ld.global.f32 	%f48, [%rd32+16];
	mul.ftz.f32 	%f49, %f48, %f35;
	fma.rn.ftz.f32 	%f50, %f47, %f36, %f49;
	mul.ftz.f32 	%f5, %f50, %f37;
	fma.rn.ftz.f32 	%f6, %f46, 0f40000000, 0f3F800000;
	setp.ltu.ftz.f32	%p19, %f4, 0f00000000;
	.loc 1 25 1
	selp.b16	%rs14, %rs14, %rs12, %p16;
	.loc 1 25 1
	@%p19 bra 	BB0_13;

	.loc 2 3600 10
	lg2.approx.ftz.f32 	%f51, %f4;
	mul.ftz.f32 	%f52, %f6, %f51;
	ex2.approx.ftz.f32 	%f79, %f52;
	bra.uni 	BB0_14;

BB0_13:
	.loc 1 25 102
	neg.ftz.f32 	%f53, %f4;
	.loc 2 3600 10
	lg2.approx.ftz.f32 	%f54, %f53;
	mul.ftz.f32 	%f55, %f6, %f54;
	ex2.approx.ftz.f32 	%f56, %f55;
	.loc 1 25 207
	neg.ftz.f32 	%f79, %f56;

BB0_14:
	mov.f32 	%f57, 0f3F800000;
	.loc 1 25 1
	sub.ftz.f32 	%f10, %f57, %f4;
	setp.ltu.ftz.f32	%p20, %f10, 0f00000000;
	@%p20 bra 	BB0_16;

	.loc 2 3600 10
	lg2.approx.ftz.f32 	%f58, %f10;
	mul.ftz.f32 	%f59, %f6, %f58;
	ex2.approx.ftz.f32 	%f80, %f59;
	bra.uni 	BB0_17;

BB0_16:
	.loc 1 25 142
	neg.ftz.f32 	%f60, %f10;
	.loc 2 3600 10
	lg2.approx.ftz.f32 	%f61, %f60;
	mul.ftz.f32 	%f62, %f6, %f61;
	ex2.approx.ftz.f32 	%f63, %f62;
	.loc 1 25 212
	neg.ftz.f32 	%f80, %f63;

BB0_17:
	.loc 1 25 1
	add.ftz.f32 	%f64, %f5, %f5;
	add.ftz.f32 	%f65, %f64, 0fBF800000;
	.loc 2 2820 10
	cvt.ftz.sat.f32.f32	%f66, %f65;
	.loc 2 2820 10
	cvt.ftz.sat.f32.f32	%f67, %f64;
	.loc 1 25 1
	sub.ftz.f32 	%f68, %f67, %f66;
	fma.rn.ftz.f32 	%f69, %f68, %f4, %f66;
	.loc 1 25 212
	sub.ftz.f32 	%f71, %f57, %f80;
	.loc 1 25 1
	sub.ftz.f32 	%f72, %f71, %f79;
	fma.rn.ftz.f32 	%f73, %f72, %f69, %f79;
	.loc 2 2770 10
	max.ftz.f32 	%f81, %f81, %f73;

BB0_18:
	.loc 1 25 1
	add.s64 	%rd32, %rd32, 40;
	.loc 1 25 60
	add.s32 	%r39, %r39, 1;
	.loc 1 25 1
	setp.lt.u32	%p21, %r39, %r4;
	@%p21 bra 	BB0_10;

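// BB0_19: if the override flag in %rs14 is set the pixel is forced to full
// coverage (1.0); otherwise the accumulated maximum is clamped to 1.0.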
BB0_19:
	.loc 1 25 1
	and.b16  	%rs13, %rs14, 255;
	setp.eq.s16	%p22, %rs13, 0;
	selp.f32	%f74, %f81, 0f3F800000, %p22;
	mov.f32 	%f75, 0f3F800000;
	.loc 2 2765 10
	min.ftz.f32 	%f82, %f74, %f75;

BB0_20:
	.loc 1 25 1
	mad.lo.s32 	%r37, %r2, %r12, %r1;
	cvta.to.global.u64 	%rd28, %rd10;
	.loc 1 25 1
	mul.wide.u32 	%rd29, %r37, 4;
	add.s64 	%rd30, %rd28, %rd29;
	st.global.f32 	[%rd30], %f82;

BB0_21:
	.loc 1 25 2
	ret;
}

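//
// Mask_Blend_* / Blend_* kernels -- hedged reading; parameter roles are
// inferred from the load order, not from the original source:
//   param_0 : destination buffer     param_1 : destination pitch (pixels)
//   param_2 : non-zero = store float4, zero = store half4
//   param_3, param_4 : width, height
//   param_7 : per-pixel float mask buffer   param_8 : blend opacity
//   (param_5 and param_6 are never referenced in these bodies)
// Each kernel samples texture0_RECT (background) and texture1_RECT
// (foreground) at the pixel centre (x + 0.5, y + 0.5) and stores
//   rgb = fg.rgb * m + bg.rgb * (1 - m),  a = fg.a
// differing only in how the blend factor m is formed.
//
// Mask_Blend_Normal_ANormal: m = mask * fg.a * opacity
//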
.visible .entry Mask_Blend_Normal_ANormal(
	.param .u64 Mask_Blend_Normal_ANormal_param_0,
	.param .u32 Mask_Blend_Normal_ANormal_param_1,
	.param .u32 Mask_Blend_Normal_ANormal_param_2,
	.param .u32 Mask_Blend_Normal_ANormal_param_3,
	.param .u32 Mask_Blend_Normal_ANormal_param_4,
	.param .u64 Mask_Blend_Normal_ANormal_param_5,
	.param .u64 Mask_Blend_Normal_ANormal_param_6,
	.param .u64 Mask_Blend_Normal_ANormal_param_7,
	.param .f32 Mask_Blend_Normal_ANormal_param_8
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<15>;
	.reg .f32 	%f<32>;
	.reg .s64 	%rd<15>;


	ld.param.u64 	%rd3, [Mask_Blend_Normal_ANormal_param_0];
	ld.param.u32 	%r4, [Mask_Blend_Normal_ANormal_param_1];
	ld.param.u32 	%r5, [Mask_Blend_Normal_ANormal_param_2];
	ld.param.u32 	%r6, [Mask_Blend_Normal_ANormal_param_3];
	ld.param.u32 	%r7, [Mask_Blend_Normal_ANormal_param_4];
	ld.param.u64 	%rd4, [Mask_Blend_Normal_ANormal_param_7];
	ld.param.f32 	%f9, [Mask_Blend_Normal_ANormal_param_8];
	cvta.to.global.u64 	%rd1, %rd3;
	cvta.to.global.u64 	%rd2, %rd4;
	.loc 1 193 1
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mov.u32 	%r10, %tid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r10;
	mov.u32 	%r11, %ntid.y;
	mov.u32 	%r12, %ctaid.y;
	mov.u32 	%r13, %tid.y;
	mad.lo.s32 	%r2, %r11, %r12, %r13;
	.loc 1 193 1
	setp.lt.s32	%p1, %r1, %r6;
	setp.lt.s32	%p2, %r2, %r7;
	and.pred  	%p3, %p1, %p2;
	.loc 1 193 1
	@!%p3 bra 	BB1_4;
	bra.uni 	BB1_1;

BB1_1:
	.loc 1 193 1
	cvt.rn.f32.s32	%f22, %r1;
	add.ftz.f32 	%f20, %f22, 0f3F000000;
	cvt.rn.f32.s32	%f23, %r2;
	add.ftz.f32 	%f21, %f23, 0f3F000000;
	.loc 1 193 228
	// inline asm
	tex.2d.v4.f32.f32 {%f10, %f11, %f12, %f13}, [texture0_RECT, {%f20, %f21}];
	// inline asm
	.loc 1 193 231
	// inline asm
	tex.2d.v4.f32.f32 {%f16, %f17, %f18, %f19}, [texture1_RECT, {%f20, %f21}];
	// inline asm
	.loc 1 193 1
	mad.lo.s32 	%r14, %r2, %r6, %r1;
	mul.wide.s32 	%rd9, %r14, 4;
	add.s64 	%rd10, %rd2, %rd9;
	.loc 1 193 1
	ld.global.f32 	%f24, [%rd10];
	mul.ftz.f32 	%f25, %f24, %f19;
	mul.ftz.f32 	%f26, %f25, %f9;
	mov.f32 	%f27, 0f3F800000;
	.loc 1 193 1
	sub.ftz.f32 	%f28, %f27, %f26;
	mul.ftz.f32 	%f29, %f10, %f28;
	fma.rn.ftz.f32 	%f2, %f16, %f26, %f29;
	mul.ftz.f32 	%f30, %f11, %f28;
	fma.rn.ftz.f32 	%f3, %f17, %f26, %f30;
	mul.ftz.f32 	%f31, %f12, %f28;
	fma.rn.ftz.f32 	%f4, %f18, %f26, %f31;
	.loc 1 193 1
	mad.lo.s32 	%r3, %r2, %r4, %r1;
	.loc 1 193 1
	setp.eq.s32	%p4, %r5, 0;
	@%p4 bra 	BB1_3;

	mul.wide.s32 	%rd11, %r3, 16;
	add.s64 	%rd12, %rd1, %rd11;
	.loc 1 193 1
	st.global.v4.f32 	[%rd12], {%f2, %f3, %f4, %f19};
	bra.uni 	BB1_4;

BB1_3:
	mul.wide.s32 	%rd13, %r3, 8;
	add.s64 	%rd14, %rd1, %rd13;
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs1, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs2, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs3, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f19;
	mov.b16 	%rs4, %temp;
}
	.loc 1 193 231
	st.global.v4.u16 	[%rd14], {%rs1, %rs2, %rs3, %rs4};

BB1_4:
	.loc 1 193 2
	ret;
}

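// Mask_Blend_Normal_AInverted: m = mask * (1 - fg.a) * opacity
// (the parameter notes above Mask_Blend_Normal_ANormal apply here as well).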
.visible .entry Mask_Blend_Normal_AInverted(
	.param .u64 Mask_Blend_Normal_AInverted_param_0,
	.param .u32 Mask_Blend_Normal_AInverted_param_1,
	.param .u32 Mask_Blend_Normal_AInverted_param_2,
	.param .u32 Mask_Blend_Normal_AInverted_param_3,
	.param .u32 Mask_Blend_Normal_AInverted_param_4,
	.param .u64 Mask_Blend_Normal_AInverted_param_5,
	.param .u64 Mask_Blend_Normal_AInverted_param_6,
	.param .u64 Mask_Blend_Normal_AInverted_param_7,
	.param .f32 Mask_Blend_Normal_AInverted_param_8
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<15>;
	.reg .f32 	%f<33>;
	.reg .s64 	%rd<15>;


	ld.param.u64 	%rd3, [Mask_Blend_Normal_AInverted_param_0];
	ld.param.u32 	%r4, [Mask_Blend_Normal_AInverted_param_1];
	ld.param.u32 	%r5, [Mask_Blend_Normal_AInverted_param_2];
	ld.param.u32 	%r6, [Mask_Blend_Normal_AInverted_param_3];
	ld.param.u32 	%r7, [Mask_Blend_Normal_AInverted_param_4];
	ld.param.u64 	%rd4, [Mask_Blend_Normal_AInverted_param_7];
	ld.param.f32 	%f9, [Mask_Blend_Normal_AInverted_param_8];
	cvta.to.global.u64 	%rd1, %rd3;
	cvta.to.global.u64 	%rd2, %rd4;
	.loc 1 193 1
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mov.u32 	%r10, %tid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r10;
	mov.u32 	%r11, %ntid.y;
	mov.u32 	%r12, %ctaid.y;
	mov.u32 	%r13, %tid.y;
	mad.lo.s32 	%r2, %r11, %r12, %r13;
	.loc 1 193 1
	setp.lt.s32	%p1, %r1, %r6;
	setp.lt.s32	%p2, %r2, %r7;
	and.pred  	%p3, %p1, %p2;
	.loc 1 193 1
	@!%p3 bra 	BB2_4;
	bra.uni 	BB2_1;

BB2_1:
	.loc 1 193 1
	cvt.rn.f32.s32	%f22, %r1;
	add.ftz.f32 	%f20, %f22, 0f3F000000;
	cvt.rn.f32.s32	%f23, %r2;
	add.ftz.f32 	%f21, %f23, 0f3F000000;
	.loc 1 193 234
	// inline asm
	tex.2d.v4.f32.f32 {%f10, %f11, %f12, %f13}, [texture0_RECT, {%f20, %f21}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f16, %f17, %f18, %f19}, [texture1_RECT, {%f20, %f21}];
	// inline asm
	.loc 1 193 1
	mad.lo.s32 	%r14, %r2, %r6, %r1;
	mul.wide.s32 	%rd9, %r14, 4;
	add.s64 	%rd10, %rd2, %rd9;
	mov.f32 	%f24, 0f3F800000;
	.loc 1 193 1
	sub.ftz.f32 	%f25, %f24, %f19;
	ld.global.f32 	%f26, [%rd10];
	mul.ftz.f32 	%f27, %f26, %f25;
	mul.ftz.f32 	%f28, %f27, %f9;
	sub.ftz.f32 	%f29, %f24, %f28;
	mul.ftz.f32 	%f30, %f10, %f29;
	fma.rn.ftz.f32 	%f2, %f16, %f28, %f30;
	mul.ftz.f32 	%f31, %f11, %f29;
	fma.rn.ftz.f32 	%f3, %f17, %f28, %f31;
	mul.ftz.f32 	%f32, %f12, %f29;
	fma.rn.ftz.f32 	%f4, %f18, %f28, %f32;
	.loc 1 193 1
	mad.lo.s32 	%r3, %r2, %r4, %r1;
	.loc 1 193 1
	setp.eq.s32	%p4, %r5, 0;
	@%p4 bra 	BB2_3;

	mul.wide.s32 	%rd11, %r3, 16;
	add.s64 	%rd12, %rd1, %rd11;
	.loc 1 193 1
	st.global.v4.f32 	[%rd12], {%f2, %f3, %f4, %f19};
	bra.uni 	BB2_4;

BB2_3:
	mul.wide.s32 	%rd13, %r3, 8;
	add.s64 	%rd14, %rd1, %rd13;
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs1, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs2, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs3, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f19;
	mov.b16 	%rs4, %temp;
}
	.loc 1 193 231
	st.global.v4.u16 	[%rd14], {%rs1, %rs2, %rs3, %rs4};

BB2_4:
	.loc 1 193 2
	ret;
}

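// Mask_Blend_Normal: m = mask * opacity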
.visible .entry Mask_Blend_Normal(
	.param .u64 Mask_Blend_Normal_param_0,
	.param .u32 Mask_Blend_Normal_param_1,
	.param .u32 Mask_Blend_Normal_param_2,
	.param .u32 Mask_Blend_Normal_param_3,
	.param .u32 Mask_Blend_Normal_param_4,
	.param .u64 Mask_Blend_Normal_param_5,
	.param .u64 Mask_Blend_Normal_param_6,
	.param .u64 Mask_Blend_Normal_param_7,
	.param .f32 Mask_Blend_Normal_param_8
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<15>;
	.reg .f32 	%f<31>;
	.reg .s64 	%rd<15>;


	ld.param.u64 	%rd3, [Mask_Blend_Normal_param_0];
	ld.param.u32 	%r4, [Mask_Blend_Normal_param_1];
	ld.param.u32 	%r5, [Mask_Blend_Normal_param_2];
	ld.param.u32 	%r6, [Mask_Blend_Normal_param_3];
	ld.param.u32 	%r7, [Mask_Blend_Normal_param_4];
	ld.param.u64 	%rd4, [Mask_Blend_Normal_param_7];
	ld.param.f32 	%f9, [Mask_Blend_Normal_param_8];
	cvta.to.global.u64 	%rd1, %rd3;
	cvta.to.global.u64 	%rd2, %rd4;
	.loc 1 193 1
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mov.u32 	%r10, %tid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r10;
	mov.u32 	%r11, %ntid.y;
	mov.u32 	%r12, %ctaid.y;
	mov.u32 	%r13, %tid.y;
	mad.lo.s32 	%r2, %r11, %r12, %r13;
	.loc 1 193 1
	setp.lt.s32	%p1, %r1, %r6;
	setp.lt.s32	%p2, %r2, %r7;
	and.pred  	%p3, %p1, %p2;
	.loc 1 193 1
	@!%p3 bra 	BB3_4;
	bra.uni 	BB3_1;

BB3_1:
	.loc 1 193 1
	cvt.rn.f32.s32	%f22, %r1;
	add.ftz.f32 	%f20, %f22, 0f3F000000;
	cvt.rn.f32.s32	%f23, %r2;
	add.ftz.f32 	%f21, %f23, 0f3F000000;
	.loc 1 193 234
	// inline asm
	tex.2d.v4.f32.f32 {%f10, %f11, %f12, %f13}, [texture0_RECT, {%f20, %f21}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f16, %f17, %f18, %f19}, [texture1_RECT, {%f20, %f21}];
	// inline asm
	.loc 1 193 1
	mad.lo.s32 	%r14, %r2, %r6, %r1;
	mul.wide.s32 	%rd9, %r14, 4;
	add.s64 	%rd10, %rd2, %rd9;
	.loc 1 193 1
	ld.global.f32 	%f24, [%rd10];
	mul.ftz.f32 	%f25, %f24, %f9;
	mov.f32 	%f26, 0f3F800000;
	.loc 1 193 1
	sub.ftz.f32 	%f27, %f26, %f25;
	mul.ftz.f32 	%f28, %f10, %f27;
	fma.rn.ftz.f32 	%f1, %f16, %f25, %f28;
	mul.ftz.f32 	%f29, %f11, %f27;
	fma.rn.ftz.f32 	%f2, %f17, %f25, %f29;
	mul.ftz.f32 	%f30, %f12, %f27;
	fma.rn.ftz.f32 	%f3, %f18, %f25, %f30;
	.loc 1 193 1
	mad.lo.s32 	%r3, %r2, %r4, %r1;
	.loc 1 193 1
	setp.eq.s32	%p4, %r5, 0;
	@%p4 bra 	BB3_3;

	mul.wide.s32 	%rd11, %r3, 16;
	add.s64 	%rd12, %rd1, %rd11;
	.loc 1 193 1
	st.global.v4.f32 	[%rd12], {%f1, %f2, %f3, %f19};
	bra.uni 	BB3_4;

BB3_3:
	mul.wide.s32 	%rd13, %r3, 8;
	add.s64 	%rd14, %rd1, %rd13;
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f1;
	mov.b16 	%rs1, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs2, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs3, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f19;
	mov.b16 	%rs4, %temp;
}
	.loc 1 193 231
	st.global.v4.u16 	[%rd14], {%rs1, %rs2, %rs3, %rs4};

BB3_4:
	.loc 1 193 2
	ret;
}

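// Mask_Blend_Inverted_ANormal: m = (1 - mask) * fg.a * opacity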
.visible .entry Mask_Blend_Inverted_ANormal(
	.param .u64 Mask_Blend_Inverted_ANormal_param_0,
	.param .u32 Mask_Blend_Inverted_ANormal_param_1,
	.param .u32 Mask_Blend_Inverted_ANormal_param_2,
	.param .u32 Mask_Blend_Inverted_ANormal_param_3,
	.param .u32 Mask_Blend_Inverted_ANormal_param_4,
	.param .u64 Mask_Blend_Inverted_ANormal_param_5,
	.param .u64 Mask_Blend_Inverted_ANormal_param_6,
	.param .u64 Mask_Blend_Inverted_ANormal_param_7,
	.param .f32 Mask_Blend_Inverted_ANormal_param_8
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<15>;
	.reg .f32 	%f<33>;
	.reg .s64 	%rd<15>;


	ld.param.u64 	%rd3, [Mask_Blend_Inverted_ANormal_param_0];
	ld.param.u32 	%r4, [Mask_Blend_Inverted_ANormal_param_1];
	ld.param.u32 	%r5, [Mask_Blend_Inverted_ANormal_param_2];
	ld.param.u32 	%r6, [Mask_Blend_Inverted_ANormal_param_3];
	ld.param.u32 	%r7, [Mask_Blend_Inverted_ANormal_param_4];
	ld.param.u64 	%rd4, [Mask_Blend_Inverted_ANormal_param_7];
	ld.param.f32 	%f9, [Mask_Blend_Inverted_ANormal_param_8];
	cvta.to.global.u64 	%rd1, %rd3;
	cvta.to.global.u64 	%rd2, %rd4;
	.loc 1 193 1
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mov.u32 	%r10, %tid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r10;
	mov.u32 	%r11, %ntid.y;
	mov.u32 	%r12, %ctaid.y;
	mov.u32 	%r13, %tid.y;
	mad.lo.s32 	%r2, %r11, %r12, %r13;
	.loc 1 193 1
	setp.lt.s32	%p1, %r1, %r6;
	setp.lt.s32	%p2, %r2, %r7;
	and.pred  	%p3, %p1, %p2;
	.loc 1 193 1
	@!%p3 bra 	BB4_4;
	bra.uni 	BB4_1;

BB4_1:
	.loc 1 193 1
	cvt.rn.f32.s32	%f22, %r1;
	add.ftz.f32 	%f20, %f22, 0f3F000000;
	cvt.rn.f32.s32	%f23, %r2;
	add.ftz.f32 	%f21, %f23, 0f3F000000;
	.loc 1 193 238
	// inline asm
	tex.2d.v4.f32.f32 {%f10, %f11, %f12, %f13}, [texture0_RECT, {%f20, %f21}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f16, %f17, %f18, %f19}, [texture1_RECT, {%f20, %f21}];
	// inline asm
	.loc 1 193 1
	mad.lo.s32 	%r14, %r2, %r6, %r1;
	mul.wide.s32 	%rd9, %r14, 4;
	add.s64 	%rd10, %rd2, %rd9;
	.loc 1 193 1
	ld.global.f32 	%f24, [%rd10];
	mov.f32 	%f25, 0f3F800000;
	.loc 1 193 1
	sub.ftz.f32 	%f26, %f25, %f24;
	mul.ftz.f32 	%f27, %f26, %f19;
	mul.ftz.f32 	%f28, %f27, %f9;
	sub.ftz.f32 	%f29, %f25, %f28;
	mul.ftz.f32 	%f30, %f10, %f29;
	fma.rn.ftz.f32 	%f2, %f16, %f28, %f30;
	mul.ftz.f32 	%f31, %f11, %f29;
	fma.rn.ftz.f32 	%f3, %f17, %f28, %f31;
	mul.ftz.f32 	%f32, %f12, %f29;
	fma.rn.ftz.f32 	%f4, %f18, %f28, %f32;
	.loc 1 193 1
	mad.lo.s32 	%r3, %r2, %r4, %r1;
	.loc 1 193 1
	setp.eq.s32	%p4, %r5, 0;
	@%p4 bra 	BB4_3;

	mul.wide.s32 	%rd11, %r3, 16;
	add.s64 	%rd12, %rd1, %rd11;
	.loc 1 193 1
	st.global.v4.f32 	[%rd12], {%f2, %f3, %f4, %f19};
	bra.uni 	BB4_4;

BB4_3:
	mul.wide.s32 	%rd13, %r3, 8;
	add.s64 	%rd14, %rd1, %rd13;
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs1, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs2, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs3, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f19;
	mov.b16 	%rs4, %temp;
}
	.loc 1 193 241
	st.global.v4.u16 	[%rd14], {%rs1, %rs2, %rs3, %rs4};

BB4_4:
	.loc 1 193 2
	ret;
}

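// Mask_Blend_Inverted_AInverted: m = (1 - mask) * (1 - fg.a) * opacity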
.visible .entry Mask_Blend_Inverted_AInverted(
	.param .u64 Mask_Blend_Inverted_AInverted_param_0,
	.param .u32 Mask_Blend_Inverted_AInverted_param_1,
	.param .u32 Mask_Blend_Inverted_AInverted_param_2,
	.param .u32 Mask_Blend_Inverted_AInverted_param_3,
	.param .u32 Mask_Blend_Inverted_AInverted_param_4,
	.param .u64 Mask_Blend_Inverted_AInverted_param_5,
	.param .u64 Mask_Blend_Inverted_AInverted_param_6,
	.param .u64 Mask_Blend_Inverted_AInverted_param_7,
	.param .f32 Mask_Blend_Inverted_AInverted_param_8
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<15>;
	.reg .f32 	%f<34>;
	.reg .s64 	%rd<15>;


	ld.param.u64 	%rd3, [Mask_Blend_Inverted_AInverted_param_0];
	ld.param.u32 	%r4, [Mask_Blend_Inverted_AInverted_param_1];
	ld.param.u32 	%r5, [Mask_Blend_Inverted_AInverted_param_2];
	ld.param.u32 	%r6, [Mask_Blend_Inverted_AInverted_param_3];
	ld.param.u32 	%r7, [Mask_Blend_Inverted_AInverted_param_4];
	ld.param.u64 	%rd4, [Mask_Blend_Inverted_AInverted_param_7];
	ld.param.f32 	%f9, [Mask_Blend_Inverted_AInverted_param_8];
	cvta.to.global.u64 	%rd1, %rd3;
	cvta.to.global.u64 	%rd2, %rd4;
	.loc 1 193 1
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mov.u32 	%r10, %tid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r10;
	mov.u32 	%r11, %ntid.y;
	mov.u32 	%r12, %ctaid.y;
	mov.u32 	%r13, %tid.y;
	mad.lo.s32 	%r2, %r11, %r12, %r13;
	.loc 1 193 1
	setp.lt.s32	%p1, %r1, %r6;
	setp.lt.s32	%p2, %r2, %r7;
	and.pred  	%p3, %p1, %p2;
	.loc 1 193 1
	@!%p3 bra 	BB5_4;
	bra.uni 	BB5_1;

BB5_1:
	.loc 1 193 1
	cvt.rn.f32.s32	%f22, %r1;
	add.ftz.f32 	%f20, %f22, 0f3F000000;
	cvt.rn.f32.s32	%f23, %r2;
	add.ftz.f32 	%f21, %f23, 0f3F000000;
	.loc 1 193 238
	// inline asm
	tex.2d.v4.f32.f32 {%f10, %f11, %f12, %f13}, [texture0_RECT, {%f20, %f21}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f16, %f17, %f18, %f19}, [texture1_RECT, {%f20, %f21}];
	// inline asm
	.loc 1 193 1
	mad.lo.s32 	%r14, %r2, %r6, %r1;
	mul.wide.s32 	%rd9, %r14, 4;
	add.s64 	%rd10, %rd2, %rd9;
	.loc 1 193 1
	ld.global.f32 	%f24, [%rd10];
	mov.f32 	%f25, 0f3F800000;
	.loc 1 193 1
	sub.ftz.f32 	%f26, %f25, %f24;
	sub.ftz.f32 	%f27, %f25, %f19;
	mul.ftz.f32 	%f28, %f26, %f27;
	mul.ftz.f32 	%f29, %f28, %f9;
	sub.ftz.f32 	%f30, %f25, %f29;
	mul.ftz.f32 	%f31, %f10, %f30;
	fma.rn.ftz.f32 	%f2, %f16, %f29, %f31;
	mul.ftz.f32 	%f32, %f11, %f30;
	fma.rn.ftz.f32 	%f3, %f17, %f29, %f32;
	mul.ftz.f32 	%f33, %f12, %f30;
	fma.rn.ftz.f32 	%f4, %f18, %f29, %f33;
	.loc 1 193 1
	mad.lo.s32 	%r3, %r2, %r4, %r1;
	.loc 1 193 1
	setp.eq.s32	%p4, %r5, 0;
	@%p4 bra 	BB5_3;

	mul.wide.s32 	%rd11, %r3, 16;
	add.s64 	%rd12, %rd1, %rd11;
	.loc 1 193 1
	st.global.v4.f32 	[%rd12], {%f2, %f3, %f4, %f19};
	bra.uni 	BB5_4;

BB5_3:
	mul.wide.s32 	%rd13, %r3, 8;
	add.s64 	%rd14, %rd1, %rd13;
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs1, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs2, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs3, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f19;
	mov.b16 	%rs4, %temp;
}
	.loc 1 193 241
	st.global.v4.u16 	[%rd14], {%rs1, %rs2, %rs3, %rs4};

BB5_4:
	.loc 1 193 2
	ret;
}

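// Mask_Blend_Inverted: m = (1 - mask) * opacity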
.visible .entry Mask_Blend_Inverted(
	.param .u64 Mask_Blend_Inverted_param_0,
	.param .u32 Mask_Blend_Inverted_param_1,
	.param .u32 Mask_Blend_Inverted_param_2,
	.param .u32 Mask_Blend_Inverted_param_3,
	.param .u32 Mask_Blend_Inverted_param_4,
	.param .u64 Mask_Blend_Inverted_param_5,
	.param .u64 Mask_Blend_Inverted_param_6,
	.param .u64 Mask_Blend_Inverted_param_7,
	.param .f32 Mask_Blend_Inverted_param_8
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<15>;
	.reg .f32 	%f<32>;
	.reg .s64 	%rd<15>;


	ld.param.u64 	%rd3, [Mask_Blend_Inverted_param_0];
	ld.param.u32 	%r4, [Mask_Blend_Inverted_param_1];
	ld.param.u32 	%r5, [Mask_Blend_Inverted_param_2];
	ld.param.u32 	%r6, [Mask_Blend_Inverted_param_3];
	ld.param.u32 	%r7, [Mask_Blend_Inverted_param_4];
	ld.param.u64 	%rd4, [Mask_Blend_Inverted_param_7];
	ld.param.f32 	%f9, [Mask_Blend_Inverted_param_8];
	cvta.to.global.u64 	%rd1, %rd3;
	cvta.to.global.u64 	%rd2, %rd4;
	.loc 1 193 1
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mov.u32 	%r10, %tid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r10;
	mov.u32 	%r11, %ntid.y;
	mov.u32 	%r12, %ctaid.y;
	mov.u32 	%r13, %tid.y;
	mad.lo.s32 	%r2, %r11, %r12, %r13;
	.loc 1 193 1
	setp.lt.s32	%p1, %r1, %r6;
	setp.lt.s32	%p2, %r2, %r7;
	and.pred  	%p3, %p1, %p2;
	.loc 1 193 1
	@!%p3 bra 	BB6_4;
	bra.uni 	BB6_1;

BB6_1:
	.loc 1 193 1
	cvt.rn.f32.s32	%f22, %r1;
	add.ftz.f32 	%f20, %f22, 0f3F000000;
	cvt.rn.f32.s32	%f23, %r2;
	add.ftz.f32 	%f21, %f23, 0f3F000000;
	.loc 1 193 238
	// inline asm
	tex.2d.v4.f32.f32 {%f10, %f11, %f12, %f13}, [texture0_RECT, {%f20, %f21}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f16, %f17, %f18, %f19}, [texture1_RECT, {%f20, %f21}];
	// inline asm
	.loc 1 193 1
	mad.lo.s32 	%r14, %r2, %r6, %r1;
	mul.wide.s32 	%rd9, %r14, 4;
	add.s64 	%rd10, %rd2, %rd9;
	.loc 1 193 1
	ld.global.f32 	%f24, [%rd10];
	mov.f32 	%f25, 0f3F800000;
	.loc 1 193 1
	sub.ftz.f32 	%f26, %f25, %f24;
	mul.ftz.f32 	%f27, %f26, %f9;
	sub.ftz.f32 	%f28, %f25, %f27;
	mul.ftz.f32 	%f29, %f10, %f28;
	fma.rn.ftz.f32 	%f1, %f16, %f27, %f29;
	mul.ftz.f32 	%f30, %f11, %f28;
	fma.rn.ftz.f32 	%f2, %f17, %f27, %f30;
	mul.ftz.f32 	%f31, %f12, %f28;
	fma.rn.ftz.f32 	%f3, %f18, %f27, %f31;
	.loc 1 193 1
	mad.lo.s32 	%r3, %r2, %r4, %r1;
	.loc 1 193 1
	setp.eq.s32	%p4, %r5, 0;
	@%p4 bra 	BB6_3;

	mul.wide.s32 	%rd11, %r3, 16;
	add.s64 	%rd12, %rd1, %rd11;
	.loc 1 193 1
	st.global.v4.f32 	[%rd12], {%f1, %f2, %f3, %f19};
	bra.uni 	BB6_4;

BB6_3:
	mul.wide.s32 	%rd13, %r3, 8;
	add.s64 	%rd14, %rd1, %rd13;
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f1;
	mov.b16 	%rs1, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs2, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs3, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f19;
	mov.b16 	%rs4, %temp;
}
	.loc 1 193 241
	st.global.v4.u16 	[%rd14], {%rs1, %rs2, %rs3, %rs4};

BB6_4:
	.loc 1 193 2
	ret;
}

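// Blend_Normal: no mask buffer; in the Blend_* kernels param_7 is the
// opacity, and m = fg.a * opacity.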
.visible .entry Blend_Normal(
	.param .u64 Blend_Normal_param_0,
	.param .u32 Blend_Normal_param_1,
	.param .u32 Blend_Normal_param_2,
	.param .u32 Blend_Normal_param_3,
	.param .u32 Blend_Normal_param_4,
	.param .u64 Blend_Normal_param_5,
	.param .u64 Blend_Normal_param_6,
	.param .f32 Blend_Normal_param_7
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<14>;
	.reg .f32 	%f<30>;
	.reg .s64 	%rd<11>;


	ld.param.u64 	%rd2, [Blend_Normal_param_0];
	ld.param.u32 	%r4, [Blend_Normal_param_1];
	ld.param.u32 	%r5, [Blend_Normal_param_2];
	ld.param.u32 	%r6, [Blend_Normal_param_3];
	ld.param.u32 	%r7, [Blend_Normal_param_4];
	ld.param.f32 	%f9, [Blend_Normal_param_7];
	cvta.to.global.u64 	%rd1, %rd2;
	.loc 1 193 1
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mov.u32 	%r10, %tid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r10;
	mov.u32 	%r11, %ntid.y;
	mov.u32 	%r12, %ctaid.y;
	mov.u32 	%r13, %tid.y;
	mad.lo.s32 	%r2, %r11, %r12, %r13;
	.loc 1 193 1
	setp.lt.s32	%p1, %r1, %r6;
	setp.lt.s32	%p2, %r2, %r7;
	and.pred  	%p3, %p1, %p2;
	.loc 1 193 1
	@!%p3 bra 	BB7_4;
	bra.uni 	BB7_1;

BB7_1:
	.loc 1 193 1
	cvt.rn.f32.s32	%f22, %r1;
	add.ftz.f32 	%f20, %f22, 0f3F000000;
	cvt.rn.f32.s32	%f23, %r2;
	add.ftz.f32 	%f21, %f23, 0f3F000000;
	.loc 1 193 238
	// inline asm
	tex.2d.v4.f32.f32 {%f10, %f11, %f12, %f13}, [texture0_RECT, {%f20, %f21}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f16, %f17, %f18, %f19}, [texture1_RECT, {%f20, %f21}];
	// inline asm
	.loc 1 193 1
	mul.ftz.f32 	%f24, %f19, %f9;
	mov.f32 	%f25, 0f3F800000;
	.loc 1 193 1
	sub.ftz.f32 	%f26, %f25, %f24;
	mul.ftz.f32 	%f27, %f10, %f26;
	fma.rn.ftz.f32 	%f2, %f16, %f24, %f27;
	mul.ftz.f32 	%f28, %f11, %f26;
	fma.rn.ftz.f32 	%f3, %f17, %f24, %f28;
	mul.ftz.f32 	%f29, %f12, %f26;
	fma.rn.ftz.f32 	%f4, %f18, %f24, %f29;
	.loc 1 193 1
	mad.lo.s32 	%r3, %r2, %r4, %r1;
	.loc 1 193 1
	setp.eq.s32	%p4, %r5, 0;
	@%p4 bra 	BB7_3;

	mul.wide.s32 	%rd7, %r3, 16;
	add.s64 	%rd8, %rd1, %rd7;
	.loc 1 193 1
	st.global.v4.f32 	[%rd8], {%f2, %f3, %f4, %f19};
	bra.uni 	BB7_4;

BB7_3:
	mul.wide.s32 	%rd9, %r3, 8;
	add.s64 	%rd10, %rd1, %rd9;
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs1, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs2, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs3, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f19;
	mov.b16 	%rs4, %temp;
}
	.loc 1 193 241
	st.global.v4.u16 	[%rd10], {%rs1, %rs2, %rs3, %rs4};

BB7_4:
	.loc 1 193 2
	ret;
}

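// Blend_Inverted: no mask buffer; param_7 is the opacity, and
// m = (1 - fg.a) * opacity.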
.visible .entry Blend_Inverted(
	.param .u64 Blend_Inverted_param_0,
	.param .u32 Blend_Inverted_param_1,
	.param .u32 Blend_Inverted_param_2,
	.param .u32 Blend_Inverted_param_3,
	.param .u32 Blend_Inverted_param_4,
	.param .u64 Blend_Inverted_param_5,
	.param .u64 Blend_Inverted_param_6,
	.param .f32 Blend_Inverted_param_7
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<14>;
	.reg .f32 	%f<31>;
	.reg .s64 	%rd<11>;


	ld.param.u64 	%rd2, [Blend_Inverted_param_0];
	ld.param.u32 	%r4, [Blend_Inverted_param_1];
	ld.param.u32 	%r5, [Blend_Inverted_param_2];
	ld.param.u32 	%r6, [Blend_Inverted_param_3];
	ld.param.u32 	%r7, [Blend_Inverted_param_4];
	ld.param.f32 	%f9, [Blend_Inverted_param_7];
	cvta.to.global.u64 	%rd1, %rd2;
	.loc 1 193 1
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mov.u32 	%r10, %tid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r10;
	mov.u32 	%r11, %ntid.y;
	mov.u32 	%r12, %ctaid.y;
	mov.u32 	%r13, %tid.y;
	mad.lo.s32 	%r2, %r11, %r12, %r13;
	.loc 1 193 1
	setp.lt.s32	%p1, %r1, %r6;
	setp.lt.s32	%p2, %r2, %r7;
	and.pred  	%p3, %p1, %p2;
	.loc 1 193 1
	@!%p3 bra 	BB8_4;
	bra.uni 	BB8_1;

BB8_1:
	.loc 1 193 1
	cvt.rn.f32.s32	%f22, %r1;
	add.ftz.f32 	%f20, %f22, 0f3F000000;
	cvt.rn.f32.s32	%f23, %r2;
	add.ftz.f32 	%f21, %f23, 0f3F000000;
	.loc 1 193 238
	// inline asm
	tex.2d.v4.f32.f32 {%f10, %f11, %f12, %f13}, [texture0_RECT, {%f20, %f21}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f16, %f17, %f18, %f19}, [texture1_RECT, {%f20, %f21}];
	// inline asm
	mov.f32 	%f24, 0f3F800000;
	.loc 1 193 1
	sub.ftz.f32 	%f25, %f24, %f19;
	mul.ftz.f32 	%f26, %f25, %f9;
	sub.ftz.f32 	%f27, %f24, %f26;
	mul.ftz.f32 	%f28, %f10, %f27;
	fma.rn.ftz.f32 	%f2, %f16, %f26, %f28;
	mul.ftz.f32 	%f29, %f11, %f27;
	fma.rn.ftz.f32 	%f3, %f17, %f26, %f29;
	mul.ftz.f32 	%f30, %f12, %f27;
	fma.rn.ftz.f32 	%f4, %f18, %f26, %f30;
	.loc 1 193 1
	mad.lo.s32 	%r3, %r2, %r4, %r1;
	.loc 1 193 1
	setp.eq.s32	%p4, %r5, 0;
	@%p4 bra 	BB8_3;

	mul.wide.s32 	%rd7, %r3, 16;
	add.s64 	%rd8, %rd1, %rd7;
	.loc 1 193 1
	st.global.v4.f32 	[%rd8], {%f2, %f3, %f4, %f19};
	bra.uni 	BB8_4;

BB8_3:
	mul.wide.s32 	%rd9, %r3, 8;
	add.s64 	%rd10, %rd1, %rd9;
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs1, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs2, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs3, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f19;
	mov.b16 	%rs4, %temp;
}
	.loc 1 193 241
	st.global.v4.u16 	[%rd10], {%rs1, %rs2, %rs3, %rs4};

BB8_4:
	.loc 1 193 2
	ret;
}

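//
// CopyMaskToBufferKernel -- hedged reading: samples inMaskImage at the pixel
// centre and stores the first channel as a float into the buffer in param_1
// (width param_2, height param_3); param_0 is never referenced in this body.
//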
.visible .entry CopyMaskToBufferKernel(
	.param .u64 CopyMaskToBufferKernel_param_0,
	.param .u64 CopyMaskToBufferKernel_param_1,
	.param .u32 CopyMaskToBufferKernel_param_2,
	.param .u32 CopyMaskToBufferKernel_param_3
)
{
	.reg .pred 	%p<4>;
	.reg .s32 	%r<12>;
	.reg .f32 	%f<9>;
	.reg .s64 	%rd<6>;


	ld.param.u64 	%rd2, [CopyMaskToBufferKernel_param_1];
	ld.param.u32 	%r3, [CopyMaskToBufferKernel_param_2];
	ld.param.u32 	%r4, [CopyMaskToBufferKernel_param_3];
	cvta.to.global.u64 	%rd1, %rd2;
	.loc 1 272 1
	mov.u32 	%r5, %ntid.x;
	mov.u32 	%r6, %ctaid.x;
	mov.u32 	%r7, %tid.x;
	mad.lo.s32 	%r1, %r5, %r6, %r7;
	mov.u32 	%r8, %ntid.y;
	mov.u32 	%r9, %ctaid.y;
	mov.u32 	%r10, %tid.y;
	mad.lo.s32 	%r2, %r8, %r9, %r10;
	.loc 1 272 1
	setp.lt.s32	%p1, %r1, %r3;
	setp.lt.s32	%p2, %r2, %r4;
	and.pred  	%p3, %p1, %p2;
	.loc 1 272 1
	@!%p3 bra 	BB9_2;
	bra.uni 	BB9_1;

BB9_1:
	.loc 1 272 1
	cvt.rn.f32.s32	%f7, %r1;
	add.ftz.f32 	%f5, %f7, 0f3F000000;
	cvt.rn.f32.s32	%f8, %r2;
	add.ftz.f32 	%f6, %f8, 0f3F000000;
	.loc 1 272 233
	// inline asm
	tex.2d.v4.f32.f32 {%f1, %f2, %f3, %f4}, [inMaskImage, {%f5, %f6}];
	// inline asm
	.loc 1 272 1
	mad.lo.s32 	%r11, %r2, %r3, %r1;
	mul.wide.s32 	%rd4, %r11, 4;
	add.s64 	%rd5, %rd1, %rd4;
	.loc 1 272 1
	st.global.f32 	[%rd5], %f1;

BB9_2:
	.loc 1 272 2
	ret;
}
