//
// Generated by NVIDIA NVVM Compiler
// Compiler built on Fri Jul 25 04:36:16 2014 (1406288176)
// Cuda compilation tools, release 6.5, V6.5.13
//

.version 4.1
.target sm_30
.address_size 64

.global .texref texture0_RECT;
.global .texref texture1_RECT;
.global .texref inMaskImage;
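
//
// Annotation (inferred from the code below, not compiler output): this module
// appears to belong to an image-compositing pipeline. It contains a
// shading-mask evaluator (ShadingMaskKernel), six masked blend variants
// (Mask_Blend_*), two unmasked blends (Blend_*), a texture-to-buffer mask
// copy (CopyMaskToBufferKernel), and a tiled triangle rasterizer
// (RasterizationKernel). texture0_RECT looks like the backdrop image,
// texture1_RECT the layer being blended in, and inMaskImage an external mask.
//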

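//
// ShadingMaskKernel (annotation; parameter roles inferred from the body):
//   param_0/param_1  map column x to a coordinate f = (p0 - p1) / width * x + p1
//   param_2/param_3  image width and height (bounds check, output indexing)
//   param_4          output float buffer, one value per pixel
//   param_5          per-row table: u32 row offsets followed by 40-byte
//                    (10-float) segment records
// Each record appears to hold two x-positions (floats 1 and 6), three
// attribute pairs linearly interpolated between them by inverse distance
// (floats 2/7, 3/8, 4/9, with an average fallback when the span is below
// ~1e-3), and two integer flags (floats 0 and 5) that toggle a parity bit.
// The interpolated attributes feed a smoothstep-like curve built from two
// sign-preserving pow terms (the lg2.approx/ex2.approx pairs, exponent
// 2*g + 1); the per-pixel maximum over segments is clamped to [0, 1], the
// parity bit can force 1.0, and rows with no segments write 0.
//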
.visible .entry ShadingMaskKernel(
	.param .f32 ShadingMaskKernel_param_0,
	.param .f32 ShadingMaskKernel_param_1,
	.param .u32 ShadingMaskKernel_param_2,
	.param .u32 ShadingMaskKernel_param_3,
	.param .u64 ShadingMaskKernel_param_4,
	.param .u64 ShadingMaskKernel_param_5
)
{
	.reg .pred 	%p<23>;
	.reg .s16 	%rs<15>;
	.reg .s32 	%r<45>;
	.reg .f32 	%f<83>;
	.reg .s64 	%rd<33>;


	ld.param.f32 	%f19, [ShadingMaskKernel_param_0];
	ld.param.f32 	%f20, [ShadingMaskKernel_param_1];
	ld.param.u32 	%r11, [ShadingMaskKernel_param_2];
	ld.param.u32 	%r12, [ShadingMaskKernel_param_3];
	ld.param.u64 	%rd10, [ShadingMaskKernel_param_4];
	ld.param.u64 	%rd11, [ShadingMaskKernel_param_5];
	cvta.to.global.u64 	%rd1, %rd11;
	mov.u32 	%r13, %ntid.x;
	mov.u32 	%r14, %ctaid.x;
	mov.u32 	%r15, %tid.x;
	mad.lo.s32 	%r1, %r13, %r14, %r15;
	mov.u32 	%r16, %ntid.y;
	mov.u32 	%r17, %ctaid.y;
	mov.u32 	%r18, %tid.y;
	mad.lo.s32 	%r2, %r16, %r17, %r18;
	setp.lt.s32	%p1, %r1, 0;
	setp.ge.s32	%p2, %r1, %r11;
	or.pred  	%p3, %p1, %p2;
	setp.lt.s32	%p4, %r2, 0;
	or.pred  	%p5, %p3, %p4;
	setp.ge.s32	%p6, %r2, %r12;
	or.pred  	%p7, %p5, %p6;
	@%p7 bra 	BB0_21;

	mul.wide.s32 	%rd12, %r2, 4;
	add.s64 	%rd13, %rd1, %rd12;
	ld.global.u32 	%r3, [%rd13];
	ld.global.u32 	%r19, [%rd13+4];
	sub.s32 	%r4, %r19, %r3;
	setp.ne.s32	%p8, %r19, %r3;
	mov.f32 	%f82, 0f00000000;
	@%p8 bra 	BB0_2;
	bra.uni 	BB0_20;

BB0_2:
	sub.ftz.f32 	%f21, %f19, %f20;
	cvt.rn.f32.u32	%f22, %r11;
	div.approx.ftz.f32 	%f23, %f21, %f22;
	cvt.rn.f32.s32	%f24, %r1;
	fma.rn.ftz.f32 	%f1, %f23, %f24, %f20;
	mul.lo.s32 	%r22, %r3, 10;
	cvt.u64.u32	%rd14, %r22;
	cvt.u64.u32	%rd15, %r12;
	add.s64 	%rd16, %rd15, %rd14;
	shl.b64 	%rd17, %rd16, 2;
	add.s64 	%rd18, %rd17, %rd1;
	add.s64 	%rd31, %rd18, 20;
	mov.u32 	%r42, 0;
	mov.u16 	%rs14, 0;
	mov.u32 	%r44, %r42;

BB0_3:
	mov.u32 	%r38, %r42;
	mov.u32 	%r43, %r38;
	ld.global.f32 	%f25, [%rd31+4];
	add.s32 	%r44, %r44, 1;
	setp.geu.ftz.f32	%p9, %f25, %f1;
	@%p9 bra 	BB0_5;

	ld.global.u32 	%r23, [%rd31+20];
	ld.global.u32 	%r24, [%rd31];
	or.b32  	%r25, %r23, %r24;
	setp.eq.s32	%p10, %r25, 0;
	and.b16  	%rs9, %rs14, 255;
	setp.eq.s16	%p11, %rs9, 0;
	selp.u16	%rs10, 1, 0, %p11;
	selp.b16	%rs14, %rs14, %rs10, %p10;
	mov.u32 	%r43, %r44;

BB0_5:
	mov.u32 	%r42, %r43;
	add.s64 	%rd31, %rd31, 40;
	setp.lt.u32	%p12, %r44, %r4;
	@%p12 bra 	BB0_3;

	setp.ne.s32	%p13, %r42, %r4;
	@%p13 bra 	BB0_7;
	bra.uni 	BB0_20;

BB0_7:
	setp.lt.u32	%p14, %r42, %r4;
	@%p14 bra 	BB0_9;

	mov.f32 	%f81, 0f00000000;
	bra.uni 	BB0_19;

BB0_9:
	mul.wide.u32 	%rd24, %r42, 10;
	shl.b64 	%rd25, %rd24, 2;
	add.s64 	%rd27, %rd18, %rd25;
	add.s64 	%rd32, %rd27, 20;
	mov.f32 	%f81, 0f00000000;
	mov.u32 	%r41, %r42;

BB0_10:
	ld.global.f32 	%f3, [%rd32+-16];
	setp.gt.ftz.f32	%p15, %f3, %f1;
	@%p15 bra 	BB0_18;

	ld.global.u32 	%r27, [%rd32];
	setp.eq.s32	%p16, %r27, 0;
	and.b16  	%rs11, %rs14, 255;
	setp.eq.s16	%p17, %rs11, 0;
	selp.u16	%rs12, 1, 0, %p17;
	selp.b16	%rs14, %rs14, %rs12, %p16;
	sub.ftz.f32 	%f28, %f1, %f3;
	abs.ftz.f32 	%f29, %f28;
	ld.global.f32 	%f30, [%rd32+4];
	sub.ftz.f32 	%f31, %f1, %f30;
	abs.ftz.f32 	%f32, %f31;
	add.ftz.f32 	%f33, %f29, %f32;
	rcp.approx.ftz.f32 	%f34, %f33;
	setp.gtu.ftz.f32	%p18, %f33, 0f3A83126F;
	selp.f32	%f35, %f29, 0f3F800000, %p18;
	selp.f32	%f36, %f32, 0f3F800000, %p18;
	selp.f32	%f37, %f34, 0f3F000000, %p18;
	ld.global.f32 	%f38, [%rd32+-12];
	ld.global.f32 	%f39, [%rd32+8];
	mul.ftz.f32 	%f40, %f39, %f35;
	fma.rn.ftz.f32 	%f41, %f38, %f36, %f40;
	mul.ftz.f32 	%f4, %f41, %f37;
	ld.global.f32 	%f42, [%rd32+-8];
	ld.global.f32 	%f43, [%rd32+12];
	mul.ftz.f32 	%f44, %f43, %f35;
	fma.rn.ftz.f32 	%f45, %f42, %f36, %f44;
	mul.ftz.f32 	%f46, %f45, %f37;
	ld.global.f32 	%f47, [%rd32+-4];
	ld.global.f32 	%f48, [%rd32+16];
	mul.ftz.f32 	%f49, %f48, %f35;
	fma.rn.ftz.f32 	%f50, %f47, %f36, %f49;
	mul.ftz.f32 	%f5, %f50, %f37;
	fma.rn.ftz.f32 	%f6, %f46, 0f40000000, 0f3F800000;
	setp.ltu.ftz.f32	%p19, %f4, 0f00000000;
	@%p19 bra 	BB0_13;

	lg2.approx.ftz.f32 	%f51, %f4;
	mul.ftz.f32 	%f52, %f51, %f6;
	ex2.approx.ftz.f32 	%f79, %f52;
	bra.uni 	BB0_14;

BB0_13:
	neg.ftz.f32 	%f53, %f4;
	lg2.approx.ftz.f32 	%f54, %f53;
	mul.ftz.f32 	%f55, %f54, %f6;
	ex2.approx.ftz.f32 	%f56, %f55;
	neg.ftz.f32 	%f79, %f56;

BB0_14:
	mov.f32 	%f57, 0f3F800000;
	sub.ftz.f32 	%f10, %f57, %f4;
	setp.ltu.ftz.f32	%p20, %f10, 0f00000000;
	@%p20 bra 	BB0_16;

	lg2.approx.ftz.f32 	%f58, %f10;
	mul.ftz.f32 	%f59, %f58, %f6;
	ex2.approx.ftz.f32 	%f80, %f59;
	bra.uni 	BB0_17;

BB0_16:
	neg.ftz.f32 	%f60, %f10;
	lg2.approx.ftz.f32 	%f61, %f60;
	mul.ftz.f32 	%f62, %f61, %f6;
	ex2.approx.ftz.f32 	%f63, %f62;
	neg.ftz.f32 	%f80, %f63;

BB0_17:
	add.ftz.f32 	%f64, %f5, %f5;
	add.ftz.f32 	%f65, %f64, 0fBF800000;
	cvt.ftz.sat.f32.f32	%f66, %f65;
	cvt.ftz.sat.f32.f32	%f67, %f64;
	sub.ftz.f32 	%f68, %f67, %f66;
	fma.rn.ftz.f32 	%f69, %f68, %f4, %f66;
	sub.ftz.f32 	%f71, %f57, %f80;
	sub.ftz.f32 	%f72, %f71, %f79;
	fma.rn.ftz.f32 	%f73, %f72, %f69, %f79;
	max.ftz.f32 	%f81, %f81, %f73;

BB0_18:
	add.s64 	%rd32, %rd32, 40;
	add.s32 	%r41, %r41, 1;
	setp.lt.u32	%p21, %r41, %r4;
	@%p21 bra 	BB0_10;

BB0_19:
	and.b16  	%rs13, %rs14, 255;
	setp.eq.s16	%p22, %rs13, 0;
	selp.f32	%f74, %f81, 0f3F800000, %p22;
	mov.f32 	%f75, 0f3F800000;
	min.ftz.f32 	%f82, %f74, %f75;

BB0_20:
	mad.lo.s32 	%r36, %r2, %r11, %r1;
	cvta.to.global.u64 	%rd28, %rd10;
	mul.wide.u32 	%rd29, %r36, 4;
	add.s64 	%rd30, %rd28, %rd29;
	st.global.f32 	[%rd30], %f82;

BB0_21:
	ret;
}

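//
// Mask_Blend_Normal_ANormal (annotation; names in the sketch are
// illustrative, not from the source): composites texture1_RECT over
// texture0_RECT, modulated by a float mask buffer (param_7), the source
// alpha, and a global opacity (param_8). Roughly, per pixel (x, y):
//
//   float4 dst = tex2D(texture0_RECT, x + 0.5f, y + 0.5f);
//   float4 src = tex2D(texture1_RECT, x + 0.5f, y + 0.5f);
//   float  m   = mask[y * width + x] * src.w * opacity;
//   out.rgb    = src.rgb * m + dst.rgb * (1.0f - m);
//   out.w      = src.w;
//
// param_1 is the output row pitch, param_3/param_4 the width/height bounds;
// param_5 and param_6 are not referenced in this compiled code. param_2
// selects the store format: nonzero writes float4 (st.global.v4.f32), zero
// converts to half and writes half4 (st.global.v4.u16). Every Mask_Blend_* /
// Blend_* entry below shares this skeleton; only the mask factor m changes.
//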
.visible .entry Mask_Blend_Normal_ANormal(
	.param .u64 Mask_Blend_Normal_ANormal_param_0,
	.param .u32 Mask_Blend_Normal_ANormal_param_1,
	.param .u32 Mask_Blend_Normal_ANormal_param_2,
	.param .u32 Mask_Blend_Normal_ANormal_param_3,
	.param .u32 Mask_Blend_Normal_ANormal_param_4,
	.param .u64 Mask_Blend_Normal_ANormal_param_5,
	.param .u64 Mask_Blend_Normal_ANormal_param_6,
	.param .u64 Mask_Blend_Normal_ANormal_param_7,
	.param .f32 Mask_Blend_Normal_ANormal_param_8
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<15>;
	.reg .f32 	%f<28>;
	.reg .s64 	%rd<16>;


	ld.param.u64 	%rd4, [Mask_Blend_Normal_ANormal_param_0];
	ld.param.u32 	%r3, [Mask_Blend_Normal_ANormal_param_1];
	ld.param.u32 	%r4, [Mask_Blend_Normal_ANormal_param_2];
	ld.param.u32 	%r5, [Mask_Blend_Normal_ANormal_param_3];
	ld.param.u32 	%r6, [Mask_Blend_Normal_ANormal_param_4];
	ld.param.u64 	%rd3, [Mask_Blend_Normal_ANormal_param_7];
	ld.param.f32 	%f5, [Mask_Blend_Normal_ANormal_param_8];
	cvta.to.global.u64 	%rd1, %rd4;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mov.u32 	%r9, %tid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r9;
	mov.u32 	%r10, %ntid.y;
	mov.u32 	%r11, %ctaid.y;
	mov.u32 	%r12, %tid.y;
	mad.lo.s32 	%r2, %r10, %r11, %r12;
	setp.lt.s32	%p1, %r1, %r5;
	setp.lt.s32	%p2, %r2, %r6;
	and.pred  	%p3, %p1, %p2;
	@!%p3 bra 	BB1_4;
	bra.uni 	BB1_1;

BB1_1:
	cvta.to.global.u64 	%rd7, %rd3;
	cvt.rn.f32.s32	%f18, %r1;
	add.ftz.f32 	%f16, %f18, 0f3F000000;
	cvt.rn.f32.s32	%f19, %r2;
	add.ftz.f32 	%f17, %f19, 0f3F000000;
	// inline asm
	tex.2d.v4.f32.f32 {%f6, %f7, %f8, %f9}, [texture0_RECT, {%f16, %f17}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f12, %f13, %f14, %f15}, [texture1_RECT, {%f16, %f17}];
	// inline asm
	mad.lo.s32 	%r13, %r2, %r5, %r1;
	mul.wide.s32 	%rd10, %r13, 4;
	add.s64 	%rd11, %rd7, %rd10;
	ld.global.f32 	%f20, [%rd11];
	mul.ftz.f32 	%f21, %f20, %f15;
	mul.ftz.f32 	%f22, %f21, %f5;
	mov.f32 	%f23, 0f3F800000;
	sub.ftz.f32 	%f24, %f23, %f22;
	mul.ftz.f32 	%f25, %f6, %f24;
	fma.rn.ftz.f32 	%f2, %f12, %f22, %f25;
	mul.ftz.f32 	%f26, %f7, %f24;
	fma.rn.ftz.f32 	%f3, %f13, %f22, %f26;
	mul.ftz.f32 	%f27, %f8, %f24;
	fma.rn.ftz.f32 	%f4, %f14, %f22, %f27;
	mad.lo.s32 	%r14, %r2, %r3, %r1;
	cvt.s64.s32	%rd2, %r14;
	setp.eq.s32	%p4, %r4, 0;
	@%p4 bra 	BB1_3;

	shl.b64 	%rd12, %rd2, 4;
	add.s64 	%rd13, %rd1, %rd12;
	st.global.v4.f32 	[%rd13], {%f2, %f3, %f4, %f15};
	bra.uni 	BB1_4;

BB1_3:
	shl.b64 	%rd14, %rd2, 3;
	add.s64 	%rd15, %rd1, %rd14;
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f15;
	mov.b16 	%rs1, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs2, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs3, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs4, %temp;
}
	st.global.v4.u16 	[%rd15], {%rs4, %rs3, %rs2, %rs1};

BB1_4:
	ret;
}

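// Mask_Blend_Normal_AInverted (annotation): as Mask_Blend_Normal_ANormal,
// but the mask is scaled by the inverted source alpha:
//   m = mask[y * width + x] * (1.0f - src.w) * opacity;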
.visible .entry Mask_Blend_Normal_AInverted(
	.param .u64 Mask_Blend_Normal_AInverted_param_0,
	.param .u32 Mask_Blend_Normal_AInverted_param_1,
	.param .u32 Mask_Blend_Normal_AInverted_param_2,
	.param .u32 Mask_Blend_Normal_AInverted_param_3,
	.param .u32 Mask_Blend_Normal_AInverted_param_4,
	.param .u64 Mask_Blend_Normal_AInverted_param_5,
	.param .u64 Mask_Blend_Normal_AInverted_param_6,
	.param .u64 Mask_Blend_Normal_AInverted_param_7,
	.param .f32 Mask_Blend_Normal_AInverted_param_8
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<15>;
	.reg .f32 	%f<29>;
	.reg .s64 	%rd<16>;


	ld.param.u64 	%rd4, [Mask_Blend_Normal_AInverted_param_0];
	ld.param.u32 	%r3, [Mask_Blend_Normal_AInverted_param_1];
	ld.param.u32 	%r4, [Mask_Blend_Normal_AInverted_param_2];
	ld.param.u32 	%r5, [Mask_Blend_Normal_AInverted_param_3];
	ld.param.u32 	%r6, [Mask_Blend_Normal_AInverted_param_4];
	ld.param.u64 	%rd3, [Mask_Blend_Normal_AInverted_param_7];
	ld.param.f32 	%f5, [Mask_Blend_Normal_AInverted_param_8];
	cvta.to.global.u64 	%rd1, %rd4;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mov.u32 	%r9, %tid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r9;
	mov.u32 	%r10, %ntid.y;
	mov.u32 	%r11, %ctaid.y;
	mov.u32 	%r12, %tid.y;
	mad.lo.s32 	%r2, %r10, %r11, %r12;
	setp.lt.s32	%p1, %r1, %r5;
	setp.lt.s32	%p2, %r2, %r6;
	and.pred  	%p3, %p1, %p2;
	@!%p3 bra 	BB2_4;
	bra.uni 	BB2_1;

BB2_1:
	cvta.to.global.u64 	%rd7, %rd3;
	cvt.rn.f32.s32	%f18, %r1;
	add.ftz.f32 	%f16, %f18, 0f3F000000;
	cvt.rn.f32.s32	%f19, %r2;
	add.ftz.f32 	%f17, %f19, 0f3F000000;
	// inline asm
	tex.2d.v4.f32.f32 {%f6, %f7, %f8, %f9}, [texture0_RECT, {%f16, %f17}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f12, %f13, %f14, %f15}, [texture1_RECT, {%f16, %f17}];
	// inline asm
	mad.lo.s32 	%r13, %r2, %r5, %r1;
	mul.wide.s32 	%rd10, %r13, 4;
	add.s64 	%rd11, %rd7, %rd10;
	mov.f32 	%f20, 0f3F800000;
	sub.ftz.f32 	%f21, %f20, %f15;
	ld.global.f32 	%f22, [%rd11];
	mul.ftz.f32 	%f23, %f22, %f21;
	mul.ftz.f32 	%f24, %f23, %f5;
	sub.ftz.f32 	%f25, %f20, %f24;
	mul.ftz.f32 	%f26, %f6, %f25;
	fma.rn.ftz.f32 	%f2, %f12, %f24, %f26;
	mul.ftz.f32 	%f27, %f7, %f25;
	fma.rn.ftz.f32 	%f3, %f13, %f24, %f27;
	mul.ftz.f32 	%f28, %f8, %f25;
	fma.rn.ftz.f32 	%f4, %f14, %f24, %f28;
	mad.lo.s32 	%r14, %r2, %r3, %r1;
	cvt.s64.s32	%rd2, %r14;
	setp.eq.s32	%p4, %r4, 0;
	@%p4 bra 	BB2_3;

	shl.b64 	%rd12, %rd2, 4;
	add.s64 	%rd13, %rd1, %rd12;
	st.global.v4.f32 	[%rd13], {%f2, %f3, %f4, %f15};
	bra.uni 	BB2_4;

BB2_3:
	shl.b64 	%rd14, %rd2, 3;
	add.s64 	%rd15, %rd1, %rd14;
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f15;
	mov.b16 	%rs1, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs2, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs3, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs4, %temp;
}
	st.global.v4.u16 	[%rd15], {%rs4, %rs3, %rs2, %rs1};

BB2_4:
	ret;
}

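// Mask_Blend_Normal (annotation): as above, with no alpha modulation:
//   m = mask[y * width + x] * opacity;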
.visible .entry Mask_Blend_Normal(
	.param .u64 Mask_Blend_Normal_param_0,
	.param .u32 Mask_Blend_Normal_param_1,
	.param .u32 Mask_Blend_Normal_param_2,
	.param .u32 Mask_Blend_Normal_param_3,
	.param .u32 Mask_Blend_Normal_param_4,
	.param .u64 Mask_Blend_Normal_param_5,
	.param .u64 Mask_Blend_Normal_param_6,
	.param .u64 Mask_Blend_Normal_param_7,
	.param .f32 Mask_Blend_Normal_param_8
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<15>;
	.reg .f32 	%f<27>;
	.reg .s64 	%rd<16>;


	ld.param.u64 	%rd4, [Mask_Blend_Normal_param_0];
	ld.param.u32 	%r3, [Mask_Blend_Normal_param_1];
	ld.param.u32 	%r4, [Mask_Blend_Normal_param_2];
	ld.param.u32 	%r5, [Mask_Blend_Normal_param_3];
	ld.param.u32 	%r6, [Mask_Blend_Normal_param_4];
	ld.param.u64 	%rd3, [Mask_Blend_Normal_param_7];
	ld.param.f32 	%f5, [Mask_Blend_Normal_param_8];
	cvta.to.global.u64 	%rd1, %rd4;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mov.u32 	%r9, %tid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r9;
	mov.u32 	%r10, %ntid.y;
	mov.u32 	%r11, %ctaid.y;
	mov.u32 	%r12, %tid.y;
	mad.lo.s32 	%r2, %r10, %r11, %r12;
	setp.lt.s32	%p1, %r1, %r5;
	setp.lt.s32	%p2, %r2, %r6;
	and.pred  	%p3, %p1, %p2;
	@!%p3 bra 	BB3_4;
	bra.uni 	BB3_1;

BB3_1:
	cvta.to.global.u64 	%rd7, %rd3;
	cvt.rn.f32.s32	%f18, %r1;
	add.ftz.f32 	%f16, %f18, 0f3F000000;
	cvt.rn.f32.s32	%f19, %r2;
	add.ftz.f32 	%f17, %f19, 0f3F000000;
	// inline asm
	tex.2d.v4.f32.f32 {%f6, %f7, %f8, %f9}, [texture0_RECT, {%f16, %f17}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f12, %f13, %f14, %f15}, [texture1_RECT, {%f16, %f17}];
	// inline asm
	mad.lo.s32 	%r13, %r2, %r5, %r1;
	mul.wide.s32 	%rd10, %r13, 4;
	add.s64 	%rd11, %rd7, %rd10;
	ld.global.f32 	%f20, [%rd11];
	mul.ftz.f32 	%f21, %f20, %f5;
	mov.f32 	%f22, 0f3F800000;
	sub.ftz.f32 	%f23, %f22, %f21;
	mul.ftz.f32 	%f24, %f6, %f23;
	fma.rn.ftz.f32 	%f1, %f12, %f21, %f24;
	mul.ftz.f32 	%f25, %f7, %f23;
	fma.rn.ftz.f32 	%f2, %f13, %f21, %f25;
	mul.ftz.f32 	%f26, %f8, %f23;
	fma.rn.ftz.f32 	%f3, %f14, %f21, %f26;
	mad.lo.s32 	%r14, %r2, %r3, %r1;
	cvt.s64.s32	%rd2, %r14;
	setp.eq.s32	%p4, %r4, 0;
	@%p4 bra 	BB3_3;

	shl.b64 	%rd12, %rd2, 4;
	add.s64 	%rd13, %rd1, %rd12;
	st.global.v4.f32 	[%rd13], {%f1, %f2, %f3, %f15};
	bra.uni 	BB3_4;

BB3_3:
	shl.b64 	%rd14, %rd2, 3;
	add.s64 	%rd15, %rd1, %rd14;
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f15;
	mov.b16 	%rs1, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs2, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs3, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f1;
	mov.b16 	%rs4, %temp;
}
	st.global.v4.u16 	[%rd15], {%rs4, %rs3, %rs2, %rs1};

BB3_4:
	ret;
}

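// Mask_Blend_Inverted_ANormal (annotation): inverted mask, scaled by the
// source alpha:
//   m = (1.0f - mask[y * width + x]) * src.w * opacity;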
.visible .entry Mask_Blend_Inverted_ANormal(
	.param .u64 Mask_Blend_Inverted_ANormal_param_0,
	.param .u32 Mask_Blend_Inverted_ANormal_param_1,
	.param .u32 Mask_Blend_Inverted_ANormal_param_2,
	.param .u32 Mask_Blend_Inverted_ANormal_param_3,
	.param .u32 Mask_Blend_Inverted_ANormal_param_4,
	.param .u64 Mask_Blend_Inverted_ANormal_param_5,
	.param .u64 Mask_Blend_Inverted_ANormal_param_6,
	.param .u64 Mask_Blend_Inverted_ANormal_param_7,
	.param .f32 Mask_Blend_Inverted_ANormal_param_8
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<15>;
	.reg .f32 	%f<29>;
	.reg .s64 	%rd<16>;


	ld.param.u64 	%rd4, [Mask_Blend_Inverted_ANormal_param_0];
	ld.param.u32 	%r3, [Mask_Blend_Inverted_ANormal_param_1];
	ld.param.u32 	%r4, [Mask_Blend_Inverted_ANormal_param_2];
	ld.param.u32 	%r5, [Mask_Blend_Inverted_ANormal_param_3];
	ld.param.u32 	%r6, [Mask_Blend_Inverted_ANormal_param_4];
	ld.param.u64 	%rd3, [Mask_Blend_Inverted_ANormal_param_7];
	ld.param.f32 	%f5, [Mask_Blend_Inverted_ANormal_param_8];
	cvta.to.global.u64 	%rd1, %rd4;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mov.u32 	%r9, %tid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r9;
	mov.u32 	%r10, %ntid.y;
	mov.u32 	%r11, %ctaid.y;
	mov.u32 	%r12, %tid.y;
	mad.lo.s32 	%r2, %r10, %r11, %r12;
	setp.lt.s32	%p1, %r1, %r5;
	setp.lt.s32	%p2, %r2, %r6;
	and.pred  	%p3, %p1, %p2;
	@!%p3 bra 	BB4_4;
	bra.uni 	BB4_1;

BB4_1:
	cvta.to.global.u64 	%rd7, %rd3;
	cvt.rn.f32.s32	%f18, %r1;
	add.ftz.f32 	%f16, %f18, 0f3F000000;
	cvt.rn.f32.s32	%f19, %r2;
	add.ftz.f32 	%f17, %f19, 0f3F000000;
	// inline asm
	tex.2d.v4.f32.f32 {%f6, %f7, %f8, %f9}, [texture0_RECT, {%f16, %f17}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f12, %f13, %f14, %f15}, [texture1_RECT, {%f16, %f17}];
	// inline asm
	mad.lo.s32 	%r13, %r2, %r5, %r1;
	mul.wide.s32 	%rd10, %r13, 4;
	add.s64 	%rd11, %rd7, %rd10;
	ld.global.f32 	%f20, [%rd11];
	mov.f32 	%f21, 0f3F800000;
	sub.ftz.f32 	%f22, %f21, %f20;
	mul.ftz.f32 	%f23, %f22, %f15;
	mul.ftz.f32 	%f24, %f23, %f5;
	sub.ftz.f32 	%f25, %f21, %f24;
	mul.ftz.f32 	%f26, %f6, %f25;
	fma.rn.ftz.f32 	%f2, %f12, %f24, %f26;
	mul.ftz.f32 	%f27, %f7, %f25;
	fma.rn.ftz.f32 	%f3, %f13, %f24, %f27;
	mul.ftz.f32 	%f28, %f8, %f25;
	fma.rn.ftz.f32 	%f4, %f14, %f24, %f28;
	mad.lo.s32 	%r14, %r2, %r3, %r1;
	cvt.s64.s32	%rd2, %r14;
	setp.eq.s32	%p4, %r4, 0;
	@%p4 bra 	BB4_3;

	shl.b64 	%rd12, %rd2, 4;
	add.s64 	%rd13, %rd1, %rd12;
	st.global.v4.f32 	[%rd13], {%f2, %f3, %f4, %f15};
	bra.uni 	BB4_4;

BB4_3:
	shl.b64 	%rd14, %rd2, 3;
	add.s64 	%rd15, %rd1, %rd14;
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f15;
	mov.b16 	%rs1, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs2, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs3, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs4, %temp;
}
	st.global.v4.u16 	[%rd15], {%rs4, %rs3, %rs2, %rs1};

BB4_4:
	ret;
}

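// Mask_Blend_Inverted_AInverted (annotation): inverted mask and inverted
// source alpha:
//   m = (1.0f - mask[y * width + x]) * (1.0f - src.w) * opacity;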
.visible .entry Mask_Blend_Inverted_AInverted(
	.param .u64 Mask_Blend_Inverted_AInverted_param_0,
	.param .u32 Mask_Blend_Inverted_AInverted_param_1,
	.param .u32 Mask_Blend_Inverted_AInverted_param_2,
	.param .u32 Mask_Blend_Inverted_AInverted_param_3,
	.param .u32 Mask_Blend_Inverted_AInverted_param_4,
	.param .u64 Mask_Blend_Inverted_AInverted_param_5,
	.param .u64 Mask_Blend_Inverted_AInverted_param_6,
	.param .u64 Mask_Blend_Inverted_AInverted_param_7,
	.param .f32 Mask_Blend_Inverted_AInverted_param_8
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<15>;
	.reg .f32 	%f<30>;
	.reg .s64 	%rd<16>;


	ld.param.u64 	%rd4, [Mask_Blend_Inverted_AInverted_param_0];
	ld.param.u32 	%r3, [Mask_Blend_Inverted_AInverted_param_1];
	ld.param.u32 	%r4, [Mask_Blend_Inverted_AInverted_param_2];
	ld.param.u32 	%r5, [Mask_Blend_Inverted_AInverted_param_3];
	ld.param.u32 	%r6, [Mask_Blend_Inverted_AInverted_param_4];
	ld.param.u64 	%rd3, [Mask_Blend_Inverted_AInverted_param_7];
	ld.param.f32 	%f5, [Mask_Blend_Inverted_AInverted_param_8];
	cvta.to.global.u64 	%rd1, %rd4;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mov.u32 	%r9, %tid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r9;
	mov.u32 	%r10, %ntid.y;
	mov.u32 	%r11, %ctaid.y;
	mov.u32 	%r12, %tid.y;
	mad.lo.s32 	%r2, %r10, %r11, %r12;
	setp.lt.s32	%p1, %r1, %r5;
	setp.lt.s32	%p2, %r2, %r6;
	and.pred  	%p3, %p1, %p2;
	@!%p3 bra 	BB5_4;
	bra.uni 	BB5_1;

BB5_1:
	cvta.to.global.u64 	%rd7, %rd3;
	cvt.rn.f32.s32	%f18, %r1;
	add.ftz.f32 	%f16, %f18, 0f3F000000;
	cvt.rn.f32.s32	%f19, %r2;
	add.ftz.f32 	%f17, %f19, 0f3F000000;
	// inline asm
	tex.2d.v4.f32.f32 {%f6, %f7, %f8, %f9}, [texture0_RECT, {%f16, %f17}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f12, %f13, %f14, %f15}, [texture1_RECT, {%f16, %f17}];
	// inline asm
	mad.lo.s32 	%r13, %r2, %r5, %r1;
	mul.wide.s32 	%rd10, %r13, 4;
	add.s64 	%rd11, %rd7, %rd10;
	ld.global.f32 	%f20, [%rd11];
	mov.f32 	%f21, 0f3F800000;
	sub.ftz.f32 	%f22, %f21, %f20;
	sub.ftz.f32 	%f23, %f21, %f15;
	mul.ftz.f32 	%f24, %f22, %f23;
	mul.ftz.f32 	%f25, %f24, %f5;
	sub.ftz.f32 	%f26, %f21, %f25;
	mul.ftz.f32 	%f27, %f6, %f26;
	fma.rn.ftz.f32 	%f2, %f12, %f25, %f27;
	mul.ftz.f32 	%f28, %f7, %f26;
	fma.rn.ftz.f32 	%f3, %f13, %f25, %f28;
	mul.ftz.f32 	%f29, %f8, %f26;
	fma.rn.ftz.f32 	%f4, %f14, %f25, %f29;
	mad.lo.s32 	%r14, %r2, %r3, %r1;
	cvt.s64.s32	%rd2, %r14;
	setp.eq.s32	%p4, %r4, 0;
	@%p4 bra 	BB5_3;

	shl.b64 	%rd12, %rd2, 4;
	add.s64 	%rd13, %rd1, %rd12;
	st.global.v4.f32 	[%rd13], {%f2, %f3, %f4, %f15};
	bra.uni 	BB5_4;

BB5_3:
	shl.b64 	%rd14, %rd2, 3;
	add.s64 	%rd15, %rd1, %rd14;
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f15;
	mov.b16 	%rs1, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs2, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs3, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs4, %temp;
}
	st.global.v4.u16 	[%rd15], {%rs4, %rs3, %rs2, %rs1};

BB5_4:
	ret;
}

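// Mask_Blend_Inverted (annotation): inverted mask, no alpha modulation:
//   m = (1.0f - mask[y * width + x]) * opacity;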
.visible .entry Mask_Blend_Inverted(
	.param .u64 Mask_Blend_Inverted_param_0,
	.param .u32 Mask_Blend_Inverted_param_1,
	.param .u32 Mask_Blend_Inverted_param_2,
	.param .u32 Mask_Blend_Inverted_param_3,
	.param .u32 Mask_Blend_Inverted_param_4,
	.param .u64 Mask_Blend_Inverted_param_5,
	.param .u64 Mask_Blend_Inverted_param_6,
	.param .u64 Mask_Blend_Inverted_param_7,
	.param .f32 Mask_Blend_Inverted_param_8
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<15>;
	.reg .f32 	%f<28>;
	.reg .s64 	%rd<16>;


	ld.param.u64 	%rd4, [Mask_Blend_Inverted_param_0];
	ld.param.u32 	%r3, [Mask_Blend_Inverted_param_1];
	ld.param.u32 	%r4, [Mask_Blend_Inverted_param_2];
	ld.param.u32 	%r5, [Mask_Blend_Inverted_param_3];
	ld.param.u32 	%r6, [Mask_Blend_Inverted_param_4];
	ld.param.u64 	%rd3, [Mask_Blend_Inverted_param_7];
	ld.param.f32 	%f5, [Mask_Blend_Inverted_param_8];
	cvta.to.global.u64 	%rd1, %rd4;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mov.u32 	%r9, %tid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r9;
	mov.u32 	%r10, %ntid.y;
	mov.u32 	%r11, %ctaid.y;
	mov.u32 	%r12, %tid.y;
	mad.lo.s32 	%r2, %r10, %r11, %r12;
	setp.lt.s32	%p1, %r1, %r5;
	setp.lt.s32	%p2, %r2, %r6;
	and.pred  	%p3, %p1, %p2;
	@!%p3 bra 	BB6_4;
	bra.uni 	BB6_1;

BB6_1:
	cvta.to.global.u64 	%rd7, %rd3;
	cvt.rn.f32.s32	%f18, %r1;
	add.ftz.f32 	%f16, %f18, 0f3F000000;
	cvt.rn.f32.s32	%f19, %r2;
	add.ftz.f32 	%f17, %f19, 0f3F000000;
	// inline asm
	tex.2d.v4.f32.f32 {%f6, %f7, %f8, %f9}, [texture0_RECT, {%f16, %f17}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f12, %f13, %f14, %f15}, [texture1_RECT, {%f16, %f17}];
	// inline asm
	mad.lo.s32 	%r13, %r2, %r5, %r1;
	mul.wide.s32 	%rd10, %r13, 4;
	add.s64 	%rd11, %rd7, %rd10;
	ld.global.f32 	%f20, [%rd11];
	mov.f32 	%f21, 0f3F800000;
	sub.ftz.f32 	%f22, %f21, %f20;
	mul.ftz.f32 	%f23, %f22, %f5;
	sub.ftz.f32 	%f24, %f21, %f23;
	mul.ftz.f32 	%f25, %f6, %f24;
	fma.rn.ftz.f32 	%f1, %f12, %f23, %f25;
	mul.ftz.f32 	%f26, %f7, %f24;
	fma.rn.ftz.f32 	%f2, %f13, %f23, %f26;
	mul.ftz.f32 	%f27, %f8, %f24;
	fma.rn.ftz.f32 	%f3, %f14, %f23, %f27;
	mad.lo.s32 	%r14, %r2, %r3, %r1;
	cvt.s64.s32	%rd2, %r14;
	setp.eq.s32	%p4, %r4, 0;
	@%p4 bra 	BB6_3;

	shl.b64 	%rd12, %rd2, 4;
	add.s64 	%rd13, %rd1, %rd12;
	st.global.v4.f32 	[%rd13], {%f1, %f2, %f3, %f15};
	bra.uni 	BB6_4;

BB6_3:
	shl.b64 	%rd14, %rd2, 3;
	add.s64 	%rd15, %rd1, %rd14;
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f15;
	mov.b16 	%rs1, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs2, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs3, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f1;
	mov.b16 	%rs4, %temp;
}
	st.global.v4.u16 	[%rd15], {%rs4, %rs3, %rs2, %rs1};

BB6_4:
	ret;
}

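// Blend_Normal (annotation): the same blend skeleton without a mask buffer;
// note the shorter parameter list (opacity is param_7 here, and param_5 /
// param_6 are unused in this compiled code):
//   m = src.w * opacity;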
.visible .entry Blend_Normal(
	.param .u64 Blend_Normal_param_0,
	.param .u32 Blend_Normal_param_1,
	.param .u32 Blend_Normal_param_2,
	.param .u32 Blend_Normal_param_3,
	.param .u32 Blend_Normal_param_4,
	.param .u64 Blend_Normal_param_5,
	.param .u64 Blend_Normal_param_6,
	.param .f32 Blend_Normal_param_7
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<14>;
	.reg .f32 	%f<26>;
	.reg .s64 	%rd<12>;


	ld.param.u64 	%rd3, [Blend_Normal_param_0];
	ld.param.u32 	%r3, [Blend_Normal_param_1];
	ld.param.u32 	%r4, [Blend_Normal_param_2];
	ld.param.u32 	%r5, [Blend_Normal_param_3];
	ld.param.u32 	%r6, [Blend_Normal_param_4];
	ld.param.f32 	%f5, [Blend_Normal_param_7];
	cvta.to.global.u64 	%rd1, %rd3;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mov.u32 	%r9, %tid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r9;
	mov.u32 	%r10, %ntid.y;
	mov.u32 	%r11, %ctaid.y;
	mov.u32 	%r12, %tid.y;
	mad.lo.s32 	%r2, %r10, %r11, %r12;
	setp.lt.s32	%p1, %r1, %r5;
	setp.lt.s32	%p2, %r2, %r6;
	and.pred  	%p3, %p1, %p2;
	@!%p3 bra 	BB7_4;
	bra.uni 	BB7_1;

BB7_1:
	cvt.rn.f32.s32	%f18, %r1;
	add.ftz.f32 	%f16, %f18, 0f3F000000;
	cvt.rn.f32.s32	%f19, %r2;
	add.ftz.f32 	%f17, %f19, 0f3F000000;
	// inline asm
	tex.2d.v4.f32.f32 {%f6, %f7, %f8, %f9}, [texture0_RECT, {%f16, %f17}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f12, %f13, %f14, %f15}, [texture1_RECT, {%f16, %f17}];
	// inline asm
	mul.ftz.f32 	%f20, %f15, %f5;
	mov.f32 	%f21, 0f3F800000;
	sub.ftz.f32 	%f22, %f21, %f20;
	mul.ftz.f32 	%f23, %f6, %f22;
	fma.rn.ftz.f32 	%f2, %f12, %f20, %f23;
	mul.ftz.f32 	%f24, %f7, %f22;
	fma.rn.ftz.f32 	%f3, %f13, %f20, %f24;
	mul.ftz.f32 	%f25, %f8, %f22;
	fma.rn.ftz.f32 	%f4, %f14, %f20, %f25;
	mad.lo.s32 	%r13, %r2, %r3, %r1;
	cvt.s64.s32	%rd2, %r13;
	setp.eq.s32	%p4, %r4, 0;
	@%p4 bra 	BB7_3;

	shl.b64 	%rd8, %rd2, 4;
	add.s64 	%rd9, %rd1, %rd8;
	st.global.v4.f32 	[%rd9], {%f2, %f3, %f4, %f15};
	bra.uni 	BB7_4;

BB7_3:
	shl.b64 	%rd10, %rd2, 3;
	add.s64 	%rd11, %rd1, %rd10;
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f15;
	mov.b16 	%rs1, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs2, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs3, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs4, %temp;
}
	st.global.v4.u16 	[%rd11], {%rs4, %rs3, %rs2, %rs1};

BB7_4:
	ret;
}

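// Blend_Inverted (annotation): unmasked blend with inverted source alpha:
//   m = (1.0f - src.w) * opacity;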
.visible .entry Blend_Inverted(
	.param .u64 Blend_Inverted_param_0,
	.param .u32 Blend_Inverted_param_1,
	.param .u32 Blend_Inverted_param_2,
	.param .u32 Blend_Inverted_param_3,
	.param .u32 Blend_Inverted_param_4,
	.param .u64 Blend_Inverted_param_5,
	.param .u64 Blend_Inverted_param_6,
	.param .f32 Blend_Inverted_param_7
)
{
	.reg .pred 	%p<5>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<14>;
	.reg .f32 	%f<27>;
	.reg .s64 	%rd<12>;


	ld.param.u64 	%rd3, [Blend_Inverted_param_0];
	ld.param.u32 	%r3, [Blend_Inverted_param_1];
	ld.param.u32 	%r4, [Blend_Inverted_param_2];
	ld.param.u32 	%r5, [Blend_Inverted_param_3];
	ld.param.u32 	%r6, [Blend_Inverted_param_4];
	ld.param.f32 	%f5, [Blend_Inverted_param_7];
	cvta.to.global.u64 	%rd1, %rd3;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mov.u32 	%r9, %tid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r9;
	mov.u32 	%r10, %ntid.y;
	mov.u32 	%r11, %ctaid.y;
	mov.u32 	%r12, %tid.y;
	mad.lo.s32 	%r2, %r10, %r11, %r12;
	setp.lt.s32	%p1, %r1, %r5;
	setp.lt.s32	%p2, %r2, %r6;
	and.pred  	%p3, %p1, %p2;
	@!%p3 bra 	BB8_4;
	bra.uni 	BB8_1;

BB8_1:
	cvt.rn.f32.s32	%f18, %r1;
	add.ftz.f32 	%f16, %f18, 0f3F000000;
	cvt.rn.f32.s32	%f19, %r2;
	add.ftz.f32 	%f17, %f19, 0f3F000000;
	// inline asm
	tex.2d.v4.f32.f32 {%f6, %f7, %f8, %f9}, [texture0_RECT, {%f16, %f17}];
	// inline asm
	// inline asm
	tex.2d.v4.f32.f32 {%f12, %f13, %f14, %f15}, [texture1_RECT, {%f16, %f17}];
	// inline asm
	mov.f32 	%f20, 0f3F800000;
	sub.ftz.f32 	%f21, %f20, %f15;
	mul.ftz.f32 	%f22, %f21, %f5;
	sub.ftz.f32 	%f23, %f20, %f22;
	mul.ftz.f32 	%f24, %f6, %f23;
	fma.rn.ftz.f32 	%f2, %f12, %f22, %f24;
	mul.ftz.f32 	%f25, %f7, %f23;
	fma.rn.ftz.f32 	%f3, %f13, %f22, %f25;
	mul.ftz.f32 	%f26, %f8, %f23;
	fma.rn.ftz.f32 	%f4, %f14, %f22, %f26;
	mad.lo.s32 	%r13, %r2, %r3, %r1;
	cvt.s64.s32	%rd2, %r13;
	setp.eq.s32	%p4, %r4, 0;
	@%p4 bra 	BB8_3;

	shl.b64 	%rd8, %rd2, 4;
	add.s64 	%rd9, %rd1, %rd8;
	st.global.v4.f32 	[%rd9], {%f2, %f3, %f4, %f15};
	bra.uni 	BB8_4;

BB8_3:
	shl.b64 	%rd10, %rd2, 3;
	add.s64 	%rd11, %rd1, %rd10;
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f15;
	mov.b16 	%rs1, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs2, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs3, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f2;
	mov.b16 	%rs4, %temp;
}
	st.global.v4.u16 	[%rd11], {%rs4, %rs3, %rs2, %rs1};

BB8_4:
	ret;
}

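//
// CopyMaskToBufferKernel (annotation): copies the first (x) channel of the
// inMaskImage texture, sampled at pixel centers, into a float buffer
// (param_1) with row pitch param_2, bounded by param_2 x param_3. param_0 is
// not referenced in this compiled code.
//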
.visible .entry CopyMaskToBufferKernel(
	.param .u64 CopyMaskToBufferKernel_param_0,
	.param .u64 CopyMaskToBufferKernel_param_1,
	.param .u32 CopyMaskToBufferKernel_param_2,
	.param .u32 CopyMaskToBufferKernel_param_3
)
{
	.reg .pred 	%p<4>;
	.reg .s32 	%r<12>;
	.reg .f32 	%f<9>;
	.reg .s64 	%rd<6>;


	ld.param.u64 	%rd1, [CopyMaskToBufferKernel_param_1];
	ld.param.u32 	%r3, [CopyMaskToBufferKernel_param_2];
	ld.param.u32 	%r4, [CopyMaskToBufferKernel_param_3];
	mov.u32 	%r5, %ntid.x;
	mov.u32 	%r6, %ctaid.x;
	mov.u32 	%r7, %tid.x;
	mad.lo.s32 	%r1, %r5, %r6, %r7;
	mov.u32 	%r8, %ntid.y;
	mov.u32 	%r9, %ctaid.y;
	mov.u32 	%r10, %tid.y;
	mad.lo.s32 	%r2, %r8, %r9, %r10;
	setp.lt.s32	%p1, %r1, %r3;
	setp.lt.s32	%p2, %r2, %r4;
	and.pred  	%p3, %p1, %p2;
	@!%p3 bra 	BB9_2;
	bra.uni 	BB9_1;

BB9_1:
	cvta.to.global.u64 	%rd3, %rd1;
	cvt.rn.f32.s32	%f7, %r1;
	add.ftz.f32 	%f5, %f7, 0f3F000000;
	cvt.rn.f32.s32	%f8, %r2;
	add.ftz.f32 	%f6, %f8, 0f3F000000;
	// inline asm
	tex.2d.v4.f32.f32 {%f1, %f2, %f3, %f4}, [inMaskImage, {%f5, %f6}];
	// inline asm
	mad.lo.s32 	%r11, %r2, %r3, %r1;
	mul.wide.s32 	%rd4, %r11, 4;
	add.s64 	%rd5, %rd3, %rd4;
	st.global.f32 	[%rd5], %f1;

BB9_2:
	ret;
}

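//
// RasterizationKernel (annotation; structure inferred from the body): a tiled
// software rasterizer. Pixels map to 32x32 tiles via a signed >>5 (with the
// usual negative-rounding fixup). param_3 appears to hold, per layer and per
// tile, a list of up to 256 u32 triangle indices terminated by -1, followed
// by 72-byte (18-float) triangle records. param_2 is a 16-byte viewport rect
// {x0, y0, x1, y1}, param_4 the tiles-per-row stride, param_5 apparently the
// tile count per layer, and param_6 the layer count (zero writes 0.0).
//
// Per triangle: three edge equations (floats 0..8) are evaluated at the pixel
// center, snapped to zero below ~1e-5, and sign-tested (the extra coefficient
// checks on a zero-valued edge look like a fill-rule tie-break); barycentric
// weights then interpolate a coverage value (floats 9..11), an exponent
// parameter g (floats 12..14, used as 2*g + 1 in the same sign-preserving
// lg2/ex2 pow curve as ShadingMaskKernel), and a blend-shape term
// (floats 15..17). The triangle with the largest interpolated value wins, and
// the result is stored with the y axis flipped: out[(y1 - 1 - y) * param_1 + x].
//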
.visible .entry RasterizationKernel(
	.param .u64 RasterizationKernel_param_0,
	.param .u32 RasterizationKernel_param_1,
	.param .align 16 .b8 RasterizationKernel_param_2[16],
	.param .u64 RasterizationKernel_param_3,
	.param .u32 RasterizationKernel_param_4,
	.param .u32 RasterizationKernel_param_5,
	.param .u32 RasterizationKernel_param_6
)
{
	.reg .pred 	%p<38>;
	.reg .s32 	%r<51>;
	.reg .f32 	%f<115>;
	.reg .s64 	%rd<17>;


	ld.param.u64 	%rd3, [RasterizationKernel_param_0];
	ld.param.u32 	%r9, [RasterizationKernel_param_1];
	ld.param.u32 	%r13, [RasterizationKernel_param_2+12];
	ld.param.u32 	%r11, [RasterizationKernel_param_2+4];
	ld.param.u32 	%r12, [RasterizationKernel_param_2+8];
	ld.param.u32 	%r10, [RasterizationKernel_param_2];
	ld.param.u64 	%rd4, [RasterizationKernel_param_3];
	ld.param.u32 	%r14, [RasterizationKernel_param_4];
	ld.param.u32 	%r15, [RasterizationKernel_param_5];
	ld.param.u32 	%r16, [RasterizationKernel_param_6];
	mov.u32 	%r17, %ntid.x;
	mov.u32 	%r18, %ctaid.x;
	mov.u32 	%r19, %tid.x;
	mad.lo.s32 	%r1, %r17, %r18, %r19;
	mov.u32 	%r20, %ntid.y;
	mov.u32 	%r21, %ctaid.y;
	mov.u32 	%r22, %tid.y;
	mad.lo.s32 	%r2, %r20, %r21, %r22;
	setp.lt.u32	%p1, %r1, %r10;
	setp.ge.u32	%p2, %r1, %r12;
	or.pred  	%p3, %p1, %p2;
	setp.lt.u32	%p4, %r2, %r11;
	or.pred  	%p5, %p3, %p4;
	setp.ge.u32	%p6, %r2, %r13;
	or.pred  	%p7, %p5, %p6;
	@%p7 bra 	BB10_27;

	setp.ne.s32	%p8, %r16, 0;
	@%p8 bra 	BB10_3;

	mov.f32 	%f114, 0f00000000;
	bra.uni 	BB10_26;

BB10_3:
	cvt.rn.f32.s32	%f37, %r1;
	add.ftz.f32 	%f1, %f37, 0f3F000000;
	cvt.rn.f32.s32	%f38, %r2;
	add.ftz.f32 	%f2, %f38, 0f3F000000;
	shr.s32 	%r24, %r1, 31;
	shr.u32 	%r25, %r24, 27;
	add.s32 	%r26, %r1, %r25;
	shr.s32 	%r27, %r26, 5;
	shr.s32 	%r28, %r2, 31;
	shr.u32 	%r29, %r28, 27;
	add.s32 	%r30, %r2, %r29;
	shr.s32 	%r31, %r30, 5;
	mad.lo.s32 	%r3, %r31, %r14, %r27;
	mov.f32 	%f114, 0f00000000;
	mov.f32 	%f111, %f114;
	mov.u32 	%r23, 0;
	mov.u32 	%r50, %r23;

BB10_4:
	mov.f32 	%f106, %f111;
	mov.f32 	%f108, %f106;
	mad.lo.s32 	%r33, %r50, %r15, %r3;
	shl.b32 	%r34, %r33, 8;
	cvt.u64.u32	%rd1, %r34;
	mov.u32 	%r49, %r23;

BB10_5:
	mov.f32 	%f95, %f108;
	mov.f32 	%f5, %f95;
	mov.u32 	%r5, %r49;
	cvt.u64.u32	%rd5, %r5;
	add.s64 	%rd6, %rd5, %rd1;
	cvta.to.global.u64 	%rd7, %rd4;
	shl.b64 	%rd8, %rd6, 2;
	add.s64 	%rd9, %rd7, %rd8;
	ld.global.u32 	%r6, [%rd9];
	setp.eq.s32	%p9, %r6, -1;
	mov.f32 	%f109, %f5;
	@%p9 bra 	BB10_25;

	mul.lo.s32 	%r35, %r15, %r16;
	shl.b32 	%r36, %r35, 8;
	mul.wide.u32 	%rd11, %r36, 4;
	add.s64 	%rd12, %rd7, %rd11;
	mul.wide.u32 	%rd13, %r6, 72;
	add.s64 	%rd2, %rd12, %rd13;
	ld.global.f32 	%f7, [%rd2];
	ld.global.f32 	%f8, [%rd2+4];
	mul.ftz.f32 	%f39, %f2, %f8;
	fma.rn.ftz.f32 	%f40, %f1, %f7, %f39;
	ld.global.f32 	%f41, [%rd2+8];
	add.ftz.f32 	%f42, %f40, %f41;
	ld.global.f32 	%f9, [%rd2+12];
	ld.global.f32 	%f10, [%rd2+16];
	mul.ftz.f32 	%f43, %f2, %f10;
	fma.rn.ftz.f32 	%f44, %f1, %f9, %f43;
	ld.global.f32 	%f45, [%rd2+20];
	add.ftz.f32 	%f46, %f44, %f45;
	ld.global.f32 	%f11, [%rd2+24];
	ld.global.f32 	%f12, [%rd2+28];
	mul.ftz.f32 	%f47, %f2, %f12;
	fma.rn.ftz.f32 	%f48, %f1, %f11, %f47;
	ld.global.f32 	%f49, [%rd2+32];
	add.ftz.f32 	%f50, %f48, %f49;
	abs.ftz.f32 	%f51, %f42;
	setp.lt.ftz.f32	%p10, %f51, 0f3727C5AC;
	selp.f32	%f13, 0f00000000, %f42, %p10;
	abs.ftz.f32 	%f52, %f46;
	setp.lt.ftz.f32	%p11, %f52, 0f3727C5AC;
	selp.f32	%f14, 0f00000000, %f46, %p11;
	abs.ftz.f32 	%f53, %f50;
	setp.lt.ftz.f32	%p12, %f53, 0f3727C5AC;
	selp.f32	%f15, 0f00000000, %f50, %p12;
	setp.lt.ftz.f32	%p13, %f13, 0f00000000;
	setp.lt.ftz.f32	%p14, %f14, 0f00000000;
	or.pred  	%p15, %p13, %p14;
	setp.lt.ftz.f32	%p16, %f15, 0f00000000;
	or.pred  	%p17, %p15, %p16;
	mov.f32 	%f110, %f5;
	@%p17 bra 	BB10_24;

	setp.neu.ftz.f32	%p18, %f13, 0f00000000;
	@%p18 bra 	BB10_10;

	setp.lt.ftz.f32	%p19, %f7, 0f00000000;
	mov.f32 	%f103, %f5;
	mov.f32 	%f110, %f103;
	@%p19 bra 	BB10_24;

	setp.neu.ftz.f32	%p20, %f7, 0f00000000;
	setp.ltu.ftz.f32	%p21, %f8, 0f00000000;
	or.pred  	%p22, %p20, %p21;
	mov.f32 	%f102, %f5;
	mov.f32 	%f110, %f102;
	@!%p22 bra 	BB10_24;
	bra.uni 	BB10_10;

BB10_10:
	setp.neu.ftz.f32	%p23, %f14, 0f00000000;
	@%p23 bra 	BB10_13;

	setp.lt.ftz.f32	%p24, %f9, 0f00000000;
	mov.f32 	%f101, %f5;
	mov.f32 	%f110, %f101;
	@%p24 bra 	BB10_24;

	setp.neu.ftz.f32	%p25, %f9, 0f00000000;
	setp.ltu.ftz.f32	%p26, %f10, 0f00000000;
	or.pred  	%p27, %p25, %p26;
	mov.f32 	%f100, %f5;
	mov.f32 	%f110, %f100;
	@!%p27 bra 	BB10_24;
	bra.uni 	BB10_13;

BB10_13:
	setp.neu.ftz.f32	%p28, %f15, 0f00000000;
	@%p28 bra 	BB10_16;

	setp.lt.ftz.f32	%p29, %f11, 0f00000000;
	mov.f32 	%f99, %f5;
	mov.f32 	%f110, %f99;
	@%p29 bra 	BB10_24;

	setp.neu.ftz.f32	%p30, %f11, 0f00000000;
	setp.ltu.ftz.f32	%p31, %f12, 0f00000000;
	or.pred  	%p32, %p30, %p31;
	mov.f32 	%f98, %f5;
	mov.f32 	%f110, %f98;
	@!%p32 bra 	BB10_24;
	bra.uni 	BB10_16;

BB10_16:
	add.ftz.f32 	%f54, %f13, %f14;
	add.ftz.f32 	%f55, %f54, %f15;
	div.approx.ftz.f32 	%f16, %f13, %f55;
	div.approx.ftz.f32 	%f17, %f14, %f55;
	div.approx.ftz.f32 	%f18, %f15, %f55;
	ld.global.f32 	%f56, [%rd2+36];
	ld.global.f32 	%f57, [%rd2+40];
	mul.ftz.f32 	%f58, %f17, %f57;
	fma.rn.ftz.f32 	%f59, %f16, %f56, %f58;
	ld.global.f32 	%f60, [%rd2+44];
	fma.rn.ftz.f32 	%f19, %f18, %f60, %f59;
	setp.leu.ftz.f32	%p33, %f19, %f5;
	mov.f32 	%f97, %f5;
	mov.f32 	%f110, %f97;
	@%p33 bra 	BB10_24;

	ld.global.f32 	%f61, [%rd2+48];
	ld.global.f32 	%f62, [%rd2+52];
	mul.ftz.f32 	%f63, %f17, %f62;
	fma.rn.ftz.f32 	%f64, %f16, %f61, %f63;
	ld.global.f32 	%f65, [%rd2+56];
	fma.rn.ftz.f32 	%f66, %f18, %f65, %f64;
	fma.rn.ftz.f32 	%f20, %f66, 0f40000000, 0f3F800000;
	ld.global.f32 	%f67, [%rd2+60];
	ld.global.f32 	%f68, [%rd2+64];
	mul.ftz.f32 	%f69, %f17, %f68;
	fma.rn.ftz.f32 	%f70, %f16, %f67, %f69;
	ld.global.f32 	%f71, [%rd2+68];
	fma.rn.ftz.f32 	%f21, %f18, %f71, %f70;
	setp.ltu.ftz.f32	%p34, %f19, 0f00000000;
	@%p34 bra 	BB10_19;

	lg2.approx.ftz.f32 	%f72, %f19;
	mul.ftz.f32 	%f73, %f72, %f20;
	ex2.approx.ftz.f32 	%f112, %f73;
	bra.uni 	BB10_20;

BB10_19:
	neg.ftz.f32 	%f74, %f19;
	lg2.approx.ftz.f32 	%f75, %f74;
	mul.ftz.f32 	%f76, %f75, %f20;
	ex2.approx.ftz.f32 	%f77, %f76;
	neg.ftz.f32 	%f112, %f77;

BB10_20:
	mov.f32 	%f78, 0f3F800000;
	sub.ftz.f32 	%f25, %f78, %f19;
	setp.ltu.ftz.f32	%p35, %f25, 0f00000000;
	@%p35 bra 	BB10_22;

	lg2.approx.ftz.f32 	%f79, %f25;
	mul.ftz.f32 	%f80, %f79, %f20;
	ex2.approx.ftz.f32 	%f113, %f80;
	bra.uni 	BB10_23;

BB10_22:
	neg.ftz.f32 	%f81, %f25;
	lg2.approx.ftz.f32 	%f82, %f81;
	mul.ftz.f32 	%f83, %f82, %f20;
	ex2.approx.ftz.f32 	%f84, %f83;
	neg.ftz.f32 	%f113, %f84;

BB10_23:
	add.ftz.f32 	%f85, %f21, %f21;
	add.ftz.f32 	%f86, %f85, 0fBF800000;
	cvt.ftz.sat.f32.f32	%f87, %f86;
	cvt.ftz.sat.f32.f32	%f88, %f85;
	sub.ftz.f32 	%f89, %f88, %f87;
	fma.rn.ftz.f32 	%f90, %f89, %f19, %f87;
	sub.ftz.f32 	%f92, %f78, %f113;
	sub.ftz.f32 	%f93, %f92, %f112;
	fma.rn.ftz.f32 	%f114, %f93, %f90, %f112;
	mov.f32 	%f110, %f19;

BB10_24:
	mov.f32 	%f108, %f110;
	add.s32 	%r7, %r5, 1;
	setp.lt.u32	%p36, %r7, 256;
	mov.u32 	%r49, %r7;
	mov.f32 	%f109, %f108;
	@%p36 bra 	BB10_5;

BB10_25:
	mov.f32 	%f111, %f109;
	add.s32 	%r50, %r50, 1;
	setp.lt.u32	%p37, %r50, %r16;
	@%p37 bra 	BB10_4;

BB10_26:
	add.s32 	%r37, %r13, -1;
	sub.s32 	%r42, %r37, %r2;
	mad.lo.s32 	%r47, %r42, %r9, %r1;
	cvta.to.global.u64 	%rd14, %rd3;
	mul.wide.u32 	%rd15, %r47, 4;
	add.s64 	%rd16, %rd14, %rd15;
	st.global.f32 	[%rd16], %f114;

BB10_27:
	ret;
}
