//
// Generated by NVIDIA NVVM Compiler
// Compiler built on Fri Jul 25 04:36:16 2014 (1406288176)
// Cuda compilation tools, release 6.5, V6.5.13
//

.version 4.1
.target sm_30
.address_size 64
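
// inSpline holds the luma-curve spline table used by the kernels below. Judging by the
// accesses in LumaCurve_LumaKernel / LumaCurve_CompositeKernel, the 196-byte layout is:
// 16 float x-knots at offset 0, 16 float y-values at offset 64, 16 float second
// derivatives at offset 128, and a u32 knot count at offset 192.
// The k*_To_* tables below are row-major 3x3 color-conversion matrices (for example,
// kRGB32f_To_601YPbPr begins with the BT.601 luma weights 0.299, 0.587, 0.114), and the
// kYCbCr*Offset tables hold 3-float (Y, Cb, Cr) offsets of (16 or 0, 128, 128).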

.const .align 4 .b8 inSpline[196];
.const .align 4 .b8 kRGB32f_To_601YPbPr[36] = {135, 22, 153, 62, 162, 69, 22, 63, 213, 120, 233, 61, 33, 201, 44, 190, 111, 155, 169, 190, 0, 0, 0, 63, 0, 0, 0, 63, 70, 94, 214, 190, 232, 134, 166, 189};
.const .align 4 .b8 k601YPbPr_To_RGB32f[36] = {0, 0, 128, 63, 0, 0, 0, 0, 188, 116, 179, 63, 0, 0, 128, 63, 152, 50, 176, 190, 158, 209, 54, 191, 0, 0, 128, 63, 229, 208, 226, 63, 0, 0, 0, 0};
.const .align 4 .b8 kRGB32f_To_601YCbCr[36] = {70, 246, 130, 66, 145, 141, 0, 67, 94, 186, 199, 65, 33, 48, 23, 194, 240, 103, 148, 194, 0, 0, 224, 66, 0, 0, 224, 66, 111, 146, 187, 194, 70, 182, 145, 193};
.const .align 4 .b8 k601YCbCr_To_RGB32f[36] = {37, 160, 149, 59, 0, 0, 0, 0, 182, 23, 205, 59, 37, 160, 149, 59, 40, 15, 201, 186, 156, 239, 80, 187, 37, 160, 149, 59, 236, 155, 1, 60, 0, 0, 0, 0};
.const .align 4 .b8 kRGB8u_To_601YCbCr[36] = {219, 121, 131, 62, 152, 14, 1, 63, 18, 131, 200, 61, 174, 199, 23, 190, 238, 252, 148, 190, 197, 224, 224, 62, 197, 224, 224, 62, 217, 78, 188, 190, 174, 71, 146, 189};
.const .align 4 .b8 k601YCbCr_To_RGB8u[36] = {127, 10, 149, 63, 0, 0, 0, 0, 160, 74, 204, 63, 127, 10, 149, 63, 254, 148, 200, 190, 184, 30, 80, 191, 127, 10, 149, 63, 78, 26, 1, 64, 0, 0, 0, 0};
.const .align 4 .b8 kRGB8u_To_601YCbCrFullRange[36] = {135, 22, 153, 62, 162, 69, 22, 63, 213, 120, 233, 61, 166, 27, 44, 190, 39, 241, 168, 190, 250, 254, 254, 62, 250, 254, 254, 62, 43, 135, 213, 190, 59, 223, 165, 189};
.const .align 4 .b8 k601YCbCrFullRange_To_RGB8u[36] = {0, 0, 128, 63, 0, 0, 0, 0, 72, 193, 178, 63, 0, 0, 128, 63, 143, 130, 175, 190, 225, 26, 54, 191, 0, 0, 128, 63, 20, 238, 225, 63, 0, 0, 0, 0};
.const .align 4 .b8 kRGB32f_To_601YCbCrFullRange[36] = {113, 125, 152, 66, 92, 175, 21, 67, 92, 143, 232, 65, 158, 111, 43, 194, 49, 72, 168, 194, 0, 0, 254, 66, 0, 0, 254, 66, 170, 177, 212, 194, 88, 57, 165, 193};
.const .align 4 .b8 k601YCbCrFullRange_To_RGB32f[36] = {129, 128, 128, 59, 0, 0, 0, 0, 188, 116, 179, 59, 129, 128, 128, 59, 194, 50, 176, 186, 179, 209, 54, 187, 129, 128, 128, 59, 229, 208, 226, 59, 0, 0, 0, 0};
.const .align 4 .b8 kRGB32f_To_709YPbPr[36] = {208, 179, 89, 62, 89, 23, 55, 63, 152, 221, 147, 61, 186, 164, 234, 189, 210, 86, 197, 190, 0, 0, 0, 63, 0, 0, 0, 63, 190, 134, 232, 190, 16, 202, 59, 189};
.const .align 4 .b8 k709YPbPr_To_RGB32f[36] = {0, 0, 128, 63, 0, 0, 0, 0, 12, 147, 201, 63, 0, 0, 128, 63, 221, 209, 63, 190, 243, 173, 239, 190, 0, 0, 128, 63, 77, 132, 237, 63, 0, 0, 0, 0};
.const .align 4 .b8 kRGB32f_To_709YCbCr[36] = {106, 60, 58, 66, 6, 161, 28, 67, 244, 253, 124, 65, 223, 79, 205, 193, 8, 172, 172, 194, 0, 0, 224, 66, 0, 0, 224, 66, 195, 117, 203, 194, 236, 81, 36, 193};
.const .align 4 .b8 k709YCbCr_To_RGB32f[36] = {37, 160, 149, 59, 0, 0, 0, 0, 239, 94, 230, 59, 37, 160, 149, 59, 33, 57, 91, 186, 178, 245, 8, 187, 37, 160, 149, 59, 82, 185, 7, 60, 0, 0, 0, 0};
.const .align 4 .b8 k709YCbCrFullRange_To_RGB32f[36] = {131, 128, 128, 59, 0, 0, 0, 0, 28, 147, 201, 59, 131, 128, 128, 59, 61, 210, 63, 186, 248, 173, 239, 186, 131, 128, 128, 59, 82, 132, 237, 59, 0, 0, 0, 0};
.const .align 4 .b8 kRGB8u_To_709YCbCr[36] = {207, 247, 58, 62, 53, 62, 29, 63, 231, 251, 125, 61, 184, 30, 206, 189, 23, 89, 173, 190, 197, 224, 224, 62, 197, 224, 224, 62, 12, 66, 204, 190, 195, 245, 36, 189};
.const .align 4 .b8 k709YCbCr_To_RGB8u[36] = {127, 10, 149, 63, 0, 0, 0, 0, 147, 120, 229, 63, 127, 10, 149, 63, 53, 94, 90, 190, 205, 108, 8, 191, 127, 10, 149, 63, 154, 49, 7, 64, 0, 0, 0, 0};
.const .align 4 .b8 k709YCbCr_To_601YCbCr[36] = {0, 0, 128, 63, 23, 100, 203, 61, 1, 77, 68, 62, 0, 0, 0, 0, 18, 103, 125, 63, 10, 158, 226, 189, 0, 0, 0, 0, 61, 98, 148, 189, 249, 191, 123, 63};
.const .align 4 .b8 k601YCbCr_To_709YCbCr[36] = {0, 0, 128, 63, 122, 165, 236, 189, 179, 237, 84, 190, 0, 0, 0, 0, 204, 98, 130, 63, 216, 188, 234, 61, 0, 0, 0, 0, 74, 179, 153, 61, 234, 61, 131, 63};
.const .align 4 .b8 kYCbCrOffset[12] = {0, 0, 128, 65, 0, 0, 0, 67, 0, 0, 0, 67};
.const .align 4 .b8 kYCbCrFullRangeOffset[12] = {0, 0, 0, 0, 0, 0, 0, 67, 0, 0, 0, 67};

.visible .func  (.param .b32 func_retval0) _Z31InterpolateNaturalCubicSpline2DPKfS0_S0_jf(
	.param .b64 _Z31InterpolateNaturalCubicSpline2DPKfS0_S0_jf_param_0,
	.param .b64 _Z31InterpolateNaturalCubicSpline2DPKfS0_S0_jf_param_1,
	.param .b64 _Z31InterpolateNaturalCubicSpline2DPKfS0_S0_jf_param_2,
	.param .b32 _Z31InterpolateNaturalCubicSpline2DPKfS0_S0_jf_param_3,
	.param .b32 _Z31InterpolateNaturalCubicSpline2DPKfS0_S0_jf_param_4
)
{
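	// _Z31InterpolateNaturalCubicSpline2DPKfS0_S0_jf demangles to
	// InterpolateNaturalCubicSpline2D(const float*, const float*, const float*, unsigned, float).
	// Given knot x-values (param_0), y-values (param_1), second derivatives (param_2), a knot
	// count (param_3) and an abscissa t (param_4), it returns the natural cubic spline value
	// at t, or t itself when the bracketing interval is degenerate (zero width).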
	.reg .pred 	%p<5>;
	.reg .s32 	%r<14>;
	.reg .f32 	%f<31>;
	.reg .s64 	%rd<21>;


	ld.param.u64 	%rd4, [_Z31InterpolateNaturalCubicSpline2DPKfS0_S0_jf_param_0];
	ld.param.u64 	%rd5, [_Z31InterpolateNaturalCubicSpline2DPKfS0_S0_jf_param_1];
	ld.param.u64 	%rd6, [_Z31InterpolateNaturalCubicSpline2DPKfS0_S0_jf_param_2];
	ld.param.u32 	%r7, [_Z31InterpolateNaturalCubicSpline2DPKfS0_S0_jf_param_3];
	ld.param.f32 	%f30, [_Z31InterpolateNaturalCubicSpline2DPKfS0_S0_jf_param_4];
	add.s32 	%r13, %r7, -1;
	setp.gt.u32	%p1, %r13, 1;
	@%p1 bra 	BB0_2;

	mov.u64 	%rd20, 0;
	bra.uni 	BB0_5;

BB0_2:
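	// Binary search: shrink [%r12, %r13] = [lo, hi] until hi - lo == 1, moving lo up when
	// x[mid] <= t and hi down when x[mid] > t. (With fewer than three knots this block is
	// skipped and lo stays 0.)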
	mov.u32 	%r12, 0;

BB0_3:
	add.s32 	%r9, %r13, %r12;
	shr.u32 	%r10, %r9, 1;
	mul.wide.u32 	%rd8, %r10, 4;
	add.s64 	%rd9, %rd4, %rd8;
	ld.f32 	%f7, [%rd9];
	setp.gt.ftz.f32	%p2, %f7, %f30;
	selp.b32	%r12, %r12, %r10, %p2;
	selp.b32	%r13, %r10, %r13, %p2;
	sub.s32 	%r11, %r13, %r12;
	setp.gt.u32	%p3, %r11, 1;
	@%p3 bra 	BB0_3;

	cvt.u64.u32	%rd20, %r12;

BB0_5:
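	// Evaluate the natural cubic spline on [x[lo], x[hi]]:
	//   h = x[hi] - x[lo],  a = (x[hi] - t)/h,  b = (t - x[lo])/h
	//   result = a*y[lo] + b*y[hi] + ((a^3 - a)*y2[lo] + (b^3 - b)*y2[hi]) * h^2 / 6
	// If h == 0 the evaluation is skipped and t is returned unchanged.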
	cvt.u64.u32	%rd3, %r13;
	mul.wide.u32 	%rd10, %r13, 4;
	add.s64 	%rd11, %rd4, %rd10;
	shl.b64 	%rd12, %rd20, 2;
	add.s64 	%rd13, %rd4, %rd12;
	ld.f32 	%f1, [%rd13];
	ld.f32 	%f2, [%rd11];
	sub.ftz.f32 	%f3, %f2, %f1;
	setp.eq.ftz.f32	%p4, %f3, 0f00000000;
	@%p4 bra 	BB0_7;

	sub.ftz.f32 	%f8, %f2, %f30;
	div.approx.ftz.f32 	%f9, %f8, %f3;
	sub.ftz.f32 	%f10, %f30, %f1;
	div.approx.ftz.f32 	%f11, %f10, %f3;
	add.s64 	%rd15, %rd5, %rd12;
	ld.f32 	%f12, [%rd15];
	shl.b64 	%rd16, %rd3, 2;
	add.s64 	%rd17, %rd5, %rd16;
	ld.f32 	%f13, [%rd17];
	mul.ftz.f32 	%f14, %f11, %f13;
	fma.rn.ftz.f32 	%f15, %f9, %f12, %f14;
	mul.ftz.f32 	%f16, %f9, %f9;
	mul.ftz.f32 	%f17, %f16, %f9;
	sub.ftz.f32 	%f18, %f17, %f9;
	add.s64 	%rd18, %rd6, %rd12;
	ld.f32 	%f19, [%rd18];
	mul.ftz.f32 	%f20, %f11, %f11;
	mul.ftz.f32 	%f21, %f20, %f11;
	sub.ftz.f32 	%f22, %f21, %f11;
	add.s64 	%rd19, %rd6, %rd16;
	ld.f32 	%f23, [%rd19];
	mul.ftz.f32 	%f24, %f22, %f23;
	fma.rn.ftz.f32 	%f25, %f18, %f19, %f24;
	mul.ftz.f32 	%f26, %f3, %f3;
	mul.ftz.f32 	%f27, %f25, %f26;
	mov.f32 	%f28, 0f40C00000;
	div.approx.ftz.f32 	%f29, %f27, %f28;
	add.ftz.f32 	%f30, %f15, %f29;

BB0_7:
	st.param.f32	[func_retval0+0], %f30;
	ret;
}

.visible .entry LumaCurve_MaskKernel(
	.param .u64 LumaCurve_MaskKernel_param_0,
	.param .u32 LumaCurve_MaskKernel_param_1,
	.param .u64 LumaCurve_MaskKernel_param_2,
	.param .u32 LumaCurve_MaskKernel_param_3,
	.param .u32 LumaCurve_MaskKernel_param_4,
	.param .u32 LumaCurve_MaskKernel_param_5,
	.param .u32 LumaCurve_MaskKernel_param_6
)
{
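	// One thread per pixel. Parameter roles (inferred from the code below):
	//   param_0 = destination image, param_1 = destination row stride in pixels,
	//   param_2 = optional mask plane (may be null), param_3 = mask row stride,
	//   param_4 = nonzero for float4 pixels / zero for half4 pixels,
	//   param_5 / param_6 = image width / height.
	// Each in-bounds thread reads the mask value (1.0 when no mask is bound), treats it as a
	// luma with zero chroma, converts it to RGB via k601YPbPr_To_RGB32f and stores it with
	// alpha = 1.0, i.e. it writes the mask out as a grayscale RGBA image.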
	.reg .pred 	%p<6>;
	.reg .s16 	%rs<5>;
	.reg .s32 	%r<16>;
	.reg .f32 	%f<25>;
	.reg .s64 	%rd<12>;


	ld.param.u64 	%rd4, [LumaCurve_MaskKernel_param_0];
	ld.param.u32 	%r3, [LumaCurve_MaskKernel_param_1];
	ld.param.u64 	%rd3, [LumaCurve_MaskKernel_param_2];
	ld.param.u32 	%r4, [LumaCurve_MaskKernel_param_3];
	ld.param.u32 	%r5, [LumaCurve_MaskKernel_param_4];
	ld.param.u32 	%r6, [LumaCurve_MaskKernel_param_5];
	ld.param.u32 	%r7, [LumaCurve_MaskKernel_param_6];
	cvta.to.global.u64 	%rd1, %rd4;
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mov.u32 	%r10, %tid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r10;
	mov.u32 	%r11, %ntid.y;
	mov.u32 	%r12, %ctaid.y;
	mov.u32 	%r13, %tid.y;
	mad.lo.s32 	%r2, %r11, %r12, %r13;
	setp.lt.s32	%p1, %r1, %r6;
	setp.lt.s32	%p2, %r2, %r7;
	and.pred  	%p3, %p1, %p2;
	@!%p3 bra 	BB1_7;
	bra.uni 	BB1_1;

BB1_1:
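	// Mask value for this pixel: default to 1.0 when the mask pointer is null, otherwise
	// read it from the mask plane.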
	setp.ne.s64	%p4, %rd3, 0;
	@%p4 bra 	BB1_3;

	mov.f32 	%f24, 0f3F800000;
	bra.uni 	BB1_4;

BB1_3:
	cvta.to.global.u64 	%rd5, %rd3;
	mad.lo.s32 	%r14, %r2, %r4, %r1;
	mul.wide.s32 	%rd6, %r14, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f24, [%rd7];

BB1_4:
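	// Treat the mask value as luma with Pb = Pr = 0 and convert to R (%f3), G (%f4), B (%f5)
	// via k601YPbPr_To_RGB32f; the zero-chroma terms are kept so the full matrix is applied.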
	mad.lo.s32 	%r15, %r2, %r3, %r1;
	ld.const.f32 	%f7, [k601YPbPr_To_RGB32f];
	ld.const.f32 	%f8, [k601YPbPr_To_RGB32f+4];
	mul.ftz.f32 	%f9, %f8, 0f00000000;
	fma.rn.ftz.f32 	%f10, %f24, %f7, %f9;
	ld.const.f32 	%f11, [k601YPbPr_To_RGB32f+8];
	fma.rn.ftz.f32 	%f3, %f11, 0f00000000, %f10;
	ld.const.f32 	%f12, [k601YPbPr_To_RGB32f+12];
	ld.const.f32 	%f13, [k601YPbPr_To_RGB32f+16];
	mul.ftz.f32 	%f14, %f13, 0f00000000;
	fma.rn.ftz.f32 	%f15, %f24, %f12, %f14;
	ld.const.f32 	%f16, [k601YPbPr_To_RGB32f+20];
	fma.rn.ftz.f32 	%f4, %f16, 0f00000000, %f15;
	ld.const.f32 	%f17, [k601YPbPr_To_RGB32f+24];
	ld.const.f32 	%f18, [k601YPbPr_To_RGB32f+28];
	mul.ftz.f32 	%f19, %f18, 0f00000000;
	fma.rn.ftz.f32 	%f20, %f24, %f17, %f19;
	ld.const.f32 	%f21, [k601YPbPr_To_RGB32f+32];
	fma.rn.ftz.f32 	%f5, %f21, 0f00000000, %f20;
	cvt.s64.s32	%rd2, %r15;
	setp.eq.s32	%p5, %r5, 0;
	@%p5 bra 	BB1_6;

	shl.b64 	%rd8, %rd2, 4;
	add.s64 	%rd9, %rd1, %rd8;
	mov.f32 	%f22, 0f3F800000;
	st.global.v4.f32 	[%rd9], {%f5, %f4, %f3, %f22};
	bra.uni 	BB1_7;

BB1_6:
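	// Half-precision output path: convert each channel to f16 and store as v4.u16; the
	// operand order suggests the same B, G, R, A layout as the float4 store above.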
	shl.b64 	%rd10, %rd2, 3;
	add.s64 	%rd11, %rd1, %rd10;
	mov.f32 	%f23, 0f3F800000;
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f23;
	mov.b16 	%rs1, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f3;
	mov.b16 	%rs2, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f4;
	mov.b16 	%rs3, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f5;
	mov.b16 	%rs4, %temp;
}
	st.global.v4.u16 	[%rd11], {%rs4, %rs3, %rs2, %rs1};

BB1_7:
	ret;
}

.visible .entry LumaCurve_LumaKernel(
	.param .u64 LumaCurve_LumaKernel_param_0,
	.param .u32 LumaCurve_LumaKernel_param_1,
	.param .u64 LumaCurve_LumaKernel_param_2,
	.param .u32 LumaCurve_LumaKernel_param_3,
	.param .u32 LumaCurve_LumaKernel_param_4,
	.param .u32 LumaCurve_LumaKernel_param_5,
	.param .u32 LumaCurve_LumaKernel_param_6,
	.param .u64 LumaCurve_LumaKernel_param_7
)
{
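	// One thread per pixel. Parameter roles (inferred from the code below):
	//   param_0 = image (read and written in place), param_1 = image row stride in pixels,
	//   param_2 = optional strength/mask plane (null means full strength), param_3 = mask
	//   row stride, param_4 = nonzero for float4 / zero for half4 pixels,
	//   param_5 / param_6 = width / height; param_7 is declared but never read in this build.
	// The kernel computes the BT.601 luma of the pixel, remaps it through the inSpline curve
	// (blended by the per-pixel strength), then converts the remapped luma back to RGB with
	// zero chroma, so the result is written as a grayscale image with alpha = 1.0.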
	.reg .pred 	%p<20>;
	.reg .s16 	%rs<13>;
	.reg .s32 	%r<29>;
	.reg .f32 	%f<104>;
	.reg .s64 	%rd<34>;


	ld.param.u64 	%rd7, [LumaCurve_LumaKernel_param_0];
	ld.param.u32 	%r9, [LumaCurve_LumaKernel_param_1];
	ld.param.u64 	%rd8, [LumaCurve_LumaKernel_param_2];
	ld.param.u32 	%r10, [LumaCurve_LumaKernel_param_3];
	ld.param.u32 	%r11, [LumaCurve_LumaKernel_param_4];
	ld.param.u32 	%r12, [LumaCurve_LumaKernel_param_5];
	ld.param.u32 	%r13, [LumaCurve_LumaKernel_param_6];
	mov.u32 	%r14, %ntid.x;
	mov.u32 	%r15, %ctaid.x;
	mov.u32 	%r16, %tid.x;
	mad.lo.s32 	%r1, %r14, %r15, %r16;
	mov.u32 	%r17, %ntid.y;
	mov.u32 	%r18, %ctaid.y;
	mov.u32 	%r19, %tid.y;
	mad.lo.s32 	%r2, %r17, %r18, %r19;
	setp.lt.s32	%p1, %r1, %r12;
	setp.lt.s32	%p2, %r2, %r13;
	and.pred  	%p3, %p1, %p2;
	@!%p3 bra 	BB2_31;
	bra.uni 	BB2_1;

BB2_1:
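	// Compute this pixel's address for both layouts: %rd2 -> float4 (16 bytes/pixel),
	// %rd3 -> half4 (8 bytes/pixel); param_4 selects which one is loaded and stored.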
	cvta.to.global.u64 	%rd1, %rd8;
	cvta.to.global.u64 	%rd9, %rd7;
	mad.lo.s32 	%r20, %r2, %r9, %r1;
	mul.wide.s32 	%rd10, %r20, 16;
	add.s64 	%rd2, %rd9, %rd10;
	mul.wide.s32 	%rd11, %r20, 8;
	add.s64 	%rd3, %rd9, %rd11;
	setp.eq.s32	%p4, %r11, 0;
	@%p4 bra 	BB2_3;

	ld.global.v4.f32 	{%f38, %f39, %f40, %f41}, [%rd2];
	mov.f32 	%f4, %f41;
	mov.f32 	%f97, %f40;
	mov.f32 	%f96, %f39;
	mov.f32 	%f95, %f38;
	bra.uni 	BB2_4;

BB2_3:
	ld.global.v4.u16 	{%rs1, %rs2, %rs3, %rs4}, [%rd3];
	{
	.reg .b16 %temp;
	mov.b16 	%temp, %rs1;
	cvt.f32.f16 	%f95, %temp;
	}
	{
	.reg .b16 %temp;
	mov.b16 	%temp, %rs2;
	cvt.f32.f16 	%f96, %temp;
	}
	{
	.reg .b16 %temp;
	mov.b16 	%temp, %rs3;
	cvt.f32.f16 	%f97, %temp;
	}

BB2_4:
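	// Luma from the first row of kRGB32f_To_601YPbPr (0.299 R + 0.587 G + 0.114 B);
	// the operand order suggests the pixel components are stored as B, G, R, A.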
	ld.const.f32 	%f42, [kRGB32f_To_601YPbPr];
	ld.const.f32 	%f43, [kRGB32f_To_601YPbPr+4];
	mul.ftz.f32 	%f44, %f96, %f43;
	fma.rn.ftz.f32 	%f45, %f97, %f42, %f44;
	ld.const.f32 	%f46, [kRGB32f_To_601YPbPr+8];
	fma.rn.ftz.f32 	%f13, %f95, %f46, %f45;
	setp.ne.s64	%p5, %rd8, 0;
	@%p5 bra 	BB2_6;

	mov.f32 	%f98, 0f3F800000;
	bra.uni 	BB2_7;

BB2_6:
	mad.lo.s32 	%r21, %r2, %r10, %r1;
	mul.wide.s32 	%rd12, %r21, 4;
	add.s64 	%rd13, %rd1, %rd12;
	ld.global.f32 	%f98, [%rd13];

BB2_7:
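	// A strength at or below ~0.001 (0f3A83126F) leaves the luma untouched. Otherwise the
	// curve is evaluated against the inSpline constant table: the code below is the same
	// natural-cubic-spline lookup as InterpolateNaturalCubicSpline2D, inlined, plus
	// out-of-range handling for luma values outside the knot range.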
	setp.leu.ftz.f32	%p6, %f98, 0f3A83126F;
	mov.f32 	%f102, %f13;
	@%p6 bra 	BB2_28;

	ld.const.f32 	%f99, [inSpline];
	setp.lt.ftz.f32	%p7, %f13, %f99;
	@%p7 bra 	BB2_22;

	ld.const.u32 	%r22, [inSpline+192];
	add.s32 	%r28, %r22, -1;
	mul.wide.u32 	%rd14, %r28, 4;
	mov.u64 	%rd15, inSpline;
	add.s64 	%rd4, %rd15, %rd14;
	ld.const.f32 	%f100, [%rd4];
	setp.gt.ftz.f32	%p8, %f13, %f100;
	@%p8 bra 	BB2_17;

	setp.gt.u32	%p9, %r28, 1;
	@%p9 bra 	BB2_12;

	mov.u64 	%rd33, 0;
	bra.uni 	BB2_15;

BB2_12:
	mov.u32 	%r27, 0;

BB2_13:
	add.s32 	%r24, %r28, %r27;
	shr.u32 	%r25, %r24, 1;
	mul.wide.u32 	%rd17, %r25, 4;
	add.s64 	%rd19, %rd15, %rd17;
	ld.const.f32 	%f48, [%rd19];
	setp.gt.ftz.f32	%p10, %f48, %f13;
	selp.b32	%r27, %r27, %r25, %p10;
	selp.b32	%r28, %r25, %r28, %p10;
	sub.s32 	%r26, %r28, %r27;
	setp.gt.u32	%p11, %r26, 1;
	@%p11 bra 	BB2_13;

	cvt.u64.u32	%rd33, %r27;
	mul.wide.u32 	%rd20, %r28, 4;
	add.s64 	%rd22, %rd15, %rd20;
	ld.const.f32 	%f100, [%rd22];
	mul.wide.u32 	%rd23, %r27, 4;
	add.s64 	%rd24, %rd15, %rd23;
	ld.const.f32 	%f99, [%rd24];

BB2_15:
	sub.ftz.f32 	%f22, %f100, %f99;
	setp.eq.ftz.f32	%p12, %f22, 0f00000000;
	mov.f32 	%f103, %f13;
	@%p12 bra 	BB2_27;

	sub.ftz.f32 	%f49, %f100, %f13;
	div.approx.ftz.f32 	%f50, %f49, %f22;
	sub.ftz.f32 	%f51, %f13, %f99;
	div.approx.ftz.f32 	%f52, %f51, %f22;
	shl.b64 	%rd25, %rd33, 2;
	add.s64 	%rd27, %rd15, 64;
	add.s64 	%rd28, %rd27, %rd25;
	ld.const.f32 	%f53, [%rd28];
	mul.wide.u32 	%rd29, %r28, 4;
	add.s64 	%rd30, %rd27, %rd29;
	ld.const.f32 	%f54, [%rd30];
	mul.ftz.f32 	%f55, %f52, %f54;
	fma.rn.ftz.f32 	%f56, %f50, %f53, %f55;
	mul.ftz.f32 	%f57, %f50, %f50;
	mul.ftz.f32 	%f58, %f57, %f50;
	sub.ftz.f32 	%f59, %f58, %f50;
	add.s64 	%rd31, %rd25, %rd15;
	ld.const.f32 	%f60, [%rd31+128];
	mul.ftz.f32 	%f61, %f52, %f52;
	mul.ftz.f32 	%f62, %f61, %f52;
	sub.ftz.f32 	%f63, %f62, %f52;
	add.s64 	%rd32, %rd29, %rd15;
	ld.const.f32 	%f64, [%rd32+128];
	mul.ftz.f32 	%f65, %f63, %f64;
	fma.rn.ftz.f32 	%f66, %f59, %f60, %f65;
	mul.ftz.f32 	%f67, %f22, %f22;
	mul.ftz.f32 	%f68, %f66, %f67;
	mov.f32 	%f69, 0f40C00000;
	div.approx.ftz.f32 	%f70, %f68, %f69;
	add.ftz.f32 	%f23, %f56, %f70;
	mov.f32 	%f103, %f23;
	bra.uni 	BB2_27;

BB2_17:
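	// Luma above the last knot: handled as a special case driven by the last knot's mapped
	// value (read from the y table at inSpline + 64).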
	ld.const.f32 	%f24, [%rd4+64];
	setp.eq.ftz.f32	%p13, %f24, 0f00000000;
	@%p13 bra 	BB2_21;

	setp.eq.ftz.f32	%p14, %f24, 0f3F800000;
	@%p14 bra 	BB2_20;

	setp.gtu.ftz.f32	%p15, %f13, 0f3F800000;
	mov.f32 	%f71, 0f3F800000;
	sub.ftz.f32 	%f72, %f71, %f24;
	sub.ftz.f32 	%f73, %f13, %f72;
	selp.f32	%f25, %f73, %f24, %p15;
	mov.f32 	%f103, %f25;
	bra.uni 	BB2_27;

BB2_20:
	add.ftz.f32 	%f74, %f13, 0f3F800000;
	sub.ftz.f32 	%f26, %f74, %f100;
	mov.f32 	%f103, %f26;
	bra.uni 	BB2_27;

BB2_21:
	sub.ftz.f32 	%f27, %f100, %f13;
	mov.f32 	%f103, %f27;
	bra.uni 	BB2_27;

BB2_22:
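	// Luma below the first knot: the analogous special case, driven by the first knot's
	// mapped value.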
	ld.const.f32 	%f28, [inSpline+64];
	setp.eq.ftz.f32	%p16, %f28, 0f00000000;
	@%p16 bra 	BB2_26;

	setp.eq.ftz.f32	%p17, %f28, 0f3F800000;
	@%p17 bra 	BB2_25;

	setp.ltu.ftz.f32	%p18, %f13, 0f00000000;
	add.ftz.f32 	%f75, %f13, %f28;
	selp.f32	%f29, %f75, %f28, %p18;
	mov.f32 	%f103, %f29;
	bra.uni 	BB2_27;

BB2_25:
	add.ftz.f32 	%f76, %f99, 0f3F800000;
	sub.ftz.f32 	%f30, %f76, %f13;
	mov.f32 	%f103, %f30;
	bra.uni 	BB2_27;

BB2_26:
	sub.ftz.f32 	%f31, %f13, %f99;
	mov.f32 	%f103, %f31;

BB2_27:
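	// Blend the curved luma with the original: out = Y + strength * (curve(Y) - Y).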
	mov.f32 	%f32, %f103;
	sub.ftz.f32 	%f77, %f32, %f13;
	fma.rn.ftz.f32 	%f102, %f98, %f77, %f13;

BB2_28:
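	// Convert the (possibly remapped) luma back to RGB via k601YPbPr_To_RGB32f with zero
	// chroma, then store as float4 or half4 with alpha = 1.0.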
	ld.const.f32 	%f78, [k601YPbPr_To_RGB32f];
	ld.const.f32 	%f79, [k601YPbPr_To_RGB32f+4];
	mul.ftz.f32 	%f80, %f79, 0f00000000;
	fma.rn.ftz.f32 	%f81, %f102, %f78, %f80;
	ld.const.f32 	%f82, [k601YPbPr_To_RGB32f+8];
	fma.rn.ftz.f32 	%f35, %f82, 0f00000000, %f81;
	ld.const.f32 	%f83, [k601YPbPr_To_RGB32f+12];
	ld.const.f32 	%f84, [k601YPbPr_To_RGB32f+16];
	mul.ftz.f32 	%f85, %f84, 0f00000000;
	fma.rn.ftz.f32 	%f86, %f102, %f83, %f85;
	ld.const.f32 	%f87, [k601YPbPr_To_RGB32f+20];
	fma.rn.ftz.f32 	%f36, %f87, 0f00000000, %f86;
	ld.const.f32 	%f88, [k601YPbPr_To_RGB32f+24];
	ld.const.f32 	%f89, [k601YPbPr_To_RGB32f+28];
	mul.ftz.f32 	%f90, %f89, 0f00000000;
	fma.rn.ftz.f32 	%f91, %f102, %f88, %f90;
	ld.const.f32 	%f92, [k601YPbPr_To_RGB32f+32];
	fma.rn.ftz.f32 	%f37, %f92, 0f00000000, %f91;
	@%p4 bra 	BB2_30;

	mov.f32 	%f93, 0f3F800000;
	st.global.v4.f32 	[%rd2], {%f37, %f36, %f35, %f93};
	bra.uni 	BB2_31;

BB2_30:
	mov.f32 	%f94, 0f3F800000;
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f94;
	mov.b16 	%rs9, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f35;
	mov.b16 	%rs10, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f36;
	mov.b16 	%rs11, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f37;
	mov.b16 	%rs12, %temp;
}
	st.global.v4.u16 	[%rd3], {%rs12, %rs11, %rs10, %rs9};

BB2_31:
	ret;
}

.visible .entry LumaCurve_CompositeKernel(
	.param .u64 LumaCurve_CompositeKernel_param_0,
	.param .u32 LumaCurve_CompositeKernel_param_1,
	.param .u64 LumaCurve_CompositeKernel_param_2,
	.param .u32 LumaCurve_CompositeKernel_param_3,
	.param .u32 LumaCurve_CompositeKernel_param_4,
	.param .u32 LumaCurve_CompositeKernel_param_5,
	.param .u32 LumaCurve_CompositeKernel_param_6,
	.param .u64 LumaCurve_CompositeKernel_param_7
)
{
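	// Same structure and parameter roles as LumaCurve_LumaKernel, but the full pixel is
	// converted to Y'PbPr, only Y is remapped through the inSpline curve, and the result is
	// converted back to RGB with the original Pb/Pr and alpha preserved.
	// param_7 is declared but never read in this build.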
	.reg .pred 	%p<20>;
	.reg .s16 	%rs<13>;
	.reg .s32 	%r<38>;
	.reg .f32 	%f<117>;
	.reg .s64 	%rd<37>;


	ld.param.u64 	%rd7, [LumaCurve_CompositeKernel_param_0];
	ld.param.u32 	%r9, [LumaCurve_CompositeKernel_param_1];
	ld.param.u64 	%rd8, [LumaCurve_CompositeKernel_param_2];
	ld.param.u32 	%r10, [LumaCurve_CompositeKernel_param_3];
	ld.param.u32 	%r11, [LumaCurve_CompositeKernel_param_4];
	ld.param.u32 	%r12, [LumaCurve_CompositeKernel_param_5];
	ld.param.u32 	%r13, [LumaCurve_CompositeKernel_param_6];
	mov.u32 	%r14, %ntid.x;
	mov.u32 	%r15, %ctaid.x;
	mov.u32 	%r16, %tid.x;
	mad.lo.s32 	%r1, %r14, %r15, %r16;
	mov.u32 	%r17, %ntid.y;
	mov.u32 	%r18, %ctaid.y;
	mov.u32 	%r19, %tid.y;
	mad.lo.s32 	%r2, %r17, %r18, %r19;
	setp.lt.s32	%p1, %r1, %r12;
	setp.lt.s32	%p2, %r2, %r13;
	and.pred  	%p3, %p1, %p2;
	@!%p3 bra 	BB3_31;
	bra.uni 	BB3_1;

BB3_1:
	cvta.to.global.u64 	%rd1, %rd8;
	cvta.to.global.u64 	%rd9, %rd7;
	mad.lo.s32 	%r20, %r2, %r9, %r1;
	mul.wide.s32 	%rd10, %r20, 16;
	add.s64 	%rd2, %rd9, %rd10;
	mul.wide.s32 	%rd11, %r20, 8;
	add.s64 	%rd3, %rd9, %rd11;
	setp.eq.s32	%p4, %r11, 0;
	@%p4 bra 	BB3_3;

	ld.global.v4.f32 	{%f40, %f41, %f42, %f43}, [%rd2];
	mov.f32 	%f110, %f43;
	mov.f32 	%f109, %f42;
	mov.f32 	%f108, %f41;
	mov.f32 	%f107, %f40;
	bra.uni 	BB3_4;

BB3_3:
	ld.global.v4.u16 	{%rs1, %rs2, %rs3, %rs4}, [%rd3];
	{
	.reg .b16 %temp;
	mov.b16 	%temp, %rs1;
	cvt.f32.f16 	%f107, %temp;
	}
	{
	.reg .b16 %temp;
	mov.b16 	%temp, %rs2;
	cvt.f32.f16 	%f108, %temp;
	}
	{
	.reg .b16 %temp;
	mov.b16 	%temp, %rs3;
	cvt.f32.f16 	%f109, %temp;
	}
	{
	.reg .b16 %temp;
	mov.b16 	%temp, %rs4;
	cvt.f32.f16 	%f110, %temp;
	}

BB3_4:
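	// Full RGB -> Y'PbPr conversion: %f14 = Y, %f15 = Pb, %f16 = Pr, using the rows of
	// kRGB32f_To_601YPbPr applied to what appears to be a B, G, R component order.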
	ld.const.f32 	%f44, [kRGB32f_To_601YPbPr];
	ld.const.f32 	%f45, [kRGB32f_To_601YPbPr+4];
	mul.ftz.f32 	%f46, %f108, %f45;
	fma.rn.ftz.f32 	%f47, %f109, %f44, %f46;
	ld.const.f32 	%f48, [kRGB32f_To_601YPbPr+8];
	fma.rn.ftz.f32 	%f14, %f107, %f48, %f47;
	ld.const.f32 	%f49, [kRGB32f_To_601YPbPr+12];
	ld.const.f32 	%f50, [kRGB32f_To_601YPbPr+16];
	mul.ftz.f32 	%f51, %f108, %f50;
	fma.rn.ftz.f32 	%f52, %f109, %f49, %f51;
	ld.const.f32 	%f53, [kRGB32f_To_601YPbPr+20];
	fma.rn.ftz.f32 	%f15, %f107, %f53, %f52;
	ld.const.f32 	%f54, [kRGB32f_To_601YPbPr+24];
	ld.const.f32 	%f55, [kRGB32f_To_601YPbPr+28];
	mul.ftz.f32 	%f56, %f108, %f55;
	fma.rn.ftz.f32 	%f57, %f109, %f54, %f56;
	ld.const.f32 	%f58, [kRGB32f_To_601YPbPr+32];
	fma.rn.ftz.f32 	%f16, %f107, %f58, %f57;
	setp.ne.s64	%p5, %rd8, 0;
	@%p5 bra 	BB3_6;

	mov.f32 	%f111, 0f3F800000;
	bra.uni 	BB3_7;

BB3_6:
	mad.lo.s32 	%r21, %r2, %r10, %r1;
	mul.wide.s32 	%rd12, %r21, 4;
	add.s64 	%rd13, %rd1, %rd12;
	ld.global.f32 	%f111, [%rd13];

BB3_7:
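	// A strength at or below ~0.001 (0f3A83126F) leaves Y untouched; otherwise the same
	// inlined inSpline curve lookup as in LumaCurve_LumaKernel follows (binary search,
	// cubic evaluation, and out-of-range handling).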
	setp.leu.ftz.f32	%p6, %f111, 0f3A83126F;
	mov.f32 	%f115, %f14;
	@%p6 bra 	BB3_28;

	ld.const.f32 	%f112, [inSpline];
	setp.lt.ftz.f32	%p7, %f14, %f112;
	@%p7 bra 	BB3_22;

	ld.const.u32 	%r22, [inSpline+192];
	add.s32 	%r37, %r22, -1;
	mul.wide.u32 	%rd14, %r37, 4;
	mov.u64 	%rd15, inSpline;
	add.s64 	%rd4, %rd15, %rd14;
	ld.const.f32 	%f113, [%rd4];
	setp.gt.ftz.f32	%p8, %f14, %f113;
	@%p8 bra 	BB3_17;

	setp.gt.u32	%p9, %r37, 1;
	@%p9 bra 	BB3_12;

	mov.u64 	%rd36, 0;
	bra.uni 	BB3_15;

BB3_12:
	mov.u32 	%r36, 0;

BB3_13:
	add.s32 	%r24, %r37, %r36;
	shr.u32 	%r25, %r24, 1;
	mul.wide.u32 	%rd17, %r25, 4;
	add.s64 	%rd19, %rd15, %rd17;
	ld.const.f32 	%f60, [%rd19];
	setp.gt.ftz.f32	%p10, %f60, %f14;
	selp.b32	%r36, %r36, %r25, %p10;
	selp.b32	%r37, %r25, %r37, %p10;
	sub.s32 	%r26, %r37, %r36;
	setp.gt.u32	%p11, %r26, 1;
	@%p11 bra 	BB3_13;

	cvt.u64.u32	%rd36, %r36;
	mul.wide.u32 	%rd20, %r37, 4;
	add.s64 	%rd22, %rd15, %rd20;
	ld.const.f32 	%f113, [%rd22];
	mul.wide.u32 	%rd23, %r36, 4;
	add.s64 	%rd24, %rd15, %rd23;
	ld.const.f32 	%f112, [%rd24];

BB3_15:
	sub.ftz.f32 	%f61, %f113, %f112;
	setp.eq.ftz.f32	%p12, %f61, 0f00000000;
	mov.f32 	%f116, %f14;
	@%p12 bra 	BB3_27;

	sub.ftz.f32 	%f62, %f113, %f14;
	div.approx.ftz.f32 	%f64, %f62, %f61;
	sub.ftz.f32 	%f65, %f14, %f112;
	div.approx.ftz.f32 	%f66, %f65, %f61;
	shl.b64 	%rd25, %rd36, 2;
	add.s64 	%rd27, %rd15, 64;
	add.s64 	%rd28, %rd27, %rd25;
	ld.const.f32 	%f67, [%rd28];
	mul.wide.u32 	%rd29, %r37, 4;
	add.s64 	%rd30, %rd27, %rd29;
	ld.const.f32 	%f68, [%rd30];
	mul.ftz.f32 	%f69, %f66, %f68;
	fma.rn.ftz.f32 	%f70, %f64, %f67, %f69;
	mul.ftz.f32 	%f71, %f64, %f64;
	mul.ftz.f32 	%f72, %f71, %f64;
	sub.ftz.f32 	%f73, %f72, %f64;
	add.s64 	%rd31, %rd25, %rd15;
	ld.const.f32 	%f74, [%rd31+128];
	mul.ftz.f32 	%f75, %f66, %f66;
	mul.ftz.f32 	%f76, %f75, %f66;
	sub.ftz.f32 	%f77, %f76, %f66;
	add.s64 	%rd32, %rd29, %rd15;
	ld.const.f32 	%f78, [%rd32+128];
	mul.ftz.f32 	%f79, %f77, %f78;
	fma.rn.ftz.f32 	%f80, %f73, %f74, %f79;
	mul.ftz.f32 	%f81, %f61, %f61;
	mul.ftz.f32 	%f82, %f80, %f81;
	mov.f32 	%f83, 0f40C00000;
	div.approx.ftz.f32 	%f84, %f82, %f83;
	add.ftz.f32 	%f25, %f70, %f84;
	mov.f32 	%f116, %f25;
	bra.uni 	BB3_27;

BB3_17:
	ld.const.f32 	%f26, [%rd4+64];
	setp.eq.ftz.f32	%p13, %f26, 0f00000000;
	@%p13 bra 	BB3_21;

	setp.eq.ftz.f32	%p14, %f26, 0f3F800000;
	@%p14 bra 	BB3_20;

	setp.gtu.ftz.f32	%p15, %f14, 0f3F800000;
	mov.f32 	%f85, 0f3F800000;
	sub.ftz.f32 	%f86, %f85, %f26;
	sub.ftz.f32 	%f87, %f14, %f86;
	selp.f32	%f27, %f87, %f26, %p15;
	mov.f32 	%f116, %f27;
	bra.uni 	BB3_27;

BB3_20:
	add.ftz.f32 	%f88, %f14, 0f3F800000;
	sub.ftz.f32 	%f28, %f88, %f113;
	mov.f32 	%f116, %f28;
	bra.uni 	BB3_27;

BB3_21:
	sub.ftz.f32 	%f29, %f113, %f14;
	mov.f32 	%f116, %f29;
	bra.uni 	BB3_27;

BB3_22:
	ld.const.f32 	%f30, [inSpline+64];
	setp.eq.ftz.f32	%p16, %f30, 0f00000000;
	@%p16 bra 	BB3_26;

	setp.eq.ftz.f32	%p17, %f30, 0f3F800000;
	@%p17 bra 	BB3_25;

	setp.ltu.ftz.f32	%p18, %f14, 0f00000000;
	add.ftz.f32 	%f89, %f14, %f30;
	selp.f32	%f31, %f89, %f30, %p18;
	mov.f32 	%f116, %f31;
	bra.uni 	BB3_27;

BB3_25:
	add.ftz.f32 	%f90, %f112, 0f3F800000;
	sub.ftz.f32 	%f32, %f90, %f14;
	mov.f32 	%f116, %f32;
	bra.uni 	BB3_27;

BB3_26:
	sub.ftz.f32 	%f33, %f14, %f112;
	mov.f32 	%f116, %f33;

BB3_27:
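	// Blend the curved luma with the original: out = Y + strength * (curve(Y) - Y).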
	mov.f32 	%f34, %f116;
	sub.ftz.f32 	%f91, %f34, %f14;
	fma.rn.ftz.f32 	%f115, %f111, %f91, %f14;

BB3_28:
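	// Rebuild RGB from the remapped Y and the original Pb/Pr via k601YPbPr_To_RGB32f, then
	// store as float4 or half4, keeping the original alpha (%f110).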
	ld.const.f32 	%f92, [k601YPbPr_To_RGB32f];
	ld.const.f32 	%f93, [k601YPbPr_To_RGB32f+4];
	mul.ftz.f32 	%f94, %f15, %f93;
	fma.rn.ftz.f32 	%f95, %f115, %f92, %f94;
	ld.const.f32 	%f96, [k601YPbPr_To_RGB32f+8];
	fma.rn.ftz.f32 	%f37, %f16, %f96, %f95;
	ld.const.f32 	%f97, [k601YPbPr_To_RGB32f+12];
	ld.const.f32 	%f98, [k601YPbPr_To_RGB32f+16];
	mul.ftz.f32 	%f99, %f15, %f98;
	fma.rn.ftz.f32 	%f100, %f115, %f97, %f99;
	ld.const.f32 	%f101, [k601YPbPr_To_RGB32f+20];
	fma.rn.ftz.f32 	%f38, %f16, %f101, %f100;
	ld.const.f32 	%f102, [k601YPbPr_To_RGB32f+24];
	ld.const.f32 	%f103, [k601YPbPr_To_RGB32f+28];
	mul.ftz.f32 	%f104, %f15, %f103;
	fma.rn.ftz.f32 	%f105, %f115, %f102, %f104;
	ld.const.f32 	%f106, [k601YPbPr_To_RGB32f+32];
	fma.rn.ftz.f32 	%f39, %f16, %f106, %f105;
	@%p4 bra 	BB3_30;

	st.global.v4.f32 	[%rd2], {%f39, %f38, %f37, %f110};
	bra.uni 	BB3_31;

BB3_30:
	mul.wide.s32 	%rd34, %r20, 8;
	add.s64 	%rd35, %rd9, %rd34;
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f110;
	mov.b16 	%rs9, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f37;
	mov.b16 	%rs10, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f38;
	mov.b16 	%rs11, %temp;
}
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f39;
	mov.b16 	%rs12, %temp;
}
	st.global.v4.u16 	[%rd35], {%rs12, %rs11, %rs10, %rs9};

BB3_31:
	ret;
}


