//
// Generated by NVIDIA NVVM Compiler
// Compiler built on Wed Jul 10 12:41:20 2013 (1373485280)
// Cuda compilation tools, release 5.5, V5.5.0
//

.version 3.2
.target sm_30
.address_size 64

	.file	1 "D:/singlebarrel/releases/2014.03/shared/adobe/MediaCore/GPUFoundation/Src/ImageProcessing/FieldReverse.cu", 1399785311, 2039
	.file	2 "d:\\singlebarrel\\releases\\2014.03\\shared\\adobe\\mediacore\\external\\3rdparty\\nvidia\\cuda\\win\\include\\device_functions.h", 1399785281, 191626
// Module-scope constant: bytes spell the NUL-terminated string "__CUDA_FTZ".
// Compiler-emitted marker (presumably the flush-to-zero module flag used by
// the CUDA runtime/driver -- TODO confirm; it is not referenced in this chunk).
.global .align 1 .b8 $str[11] = {95, 95, 67, 85, 68, 65, 95, 70, 84, 90, 0};

//-----------------------------------------------------------------------
// FieldReverseKernel -- compiler-generated PTX (nvcc 5.5, sm_30) from
// FieldReverse.cu.  One thread per output pixel (x, y).
//
// Parameter roles inferred from the instruction stream
// (TODO: confirm names against the FieldReverse.cu source):
//   param_0 (%rd3/%rd2)  source buffer base (generic ptr, converted to global)
//   param_1 (%r5)        source row pitch, in pixel elements
//   param_2 (%rd4/%rd1)  destination buffer base (generic ptr -> global)
//   param_3 (%r6)        destination row pitch, in pixel elements
//   param_4 (%r7)        format flag: nonzero => 4 x f32 (16 B) pixels,
//                        zero => 4 x f16 (8 B) pixels
//   param_5 (%r8)        x bound (presumably width  -- confirm)
//   param_6 (%r9)        y bound (presumably height -- confirm)
//
// Each in-range thread copies the 4-channel pixel at (x, max(y-1, 0)) in
// the source to (x, y) in the destination, passing the channels through
// f32 registers; f16 results are converted with round-to-nearest,
// flush-to-zero (cvt.rn.ftz.f16.f32).
//-----------------------------------------------------------------------
.visible .entry FieldReverseKernel(
	.param .u64 FieldReverseKernel_param_0,
	.param .u32 FieldReverseKernel_param_1,
	.param .u64 FieldReverseKernel_param_2,
	.param .u32 FieldReverseKernel_param_3,
	.param .u32 FieldReverseKernel_param_4,
	.param .u32 FieldReverseKernel_param_5,
	.param .u32 FieldReverseKernel_param_6
)
{
	.reg .pred 	%p<6>;
	.reg .s16 	%rs<13>;
	.reg .s32 	%r<19>;
	.reg .f32 	%f<25>;
	.reg .s64 	%rd<13>;


	// Load all kernel parameters, then convert the two generic pointers
	// to global-space addresses: %rd2 = src base, %rd1 = dst base.
	ld.param.u64 	%rd3, [FieldReverseKernel_param_0];
	ld.param.u32 	%r5, [FieldReverseKernel_param_1];
	ld.param.u64 	%rd4, [FieldReverseKernel_param_2];
	ld.param.u32 	%r6, [FieldReverseKernel_param_3];
	ld.param.u32 	%r7, [FieldReverseKernel_param_4];
	ld.param.u32 	%r8, [FieldReverseKernel_param_5];
	ld.param.u32 	%r9, [FieldReverseKernel_param_6];
	cvta.to.global.u64 	%rd1, %rd4;
	cvta.to.global.u64 	%rd2, %rd3;
	.loc 1 28 1
	// %r1 = x = ntid.x * ctaid.x + tid.x  (global thread x-coordinate)
	mov.u32 	%r10, %ntid.x;
	mov.u32 	%r11, %ctaid.x;
	mov.u32 	%r12, %tid.x;
	mad.lo.s32 	%r1, %r10, %r11, %r12;
	// %r2 = y = ntid.y * ctaid.y + tid.y  (global thread y-coordinate)
	mov.u32 	%r13, %ntid.y;
	mov.u32 	%r14, %ctaid.y;
	mov.u32 	%r15, %tid.y;
	mad.lo.s32 	%r2, %r13, %r14, %r15;
	.loc 1 28 1
	// Guard: only threads with x < param_5 AND y < param_6 do any work
	// (signed compares, so the exit is also taken for negative values).
	setp.lt.s32	%p1, %r1, %r8;
	setp.lt.s32	%p2, %r2, %r9;
	and.pred  	%p3, %p1, %p2;
	.loc 1 28 1
	@!%p3 bra 	BB0_7;
	bra.uni 	BB0_1;

BB0_1:
	.loc 1 28 1
	// Source row = max(y - 1, 0): read one row above the output row,
	// clamped so row 0 reads itself (the "field reverse" shift).
	add.s32 	%r16, %r2, -1;
	mov.u32 	%r17, 0;
	.loc 2 2642 10
	max.s32 	%r18, %r16, %r17;
	.loc 1 28 1
	// %r3 = linear source element index = srcRow * param_1 + x
	mad.lo.s32 	%r3, %r18, %r5, %r1;
	// %p4 = (param_4 == 0); true selects the half-float path.  The same
	// predicate is reused later to pick the store format at BB0_4.
	setp.eq.s32	%p4, %r7, 0;
	@%p4 bra 	BB0_3;

	// f32 path: load one 16-byte pixel (4 x f32) from src into %f21..%f24.
	mul.wide.s32 	%rd5, %r3, 16;
	add.s64 	%rd6, %rd2, %rd5;
	ld.global.v4.f32 	{%f17, %f18, %f19, %f20}, [%rd6];
	mov.f32 	%f24, %f20;
	mov.f32 	%f23, %f19;
	mov.f32 	%f22, %f18;
	mov.f32 	%f21, %f17;
	bra.uni 	BB0_4;

BB0_3:
	// f16 path: load one 8-byte pixel (4 x u16 raw halves) from src ...
	mul.wide.s32 	%rd7, %r3, 8;
	add.s64 	%rd8, %rd2, %rd7;
	.loc 1 28 1
	ld.global.v4.u16 	{%rs1, %rs2, %rs3, %rs4}, [%rd8];
	.loc 2 3518 10
	// ... then widen each half-precision channel to f32 (%f21..%f24).
	{
	.reg .b16 %temp;
	mov.b16 	%temp, %rs1;
	cvt.f32.f16 	%f21, %temp;
	}
	{
	.reg .b16 %temp;
	mov.b16 	%temp, %rs2;
	cvt.f32.f16 	%f22, %temp;
	}
	{
	.reg .b16 %temp;
	mov.b16 	%temp, %rs3;
	cvt.f32.f16 	%f23, %temp;
	}
	{
	.reg .b16 %temp;
	mov.b16 	%temp, %rs4;
	cvt.f32.f16 	%f24, %temp;
	}

BB0_4:
	// Both load paths converge here with the pixel in %f21..%f24.
	.loc 1 28 1
	// %r4 = linear destination element index = y * param_3 + x
	mad.lo.s32 	%r4, %r2, %r6, %r1;
	.loc 1 28 1
	// Reuse %p4 (param_4 == 0) to choose the matching store format.
	@%p4 bra 	BB0_6;

	// f32 store: write the 16-byte pixel straight to dst.
	mul.wide.s32 	%rd9, %r4, 16;
	add.s64 	%rd10, %rd1, %rd9;
	.loc 1 28 1
	st.global.v4.f32 	[%rd10], {%f21, %f22, %f23, %f24};
	bra.uni 	BB0_7;

BB0_6:
	// f16 store: narrow each channel back to half precision
	// (round-to-nearest, flush-to-zero) and write the 8-byte pixel.
	mul.wide.s32 	%rd11, %r4, 8;
	add.s64 	%rd12, %rd1, %rd11;
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f21;
	mov.b16 	%rs9, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f22;
	mov.b16 	%rs10, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f23;
	mov.b16 	%rs11, %temp;
}
	.loc 2 3513 10
	{
	.reg .b16 %temp;
	cvt.rn.ftz.f16.f32 	%temp, %f24;
	mov.b16 	%rs12, %temp;
}
	.loc 1 28 235
	st.global.v4.u16 	[%rd12], {%rs9, %rs10, %rs11, %rs12};

BB0_7:
	// Out-of-bounds threads and completed threads exit here.
	.loc 1 28 2
	ret;
}


