diff --git a/clang/test/CodeGenCUDA/amdgpu-kernel-arg-pointer-type.cu b/clang/test/CodeGenCUDA/amdgpu-kernel-arg-pointer-type.cu index a48affaec3c8a..c13123ab11356 100644 --- a/clang/test/CodeGenCUDA/amdgpu-kernel-arg-pointer-type.cu +++ b/clang/test/CodeGenCUDA/amdgpu-kernel-arg-pointer-type.cu @@ -58,13 +58,11 @@ // OPT-NEXT: ret void // // OPT-SPIRV-LABEL: define spir_kernel void @_Z7kernel1Pi( -// OPT-SPIRV-SAME: ptr addrspace(1) noundef [[X_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR0:[0-9]+]] !max_work_group_size [[META5:![0-9]+]] { +// OPT-SPIRV-SAME: ptr addrspace(1) noundef captures(none) [[X_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR0:[0-9]+]] !max_work_group_size [[META5:![0-9]+]] { // OPT-SPIRV-NEXT: [[ENTRY:.*:]] -// OPT-SPIRV-NEXT: [[TMP0:%.*]] = ptrtoint ptr addrspace(1) [[X_COERCE]] to i64 -// OPT-SPIRV-NEXT: [[TMP1:%.*]] = inttoptr i64 [[TMP0]] to ptr addrspace(4) -// OPT-SPIRV-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4 -// OPT-SPIRV-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1 -// OPT-SPIRV-NEXT: store i32 [[INC]], ptr addrspace(4) [[TMP1]], align 4 +// OPT-SPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(1) [[X_COERCE]], align 4 +// OPT-SPIRV-NEXT: [[INC:%.*]] = add nsw i32 [[TMP0]], 1 +// OPT-SPIRV-NEXT: store i32 [[INC]], ptr addrspace(1) [[X_COERCE]], align 4 // OPT-SPIRV-NEXT: ret void // // HOST-LABEL: define dso_local void @_Z22__device_stub__kernel1Pi( @@ -126,13 +124,11 @@ __global__ void kernel1(int *x) { // OPT-NEXT: ret void // // OPT-SPIRV-LABEL: define spir_kernel void @_Z7kernel2Ri( -// OPT-SPIRV-SAME: ptr addrspace(1) noundef align 4 dereferenceable(4) [[X_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR0]] !max_work_group_size [[META5]] { +// OPT-SPIRV-SAME: ptr addrspace(1) noundef align 4 captures(none) dereferenceable(4) [[X_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR0]] !max_work_group_size [[META5]] { // OPT-SPIRV-NEXT: [[ENTRY:.*:]] -// OPT-SPIRV-NEXT: [[TMP0:%.*]] 
= ptrtoint ptr addrspace(1) [[X_COERCE]] to i64 -// OPT-SPIRV-NEXT: [[TMP1:%.*]] = inttoptr i64 [[TMP0]] to ptr addrspace(4) -// OPT-SPIRV-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4 -// OPT-SPIRV-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1 -// OPT-SPIRV-NEXT: store i32 [[INC]], ptr addrspace(4) [[TMP1]], align 4 +// OPT-SPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(1) [[X_COERCE]], align 4 +// OPT-SPIRV-NEXT: [[INC:%.*]] = add nsw i32 [[TMP0]], 1 +// OPT-SPIRV-NEXT: store i32 [[INC]], ptr addrspace(1) [[X_COERCE]], align 4 // OPT-SPIRV-NEXT: ret void // // HOST-LABEL: define dso_local void @_Z22__device_stub__kernel2Ri( @@ -195,7 +191,7 @@ __global__ void kernel2(int &x) { // OPT-NEXT: ret void // // OPT-SPIRV-LABEL: define spir_kernel void @_Z7kernel3PU3AS2iPU3AS1i( -// OPT-SPIRV-SAME: ptr addrspace(2) noundef readonly captures(none) [[X:%.*]], ptr addrspace(1) noundef writeonly captures(none) initializes((0, 4)) [[Y:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR1:[0-9]+]] !max_work_group_size [[META5]] { +// OPT-SPIRV-SAME: ptr addrspace(2) noundef readonly captures(none) [[X:%.*]], ptr addrspace(1) noundef writeonly captures(none) initializes((0, 4)) [[Y:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR0]] !max_work_group_size [[META5]] { // OPT-SPIRV-NEXT: [[ENTRY:.*:]] // OPT-SPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(2) [[X]], align 4 // OPT-SPIRV-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[Y]], align 4 @@ -261,7 +257,7 @@ __global__ void kernel3(__attribute__((address_space(2))) int *x, // OPT-NEXT: ret void // // OPT-SPIRV-LABEL: define spir_func void @_Z4funcPi( -// OPT-SPIRV-SAME: ptr addrspace(4) noundef captures(none) [[X:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR2:[0-9]+]] { +// OPT-SPIRV-SAME: ptr addrspace(4) noundef captures(none) [[X:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR1:[0-9]+]] { // OPT-SPIRV-NEXT: [[ENTRY:.*:]] // OPT-SPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(4) [[X]], align 4 // 
OPT-SPIRV-NEXT: [[INC:%.*]] = add nsw i32 [[TMP0]], 1 @@ -343,7 +339,7 @@ struct S { // OPT-NEXT: ret void // // OPT-SPIRV-LABEL: define spir_kernel void @_Z7kernel41S( -// OPT-SPIRV-SAME: [[STRUCT_S:%.*]] [[S_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR0]] !max_work_group_size [[META5]] { +// OPT-SPIRV-SAME: [[STRUCT_S:%.*]] [[S_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR2:[0-9]+]] !max_work_group_size [[META5]] { // OPT-SPIRV-NEXT: [[ENTRY:.*:]] // OPT-SPIRV-NEXT: [[TMP0:%.*]] = extractvalue [[STRUCT_S]] [[S_COERCE]], 0 // OPT-SPIRV-NEXT: [[TMP1:%.*]] = extractvalue [[STRUCT_S]] [[S_COERCE]], 1 @@ -446,19 +442,17 @@ __global__ void kernel4(struct S s) { // OPT-NEXT: ret void // // OPT-SPIRV-LABEL: define spir_kernel void @_Z7kernel5P1S( -// OPT-SPIRV-SAME: ptr addrspace(1) noundef [[S_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR0]] !max_work_group_size [[META5]] { +// OPT-SPIRV-SAME: ptr addrspace(1) noundef readonly captures(none) [[S_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR2]] !max_work_group_size [[META5]] { // OPT-SPIRV-NEXT: [[ENTRY:.*:]] -// OPT-SPIRV-NEXT: [[TMP0:%.*]] = ptrtoint ptr addrspace(1) [[S_COERCE]] to i64 -// OPT-SPIRV-NEXT: [[TMP1:%.*]] = inttoptr i64 [[TMP0]] to ptr addrspace(4) -// OPT-SPIRV-NEXT: [[TMP2:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[TMP1]], align 8 -// OPT-SPIRV-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4 -// OPT-SPIRV-NEXT: [[INC:%.*]] = add nsw i32 [[TMP3]], 1 -// OPT-SPIRV-NEXT: store i32 [[INC]], ptr addrspace(4) [[TMP2]], align 4 -// OPT-SPIRV-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TMP1]], i64 8 -// OPT-SPIRV-NEXT: [[TMP4:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[Y]], align 8 -// OPT-SPIRV-NEXT: [[TMP5:%.*]] = load float, ptr addrspace(4) [[TMP4]], align 4 -// OPT-SPIRV-NEXT: [[ADD:%.*]] = fadd contract float [[TMP5]], 1.000000e+00 -// OPT-SPIRV-NEXT: store float [[ADD]], ptr addrspace(4) [[TMP4]], align 4 +// 
OPT-SPIRV-NEXT: [[TMP0:%.*]] = load ptr addrspace(4), ptr addrspace(1) [[S_COERCE]], align 8 +// OPT-SPIRV-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) [[TMP0]], align 4 +// OPT-SPIRV-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1 +// OPT-SPIRV-NEXT: store i32 [[INC]], ptr addrspace(4) [[TMP0]], align 4 +// OPT-SPIRV-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(1) [[S_COERCE]], i64 8 +// OPT-SPIRV-NEXT: [[TMP2:%.*]] = load ptr addrspace(4), ptr addrspace(1) [[Y]], align 8 +// OPT-SPIRV-NEXT: [[TMP3:%.*]] = load float, ptr addrspace(4) [[TMP2]], align 4 +// OPT-SPIRV-NEXT: [[ADD:%.*]] = fadd contract float [[TMP3]], 1.000000e+00 +// OPT-SPIRV-NEXT: store float [[ADD]], ptr addrspace(4) [[TMP2]], align 4 // OPT-SPIRV-NEXT: ret void // // HOST-LABEL: define dso_local void @_Z22__device_stub__kernel5P1S( @@ -551,7 +545,7 @@ struct T { // OPT-NEXT: ret void // // OPT-SPIRV-LABEL: define spir_kernel void @_Z7kernel61T( -// OPT-SPIRV-SAME: [[STRUCT_T:%.*]] [[T_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR0]] !max_work_group_size [[META5]] { +// OPT-SPIRV-SAME: [[STRUCT_T:%.*]] [[T_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR2]] !max_work_group_size [[META5]] { // OPT-SPIRV-NEXT: [[ENTRY:.*:]] // OPT-SPIRV-NEXT: [[TMP0:%.*]] = extractvalue [[STRUCT_T]] [[T_COERCE]], 0 // OPT-SPIRV-NEXT: [[DOTFCA_0_EXTRACT:%.*]] = extractvalue [2 x ptr addrspace(4)] [[TMP0]], 0 @@ -631,13 +625,11 @@ __global__ void kernel6(struct T t) { // OPT-NEXT: ret void // // OPT-SPIRV-LABEL: define spir_kernel void @_Z7kernel7Pi( -// OPT-SPIRV-SAME: ptr addrspace(1) noalias noundef [[X_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR0]] !max_work_group_size [[META5]] { +// OPT-SPIRV-SAME: ptr addrspace(1) noalias noundef captures(none) [[X_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR0]] !max_work_group_size [[META5]] { // OPT-SPIRV-NEXT: [[ENTRY:.*:]] -// OPT-SPIRV-NEXT: [[TMP0:%.*]] = ptrtoint ptr addrspace(1) [[X_COERCE]] to i64 -// OPT-SPIRV-NEXT: 
[[TMP1:%.*]] = inttoptr i64 [[TMP0]] to ptr addrspace(4) -// OPT-SPIRV-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4 -// OPT-SPIRV-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1 -// OPT-SPIRV-NEXT: store i32 [[INC]], ptr addrspace(4) [[TMP1]], align 4 +// OPT-SPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(1) [[X_COERCE]], align 4 +// OPT-SPIRV-NEXT: [[INC:%.*]] = add nsw i32 [[TMP0]], 1 +// OPT-SPIRV-NEXT: store i32 [[INC]], ptr addrspace(1) [[X_COERCE]], align 4 // OPT-SPIRV-NEXT: ret void // // HOST-LABEL: define dso_local void @_Z22__device_stub__kernel7Pi( @@ -700,7 +692,7 @@ struct SS { // OPT-NEXT: ret void // // OPT-SPIRV-LABEL: define spir_kernel void @_Z7kernel82SS( -// OPT-SPIRV-SAME: [[STRUCT_SS:%.*]] [[A_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR0]] !max_work_group_size [[META5]] { +// OPT-SPIRV-SAME: [[STRUCT_SS:%.*]] [[A_COERCE:%.*]]) local_unnamed_addr addrspace(4) #[[ATTR2]] !max_work_group_size [[META5]] { // OPT-SPIRV-NEXT: [[ENTRY:.*:]] // OPT-SPIRV-NEXT: [[TMP0:%.*]] = extractvalue [[STRUCT_SS]] [[A_COERCE]], 0 // OPT-SPIRV-NEXT: [[TMP1:%.*]] = load float, ptr addrspace(4) [[TMP0]], align 4 diff --git a/clang/test/Headers/__clang_hip_math.hip b/clang/test/Headers/__clang_hip_math.hip index d448ab134ca4d..3e49deba368cf 100644 --- a/clang/test/Headers/__clang_hip_math.hip +++ b/clang/test/Headers/__clang_hip_math.hip @@ -1075,7 +1075,6 @@ extern "C" __device__ double test_cospi(double x) { return cospi(x); } -// // DEFAULT-LABEL: @test_cyl_bessel_i0f( // DEFAULT-NEXT: entry: // DEFAULT-NEXT: [[CALL_I:%.*]] = tail call contract noundef float @__ocml_i0_f32(float noundef [[X:%.*]]) #[[ATTR14]] @@ -1748,7 +1747,6 @@ extern "C" __device__ double test_fmax(double x, double y) { return fmax(x, y); } -// // DEFAULT-LABEL: @test_fminf( // DEFAULT-NEXT: entry: // DEFAULT-NEXT: [[TMP0:%.*]] = tail call contract noundef float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]]) @@ -3086,10 +3084,9 @@ extern "C" __device__ long 
int test_lround(double x) { // AMDGCNSPIRV-LABEL: @test_modff( // AMDGCNSPIRV-NEXT: entry: // AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4 -// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4) // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15:[0-9]+]] // AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func noundef addrspace(4) float @__ocml_modf_f32(float noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]] -// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA17:![0-9]+]] +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load float, ptr [[__TMP_I]], align 4, !tbaa [[TBAA17:![0-9]+]] // AMDGCNSPIRV-NEXT: store float [[TMP0]], ptr addrspace(4) [[Y:%.*]], align 4, !tbaa [[TBAA17]] // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: ret float [[CALL_I]] @@ -3131,10 +3128,9 @@ extern "C" __device__ float test_modff(float x, float* y) { // AMDGCNSPIRV-LABEL: @test_modf( // AMDGCNSPIRV-NEXT: entry: // AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8 -// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4) // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func noundef addrspace(4) double @__ocml_modf_f64(double noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]] -// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(4) [[__TMP_ASCAST_I]], align 8, !tbaa [[TBAA19:![0-9]+]] +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load double, ptr [[__TMP_I]], align 8, !tbaa [[TBAA19:![0-9]+]] // AMDGCNSPIRV-NEXT: store double [[TMP0]], ptr addrspace(4) [[Y:%.*]], align 8, !tbaa [[TBAA19]] // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 8, 
ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: ret double [[CALL_I]] @@ -4471,10 +4467,9 @@ extern "C" __device__ double test_remainder(double x, double y) { // AMDGCNSPIRV-LABEL: @test_remquof( // AMDGCNSPIRV-NEXT: entry: // AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4 -// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4) // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func noundef addrspace(4) float @__ocml_remquo_f32(float noundef [[X:%.*]], float noundef [[Y:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]] -// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA13]] +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr [[__TMP_I]], align 4, !tbaa [[TBAA13]] // AMDGCNSPIRV-NEXT: store i32 [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 4, !tbaa [[TBAA13]] // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: ret float [[CALL_I]] @@ -4516,10 +4511,9 @@ extern "C" __device__ float test_remquof(float x, float y, int* z) { // AMDGCNSPIRV-LABEL: @test_remquo( // AMDGCNSPIRV-NEXT: entry: // AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca i32, align 4 -// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4) // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func noundef addrspace(4) double @__ocml_remquo_f64(double noundef [[X:%.*]], double noundef [[Y:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]] -// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA13]] +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr [[__TMP_I]], align 4, !tbaa [[TBAA13]] // 
AMDGCNSPIRV-NEXT: store i32 [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 4, !tbaa [[TBAA13]] // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: ret double [[CALL_I]] @@ -5230,11 +5224,10 @@ extern "C" __device__ BOOL_TYPE test___signbit(double x) { // AMDGCNSPIRV-LABEL: @test_sincosf( // AMDGCNSPIRV-NEXT: entry: // AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4 -// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4) // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func addrspace(4) float @__ocml_sincos_f32(float noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]] // AMDGCNSPIRV-NEXT: store float [[CALL_I]], ptr addrspace(4) [[Y:%.*]], align 4, !tbaa [[TBAA17]] -// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA17]] +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load float, ptr [[__TMP_I]], align 4, !tbaa [[TBAA17]] // AMDGCNSPIRV-NEXT: store float [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 4, !tbaa [[TBAA17]] // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: ret void @@ -5279,11 +5272,10 @@ extern "C" __device__ void test_sincosf(float x, float *y, float *z) { // AMDGCNSPIRV-LABEL: @test_sincos( // AMDGCNSPIRV-NEXT: entry: // AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8 -// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4) // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func addrspace(4) double @__ocml_sincos_f64(double noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]] // AMDGCNSPIRV-NEXT: 
store double [[CALL_I]], ptr addrspace(4) [[Y:%.*]], align 8, !tbaa [[TBAA19]] -// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(4) [[__TMP_ASCAST_I]], align 8, !tbaa [[TBAA19]] +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load double, ptr [[__TMP_I]], align 8, !tbaa [[TBAA19]] // AMDGCNSPIRV-NEXT: store double [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 8, !tbaa [[TBAA19]] // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: ret void @@ -5328,11 +5320,10 @@ extern "C" __device__ void test_sincos(double x, double *y, double *z) { // AMDGCNSPIRV-LABEL: @test_sincospif( // AMDGCNSPIRV-NEXT: entry: // AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca float, align 4 -// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4) // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func addrspace(4) float @__ocml_sincospi_f32(float noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]] // AMDGCNSPIRV-NEXT: store float [[CALL_I]], ptr addrspace(4) [[Y:%.*]], align 4, !tbaa [[TBAA17]] -// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load float, ptr addrspace(4) [[__TMP_ASCAST_I]], align 4, !tbaa [[TBAA17]] +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load float, ptr [[__TMP_I]], align 4, !tbaa [[TBAA17]] // AMDGCNSPIRV-NEXT: store float [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 4, !tbaa [[TBAA17]] // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: ret void @@ -5377,11 +5368,10 @@ extern "C" __device__ void test_sincospif(float x, float *y, float *z) { // AMDGCNSPIRV-LABEL: @test_sincospi( // AMDGCNSPIRV-NEXT: entry: // AMDGCNSPIRV-NEXT: [[__TMP_I:%.*]] = alloca double, align 8 -// AMDGCNSPIRV-NEXT: [[__TMP_ASCAST_I:%.*]] = addrspacecast ptr [[__TMP_I]] to ptr addrspace(4) // 
AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: [[CALL_I:%.*]] = call contract spir_func addrspace(4) double @__ocml_sincospi_f64(double noundef [[X:%.*]], ptr noundef nonnull [[__TMP_I]]) #[[ATTR14]] // AMDGCNSPIRV-NEXT: store double [[CALL_I]], ptr addrspace(4) [[Y:%.*]], align 8, !tbaa [[TBAA19]] -// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load double, ptr addrspace(4) [[__TMP_ASCAST_I]], align 8, !tbaa [[TBAA19]] +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load double, ptr [[__TMP_I]], align 8, !tbaa [[TBAA19]] // AMDGCNSPIRV-NEXT: store double [[TMP0]], ptr addrspace(4) [[Z:%.*]], align 8, !tbaa [[TBAA19]] // AMDGCNSPIRV-NEXT: call addrspace(4) void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[__TMP_I]]) #[[ATTR15]] // AMDGCNSPIRV-NEXT: ret void diff --git a/llvm/lib/Target/SPIRV/CMakeLists.txt b/llvm/lib/Target/SPIRV/CMakeLists.txt index efdd8c8d24fbd..7450a40df8d92 100644 --- a/llvm/lib/Target/SPIRV/CMakeLists.txt +++ b/llvm/lib/Target/SPIRV/CMakeLists.txt @@ -54,6 +54,8 @@ add_llvm_target(SPIRVCodeGen Core Demangle GlobalISel + Passes + Scalar SPIRVAnalysis MC SPIRVDesc diff --git a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp index 098c7a6fba50e..b440870f354dc 100644 --- a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp @@ -26,11 +26,16 @@ #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" #include "llvm/CodeGen/TargetPassConfig.h" +#include "llvm/IR/IntrinsicsAMDGPU.h" +#include "llvm/IR/PatternMatch.h" #include "llvm/InitializePasses.h" #include "llvm/MC/TargetRegistry.h" #include "llvm/Pass.h" +#include "llvm/Passes/OptimizationLevel.h" #include "llvm/Passes/PassBuilder.h" #include "llvm/Target/TargetOptions.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/InferAddressSpaces.h" #include "llvm/Transforms/Scalar/Reg2Mem.h" #include 
"llvm/Transforms/Utils.h" #include @@ -96,9 +101,64 @@ SPIRVTargetMachine::SPIRVTargetMachine(const Target &T, const Triple &TT, setRequiresStructuredCFG(false); } +enum AddressSpace { + Function = storageClassToAddressSpace(SPIRV::StorageClass::Function), + CrossWorkgroup = + storageClassToAddressSpace(SPIRV::StorageClass::CrossWorkgroup), + UniformConstant = + storageClassToAddressSpace(SPIRV::StorageClass::UniformConstant), + Workgroup = storageClassToAddressSpace(SPIRV::StorageClass::Workgroup), + Generic = storageClassToAddressSpace(SPIRV::StorageClass::Generic), + Invalid = UINT32_MAX +}; + +unsigned SPIRVTargetMachine::getAssumedAddrSpace(const Value *V) const { + // TODO: we only enable this for AMDGCN flavoured SPIR-V, where we know it to + // be correct; this might be relaxed in the future. + if (getTargetTriple().getVendor() != Triple::VendorType::AMD) + return Invalid; + + const auto *LD = dyn_cast(V); + if (!LD) + return Invalid; + + // It must be a load from a pointer to Generic. + assert(V->getType()->isPointerTy() && + V->getType()->getPointerAddressSpace() == AddressSpace::Generic); + + const auto *Ptr = LD->getPointerOperand(); + if (Ptr->getType()->getPointerAddressSpace() != AddressSpace::UniformConstant) + return Invalid; + // For a load from a pointer to UniformConstant, we can infer CrossWorkgroup + // storage, as this could only have been legally initialised with a + // CrossWorkgroup (aka device) constant pointer. 
+ return AddressSpace::CrossWorkgroup; +} + +bool SPIRVTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS, + unsigned DestAS) const { + if (getTargetTriple().getVendor() != Triple::VendorType::AMD) + return false; + if (SrcAS != AddressSpace::Generic && SrcAS != AddressSpace::CrossWorkgroup) + return false; + return DestAS == AddressSpace::Generic || + DestAS == AddressSpace::CrossWorkgroup; +} + void SPIRVTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) { -#define GET_PASS_REGISTRY "SPIRVPassRegistry.def" -#include "llvm/Passes/TargetPassRegistry.inc" + PB.registerCGSCCOptimizerLateEPCallback( + [](CGSCCPassManager &PM, OptimizationLevel Level) { + if (Level == OptimizationLevel::O0) + return; + + FunctionPassManager FPM; + + // Add infer address spaces pass to the opt pipeline after inlining + // but before SROA to increase SROA opportunities. + FPM.addPass(InferAddressSpacesPass(AddressSpace::Generic)); + + PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM))); + }); } namespace { @@ -202,6 +262,9 @@ void SPIRVPassConfig::addIRPasses() { addPass(createPromoteMemoryToRegisterPass()); } + if (TM.getOptLevel() > CodeGenOptLevel::None) + addPass(createInferAddressSpacesPass(AddressSpace::Generic)); + addPass(createSPIRVRegularizerPass()); addPass(createSPIRVPrepareFunctionsPass(TM)); addPass(createSPIRVStripConvergenceIntrinsicsPass()); diff --git a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.h b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.h index 9c59d021dfc1b..6754af67f51bf 100644 --- a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.h +++ b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.h @@ -44,6 +44,9 @@ class SPIRVTargetMachine : public CodeGenTargetMachineImpl { return TLOF.get(); } + unsigned getAssumedAddrSpace(const Value *V) const override; + bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DstAS) const override; + void registerPassBuilderCallbacks(PassBuilder &PB) override; }; } // namespace llvm diff --git 
a/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h b/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h index 24047f31fab29..295c0ceeade83 100644 --- a/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h +++ b/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h @@ -39,6 +39,10 @@ class SPIRVTTIImpl : public BasicTTIImplBase { : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)), TLI(ST->getTargetLowering()) {} + unsigned getFlatAddressSpace() const { + return storageClassToAddressSpace(SPIRV::StorageClass::Generic); + } + TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) { // SPIR-V natively supports OpBitcount, per 3.53.14 in the spec, as such it // is reasonable to assume the Op is fast / preferable to the expanded loop. diff --git a/llvm/test/Transforms/InferAddressSpaces/SPIRV/assumed-addrspace.ll b/llvm/test/Transforms/InferAddressSpaces/SPIRV/assumed-addrspace.ll new file mode 100644 index 0000000000000..e9a4eb5cc61ce --- /dev/null +++ b/llvm/test/Transforms/InferAddressSpaces/SPIRV/assumed-addrspace.ll @@ -0,0 +1,30 @@ +; RUN: opt -S -mtriple=spirv64-amd-amdhsa -passes=infer-address-spaces -o - %s | FileCheck %s + +@c0 = addrspace(2) global ptr undef + +; CHECK-LABEL: @generic_ptr_from_constant +; CHECK: addrspacecast ptr addrspace(4) %p to ptr addrspace(1) +; CHECK-NEXT: load float, ptr addrspace(1) +define spir_func float @generic_ptr_from_constant() { + %p = load ptr addrspace(4), ptr addrspace(2) @c0 + %v = load float, ptr addrspace(4) %p + ret float %v +} + +%struct.S = type { ptr addrspace(4), ptr addrspace(4) } + +; CHECK-LABEL: @generic_ptr_from_aggregate_argument +; CHECK: addrspacecast ptr addrspace(4) %p0 to ptr addrspace(1) +; CHECK: addrspacecast ptr addrspace(4) %p1 to ptr addrspace(1) +; CHECK: load i32, ptr addrspace(1) +; CHECK: store float %v1, ptr addrspace(1) +; CHECK: ret +define spir_kernel void @generic_ptr_from_aggregate_argument(ptr addrspace(2) byval(%struct.S) align 8 %0) { + %p0 = load ptr addrspace(4), ptr addrspace(2) %0 
+ %f1 = getelementptr inbounds %struct.S, ptr addrspace(2) %0, i64 0, i32 1 + %p1 = load ptr addrspace(4), ptr addrspace(2) %f1 + %v0 = load i32, ptr addrspace(4) %p0 + %v1 = sitofp i32 %v0 to float + store float %v1, ptr addrspace(4) %p1 + ret void +} diff --git a/llvm/test/Transforms/InferAddressSpaces/SPIRV/basic.ll b/llvm/test/Transforms/InferAddressSpaces/SPIRV/basic.ll new file mode 100644 index 0000000000000..e2652623d02fe --- /dev/null +++ b/llvm/test/Transforms/InferAddressSpaces/SPIRV/basic.ll @@ -0,0 +1,463 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -mtriple=spirv32-- -passes=infer-address-spaces %s | FileCheck --check-prefix=SPV32 %s +; RUN: opt -S -mtriple=spirv64-- -passes=infer-address-spaces %s | FileCheck --check-prefix=SPV64 %s + +; Trivial optimization of generic addressing + +define float @load_global_from_flat(ptr addrspace(4) %generic_scalar) #0 { +; CHECK-LABEL: define float @load_global_from_flat( +; CHECK-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr addrspace(1) +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr addrspace(1) [[TMP0]], align 4 +; CHECK-NEXT: ret float [[TMP1]] +; +; SPV32-LABEL: define float @load_global_from_flat( +; SPV32-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0:[0-9]+]] { +; SPV32-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr addrspace(1) +; SPV32-NEXT: [[TMP1:%.*]] = load float, ptr addrspace(1) [[TMP0]], align 4 +; SPV32-NEXT: ret float [[TMP1]] +; +; SPV64-LABEL: define float @load_global_from_flat( +; SPV64-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0:[0-9]+]] { +; SPV64-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr addrspace(1) +; SPV64-NEXT: [[TMP1:%.*]] = load float, ptr addrspace(1) [[TMP0]], align 4 +; SPV64-NEXT: ret float [[TMP1]] +; + %tmp0 = 
addrspacecast ptr addrspace(4) %generic_scalar to ptr addrspace(1) + %tmp1 = load float, ptr addrspace(1) %tmp0 + ret float %tmp1 +} + +define float @load_group_from_flat(ptr addrspace(4) %generic_scalar) #0 { +; CHECK-LABEL: define float @load_group_from_flat( +; CHECK-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr addrspace(3) +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr addrspace(3) [[TMP0]], align 4 +; CHECK-NEXT: ret float [[TMP1]] +; +; SPV32-LABEL: define float @load_group_from_flat( +; SPV32-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr addrspace(3) +; SPV32-NEXT: [[TMP1:%.*]] = load float, ptr addrspace(3) [[TMP0]], align 4 +; SPV32-NEXT: ret float [[TMP1]] +; +; SPV64-LABEL: define float @load_group_from_flat( +; SPV64-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr addrspace(3) +; SPV64-NEXT: [[TMP1:%.*]] = load float, ptr addrspace(3) [[TMP0]], align 4 +; SPV64-NEXT: ret float [[TMP1]] +; + %tmp0 = addrspacecast ptr addrspace(4) %generic_scalar to ptr addrspace(3) + %tmp1 = load float, ptr addrspace(3) %tmp0 + ret float %tmp1 +} + +define float @load_private_from_flat(ptr addrspace(4) %generic_scalar) #0 { +; CHECK-LABEL: define float @load_private_from_flat( +; CHECK-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[TMP0]], align 4 +; CHECK-NEXT: ret float [[TMP1]] +; +; SPV32-LABEL: define float @load_private_from_flat( +; SPV32-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr +; SPV32-NEXT: [[TMP1:%.*]] = load float, 
ptr [[TMP0]], align 4 +; SPV32-NEXT: ret float [[TMP1]] +; +; SPV64-LABEL: define float @load_private_from_flat( +; SPV64-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr +; SPV64-NEXT: [[TMP1:%.*]] = load float, ptr [[TMP0]], align 4 +; SPV64-NEXT: ret float [[TMP1]] +; + %tmp0 = addrspacecast ptr addrspace(4) %generic_scalar to ptr + %tmp1 = load float, ptr %tmp0 + ret float %tmp1 +} + +define spir_kernel void @store_global_from_flat(ptr addrspace(4) %generic_scalar) #0 { +; CHECK-LABEL: define spir_kernel void @store_global_from_flat( +; CHECK-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr addrspace(1) +; CHECK-NEXT: store float 0.000000e+00, ptr addrspace(1) [[TMP0]], align 4 +; CHECK-NEXT: ret void +; +; SPV32-LABEL: define spir_kernel void @store_global_from_flat( +; SPV32-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr addrspace(1) +; SPV32-NEXT: store float 0.000000e+00, ptr addrspace(1) [[TMP0]], align 4 +; SPV32-NEXT: ret void +; +; SPV64-LABEL: define spir_kernel void @store_global_from_flat( +; SPV64-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr addrspace(1) +; SPV64-NEXT: store float 0.000000e+00, ptr addrspace(1) [[TMP0]], align 4 +; SPV64-NEXT: ret void +; + %tmp0 = addrspacecast ptr addrspace(4) %generic_scalar to ptr addrspace(1) + store float 0.0, ptr addrspace(1) %tmp0 + ret void +} + +define spir_kernel void @store_group_from_flat(ptr addrspace(4) %generic_scalar) #0 { +; CHECK-LABEL: define spir_kernel void @store_group_from_flat( +; CHECK-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr 
addrspace(4) [[GENERIC_SCALAR]] to ptr addrspace(3) +; CHECK-NEXT: store float 0.000000e+00, ptr addrspace(3) [[TMP0]], align 4 +; CHECK-NEXT: ret void +; +; SPV32-LABEL: define spir_kernel void @store_group_from_flat( +; SPV32-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr addrspace(3) +; SPV32-NEXT: store float 0.000000e+00, ptr addrspace(3) [[TMP0]], align 4 +; SPV32-NEXT: ret void +; +; SPV64-LABEL: define spir_kernel void @store_group_from_flat( +; SPV64-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr addrspace(3) +; SPV64-NEXT: store float 0.000000e+00, ptr addrspace(3) [[TMP0]], align 4 +; SPV64-NEXT: ret void +; + %tmp0 = addrspacecast ptr addrspace(4) %generic_scalar to ptr addrspace(3) + store float 0.0, ptr addrspace(3) %tmp0 + ret void +} + +define spir_kernel void @store_private_from_flat(ptr addrspace(4) %generic_scalar) #0 { +; CHECK-LABEL: define spir_kernel void @store_private_from_flat( +; CHECK-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr +; CHECK-NEXT: store float 0.000000e+00, ptr [[TMP0]], align 4 +; CHECK-NEXT: ret void +; +; SPV32-LABEL: define spir_kernel void @store_private_from_flat( +; SPV32-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr +; SPV32-NEXT: store float 0.000000e+00, ptr [[TMP0]], align 4 +; SPV32-NEXT: ret void +; +; SPV64-LABEL: define spir_kernel void @store_private_from_flat( +; SPV64-SAME: ptr addrspace(4) [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(4) [[GENERIC_SCALAR]] to ptr +; SPV64-NEXT: store float 0.000000e+00, ptr [[TMP0]], align 4 +; SPV64-NEXT: ret void +; + %tmp0 = 
addrspacecast ptr addrspace(4) %generic_scalar to ptr + store float 0.0, ptr %tmp0 + ret void +} + +define spir_kernel void @load_store_global(ptr addrspace(1) nocapture %input, ptr addrspace(1) nocapture %output) #0 { +; CHECK-LABEL: define spir_kernel void @load_store_global( +; CHECK-SAME: ptr addrspace(1) captures(none) [[INPUT:%.*]], ptr addrspace(1) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(1) [[INPUT]], align 4 +; CHECK-NEXT: store i32 [[VAL]], ptr addrspace(1) [[OUTPUT]], align 4 +; CHECK-NEXT: ret void +; +; SPV32-LABEL: define spir_kernel void @load_store_global( +; SPV32-SAME: ptr addrspace(1) captures(none) [[INPUT:%.*]], ptr addrspace(1) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(1) [[INPUT]], align 4 +; SPV32-NEXT: store i32 [[VAL]], ptr addrspace(1) [[OUTPUT]], align 4 +; SPV32-NEXT: ret void +; +; SPV64-LABEL: define spir_kernel void @load_store_global( +; SPV64-SAME: ptr addrspace(1) captures(none) [[INPUT:%.*]], ptr addrspace(1) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(1) [[INPUT]], align 4 +; SPV64-NEXT: store i32 [[VAL]], ptr addrspace(1) [[OUTPUT]], align 4 +; SPV64-NEXT: ret void +; + %tmp0 = addrspacecast ptr addrspace(1) %input to ptr addrspace(4) + %tmp1 = addrspacecast ptr addrspace(1) %output to ptr addrspace(4) + %val = load i32, ptr addrspace(4) %tmp0, align 4 + store i32 %val, ptr addrspace(4) %tmp1, align 4 + ret void +} + +define spir_kernel void @load_store_group(ptr addrspace(3) nocapture %input, ptr addrspace(3) nocapture %output) #0 { +; CHECK-LABEL: define spir_kernel void @load_store_group( +; CHECK-SAME: ptr addrspace(3) captures(none) [[INPUT:%.*]], ptr addrspace(3) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(3) [[INPUT]], align 4 +; CHECK-NEXT: store i32 [[VAL]], ptr addrspace(3) [[OUTPUT]], align 4 +; 
CHECK-NEXT: ret void +; +; SPV32-LABEL: define spir_kernel void @load_store_group( +; SPV32-SAME: ptr addrspace(3) captures(none) [[INPUT:%.*]], ptr addrspace(3) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(3) [[INPUT]], align 4 +; SPV32-NEXT: store i32 [[VAL]], ptr addrspace(3) [[OUTPUT]], align 4 +; SPV32-NEXT: ret void +; +; SPV64-LABEL: define spir_kernel void @load_store_group( +; SPV64-SAME: ptr addrspace(3) captures(none) [[INPUT:%.*]], ptr addrspace(3) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(3) [[INPUT]], align 4 +; SPV64-NEXT: store i32 [[VAL]], ptr addrspace(3) [[OUTPUT]], align 4 +; SPV64-NEXT: ret void +; + %tmp0 = addrspacecast ptr addrspace(3) %input to ptr addrspace(4) + %tmp1 = addrspacecast ptr addrspace(3) %output to ptr addrspace(4) + %val = load i32, ptr addrspace(4) %tmp0, align 4 + store i32 %val, ptr addrspace(4) %tmp1, align 4 + ret void +} + +define spir_kernel void @load_store_private(ptr nocapture %input, ptr nocapture %output) #0 { +; CHECK-LABEL: define spir_kernel void @load_store_private( +; CHECK-SAME: ptr captures(none) [[INPUT:%.*]], ptr captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[INPUT]], align 4 +; CHECK-NEXT: store i32 [[VAL]], ptr [[OUTPUT]], align 4 +; CHECK-NEXT: ret void +; +; SPV32-LABEL: define spir_kernel void @load_store_private( +; SPV32-SAME: ptr captures(none) [[INPUT:%.*]], ptr captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[VAL:%.*]] = load i32, ptr [[INPUT]], align 4 +; SPV32-NEXT: store i32 [[VAL]], ptr [[OUTPUT]], align 4 +; SPV32-NEXT: ret void +; +; SPV64-LABEL: define spir_kernel void @load_store_private( +; SPV64-SAME: ptr captures(none) [[INPUT:%.*]], ptr captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[VAL:%.*]] = load i32, ptr [[INPUT]], align 4 +; SPV64-NEXT: store i32 [[VAL]], ptr [[OUTPUT]], align 4 +; SPV64-NEXT: ret void +; + 
%tmp0 = addrspacecast ptr %input to ptr addrspace(4) + %tmp1 = addrspacecast ptr %output to ptr addrspace(4) + %val = load i32, ptr addrspace(4) %tmp0, align 4 + store i32 %val, ptr addrspace(4) %tmp1, align 4 + ret void +} + +define spir_kernel void @load_store_flat(ptr addrspace(4) nocapture %input, ptr addrspace(4) nocapture %output) #0 { +; CHECK-LABEL: define spir_kernel void @load_store_flat( +; CHECK-SAME: ptr addrspace(4) captures(none) [[INPUT:%.*]], ptr addrspace(4) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(4) [[INPUT]], align 4 +; CHECK-NEXT: store i32 [[VAL]], ptr addrspace(4) [[OUTPUT]], align 4 +; CHECK-NEXT: ret void +; +; SPV32-LABEL: define spir_kernel void @load_store_flat( +; SPV32-SAME: ptr addrspace(4) captures(none) [[INPUT:%.*]], ptr addrspace(4) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(4) [[INPUT]], align 4 +; SPV32-NEXT: store i32 [[VAL]], ptr addrspace(4) [[OUTPUT]], align 4 +; SPV32-NEXT: ret void +; +; SPV64-LABEL: define spir_kernel void @load_store_flat( +; SPV64-SAME: ptr addrspace(4) captures(none) [[INPUT:%.*]], ptr addrspace(4) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(4) [[INPUT]], align 4 +; SPV64-NEXT: store i32 [[VAL]], ptr addrspace(4) [[OUTPUT]], align 4 +; SPV64-NEXT: ret void +; + %val = load i32, ptr addrspace(4) %input, align 4 + store i32 %val, ptr addrspace(4) %output, align 4 + ret void +} + +define spir_kernel void @store_addrspacecast_ptr_value(ptr addrspace(1) nocapture %input, ptr addrspace(1) nocapture %output) #0 { +; CHECK-LABEL: define spir_kernel void @store_addrspacecast_ptr_value( +; CHECK-SAME: ptr addrspace(1) captures(none) [[INPUT:%.*]], ptr addrspace(1) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[CAST:%.*]] = addrspacecast ptr addrspace(1) [[INPUT]] to ptr addrspace(4) +; CHECK-NEXT: store ptr addrspace(4) [[CAST]], ptr 
addrspace(1) [[OUTPUT]], align 4 +; CHECK-NEXT: ret void +; +; SPV32-LABEL: define spir_kernel void @store_addrspacecast_ptr_value( +; SPV32-SAME: ptr addrspace(1) captures(none) [[INPUT:%.*]], ptr addrspace(1) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[CAST:%.*]] = addrspacecast ptr addrspace(1) [[INPUT]] to ptr addrspace(4) +; SPV32-NEXT: store ptr addrspace(4) [[CAST]], ptr addrspace(1) [[OUTPUT]], align 4 +; SPV32-NEXT: ret void +; +; SPV64-LABEL: define spir_kernel void @store_addrspacecast_ptr_value( +; SPV64-SAME: ptr addrspace(1) captures(none) [[INPUT:%.*]], ptr addrspace(1) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[CAST:%.*]] = addrspacecast ptr addrspace(1) [[INPUT]] to ptr addrspace(4) +; SPV64-NEXT: store ptr addrspace(4) [[CAST]], ptr addrspace(1) [[OUTPUT]], align 4 +; SPV64-NEXT: ret void +; + %cast = addrspacecast ptr addrspace(1) %input to ptr addrspace(4) + store ptr addrspace(4) %cast, ptr addrspace(1) %output, align 4 + ret void +} + +define i32 @atomicrmw_add_global_to_flat(ptr addrspace(1) %global.ptr, i32 %y) #0 { +; CHECK-LABEL: define i32 @atomicrmw_add_global_to_flat( +; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[RET:%.*]] = atomicrmw add ptr addrspace(1) [[GLOBAL_PTR]], i32 [[Y]] seq_cst, align 4 +; CHECK-NEXT: ret i32 [[RET]] +; +; SPV32-LABEL: define i32 @atomicrmw_add_global_to_flat( +; SPV32-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[RET:%.*]] = atomicrmw add ptr addrspace(1) [[GLOBAL_PTR]], i32 [[Y]] seq_cst, align 4 +; SPV32-NEXT: ret i32 [[RET]] +; +; SPV64-LABEL: define i32 @atomicrmw_add_global_to_flat( +; SPV64-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[RET:%.*]] = atomicrmw add ptr addrspace(1) [[GLOBAL_PTR]], i32 [[Y]] seq_cst, align 4 +; SPV64-NEXT: ret i32 [[RET]] +; + %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr addrspace(4) + %ret = 
atomicrmw add ptr addrspace(4) %cast, i32 %y seq_cst + ret i32 %ret +} + +define i32 @atomicrmw_add_group_to_flat(ptr addrspace(3) %group.ptr, i32 %y) #0 { +; CHECK-LABEL: define i32 @atomicrmw_add_group_to_flat( +; CHECK-SAME: ptr addrspace(3) [[GROUP_PTR:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[RET:%.*]] = atomicrmw add ptr addrspace(3) [[GROUP_PTR]], i32 [[Y]] seq_cst, align 4 +; CHECK-NEXT: ret i32 [[RET]] +; +; SPV32-LABEL: define i32 @atomicrmw_add_group_to_flat( +; SPV32-SAME: ptr addrspace(3) [[GROUP_PTR:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[RET:%.*]] = atomicrmw add ptr addrspace(3) [[GROUP_PTR]], i32 [[Y]] seq_cst, align 4 +; SPV32-NEXT: ret i32 [[RET]] +; +; SPV64-LABEL: define i32 @atomicrmw_add_group_to_flat( +; SPV64-SAME: ptr addrspace(3) [[GROUP_PTR:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[RET:%.*]] = atomicrmw add ptr addrspace(3) [[GROUP_PTR]], i32 [[Y]] seq_cst, align 4 +; SPV64-NEXT: ret i32 [[RET]] +; + %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr addrspace(4) + %ret = atomicrmw add ptr addrspace(4) %cast, i32 %y seq_cst + ret i32 %ret +} + +define { i32, i1 } @cmpxchg_global_to_flat(ptr addrspace(1) %global.ptr, i32 %cmp, i32 %val) #0 { +; CHECK-LABEL: define { i32, i1 } @cmpxchg_global_to_flat( +; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], i32 [[CMP:%.*]], i32 [[VAL:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[RET:%.*]] = cmpxchg ptr addrspace(1) [[GLOBAL_PTR]], i32 [[CMP]], i32 [[VAL]] seq_cst monotonic, align 4 +; CHECK-NEXT: ret { i32, i1 } [[RET]] +; +; SPV32-LABEL: define { i32, i1 } @cmpxchg_global_to_flat( +; SPV32-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], i32 [[CMP:%.*]], i32 [[VAL:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[RET:%.*]] = cmpxchg ptr addrspace(1) [[GLOBAL_PTR]], i32 [[CMP]], i32 [[VAL]] seq_cst monotonic, align 4 +; SPV32-NEXT: ret { i32, i1 } [[RET]] +; +; SPV64-LABEL: define { i32, i1 } @cmpxchg_global_to_flat( +; SPV64-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], i32 [[CMP:%.*]], 
i32 [[VAL:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[RET:%.*]] = cmpxchg ptr addrspace(1) [[GLOBAL_PTR]], i32 [[CMP]], i32 [[VAL]] seq_cst monotonic, align 4 +; SPV64-NEXT: ret { i32, i1 } [[RET]] +; + %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr addrspace(4) + %ret = cmpxchg ptr addrspace(4) %cast, i32 %cmp, i32 %val seq_cst monotonic + ret { i32, i1 } %ret +} + +define { i32, i1 } @cmpxchg_group_to_flat(ptr addrspace(3) %group.ptr, i32 %cmp, i32 %val) #0 { +; CHECK-LABEL: define { i32, i1 } @cmpxchg_group_to_flat( +; CHECK-SAME: ptr addrspace(3) [[GROUP_PTR:%.*]], i32 [[CMP:%.*]], i32 [[VAL:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[RET:%.*]] = cmpxchg ptr addrspace(3) [[GROUP_PTR]], i32 [[CMP]], i32 [[VAL]] seq_cst monotonic, align 4 +; CHECK-NEXT: ret { i32, i1 } [[RET]] +; +; SPV32-LABEL: define { i32, i1 } @cmpxchg_group_to_flat( +; SPV32-SAME: ptr addrspace(3) [[GROUP_PTR:%.*]], i32 [[CMP:%.*]], i32 [[VAL:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[RET:%.*]] = cmpxchg ptr addrspace(3) [[GROUP_PTR]], i32 [[CMP]], i32 [[VAL]] seq_cst monotonic, align 4 +; SPV32-NEXT: ret { i32, i1 } [[RET]] +; +; SPV64-LABEL: define { i32, i1 } @cmpxchg_group_to_flat( +; SPV64-SAME: ptr addrspace(3) [[GROUP_PTR:%.*]], i32 [[CMP:%.*]], i32 [[VAL:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[RET:%.*]] = cmpxchg ptr addrspace(3) [[GROUP_PTR]], i32 [[CMP]], i32 [[VAL]] seq_cst monotonic, align 4 +; SPV64-NEXT: ret { i32, i1 } [[RET]] +; + %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr addrspace(4) + %ret = cmpxchg ptr addrspace(4) %cast, i32 %cmp, i32 %val seq_cst monotonic + ret { i32, i1 } %ret +} + +define { ptr addrspace(4), i1 } @cmpxchg_group_to_flat_wrong_operand(ptr addrspace(3) %cas.ptr, ptr addrspace(3) %cmp.ptr, ptr addrspace(4) %val) #0 { +; CHECK-LABEL: define { ptr addrspace(4), i1 } @cmpxchg_group_to_flat_wrong_operand( +; CHECK-SAME: ptr addrspace(3) [[CAS_PTR:%.*]], ptr addrspace(3) [[CMP_PTR:%.*]], ptr addrspace(4) [[VAL:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: 
[[CAST_CMP:%.*]] = addrspacecast ptr addrspace(3) [[CMP_PTR]] to ptr addrspace(4) +; CHECK-NEXT: [[RET:%.*]] = cmpxchg ptr addrspace(3) [[CAS_PTR]], ptr addrspace(4) [[CAST_CMP]], ptr addrspace(4) [[VAL]] seq_cst monotonic, align 8 +; CHECK-NEXT: ret { ptr addrspace(4), i1 } [[RET]] +; +; SPV32-LABEL: define { ptr addrspace(4), i1 } @cmpxchg_group_to_flat_wrong_operand( +; SPV32-SAME: ptr addrspace(3) [[CAS_PTR:%.*]], ptr addrspace(3) [[CMP_PTR:%.*]], ptr addrspace(4) [[VAL:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[CAST_CMP:%.*]] = addrspacecast ptr addrspace(3) [[CMP_PTR]] to ptr addrspace(4) +; SPV32-NEXT: [[RET:%.*]] = cmpxchg ptr addrspace(3) [[CAS_PTR]], ptr addrspace(4) [[CAST_CMP]], ptr addrspace(4) [[VAL]] seq_cst monotonic, align 4 +; SPV32-NEXT: ret { ptr addrspace(4), i1 } [[RET]] +; +; SPV64-LABEL: define { ptr addrspace(4), i1 } @cmpxchg_group_to_flat_wrong_operand( +; SPV64-SAME: ptr addrspace(3) [[CAS_PTR:%.*]], ptr addrspace(3) [[CMP_PTR:%.*]], ptr addrspace(4) [[VAL:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[CAST_CMP:%.*]] = addrspacecast ptr addrspace(3) [[CMP_PTR]] to ptr addrspace(4) +; SPV64-NEXT: [[RET:%.*]] = cmpxchg ptr addrspace(3) [[CAS_PTR]], ptr addrspace(4) [[CAST_CMP]], ptr addrspace(4) [[VAL]] seq_cst monotonic, align 8 +; SPV64-NEXT: ret { ptr addrspace(4), i1 } [[RET]] +; + %cast.cmp = addrspacecast ptr addrspace(3) %cmp.ptr to ptr addrspace(4) + %ret = cmpxchg ptr addrspace(3) %cas.ptr, ptr addrspace(4) %cast.cmp, ptr addrspace(4) %val seq_cst monotonic + ret { ptr addrspace(4), i1 } %ret +} + +define void @local_nullptr(ptr addrspace(1) nocapture %results, ptr addrspace(3) %a) { +; CHECK-LABEL: define void @local_nullptr( +; CHECK-SAME: ptr addrspace(1) captures(none) [[RESULTS:%.*]], ptr addrspace(3) [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne ptr addrspace(3) [[A]], addrspacecast (ptr null to ptr addrspace(3)) +; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[TOBOOL]] to i32 +; CHECK-NEXT: store i32 [[CONV]], 
ptr addrspace(1) [[RESULTS]], align 4 +; CHECK-NEXT: ret void +; +; SPV32-LABEL: define void @local_nullptr( +; SPV32-SAME: ptr addrspace(1) captures(none) [[RESULTS:%.*]], ptr addrspace(3) [[A:%.*]]) { +; SPV32-NEXT: [[ENTRY:.*:]] +; SPV32-NEXT: [[TOBOOL:%.*]] = icmp ne ptr addrspace(3) [[A]], addrspacecast (ptr null to ptr addrspace(3)) +; SPV32-NEXT: [[CONV:%.*]] = zext i1 [[TOBOOL]] to i32 +; SPV32-NEXT: store i32 [[CONV]], ptr addrspace(1) [[RESULTS]], align 4 +; SPV32-NEXT: ret void +; +; SPV64-LABEL: define void @local_nullptr( +; SPV64-SAME: ptr addrspace(1) captures(none) [[RESULTS:%.*]], ptr addrspace(3) [[A:%.*]]) { +; SPV64-NEXT: [[ENTRY:.*:]] +; SPV64-NEXT: [[TOBOOL:%.*]] = icmp ne ptr addrspace(3) [[A]], addrspacecast (ptr null to ptr addrspace(3)) +; SPV64-NEXT: [[CONV:%.*]] = zext i1 [[TOBOOL]] to i32 +; SPV64-NEXT: store i32 [[CONV]], ptr addrspace(1) [[RESULTS]], align 4 +; SPV64-NEXT: ret void +; +entry: + %tobool = icmp ne ptr addrspace(3) %a, addrspacecast (ptr null to ptr addrspace(3)) + %conv = zext i1 %tobool to i32 + store i32 %conv, ptr addrspace(1) %results, align 4 + ret void +} + +define i32 @atomicrmw_add_global_to_flat_preserve_amdgpu_md(ptr addrspace(1) %global.ptr, i32 %y) #0 { +; CHECK-LABEL: define i32 @atomicrmw_add_global_to_flat_preserve_amdgpu_md( +; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[RET:%.*]] = atomicrmw add ptr addrspace(1) [[GLOBAL_PTR]], i32 [[Y]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0:![0-9]+]], !amdgpu.no.remote.memory [[META0]] +; CHECK-NEXT: ret i32 [[RET]] +; +; SPV32-LABEL: define i32 @atomicrmw_add_global_to_flat_preserve_amdgpu_md( +; SPV32-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { +; SPV32-NEXT: [[RET:%.*]] = atomicrmw add ptr addrspace(1) [[GLOBAL_PTR]], i32 [[Y]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0:![0-9]+]], !amdgpu.no.remote.memory [[META0]] +; SPV32-NEXT: ret i32 [[RET]] +; +; 
SPV64-LABEL: define i32 @atomicrmw_add_global_to_flat_preserve_amdgpu_md( +; SPV64-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { +; SPV64-NEXT: [[RET:%.*]] = atomicrmw add ptr addrspace(1) [[GLOBAL_PTR]], i32 [[Y]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0:![0-9]+]], !amdgpu.no.remote.memory [[META0]] +; SPV64-NEXT: ret i32 [[RET]] +; + %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr addrspace(4) + %ret = atomicrmw add ptr addrspace(4) %cast, i32 %y seq_cst, align 4, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0 + ret i32 %ret +} + +define ptr addrspace(4) @try_infer_getelementptr_constant_null() { +; CHECK-LABEL: define ptr addrspace(4) @try_infer_getelementptr_constant_null() { +; CHECK-NEXT: [[CE:%.*]] = getelementptr i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) null, i64 8), i64 0 +; CHECK-NEXT: ret ptr addrspace(4) [[CE]] +; +; SPV32-LABEL: define ptr addrspace(4) @try_infer_getelementptr_constant_null() { +; SPV32-NEXT: [[CE:%.*]] = getelementptr i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) null, i64 8), i64 0 +; SPV32-NEXT: ret ptr addrspace(4) [[CE]] +; +; SPV64-LABEL: define ptr addrspace(4) @try_infer_getelementptr_constant_null() { +; SPV64-NEXT: [[CE:%.*]] = getelementptr i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) null, i64 8), i64 0 +; SPV64-NEXT: ret ptr addrspace(4) [[CE]] +; + %ce = getelementptr i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) null, i64 8), i64 0 + ret ptr addrspace(4) %ce +} + +attributes #0 = { nounwind } + +!0 = !{} +; CHECK: [[META0]] = !{} +;. +; SPV32: [[META0]] = !{} +;. +; SPV64: [[META0]] = !{} +;. 
diff --git a/llvm/test/Transforms/InferAddressSpaces/SPIRV/infer-address-space.ll b/llvm/test/Transforms/InferAddressSpaces/SPIRV/infer-address-space.ll new file mode 100644 index 0000000000000..7de9557a9ee90 --- /dev/null +++ b/llvm/test/Transforms/InferAddressSpaces/SPIRV/infer-address-space.ll @@ -0,0 +1,211 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -mtriple=spirv64-amd-amdhsa -passes=infer-address-spaces %s | FileCheck %s +; Ports of most of test/CodeGen/NVPTX/access-non-generic.ll + +@scalar = internal addrspace(3) global float 0.0, align 4 +@array = internal addrspace(3) global [10 x float] zeroinitializer, align 4 + +define spir_kernel void @load_store_lds_f32(i32 %i, float %v) #0 { +; CHECK-LABEL: define spir_kernel void @load_store_lds_f32( +; CHECK-SAME: i32 [[I:%.*]], float [[V:%.*]]) addrspace(4) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[BB:.*:]] +; CHECK-NEXT: [[TMP:%.*]] = load float, ptr addrspace(3) @scalar, align 4 +; CHECK-NEXT: call addrspace(4) void @use(float [[TMP]]) +; CHECK-NEXT: store float [[V]], ptr addrspace(3) @scalar, align 4 +; CHECK-NEXT: call addrspace(4) void @llvm.amdgcn.s.barrier() +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr addrspace(3) @scalar, align 4 +; CHECK-NEXT: call addrspace(4) void @use(float [[TMP2]]) +; CHECK-NEXT: store float [[V]], ptr addrspace(3) @scalar, align 4 +; CHECK-NEXT: call addrspace(4) void @llvm.amdgcn.s.barrier() +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr addrspace(3) getelementptr inbounds ([10 x float], ptr addrspace(3) @array, i32 0, i32 5), align 4 +; CHECK-NEXT: call addrspace(4) void @use(float [[TMP3]]) +; CHECK-NEXT: store float [[V]], ptr addrspace(3) getelementptr inbounds ([10 x float], ptr addrspace(3) @array, i32 0, i32 5), align 4 +; CHECK-NEXT: call addrspace(4) void @llvm.amdgcn.s.barrier() +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [10 x float], ptr addrspace(3) @array, i32 0, i32 5 +; CHECK-NEXT: 
[[TMP5:%.*]] = load float, ptr addrspace(3) [[TMP4]], align 4 +; CHECK-NEXT: call addrspace(4) void @use(float [[TMP5]]) +; CHECK-NEXT: store float [[V]], ptr addrspace(3) [[TMP4]], align 4 +; CHECK-NEXT: call addrspace(4) void @llvm.amdgcn.s.barrier() +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [10 x float], ptr addrspace(3) @array, i32 0, i32 [[I]] +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr addrspace(3) [[TMP7]], align 4 +; CHECK-NEXT: call addrspace(4) void @use(float [[TMP8]]) +; CHECK-NEXT: store float [[V]], ptr addrspace(3) [[TMP7]], align 4 +; CHECK-NEXT: call addrspace(4) void @llvm.amdgcn.s.barrier() +; CHECK-NEXT: ret void +; +bb: + %tmp = load float, ptr addrspace(4) addrspacecast (ptr addrspace(3) @scalar to ptr addrspace(4)), align 4 + call void @use(float %tmp) + store float %v, ptr addrspace(4) addrspacecast (ptr addrspace(3) @scalar to ptr addrspace(4)), align 4 + call void @llvm.amdgcn.s.barrier() + %tmp1 = addrspacecast ptr addrspace(3) @scalar to ptr addrspace(4) + %tmp2 = load float, ptr addrspace(4) %tmp1, align 4 + call void @use(float %tmp2) + store float %v, ptr addrspace(4) %tmp1, align 4 + call void @llvm.amdgcn.s.barrier() + %tmp3 = load float, ptr addrspace(4) getelementptr inbounds ([10 x float], ptr addrspace(4) addrspacecast (ptr addrspace(3) @array to ptr addrspace(4)), i32 0, i32 5), align 4 + call void @use(float %tmp3) + store float %v, ptr addrspace(4) getelementptr inbounds ([10 x float], ptr addrspace(4) addrspacecast (ptr addrspace(3) @array to ptr addrspace(4)), i32 0, i32 5), align 4 + call void @llvm.amdgcn.s.barrier() + %tmp4 = getelementptr inbounds [10 x float], ptr addrspace(4) addrspacecast (ptr addrspace(3) @array to ptr addrspace(4)), i32 0, i32 5 + %tmp5 = load float, ptr addrspace(4) %tmp4, align 4 + call void @use(float %tmp5) + store float %v, ptr addrspace(4) %tmp4, align 4 + call void @llvm.amdgcn.s.barrier() + %tmp6 = addrspacecast ptr addrspace(3) @array to ptr addrspace(4) + %tmp7 = 
getelementptr inbounds [10 x float], ptr addrspace(4) %tmp6, i32 0, i32 %i + %tmp8 = load float, ptr addrspace(4) %tmp7, align 4 + call void @use(float %tmp8) + store float %v, ptr addrspace(4) %tmp7, align 4 + call void @llvm.amdgcn.s.barrier() + ret void +} + +define i32 @constexpr_load_int_from_float_lds() #0 { +; CHECK-LABEL: define i32 @constexpr_load_int_from_float_lds( +; CHECK-SAME: ) addrspace(4) #[[ATTR0]] { +; CHECK-NEXT: [[BB:.*:]] +; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr addrspace(3) @scalar, align 4 +; CHECK-NEXT: ret i32 [[TMP]] +; +bb: + %tmp = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @scalar to ptr addrspace(4)), align 4 + ret i32 %tmp +} + +define i32 @load_int_from_global_float(ptr addrspace(1) %input, i32 %i, i32 %j) #0 { +; CHECK-LABEL: define i32 @load_int_from_global_float( +; CHECK-SAME: ptr addrspace(1) [[INPUT:%.*]], i32 [[I:%.*]], i32 [[J:%.*]]) addrspace(4) #[[ATTR0]] { +; CHECK-NEXT: [[BB:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr float, ptr addrspace(1) [[INPUT]], i32 [[I]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr addrspace(1) [[TMP1]], i32 [[J]] +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[TMP2]], align 4 +; CHECK-NEXT: ret i32 [[TMP4]] +; +bb: + %tmp = addrspacecast ptr addrspace(1) %input to ptr addrspace(4) + %tmp1 = getelementptr float, ptr addrspace(4) %tmp, i32 %i + %tmp2 = getelementptr float, ptr addrspace(4) %tmp1, i32 %j + %tmp4 = load i32, ptr addrspace(4) %tmp2 + ret i32 %tmp4 +} + +define spir_kernel void @nested_const_expr() #0 { +; CHECK-LABEL: define spir_kernel void @nested_const_expr( +; CHECK-SAME: ) addrspace(4) #[[ATTR0]] { +; CHECK-NEXT: store i32 1, ptr addrspace(3) getelementptr ([10 x float], ptr addrspace(3) @array, i64 0, i64 1), align 4 +; CHECK-NEXT: ret void +; + store i32 1, ptr addrspace(4) bitcast (ptr addrspace(4) getelementptr ([10 x float], ptr addrspace(4) addrspacecast (ptr addrspace(3) @array to ptr addrspace(4)), i64 0, i64 1) to ptr 
addrspace(4)), align 4 + + ret void +} + +define spir_kernel void @rauw(ptr addrspace(1) %input) #0 { +; CHECK-LABEL: define spir_kernel void @rauw( +; CHECK-SAME: ptr addrspace(1) [[INPUT:%.*]]) addrspace(4) #[[ATTR0]] { +; CHECK-NEXT: [[BB:.*:]] +; CHECK-NEXT: [[ADDR:%.*]] = getelementptr float, ptr addrspace(1) [[INPUT]], i64 10 +; CHECK-NEXT: [[V:%.*]] = load float, ptr addrspace(1) [[ADDR]], align 4 +; CHECK-NEXT: store float [[V]], ptr addrspace(1) [[ADDR]], align 4 +; CHECK-NEXT: ret void +; +bb: + %generic_input = addrspacecast ptr addrspace(1) %input to ptr addrspace(4) + %addr = getelementptr float, ptr addrspace(4) %generic_input, i64 10 + %v = load float, ptr addrspace(4) %addr + store float %v, ptr addrspace(4) %addr + ret void +} + +; FIXME: Should be able to eliminate the cast inside the loop +define spir_kernel void @loop() #0 { +; CHECK-LABEL: define spir_kernel void @loop( +; CHECK-SAME: ) addrspace(4) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[END:%.*]] = getelementptr float, ptr addrspace(3) @array, i64 10 +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[I:%.*]] = phi ptr addrspace(3) [ @array, %[[ENTRY]] ], [ [[I2:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[V:%.*]] = load float, ptr addrspace(3) [[I]], align 4 +; CHECK-NEXT: call addrspace(4) void @use(float [[V]]) +; CHECK-NEXT: [[I2]] = getelementptr float, ptr addrspace(3) [[I]], i64 1 +; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp eq ptr addrspace(3) [[I2]], [[END]] +; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + %p = addrspacecast ptr addrspace(3) @array to ptr addrspace(4) + %end = getelementptr float, ptr addrspace(4) %p, i64 10 + br label %loop + +loop: ; preds = %loop, %entry + %i = phi ptr addrspace(4) [ %p, %entry ], [ %i2, %loop ] + %v = load float, ptr addrspace(4) %i + call void @use(float %v) + %i2 = getelementptr float, ptr addrspace(4) %i, i64 1 + %exit_cond = icmp eq ptr 
addrspace(4) %i2, %end + br i1 %exit_cond, label %exit, label %loop + +exit: ; preds = %loop + ret void +} + +@generic_end = external addrspace(1) global ptr addrspace(4) + +define spir_kernel void @loop_with_generic_bound() #0 { +; CHECK-LABEL: define spir_kernel void @loop_with_generic_bound( +; CHECK-SAME: ) addrspace(4) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[END:%.*]] = load ptr addrspace(4), ptr addrspace(1) @generic_end, align 8 +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[I:%.*]] = phi ptr addrspace(3) [ @array, %[[ENTRY]] ], [ [[I2:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[V:%.*]] = load float, ptr addrspace(3) [[I]], align 4 +; CHECK-NEXT: call addrspace(4) void @use(float [[V]]) +; CHECK-NEXT: [[I2]] = getelementptr float, ptr addrspace(3) [[I]], i64 1 +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(3) [[I2]] to ptr addrspace(4) +; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp eq ptr addrspace(4) [[TMP0]], [[END]] +; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + %p = addrspacecast ptr addrspace(3) @array to ptr addrspace(4) + %end = load ptr addrspace(4), ptr addrspace(1) @generic_end + br label %loop + +loop: ; preds = %loop, %entry + %i = phi ptr addrspace(4) [ %p, %entry ], [ %i2, %loop ] + %v = load float, ptr addrspace(4) %i + call void @use(float %v) + %i2 = getelementptr float, ptr addrspace(4) %i, i64 1 + %exit_cond = icmp eq ptr addrspace(4) %i2, %end + br i1 %exit_cond, label %exit, label %loop + +exit: ; preds = %loop + ret void +} + +define void @select_bug() #0 { +; CHECK-LABEL: define void @select_bug( +; CHECK-SAME: ) addrspace(4) #[[ATTR0]] { +; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr addrspace(4) inttoptr (i64 4873 to ptr addrspace(4)), null +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i64 73, i64 93 +; CHECK-NEXT: [[ADD_PTR157:%.*]] = getelementptr inbounds i64, ptr addrspace(4) undef, i64 [[SEL]] +; CHECK-NEXT: 
[[CMP169:%.*]] = icmp uge ptr addrspace(4) undef, [[ADD_PTR157]] +; CHECK-NEXT: unreachable +; + %cmp = icmp ne ptr addrspace(4) inttoptr (i64 4873 to ptr addrspace(4)), null + %sel = select i1 %cmp, i64 73, i64 93 + %add.ptr157 = getelementptr inbounds i64, ptr addrspace(4) undef, i64 %sel + %cmp169 = icmp uge ptr addrspace(4) undef, %add.ptr157 + unreachable +} + +declare void @llvm.amdgcn.s.barrier() #1 +declare void @use(float) #0 + +attributes #0 = { nounwind } +attributes #1 = { convergent nounwind } diff --git a/llvm/test/Transforms/InferAddressSpaces/SPIRV/lit.local.cfg b/llvm/test/Transforms/InferAddressSpaces/SPIRV/lit.local.cfg new file mode 100644 index 0000000000000..78dd74cd6dc63 --- /dev/null +++ b/llvm/test/Transforms/InferAddressSpaces/SPIRV/lit.local.cfg @@ -0,0 +1,2 @@ +if "SPIRV" not in config.root.targets: + config.unsupported = True