diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index cb9c23b8e0a0d..52b2d3320c60e 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -657,6 +657,17 @@ static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
   }
 }
 
+// Emit a simple mangled intrinsic that has 1 argument and a return type
+// matching the argument type.
+static Value *emitUnaryFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+                                 unsigned IntrinsicID) {
+  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+
+  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
+  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+  return CGF.Builder.CreateCall(F, Src0);
+}
+
 // Emit an intrinsic that has 2 operands of the same type as its result.
 // Depending on mode, this may be a constrained floating-point intrinsic.
 static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
@@ -3238,9 +3249,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_truncf16:
   case Builtin::BI__builtin_truncl:
   case Builtin::BI__builtin_truncf128:
-    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                Intrinsic::trunc,
-                                Intrinsic::experimental_constrained_trunc));
+    return RValue::get(emitUnaryFPBuiltin(*this, E, Intrinsic::trunc));
 
   case Builtin::BIlround:
   case Builtin::BIlroundf:
@@ -6827,7 +6836,7 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value *> &Ops,
   unsigned j = 0;
   for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
        ai != ae; ++ai, ++j) {
-    if (F->isConstrainedFPIntrinsic())
+    if (F->isLegacyConstrainedIntrinsic())
       if (ai->getType()->isMetadataTy())
         continue;
     if (shift > 0 && shift == j)
@@ -6836,7 +6845,7 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value *> &Ops,
     Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
   }
 
-  if (F->isConstrainedFPIntrinsic())
+  if (F->isLegacyConstrainedIntrinsic())
     return Builder.CreateConstrainedFPCall(F, Ops, name);
   else
     return Builder.CreateCall(F, Ops, name);
@@ -12989,13 +12998,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                               : Intrinsic::rint;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
   }
-  case NEON::BI__builtin_neon_vrndh_f16: {
+  case NEON::BI__builtin_neon_vrndh_f16:
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
-    Int = Builder.getIsFPConstrained()
-              ? Intrinsic::experimental_constrained_trunc
-              : Intrinsic::trunc;
-    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
-  }
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::trunc, HalfTy), Ops,
+                        "vrndz");
+
   case NEON::BI__builtin_neon_vrnd32x_f32:
   case NEON::BI__builtin_neon_vrnd32xq_f32:
   case NEON::BI__builtin_neon_vrnd32x_f64:
@@ -13029,12 +13036,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
   }
   case NEON::BI__builtin_neon_vrnd_v:
-  case NEON::BI__builtin_neon_vrndq_v: {
-    Int = Builder.getIsFPConstrained()
-              ? Intrinsic::experimental_constrained_trunc
-              : Intrinsic::trunc;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
-  }
+  case NEON::BI__builtin_neon_vrndq_v:
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::trunc, Ty), Ops, "vrndz");
+
   case NEON::BI__builtin_neon_vcvt_f64_v:
   case NEON::BI__builtin_neon_vcvtq_f64_v:
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -18251,9 +18255,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
              : Intrinsic::ceil;
     else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
              BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
-      ID = Builder.getIsFPConstrained()
-               ? Intrinsic::experimental_constrained_trunc
-               : Intrinsic::trunc;
+      return emitUnaryFPBuiltin(*this, E, Intrinsic::trunc);
+
     llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
     return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
                                         : Builder.CreateCall(F, X);
@@ -18754,9 +18757,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
         .getScalarVal();
   case PPC::BI__builtin_ppc_friz:
   case PPC::BI__builtin_ppc_frizs:
-    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
-                           *this, E, Intrinsic::trunc,
-                           Intrinsic::experimental_constrained_trunc))
+    return RValue::get(emitUnaryFPBuiltin(*this, E, Intrinsic::trunc))
         .getScalarVal();
   case PPC::BI__builtin_ppc_fsqrt:
   case PPC::BI__builtin_ppc_fsqrts:
@@ -20536,8 +20537,7 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
       CI = Intrinsic::experimental_constrained_nearbyint; break;
     case 1: ID = Intrinsic::round;
       CI = Intrinsic::experimental_constrained_round; break;
-    case 5: ID = Intrinsic::trunc;
-      CI = Intrinsic::experimental_constrained_trunc; break;
+    case 5: ID = Intrinsic::trunc; break;
     case 6: ID = Intrinsic::ceil;
       CI = Intrinsic::experimental_constrained_ceil; break;
     case 7: ID = Intrinsic::floor;
@@ -20546,7 +20546,7 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
       break;
     }
     if (ID != Intrinsic::not_intrinsic) {
-      if (Builder.getIsFPConstrained()) {
+      if (Builder.getIsFPConstrained() && ID != Intrinsic::trunc) {
         Function *F = CGM.getIntrinsic(CI, ResultType);
         return Builder.CreateConstrainedFPCall(F, X);
       } else {
diff --git a/clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c b/clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c
index 15ae7eea820e8..0405cf7f19c73 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c
@@ -792,7 +792,7 @@ float64x1_t test_vrndx_f64(float64x1_t a) {
 // COMMON-LABEL: test_vrnd_f64
 // COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
 // UNCONSTRAINED: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.trunc.v1f64(<1 x double> %a)
-// CONSTRAINED: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
+// CONSTRAINED: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.trunc.v1f64(<1 x double> %a) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 // COMMONIR: ret <1 x double> [[VRNDZ1_I]]
 float64x1_t test_vrnd_f64(float64x1_t a) {
   return vrnd_f64(a);
diff --git a/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c b/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c
index 9109626cea9ca..9079a6690b9db 100644
--- a/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c
+++ b/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c
@@ -150,7 +150,7 @@ uint64_t test_vcvth_u64_f16 (float16_t a) {
 
 // COMMON-LABEL: test_vrndh_f16
 // UNCONSTRAINED: [[RND:%.*]] = call half @llvm.trunc.f16(half %a)
-// CONSTRAINED: [[RND:%.*]] = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict")
+// CONSTRAINED: [[RND:%.*]] = call half @llvm.trunc.f16(half %a) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 // COMMONIR: ret half [[RND]]
 float16_t test_vrndh_f16(float16_t a) {
   return vrndh_f16(a);
@@ -298,3 +298,5 @@ float16_t test_vfmsh_f16(float16_t a, float16_t b, float16_t c) {
   return vfmsh_f16(a, b, c);
 }
 
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
+
diff --git a/clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c b/clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c
index 838db02415fe5..b326f131a56e5 100644
--- a/clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c
+++ b/clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c
@@ -85,13 +85,13 @@ void test_float(void) {
   vf = __builtin_vsx_xvrspiz(vf);
   // CHECK-LABEL: try-xvrspiz
   // CHECK-UNCONSTRAINED: @llvm.trunc.v4f32(<4 x float> %{{.*}})
-  // CHECK-CONSTRAINED: @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK-CONSTRAINED: @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: xvrspiz
 
   vd = __builtin_vsx_xvrdpiz(vd);
   // CHECK-LABEL: try-xvrdpiz
   // CHECK-UNCONSTRAINED: @llvm.trunc.v2f64(<2 x double> %{{.*}})
-  // CHECK-CONSTRAINED: @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK-CONSTRAINED: @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: xvrdpiz
 
   vf = __builtin_vsx_xvmaddasp(vf, vf, vf);
@@ -156,3 +156,5 @@ void test_float(void) {
   // CHECK-CONSTRAINED: fneg <2 x double> [[RESULT1]]
   // CHECK-ASM: xvnmsubadp
 }
+
+// CHECK-CONSTRAINED: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-vector-constrained.c b/clang/test/CodeGen/SystemZ/builtins-systemz-vector-constrained.c
index 6d2845504a39f..77ede2c10eea0 100644
--- a/clang/test/CodeGen/SystemZ/builtins-systemz-vector-constrained.c
+++ b/clang/test/CodeGen/SystemZ/builtins-systemz-vector-constrained.c
@@ -45,7 +45,7 @@ void test_float(void) {
   vd = __builtin_s390_vfidb(vd, 4, 1);
   // CHECK: call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %{{.*}})
   vd = __builtin_s390_vfidb(vd, 4, 5);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   vd = __builtin_s390_vfidb(vd, 4, 6);
   // CHECK: call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %{{.*}})
   vd = __builtin_s390_vfidb(vd, 4, 7);
@@ -53,3 +53,5 @@ void test_float(void) {
   vd = __builtin_s390_vfidb(vd, 4, 4);
   // CHECK: call <2 x double> @llvm.s390.vfidb(<2 x double> %{{.*}}, i32 4, i32 4)
 }
+
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-vector2-constrained.c b/clang/test/CodeGen/SystemZ/builtins-systemz-vector2-constrained.c
index 735b6a0249ab6..7488cf90a9669 100644
--- a/clang/test/CodeGen/SystemZ/builtins-systemz-vector2-constrained.c
+++ b/clang/test/CodeGen/SystemZ/builtins-systemz-vector2-constrained.c
@@ -60,10 +60,11 @@ void test_float(void) {
   vf = __builtin_s390_vfisb(vf, 4, 1);
   // CHECK: call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
   vf = __builtin_s390_vfisb(vf, 4, 5);
-  // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   vf = __builtin_s390_vfisb(vf, 4, 6);
   // CHECK: call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
   vf = __builtin_s390_vfisb(vf, 4, 7);
   // CHECK: call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
 }
+
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector-constrained.c b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector-constrained.c
index 6a1f8f0e923f6..fe964fa38aee0 100644
--- a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector-constrained.c
+++ b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector-constrained.c
@@ -303,10 +303,10 @@ void test_float(void) {
   // CHECK: call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
   vd = vec_roundz(vd);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
   vd = vec_trunc(vd);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
   vd = vec_roundc(vd);
   // CHECK: call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
@@ -316,3 +316,5 @@ void test_float(void) {
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 0, 0
   vd = vec_round(vd);
 }
+
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector2-constrained.c b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector2-constrained.c
index 750f5011a2679..e7ea4e325862e 100644
--- a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector2-constrained.c
+++ b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector2-constrained.c
@@ -495,16 +495,16 @@ void test_float(void) {
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
 
   vf = vec_roundz(vf);
-  // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 5
   vf = vec_trunc(vf);
-  // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 5
   vd = vec_roundz(vd);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
   vd = vec_trunc(vd);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
 
   vf = vec_roundc(vf);
@@ -541,3 +541,5 @@ void test_float(void) {
   // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 4095)
   // CHECK-ASM: vftcidb
 }
+
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
\ No newline at end of file
diff --git a/clang/test/CodeGen/X86/strictfp_builtins.c b/clang/test/CodeGen/X86/strictfp_builtins.c
index 43e4060bef259..75ed3a2555b3d 100644
--- a/clang/test/CodeGen/X86/strictfp_builtins.c
+++ b/clang/test/CodeGen/X86/strictfp_builtins.c
@@ -27,7 +27,7 @@ void p(char *str, int x) {
 // CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
 // CHECK-NEXT: store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
 // CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 516) #[[ATTR3]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 516) #[[ATTR4:[0-9]+]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.1, i32 noundef [[TMP2]]) #[[ATTR3]]
 // CHECK-NEXT: ret void
@@ -43,7 +43,7 @@ void test_long_double_isinf(long double ld) {
 // CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
 // CHECK-NEXT: store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
 // CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 504) #[[ATTR3]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 504) #[[ATTR4]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.2, i32 noundef [[TMP2]]) #[[ATTR3]]
 // CHECK-NEXT: ret void
@@ -59,7 +59,7 @@ void test_long_double_isfinite(long double ld) {
 // CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
 // CHECK-NEXT: store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
 // CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 3) #[[ATTR3]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 3) #[[ATTR4]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.3, i32 noundef [[TMP2]]) #[[ATTR3]]
 // CHECK-NEXT: ret void
diff --git a/clang/test/CodeGen/arm64-vrnd-constrained.c b/clang/test/CodeGen/arm64-vrnd-constrained.c
index ccf729a6a25ef..e690f26b0def5 100644
--- a/clang/test/CodeGen/arm64-vrnd-constrained.c
+++ b/clang/test/CodeGen/arm64-vrnd-constrained.c
@@ -14,7 +14,7 @@ float64x2_t rnd5(float64x2_t a) { return vrndq_f64(a); }
 // COMMON-LABEL: rnd5
 // UNCONSTRAINED: call <2 x double> @llvm.trunc.v2f64(<2 x double>
-// CONSTRAINED: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>
+// CONSTRAINED: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-ASM: frintz.2d v{{[0-9]+}}, v{{[0-9]+}}
 
 float64x2_t rnd13(float64x2_t a) { return vrndmq_f64(a); }
@@ -41,3 +41,5 @@ float64x2_t rnd25(float64x2_t a) { return vrndxq_f64(a); }
 // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>
 // CHECK-ASM: frintx.2d v{{[0-9]+}}, v{{[0-9]+}}
 
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
+
diff --git a/clang/test/CodeGen/constrained-math-builtins.c b/clang/test/CodeGen/constrained-math-builtins.c
index 68b9e75283c54..f5136cd18e0ef 100644
--- a/clang/test/CodeGen/constrained-math-builtins.c
+++ b/clang/test/CodeGen/constrained-math-builtins.c
@@ -242,10 +242,10 @@ __builtin_atan2(f,f); __builtin_atan2f(f,f); __builtin_atan2l(f,f);
 
   __builtin_trunc(f); __builtin_truncf(f); __builtin_truncl(f); __builtin_truncf128(f);
 
-// CHECK: call double @llvm.experimental.constrained.trunc.f64(double %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call float @llvm.experimental.constrained.trunc.f32(float %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call double @llvm.trunc.f64(double %{{.*}}) #[[ATTR_CALL:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
+// CHECK: call float @llvm.trunc.f32(float %{{.*}}) #[[ATTR_CALL]] [ "fpe.except"(metadata !"strict") ]
+// CHECK: call x86_fp80 @llvm.trunc.f80(x86_fp80 %{{.*}}) #[[ATTR_CALL]] [ "fpe.except"(metadata !"strict") ]
+// CHECK: call fp128 @llvm.trunc.f128(fp128 %{{.*}}) #[[ATTR_CALL]] [ "fpe.except"(metadata !"strict") ]
 };
 
 // CHECK: declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
@@ -377,10 +377,10 @@ __builtin_atan2(f,f); __builtin_atan2f(f,f); __builtin_atan2l(f,f);
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata)
 
-// CHECK: declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
-// CHECK: declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
-// CHECK: declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
-// CHECK: declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
+// CHECK: declare double @llvm.trunc.f64(double) #[[ATTR_FUNC:[0-9]+]]
+// CHECK: declare float @llvm.trunc.f32(float) #[[ATTR_FUNC]]
+// CHECK: declare x86_fp80 @llvm.trunc.f80(x86_fp80) #[[ATTR_FUNC]]
+// CHECK: declare fp128 @llvm.trunc.f128(fp128) #[[ATTR_FUNC]]
 
 #pragma STDC FP_CONTRACT ON
 void bar(float f) {
@@ -401,3 +401,6 @@ void bar(float f) {
 // CHECK: fneg
 // CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
 };
+
+// CHECK: attributes #[[ATTR_FUNC]] = { {{.*}} memory(none) }
+// CHECK: attributes #[[ATTR_CALL]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/strictfp_builtins.c b/clang/test/CodeGen/strictfp_builtins.c
index 58815c7de4fa9..053265dcc0667 100644
--- a/clang/test/CodeGen/strictfp_builtins.c
+++ b/clang/test/CodeGen/strictfp_builtins.c
@@ -31,21 +31,21 @@ void p(char *str, int x) {
 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-NEXT: br i1 [[ISZERO]], label [[FPCLASSIFY_END:%.*]], label [[FPCLASSIFY_NOT_ZERO:%.*]]
 // CHECK: fpclassify_end:
 // CHECK-NEXT: [[FPCLASSIFY_RESULT:%.*]] = phi i32 [ 4, [[ENTRY:%.*]] ], [ 0, [[FPCLASSIFY_NOT_ZERO]] ], [ 1, [[FPCLASSIFY_NOT_NAN:%.*]] ], [ [[TMP2:%.*]], [[FPCLASSIFY_NOT_INF:%.*]] ]
 // CHECK-NEXT: call void @p(ptr noundef @.str.1, i32 noundef [[FPCLASSIFY_RESULT]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
 // CHECK: fpclassify_not_zero:
-// CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-NEXT: br i1 [[CMP]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_NAN]]
 // CHECK: fpclassify_not_nan:
-// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR5:[0-9]+]]
-// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6:[0-9]+]]
+// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-NEXT: br i1 [[ISINF]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_INF]]
 // CHECK: fpclassify_not_inf:
-// CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-NEXT: [[TMP2]] = select i1 [[ISNORMAL]], i32 2, i32 3
 // CHECK-NEXT: br label [[FPCLASSIFY_END]]
 //
@@ -60,7 +60,7 @@ void test_fpclassify(double d) {
 // CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
 // CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
 // CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 516) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 516) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.2, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -76,7 +76,7 @@ void test_fp16_isinf(_Float16 h) {
 // CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
 // CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 516) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 516) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.3, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -92,7 +92,7 @@ void test_float_isinf(float f) {
 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 516) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 516) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.4, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -108,7 +108,7 @@ void test_double_isinf(double d) {
 // CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
 // CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
 // CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 504) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.5, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -124,7 +124,7 @@ void test_fp16_isfinite(_Float16 h) {
 // CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
 // CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 504) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.6, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -140,7 +140,7 @@ void test_float_isfinite(float f) {
 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 504) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.7, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -156,8 +156,8 @@ void test_double_isfinite(double d) {
 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR5]]
-// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6]]
+// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[TMP0]] to i64
 // CHECK-NEXT: [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 0
 // CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 -1, i32 1
@@ -176,7 +176,7 @@ void test_isinf_sign(double d) {
 // CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
 // CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
 // CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 3) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.9, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -192,7 +192,7 @@ void test_fp16_isnan(_Float16 h) {
 // CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
 // CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 3) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.10, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -208,7 +208,7 @@ void test_float_isnan(float f) {
 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 3) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.11, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -224,7 +224,7 @@ void test_double_isnan(double d) {
 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 264) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 264) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.12, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
diff --git a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
index 451d30b4d86f0..31f1aa60780b9 100644
--- a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
+++ b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
@@ -144,7 +144,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
 // STRICTFP-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) [[BLOCK_CAPTURE_ADDR1]], align 4
 // STRICTFP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr addrspace(1) [[TMP0]], i32 [[TMP1]]
 // STRICTFP-NEXT: [[TMP2:%.*]] = load float, ptr addrspace(1) [[ARRAYIDX]], align 4
-// STRICTFP-NEXT: [[TMP3:%.*]] = call float @llvm.experimental.constrained.fmuladd.f32(float 4.000000e+00, float [[TMP2]], float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR5]]
+// STRICTFP-NEXT: [[TMP3:%.*]] = call float @llvm.experimental.constrained.fmuladd.f32(float 4.000000e+00, float [[TMP2]], float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 // STRICTFP-NEXT: [[BLOCK_CAPTURE_ADDR2:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) [[DOTBLOCK_DESCRIPTOR]], i32 0, i32 3
 // STRICTFP-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[BLOCK_CAPTURE_ADDR2]], align 4
 // STRICTFP-NEXT: [[BLOCK_CAPTURE_ADDR3:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) [[DOTBLOCK_DESCRIPTOR]], i32 0, i32 4
@@ -173,7 +173,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
 // STRICTFP: attributes #[[ATTR2]] = { convergent noinline nounwind optnone strictfp "stack-protector-buffer-size"="8" }
 // STRICTFP: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite) }
 // STRICTFP: attributes #[[ATTR4]] = { convergent nounwind "stack-protector-buffer-size"="8" }
-// STRICTFP: attributes #[[ATTR5]] = { strictfp }
+// STRICTFP: attributes #[[ATTR5]] = { strictfp memory(inaccessiblemem: readwrite) }
 //.
 // SPIR32: [[META0:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
 // SPIR32: [[META1:![0-9]+]] = !{i32 2, i32 0}
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index abfd2fdfb9de7..717539b5b2383 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -3005,6 +3005,46 @@ A "convergencectrl" operand bundle is only valid on a ``convergent`` operation.
 When present, the operand bundle must contain exactly one value of token type.
 See the :doc:`ConvergentOperations` document for details.
 
+.. _ob_fpe:
+
+Floating-point Environment Operand Bundles
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These operand bundles provide details on how the operation interacts with the
+:ref:`floating-point environment <floatenv>`. There are two kinds of such
+operand bundles, which characterize interaction with floating-point control
+modes and status bits, respectively.
+
+An operand bundle tagged with "fpe.control" carries information about the
+control modes used by the operation. Only the rounding mode is supported now.
+It is represented by a metadata string value, which specifies the rounding
+mode to be used for the operation evaluation. Possible values are:
+
+::
+
+    "rtz" - toward zero
+    "rte" - to nearest, ties to even
+    "rtp" - toward positive infinity
+    "rtn" - toward negative infinity
+    "rmm" - to nearest, ties away from zero
+    "dyn" - rounding mode is taken from control register
+
+If "fpe.control" is absent, the default rounding mode (to nearest, ties to
+even) is assumed.
+
+An operand bundle tagged with "fpe.except" may be associated with operations
+that can read or write floating-point exception flags. It has a single
+metadata string value, which may have one of the values:
+
+::
+
+    "ignore"
+    "strict"
+    "maytrap"
+
+It has the same meaning as the corresponding argument in
+:ref:`constrained intrinsics <constrainedfp>`.
+
 .. _moduleasm:
 
 Module-Level Inline Assembly
diff --git a/llvm/include/llvm/AsmParser/LLParser.h b/llvm/include/llvm/AsmParser/LLParser.h
index 1ef8b8ffc3966..88d48f7bc5e6a 100644
--- a/llvm/include/llvm/AsmParser/LLParser.h
+++ b/llvm/include/llvm/AsmParser/LLParser.h
@@ -563,6 +563,11 @@ namespace llvm {
     bool resolveFunctionType(Type *RetType, ArrayRef<ParamInfo> ArgList,
                              FunctionType *&FuncTy);
 
+    void updateConstrainedIntrinsic(ValID &CalleeID,
+                                    SmallVectorImpl<ParamInfo> &Args,
+                                    SmallVectorImpl<OperandBundleDef> &Bundles,
+                                    AttrBuilder &FnAttrs);
+
     // Constant Parsing.
    bool parseValID(ValID &ID, PerFunctionState *PFS,
                    Type *ExpectedTy = nullptr);
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 677b59e0c8fbe..9dc831ef23273 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -721,6 +721,7 @@ END_TWO_BYTE_PACK()
     case ISD::STRICT_FP_TO_BF16:
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
       return true;
   }
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 6a41094ff933b..7ccaf9558077c 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1324,6 +1324,7 @@ class TargetLoweringBase {
     default: llvm_unreachable("Unexpected FP pseudo-opcode");
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
     case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
     case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
 #include "llvm/IR/ConstrainedOps.def"
diff --git a/llvm/include/llvm/IR/ConstrainedOps.def b/llvm/include/llvm/IR/ConstrainedOps.def
index 30a82bf633d57..2b1bb711444a0 100644
--- a/llvm/include/llvm/IR/ConstrainedOps.def
+++ b/llvm/include/llvm/IR/ConstrainedOps.def
@@ -39,6 +39,11 @@
 #define CMP_INSTRUCTION(N,A,R,I,D) DAG_INSTRUCTION(N,A,R,I,D)
 #endif
 
+// Intrinsic function that had a constrained variant.
+#ifndef LEGACY_FUNCTION
+#define LEGACY_FUNCTION(N,A,R,I,D)
+#endif
+
 // Arguments of the entries are:
 // - instruction or intrinsic function name.
 // - Number of original instruction/intrinsic arguments.
@@ -103,7 +108,7 @@ DAG_FUNCTION(sinh, 1, 1, experimental_constrained_sinh, FSINH)
 DAG_FUNCTION(sqrt, 1, 1, experimental_constrained_sqrt, FSQRT)
 DAG_FUNCTION(tan, 1, 1, experimental_constrained_tan, FTAN)
 DAG_FUNCTION(tanh, 1, 1, experimental_constrained_tanh, FTANH)
-DAG_FUNCTION(trunc, 1, 0, experimental_constrained_trunc, FTRUNC)
+LEGACY_FUNCTION(trunc, 1, 0, experimental_constrained_trunc, FTRUNC)
 
 // This is definition for fmuladd intrinsic function, that is converted into
 // constrained FMA or FMUL + FADD intrinsics.
@@ -114,3 +119,4 @@ FUNCTION(fmuladd, 3, 1, experimental_constrained_fmuladd)
 #undef CMP_INSTRUCTION
 #undef DAG_INSTRUCTION
 #undef DAG_FUNCTION
+#undef LEGACY_FUNCTION
\ No newline at end of file
diff --git a/llvm/include/llvm/IR/FPEnv.h b/llvm/include/llvm/IR/FPEnv.h
index a0197377759da..58a0c1956598c 100644
--- a/llvm/include/llvm/IR/FPEnv.h
+++ b/llvm/include/llvm/IR/FPEnv.h
@@ -46,19 +46,23 @@ enum ExceptionBehavior : uint8_t {
 /// Returns a valid RoundingMode enumerator when given a string
 /// that is valid as input in constrained intrinsic rounding mode
 /// metadata.
-std::optional<RoundingMode> convertStrToRoundingMode(StringRef);
+std::optional<RoundingMode> convertStrToRoundingMode(StringRef,
+                                                     bool InBundle = false);
 
 /// For any RoundingMode enumerator, returns a string valid as input in
 /// constrained intrinsic rounding mode metadata.
-std::optional<StringRef> convertRoundingModeToStr(RoundingMode);
+std::optional<StringRef> convertRoundingModeToStr(RoundingMode,
+                                                  bool InBundle = false);
 
 /// Returns a valid ExceptionBehavior enumerator when given a string
 /// valid as input in constrained intrinsic exception behavior metadata.
-std::optional<fp::ExceptionBehavior> convertStrToExceptionBehavior(StringRef);
+std::optional<fp::ExceptionBehavior>
+convertStrToExceptionBehavior(StringRef, bool InBundle = false);
 
 /// For any ExceptionBehavior enumerator, returns a string valid as
 /// input in constrained intrinsic exception behavior metadata.
-std::optional<StringRef> convertExceptionBehaviorToStr(fp::ExceptionBehavior);
+std::optional<StringRef> convertExceptionBehaviorToStr(fp::ExceptionBehavior,
+                                                       bool InBundle = false);
 
 /// Returns true if the exception handling behavior and rounding mode
 /// match what is used in the default floating point environment.
diff --git a/llvm/include/llvm/IR/Function.h b/llvm/include/llvm/IR/Function.h
index e7afcbd31420c..076a28519491f 100644
--- a/llvm/include/llvm/IR/Function.h
+++ b/llvm/include/llvm/IR/Function.h
@@ -263,7 +263,7 @@ class LLVM_ABI Function : public GlobalObject, public ilist_node<Function> {
   /// Returns true if the function is one of the "Constrained Floating-Point
   /// Intrinsics". Returns false if not, and returns false when
   /// getIntrinsicID() returns Intrinsic::not_intrinsic.
-  bool isConstrainedFPIntrinsic() const;
+  bool isLegacyConstrainedIntrinsic() const;
 
   /// Update internal caches that depend on the function name (such as the
   /// intrinsic ID and libcall cache).
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 23fd8350a29b3..b8c79b53d42ae 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -357,6 +357,9 @@ class IRBuilderBase {
 
   void setConstrainedFPCallAttr(CallBase *I) {
     I->addFnAttr(Attribute::StrictFP);
+    MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+    auto A = Attribute::getWithMemoryEffects(getContext(), ME);
+    I->addFnAttr(A);
   }
 
   void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
@@ -975,6 +978,16 @@ class IRBuilderBase {
                             Instruction *FMFSource = nullptr,
                             const Twine &Name = "");
 
+  /// Create a call to intrinsic \p ID with \p Args, mangled using \p Types and
+  /// with operand bundles.
+  /// If \p FMFSource is provided, copy fast-math-flags from that instruction to
+  /// the intrinsic.
+  CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
+                            ArrayRef<Value *> Args,
+                            ArrayRef<OperandBundleDef> OpBundles,
+                            Instruction *FMFSource = nullptr,
+                            const Twine &Name = "");
+
   /// Create a call to intrinsic \p ID with \p RetTy and \p Args. If
   /// \p FMFSource is provided, copy fast-math-flags from that instruction to
   /// the intrinsic.
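A minimal usage sketch of the new overload (not part of the patch), assuming the bundle helpers declared later in this header and an initialized ``IRBuilderBase &B`` with a ``float`` value ``Src``::

    // Build an "fpe.except"(!"strict") bundle and emit @llvm.trunc.f32 with it;
    // strictfp/memory attributes are applied by the builder in strict FP mode.
    CallInst *emitStrictTrunc(IRBuilderBase &B, Value *Src) {
      SmallVector<OperandBundleDef, 1> Bundles;
      B.createFPExceptionBundle(Bundles, fp::ebStrict);
      return B.CreateIntrinsic(Intrinsic::trunc, {Src->getType()}, {Src},
                               Bundles, /*FMFSource=*/nullptr, "trunc");
    }
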
@@ -2439,24 +2452,13 @@ class IRBuilderBase {
   CallInst *CreateCall(FunctionType *FTy, Value *Callee,
                        ArrayRef<Value *> Args = {}, const Twine &Name = "",
                        MDNode *FPMathTag = nullptr) {
-    CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
-    if (IsFPConstrained)
-      setConstrainedFPCallAttr(CI);
-    if (isa<FPMathOperator>(CI))
-      setFPAttrs(CI, FPMathTag, FMF);
-    return Insert(CI, Name);
+    return CreateCall(FTy, Callee, Args, DefaultOperandBundles, Name,
+                      FPMathTag);
   }
 
   CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
                        ArrayRef<OperandBundleDef> OpBundles,
-                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
-    CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
-    if (IsFPConstrained)
-      setConstrainedFPCallAttr(CI);
-    if (isa<FPMathOperator>(CI))
-      setFPAttrs(CI, FPMathTag, FMF);
-    return Insert(CI, Name);
-  }
+                       const Twine &Name = "", MDNode *FPMathTag = nullptr);
 
   CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = {},
                        const Twine &Name = "", MDNode *FPMathTag = nullptr) {
@@ -2671,6 +2673,13 @@ class IRBuilderBase {
   CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                       Value *Alignment,
                                       Value *OffsetValue = nullptr);
+
+  void
+  createFPRoundingBundle(SmallVectorImpl<OperandBundleDef> &Bundles,
+                         std::optional<RoundingMode> Rounding = std::nullopt);
+  void createFPExceptionBundle(
+      SmallVectorImpl<OperandBundleDef> &Bundles,
+      std::optional<fp::ExceptionBehavior> Except = std::nullopt);
 };
 
 /// This provides a uniform API for creating instructions and inserting
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index e6332a16df7d5..aaa07215028e1 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -25,6 +25,7 @@
 #include "llvm/IR/CallingConv.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/FMF.h"
+#include "llvm/IR/FPEnv.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/LLVMContext.h"
@@ -1099,6 +1100,13 @@ template <typename InputTy> class OperandBundleDefT {
 using OperandBundleDef = OperandBundleDefT<Value *>;
 using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
 
+void addFPRoundingBundle(LLVMContext &Ctx,
+                         SmallVectorImpl<OperandBundleDef> &Bundles,
+                         RoundingMode Rounding);
+void addFPExceptionBundle(LLVMContext &Ctx,
+                          SmallVectorImpl<OperandBundleDef> &Bundles,
+                          fp::ExceptionBehavior Except);
+
 //===----------------------------------------------------------------------===//
 //                               CallBase Class
 //===----------------------------------------------------------------------===//
@@ -2131,6 +2139,15 @@ class CallBase : public Instruction {
     return false;
   }
 
+  /// Return the rounding mode specified by operand bundles.
+  std::optional<RoundingMode> getRoundingMode() const;
+
+  /// Return the exception behavior specified by operand bundles.
+  std::optional<fp::ExceptionBehavior> getExceptionBehavior() const;
+
+  /// Does the called function access the floating-point environment?
+  bool isConstrained() const;
+
   /// Used to keep track of an operand bundle. See the main comment on
   /// OperandBundleUser above.
   struct BundleOpInfo {
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 3436216d478e3..2f382ed6b0ad4 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -128,6 +128,10 @@ class IntrinsicInst : public CallInst {
   /// course of IR transformations
   static bool mayLowerToFunctionCall(Intrinsic::ID IID);
 
+  /// Check if the specified intrinsic can read or write the FP environment.
+  /// Constrained intrinsics are not handled here.
+  static bool canAccessFPEnvironment(Intrinsic::ID IID);
+
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const CallInst *I) {
     if (const Function *CF = I->getCalledFunction())
@@ -139,6 +143,21 @@ class IntrinsicInst : public CallInst {
   }
 };
 
+std::optional<RoundingMode> getRoundingModeArg(const CallBase &I);
+std::optional<fp::ExceptionBehavior> getExceptionBehaviorArg(const CallBase &I);
+
+/// Return true if the argument specifies an intrinsic that had a constrained
+/// variant (like 'trunc.f32').
+bool hadConstrainedVariant(StringRef Name);
+
+/// If the given string specifies some legacy constrained intrinsic (like
+/// 'llvm.experimental.constrained.trunc.f32'), return the corresponding
+/// intrinsic id (like 'Intrinsic::trunc') and the number of FP metadata
+/// arguments.
+///
+/// \param Name Intrinsic name without prefix 'llvm.experimental.constrained'
+/// (like 'trunc.f32').
+std::pair<Intrinsic::ID, unsigned> getIntrinsicForConstrained(StringRef Name);
+
 /// Check if \p ID corresponds to a lifetime intrinsic.
 static inline bool isLifetimeIntrinsic(Intrinsic::ID ID) {
   switch (ID) {
@@ -723,8 +742,6 @@ class VPBinOpIntrinsic : public VPIntrinsic {
 class ConstrainedFPIntrinsic : public IntrinsicInst {
 public:
   unsigned getNonMetadataArgCount() const;
-  std::optional<RoundingMode> getRoundingMode() const;
-  std::optional<fp::ExceptionBehavior> getExceptionBehavior() const;
   bool isDefaultFPEnvironment() const;
 
   // Methods for support type inquiry through isa, cast, and dyn_cast:
diff --git a/llvm/include/llvm/IR/Intrinsics.h b/llvm/include/llvm/IR/Intrinsics.h
index 89dfff256e0c4..c867a944ccc9b 100644
--- a/llvm/include/llvm/IR/Intrinsics.h
+++ b/llvm/include/llvm/IR/Intrinsics.h
@@ -125,9 +125,10 @@ namespace Intrinsic {
   /// Map a MS builtin name to an intrinsic ID.
   ID getIntrinsicForMSBuiltin(StringRef TargetPrefix, StringRef BuiltinName);
 
-  /// Returns true if the intrinsic ID is for one of the "Constrained
-  /// Floating-Point Intrinsics".
-  bool isConstrainedFPIntrinsic(ID QID);
+  /// Returns true if the intrinsic ID is for one of the legacy constrained
+  /// floating-point intrinsics, which use metadata arguments to present
+  /// floating-point options.
+  bool isLegacyConstrainedIntrinsic(ID QID);
 
   /// Returns true if the intrinsic ID is for one of the "Constrained
   /// Floating-Point Intrinsics" that take rounding mode metadata.
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 1ca8c2565ab0b..8d192b0d5cfe0 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1352,9 +1352,6 @@ let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn, IntrStrictFP] in
   def int_experimental_constrained_roundeven : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                                      [ LLVMMatchType<0>,
                                                                        llvm_metadata_ty ]>;
-  def int_experimental_constrained_trunc : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
-                                                                 [ LLVMMatchType<0>,
-                                                                   llvm_metadata_ty ]>;
 
   // Constrained floating-point comparison (quiet and signaling variants).
   // Third operand is the predicate represented as a metadata string.
diff --git a/llvm/include/llvm/IR/LLVMContext.h b/llvm/include/llvm/IR/LLVMContext.h
index 6d4a59ba6b1f6..6d4dbac0bf32e 100644
--- a/llvm/include/llvm/IR/LLVMContext.h
+++ b/llvm/include/llvm/IR/LLVMContext.h
@@ -96,6 +96,8 @@ class LLVMContext {
     OB_ptrauth = 7,         // "ptrauth"
     OB_kcfi = 8,            // "kcfi"
     OB_convergencectrl = 9, // "convergencectrl"
+    OB_fpe_control = 10,    // "fpe.control"
+    OB_fpe_except = 11,     // "fpe.except"
  };
 
   /// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
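For orientation, the two bundle tags registered above surface in textual IR as the Clang tests in this patch check; schematically (attribute numbering illustrative)::

    %t = call double @llvm.trunc.f64(double %x) #0 [ "fpe.except"(metadata !"strict") ]
    %r = call float @llvm.experimental.constrained.fmuladd.f32(float %a, float %b, float %c, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]

    attributes #0 = { strictfp memory(inaccessiblemem: readwrite) }
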
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 1971c28fc4c4d..6bb86048694ac 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -1706,7 +1706,6 @@ bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
   case Intrinsic::experimental_constrained_floor:
   case Intrinsic::experimental_constrained_round:
   case Intrinsic::experimental_constrained_roundeven:
-  case Intrinsic::experimental_constrained_trunc:
   case Intrinsic::experimental_constrained_nearbyint:
   case Intrinsic::experimental_constrained_rint:
   case Intrinsic::experimental_constrained_fcmp:
@@ -2142,8 +2141,11 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
   }
 
   if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
+    auto EB = Call->getExceptionBehavior();
+    APFloat U = Op->getValueAPF();
+
     if (IntrinsicID == Intrinsic::convert_to_fp16) {
-      APFloat Val(Op->getValueAPF());
+      APFloat Val(U);
 
       bool lost = false;
       Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
@@ -2151,8 +2153,6 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
       return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
     }
 
-    APFloat U = Op->getValueAPF();
-
     if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
         IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
       bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
@@ -2231,6 +2231,8 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
     }
 
     if (IntrinsicID == Intrinsic::trunc) {
+      if (U.isSignaling() && EB && *EB != fp::ebIgnore)
+        return nullptr;
       U.roundToIntegral(APFloat::rmTowardZero);
       return ConstantFP::get(Ty->getContext(), U);
     }
@@ -2277,9 +2279,6 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
       case Intrinsic::experimental_constrained_floor:
         RM = APFloat::rmTowardNegative;
         break;
-      case Intrinsic::experimental_constrained_trunc:
-        RM = APFloat::rmTowardZero;
-        break;
       }
       if (RM) {
         auto CI = cast<ConstrainedFPIntrinsic>(Call);
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index dd72d46f5d9aa..5367e922b0082 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -6317,6 +6317,61 @@ bool isOldDbgFormatIntrinsic(StringRef Name) {
          FnID == Intrinsic::dbg_assign;
 }
 
+void LLParser::updateConstrainedIntrinsic(
+    ValID &CalleeID, SmallVectorImpl<ParamInfo> &Args,
+    SmallVectorImpl<OperandBundleDef> &Bundles, AttrBuilder &FnAttrs) {
+  if (Args.empty())
+    return;
+
+  StringRef Name = CalleeID.StrVal;
+  if (!Name.consume_front("llvm.experimental.constrained."))
+    return;
+
+  for (auto &B : Bundles) {
+    if (B.getTag().starts_with("fpe."))
+      return;
+  }
+
+  const auto getMetadataArgumentValue = [](Value *Arg) -> StringRef {
+    if (auto *MAV = dyn_cast<MetadataAsValue>(Arg)) {
+      if (const auto *MD = MAV->getMetadata()) {
+        if (auto MDStr = dyn_cast<MDString>(MD))
+          return MDStr->getString();
+      }
+    }
+    return StringRef();
+  };
+
+  unsigned NumMetadataArgs = 0;
+  if (Args.size() > 1) {
+    Value *V = Args[Args.size() - 2].V;
+    StringRef VStr = getMetadataArgumentValue(V);
+    if (!VStr.empty()) {
+      NumMetadataArgs++;
+      if (auto RM = convertStrToRoundingMode(VStr))
+        addFPRoundingBundle(Context, Bundles, *RM);
+    }
+  }
+
+  Value *V = Args.back().V;
+  StringRef VStr = getMetadataArgumentValue(V);
+  if (!VStr.empty()) {
+    NumMetadataArgs++;
+    if (auto EB = convertStrToExceptionBehavior(VStr))
+      addFPExceptionBundle(Context, Bundles, *EB);
+  }
+
+  if (hadConstrainedVariant(Name)) {
+    Args.pop_back_n(NumMetadataArgs);
+    CalleeID.StrVal = "llvm." + Name.str();
+  }
+
+  FnAttrs.addAttribute(Attribute::StrictFP);
+  MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+  FnAttrs.addAttribute(Attribute::getWithMemoryEffects(Context, ME));
+}
+
 /// FunctionHeader
 ///   ::= OptionalLinkage OptionalPreemptionSpecifier OptionalVisibility
 ///       OptionalCallingConv OptRetAttrs OptUnnamedAddr Type GlobalName
@@ -8037,6 +8092,8 @@ bool LLParser::parseCall(Instruction *&Inst, PerFunctionState &PFS,
       parseOptionalOperandBundles(BundleList, PFS))
     return true;
 
+  updateConstrainedIntrinsic(CalleeID, ArgList, BundleList, FnAttrs);
+
   // If RetType is a non-function pointer type, then this is the short syntax
   // for the call, which means that RetType is just the return type. Infer the
   // rest of the function argument types from the arguments that are present.
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index a585a24a02246..02fbb38548d6b 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -7127,9 +7127,11 @@ Error BitcodeReader::materializeModule() {
       if (CallInst *CI = dyn_cast<CallInst>(U))
         UpgradeIntrinsicCall(CI, I.second);
     }
-    if (!I.first->use_empty())
-      I.first->replaceAllUsesWith(I.second);
-    I.first->eraseFromParent();
+    if (I.second) {
+      if (!I.first->use_empty())
+        I.first->replaceAllUsesWith(I.second);
+      I.first->eraseFromParent();
+    }
   }
   UpgradedIntrinsics.clear();
 
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index 5ca223852cbde..5c81c61f1ab27 100644
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -331,7 +331,7 @@ Value *CachingVPExpander::expandPredicationToFPCall(
   Function *Fn = Intrinsic::getOrInsertDeclaration(
       VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
   Value *NewOp;
-  if (Intrinsic::isConstrainedFPIntrinsic(UnpredicatedIntrinsicID))
+  if (Intrinsic::isLegacyConstrainedIntrinsic(UnpredicatedIntrinsicID))
    NewOp =
        Builder.CreateConstrainedFPCall(Fn, {Op0, Op1, Op2}, VPI.getName());
  else
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index f668e41094bbc..c4e6042d2a791 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2155,6 +2155,12 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     }
   }
 
+  // Process constrained intrinsics in a way compatible with the pre-bundle
+  // implementation.
+  if (CI.isConstrained() &&
+      !Intrinsic::isLegacyConstrainedIntrinsic(CI.getIntrinsicID()))
+    return false;
+
   // If this is a simple intrinsic (that is, we just need to add a def of
   // a vreg, and uses for each arg operand, then translate it.
   if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index db21e70897064..b984e6dc491f3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -314,6 +314,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
     break;
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     ValVT = Node->getValueType(0);
     if (Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
@@ -1151,6 +1152,7 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
     break;
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     ExpandStrictFPOp(Node, Results);
     return;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 465128099f444..1ff3dc2bcdb8a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -199,6 +199,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
 
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     R = ScalarizeVecRes_StrictFPOp(N);
     break;
@@ -1337,6 +1338,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
 
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     SplitVecRes_StrictFPOp(N, Lo, Hi);
     break;
@@ -4639,6 +4641,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
 
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     Res = WidenVecRes_StrictFP(N);
     break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 182529123ec6d..f7a9b351b43f5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -11020,6 +11020,7 @@ SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
     llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
   case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
   case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
 #include "llvm/IR/ConstrainedOps.def"
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index a38a3e9b91052..15c801a74dbc8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6462,6 +6462,11 @@ void SelectionDAGBuilder::visitVectorExtractLastActive(const CallInst &I,
 
 /// Lower the call to the specified intrinsic function.
 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
                                              unsigned Intrinsic) {
+  if (I.isConstrained()) {
+    visitConstrainedFPIntrinsic(cast<IntrinsicInst>(I));
+    return;
+  }
+
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   SDLoc sdl = getCurSDLoc();
   DebugLoc dl = getCurDebugLoc();
@@ -7022,7 +7027,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
   case Intrinsic::INTRINSIC:
 #include "llvm/IR/ConstrainedOps.def"
-    visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
+    visitConstrainedFPIntrinsic(cast<IntrinsicInst>(I));
     return;
 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
 #include "llvm/IR/VPIntrinsics.def"
@@ -8290,7 +8295,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
 }
 
 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
-    const ConstrainedFPIntrinsic &FPI) {
+    const IntrinsicInst &FPI) {
   SDLoc sdl = getCurSDLoc();
 
   // We do not need to serialize constrained FP intrinsics against
@@ -8299,7 +8304,13 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
   SDValue Chain = DAG.getRoot();
   SmallVector<SDValue, 4> Opers;
   Opers.push_back(Chain);
-  for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
+
+  Intrinsic::ID ID = FPI.getIntrinsicID();
+  bool IsLegacy = Intrinsic::isLegacyConstrainedIntrinsic(ID);
+  unsigned NumArgs = IsLegacy ? static_cast<const ConstrainedFPIntrinsic &>(FPI)
+                                    .getNonMetadataArgCount()
+                              : FPI.arg_size();
+  for (unsigned I = 0; I != NumArgs; ++I)
     Opers.push_back(getValue(FPI.getArgOperand(I)));
 
   auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
@@ -8347,6 +8358,8 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
   case Intrinsic::INTRINSIC:                                                   \
     Opcode = ISD::STRICT_##DAGN;                                               \
     break;
+#define LEGACY_FUNCTION(NAME, NARG, ROUND_MODE, I, DAGN)                       \
+  DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, NAME, DAGN)
 #include "llvm/IR/ConstrainedOps.def"
   case Intrinsic::experimental_constrained_fmuladd: {
     Opcode = ISD::STRICT_FMA;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 3a8dc25e98700..8c0b8a667357c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -626,7 +626,7 @@ class SelectionDAGBuilder {
                            DebugLoc DbgLoc);
   void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
   void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
-  void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);
+  void visitConstrainedFPIntrinsic(const IntrinsicInst &FPI);
   void visitConvergenceControl(const CallInst &I, unsigned Intrinsic);
   void visitVectorHistogram(const CallInst &I, unsigned IntrinsicID);
   void visitVectorExtractLastActive(const CallInst &I, unsigned Intrinsic);
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 392cfbdd21273..4275318a7e0b1 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -791,7 +791,8 @@ void TargetLoweringBase::initActions() {
 
     // Constrained floating-point operations default to expand.
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
-    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
+  setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
 
     // For most targets @llvm.get.dynamic.area.offset just returns 0.
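(Illustrative summary of the AutoUpgrade changes that follow: a legacy call such as
    %t = call double @llvm.experimental.constrained.trunc.f64(double %a, metadata !"fpexcept.strict")
is rewritten into the bundle form
    %t = call double @llvm.trunc.f64(double %a) strictfp [ "fpe.except"(metadata !"strict") ]
matching the CHECK lines in the tests further down.)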
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index e73538da282e9..912cf4ce45a4c 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/IR/AutoUpgrade.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/BinaryFormat/Dwarf.h"
@@ -1193,6 +1194,31 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
           F->getParent(), ID, F->getFunctionType()->getReturnType());
       return true;
     }
+    if (Name.consume_front("experimental.constrained.")) {
+      Name = Name.take_while(
+          [](char Ch) -> bool { return isAlnum(Ch) || Ch == '_'; });
+      auto [NewID, NumMetadataArgs] = getIntrinsicForConstrained(Name);
+      if (NewID != Intrinsic::not_intrinsic) {
+        auto *OldTy = cast<FunctionType>(F->getFunctionType());
+        SmallVector<Type *> ParamTys;
+        for (unsigned i = 0, e = OldTy->getNumParams() - NumMetadataArgs;
+             i != e; ++i) {
+          ParamTys.push_back(OldTy->getParamType(i));
+        }
+        auto *NewTy =
+            FunctionType::get(OldTy->getReturnType(), ParamTys, false);
+
+        SmallVector<Type *> OverloadTys;
+        bool Success =
+            Intrinsic::getIntrinsicSignature(NewID, NewTy, OverloadTys);
+        (void)Success;
+        assert(Success && "cannot get intrinsic signature");
+
+        NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), NewID,
+                                                  OverloadTys);
+      }
+      return true;
+    }
    break; // No other 'e*'.
  case 'f':
    if (Name.starts_with("flt.rounds")) {
@@ -4324,6 +4350,50 @@ static void upgradeDbgIntrinsicToDbgRecord(StringRef Name, CallBase *CI) {
   CI->getParent()->insertDbgRecordBefore(DR, CI->getIterator());
 }
 
+static CallBase *upgradeConstrainedIntrinsicCall(CallBase *CB, Function *F,
+                                                 IRBuilder<> &Builder) {
+  if (CB->getOperandBundle(LLVMContext::OB_fpe_control) ||
+      CB->getOperandBundle(LLVMContext::OB_fpe_except))
+    return nullptr;
+
+  SmallVector<OperandBundleDef, 2> NewBundles;
+  if (auto RM = getRoundingModeArg(*CB)) {
+    auto CurrentRM = CB->getRoundingMode();
+    assert(!CurrentRM && "unexpected rounding bundle");
+    Builder.createFPRoundingBundle(NewBundles, RM);
+  }
+  if (auto EB = getExceptionBehaviorArg(*CB)) {
+    auto CurrentEB = CB->getExceptionBehavior();
+    assert(!CurrentEB && "unexpected exception bundle");
+    Builder.createFPExceptionBundle(NewBundles, EB);
+  }
+
+  CallInst *NewCB = nullptr;
+  if (!NewBundles.empty()) {
+    SmallVector<Value *> Args(CB->args());
+    SmallVector<OperandBundleDef> Bundles;
+    CB->getOperandBundlesAsDefs(Bundles);
+    Bundles.append(NewBundles);
+
+    Builder.SetInsertPoint(CB->getParent(), CB->getIterator());
+    NewCB = Builder.CreateCall(F, Args, Bundles, CB->getName());
+    NewCB->copyMetadata(*CB);
+    AttributeList Attrs = CB->getAttributes();
+    NewCB->setAttributes(Attrs);
+    if (isa<FPMathOperator>(CB)) {
+      FastMathFlags FMF = CB->getFastMathFlags();
+      NewCB->setFastMathFlags(FMF);
+    }
+
+    MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+    auto A = Attribute::getWithMemoryEffects(CB->getContext(), ME);
+    NewCB->addFnAttr(A);
+    NewCB->addFnAttr(Attribute::StrictFP);
+  }
+
+  return NewCB;
+}
+
 /// Upgrade a call to an old intrinsic. All argument and return casting must be
 /// provided to seamlessly integrate with existing context.
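+/// For constrained intrinsics that keep their name, upgrading only attaches
+/// bundles derived from the metadata arguments, e.g. (illustrative, mirroring
+/// the auto-upgrade-constrained.ll test below):
+///   %r = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b,
+///            metadata !"round.dynamic", metadata !"fpexcept.ignore")
+///            [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]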
 void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
@@ -4352,6 +4422,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
   bool IsARM = Name.consume_front("arm.");
   bool IsAMDGCN = Name.consume_front("amdgcn.");
   bool IsDbg = Name.consume_front("dbg.");
+  bool IsConstrained = Name.consume_front("experimental.constrained.");
   Value *Rep = nullptr;
 
   if (!IsX86 && Name == "stackprotectorcheck") {
@@ -4380,6 +4451,10 @@
     } else {
       upgradeDbgIntrinsicToDbgRecord(Name, CI);
     }
+  } else if (IsConstrained) {
+    Rep = upgradeConstrainedIntrinsicCall(CI, F, Builder);
+    if (!Rep)
+      return;
   } else {
     llvm_unreachable("Unknown function for CallBase upgrade.");
   }
@@ -4881,6 +4956,43 @@
       MTI->setSourceAlignment(Align->getMaybeAlignValue());
     break;
   }
+#define LEGACY_FUNCTION(NAME, A, R, I, D) case Intrinsic::NAME:
+#include "llvm/IR/ConstrainedOps.def"
+  {
+    SmallVector<OperandBundleDef, 2> Bundles;
+    unsigned NumMetadataArgs = 0;
+
+    if (auto RM = getRoundingModeArg(*CI)) {
+      auto CurrentRM = CI->getRoundingMode();
+      assert(!CurrentRM && "unexpected rounding bundle");
+      Builder.createFPRoundingBundle(Bundles, RM);
+      ++NumMetadataArgs;
+    }
+
+    if (auto EB = getExceptionBehaviorArg(*CI)) {
+      auto CurrentEB = CI->getExceptionBehavior();
+      assert(!CurrentEB && "unexpected exception bundle");
+      Builder.createFPExceptionBundle(Bundles, EB);
+      ++NumMetadataArgs;
+    }
+
+    SmallVector<Value *> Args(CI->args());
+    Args.pop_back_n(NumMetadataArgs);
+    NewCall = Builder.CreateCall(NewFn, Args, Bundles, CI->getName());
+    NewCall->copyMetadata(*CI);
+    AttributeList Attrs = CI->getAttributes();
+    NewCall->setAttributes(Attrs);
+    if (isa<FPMathOperator>(CI)) {
+      FastMathFlags FMF = CI->getFastMathFlags();
+      NewCall->setFastMathFlags(FMF);
+    }
+
+    MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+    auto A = Attribute::getWithMemoryEffects(CI->getContext(), ME);
+    NewCall->addFnAttr(A);
+    NewCall->addFnAttr(Attribute::StrictFP);
+    break;
+  }
   }
 
   assert(NewCall && "Should have either set this variable or returned through "
                     "the default case");
@@ -4903,7 +5015,8 @@ void llvm::UpgradeCallsToIntrinsic(Function *F) {
       UpgradeIntrinsicCall(CB, NewFn);
 
     // Remove old function, no longer used, from the module.
-    F->eraseFromParent();
+    if (NewFn)
+      F->eraseFromParent();
   }
 }
 
diff --git a/llvm/lib/IR/FPEnv.cpp b/llvm/lib/IR/FPEnv.cpp
index 67f21d3756e93..91a962eb8190b 100644
--- a/llvm/lib/IR/FPEnv.cpp
+++ b/llvm/lib/IR/FPEnv.cpp
@@ -21,7 +21,18 @@
 
 namespace llvm {
 
-std::optional<RoundingMode> convertStrToRoundingMode(StringRef RoundingArg) {
+std::optional<RoundingMode> convertStrToRoundingMode(StringRef RoundingArg,
+                                                     bool InBundle) {
+  if (InBundle)
+    return StringSwitch<std::optional<RoundingMode>>(RoundingArg)
+        .Case("dyn", RoundingMode::Dynamic)
+        .Case("rte", RoundingMode::NearestTiesToEven)
+        .Case("rmm", RoundingMode::NearestTiesToAway)
+        .Case("rtn", RoundingMode::TowardNegative)
+        .Case("rtp", RoundingMode::TowardPositive)
+        .Case("rtz", RoundingMode::TowardZero)
+        .Default(std::nullopt);
+
   // For dynamic rounding mode, we use round to nearest but we will set the
   // 'exact' SDNodeFlag so that the value will not be rounded.
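+  // e.g. convertStrToRoundingMode("rte", /*InBundle=*/true) and the legacy
+  // convertStrToRoundingMode("round.tonearest", /*InBundle=*/false) both
+  // yield RoundingMode::NearestTiesToEven.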
   return StringSwitch<std::optional<RoundingMode>>(RoundingArg)
@@ -34,26 +45,27 @@ std::optional<RoundingMode> convertStrToRoundingMode(StringRef RoundingArg) {
       .Default(std::nullopt);
 }
 
-std::optional<StringRef> convertRoundingModeToStr(RoundingMode UseRounding) {
+std::optional<StringRef> convertRoundingModeToStr(RoundingMode UseRounding,
+                                                  bool InBundle) {
   std::optional<StringRef> RoundingStr;
   switch (UseRounding) {
   case RoundingMode::Dynamic:
-    RoundingStr = "round.dynamic";
+    RoundingStr = InBundle ? "dyn" : "round.dynamic";
     break;
   case RoundingMode::NearestTiesToEven:
-    RoundingStr = "round.tonearest";
+    RoundingStr = InBundle ? "rte" : "round.tonearest";
     break;
   case RoundingMode::NearestTiesToAway:
-    RoundingStr = "round.tonearestaway";
+    RoundingStr = InBundle ? "rmm" : "round.tonearestaway";
     break;
   case RoundingMode::TowardNegative:
-    RoundingStr = "round.downward";
+    RoundingStr = InBundle ? "rtn" : "round.downward";
     break;
   case RoundingMode::TowardPositive:
-    RoundingStr = "round.upward";
+    RoundingStr = InBundle ? "rtp" : "round.upward";
     break;
   case RoundingMode::TowardZero:
-    RoundingStr = "round.towardzero";
+    RoundingStr = InBundle ? "rtz" : "round.towardzero";
     break;
   default:
     break;
@@ -62,7 +74,14 @@ std::optional<StringRef> convertRoundingModeToStr(RoundingMode UseRounding) {
 }
 
 std::optional<fp::ExceptionBehavior>
-convertStrToExceptionBehavior(StringRef ExceptionArg) {
+convertStrToExceptionBehavior(StringRef ExceptionArg, bool InBundle) {
+  if (InBundle)
+    return StringSwitch<std::optional<fp::ExceptionBehavior>>(ExceptionArg)
+        .Case("ignore", fp::ebIgnore)
+        .Case("maytrap", fp::ebMayTrap)
+        .Case("strict", fp::ebStrict)
+        .Default(std::nullopt);
+
   return StringSwitch<std::optional<fp::ExceptionBehavior>>(ExceptionArg)
       .Case("fpexcept.ignore", fp::ebIgnore)
       .Case("fpexcept.maytrap", fp::ebMayTrap)
      .Case("fpexcept.strict", fp::ebStrict)
      .Default(std::nullopt);
@@ -71,17 +90,17 @@ convertStrToExceptionBehavior(StringRef ExceptionArg) {
 }
 
 std::optional<StringRef>
-convertExceptionBehaviorToStr(fp::ExceptionBehavior UseExcept) {
+convertExceptionBehaviorToStr(fp::ExceptionBehavior UseExcept, bool InBundle) {
   std::optional<StringRef> ExceptStr;
   switch (UseExcept) {
   case fp::ebStrict:
-    ExceptStr = "fpexcept.strict";
+    ExceptStr = InBundle ? "strict" : "fpexcept.strict";
     break;
   case fp::ebIgnore:
-    ExceptStr = "fpexcept.ignore";
+    ExceptStr = InBundle ? "ignore" : "fpexcept.ignore";
     break;
   case fp::ebMayTrap:
-    ExceptStr = "fpexcept.maytrap";
+    ExceptStr = InBundle ?
"maytrap" : "fpexcept.maytrap"; break; } return ExceptStr; diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp index 9c5dd5aeb92e9..d6c29e27a24d6 100644 --- a/llvm/lib/IR/Function.cpp +++ b/llvm/lib/IR/Function.cpp @@ -554,8 +554,8 @@ static MutableArrayRef makeArgArray(Argument *Args, size_t Count) { return MutableArrayRef(Args, Count); } -bool Function::isConstrainedFPIntrinsic() const { - return Intrinsic::isConstrainedFPIntrinsic(getIntrinsicID()); +bool Function::isLegacyConstrainedIntrinsic() const { + return Intrinsic::isLegacyConstrainedIntrinsic(getIntrinsicID()); } void Function::clearArguments() { diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp index f340f7aafdc76..b4b020f06eec5 100644 --- a/llvm/lib/IR/IRBuilder.cpp +++ b/llvm/lib/IR/IRBuilder.cpp @@ -86,6 +86,43 @@ IRBuilderBase::createCallHelper(Function *Callee, ArrayRef Ops, return CI; } +CallInst *IRBuilderBase::CreateCall(FunctionType *FTy, Value *Callee, + ArrayRef Args, + ArrayRef OpBundles, + const Twine &Name, MDNode *FPMathTag) { + ArrayRef ActualBundlesRef = OpBundles; + SmallVector ActualBundles; + + if (IsFPConstrained) { + if (const auto *Func = dyn_cast(Callee)) { + if (Intrinsic::ID ID = Func->getIntrinsicID()) { + if (IntrinsicInst::canAccessFPEnvironment(ID)) { + bool NeedRound = true, NeedExcept = true; + for (const auto &Item : OpBundles) { + if (NeedRound && Item.getTag() == "fpe.round") + NeedRound = false; + else if (NeedExcept && Item.getTag() == "fpe.except") + NeedExcept = false; + ActualBundles.push_back(Item); + } + if (NeedRound && Intrinsic::hasConstrainedFPRoundingModeOperand(ID)) + createFPRoundingBundle(ActualBundles); + if (NeedExcept) + createFPExceptionBundle(ActualBundles); + ActualBundlesRef = ActualBundles; + } + } + } + } + + CallInst *CI = CallInst::Create(FTy, Callee, Args, ActualBundlesRef); + if (IsFPConstrained) + setConstrainedFPCallAttr(CI); + if (isa(CI)) + setFPAttrs(CI, FPMathTag, FMF); + return Insert(CI, Name); +} + Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) { assert(isa(Scaling) && "Expected constant integer"); if (cast(Scaling)->isZero()) @@ -897,6 +934,17 @@ CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID, return createCallHelper(Fn, Args, Name, FMFSource); } +CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID, + ArrayRef Types, + ArrayRef Args, + ArrayRef OpBundles, + Instruction *FMFSource, + const Twine &Name) { + Module *M = BB->getModule(); + Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, Types); + return createCallHelper(Fn, Args, Name, FMFSource, OpBundles); +} + CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID, ArrayRef Args, Instruction *FMFSource, @@ -936,8 +984,11 @@ CallInst *IRBuilderBase::CreateConstrainedFPBinOp( if (FMFSource) UseFMF = FMFSource->getFastMathFlags(); - CallInst *C = CreateIntrinsic(ID, {L->getType()}, - {L, R, RoundingV, ExceptV}, nullptr, Name); + SmallVector OpBundles; + createFPRoundingBundle(OpBundles, Rounding); + createFPExceptionBundle(OpBundles, Except); + CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, RoundingV, ExceptV}, + OpBundles, nullptr, Name); setConstrainedFPCallAttr(C); setFPAttrs(C, FPMathTag, UseFMF); return C; @@ -953,8 +1004,11 @@ CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp( if (FMFSource) UseFMF = FMFSource->getFastMathFlags(); - CallInst *C = - CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, nullptr, Name); + SmallVector OpBundles; + createFPExceptionBundle(OpBundles, Except); + + 
CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, OpBundles, + nullptr, Name); setConstrainedFPCallAttr(C); setFPAttrs(C, FPMathTag, UseFMF); return C; @@ -981,19 +1035,24 @@ CallInst *IRBuilderBase::CreateConstrainedFPCast( std::optional Rounding, std::optional Except) { Value *ExceptV = getConstrainedFPExcept(Except); + bool HasRounding = Intrinsic::hasConstrainedFPRoundingModeOperand(ID); FastMathFlags UseFMF = FMF; if (FMFSource) UseFMF = FMFSource->getFastMathFlags(); + SmallVector OpBundles; + createFPRoundingBundle(OpBundles, Rounding); + createFPExceptionBundle(OpBundles, Except); + CallInst *C; - if (Intrinsic::hasConstrainedFPRoundingModeOperand(ID)) { + if (HasRounding) { Value *RoundingV = getConstrainedFPRounding(Rounding); C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV}, - nullptr, Name); + OpBundles, nullptr, Name); } else - C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr, - Name); + C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, OpBundles, + nullptr, Name); setConstrainedFPCallAttr(C); @@ -1022,8 +1081,11 @@ CallInst *IRBuilderBase::CreateConstrainedFPCmp( Value *PredicateV = getConstrainedFPPredicate(P); Value *ExceptV = getConstrainedFPExcept(Except); - CallInst *C = CreateIntrinsic(ID, {L->getType()}, - {L, R, PredicateV, ExceptV}, nullptr, Name); + SmallVector OpBundles; + createFPExceptionBundle(OpBundles, Except); + + CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, PredicateV, ExceptV}, + OpBundles, nullptr, Name); setConstrainedFPCallAttr(C); return C; } @@ -1033,14 +1095,19 @@ CallInst *IRBuilderBase::CreateConstrainedFPCall( std::optional Rounding, std::optional Except) { llvm::SmallVector UseArgs; + SmallVector OpBundles; append_range(UseArgs, Args); - if (Intrinsic::hasConstrainedFPRoundingModeOperand(Callee->getIntrinsicID())) + if (Intrinsic::hasConstrainedFPRoundingModeOperand( + Callee->getIntrinsicID())) { UseArgs.push_back(getConstrainedFPRounding(Rounding)); + createFPRoundingBundle(OpBundles, Rounding); + } UseArgs.push_back(getConstrainedFPExcept(Except)); + createFPExceptionBundle(OpBundles, Except); - CallInst *C = CreateCall(Callee, UseArgs, Name); + CallInst *C = CreateCall(Callee, UseArgs, OpBundles, Name); setConstrainedFPCallAttr(C); return C; } @@ -1273,6 +1340,20 @@ CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL, return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue); } +void IRBuilderBase::createFPRoundingBundle( + SmallVectorImpl &Bundles, + std::optional Rounding) { + addFPRoundingBundle(Context, Bundles, + Rounding.value_or(DefaultConstrainedRounding)); +} + +void IRBuilderBase::createFPExceptionBundle( + SmallVectorImpl &Bundles, + std::optional Except) { + addFPExceptionBundle(Context, Bundles, + Except.value_or(DefaultConstrainedExcept)); +} + IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default; IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default; IRBuilderFolder::~IRBuilderFolder() = default; diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp index 065ce3a017283..6d3c360fdbd59 100644 --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -604,6 +604,29 @@ bool CallBase::hasClobberingOperandBundles() const { getIntrinsicID() != Intrinsic::assume; } +std::optional CallBase::getRoundingMode() const { + if (auto RoundingBundle = getOperandBundle(LLVMContext::OB_fpe_control)) { + Value *V = RoundingBundle->Inputs.front(); + Metadata *MD = 
cast(V)->getMetadata(); + return convertStrToRoundingMode(cast(MD)->getString(), true); + } + return std::nullopt; +} + +std::optional CallBase::getExceptionBehavior() const { + if (auto ExceptionBundle = getOperandBundle(LLVMContext::OB_fpe_except)) { + Value *V = ExceptionBundle->Inputs.front(); + Metadata *MD = cast(V)->getMetadata(); + return convertStrToExceptionBehavior(cast(MD)->getString(), true); + } + return std::nullopt; +} + +bool CallBase::isConstrained() const { + return getOperandBundle(LLVMContext::OB_fpe_control) || + getOperandBundle(LLVMContext::OB_fpe_except); +} + MemoryEffects CallBase::getMemoryEffects() const { MemoryEffects ME = getAttributes().getMemoryEffects(); if (auto *Fn = dyn_cast(getCalledOperand())) { @@ -675,6 +698,26 @@ void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() { MemoryEffects::inaccessibleOrArgMemOnly()); } +void llvm::addFPRoundingBundle(LLVMContext &Ctx, + SmallVectorImpl &Bundles, + RoundingMode Rounding) { + std::optional RndStr = convertRoundingModeToStr(Rounding, true); + assert(RndStr && "Garbage rounding mode!"); + auto *RoundingMDS = MDString::get(Ctx, *RndStr); + auto *RM = MetadataAsValue::get(Ctx, RoundingMDS); + Bundles.emplace_back("fpe.control", RM); +} + +void llvm::addFPExceptionBundle(LLVMContext &Ctx, + SmallVectorImpl &Bundles, + fp::ExceptionBehavior Except) { + std::optional ExcStr = convertExceptionBehaviorToStr(Except, true); + assert(ExcStr && "Garbage exception behavior!"); + auto *ExceptMDS = MDString::get(Ctx, *ExcStr); + auto *EB = MetadataAsValue::get(Ctx, ExceptMDS); + Bundles.emplace_back("fpe.except", EB); +} + //===----------------------------------------------------------------------===// // CallInst Implementation //===----------------------------------------------------------------------===// diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp index 002bab8e079e5..262aebc4e94c6 100644 --- a/llvm/lib/IR/IntrinsicInst.cpp +++ b/llvm/lib/IR/IntrinsicInst.cpp @@ -21,6 +21,7 @@ //===----------------------------------------------------------------------===// #include "llvm/IR/IntrinsicInst.h" +#include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfoMetadata.h" @@ -66,6 +67,68 @@ bool IntrinsicInst::mayLowerToFunctionCall(Intrinsic::ID IID) { } } +bool IntrinsicInst::canAccessFPEnvironment(Intrinsic::ID IID) { + switch (IID) { +#define FUNCTION(NAME, A, R, I) case Intrinsic::NAME: +#define LEGACY_FUNCTION(NAME, A, R, I, N) case Intrinsic::NAME: +#include "llvm/IR/ConstrainedOps.def" + return true; + default: + return false; + } +} + +std::optional llvm::getRoundingModeArg(const CallBase &I) { + unsigned NumOperands = I.arg_size(); + if (NumOperands <= 2) + return std::nullopt; + Metadata *MD = nullptr; + auto *MAV = dyn_cast(I.getArgOperand(NumOperands - 2)); + if (MAV) + MD = MAV->getMetadata(); + if (!MD || !isa(MD)) + return std::nullopt; + return convertStrToRoundingMode(cast(MD)->getString()); +} + +std::optional +llvm::getExceptionBehaviorArg(const CallBase &I) { + unsigned NumOperands = I.arg_size(); + if (NumOperands <= 1) + return std::nullopt; + Metadata *MD = nullptr; + auto *MAV = dyn_cast(I.getArgOperand(NumOperands - 1)); + if (MAV) + MD = MAV->getMetadata(); + if (!MD || !isa(MD)) + return std::nullopt; + return convertStrToExceptionBehavior(cast(MD)->getString()); +} + +bool llvm::hadConstrainedVariant(StringRef Name) { + size_t period_pos = Name.find('.'); + if (period_pos != StringRef::npos) + Name 
= Name.take_front(period_pos); +#define LEGACY_FUNCTION(NAME, A, R, I, D) \ + if (Name == #NAME) \ + return true; +#include "llvm/IR/ConstrainedOps.def" + return false; +} + +std::pair +llvm::getIntrinsicForConstrained(StringRef Name) { + size_t period_pos = Name.find('.'); + if (period_pos != StringRef::npos) + Name = Name.take_front(period_pos); +#define LEGACY_FUNCTION(NAME, A, R, I, D) \ + if (Name == #NAME) \ + return std::make_pair(Intrinsic::NAME, 1 + R); +#include "llvm/IR/ConstrainedOps.def" + + return std::make_pair(Intrinsic::not_intrinsic, 0); +} + //===----------------------------------------------------------------------===// /// DbgVariableIntrinsic - This is the common base class for debug info /// intrinsics for variables. @@ -273,29 +336,6 @@ void InstrProfCallsite::setCallee(Value *Callee) { setArgOperand(4, Callee); } -std::optional ConstrainedFPIntrinsic::getRoundingMode() const { - unsigned NumOperands = arg_size(); - Metadata *MD = nullptr; - auto *MAV = dyn_cast(getArgOperand(NumOperands - 2)); - if (MAV) - MD = MAV->getMetadata(); - if (!MD || !isa(MD)) - return std::nullopt; - return convertStrToRoundingMode(cast(MD)->getString()); -} - -std::optional -ConstrainedFPIntrinsic::getExceptionBehavior() const { - unsigned NumOperands = arg_size(); - Metadata *MD = nullptr; - auto *MAV = dyn_cast(getArgOperand(NumOperands - 1)); - if (MAV) - MD = MAV->getMetadata(); - if (!MD || !isa(MD)) - return std::nullopt; - return convertStrToExceptionBehavior(cast(MD)->getString()); -} - bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const { std::optional Except = getExceptionBehavior(); if (Except) { @@ -354,7 +394,7 @@ unsigned ConstrainedFPIntrinsic::getNonMetadataArgCount() const { } bool ConstrainedFPIntrinsic::classof(const IntrinsicInst *I) { - return Intrinsic::isConstrainedFPIntrinsic(I->getIntrinsicID()); + return Intrinsic::isLegacyConstrainedIntrinsic(I->getIntrinsicID()); } ElementCount VPIntrinsic::getStaticVectorLength() const { diff --git a/llvm/lib/IR/Intrinsics.cpp b/llvm/lib/IR/Intrinsics.cpp index 3130a0bd2955a..168b98de2fb66 100644 --- a/llvm/lib/IR/Intrinsics.cpp +++ b/llvm/lib/IR/Intrinsics.cpp @@ -741,7 +741,7 @@ Function *Intrinsic::getDeclarationIfExists(Module *M, ID id, #include "llvm/IR/IntrinsicImpl.inc" #undef GET_LLVM_INTRINSIC_FOR_MS_BUILTIN -bool Intrinsic::isConstrainedFPIntrinsic(ID QID) { +bool Intrinsic::isLegacyConstrainedIntrinsic(ID QID) { switch (QID) { #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \ case Intrinsic::INTRINSIC: diff --git a/llvm/lib/IR/LLVMContext.cpp b/llvm/lib/IR/LLVMContext.cpp index e078527b597b4..1e0883641d078 100644 --- a/llvm/lib/IR/LLVMContext.cpp +++ b/llvm/lib/IR/LLVMContext.cpp @@ -97,6 +97,16 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) { "convergencectrl operand bundle id drifted!"); (void)ConvergenceCtrlEntry; + auto *RoundingEntry = pImpl->getOrInsertBundleTag("fpe.control"); + assert(RoundingEntry->second == LLVMContext::OB_fpe_control && + "fpe.round operand bundle id drifted!"); + (void)RoundingEntry; + + auto *ExceptionEntry = pImpl->getOrInsertBundleTag("fpe.except"); + assert(ExceptionEntry->second == LLVMContext::OB_fpe_except && + "fpe.except operand bundle id drifted!"); + (void)ExceptionEntry; + SyncScope::ID SingleThreadSSID = pImpl->getOrInsertSyncScopeID("singlethread"); assert(SingleThreadSSID == SyncScope::SingleThread && diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index 55de486e90e19..e3fafecacdd2a 100644 --- a/llvm/lib/IR/Verifier.cpp 
+++ b/llvm/lib/IR/Verifier.cpp @@ -658,6 +658,9 @@ class Verifier : public InstVisitor, VerifierSupport { /// Verify the llvm.experimental.noalias.scope.decl declarations void verifyNoAliasScopeDecl(); + + /// Verify the call of a constrained intrinsic call. + void verifyConstrainedInstrinsicCall(const CallBase &CB); }; } // end anonymous namespace @@ -3718,7 +3721,9 @@ void Verifier::visitCallBase(CallBase &Call) { FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false, FoundPreallocatedBundle = false, FoundGCLiveBundle = false, FoundPtrauthBundle = false, FoundKCFIBundle = false, - FoundAttachedCallBundle = false; + FoundAttachedCallBundle = false, FoundFpeRoundBundle = false, + FoundFpeExceptBundle = false; + for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) { OperandBundleUse BU = Call.getOperandBundleAt(i); uint32_t Tag = BU.getTagID(); @@ -3781,9 +3786,40 @@ void Verifier::visitCallBase(CallBase &Call) { "Multiple \"clang.arc.attachedcall\" operand bundles", Call); FoundAttachedCallBundle = true; verifyAttachedCallBundle(Call, BU); + } else if (Tag == LLVMContext::OB_fpe_control) { + Check(!FoundFpeRoundBundle, "Multiple fpe.round operand bundles", Call); + Check(BU.Inputs.size() == 1, + "Expected exactly one fpe.round bundle operand", Call); + auto *V = dyn_cast(BU.Inputs.front()); + Check(V, "Value of fpe.round bundle operand must be a metadata", Call); + auto *MDS = dyn_cast(V->getMetadata()); + Check(MDS, "Value of fpe.round bundle operand must be a string", Call); + auto RM = convertStrToRoundingMode(MDS->getString(), true); + Check(RM.has_value(), + "Value of fpe.round bundle operand is not a correct rounding mode", + Call); + FoundFpeRoundBundle = true; + } else if (Tag == LLVMContext::OB_fpe_except) { + Check(!FoundFpeExceptBundle, "Multiple fpe.except operand bundles", Call); + Check(BU.Inputs.size() == 1, + "Expected exactly one fpe.except bundle operand", Call); + auto *V = dyn_cast(BU.Inputs.front()); + Check(V, "Value of fpe.except bundle operand must be a metadata", Call); + auto *MDS = dyn_cast(V->getMetadata()); + Check(MDS, "Value of fpe.except bundle operand must be a string", Call); + auto EB = convertStrToExceptionBehavior(MDS->getString(), true); + Check(EB.has_value(), + "Value of fpe.except bundle operand is not a correct exception " + "behavior", + Call); + FoundFpeExceptBundle = true; } } + // Verify if FP options specified in constrained intrinsic arguments agree + // with the options specified in operand bundles. + verifyConstrainedInstrinsicCall(Call); + // Verify that callee and callsite agree on whether to use pointer auth. Check(!(Call.getCalledFunction() && FoundPtrauthBundle), "Direct call cannot have a ptrauth bundle", Call); @@ -3810,6 +3846,53 @@ void Verifier::visitCallBase(CallBase &Call) { visitInstruction(Call); } +void Verifier::verifyConstrainedInstrinsicCall(const CallBase &CB) { + const auto *CFPI = dyn_cast(&CB); + if (!CFPI) + return; + + // FP metadata arguments must not conflict with the corresponding + // operand bundles. 
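+  // Illustrative IR that the checks below reject, since the metadata
+  // argument and the bundle disagree on the rounding mode:
+  //   call float @llvm.experimental.constrained.fadd.f32(float %a, float %b,
+  //       metadata !"round.upward", metadata !"fpexcept.ignore")
+  //       [ "fpe.control"(metadata !"rtz"), "fpe.except"(metadata !"ignore") ]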
+ if (std::optional RM = getRoundingModeArg(CB)) { + RoundingMode Rounding = *RM; + auto RoundingBundle = CB.getOperandBundle(LLVMContext::OB_fpe_control); + Check(RoundingBundle, + "Constrained intrinsic has a rounding argument but the call does not", + CB); + if (RoundingBundle) { + std::optional RMByBundle = CB.getRoundingMode(); + Check(RMByBundle, "Invalid value of rounding mode bundle", CB); + if (RMByBundle) { + Check(*RMByBundle == Rounding, + "Rounding mode of the constrained intrinsic differs from that in " + "operand bundle", + CB); + } + } + } + + if (std::optional EB = getExceptionBehaviorArg(CB)) { + fp::ExceptionBehavior Excepts = *EB; + auto ExceptionBundle = CB.getOperandBundle(LLVMContext::OB_fpe_except); + Check(ExceptionBundle, + "Constrained intrinsic has an exception handling argument but the " + "call does not", + CB); + if (ExceptionBundle) { + std::optional EBByBundle = + CB.getExceptionBehavior(); + Check(EBByBundle, "Invalid value of exception behavior bundle", CB); + if (EBByBundle) { + Check( + *EBByBundle == Excepts, + "Exception behavior of the constrained intrinsic differs from that " + "in operand bundle", + CB); + } + } + } +} + void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context) { Check(!Attrs.contains(Attribute::InAlloca), diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp index 53e486f3dc6cd..7538c84b03bfa 100644 --- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp +++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp @@ -251,10 +251,12 @@ static bool markTails(Function &F, OptimizationRemarkEmitter *ORE) { // Special-case operand bundles "clang.arc.attachedcall", "ptrauth", and // "kcfi". - bool IsNoTail = CI->isNoTailCall() || - CI->hasOperandBundlesOtherThan( - {LLVMContext::OB_clang_arc_attachedcall, - LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}); + bool IsNoTail = + CI->isNoTailCall() || + CI->hasOperandBundlesOtherThan( + {LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_ptrauth, + LLVMContext::OB_kcfi, LLVMContext::OB_fpe_control, + LLVMContext::OB_fpe_except}); if (!IsNoTail && CI->doesNotAccessMemory()) { // A call to a readnone function whose arguments are all things computed diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp index cb6a4e34c226e..eae1a92632a8a 100644 --- a/llvm/lib/Transforms/Utils/CloneFunction.cpp +++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp @@ -418,7 +418,6 @@ struct PruningFunctionCloner { Instruction * PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) { const Instruction &OldInst = *II; - Instruction *NewInst = nullptr; if (HostFuncIsStrictFP) { Intrinsic::ID CIID = getConstrainedIntrinsicID(OldInst); if (CIID != Intrinsic::not_intrinsic) { @@ -472,18 +471,25 @@ PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) { // The last arguments of a constrained intrinsic are metadata that // represent rounding mode (absents in some intrinsics) and exception // behavior. The inlined function uses default settings. 
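+      // Illustrative result, assuming a cloned @llvm.sqrt.f32 call in a
+      // strictfp host function:
+      //   %r.strict = call float @llvm.experimental.constrained.sqrt.f32(
+      //       float %x, metadata !"round.tonearest", metadata !"fpexcept.ignore")
+      //       [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"ignore") ]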
- if (Intrinsic::hasConstrainedFPRoundingModeOperand(CIID)) + SmallVector Bundles; + if (Intrinsic::hasConstrainedFPRoundingModeOperand(CIID)) { Args.push_back( MetadataAsValue::get(Ctx, MDString::get(Ctx, "round.tonearest"))); + addFPRoundingBundle(Ctx, Bundles, RoundingMode::NearestTiesToEven); + } Args.push_back( MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore"))); - - NewInst = CallInst::Create(IFn, Args, OldInst.getName() + ".strict"); + addFPExceptionBundle(Ctx, Bundles, fp::ExceptionBehavior::ebIgnore); + auto *NewConstrainedInst = + CallInst::Create(IFn, Args, Bundles, OldInst.getName() + ".strict"); + + MemoryEffects ME = MemoryEffects::inaccessibleMemOnly(); + auto A = Attribute::getWithMemoryEffects(Ctx, ME); + NewConstrainedInst->addFnAttr(A); + return NewConstrainedInst; } } - if (!NewInst) - NewInst = II->clone(); - return NewInst; + return OldInst.clone(); } /// The specified block is found to be reachable, clone it and diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp index cdc3f0308fe59..238ce74f69780 100644 --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -514,10 +514,9 @@ bool llvm::wouldInstructionBeTriviallyDead(const Instruction *I, return false; } - if (auto *FPI = dyn_cast(I)) { - std::optional ExBehavior = - FPI->getExceptionBehavior(); - return *ExBehavior != fp::ebStrict; + if (auto *Call = dyn_cast(I)) { + if (auto EB = Call->getExceptionBehavior()) + return *EB != fp::ebStrict; } } diff --git a/llvm/test/Assembler/fp-intrinsics-attr.ll b/llvm/test/Assembler/fp-intrinsics-attr.ll index 5b9a44710763e..176c900465c3c 100644 --- a/llvm/test/Assembler/fp-intrinsics-attr.ll +++ b/llvm/test/Assembler/fp-intrinsics-attr.ll @@ -215,9 +215,7 @@ define void @func(double %a, double %b, double %c, i32 %i) strictfp { double %a, metadata !"fpexcept.strict") - %trunc = call double @llvm.experimental.constrained.trunc.f64( - double %a, - metadata !"fpexcept.strict") + %trunc = call double @llvm.trunc.f64(double %a) strictfp [ "fpe.except"(metadata !"strict") ] %q1 = call i1 @llvm.experimental.constrained.fcmp.f64( double %a, double %b, @@ -368,15 +366,15 @@ declare double @llvm.experimental.constrained.round.f64(double, metadata) declare double @llvm.experimental.constrained.roundeven.f64(double, metadata) ; CHECK: @llvm.experimental.constrained.roundeven.f64({{.*}}) #[[ATTR1]] -declare double @llvm.experimental.constrained.trunc.f64(double, metadata) -; CHECK: @llvm.experimental.constrained.trunc.f64({{.*}}) #[[ATTR1]] - declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata) ; CHECK: @llvm.experimental.constrained.fcmp.f64({{.*}}) #[[ATTR1]] declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata) ; CHECK: @llvm.experimental.constrained.fcmps.f64({{.*}}) #[[ATTR1]] +declare double @llvm.trunc.f64(double) +; CHECK: declare double @llvm.trunc.f64(double) #[[ATTR2:[0-9]+]] + ; CHECK: attributes #[[ATTR0]] = {{{.*}} strictfp {{.*}}} ; CHECK: attributes #[[ATTR1]] = { {{.*}} strictfp {{.*}} } - +; CHECK: attributes #[[ATTR2]] = { {{.*}} memory(none) } diff --git a/llvm/test/Bitcode/auto-upgrade-constrained.ll b/llvm/test/Bitcode/auto-upgrade-constrained.ll new file mode 100644 index 0000000000000..b857fa1f4a686 --- /dev/null +++ b/llvm/test/Bitcode/auto-upgrade-constrained.ll @@ -0,0 +1,327 @@ +; RUN: llvm-as < %s | llvm-dis | FileCheck %s +; RUN: llvm-dis %s.bc -o - | FileCheck %s + +define float @test_fadd(float %a, float %b) 
strictfp { + %res = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret float %res +} +; CHECK-LABEL: define float @test_fadd( +; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ] + + +define float @test_fsub(float %a, float %b) strictfp { + %res = call float @llvm.experimental.constrained.fsub.f32(float %a, float %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") + ret float %res +} +; CHECK-LABEL: define float @test_fsub( +; CHECK: call float @llvm.experimental.constrained.fsub.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"ignore") ] + +define float @test_fmul(float %a, float %b) strictfp { + %res = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.downward", metadata !"fpexcept.ignore") + ret float %res +} +; CHECK-LABEL: define float @test_fmul( +; CHECK: call float @llvm.experimental.constrained.fmul.f32(float {{.*}}, float {{.*}}, metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rtn"), "fpe.except"(metadata !"ignore") ] + +define float @test_fdiv(float %a, float %b) strictfp { + %res = call float @llvm.experimental.constrained.fdiv.f32(float %a, float %b, metadata !"round.upward", metadata !"fpexcept.ignore") + ret float %res +} +; CHECK-LABEL: define float @test_fdiv( +; CHECK: call float @llvm.experimental.constrained.fdiv.f32(float {{.*}}, float {{.*}}, metadata !"round.upward", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rtp"), "fpe.except"(metadata !"ignore") ] + +define float @test_frem(float %a, float %b) strictfp { + %res = call float @llvm.experimental.constrained.frem.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret float %res +} +; CHECK-LABEL: define float @test_frem( +; CHECK: call float @llvm.experimental.constrained.frem.f32(float {{.*}}, float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ] + +define float @test_fma(float %a, float %b, float %c) strictfp { + %res = call float @llvm.experimental.constrained.fma.f32(float %a, float %b, float %c, metadata !"round.towardzero", metadata !"fpexcept.ignore") + ret float %res +} +; CHECK-LABEL: define float @test_fma( +; CHECK: call float @llvm.experimental.constrained.fma.f32(float {{.*}}, float {{.*}}, float {{.*}}, metadata !"round.towardzero", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rtz"), "fpe.except"(metadata !"ignore") ] + +define float @test_fmuladd(float %a, float %b, float %c) strictfp { + %res = call float @llvm.experimental.constrained.fmuladd.f32(float %a, float %b, float %c, metadata !"round.tonearestaway", metadata !"fpexcept.ignore") + ret float %res +} +; CHECK-LABEL: define float @test_fmuladd( +; CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float {{.*}}, float {{.*}}, float {{.*}}, metadata !"round.tonearestaway", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rmm"), "fpe.except"(metadata !"ignore") ] + +define i32 @test_fptosi(float %a) strictfp { + %res = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata 
!"fpexcept.ignore") + ret i32 %res +} +; CHECK-LABEL: define i32 @test_fptosi( +; CHECK: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float {{.*}}, metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.except"(metadata !"ignore") ] + +define i32 @test_fptoui(float %a) strictfp { + %res = call i32 @llvm.experimental.constrained.fptoui.f32.i32(float %a, metadata !"fpexcept.strict") + ret i32 %res +} +; CHECK-LABEL: define i32 @test_fptoui( +; CHECK: call i32 @llvm.experimental.constrained.fptoui.i32.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ] + +define float @test_sitofp(i32 %a) strictfp { + %res = call float @llvm.experimental.constrained.sitofp.i32.f32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret float %res +} +; CHECK-LABEL: define float @test_sitofp( +; CHECK: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ] + +define float @test_uitofp(i32 %a) strictfp { + %res = call float @llvm.experimental.constrained.uitofp.i32.f32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret float %res +} +; CHECK-LABEL: define float @test_uitofp( +; CHECK: call float @llvm.experimental.constrained.uitofp.f32.i32(i32 {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ] + +define float @test_fptrunc(double %a) strictfp { + %res = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret float %res +} +; CHECK-LABEL: define float @test_fptrunc( +; CHECK: call float @llvm.experimental.constrained.fptrunc.f32.f64(double {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ] + +define double @test_fpext(float %a) strictfp { + %res = call double @llvm.experimental.constrained.fpext.f64.f32(float %a, metadata !"fpexcept.ignore") + ret double %res +} +; CHECK-LABEL: define double @test_fpext( +; CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(float {{.*}}, metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.except"(metadata !"ignore") ] + +define float @test_sqrt(float %a) strictfp { + %res = call float @llvm.experimental.constrained.sqrt.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_sqrt( +; CHECK: call float @llvm.experimental.constrained.sqrt.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_powi(float %a, i32 %b) strictfp { + %res = call float @llvm.experimental.constrained.powi.f32.i32(float %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_powi( +; CHECK: call float @llvm.experimental.constrained.powi.f32(float {{.*}}, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_ldexp(float %a, i32 %b) strictfp { + %res = call float @llvm.experimental.constrained.ldexp.f32.i32(float %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_ldexp( +; CHECK: call 
float @llvm.experimental.constrained.ldexp.f32.i32(float {{.*}}, i32 {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_asin(float %a) strictfp { + %res = call float @llvm.experimental.constrained.asin.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_asin( +; CHECK: call float @llvm.experimental.constrained.asin.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_acos(float %a) strictfp { + %res = call float @llvm.experimental.constrained.acos.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_acos( +; CHECK: call float @llvm.experimental.constrained.acos.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_atan(float %a) strictfp { + %res = call float @llvm.experimental.constrained.atan.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_atan( +; CHECK: call float @llvm.experimental.constrained.atan.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_sin(float %a) strictfp { + %res = call float @llvm.experimental.constrained.sin.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_sin( +; CHECK: call float @llvm.experimental.constrained.sin.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_cos(float %a) strictfp { + %res = call float @llvm.experimental.constrained.cos.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_cos( +; CHECK: call float @llvm.experimental.constrained.cos.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_tan(float %a) strictfp { + %res = call float @llvm.experimental.constrained.tan.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_tan( +; CHECK: call float @llvm.experimental.constrained.tan.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_sinh(float %a) strictfp { + %res = call float @llvm.experimental.constrained.sinh.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_sinh( +; CHECK: call float @llvm.experimental.constrained.sinh.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_cosh(float %a) strictfp { + %res = call float @llvm.experimental.constrained.cosh.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float 
@test_cosh( +; CHECK: call float @llvm.experimental.constrained.cosh.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_tanh(float %a) strictfp { + %res = call float @llvm.experimental.constrained.tanh.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_tanh( +; CHECK: call float @llvm.experimental.constrained.tanh.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_pow(float %a, float %b) strictfp { + %res = call float @llvm.experimental.constrained.pow.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_pow( +; CHECK: call float @llvm.experimental.constrained.pow.f32(float {{.*}}, float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_log(float %a) strictfp { + %res = call float @llvm.experimental.constrained.log.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_log( +; CHECK: call float @llvm.experimental.constrained.log.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_log10(float %a) strictfp { + %res = call float @llvm.experimental.constrained.log10.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_log10( +; CHECK: call float @llvm.experimental.constrained.log10.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_log2(float %a) strictfp { + %res = call float @llvm.experimental.constrained.log2.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_log2( +; CHECK: call float @llvm.experimental.constrained.log2.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_exp(float %a) strictfp { + %res = call float @llvm.experimental.constrained.exp.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_exp( +; CHECK: call float @llvm.experimental.constrained.exp.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_exp2(float %a) strictfp { + %res = call float @llvm.experimental.constrained.exp2.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_exp2( +; CHECK: call float @llvm.experimental.constrained.exp2.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_rint(float %a) strictfp { + %res = call float @llvm.experimental.constrained.rint.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret 
float %res +} +; CHECK-LABEL: define float @test_rint( +; CHECK: call float @llvm.experimental.constrained.rint.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_nearbyint(float %a) strictfp { + %res = call float @llvm.experimental.constrained.nearbyint.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_nearbyint( +; CHECK: call float @llvm.experimental.constrained.nearbyint.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define i32 @test_lrint(float %a) strictfp { + %res = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret i32 %res +} +; CHECK-LABEL: define i32 @test_lrint( +; CHECK: call i32 @llvm.experimental.constrained.lrint.i32.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define i32 @test_llrint(float %a) strictfp { + %res = call i32 @llvm.experimental.constrained.llrint.i32.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret i32 %res +} +; CHECK-LABEL: define i32 @test_llrint( +; CHECK: call i32 @llvm.experimental.constrained.llrint.i32.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] + +define float @test_maxnum(float %a, float %b) strictfp { + %res = call float @llvm.experimental.constrained.maxnum.f32(float %a, float %b, metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_maxnum( +; CHECK: call float @llvm.experimental.constrained.maxnum.f32(float {{.*}}, float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ] + +define float @test_minnum(float %a, float %b) strictfp { + %res = call float @llvm.experimental.constrained.minnum.f32(float %a, float %b, metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_minnum( +; CHECK: call float @llvm.experimental.constrained.minnum.f32(float {{.*}}, float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ] + +define float @test_maximum(float %a, float %b) strictfp { + %res = call float @llvm.experimental.constrained.maximum.f32(float %a, float %b, metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_maximum( +; CHECK: call float @llvm.experimental.constrained.maximum.f32(float {{.*}}, float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ] + +define float @test_minimum(float %a, float %b) strictfp { + %res = call float @llvm.experimental.constrained.minimum.f32(float %a, float %b, metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_minimum( +; CHECK: call float @llvm.experimental.constrained.minimum.f32(float {{.*}}, float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ] + +define float @test_ceil(float %a) strictfp { + %res = call float @llvm.experimental.constrained.ceil.f32(float %a, metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_ceil( +; call float @llvm.experimental.constrained.ceil.f32(float {{.*}}, metadata !"fpexcept.strict") 
#[[ATTR0]] [ "fpe.except"(metadata !"strict") ] + +define float @test_floor(float %a) strictfp { + %res = call float @llvm.experimental.constrained.floor.f32(float %a, metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_floor( +; call float @llvm.experimental.constrained.floor.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ] + +define i32 @test_lround(float %a) strictfp { + %res = call i32 @llvm.experimental.constrained.lround.i32.f32(float %a, metadata !"fpexcept.strict") + ret i32 %res +} +; CHECK-LABEL: define i32 @test_lround( +; CHECK: call i32 @llvm.experimental.constrained.lround.i32.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ] + +define i32 @test_llround(float %a) strictfp { + %res = call i32 @llvm.experimental.constrained.llround.i32.f32(float %a, metadata !"fpexcept.strict") + ret i32 %res +} +; CHECK-LABEL: define i32 @test_llround( +; CHECK: call i32 @llvm.experimental.constrained.llround.i32.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ] + +define float @test_round(float %a) strictfp { + %res = call float @llvm.experimental.constrained.round.f32(float %a, metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_round( +; CHECK: call float @llvm.experimental.constrained.round.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ] + +define float @test_roundeven(float %a) strictfp { + %res = call float @llvm.experimental.constrained.roundeven.f32(float %a, metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_roundeven( +; CHECK: call float @llvm.experimental.constrained.roundeven.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ] + +define float @test_trunc(float %a) strictfp { + %res = call float @llvm.experimental.constrained.trunc.f32(float %a, metadata !"fpexcept.strict") + ret float %res +} +; CHECK-LABEL: define float @test_trunc( +; CHECK: call float @llvm.trunc.f32(float {{.*}}) #[[ATTR0]] [ "fpe.except"(metadata !"strict") ] + +; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) } diff --git a/llvm/test/Bitcode/auto-upgrade-constrained.ll.bc b/llvm/test/Bitcode/auto-upgrade-constrained.ll.bc new file mode 100644 index 0000000000000..75a84901b5cbc Binary files /dev/null and b/llvm/test/Bitcode/auto-upgrade-constrained.ll.bc differ diff --git a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll index d860104b9cb3d..01e5b3f6673ae 100644 --- a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll +++ b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll @@ -13,6 +13,8 @@ ; CHECK-NEXT: @trunc_v4f32(<4 x float> %x) #0 { ; CHECK: // %bb.0: ; CHECK-NEXT: frintz v0.4s, v0.4s ; CHECK-NEXT: ret - %val = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0 + %val = call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) #0 [ "fpe.except"(metadata !"strict") ] ret <4 x float> %val } @@ -571,7 +571,7 @@ define <2 x double> @trunc_v2f64(<2 x double> %x) #0 { ; CHECK: // %bb.0: ; CHECK-NEXT: frintz v0.2d, v0.2d ; CHECK-NEXT: ret - %val = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0 + %val = call <2 x double> @llvm.trunc.v2f64(<2 x double> %x) #0 [ "fpe.except"(metadata !"strict") ] ret <2 x 
double> %val } @@ -829,7 +829,7 @@ define <1 x double> @trunc_v1f64(<1 x double> %x) #0 { ; CHECK: // %bb.0: ; CHECK-NEXT: frintz d0, d0 ; CHECK-NEXT: ret - %val = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0 + %val = call <1 x double> @llvm.trunc.v1f64(<1 x double> %x) #0 [ "fpe.except"(metadata !"strict") ] ret <1 x double> %val } @@ -901,7 +901,6 @@ declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metad declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata) declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata) declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata) -declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata) declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float>, <4 x float>, metadata, metadata) declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float>, <4 x float>, metadata, metadata) @@ -927,7 +926,6 @@ declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, met declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata) declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata) declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata) -declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata) declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double>, <2 x double>, metadata, metadata) declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double>, <2 x double>, metadata, metadata) @@ -953,7 +951,6 @@ declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, met declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata) declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata) declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata) -declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata) declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double>, <1 x double>, metadata, metadata) declare <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double>, <1 x double>, metadata, metadata) diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll index f2a14a9b73fa1..539ac69071984 100644 --- a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll +++ b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll @@ -765,7 +765,7 @@ define float @trunc_f32(float %x) #0 { ; CHECK: // %bb.0: ; CHECK-NEXT: frintz s0, s0 ; CHECK-NEXT: ret - %val = call float @llvm.experimental.constrained.trunc.f32(float %x, metadata !"fpexcept.strict") #0 + %val = call float @llvm.trunc.f32(float %x) #0 [ "fpe.except"(metadata !"strict") ] ret float %val } @@ -1559,7 +1559,7 @@ define double @trunc_f64(double %x) #0 { ; CHECK: // %bb.0: ; CHECK-NEXT: frintz d0, d0 ; CHECK-NEXT: ret - %val = call double @llvm.experimental.constrained.trunc.f64(double %x, metadata !"fpexcept.strict") #0 + %val = call double @llvm.trunc.f64(double %x) #0 [ "fpe.except"(metadata !"strict") ] ret double %val } @@ -2428,7 +2428,7 @@ define fp128 @trunc_f128(fp128 %x) #0 { ; CHECK-NEXT: bl truncl ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %val = call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %x, 
metadata !"fpexcept.strict") #0 + %val = call fp128 @llvm.trunc.f128(fp128 %x) #0 [ "fpe.except"(metadata !"strict") ] ret fp128 %val } @@ -3179,7 +3179,6 @@ declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata) declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata) declare float @llvm.experimental.constrained.round.f32(float, metadata) declare float @llvm.experimental.constrained.roundeven.f32(float, metadata) -declare float @llvm.experimental.constrained.trunc.f32(float, metadata) declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata) declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata) @@ -3231,7 +3230,6 @@ declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata) declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata) declare double @llvm.experimental.constrained.round.f64(double, metadata) declare double @llvm.experimental.constrained.roundeven.f64(double, metadata) -declare double @llvm.experimental.constrained.trunc.f64(double, metadata) declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata) declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata) @@ -3280,7 +3278,6 @@ declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata) declare i32 @llvm.experimental.constrained.lround.i32.f128(fp128, metadata) declare i64 @llvm.experimental.constrained.llround.i64.f128(fp128, metadata) declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata) -declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata) declare i1 @llvm.experimental.constrained.fcmps.f128(fp128, fp128, metadata, metadata) declare i1 @llvm.experimental.constrained.fcmp.f128(fp128, fp128, metadata, metadata) diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll index f9c359bc114ed..418a98873eaa1 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll @@ -819,11 +819,11 @@ define float @test_pown_fast_f32_strictfp(float %x, i32 %y) #1 { ; CHECK-LABEL: define float @test_pown_fast_f32_strictfp ; CHECK-SAME: (float [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]]) #[[ATTR0]] -; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR0]] -; CHECK-NEXT: [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] -; CHECK-NEXT: [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] -; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) #[[ATTR0]] +; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]]) #[[ATTR5:[0-9]+]] +; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR5]] +; CHECK-NEXT: [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] +; CHECK-NEXT: [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float 
[[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ] +; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) #[[ATTR5]] ; CHECK-NEXT: [[__YEVEN:%.*]] = shl i32 [[Y]], 31 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[X]] to i32 ; CHECK-NEXT: [[__POW_SIGN:%.*]] = and i32 [[__YEVEN]], [[TMP0]] diff --git a/llvm/test/CodeGen/ARM/fp-intrinsics.ll b/llvm/test/CodeGen/ARM/fp-intrinsics.ll index 93b6a58a22b6c..797ad8d3734eb 100644 --- a/llvm/test/CodeGen/ARM/fp-intrinsics.ll +++ b/llvm/test/CodeGen/ARM/fp-intrinsics.ll @@ -291,7 +291,7 @@ define float @round_f32(float %x) #0 { ; CHECK-SP-NOV8: bl truncf ; CHECK-SP-V8: vrintz.f32 define float @trunc_f32(float %x) #0 { - %val = call float @llvm.experimental.constrained.trunc.f32(float %x, metadata !"fpexcept.strict") #0 + %val = call float @llvm.trunc.f32(float %x) #0 [ "fpe.except"(metadata !"strict") ] ret float %val } @@ -762,7 +762,7 @@ define double @round_f64(double %x) #0 { ; CHECK-DP-NOV8: bl trunc ; CHECK-DP-V8: vrintz.f64 define double @trunc_f64(double %x) #0 { - %val = call double @llvm.experimental.constrained.trunc.f64(double %x, metadata !"fpexcept.strict") #0 + %val = call double @llvm.trunc.f64(double %x) #0 [ "fpe.except"(metadata !"strict") ] ret double %val } diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll index eac4fb6f98bf7..379e2d7e9df9b 100644 --- a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll +++ b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll @@ -34,11 +34,6 @@ declare double @llvm.experimental.constrained.round.f64(double, metadata) declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata) declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata) -declare float @llvm.experimental.constrained.trunc.f32(float, metadata) -declare double @llvm.experimental.constrained.trunc.f64(double, metadata) -declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata) -declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata) - define float @ceil_f32(float %f1) strictfp { ; P8-LABEL: ceil_f32: ; P8: # %bb.0: @@ -567,9 +562,7 @@ define float @trunc_f32(float %f1) strictfp { ; P9: # %bb.0: ; P9-NEXT: xsrdpiz f1, f1 ; P9-NEXT: blr - %res = call float @llvm.experimental.constrained.trunc.f32( - float %f1, - metadata !"fpexcept.strict") + %res = call float @llvm.trunc.f32(float %f1) [ "fpe.except"(metadata !"strict") ] ret float %res } @@ -583,9 +576,7 @@ define double @trunc_f64(double %f1) strictfp { ; P9: # %bb.0: ; P9-NEXT: xsrdpiz f1, f1 ; P9-NEXT: blr - %res = call double @llvm.experimental.constrained.trunc.f64( - double %f1, - metadata !"fpexcept.strict") + %res = call double @llvm.trunc.f64(double %f1) [ "fpe.except"(metadata !"strict") ] ret double %res } @@ -599,9 +590,7 @@ define <4 x float> @trunc_v4f32(<4 x float> %vf1) strictfp { ; P9: # %bb.0: ; P9-NEXT: xvrspiz v2, v2 ; P9-NEXT: blr - %res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32( - <4 x float> %vf1, - metadata !"fpexcept.strict") + %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %vf1) [ "fpe.except"(metadata !"strict") ] ret <4 x float> %res } @@ -615,8 +604,6 @@ define <2 x double> @trunc_v2f64(<2 x double> %vf1) strictfp { ; P9: # %bb.0: ; P9-NEXT: xvrdpiz v2, v2 ; P9-NEXT: blr - %res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64( - <2 x double> 
%vf1, - metadata !"fpexcept.strict") + %res = call <2 x double> @llvm.trunc.v2f64(<2 x double> %vf1) [ "fpe.except"(metadata !"strict") ] ret <2 x double> %res } diff --git a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll index c1ee436a40c55..55f26d099d59f 100644 --- a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll +++ b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll @@ -1061,9 +1061,7 @@ define ppc_fp128 @test_trunc_ppc_fp128(ppc_fp128 %first) #0 { ; PC64-NEXT: mtlr 0 ; PC64-NEXT: blr entry: - %trunc = call ppc_fp128 @llvm.experimental.constrained.trunc.ppcf128( - ppc_fp128 %first, - metadata !"fpexcept.strict") #1 + %trunc = call ppc_fp128 @llvm.trunc.ppcf128(ppc_fp128 %first) #1 [ "fpe.except"(metadata !"strict") ] ret ppc_fp128 %trunc } @@ -2187,7 +2185,6 @@ declare ppc_fp128 @llvm.experimental.constrained.sqrt.ppcf128(ppc_fp128, metadat declare ppc_fp128 @llvm.experimental.constrained.fsub.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata) declare ppc_fp128 @llvm.experimental.constrained.tan.ppcf128(ppc_fp128, metadata, metadata) declare ppc_fp128 @llvm.experimental.constrained.atan2.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata) -declare ppc_fp128 @llvm.experimental.constrained.trunc.ppcf128(ppc_fp128, metadata) declare i64 @llvm.experimental.constrained.fptosi.i64.ppcf128(ppc_fp128, metadata) declare i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(ppc_fp128, metadata) declare i1 @llvm.experimental.constrained.fptosi.i1.ppcf128(ppc_fp128, metadata) diff --git a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll index 71c3069a406fe..f18512347c98c 100644 --- a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll +++ b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll @@ -6767,9 +6767,7 @@ define <1 x float> @constrained_vector_trunc_v1f32(<1 x float> %x) #0 { ; PC64LE9-NEXT: xsrdpiz 1, 1 ; PC64LE9-NEXT: blr entry: - %trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32( - <1 x float> %x, - metadata !"fpexcept.strict") #1 + %trunc = call <1 x float> @llvm.trunc.v1f32(<1 x float> %x) #1 [ "fpe.except"(metadata !"strict") ] ret <1 x float> %trunc } @@ -6784,9 +6782,7 @@ define <2 x double> @constrained_vector_trunc_v2f64(<2 x double> %x) #0 { ; PC64LE9-NEXT: xvrdpiz 34, 34 ; PC64LE9-NEXT: blr entry: - %trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64( - <2 x double> %x, - metadata !"fpexcept.strict") #1 + %trunc = call <2 x double> @llvm.trunc.v2f64(<2 x double> %x) #1 [ "fpe.except"(metadata !"strict") ] ret <2 x double> %trunc } @@ -6834,9 +6830,7 @@ define <3 x float> @constrained_vector_trunc_v3f32(<3 x float> %x) #0 { ; PC64LE9-NEXT: xxperm 34, 35, 1 ; PC64LE9-NEXT: blr entry: - %trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32( - <3 x float> %x, - metadata !"fpexcept.strict") #1 + %trunc = call <3 x float> @llvm.trunc.v3f32(<3 x float> %x) #1 [ "fpe.except"(metadata !"strict") ] ret <3 x float> %trunc } @@ -6857,9 +6851,7 @@ define <3 x double> @constrained_vector_trunc_v3f64(<3 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 2 ; PC64LE9-NEXT: blr entry: - %trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64( - <3 x double> %x, - metadata !"fpexcept.strict") #1 + %trunc = call <3 x double> @llvm.trunc.v3f64(<3 x double> %x) #1 [ "fpe.except"(metadata !"strict") ] ret <3 x double> %trunc } @@ -8785,7 
+8777,6 @@ declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata) declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata) declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata) -declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata) declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16>, metadata, metadata) declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata) declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata) @@ -8832,7 +8823,6 @@ declare <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(<1 x float declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata) declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata) declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata) -declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata) declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata) declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata) declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata) @@ -8901,8 +8891,6 @@ declare <3 x float> @llvm.experimental.constrained.floor.v3f32(<3 x float>, meta declare <3 x double> @llvm.experimental.constrained.floor.v3f64(<3 x double>, metadata) declare <3 x float> @llvm.experimental.constrained.round.v3f32(<3 x float>, metadata) declare <3 x double> @llvm.experimental.constrained.round.v3f64(<3 x double>, metadata) -declare <3 x float> @llvm.experimental.constrained.trunc.v3f32(<3 x float>, metadata) -declare <3 x double> @llvm.experimental.constrained.trunc.v3f64(<3 x double>, metadata) declare <3 x double> @llvm.experimental.constrained.sitofp.v3f64.v3i32(<3 x i32>, metadata, metadata) declare <3 x float> @llvm.experimental.constrained.sitofp.v3f32.v3i32(<3 x i32>, metadata, metadata) declare <3 x double> @llvm.experimental.constrained.sitofp.v3f64.v3i64(<3 x i64>, metadata, metadata) @@ -8947,7 +8935,6 @@ declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata) declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata) declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata) -declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata) declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16>, metadata, metadata) declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata) declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata) diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll index fddb86de58f51..6da0e5c482ed5 100644 --- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll @@ -1552,8 +1552,6 @@ define double @ceil_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.trunc.f64(double, metadata) - 
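Every strict-FP hunk in these tests applies the same mechanical rewrite; a minimal sketch of the two call forms, with an illustrative function name that is not part of the patch:

define double @trunc_example(double %x) strictfp {
  ; old form: exception behavior rides as a metadata argument of the constrained intrinsic
  ;   %r = call double @llvm.experimental.constrained.trunc.f64(double %x, metadata !"fpexcept.strict") strictfp
  ; new form: the plain intrinsic plus an "fpe.except" operand bundle
  %r = call double @llvm.trunc.f64(double %x) strictfp [ "fpe.except"(metadata !"strict") ]
  ret double %r
}

No replacement declare is needed: the textual IR parser materializes intrinsic declarations on first use, which is presumably why these hunks delete the constrained-trunc declares without adding @llvm.trunc.* ones.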
define double @trunc_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: trunc_f64: ; RV32IFD: # %bb.0: @@ -1608,7 +1606,7 @@ define double @trunc_f64(double %a) nounwind strictfp { ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret - %1 = call double @llvm.experimental.constrained.trunc.f64(double %a, metadata !"fpexcept.strict") strictfp + %1 = call double @llvm.trunc.f64(double %a) strictfp [ "fpe.except"(metadata !"strict") ] ret double %1 } diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll index 8b883f781c9d9..63f84b5523398 100644 --- a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll @@ -1517,8 +1517,6 @@ define float @ceil_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.trunc.f32(float, metadata) - define float @trunc_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: trunc_f32: ; RV32IF: # %bb.0: @@ -1573,7 +1571,7 @@ define float @trunc_f32(float %a) nounwind strictfp { ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret - %1 = call float @llvm.experimental.constrained.trunc.f32(float %a, metadata !"fpexcept.strict") strictfp + %1 = call float @llvm.trunc.f32(float %a) strictfp [ "fpe.except"(metadata !"strict") ] ret float %1 } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll index 2173887e85417..a5641d47e51fe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll @@ -20,10 +20,9 @@ define <1 x half> @trunc_v1f16(<1 x half> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <1 x half> @llvm.experimental.constrained.trunc.v1f16(<1 x half> %x, metadata !"fpexcept.strict") + %a = call <1 x half> @llvm.trunc.v1f16(<1 x half> %x) [ "fpe.except"(metadata !"strict") ] ret <1 x half> %a } -declare <1 x half> @llvm.experimental.constrained.trunc.v1f16(<1 x half>, metadata) define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp { ; CHECK-LABEL: trunc_v2f16: @@ -41,10 +40,9 @@ define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half> %x, metadata !"fpexcept.strict") + %a = call <2 x half> @llvm.trunc.v2f16(<2 x half> %x) [ "fpe.except"(metadata !"strict") ] ret <2 x half> %a } -declare <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half>, metadata) define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp { ; CHECK-LABEL: trunc_v4f16: @@ -62,10 +60,9 @@ define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half> %x, metadata !"fpexcept.strict") + %a = call <4 x half> @llvm.trunc.v4f16(<4 x half> %x) [ "fpe.except"(metadata !"strict") ] ret <4 x half> %a } -declare <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half>, metadata) define <8 x half> @trunc_v8f16(<8 x half> %x) strictfp { ; CHECK-LABEL: trunc_v8f16: @@ -83,10 +80,9 @@ define <8 x half> 
@trunc_v8f16(<8 x half> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half> %x, metadata !"fpexcept.strict") + %a = call <8 x half> @llvm.trunc.v8f16(<8 x half> %x) [ "fpe.except"(metadata !"strict") ] ret <8 x half> %a } -declare <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half>, metadata) define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp { ; CHECK-LABEL: trunc_v16f16: @@ -104,10 +100,9 @@ define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret - %a = call <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half> %x, metadata !"fpexcept.strict") + %a = call <16 x half> @llvm.trunc.v16f16(<16 x half> %x) [ "fpe.except"(metadata !"strict") ] ret <16 x half> %a } -declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata) define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: trunc_v32f16: @@ -126,10 +121,9 @@ define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret - %a = call <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half> %x, metadata !"fpexcept.strict") + %a = call <32 x half> @llvm.trunc.v32f16(<32 x half> %x) [ "fpe.except"(metadata !"strict") ] ret <32 x half> %a } -declare <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half>, metadata) define <1 x float> @trunc_v1f32(<1 x float> %x) strictfp { ; CHECK-LABEL: trunc_v1f32: @@ -147,10 +141,9 @@ define <1 x float> @trunc_v1f32(<1 x float> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float> %x, metadata !"fpexcept.strict") + %a = call <1 x float> @llvm.trunc.v1f32(<1 x float> %x) [ "fpe.except"(metadata !"strict") ] ret <1 x float> %a } -declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata) define <2 x float> @trunc_v2f32(<2 x float> %x) strictfp { ; CHECK-LABEL: trunc_v2f32: @@ -168,10 +161,9 @@ define <2 x float> @trunc_v2f32(<2 x float> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float> %x, metadata !"fpexcept.strict") + %a = call <2 x float> @llvm.trunc.v2f32(<2 x float> %x) [ "fpe.except"(metadata !"strict") ] ret <2 x float> %a } -declare <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float>, metadata) define <4 x float> @trunc_v4f32(<4 x float> %x) strictfp { ; CHECK-LABEL: trunc_v4f32: @@ -189,10 +181,9 @@ define <4 x float> @trunc_v4f32(<4 x float> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") + %a = call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) [ "fpe.except"(metadata !"strict") ] ret <4 x float> %a } -declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata) define <8 x float> @trunc_v8f32(<8 x float> %x) strictfp { ; CHECK-LABEL: trunc_v8f32: @@ -210,10 +201,9 @@ define <8 x float> 
@trunc_v8f32(<8 x float> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret - %a = call <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float> %x, metadata !"fpexcept.strict") + %a = call <8 x float> @llvm.trunc.v8f32(<8 x float> %x) [ "fpe.except"(metadata !"strict") ] ret <8 x float> %a } -declare <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata) define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp { ; CHECK-LABEL: trunc_v16f32: @@ -231,10 +221,9 @@ define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret - %a = call <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float> %x, metadata !"fpexcept.strict") + %a = call <16 x float> @llvm.trunc.v16f32(<16 x float> %x) [ "fpe.except"(metadata !"strict") ] ret <16 x float> %a } -declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata) define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp { ; CHECK-LABEL: trunc_v1f64: @@ -252,10 +241,9 @@ define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") + %a = call <1 x double> @llvm.trunc.v1f64(<1 x double> %x) [ "fpe.except"(metadata !"strict") ] ret <1 x double> %a } -declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata) define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp { ; CHECK-LABEL: trunc_v2f64: @@ -273,10 +261,9 @@ define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") + %a = call <2 x double> @llvm.trunc.v2f64(<2 x double> %x) [ "fpe.except"(metadata !"strict") ] ret <2 x double> %a } -declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata) define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp { ; CHECK-LABEL: trunc_v4f64: @@ -294,10 +281,9 @@ define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret - %a = call <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double> %x, metadata !"fpexcept.strict") + %a = call <4 x double> @llvm.trunc.v4f64(<4 x double> %x) [ "fpe.except"(metadata !"strict") ] ret <4 x double> %a } -declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata) define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp { ; CHECK-LABEL: trunc_v8f64: @@ -315,7 +301,6 @@ define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret - %a = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %x, metadata !"fpexcept.strict") + %a = call <8 x double> @llvm.trunc.v8f64(<8 x double> %x) [ "fpe.except"(metadata !"strict") ] ret <8 x double> %a } -declare <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll index 8a5f118d8f6ac..d1ace747e043e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll @@ -20,10 +20,9 @@ define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 1 x half> @llvm.experimental.constrained.trunc.nxv1f16(<vscale x 1 x half> %x, metadata !"fpexcept.strict") + %a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 1 x half> %a } -declare <vscale x 1 x half> @llvm.experimental.constrained.trunc.nxv1f16(<vscale x 1 x half>, metadata) define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) strictfp { ; CHECK-LABEL: trunc_nxv2f16: @@ -41,10 +40,9 @@ define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 2 x half> @llvm.experimental.constrained.trunc.nxv2f16(<vscale x 2 x half> %x, metadata !"fpexcept.strict") + %a = call <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 2 x half> %a } -declare <vscale x 2 x half> @llvm.experimental.constrained.trunc.nxv2f16(<vscale x 2 x half>, metadata) define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) strictfp { ; CHECK-LABEL: trunc_nxv4f16: @@ -62,10 +60,9 @@ define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 4 x half> @llvm.experimental.constrained.trunc.nxv4f16(<vscale x 4 x half> %x, metadata !"fpexcept.strict") + %a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 4 x half> %a } -declare <vscale x 4 x half> @llvm.experimental.constrained.trunc.nxv4f16(<vscale x 4 x half>, metadata) define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) strictfp { ; CHECK-LABEL: trunc_nxv8f16: @@ -83,10 +80,9 @@ define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 8 x half> @llvm.experimental.constrained.trunc.nxv8f16(<vscale x 8 x half> %x, metadata !"fpexcept.strict") + %a = call <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 8 x half> %a } -declare <vscale x 8 x half> @llvm.experimental.constrained.trunc.nxv8f16(<vscale x 8 x half>, metadata) define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) strictfp { ; CHECK-LABEL: trunc_nxv16f16: @@ -104,10 +100,9 @@ define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 16 x half> @llvm.experimental.constrained.trunc.nxv16f16(<vscale x 16 x half> %x, metadata !"fpexcept.strict") + %a = call <vscale x 16 x half> @llvm.trunc.nxv16f16(<vscale x 16 x half> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 16 x half> %a } -declare <vscale x 16 x half> @llvm.experimental.constrained.trunc.nxv16f16(<vscale x 16 x half>, metadata) define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) strictfp { ; CHECK-LABEL: trunc_nxv32f16: @@ -125,10 +120,9 @@ define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 32 x half> @llvm.experimental.constrained.trunc.nxv32f16(<vscale x 32 x half> %x, metadata !"fpexcept.strict") + %a = call <vscale x 32 x half> @llvm.trunc.nxv32f16(<vscale x 32 x half> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 32 x half> %a } -declare <vscale x 32 x half> @llvm.experimental.constrained.trunc.nxv32f16(<vscale x 32 x half>, metadata) define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) strictfp { ; CHECK-LABEL: trunc_nxv1f32: @@ -146,10 +140,9 @@ define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 1 x float> @llvm.experimental.constrained.trunc.nxv1f32(<vscale x 1 x float> %x, metadata !"fpexcept.strict") + %a = call <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 1 x float> %a } -declare <vscale x 1 x float> @llvm.experimental.constrained.trunc.nxv1f32(<vscale x 1 x float>, metadata) define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) strictfp { ; CHECK-LABEL: trunc_nxv2f32: @@ -167,10 +160,9 @@ define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 2 x float> @llvm.experimental.constrained.trunc.nxv2f32(<vscale x 2 x float> %x, metadata !"fpexcept.strict") + %a = call <vscale x 2 x float> @llvm.trunc.nxv2f32(<vscale x 2 x float> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 2 x float> %a } -declare <vscale x 2 x float> @llvm.experimental.constrained.trunc.nxv2f32(<vscale x 2 x float>, metadata) define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) strictfp { ; CHECK-LABEL: trunc_nxv4f32: @@ -188,10 +180,9 @@ define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 4 x float> @llvm.experimental.constrained.trunc.nxv4f32(<vscale x 4 x float> %x, metadata !"fpexcept.strict") + %a = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 4 x float> %a } -declare <vscale x 4 x float> @llvm.experimental.constrained.trunc.nxv4f32(<vscale x 4 x float>, metadata) define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) strictfp { ; CHECK-LABEL: trunc_nxv8f32: @@ -209,10 +200,9 @@ define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 8 x float> @llvm.experimental.constrained.trunc.nxv8f32(<vscale x 8 x float> %x, metadata !"fpexcept.strict") + %a = call <vscale x 8 x float> @llvm.trunc.nxv8f32(<vscale x 8 x float> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 8 x float> %a } -declare <vscale x 8 x float> @llvm.experimental.constrained.trunc.nxv8f32(<vscale x 8 x float>, metadata) define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) strictfp { ; CHECK-LABEL: trunc_nxv16f32: @@ -230,10 +220,9 @@ define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 16 x float> @llvm.experimental.constrained.trunc.nxv16f32(<vscale x 16 x float> %x, metadata !"fpexcept.strict") + %a = call <vscale x 16 x float> @llvm.trunc.nxv16f32(<vscale x 16 x float> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 16 x float> %a } -declare <vscale x 16 x float> @llvm.experimental.constrained.trunc.nxv16f32(<vscale x 16 x float>, metadata) define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) strictfp { ; CHECK-LABEL: trunc_nxv1f64: @@ -251,10 +240,9 @@ define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 1 x double> @llvm.experimental.constrained.trunc.nxv1f64(<vscale x 1 x double> %x, metadata !"fpexcept.strict") + %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 1 x double> %a } -declare <vscale x 1 x double> @llvm.experimental.constrained.trunc.nxv1f64(<vscale x 1 x double>, metadata) define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) strictfp { ; CHECK-LABEL: trunc_nxv2f64: @@ -272,10 +260,9 @@ define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 2 x double> @llvm.experimental.constrained.trunc.nxv2f64(<vscale x 2 x double> %x, metadata !"fpexcept.strict") + %a = call <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 2 x double> %a } -declare <vscale x 2 x double> @llvm.experimental.constrained.trunc.nxv2f64(<vscale x 2 x double>, metadata) define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) strictfp { ; CHECK-LABEL: trunc_nxv4f64: @@ -293,10 +280,9 @@ define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 4 x double> @llvm.experimental.constrained.trunc.nxv4f64(<vscale x 4 x double> %x, metadata !"fpexcept.strict") + %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 4 x double> %a } -declare <vscale x 4 x double> @llvm.experimental.constrained.trunc.nxv4f64(<vscale x 4 x double>, metadata) define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) strictfp { ; CHECK-LABEL: trunc_nxv8f64: @@ -314,7 +300,6 @@ define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) strictfp { ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: ret - %a = call <vscale x 8 x double> @llvm.experimental.constrained.trunc.nxv8f64(<vscale x 8 x double> %x, metadata !"fpexcept.strict") + %a = call <vscale x 8 x double> @llvm.trunc.nxv8f64(<vscale x 8 x double> %x) [ "fpe.except"(metadata !"strict") ] ret <vscale x 8 x double> %a } -declare <vscale x 8 x double> @llvm.experimental.constrained.trunc.nxv8f64(<vscale x 8 x double>, metadata)
diff --git a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll index 3efa9e58e65d3..f8046674754d5 100644 --- a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll @@ -204,8 +204,6 @@ define half @ceil_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.trunc.f16(half, metadata) - define half @trunc_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: trunc_f16: ; RV32IZFH: # %bb.0: @@ -272,7 +270,7 @@ define half @trunc_f16(half %a) nounwind strictfp { ; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZDINXZHINX-NEXT: addi sp, sp, 16 ; RV64IZDINXZHINX-NEXT: ret - %1 = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict") strictfp + %1 = call half @llvm.trunc.f16(half %a) strictfp [ "fpe.except"(metadata !"strict") ] ret half %1 }
diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll index 214ea46d3130d..de0394a962592 100644 --- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll @@ -216,8 +216,6 @@ define half @ceil_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.trunc.f16(half, metadata) - define half @trunc_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: trunc_f16: ; RV32IZFHMIN: # %bb.0: @@ -284,7 +282,7 @@ define half @trunc_f16(half %a) nounwind strictfp { ; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16 ; RV64IZDINXZHINXMIN-NEXT: ret - %1 = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict") strictfp + %1 = call half @llvm.trunc.f16(half %a) strictfp [ "fpe.except"(metadata !"strict") ] ret half %1 }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll b/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll index 1fbb1790c01dc..3f031745c2b61 100644 --- a/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll +++ b/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll @@ -161,39 +161,30 @@ define void @f12(ptr %ptr) #0 { } ; Test trunc for f32. -declare float @llvm.experimental.constrained.trunc.f32(float, metadata) define float @f13(float %f) #0 { ; CHECK-LABEL: f13: ; CHECK: brasl %r14, truncf@PLT ; CHECK: br %r14 - %res = call float @llvm.experimental.constrained.trunc.f32( - float %f, - metadata !"fpexcept.strict") #0 + %res = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ] ret float %res } ; Test trunc for f64. -declare double @llvm.experimental.constrained.trunc.f64(double, metadata) define double @f14(double %f) #0 { ; CHECK-LABEL: f14: ; CHECK: brasl %r14, trunc@PLT ; CHECK: br %r14 - %res = call double @llvm.experimental.constrained.trunc.f64( - double %f, - metadata !"fpexcept.strict") #0 + %res = call double @llvm.trunc.f64(double %f) #0 [ "fpe.except"(metadata !"strict") ] ret double %res } ; Test trunc for f128.
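trunc is insensitive to the rounding mode, so its upgraded form carries only an "fpe.except" bundle; rounding-dependent operations keep an "fpe.control" bundle as well, as the auto-upgrade CHECK lines earlier in this patch show. A sketch of that case, with an illustrative function name not taken from the patch:

define float @fmul_example(float %a, float %b) strictfp {
  ; dynamic rounding mode and strict exception semantics travel as two bundles
  %r = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
  ret float %r
}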
-declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata) define void @f15(ptr %ptr) #0 { ; CHECK-LABEL: f15: ; CHECK: brasl %r14, truncl@PLT ; CHECK: br %r14 %src = load fp128, ptr %ptr - %res = call fp128 @llvm.experimental.constrained.trunc.f128( - fp128 %src, - metadata !"fpexcept.strict") #0 + %res = call fp128 @llvm.trunc.f128(fp128 %src) #0 [ "fpe.except"(metadata !"strict") ] store fp128 %res, ptr %ptr ret void } diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll b/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll index bc304a3fb95fb..8f56f552661fd 100644 --- a/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll +++ b/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll @@ -165,39 +165,30 @@ define void @f12(ptr %ptr) #0 { } ; Test trunc for f32. -declare float @llvm.experimental.constrained.trunc.f32(float, metadata) define float @f13(float %f) #0 { ; CHECK-LABEL: f13: ; CHECK: fiebra %f0, 5, %f0, 4 ; CHECK: br %r14 - %res = call float @llvm.experimental.constrained.trunc.f32( - float %f, - metadata !"fpexcept.strict") #0 + %res = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ] ret float %res } ; Test trunc for f64. -declare double @llvm.experimental.constrained.trunc.f64(double, metadata) define double @f14(double %f) #0 { ; CHECK-LABEL: f14: ; CHECK: fidbra %f0, 5, %f0, 4 ; CHECK: br %r14 - %res = call double @llvm.experimental.constrained.trunc.f64( - double %f, - metadata !"fpexcept.strict") #0 + %res = call double @llvm.trunc.f64(double %f) #0 [ "fpe.except"(metadata !"strict") ] ret double %res } ; Test trunc for f128. -declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata) define void @f15(ptr %ptr) #0 { ; CHECK-LABEL: f15: ; CHECK: fixbra %f0, 5, %f0, 4 ; CHECK: br %r14 %src = load fp128, ptr %ptr - %res = call fp128 @llvm.experimental.constrained.trunc.f128( - fp128 %src, - metadata !"fpexcept.strict") #0 + %res = call fp128 @llvm.trunc.f128(fp128 %src) #0 [ "fpe.except"(metadata !"strict") ] store fp128 %res, ptr %ptr ret void } diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll index 2cdff7d5c425e..df207c6b01a58 100644 --- a/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll +++ b/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll @@ -169,31 +169,24 @@ define void @f12(ptr %ptr) #0 { } ; Test trunc for f32. -declare float @llvm.experimental.constrained.trunc.f32(float, metadata) define float @f13(float %f) #0 { ; CHECK-LABEL: f13: ; CHECK: fiebra %f0, 5, %f0, 4 ; CHECK: br %r14 - %res = call float @llvm.experimental.constrained.trunc.f32( - float %f, - metadata !"fpexcept.strict") #0 + %res = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ] ret float %res } ; Test trunc for f64. -declare double @llvm.experimental.constrained.trunc.f64(double, metadata) define double @f14(double %f) #0 { ; CHECK-LABEL: f14: ; CHECK: fidbra %f0, 5, %f0, 4 ; CHECK: br %r14 - %res = call double @llvm.experimental.constrained.trunc.f64( - double %f, - metadata !"fpexcept.strict") #0 + %res = call double @llvm.trunc.f64(double %f) #0 [ "fpe.except"(metadata !"strict") ] ret double %res } ; Test trunc for f128. 
-declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata) define void @f15(ptr %ptr) #0 { ; CHECK-LABEL: f15: ; CHECK: vl [[REG:%v[0-9]+]], 0(%r2) @@ -201,9 +194,7 @@ define void @f15(ptr %ptr) #0 { ; CHECK: vst [[RES]], 0(%r2) ; CHECK: br %r14 %src = load fp128, ptr %ptr - %res = call fp128 @llvm.experimental.constrained.trunc.f128( - fp128 %src, - metadata !"fpexcept.strict") #0 + %res = call fp128 @llvm.trunc.f128(fp128 %src) #0 [ "fpe.except"(metadata !"strict") ] store fp128 %res, ptr %ptr ret void } diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll b/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll index b82cb8082b7b8..a24a2d9f79193 100644 --- a/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll +++ b/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll @@ -6,13 +6,11 @@ declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadat declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.floor.f64(double, metadata) declare double @llvm.experimental.constrained.ceil.f64(double, metadata) -declare double @llvm.experimental.constrained.trunc.f64(double, metadata) declare double @llvm.experimental.constrained.round.f64(double, metadata) declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata) declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata) declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata) declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata) -declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata) declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata) define <2 x double> @f1(<2 x double> %val) #0 { @@ -61,9 +59,7 @@ define <2 x double> @f5(<2 x double> %val) #0 { ; CHECK-LABEL: f5: ; CHECK: vfidb %v24, %v24, 4, 5 ; CHECK: br %r14 - %res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64( - <2 x double> %val, - metadata !"fpexcept.strict") #0 + %res = call <2 x double> @llvm.trunc.v2f64(<2 x double> %val) #0 [ "fpe.except"(metadata !"strict") ] ret <2 x double> %res } @@ -129,9 +125,7 @@ define double @f11(<2 x double> %val) #0 { ; CHECK: wfidb %f0, %v24, 4, 5 ; CHECK: br %r14 %scalar = extractelement <2 x double> %val, i32 0 - %res = call double @llvm.experimental.constrained.trunc.f64( - double %scalar, - metadata !"fpexcept.strict") #0 + %res = call double @llvm.trunc.f64(double %scalar) #0 [ "fpe.except"(metadata !"strict") ] ret double %res } diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll b/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll index 701dd5b2302f2..6db7d03cb82e2 100644 --- a/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll +++ b/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll @@ -6,13 +6,11 @@ declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.floor.f32(float, metadata) declare float @llvm.experimental.constrained.ceil.f32(float, metadata) -declare float @llvm.experimental.constrained.trunc.f32(float, metadata) declare float @llvm.experimental.constrained.round.f32(float, metadata) declare <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float>, metadata, metadata) declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x 
float>, metadata, metadata) declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata) declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata) -declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata) declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata) define <4 x float> @f1(<4 x float> %val) #0 { @@ -61,9 +59,7 @@ define <4 x float> @f5(<4 x float> %val) #0 { ; CHECK-LABEL: f5: ; CHECK: vfisb %v24, %v24, 4, 5 ; CHECK: br %r14 - %res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32( - <4 x float> %val, - metadata !"fpexcept.strict") #0 + %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %val) #0 [ "fpe.except"(metadata !"strict") ] ret <4 x float> %res } @@ -128,9 +124,7 @@ define float @f11(<4 x float> %val) #0 { ; CHECK: wfisb %f0, %v24, 4, 5 ; CHECK: br %r14 %scalar = extractelement <4 x float> %val, i32 0 - %res = call float @llvm.experimental.constrained.trunc.f32( - float %scalar, - metadata !"fpexcept.strict") #0 + %res = call float @llvm.trunc.f32(float %scalar) #0 [ "fpe.except"(metadata !"strict") ] ret float %res } diff --git a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll index b08f0e5a74d56..74afe4c6ae4b8 100644 --- a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll +++ b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll @@ -6071,9 +6071,7 @@ define <1 x float> @constrained_vector_trunc_v1f32(ptr %a) #0 { ; SZ13-NEXT: br %r14 entry: %b = load <1 x float>, ptr %a - %trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32( - <1 x float> %b, - metadata !"fpexcept.strict") #0 + %trunc = call <1 x float> @llvm.trunc.v1f32(<1 x float> %b) #0 [ "fpe.except"(metadata !"strict") ] ret <1 x float> %trunc } @@ -6108,9 +6106,7 @@ define <2 x double> @constrained_vector_trunc_v2f64(ptr %a) #0 { ; SZ13-NEXT: br %r14 entry: %b = load <2 x double>, ptr %a - %trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64( - <2 x double> %b, - metadata !"fpexcept.strict") #0 + %trunc = call <2 x double> @llvm.trunc.v2f64(<2 x double> %b) #0 [ "fpe.except"(metadata !"strict") ] ret <2 x double> %trunc } @@ -6163,9 +6159,7 @@ define <3 x float> @constrained_vector_trunc_v3f32(ptr %a) #0 { ; SZ13-NEXT: br %r14 entry: %b = load <3 x float>, ptr %a - %trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32( - <3 x float> %b, - metadata !"fpexcept.strict") #0 + %trunc = call <3 x float> @llvm.trunc.v3f32(<3 x float> %b) #0 [ "fpe.except"(metadata !"strict") ] ret <3 x float> %trunc } @@ -6215,9 +6209,7 @@ define void @constrained_vector_trunc_v3f64(ptr %a) #0 { ; SZ13-NEXT: br %r14 entry: %b = load <3 x double>, ptr %a - %trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64( - <3 x double> %b, - metadata !"fpexcept.strict") #0 + %trunc = call <3 x double> @llvm.trunc.v3f64(<3 x double> %b) #0 [ "fpe.except"(metadata !"strict") ] store <3 x double> %trunc, ptr %a ret void } @@ -6953,7 +6945,6 @@ declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata) declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata) declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata) -declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata) 
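Outside strictfp functions none of this machinery is needed: the plain intrinsic with no bundle assumes the default FP environment, and the bundle only encodes nondefault exception semantics for strict-FP code. An illustrative sketch, not part of the patch:

define <2 x double> @trunc_default(<2 x double> %v) {
  ; default environment: no strictfp attribute, no operand bundle
  %r = call <2 x double> @llvm.trunc.v2f64(<2 x double> %v)
  ret <2 x double> %r
}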
declare <1 x float> @llvm.experimental.constrained.fadd.v1f32(<1 x float>, <1 x float>, metadata, metadata) declare <1 x float> @llvm.experimental.constrained.fsub.v1f32(<1 x float>, <1 x float>, metadata, metadata) @@ -6981,7 +6972,6 @@ declare <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(<1 x float declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata) declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata) declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata) -declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata) declare <3 x float> @llvm.experimental.constrained.fadd.v3f32(<3 x float>, <3 x float>, metadata, metadata) declare <3 x double> @llvm.experimental.constrained.fadd.v3f64(<3 x double>, <3 x double>, metadata, metadata) @@ -7033,8 +7023,6 @@ declare <3 x float> @llvm.experimental.constrained.floor.v3f32(<3 x float>, meta declare <3 x double> @llvm.experimental.constrained.floor.v3f64(<3 x double>, metadata) declare <3 x float> @llvm.experimental.constrained.round.v3f32(<3 x float>, metadata) declare <3 x double> @llvm.experimental.constrained.round.v3f64(<3 x double>, metadata) -declare <3 x float> @llvm.experimental.constrained.trunc.v3f32(<3 x float>, metadata) -declare <3 x double> @llvm.experimental.constrained.trunc.v3f64(<3 x double>, metadata) declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata) declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata) @@ -7062,4 +7050,3 @@ declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata) declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata) declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata) -declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata) diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll index 3b9798a2af582..87aab3f9ad9c5 100644 --- a/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll +++ b/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll @@ -7,7 +7,6 @@ declare half @llvm.experimental.constrained.ceil.f16(half, metadata) declare half @llvm.experimental.constrained.floor.f16(half, metadata) -declare half @llvm.experimental.constrained.trunc.f16(half, metadata) declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata) declare half @llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata) declare half @llvm.experimental.constrained.roundeven.f16(half, metadata) @@ -122,8 +121,7 @@ define half @ftrunc32(half %f) #0 { ; X64: # %bb.0: ; X64-NEXT: vrndscalesh $11, %xmm0, %xmm0, %xmm0 ; X64-NEXT: retq - %res = call half @llvm.experimental.constrained.trunc.f16( - half %f, metadata !"fpexcept.strict") #0 + %res = call half @llvm.trunc.f16(half %f) #0 [ "fpe.except"(metadata !"strict") ] ret half %res } @@ -273,7 +271,7 @@ define half @fround16(half %f) #0 { ; X86-LABEL: fround16: ; X86: # %bb.0: ; X86-NEXT: subl $8, %esp -; X86-NEXT: vmovsh {{[0-9]+}}(%esp), %xmm0 +; X86-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero ; X86-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0 ; X86-NEXT: vmovss %xmm0, (%esp) ; X86-NEXT: calll roundf diff --git
a/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll index 13f890ae6e191..7235f0a95ef34 100644 --- a/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll +++ b/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll @@ -10,8 +10,6 @@ declare float @llvm.experimental.constrained.ceil.f32(float, metadata) declare double @llvm.experimental.constrained.ceil.f64(double, metadata) declare float @llvm.experimental.constrained.floor.f32(float, metadata) declare double @llvm.experimental.constrained.floor.f64(double, metadata) -declare float @llvm.experimental.constrained.trunc.f32(float, metadata) -declare double @llvm.experimental.constrained.trunc.f64(double, metadata) declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata) declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata) declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata) @@ -245,8 +243,7 @@ define float @ftrunc32(float %f) #0 { ; AVX-X64: # %bb.0: ; AVX-X64-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0 ; AVX-X64-NEXT: retq - %res = call float @llvm.experimental.constrained.trunc.f32( - float %f, metadata !"fpexcept.strict") #0 + %res = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ] ret float %res } @@ -298,8 +295,7 @@ define double @ftruncf64(double %f) #0 { ; AVX-X64: # %bb.0: ; AVX-X64-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0 ; AVX-X64-NEXT: retq - %res = call double @llvm.experimental.constrained.trunc.f64( - double %f, metadata !"fpexcept.strict") #0 + %res = call double @llvm.trunc.f64(double %f) #0 [ "fpe.except"(metadata !"strict") ] ret double %res } diff --git a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll index 5263e0d4f6f39..0705ee01aa380 100644 --- a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll +++ b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll @@ -1407,7 +1407,7 @@ define fp128 @trunc(fp128 %x) nounwind strictfp { ; X86-NEXT: popl %esi ; X86-NEXT: retl $4 entry: - %trunc = call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %x, metadata !"fpexcept.strict") #0 + %trunc = call fp128 @llvm.trunc.f128(fp128 %x) #0 [ "fpe.except"(metadata !"strict") ] ret fp128 %trunc } @@ -1993,7 +1993,6 @@ declare fp128 @llvm.experimental.constrained.atan.f128(fp128, metadata, metadata declare fp128 @llvm.experimental.constrained.atan2.f128(fp128, fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata) declare fp128 @llvm.experimental.constrained.tanh.f128(fp128, metadata, metadata) -declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata) declare i32 @llvm.experimental.constrained.lrint.i32.f128(fp128, metadata, metadata) declare i64 @llvm.experimental.constrained.llrint.i64.f128(fp128, metadata, metadata) declare i32 @llvm.experimental.constrained.lround.i32.f128(fp128, metadata) diff --git a/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll b/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll index 8bbc6247dbafd..a612c6a80e31e 100644 --- a/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll +++ b/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll @@ -729,7 +729,7 @@ define x86_fp80 @trunc(x86_fp80 %x) nounwind strictfp { ; X64-NEXT: addq $24, %rsp ; X64-NEXT: retq entry: - %trunc = call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0 + %trunc = call x86_fp80 @llvm.trunc.f80(x86_fp80 %x) #0 [ "fpe.except"(metadata !"strict") ] ret x86_fp80 %trunc 
} @@ -862,7 +862,6 @@ declare x86_fp80 @llvm.experimental.constrained.atan.f80(x86_fp80, metadata, met declare x86_fp80 @llvm.experimental.constrained.atan2.f80(x86_fp80, x86_fp80, metadata, metadata) declare x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80, metadata, metadata) declare x86_fp80 @llvm.experimental.constrained.tanh.f80(x86_fp80, metadata, metadata) -declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata) declare i32 @llvm.experimental.constrained.lrint.i32.f80(x86_fp80, metadata, metadata) declare i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80, metadata, metadata) declare i32 @llvm.experimental.constrained.lround.i32.f80(x86_fp80, metadata) diff --git a/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll index a2e02508327c8..e9f6cf3de8ad4 100644 --- a/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll +++ b/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll @@ -14,7 +14,6 @@ declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x doubl declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float>, metadata, metadata) declare <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half>, metadata) declare <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half>, metadata) -declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata) declare <16 x half> @llvm.experimental.constrained.rint.v16f16(<16 x half>, metadata, metadata) declare <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(<16 x half>, metadata, metadata) @@ -160,8 +159,7 @@ define <16 x half> @ftruncv16f16(<16 x half> %f) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: vrndscaleph $11, %ymm0, %ymm0 ; CHECK-NEXT: ret{{[l|q]}} - %res = call <16 x half> @llvm.experimental.constrained.trunc.v16f16( - <16 x half> %f, metadata !"fpexcept.strict") #0 + %res = call <16 x half> @llvm.trunc.v16f16(<16 x half> %f) #0 [ "fpe.except"(metadata !"strict") ] ret <16 x half> %res } diff --git a/llvm/test/CodeGen/X86/vec-strict-256.ll b/llvm/test/CodeGen/X86/vec-strict-256.ll index 5945e6c1bc66e..d89996db74288 100644 --- a/llvm/test/CodeGen/X86/vec-strict-256.ll +++ b/llvm/test/CodeGen/X86/vec-strict-256.ll @@ -22,8 +22,6 @@ declare <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float>, metad declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata) declare <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float>, metadata) declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata) -declare <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata) -declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata) declare <8 x float> @llvm.experimental.constrained.rint.v8f32(<8 x float>, metadata, metadata) declare <4 x double> @llvm.experimental.constrained.rint.v4f64(<4 x double>, metadata, metadata) declare <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(<8 x float>, metadata, metadata) @@ -234,8 +232,7 @@ define <8 x float> @ftruncv8f32(<8 x float> %f) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: vroundps $11, %ymm0, %ymm0 ; CHECK-NEXT: ret{{[l|q]}} - %res = call <8 x float> @llvm.experimental.constrained.trunc.v8f32( - <8 x float> %f, metadata !"fpexcept.strict") #0 + %res = call <8 x float> @llvm.trunc.v8f32(<8 x float> %f) #0 [ "fpe.except"(metadata !"strict") ] ret <8 x float> %res } @@ -244,8 +241,7 @@ define <4 x double> @ftruncv4f64(<4 x double> %f) #0 
diff --git a/llvm/test/CodeGen/X86/vec-strict-256.ll b/llvm/test/CodeGen/X86/vec-strict-256.ll
index 5945e6c1bc66e..d89996db74288 100644
--- a/llvm/test/CodeGen/X86/vec-strict-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-256.ll
@@ -22,8 +22,6 @@ declare <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float>, metad
 declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
 declare <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float>, metadata)
 declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
-declare <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata)
-declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
 declare <8 x float> @llvm.experimental.constrained.rint.v8f32(<8 x float>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.rint.v4f64(<4 x double>, metadata, metadata)
 declare <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(<8 x float>, metadata, metadata)
@@ -234,8 +232,7 @@ define <8 x float> @ftruncv8f32(<8 x float> %f) #0 {
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vroundps $11, %ymm0, %ymm0
 ; CHECK-NEXT: ret{{[l|q]}}
-  %res = call <8 x float> @llvm.experimental.constrained.trunc.v8f32(
-      <8 x float> %f, metadata !"fpexcept.strict") #0
+  %res = call <8 x float> @llvm.trunc.v8f32(<8 x float> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <8 x float> %res
 }
@@ -244,8 +241,7 @@ define <4 x double> @ftruncv4f64(<4 x double> %f) #0 {
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vroundpd $11, %ymm0, %ymm0
 ; CHECK-NEXT: ret{{[l|q]}}
-  %res = call <4 x double> @llvm.experimental.constrained.trunc.v4f64(
-      <4 x double> %f, metadata !"fpexcept.strict") #0
+  %res = call <4 x double> @llvm.trunc.v4f64(<4 x double> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <4 x double> %res
 }
diff --git a/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll
index dfbc11a43d3d7..dac0195543ac7 100644
--- a/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll
@@ -14,7 +14,6 @@ declare <16 x half> @llvm.experimental.constrained.fptrunc.v16f16.v16f32(<16 x f
 declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata)
 declare <32 x half> @llvm.experimental.constrained.ceil.v32f16(<32 x half>, metadata)
 declare <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half>, metadata)
-declare <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half>, metadata)
 declare <32 x half> @llvm.experimental.constrained.rint.v32f16(<32 x half>, metadata, metadata)
 declare <32 x half> @llvm.experimental.constrained.nearbyint.v32f16(<32 x half>, metadata, metadata)
@@ -155,7 +154,7 @@ define <32 x half> @strict_vector_ftrunc_v32f16(<32 x half> %f) #0 {
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vrndscaleph $11, %zmm0, %zmm0
 ; CHECK-NEXT: ret{{[l|q]}}
-  %res = call <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half> %f, metadata !"fpexcept.strict") #0
+  %res = call <32 x half> @llvm.trunc.v32f16(<32 x half> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <32 x half> %res
 }
diff --git a/llvm/test/CodeGen/X86/vec-strict-512.ll b/llvm/test/CodeGen/X86/vec-strict-512.ll
index 2cafd74af4953..43abaaa3d7d8f 100644
--- a/llvm/test/CodeGen/X86/vec-strict-512.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-512.ll
@@ -20,8 +20,6 @@ declare <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float>, me
 declare <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double>, metadata)
 declare <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float>, metadata)
 declare <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double>, metadata)
-declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata)
-declare <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double>, metadata)
 declare <16 x float> @llvm.experimental.constrained.rint.v16f32(<16 x float>, metadata, metadata)
 declare <8 x double> @llvm.experimental.constrained.rint.v8f64(<8 x double>, metadata, metadata)
 declare <16 x float> @llvm.experimental.constrained.nearbyint.v16f32(<16 x float>, metadata, metadata)
@@ -227,7 +225,7 @@ define <16 x float> @strict_vector_ftrunc_v16f32(<16 x float> %f) #0 {
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vrndscaleps $11, %zmm0, %zmm0
 ; CHECK-NEXT: ret{{[l|q]}}
-  %res = call <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float> %f, metadata !"fpexcept.strict") #0
+  %res = call <16 x float> @llvm.trunc.v16f32(<16 x float> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <16 x float> %res
 }
@@ -236,7 +234,7 @@ define <8 x double> @strict_vector_ftrunc_v8f64(<8 x double> %f) #0 {
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vrndscalepd $11, %zmm0, %zmm0
 ; CHECK-NEXT: ret{{[l|q]}}
-  %res = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %f, metadata !"fpexcept.strict") #0
+  %res = call <8 x double> @llvm.trunc.v8f64(<8 x double> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <8 x double> %res
 }
diff --git a/llvm/test/CodeGen/X86/vec-strict-round-128.ll b/llvm/test/CodeGen/X86/vec-strict-round-128.ll
index 1f7507cc02bc5..403731057618e 100644
--- a/llvm/test/CodeGen/X86/vec-strict-round-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-round-128.ll
@@ -10,8 +10,6 @@ declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metad
 declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
 declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
-declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 declare <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
@@ -87,8 +85,7 @@ define <4 x float> @ftruncv4f32(<4 x float> %f) #0 {
 ; AVX: # %bb.0:
 ; AVX-NEXT: vroundps $11, %xmm0, %xmm0
 ; AVX-NEXT: ret{{[l|q]}}
-  %res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
-      <4 x float> %f, metadata !"fpexcept.strict") #0
+  %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <4 x float> %res
 }
@@ -102,8 +99,7 @@ define <2 x double> @ftruncv2f64(<2 x double> %f) #0 {
 ; AVX: # %bb.0:
 ; AVX-NEXT: vroundpd $11, %xmm0, %xmm0
 ; AVX-NEXT: ret{{[l|q]}}
-  %res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-      <2 x double> %f, metadata !"fpexcept.strict") #0
+  %res = call <2 x double> @llvm.trunc.v2f64(<2 x double> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %res
 }
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index 49062eaef3188..e7bb0744b86d4 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -6372,9 +6372,7 @@ define <1 x float> @constrained_vector_trunc_v1f32_var(ptr %a) #0 {
 ; AVX-NEXT: retq
 entry:
   %b = load <1 x float>, ptr %a
-  %trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
-      <1 x float> %b,
-      metadata !"fpexcept.strict") #0
+  %trunc = call <1 x float> @llvm.trunc.v1f32(<1 x float> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <1 x float> %trunc
 }
@@ -6403,9 +6401,7 @@ define <2 x double> @constrained_vector_trunc_v2f64_var(ptr %a) #0 {
 ; AVX-NEXT: retq
 entry:
   %b = load <2 x double>, ptr %a
-  %trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-      <2 x double> %b,
-      metadata !"fpexcept.strict") #0
+  %trunc = call <2 x double> @llvm.trunc.v2f64(<2 x double> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %trunc
 }
@@ -6446,9 +6442,7 @@ define <3 x float> @constrained_vector_trunc_v3f32_var(ptr %a) #0 {
 ; AVX-NEXT: retq
 entry:
   %b = load <3 x float>, ptr %a
-  %trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
-      <3 x float> %b,
-      metadata !"fpexcept.strict") #0
+  %trunc = call <3 x float> @llvm.trunc.v3f32(<3 x float> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <3 x float> %trunc
 }
@@ -6490,9 +6484,7 @@ define <3 x double> @constrained_vector_trunc_v3f64_var(ptr %a) #0 {
 ; AVX-NEXT: retq
 entry:
   %b = load <3 x double>, ptr %a
-  %trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
-      <3 x double> %b,
-      metadata !"fpexcept.strict") #0
+  %trunc = call <3 x double> @llvm.trunc.v3f64(<3 x double> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <3 x double> %trunc
 }
@@ -9975,7 +9967,6 @@ declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float
 declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
@@ -10025,7 +10016,6 @@ declare <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(<1 x float
 declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata)
-declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata)
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
@@ -10104,8 +10094,6 @@ declare <3 x float> @llvm.experimental.constrained.floor.v3f32(<3 x float>, meta
 declare <3 x double> @llvm.experimental.constrained.floor.v3f64(<3 x double>, metadata)
 declare <3 x float> @llvm.experimental.constrained.round.v3f32(<3 x float>, metadata)
 declare <3 x double> @llvm.experimental.constrained.round.v3f64(<3 x double>, metadata)
-declare <3 x float> @llvm.experimental.constrained.trunc.v3f32(<3 x float>, metadata)
-declare <3 x double> @llvm.experimental.constrained.trunc.v3f64(<3 x double>, metadata)
 declare <3 x double> @llvm.experimental.constrained.sitofp.v3f64.v3i32(<3 x i32>, metadata, metadata)
 declare <3 x float> @llvm.experimental.constrained.sitofp.v3f32.v3i32(<3 x i32>, metadata, metadata)
 declare <3 x double> @llvm.experimental.constrained.sitofp.v3f64.v3i64(<3 x i64>, metadata, metadata)
@@ -10156,7 +10144,6 @@ declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float
 declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata)
-declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
diff --git a/llvm/test/Transforms/Attributor/nofpclass.ll b/llvm/test/Transforms/Attributor/nofpclass.ll
index b97454a29d513..4cedbaf2a36a3 100644
--- a/llvm/test/Transforms/Attributor/nofpclass.ll
+++ b/llvm/test/Transforms/Attributor/nofpclass.ll
@@ -1937,7 +1937,7 @@ define float @constrained_sitofp(i32 %arg) strictfp {
 ; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite)
 ; CHECK-LABEL: define nofpclass(nan nzero sub) float @constrained_sitofp
 ; CHECK-SAME: (i32 [[ARG:%.*]]) #[[ATTR8:[0-9]+]] {
-; CHECK-NEXT: [[VAL:%.*]] = call nofpclass(nan nzero sub) float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[ARG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR20]]
+; CHECK-NEXT: [[VAL:%.*]] = call nofpclass(nan nzero sub) float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[ARG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR23:[0-9]+]]
 ; CHECK-NEXT: ret float [[VAL]]
 ;
   %val = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %arg, metadata !"round.dynamic", metadata !"fpexcept.strict")
@@ -1948,7 +1948,7 @@ define float @constrained_uitofp(i32 %arg) strictfp {
 ; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite)
 ; CHECK-LABEL: define nofpclass(nan ninf nzero sub nnorm) float @constrained_uitofp
 ; CHECK-SAME: (i32 [[ARG:%.*]]) #[[ATTR8]] {
-; CHECK-NEXT: [[VAL:%.*]] = call nofpclass(nan ninf nzero sub nnorm) float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[ARG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR20]]
+; CHECK-NEXT: [[VAL:%.*]] = call nofpclass(nan ninf nzero sub nnorm) float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[ARG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR23]]
 ; CHECK-NEXT: ret float [[VAL]]
 ;
   %val = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %arg, metadata !"round.dynamic", metadata !"fpexcept.strict")
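The Attributor churn above looks like test-only renumbering rather than a behavior change: after this patch the strictfp intrinsic call sites end up in a differently numbered attribute group, so the test recaptures the number (#[[ATTR23:[0-9]+]]) instead of reusing the stale #[[ATTR20]] binding. The groups in play look roughly like this (numbers illustrative; contents taken from the CHECK lines in these tests):

attributes #8 = { mustprogress nofree norecurse nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite) }
attributes #23 = { strictfp memory(inaccessiblemem: readwrite) }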
diff --git a/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll b/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll
index 3871822c9dc17..90904ead23e04 100644
--- a/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll
@@ -8,7 +8,7 @@ define double @multiple_fadd(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fadd(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -20,8 +20,8 @@ define double @multiple_fadd_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fadd_split(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -34,7 +34,7 @@ define double @multiple_fadd_split(double %a, double %b) #0 {
 define double @multiple_fsub(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fsub(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -46,8 +46,8 @@ define double @multiple_fsub_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fsub_split(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -60,7 +60,7 @@ define double @multiple_fmul(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fmul(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -72,8 +72,8 @@ define double @multiple_fmul_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fmul_split(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -86,7 +86,7 @@ define double @multiple_fdiv(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fdiv(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -98,8 +98,8 @@ define double @multiple_fdiv_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fdiv_split(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -112,7 +112,7 @@ define double @multiple_frem(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_frem(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -124,8 +124,8 @@ define double @multiple_frem_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_frem_split(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -138,7 +138,7 @@ define i32 @multiple_fptoui(double %a) #0 {
 ; CHECK-LABEL: @multiple_fptoui(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT: ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -150,8 +150,8 @@ define i32 @multiple_fptoui_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fptoui_split(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT: ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -164,7 +164,7 @@ define double @multiple_uitofp(i32 %a) #0 {
 ; CHECK-LABEL: @multiple_uitofp(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -176,8 +176,8 @@ define double @multiple_uitofp_split(i32 %a) #0 {
 ; CHECK-LABEL: @multiple_uitofp_split(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -190,7 +190,7 @@ define i32 @multiple_fptosi(double %a) #0 {
 ; CHECK-LABEL: @multiple_fptosi(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT: ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -202,8 +202,8 @@ define i32 @multiple_fptosi_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fptosi_split(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT: ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -216,7 +216,7 @@ define double @multiple_sitofp(i32 %a) #0 {
 ; CHECK-LABEL: @multiple_sitofp(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -228,8 +228,8 @@ define double @multiple_sitofp_split(i32 %a) #0 {
 ; CHECK-LABEL: @multiple_sitofp_split(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -243,7 +243,7 @@ define i1 @multiple_fcmp(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fcmp(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
 ; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -257,9 +257,9 @@ define i1 @multiple_fcmp_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fcmp_split(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
 ; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
 ; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -275,7 +275,7 @@ define i1 @multiple_fcmps(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fcmps(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
 ; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -289,9 +289,9 @@ define i1 @multiple_fcmps_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fcmps_split(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
 ; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
 ; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -304,6 +304,7 @@ define i1 @multiple_fcmps_split(double %a, double %b) #0 {
 }
 
 attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
 
 declare void @arbitraryfunc() #0
 declare double @foo.f64(double, double) #0
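These defaultfp tests pin down the interesting EarlyCSE property: in the default environment ("round.tonearest", "fpexcept.ignore"), two identical constrained calls are redundant, so the second one is eliminated, which is why every CHECK block above returns [[TMP1]]. A reduced sketch of the input/output pair (names illustrative):

  %x = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
  %y = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
  ; after EarlyCSE only %x remains; uses of %y are rewritten to %x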
diff --git a/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll b/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll
index f2675ce7816a4..6183b7c1558a5 100644
--- a/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll
@@ -10,7 +10,7 @@ define double @fadd_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @fadd_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -23,7 +23,7 @@ define double @fsub_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @fsub_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -36,7 +36,7 @@ define double @fmul_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @fmul_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -49,7 +49,7 @@ define double @fdiv_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @fdiv_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -62,7 +62,7 @@ define double @frem_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @frem_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -75,7 +75,7 @@ define i32 @fptoui_strict(double %a) #0 {
 ; CHECK-LABEL: @fptoui_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A]], metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT: ret i32 [[TMP2]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict") #0
@@ -88,7 +88,7 @@ define double @uitofp_strict(i32 %a) #0 {
 ; CHECK-LABEL: @uitofp_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -101,7 +101,7 @@ define i32 @fptosi_strict(double %a) #0 {
 ; CHECK-LABEL: @fptosi_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A]], metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT: ret i32 [[TMP2]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict") #0
@@ -114,7 +114,7 @@ define double @sitofp_strict(i32 %a) #0 {
 ; CHECK-LABEL: @sitofp_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -129,7 +129,7 @@ define i1 @fcmp_strict(double %a, double %b) #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
 ; CHECK-NEXT: ret i1 [[TMP2]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
@@ -146,7 +146,7 @@ define i1 @fcmps_strict(double %a, double %b) #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
 ; CHECK-NEXT: ret i1 [[TMP2]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
@@ -158,6 +158,7 @@ define i1 @fcmps_strict(double %a, double %b) #0 {
 }
 
 attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
 
 declare void @arbitraryfunc() #0
 declare double @foo.f64(double, double) #0
diff --git a/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll b/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll
index b79f7018b8d0d..61e976ce42817 100644
--- a/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll
@@ -10,7 +10,7 @@ define double @mixed_fadd_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fadd_neginf(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -23,7 +23,7 @@ define double @mixed_fadd_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fadd_maytrap(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -36,7 +36,7 @@ define double @mixed_fadd_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fadd_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -49,7 +49,7 @@ define double @mixed_fsub_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fsub_neginf(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -62,7 +62,7 @@ define double @mixed_fsub_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fsub_maytrap(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -75,7 +75,7 @@ define double @mixed_fsub_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fsub_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -88,7 +88,7 @@ define double @mixed_fmul_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fmul_neginf(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -100,7 +100,7 @@ define double @mixed_fmul_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fmul_maytrap(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -113,7 +113,7 @@ define double @mixed_fmul_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fmul_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -126,7 +126,7 @@ define double @mixed_fdiv_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fdiv_neginf(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -139,7 +139,7 @@ define double @mixed_fdiv_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fdiv_maytrap(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -152,7 +152,7 @@ define double @mixed_fdiv_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fdiv_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -165,7 +165,7 @@ define double @mixed_frem_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_frem_neginf(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -178,7 +178,7 @@ define double @mixed_frem_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_frem_maytrap(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -191,7 +191,7 @@ define double @mixed_frem_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_frem_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -204,7 +204,7 @@ define i32 @mixed_fptoui_maytrap(double %a) #0 {
 ; CHECK-LABEL: @mixed_fptoui_maytrap(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A]], metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT: ret i32 [[TMP2]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -217,7 +217,7 @@ define i32 @mixed_fptoui_strict(double %a) #0 {
 ; CHECK-LABEL: @mixed_fptoui_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A]], metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT: ret i32 [[TMP2]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -230,7 +230,7 @@ define double @mixed_uitofp_neginf(i32 %a) #0 {
 ; CHECK-LABEL: @mixed_uitofp_neginf(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -243,7 +243,7 @@ define double @mixed_uitofp_maytrap(i32 %a) #0 {
 ; CHECK-LABEL: @mixed_uitofp_maytrap(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -256,7 +256,7 @@ define double @mixed_uitofp_strict(i32 %a) #0 {
 ; CHECK-LABEL: @mixed_uitofp_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -269,7 +269,7 @@ define i32 @mixed_fptosi_maytrap(double %a) #0 {
 ; CHECK-LABEL: @mixed_fptosi_maytrap(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A]], metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT: ret i32 [[TMP2]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -282,7 +282,7 @@ define i32 @mixed_fptosi_strict(double %a) #0 {
 ; CHECK-LABEL: @mixed_fptosi_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A]], metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT: ret i32 [[TMP2]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -295,7 +295,7 @@ define double @mixed_sitofp_neginf(i32 %a) #0 {
 ; CHECK-LABEL: @mixed_sitofp_neginf(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -308,7 +308,7 @@ define double @mixed_sitofp_maytrap(i32 %a) #0 {
 ; CHECK-LABEL: @mixed_sitofp_maytrap(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -321,7 +321,7 @@ define double @mixed_sitofp_strict(i32 %a) #0 {
 ; CHECK-LABEL: @mixed_sitofp_strict(
 ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT: ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -336,7 +336,7 @@ define i1 @mixed_fcmp_maytrap(double %a, double %b) #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.maytrap") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
 ; CHECK-NEXT: ret i1 [[TMP2]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -353,7 +353,7 @@ define i1 @mixed_fcmp_strict(double %a, double %b) #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
 ; CHECK-NEXT: ret i1 [[TMP2]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -370,7 +370,7 @@ define i1 @mixed_fcmps_maytrap(double %a, double %b) #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.maytrap") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
 ; CHECK-NEXT: ret i1 [[TMP2]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -387,7 +387,7 @@ define i1 @mixed_fcmps_strict(double %a, double %b) #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
 ; CHECK-NEXT: ret i1 [[TMP2]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -399,6 +399,7 @@ define i1 @mixed_fcmps_strict(double %a, double %b) #0 {
 }
 
 attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
 
 declare void @arbitraryfunc() #0
 declare double @foo.f64(double, double) #0
@llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 @@ -58,7 +58,7 @@ define double @fsub_defaultenv(double %a, double %b) #0 { define double @fsub_neginf(double %a, double %b) #0 { ; CHECK-LABEL: @fsub_neginf( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0 @@ -70,7 +70,7 @@ define double @fsub_neginf(double %a, double %b) #0 { define double @fsub_maytrap(double %a, double %b) #0 { ; CHECK-LABEL: @fsub_maytrap( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 @@ -82,7 +82,7 @@ define double @fsub_maytrap(double %a, double %b) #0 { define double @fmul_defaultenv(double %a, double %b) #0 { ; CHECK-LABEL: @fmul_defaultenv( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 @@ -94,7 +94,7 @@ define double @fmul_defaultenv(double %a, double %b) #0 { define double @fmul_neginf(double %a, double %b) #0 { ; CHECK-LABEL: @fmul_neginf( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0 @@ -105,7 +105,7 @@ define double @fmul_neginf(double %a, double %b) #0 { define double @fmul_maytrap(double %a, double %b) #0 { ; CHECK-LABEL: @fmul_maytrap( ; CHECK-NEXT: [[TMP1:%.*]] = call double 
@llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 @@ -117,7 +117,7 @@ define double @fmul_maytrap(double %a, double %b) #0 { define double @fdiv_defaultenv(double %a, double %b) #0 { ; CHECK-LABEL: @fdiv_defaultenv( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 @@ -129,7 +129,7 @@ define double @fdiv_defaultenv(double %a, double %b) #0 { define double @fdiv_neginf(double %a, double %b) #0 { ; CHECK-LABEL: @fdiv_neginf( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0 @@ -141,7 +141,7 @@ define double @fdiv_neginf(double %a, double %b) #0 { define double @fdiv_maytrap(double %a, double %b) #0 { ; CHECK-LABEL: @fdiv_maytrap( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 @@ -153,7 +153,7 @@ define double @fdiv_maytrap(double %a, double %b) #0 { define double @frem_defaultenv(double %a, double %b) #0 { ; CHECK-LABEL: @frem_defaultenv( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 @@ -165,7 +165,7 @@ define double @frem_defaultenv(double %a, double %b) #0 { define double @frem_neginf(double %a, double %b) #0 { ; CHECK-LABEL: @frem_neginf( ; CHECK-NEXT: [[TMP1:%.*]] = call double 
@llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0 @@ -177,7 +177,7 @@ define double @frem_neginf(double %a, double %b) #0 { define double @frem_maytrap(double %a, double %b) #0 { ; CHECK-LABEL: @frem_maytrap( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 @@ -189,7 +189,7 @@ define double @frem_maytrap(double %a, double %b) #0 { define i32 @fptoui_defaultenv(double %a) #0 { ; CHECK-LABEL: @fptoui_defaultenv( ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0 ; CHECK-NEXT: ret i32 [[TMP1]] ; %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0 @@ -201,7 +201,7 @@ define i32 @fptoui_defaultenv(double %a) #0 { define i32 @fptoui_maytrap(double %a) #0 { ; CHECK-LABEL: @fptoui_maytrap( ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.maytrap") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0 ; CHECK-NEXT: ret i32 [[TMP1]] ; %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.maytrap") #0 @@ -213,7 +213,7 @@ define i32 @fptoui_maytrap(double %a) #0 { define double @uitofp_defaultenv(i32 %a) #0 { ; CHECK-LABEL: @uitofp_defaultenv( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 @@ -225,7 +225,7 @@ define double @uitofp_defaultenv(i32 %a) #0 { define double @uitofp_neginf(i32 %a) #0 { ; CHECK-LABEL: @uitofp_neginf( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) 
#0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.downward", metadata !"fpexcept.ignore") #0 @@ -237,7 +237,7 @@ define double @uitofp_neginf(i32 %a) #0 { define double @uitofp_maytrap(i32 %a) #0 { ; CHECK-LABEL: @uitofp_maytrap( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 @@ -249,7 +249,7 @@ define double @uitofp_maytrap(i32 %a) #0 { define i32 @fptosi_defaultenv(double %a) #0 { ; CHECK-LABEL: @fptosi_defaultenv( ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0 ; CHECK-NEXT: ret i32 [[TMP1]] ; %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0 @@ -261,7 +261,7 @@ define i32 @fptosi_defaultenv(double %a) #0 { define i32 @fptosi_maytrap(double %a) #0 { ; CHECK-LABEL: @fptosi_maytrap( ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.maytrap") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0 ; CHECK-NEXT: ret i32 [[TMP1]] ; %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.maytrap") #0 @@ -273,7 +273,7 @@ define i32 @fptosi_maytrap(double %a) #0 { define double @sitofp_defaultenv(i32 %a) #0 { ; CHECK-LABEL: @sitofp_defaultenv( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 @@ -285,7 +285,7 @@ define double @sitofp_defaultenv(i32 %a) #0 { define double @sitofp_neginf(i32 %a) #0 { ; CHECK-LABEL: @sitofp_neginf( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.downward", metadata !"fpexcept.ignore") #0 @@ -297,7 +297,7 @@ define double @sitofp_neginf(i32 %a) #0 { define double @sitofp_maytrap(i32 %a) #0 { ; CHECK-LABEL: @sitofp_maytrap( ; CHECK-NEXT: [[TMP1:%.*]] = call double 
@llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] -; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP1]] ; %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 @@ -310,7 +310,7 @@ define i1 @fcmp_defaultenv(double %a, double %b) #0 { ; CHECK-LABEL: @fcmp_defaultenv( ; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]] ; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32 -; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0 ; CHECK-NEXT: ret i1 [[TMP1]] ; %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0 @@ -325,7 +325,7 @@ define i1 @fcmp_maytrap(double %a, double %b) #0 { ; CHECK-LABEL: @fcmp_maytrap( ; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.maytrap") #[[ATTR0]] ; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32 -; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0 ; CHECK-NEXT: ret i1 [[TMP1]] ; %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0 @@ -340,7 +340,7 @@ define i1 @fcmps_defaultenv(double %a, double %b) #0 { ; CHECK-LABEL: @fcmps_defaultenv( ; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]] ; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32 -; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0 ; CHECK-NEXT: ret i1 [[TMP1]] ; %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0 @@ -355,7 +355,7 @@ define i1 @fcmps_maytrap(double %a, double %b) #0 { ; CHECK-LABEL: @fcmps_maytrap( ; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.maytrap") #[[ATTR0]] ; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32 -; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0 ; CHECK-NEXT: ret i1 [[TMP1]] ; %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0 @@ -367,6 +367,7 @@ define i1 @fcmps_maytrap(double %a, double %b) #0 { } attributes #0 = { strictfp } +; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) } declare void @arbitraryfunc() #0 declare double @foo.f64(double, double) #0 diff --git a/llvm/test/Transforms/EarlyCSE/round-dyn-strictfp.ll b/llvm/test/Transforms/EarlyCSE/round-dyn-strictfp.ll index c33e022f53be2..b2cebfeb586e3 100644 --- 
a/llvm/test/Transforms/EarlyCSE/round-dyn-strictfp.ll +++ b/llvm/test/Transforms/EarlyCSE/round-dyn-strictfp.ll @@ -11,7 +11,7 @@ define double @multiple_fadd(double %a, double %b) #0 { ; CHECK-LABEL: @multiple_fadd( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]] ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -23,9 +23,9 @@ define double @multiple_fadd(double %a, double %b) #0 { define double @multiple_fadd_split(double %a, double %b) #0 { ; CHECK-LABEL: @multiple_fadd_split( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]] +; CHECK-NEXT: call void @arbitraryfunc() #0 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -39,7 +39,7 @@ define double @multiple_fsub(double %a, double %b) #0 { ; CHECK-LABEL: @multiple_fsub( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -51,9 +51,9 @@ define double @multiple_fsub(double %a, double %b) #0 { define double @multiple_fsub_split(double %a, double %b) #0 { ; CHECK-LABEL: @multiple_fsub_split( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]] +; CHECK-NEXT: call void @arbitraryfunc() #0 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], 
double [[TMP2]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -67,7 +67,7 @@ define double @multiple_fmul(double %a, double %b) #0 { ; CHECK-LABEL: @multiple_fmul( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -79,9 +79,9 @@ define double @multiple_fmul(double %a, double %b) #0 { define double @multiple_fmul_split(double %a, double %b) #0 { ; CHECK-LABEL: @multiple_fmul_split( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]] +; CHECK-NEXT: call void @arbitraryfunc() #0 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -95,7 +95,7 @@ define double @multiple_fdiv(double %a, double %b) #0 { ; CHECK-LABEL: @multiple_fdiv( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -107,9 +107,9 @@ define double @multiple_fdiv(double %a, double %b) #0 { define double @multiple_fdiv_split(double %a, double %b) #0 { ; CHECK-LABEL: @multiple_fdiv_split( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]] +; CHECK-NEXT: call void @arbitraryfunc() #0 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) 
#[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -123,7 +123,7 @@ define double @multiple_frem(double %a, double %b) #0 { ; CHECK-LABEL: @multiple_frem( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -135,9 +135,9 @@ define double @multiple_frem(double %a, double %b) #0 { define double @multiple_frem_split(double %a, double %b) #0 { ; CHECK-LABEL: @multiple_frem_split( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]] +; CHECK-NEXT: call void @arbitraryfunc() #0 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -151,7 +151,7 @@ define double @multiple_uitofp(i32 %a) #0 { ; CHECK-LABEL: @multiple_uitofp( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -163,9 +163,9 @@ define double @multiple_uitofp(i32 %a) #0 { define double @multiple_uitofp_split(i32 %a) #0 { ; CHECK-LABEL: @multiple_uitofp_split( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]] +; CHECK-NEXT: call void @arbitraryfunc() #0 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; 
CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -179,7 +179,7 @@ define double @multiple_sitofp(i32 %a) #0 { ; CHECK-LABEL: @multiple_sitofp( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -191,9 +191,9 @@ define double @multiple_sitofp(i32 %a) #0 { define double @multiple_sitofp_split(i32 %a) #0 { ; CHECK-LABEL: @multiple_sitofp_split( ; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]] +; CHECK-NEXT: call void @arbitraryfunc() #0 ; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] -; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 @@ -204,6 +204,7 @@ define double @multiple_sitofp_split(i32 %a) #0 { } attributes #0 = { strictfp } +; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) } declare void @arbitraryfunc() #0 declare double @foo.f64(double, double) #0 diff --git a/llvm/test/Transforms/EarlyCSE/tfpropagation.ll b/llvm/test/Transforms/EarlyCSE/tfpropagation.ll index d07c9627f9b52..53127bf5f3aec 100644 --- a/llvm/test/Transforms/EarlyCSE/tfpropagation.ll +++ b/llvm/test/Transforms/EarlyCSE/tfpropagation.ll @@ -68,10 +68,10 @@ define double @branching_exceptignore(i64 %a) #0 { ; CHECK-NEXT: [[CMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 1.000000e+00, double [[CONV1]], metadata !"ogt", metadata !"fpexcept.ignore") #[[ATTR0]] ; CHECK-NEXT: br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END3:%.*]] ; CHECK: if.then3: -; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #[[ATTR0]] +; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #0 ; CHECK-NEXT: br label [[OUT:%.*]] ; CHECK: if.end3: -; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #[[ATTR0]] +; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #0 ; CHECK-NEXT: br label [[OUT]] ; CHECK: out: ; CHECK-NEXT: ret double [[CONV1]] @@ -98,10 +98,10 @@ define double @branching_exceptignore_dynround(i64 %a) #0 { ; CHECK-NEXT: [[CMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 1.000000e+00, double [[CONV1]], metadata !"ogt", metadata !"fpexcept.ignore") #[[ATTR0]] ; CHECK-NEXT: br i1 [[CMP2]], label 
[[IF_THEN3:%.*]], label [[IF_END3:%.*]] ; CHECK: if.then3: -; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #[[ATTR0]] +; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #0 ; CHECK-NEXT: br label [[OUT:%.*]] ; CHECK: if.end3: -; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #[[ATTR0]] +; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #0 ; CHECK-NEXT: br label [[OUT]] ; CHECK: out: ; CHECK-NEXT: ret double [[CONV1]] @@ -128,10 +128,10 @@ define double @branching_maytrap(i64 %a) #0 { ; CHECK-NEXT: [[CMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 1.000000e+00, double [[CONV1]], metadata !"ogt", metadata !"fpexcept.maytrap") #[[ATTR0]] ; CHECK-NEXT: br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END3:%.*]] ; CHECK: if.then3: -; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #[[ATTR0]] +; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #0 ; CHECK-NEXT: br label [[OUT:%.*]] ; CHECK: if.end3: -; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #[[ATTR0]] +; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #0 ; CHECK-NEXT: br label [[OUT]] ; CHECK: out: ; CHECK-NEXT: ret double [[CONV1]] @@ -160,10 +160,10 @@ define double @branching_ebstrict(i64 %a) #0 { ; CHECK-NEXT: [[CMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 1.000000e+00, double [[CONV1]], metadata !"ogt", metadata !"fpexcept.strict") #[[ATTR0]] ; CHECK-NEXT: br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END3:%.*]] ; CHECK: if.then3: -; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 [[CMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 [[CMP2]]) #0 ; CHECK-NEXT: br label [[OUT:%.*]] ; CHECK: if.end3: -; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 [[CMP2]]) #[[ATTR0]] +; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 [[CMP2]]) #0 ; CHECK-NEXT: br label [[OUT]] ; CHECK: out: ; CHECK-NEXT: ret double [[CONV1]] @@ -190,5 +190,6 @@ declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, meta declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata) #0 attributes #0 = { strictfp } +; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) } declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata) strictfp diff --git a/llvm/test/Transforms/Inline/inline-strictfp.ll b/llvm/test/Transforms/Inline/inline-strictfp.ll index bc42fafd63943..5883002061c30 100644 --- a/llvm/test/Transforms/Inline/inline-strictfp.ll +++ b/llvm/test/Transforms/Inline/inline-strictfp.ll @@ -15,8 +15,8 @@ entry: %add = call float @llvm.experimental.constrained.fadd.f32(float %0, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0 ret float %add ; CHECK-LABEL: @host_02 -; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 -; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0 +; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]] +; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] } @@ -34,8 
+34,8 @@ entry: %add = call float @llvm.experimental.constrained.fadd.f32(float %0, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0 ret float %add ; CHECK-LABEL: @host_04 -; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.downward", metadata !"fpexcept.maytrap") #0 -; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0 +; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.downward", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] } @@ -77,8 +77,8 @@ entry: ret float %add ; CHECK-LABEL: @host_08 ; CHECK: call float @func_ext(float {{.*}}) #0 -; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 -; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0 +; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] } @@ -97,8 +97,8 @@ entry: %add = call double @llvm.experimental.constrained.fadd.f64(double %0, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0 ret double %add ; CHECK-LABEL: @host_10 -; CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(float {{.*}}, metadata !"fpexcept.ignore") #0 -; CHECK: call double @llvm.experimental.constrained.fadd.f64(double {{.*}}, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0 +; CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(float {{.*}}, metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK: call double @llvm.experimental.constrained.fadd.f64(double {{.*}}, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] } ; fcmp does not depend on rounding mode and has metadata argument. @@ -114,8 +114,8 @@ entry: %cmp = call i1 @inlined_11(float %a, float %b) #0 ret i1 %cmp ; CHECK-LABEL: @host_12 -; CHECK: call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0 -; CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float {{.*}}, metadata !"oeq", metadata !"fpexcept.ignore") #0 +; CHECK: call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float {{.*}}, metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]] } ; Intrinsic 'ceil' has constrained variant. 
@@ -131,11 +131,12 @@ entry:
%add = call float @llvm.experimental.constrained.fadd.f32(float %0, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret float %add
; CHECK-LABEL: @host_14
-; CHECK: call float @llvm.experimental.constrained.ceil.f32(float %a, metadata !"fpexcept.ignore") #0
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+; CHECK: call float @llvm.experimental.constrained.ceil.f32(float %a, metadata !"fpexcept.ignore") #[[ATTR0]]
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
}
attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
diff --git a/llvm/test/Transforms/InstSimplify/constfold-constrained.ll b/llvm/test/Transforms/InstSimplify/constfold-constrained.ll
index a9ef7f6a765d1..64d8e1d2454db 100644
--- a/llvm/test/Transforms/InstSimplify/constfold-constrained.ll
+++ b/llvm/test/Transforms/InstSimplify/constfold-constrained.ll
@@ -17,7 +17,7 @@ entry:
define double @floor_02() #0 {
; CHECK-LABEL: @floor_02(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.floor.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR0:[0-9]+]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.floor.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR3:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double -1.100000e+01
;
entry:
@@ -40,7 +40,7 @@ entry:
define double @ceil_02() #0 {
; CHECK-LABEL: @ceil_02(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.ceil.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.ceil.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double -1.000000e+01
;
entry:
@@ -55,7 +55,7 @@ define double @trunc_01() #0 {
; CHECK-NEXT: ret double 1.000000e+01
;
entry:
- %result = call double @llvm.experimental.constrained.trunc.f64(double 1.010000e+01, metadata !"fpexcept.ignore") #0
+ %result = call double @llvm.trunc.f64(double 1.010000e+01) #0 [ "fpe.except"(metadata !"ignore") ]
ret double %result
}
@@ -63,7 +63,7 @@ entry:
define double @trunc_02() #0 {
; CHECK-LABEL: @trunc_02(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.trunc.f64(double -1.010000e+01) #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double -1.000000e+01
;
entry:
@@ -86,7 +86,7 @@ entry:
define double @round_02() #0 {
; CHECK-LABEL: @round_02(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.round.f64(double -1.050000e+01, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.round.f64(double -1.050000e+01, metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double -1.100000e+01
;
entry:
@@ -120,7 +120,7 @@ entry:
define double @nearbyint_03() #0 {
; CHECK-LABEL: @nearbyint_03(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.towardzero", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.towardzero", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rtz"), "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double 1.000000e+01
;
entry:
@@ -132,7 +132,7 @@ entry:
define double @nearbyint_04() #0 {
; CHECK-LABEL: @nearbyint_04(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double 1.000000e+01
;
entry:
@@ -144,7 +144,7 @@ entry:
define double @nearbyint_05() #0 {
; CHECK-LABEL: @nearbyint_05(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double [[RESULT]]
;
entry:
@@ -156,7 +156,7 @@ entry:
define double @nonfinite_01() #0 {
; CHECK-LABEL: @nonfinite_01(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double 0x7FF4000000000000, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.trunc.f64(double 0x7FF4000000000000) #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double [[RESULT]]
;
entry:
@@ -168,10 +168,10 @@ entry:
define double @nonfinite_02() #0 {
; CHECK-LABEL: @nonfinite_02(
; CHECK-NEXT: entry:
-; CHECK-NEXT: ret double 0x7FF8000000000000
+; CHECK-NEXT: ret double 0x7FFC000000000000
;
entry:
- %result = call double @llvm.experimental.constrained.trunc.f64(double 0x7ff4000000000000, metadata !"fpexcept.ignore") #0
+ %result = call double @llvm.trunc.f64(double 0x7ff4000000000000) #0 [ "fpe.except"(metadata !"ignore") ]
ret double %result
}
@@ -179,7 +179,7 @@ entry:
define double @nonfinite_03() #0 {
; CHECK-LABEL: @nonfinite_03(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double 0x7FF8000000000000, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.trunc.f64(double 0x7FF8000000000000) #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double 0x7FF8000000000000
;
entry:
@@ -191,7 +191,7 @@ entry:
define double @nonfinite_04() #0 {
; CHECK-LABEL: @nonfinite_04(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double 0x7FF0000000000000, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.trunc.f64(double 0x7FF0000000000000) #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double 0x7FF0000000000000
;
entry:
@@ -203,7 +203,7 @@ entry:
define double @rint_01() #0 {
; CHECK-LABEL: @rint_01(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.000000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.000000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double 1.000000e+01
;
entry:
@@ -215,7 +215,7 @@ entry:
define double @rint_02() #0 {
; CHECK-LABEL: @rint_02(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.010000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.010000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double [[RESULT]]
;
entry:
@@ -270,7 +270,7 @@ entry:
define double @fadd_04() #0 {
; CHECK-LABEL: @fadd_04(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 0x3FF0000000000001, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 0x3FF0000000000001, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double [[RESULT]]
;
entry:
@@ -282,7 +282,7 @@ entry:
define double @fadd_05() #0 {
; CHECK-LABEL: @fadd_05(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double 3.000000e+00
;
entry:
@@ -294,7 +294,7 @@ entry:
define double @fadd_06() #0 {
; CHECK-LABEL: @fadd_06(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double 3.000000e+00
;
entry:
@@ -306,7 +306,7 @@ entry:
define double @fadd_07() #0 {
; CHECK-LABEL: @fadd_07(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 0x3FF0000000000001, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 0x3FF0000000000001, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR3]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
; CHECK-NEXT: ret double [[RESULT]]
;
entry:
@@ -328,7 +328,7 @@ entry:
define double @fadd_09() #0 {
; CHECK-LABEL: @fadd_09(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret double [[RESULT]]
;
entry:
@@ -492,7 +492,7 @@ entry:
define i1 @cmp_eq_nan_01() #0 {
; CHECK-LABEL: @cmp_eq_nan_01(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret i1 [[RESULT]]
;
entry:
@@ -503,7 +503,7 @@ entry:
define i1 @cmp_eq_nan_02() #0 {
; CHECK-LABEL: @cmp_eq_nan_02(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret i1 [[RESULT]]
;
entry:
@@ -515,7 +515,7 @@ entry:
define i1 @cmp_eq_nan_03() #0 {
; CHECK-LABEL: @cmp_eq_nan_03(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret i1 false
;
entry:
@@ -526,7 +526,7 @@ entry:
define i1 @cmp_eq_nan_04() #0 {
; CHECK-LABEL: @cmp_eq_nan_04(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: ret i1 [[RESULT]]
;
entry:
@@ -540,7 +540,6 @@ attributes #0 = { strictfp }
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.floor.f64(double, metadata)
declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
declare double @llvm.experimental.constrained.round.f64(double, metadata)
declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
diff --git a/llvm/test/Verifier/fp-intrinsics.ll b/llvm/test/Verifier/fp-intrinsics.ll
index 4934843d5a2ed..fd7b07abab93f 100644
--- a/llvm/test/Verifier/fp-intrinsics.ll
+++ b/llvm/test/Verifier/fp-intrinsics.ll
@@ -5,7 +5,7 @@ declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadat
; Test an illegal value for the rounding mode argument.
; CHECK: invalid rounding mode argument
-; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynomic", metadata !"fpexcept.strict") #1
+; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynomic", metadata !"fpexcept.strict") #{{[0-9]+}}
define double @f2(double %a, double %b) #0 {
entry:
%fadd = call double @llvm.experimental.constrained.fadd.f64(
@@ -17,7 +17,7 @@ entry:
; Test an illegal value for the exception behavior argument.
; CHECK-NEXT: invalid exception behavior argument
-; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.restrict") #1
+; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.restrict") #{{[0-9]+}}
define double @f3(double %a, double %b) #0 {
entry:
%fadd = call double @llvm.experimental.constrained.fadd.f64(
@@ -29,7 +29,7 @@ entry:
; Test an illegal value for the rounding mode argument.
; CHECK-NEXT: invalid rounding mode argument
-; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynomic", metadata !"fpexcept.strict") #1
+; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynomic", metadata !"fpexcept.strict") #{{[0-9]+}}
define double @f4(double %a) #0 {
entry:
%fadd = call double @llvm.experimental.constrained.sqrt.f64(
@@ -41,7 +41,7 @@ entry:
; Test an illegal value for the exception behavior argument.
; CHECK-NEXT: invalid exception behavior argument
-; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.restrict") #1
+; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.restrict") #{{[0-9]+}}
define double @f5(double %a) #0 {
entry:
%fadd = call double @llvm.experimental.constrained.sqrt.f64(