diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp index c820e8bf7266a..38a3c03e73514 100644 --- a/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/llvm/lib/Analysis/ScalarEvolution.cpp @@ -15078,8 +15078,19 @@ void SCEVUnionPredicate::add(const SCEVPredicate *N, ScalarEvolution &SE) { } // Only add predicate if it is not already implied by this union predicate. - if (!implies(N, SE)) - Preds.push_back(N); + if (implies(N, SE)) + return; + + // Build a new vector containing the current predicates, except the ones that + // are implied by the new predicate N. + SmallVector<const SCEVPredicate *> PrunedPreds; + for (auto *P : Preds) { + if (N->implies(P, SE)) + continue; + PrunedPreds.push_back(P); + } + Preds = std::move(PrunedPreds); + Preds.push_back(N); } PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, diff --git a/llvm/test/Analysis/LoopAccessAnalysis/nssw-predicate-implied.ll b/llvm/test/Analysis/LoopAccessAnalysis/nssw-predicate-implied.ll index 4f595b44ae5fd..c502c7c1176c0 100644 --- a/llvm/test/Analysis/LoopAccessAnalysis/nssw-predicate-implied.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/nssw-predicate-implied.ll @@ -113,7 +113,7 @@ exit: ret void } -; FIXME: {0,+,3} implies {0,+,2}. +; {0,+,3} [nssw] implies {0,+,2} [nssw]. define void @wrap_check_iv.3_implies_iv.2_predicates_added_in_different_order(i32 noundef %N, ptr %dst, ptr %src) { ; CHECK-LABEL: 'wrap_check_iv.3_implies_iv.2_predicates_added_in_different_order' ; CHECK-NEXT: loop: @@ -135,7 +135,6 @@ define void @wrap_check_iv.3_implies_iv.2_predicates_added_in_different_order(i3 ; CHECK-EMPTY: ; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop. 
; CHECK-NEXT: SCEV assumptions: -; CHECK-NEXT: {0,+,2}<%loop> Added Flags: ; CHECK-NEXT: {0,+,3}<%loop> Added Flags: ; CHECK-EMPTY: ; CHECK-NEXT: Expressions re-written: diff --git a/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll b/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll index 9da3d8f3d2802..52ef2d7fb7621 100644 --- a/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll @@ -22,7 +22,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" ; LAA-LABEL: f1 ; LAA: Memory dependences are safe{{$}} ; LAA: SCEV assumptions: -; LAA-NEXT: {0,+,2}<%for.body> Added Flags: +; LAA-NOT: {0,+,2}<%for.body> Added Flags: ; LAA-NEXT: {%a,+,4}<%for.body> Added Flags: ; The expression for %mul_ext as analyzed by SCEV is diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll index 8d449f447d598..d42e6af1cec0c 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll @@ -760,7 +760,6 @@ define void @exit_cond_zext_iv(ptr %dst, i64 %N) { ; DEFAULT: vector.scevcheck: ; DEFAULT-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1) ; DEFAULT-NEXT: [[TMP0:%.*]] = add i64 [[UMAX]], -1 -; DEFAULT-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], 4294967295 ; DEFAULT-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32 ; DEFAULT-NEXT: [[TMP3:%.*]] = add i32 1, [[TMP2]] ; DEFAULT-NEXT: [[TMP4:%.*]] = icmp ult i32 [[TMP3]], 1 @@ -810,7 +809,6 @@ define void @exit_cond_zext_iv(ptr %dst, i64 %N) { ; PRED: vector.scevcheck: ; PRED-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1) ; PRED-NEXT: [[TMP0:%.*]] = add i64 [[UMAX]], -1 -; PRED-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], 4294967295 ; PRED-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32 ; PRED-NEXT: 
[[TMP3:%.*]] = add i32 1, [[TMP2]] ; PRED-NEXT: [[TMP4:%.*]] = icmp ult i32 [[TMP3]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll index 3f55701f4f2a4..bf27f9e6be65e 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll @@ -390,7 +390,6 @@ define void @zext_iv_increment(ptr %dst, i64 %N) { ; CHECK: vector.scevcheck: ; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1) ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[UMAX]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], 4294967295 ; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = add i32 1, [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ult i32 [[TMP3]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll index bfff5c94d727f..6a12be7da192e 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll @@ -335,14 +335,13 @@ define void @multi_exit(ptr %dst, ptr %src.1, ptr %src.2, i64 %A, i64 %B) #0 { ; CHECK-NEXT: [[TMP1:%.*]] = freeze i64 [[TMP0]] ; CHECK-NEXT: [[UMIN7:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 [[A:%.*]]) ; CHECK-NEXT: [[TMP2:%.*]] = add nuw i64 [[UMIN7]], 1 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], 30 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], 28 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] ; CHECK: vector.scevcheck: ; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[B]], i64 1) ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[UMAX]], -1 ; CHECK-NEXT: [[TMP4:%.*]] = freeze i64 [[TMP3]] ; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[A]]) -; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[UMIN]], 4294967295 ; CHECK-NEXT: 
[[TMP6:%.*]] = trunc i64 [[UMIN]] to i32 ; CHECK-NEXT: [[TMP7:%.*]] = add i32 1, [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP7]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr72969.ll b/llvm/test/Transforms/LoopVectorize/X86/pr72969.ll index 5a0aec967ccd9..41868d62a35a5 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr72969.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr72969.ll @@ -42,7 +42,6 @@ define void @test(ptr %p) { ; VEC-NEXT: [[TMP6:%.*]] = add i64 [[UMAX]], -9 ; VEC-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], [[P1]] ; VEC-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP7]], 3 -; VEC-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP8]], 65535 ; VEC-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP8]] to i16 ; VEC-NEXT: [[TMP11:%.*]] = add i16 2, [[TMP10]] ; VEC-NEXT: [[TMP12:%.*]] = icmp ult i16 [[TMP11]], 2 diff --git a/llvm/test/Transforms/LoopVectorize/no-fold-tail-by-masking-iv-external-uses.ll b/llvm/test/Transforms/LoopVectorize/no-fold-tail-by-masking-iv-external-uses.ll index e0f57d5233304..66996316b47b7 100644 --- a/llvm/test/Transforms/LoopVectorize/no-fold-tail-by-masking-iv-external-uses.ll +++ b/llvm/test/Transforms/LoopVectorize/no-fold-tail-by-masking-iv-external-uses.ll @@ -19,17 +19,11 @@ define i32 @test(ptr %arr, i64 %n) { ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] ; CHECK: vector.scevcheck: ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[N]], -2 -; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i8 -; CHECK-NEXT: [[TMP3:%.*]] = add i8 1, [[TMP2]] -; CHECK-NEXT: [[TMP4:%.*]] = icmp ult i8 [[TMP3]], 1 -; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[TMP1]], 255 -; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP4]], [[TMP5]] ; CHECK-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP1]] to i8 ; CHECK-NEXT: [[TMP8:%.*]] = add i8 2, [[TMP7]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp ult i8 [[TMP8]], 2 ; CHECK-NEXT: [[TMP10:%.*]] = icmp ugt i64 [[TMP1]], 255 -; CHECK-NEXT: [[TMP11:%.*]] = or i1 [[TMP9]], [[TMP10]] -; CHECK-NEXT: 
[[TMP12:%.*]] = or i1 [[TMP6]], [[TMP11]] +; CHECK-NEXT: [[TMP12:%.*]] = or i1 [[TMP9]], [[TMP10]] ; CHECK-NEXT: br i1 [[TMP12]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll b/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll index d0f9ae28ae946..63bf01fe604e8 100644 --- a/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll +++ b/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll @@ -42,16 +42,11 @@ define void @test_pr63368(i1 %c, ptr %A) { ; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[L_LCSSA_LCSSA]], i32 -1) ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[SMAX]], 1 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8 -; CHECK-NEXT: [[TMP5:%.*]] = icmp slt i8 [[TMP4]], 0 -; CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i32 [[TMP3]], 255 -; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]] -; CHECK-NEXT: [[TMP8:%.*]] = trunc i32 [[TMP3]] to i8 -; CHECK-NEXT: [[TMP9:%.*]] = add i8 1, [[TMP8]] -; CHECK-NEXT: [[TMP10:%.*]] = icmp slt i8 [[TMP9]], 1 -; CHECK-NEXT: [[TMP11:%.*]] = icmp ugt i32 [[TMP3]], 255 -; CHECK-NEXT: [[TMP12:%.*]] = or i1 [[TMP10]], [[TMP11]] -; CHECK-NEXT: [[TMP13:%.*]] = or i1 [[TMP7]], [[TMP12]] -; CHECK-NEXT: br i1 [[TMP13]], label [[SCALAR_PH3]], label [[VECTOR_PH4:%.*]] +; CHECK-NEXT: [[TMP5:%.*]] = add i8 1, [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], 1 +; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP3]], 255 +; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]] +; CHECK-NEXT: br i1 [[TMP8]], label [[SCALAR_PH3]], label [[VECTOR_PH4:%.*]] ; CHECK: vector.ph4: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], 4 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP2]], [[N_MOD_VF]] @@ -69,8 +64,8 @@ define void @test_pr63368(i1 %c, ptr %A) { ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT9]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP18]], 
label [[MIDDLE_BLOCK2:%.*]], label [[VECTOR_BODY5]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block2: -; CHECK-NEXT: [[CMP_N6:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N6]], label [[EXIT_2:%.*]], label [[SCALAR_PH3]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT_2:%.*]], label [[SCALAR_PH3]] ; CHECK: scalar.ph3: ; CHECK-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i8 [ [[IND_END]], [[MIDDLE_BLOCK2]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[EXIT_1]] ] ; CHECK-NEXT: br label [[LOOP_2:%.*]] diff --git a/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll b/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll index e53de22f74b2c..2f2d715790229 100644 --- a/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll +++ b/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll @@ -174,20 +174,14 @@ define void @implied_wrap_predicate(ptr %A, ptr %B, ptr %C) { ; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], [[A1]] ; CHECK-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP7]], 3 ; CHECK-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i16 -; CHECK-NEXT: [[TMP10:%.*]] = add i16 1, [[TMP9]] -; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i16 [[TMP10]], 1 +; CHECK-NEXT: [[TMP10:%.*]] = add i16 2, [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i16 [[TMP10]], 2 ; CHECK-NEXT: [[TMP12:%.*]] = icmp ugt i64 [[TMP8]], 65535 ; CHECK-NEXT: [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]] -; CHECK-NEXT: [[TMP14:%.*]] = trunc i64 [[TMP8]] to i16 -; CHECK-NEXT: [[TMP15:%.*]] = add i16 2, [[TMP14]] -; CHECK-NEXT: [[TMP16:%.*]] = icmp ult i16 [[TMP15]], 2 -; CHECK-NEXT: [[TMP17:%.*]] = icmp ugt i64 [[TMP8]], 65535 -; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]] -; CHECK-NEXT: [[TMP19:%.*]] = or i1 [[TMP13]], [[TMP18]] -; CHECK-NEXT: br i1 [[TMP19]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]] +; CHECK-NEXT: br i1 [[TMP13]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]] ; CHECK: 
vector.memcheck: -; CHECK-NEXT: [[TMP20:%.*]] = sub i64 [[C2]], [[A3]] -; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP20]], 32 +; CHECK-NEXT: [[TMP14:%.*]] = sub i64 [[C2]], [[A3]] +; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], 32 ; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 4 @@ -199,16 +193,16 @@ define void @implied_wrap_predicate(ptr %A, ptr %B, ptr %C) { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]] -; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[OFFSET_IDX]], 0 -; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i64, ptr [[A]], i64 [[TMP21]] -; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i64, ptr [[TMP22]], i32 0 -; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr [[TMP23]], align 4 -; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[C]], i64 [[TMP21]] -; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i64, ptr [[TMP24]], i32 0 -; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr [[TMP25]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 0 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[A]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[TMP16]], i32 0 +; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr [[TMP17]], align 4 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[C]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[TMP18]], i32 0 +; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr [[TMP19]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP20]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] diff --git a/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll b/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll index 892c518b6c873..4c8b43eab947f 100644 --- a/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll +++ b/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll @@ -29,15 +29,13 @@ define void @f1(ptr noalias %a, ; LV-LABEL: @f1( ; LV-NEXT: for.body.lver.check: ; LV-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], -1 -; LV-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], 4294967295 ; LV-NEXT: [[MUL1:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]]) ; LV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL1]], 0 ; LV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL1]], 1 ; LV-NEXT: [[TMP2:%.*]] = sub i64 0, [[MUL_RESULT]] ; LV-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[MUL_RESULT]] ; LV-NEXT: [[TMP4:%.*]] = icmp ult ptr [[TMP3]], [[A]] -; LV-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]] -; LV-NEXT: [[TMP6:%.*]] = or i1 [[TMP1]], [[TMP5]] +; LV-NEXT: [[TMP6:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]] ; LV-NEXT: br i1 [[TMP6]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]] ; LV: for.body.ph.lver.orig: ; LV-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]