From aea763008b91125ff5053267646b7f5302e47216 Mon Sep 17 00:00:00 2001
From: Thurston Dang
Date: Tue, 4 Mar 2025 17:02:28 +0000
Subject: [PATCH 1/3] [msan][NFC] Add tests for Arm NEON smaxv/sminv

This patch precommits tests for the smaxv/sminv intrinsics, which are
currently handled suboptimally by visitInstruction. These are the signed
versions of umaxv/uminv (https://github.com/llvm/llvm-project/pull/129661).
Future work will update MSan to apply handleVectorReduceIntrinsic.
---
 .../MemorySanitizer/AArch64/arm64-smaxv.ll | 323 ++++++++++++++++++
 .../MemorySanitizer/AArch64/arm64-sminv.ll | 323 ++++++++++++++++++
 2 files changed, 646 insertions(+)
 create mode 100644 llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll
 create mode 100644 llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll

diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll
new file mode 100644
index 0000000000000..cbbd55d3e3497
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll
@@ -0,0 +1,323 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=msan -S | FileCheck %s
+;
+; Forked from llvm/test/CodeGen/AArch64/arm64-smaxv.ll
+;
+; Handled suboptimally (visitInstruction):
+; - llvm.aarch64.neon.smaxv
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-android9001"
+
+define signext i8 @test_vmaxv_s8(<8 x i8> %a1) #0 {
+; CHECK-LABEL: define signext i8 @test_vmaxv_s8(
+; CHECK-SAME: <8 x i8> [[A1:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to i64
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: [[BB2]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB3]]:
+; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> [[A1]])
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[VMAXV_I]] to i8
+; CHECK-NEXT: store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i8 [[TMP4]]
+;
+entry:
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a1)
+ %0 = trunc i32 %vmaxv.i to i8
+ ret i8 %0
+}
+
+define signext i16 @test_vmaxv_s16(<4 x i16> %a1) #0 {
+; CHECK-LABEL: define signext i16 @test_vmaxv_s16(
+; CHECK-SAME: <4 x i16> [[A1:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[TMP0]] to i64
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]]
+; CHECK: [[BB2]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB3]]:
+; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> [[A1]])
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[VMAXV_I]] to i16
+; CHECK-NEXT: store i16 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i16 [[TMP4]]
+;
+entry:
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16>
%a1) + %0 = trunc i32 %vmaxv.i to i16 + ret i16 %0 +} + +define i32 @test_vmaxv_s32(<2 x i32> %a1) #0 { +; CHECK-LABEL: define i32 @test_vmaxv_s32( +; CHECK-SAME: <2 x i32> [[A1:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: +; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32> [[A1]]) +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[VMAXV_I]] +; +entry: + %vmaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32> %a1) + ret i32 %vmaxv.i +} + +define signext i8 @test_vmaxvq_s8(<16 x i8> %a1) #0 { +; CHECK-LABEL: define signext i8 @test_vmaxvq_s8( +; CHECK-SAME: <16 x i8> [[A1:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: +; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> [[A1]]) +; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[VMAXV_I]] to i8 +; CHECK-NEXT: store i8 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i8 [[TMP4]] +; +entry: + %vmaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a1) + %0 = trunc i32 %vmaxv.i to i8 + ret i8 %0 +} + +define signext i16 @test_vmaxvq_s16(<8 x i16> %a1) #0 { +; CHECK-LABEL: define signext i16 @test_vmaxvq_s16( +; CHECK-SAME: <8 x i16> [[A1:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[TMP0]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: +; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> [[A1]]) +; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[VMAXV_I]] to i16 +; CHECK-NEXT: store i16 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i16 [[TMP4]] +; +entry: + %vmaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a1) + %0 = trunc i32 %vmaxv.i to i16 + ret i16 %0 +} + +define i32 @test_vmaxvq_s32(<4 x i32> %a1) #0 { +; CHECK-LABEL: define i32 @test_vmaxvq_s32( +; CHECK-SAME: <4 x i32> [[A1:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] 
+; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: +; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> [[A1]]) +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[VMAXV_I]] +; +entry: + %vmaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a1) + ret i32 %vmaxv.i +} + +define <8 x i8> @test_vmaxv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 { +; CHECK-LABEL: define <8 x i8> @test_vmaxv_s8_used_by_laneop( +; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> [[A2]]) +; CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i8 +; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <8 x i8> [[TMP1]], i8 0, i32 3 +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i8> [[A1]], i8 [[TMP6]], i32 3 +; CHECK-NEXT: store <8 x i8> [[_MSPROP]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret <8 x i8> [[TMP7]] +; +entry: + %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a2) + %1 = trunc i32 %0 to i8 + %2 = insertelement <8 x i8> %a1, i8 %1, i32 3 + ret <8 x i8> %2 +} + +define <4 x i16> @test_vmaxv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0 { +; CHECK-LABEL: define <4 x i16> @test_vmaxv_s16_used_by_laneop( +; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[TMP0]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> [[A2]]) +; CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i16 +; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i16> [[TMP1]], i16 0, i32 3 +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i16> [[A1]], i16 [[TMP6]], i32 3 +; CHECK-NEXT: store <4 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret <4 x i16> [[TMP7]] +; +entry: + %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a2) + %1 = trunc i32 %0 to i16 + %2 = insertelement <4 x i16> %a1, i16 %1, i32 3 + ret <4 x i16> %2 +} + +define <2 x i32> @test_vmaxv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0 { +; CHECK-LABEL: define <2 x i32> @test_vmaxv_s32_used_by_laneop( +; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: 
[[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32> [[A2]]) +; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP1]], i32 0, i32 1 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> [[A1]], i32 [[TMP5]], i32 1 +; CHECK-NEXT: store <2 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret <2 x i32> [[TMP6]] +; +entry: + %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32> %a2) + %1 = insertelement <2 x i32> %a1, i32 %0, i32 1 + ret <2 x i32> %1 +} + +define <16 x i8> @test_vmaxvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0 { +; CHECK-LABEL: define <16 x i8> @test_vmaxvq_s8_used_by_laneop( +; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> [[A2]]) +; CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i8 +; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <16 x i8> [[TMP1]], i8 0, i32 3 +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x i8> [[A1]], i8 [[TMP6]], i32 3 +; CHECK-NEXT: store <16 x i8> [[_MSPROP]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret <16 x i8> [[TMP7]] +; +entry: + %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a2) + %1 = trunc i32 %0 to i8 + %2 = insertelement <16 x i8> %a1, i8 %1, i32 3 + ret <16 x i8> %2 +} + +define <8 x i16> @test_vmaxvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) #0 { +; CHECK-LABEL: define <8 x i16> @test_vmaxvq_s16_used_by_laneop( +; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i16> [[TMP0]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> [[A2]]) +; CHECK-NEXT: 
[[TMP6:%.*]] = trunc i32 [[TMP5]] to i16
+; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <8 x i16> [[TMP1]], i16 0, i32 3
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[A1]], i16 [[TMP6]], i32 3
+; CHECK-NEXT: store <8 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <8 x i16> [[TMP7]]
+;
+entry:
+ %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a2)
+ %1 = trunc i32 %0 to i16
+ %2 = insertelement <8 x i16> %a1, i16 %1, i32 3
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vmaxvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) #0 {
+; CHECK-LABEL: define <4 x i32> @test_vmaxvq_s32_used_by_laneop(
+; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to i128
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK: [[BB3]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB4]]:
+; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> [[A2]])
+; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP1]], i32 0, i32 3
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[A1]], i32 [[TMP5]], i32 3
+; CHECK-NEXT: store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <4 x i32> [[TMP6]]
+;
+entry:
+ %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a2)
+ %1 = insertelement <4 x i32> %a1, i32 %0, i32 3
+ ret <4 x i32> %1
+}
+
+declare i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32>)
+declare i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16>)
+declare i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8>)
+declare i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32>)
+declare i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16>)
+declare i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8>)
+
+attributes #0 = { sanitize_memory }
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll
new file mode 100644
index 0000000000000..b4f3cce44c306
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll
@@ -0,0 +1,323 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=msan -S | FileCheck %s
+;
+; Forked from llvm/test/CodeGen/AArch64/arm64-sminv.ll
+;
+; Handled suboptimally (visitInstruction):
+; - llvm.aarch64.neon.sminv
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-android9001"
+
+define signext i8 @test_vminv_s8(<8 x i8> %a1) #0 {
+; CHECK-LABEL: define signext i8 @test_vminv_s8(
+; CHECK-SAME: <8 x i8> [[A1:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to i64
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: [[BB2]]:
+; CHECK-NEXT: call void
@__msan_warning_noreturn() #[[ATTR3:[0-9]+]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: +; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> [[A1]]) +; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[VMINV_I]] to i8 +; CHECK-NEXT: store i8 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i8 [[TMP4]] +; +entry: + %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a1) + %0 = trunc i32 %vminv.i to i8 + ret i8 %0 +} + +define signext i16 @test_vminv_s16(<4 x i16> %a1) #0 { +; CHECK-LABEL: define signext i16 @test_vminv_s16( +; CHECK-SAME: <4 x i16> [[A1:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[TMP0]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: +; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> [[A1]]) +; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[VMINV_I]] to i16 +; CHECK-NEXT: store i16 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i16 [[TMP4]] +; +entry: + %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a1) + %0 = trunc i32 %vminv.i to i16 + ret i16 %0 +} + +define i32 @test_vminv_s32(<2 x i32> %a1) #0 { +; CHECK-LABEL: define i32 @test_vminv_s32( +; CHECK-SAME: <2 x i32> [[A1:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: +; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> [[A1]]) +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[VMINV_I]] +; +entry: + %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> %a1) + ret i32 %vminv.i +} + +define signext i8 @test_vminvq_s8(<16 x i8> %a1) #0 { +; CHECK-LABEL: define signext i8 @test_vminvq_s8( +; CHECK-SAME: <16 x i8> [[A1:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: +; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> [[A1]]) +; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[VMINV_I]] to i8 +; CHECK-NEXT: store i8 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i8 [[TMP4]] +; +entry: + %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a1) + %0 = trunc i32 %vminv.i to i8 + ret i8 %0 +} + +define signext i16 @test_vminvq_s16(<8 x i16> %a1) #0 { +; CHECK-LABEL: define signext 
i16 @test_vminvq_s16( +; CHECK-SAME: <8 x i16> [[A1:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[TMP0]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: +; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> [[A1]]) +; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[VMINV_I]] to i16 +; CHECK-NEXT: store i16 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i16 [[TMP4]] +; +entry: + %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a1) + %0 = trunc i32 %vminv.i to i16 + ret i16 %0 +} + +define i32 @test_vminvq_s32(<4 x i32> %a1) #0 { +; CHECK-LABEL: define i32 @test_vminvq_s32( +; CHECK-SAME: <4 x i32> [[A1:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: +; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> [[A1]]) +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[VMINV_I]] +; +entry: + %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a1) + ret i32 %vminv.i +} + +define <8 x i8> @test_vminv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 { +; CHECK-LABEL: define <8 x i8> @test_vminv_s8_used_by_laneop( +; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> [[A2]]) +; CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i8 +; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <8 x i8> [[TMP1]], i8 0, i32 3 +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i8> [[A1]], i8 [[TMP6]], i32 3 +; CHECK-NEXT: store <8 x i8> [[_MSPROP]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret <8 x i8> [[TMP7]] +; +entry: + %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a2) + %1 = trunc i32 %0 to i8 + %2 = insertelement <8 x i8> %a1, i8 %1, i32 3 + ret <8 x i8> %2 +} + +define <4 x i16> @test_vminv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0 { +; CHECK-LABEL: define <4 x i16> @test_vminv_s16_used_by_laneop( +; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: 
[[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[TMP0]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> [[A2]]) +; CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i16 +; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i16> [[TMP1]], i16 0, i32 3 +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i16> [[A1]], i16 [[TMP6]], i32 3 +; CHECK-NEXT: store <4 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret <4 x i16> [[TMP7]] +; +entry: + %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a2) + %1 = trunc i32 %0 to i16 + %2 = insertelement <4 x i16> %a1, i16 %1, i32 3 + ret <4 x i16> %2 +} + +define <2 x i32> @test_vminv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0 { +; CHECK-LABEL: define <2 x i32> @test_vminv_s32_used_by_laneop( +; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> [[A2]]) +; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP1]], i32 0, i32 1 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> [[A1]], i32 [[TMP5]], i32 1 +; CHECK-NEXT: store <2 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret <2 x i32> [[TMP6]] +; +entry: + %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> %a2) + %1 = insertelement <2 x i32> %a1, i32 %0, i32 1 + ret <2 x i32> %1 +} + +define <16 x i8> @test_vminvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0 { +; CHECK-LABEL: define <16 x i8> @test_vminvq_s8_used_by_laneop( +; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> [[A2]]) +; CHECK-NEXT: 
[[TMP6:%.*]] = trunc i32 [[TMP5]] to i8 +; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <16 x i8> [[TMP1]], i8 0, i32 3 +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x i8> [[A1]], i8 [[TMP6]], i32 3 +; CHECK-NEXT: store <16 x i8> [[_MSPROP]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret <16 x i8> [[TMP7]] +; +entry: + %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a2) + %1 = trunc i32 %0 to i8 + %2 = insertelement <16 x i8> %a1, i8 %1, i32 3 + ret <16 x i8> %2 +} + +define <8 x i16> @test_vminvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) #0 { +; CHECK-LABEL: define <8 x i16> @test_vminvq_s16_used_by_laneop( +; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i16> [[TMP0]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> [[A2]]) +; CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i16 +; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <8 x i16> [[TMP1]], i16 0, i32 3 +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[A1]], i16 [[TMP6]], i32 3 +; CHECK-NEXT: store <8 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret <8 x i16> [[TMP7]] +; +entry: + %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a2) + %1 = trunc i32 %0 to i16 + %2 = insertelement <8 x i16> %a1, i16 %1, i32 3 + ret <8 x i16> %2 +} + +define <4 x i32> @test_vminvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) #0 { +; CHECK-LABEL: define <4 x i32> @test_vminvq_s32_used_by_laneop( +; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]] +; CHECK-NEXT: unreachable +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> [[A2]]) +; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP1]], i32 0, i32 3 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[A1]], i32 [[TMP5]], i32 3 +; CHECK-NEXT: store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret <4 x i32> [[TMP6]] +; +entry: + %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a2) + %1 = insertelement <4 x i32> %a1, i32 %0, i32 3 + ret <4 x i32> %1 +} + +declare i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32>) +declare i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16>) +declare i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8>) +declare i32 
@llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32>) +declare i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16>) +declare i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8>) + +attributes #0 = { sanitize_memory } From 437f872db005592f249131ea8a68980ae032ceff Mon Sep 17 00:00:00 2001 From: Thurston Dang Date: Tue, 4 Mar 2025 22:16:15 +0000 Subject: [PATCH 2/3] Fix umax and uminv tests --- .../MemorySanitizer/AArch64/arm64-umaxv.ll | 92 +++++++++++++++---- .../MemorySanitizer/AArch64/arm64-uminv.ll | 92 +++++++++++++++---- 2 files changed, 148 insertions(+), 36 deletions(-) diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll index a90ed74557727..a2af44e1862d1 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll @@ -14,11 +14,19 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" target triple = "aarch64--linux-android9001" -define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp { +define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp #0 { ; CHECK-LABEL: define i32 @vmax_u8x8( ; CHECK-SAME: <8 x i8> [[A:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP3]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1:![0-9]+]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: ; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> [[A]]) #[[ATTR3:[0-9]+]] ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMAXV_I]] to i8 ; CHECK-NEXT: [[TMP0:%.*]] = xor i8 [[TMP]], 0 @@ -26,6 +34,11 @@ define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp { ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]] ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP]], 0 +; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]] +; CHECK: [[BB7]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB8]]: ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]] ; CHECK: [[IF_THEN]]: ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 @@ -33,8 +46,9 @@ define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp { ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: br label %[[RETURN]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: @@ -54,11 +68,19 @@ return: declare i32 @bar(...) 
-define i32 @vmax_u4x16(<4 x i16> %a) nounwind ssp { +define i32 @vmax_u4x16(<4 x i16> %a) nounwind ssp #0 { ; CHECK-LABEL: define i32 @vmax_u4x16( ; CHECK-SAME: <4 x i16> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: ; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> [[A]]) #[[ATTR3]] ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMAXV_I]] to i16 ; CHECK-NEXT: [[TMP0:%.*]] = xor i16 [[TMP]], 0 @@ -66,6 +88,11 @@ define i32 @vmax_u4x16(<4 x i16> %a) nounwind ssp { ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i16 [[TMP1]], 0 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]] ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i16 [[TMP]], 0 +; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]] +; CHECK: [[BB7]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB8]]: ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]] ; CHECK: [[IF_THEN]]: ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 @@ -73,8 +100,9 @@ define i32 @vmax_u4x16(<4 x i16> %a) nounwind ssp { ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: br label %[[RETURN]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: @@ -92,11 +120,19 @@ return: ret i32 %retval.0 } -define i32 @vmax_u8x16(<8 x i16> %a) nounwind ssp { +define i32 @vmax_u8x16(<8 x i16> %a) nounwind ssp #0 { ; CHECK-LABEL: define i32 @vmax_u8x16( ; CHECK-SAME: <8 x i16> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: ; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> [[A]]) #[[ATTR3]] ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMAXV_I]] to i16 ; CHECK-NEXT: [[TMP0:%.*]] = xor i16 [[TMP]], 0 @@ -104,6 +140,11 @@ define i32 @vmax_u8x16(<8 x i16> %a) nounwind ssp { ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i16 [[TMP1]], 0 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]] ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i16 [[TMP]], 0 +; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]] +; CHECK: [[BB7]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() 
#[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB8]]: ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]] ; CHECK: [[IF_THEN]]: ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 @@ -111,8 +152,9 @@ define i32 @vmax_u8x16(<8 x i16> %a) nounwind ssp { ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: br label %[[RETURN]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: @@ -130,11 +172,19 @@ return: ret i32 %retval.0 } -define i32 @vmax_u16x8(<16 x i8> %a) nounwind ssp { +define i32 @vmax_u16x8(<16 x i8> %a) nounwind ssp #0 { ; CHECK-LABEL: define i32 @vmax_u16x8( ; CHECK-SAME: <16 x i8> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: ; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> [[A]]) #[[ATTR3]] ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMAXV_I]] to i8 ; CHECK-NEXT: [[TMP0:%.*]] = xor i8 [[TMP]], 0 @@ -142,6 +192,11 @@ define i32 @vmax_u16x8(<16 x i8> %a) nounwind ssp { ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]] ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP]], 0 +; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]] +; CHECK: [[BB7]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB8]]: ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]] ; CHECK: [[IF_THEN]]: ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 @@ -149,8 +204,9 @@ define i32 @vmax_u16x8(<16 x i8> %a) nounwind ssp { ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: br label %[[RETURN]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: @@ -177,9 +233,9 @@ define <8 x i8> @test_vmaxv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 { ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to i64 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1:![0-9]+]] +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] ; CHECK: [[BB3]]: -; CHECK-NEXT: 
call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]] +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB4]]: ; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> [[A2]]) diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll index a754ceec94506..67038f495be7b 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll @@ -14,11 +14,19 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" target triple = "aarch64--linux-android9001" -define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp { +define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp #0 { ; CHECK-LABEL: define i32 @vmin_u8x8( ; CHECK-SAME: <8 x i8> [[A:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP3]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1:![0-9]+]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: ; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> [[A]]) #[[ATTR3:[0-9]+]] ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMINV_I]] to i8 ; CHECK-NEXT: [[TMP0:%.*]] = xor i8 [[TMP]], 0 @@ -26,6 +34,11 @@ define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp { ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]] ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP]], 0 +; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]] +; CHECK: [[BB7]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB8]]: ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]] ; CHECK: [[IF_THEN]]: ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 @@ -33,8 +46,9 @@ define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp { ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: br label %[[RETURN]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: @@ -54,11 +68,19 @@ return: declare i32 @bar(...) 
-define i32 @vmin_u4x16(<4 x i16> %a) nounwind ssp { +define i32 @vmin_u4x16(<4 x i16> %a) nounwind ssp #0 { ; CHECK-LABEL: define i32 @vmin_u4x16( ; CHECK-SAME: <4 x i16> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to i64 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: ; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> [[A]]) #[[ATTR3]] ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMINV_I]] to i16 ; CHECK-NEXT: [[TMP0:%.*]] = xor i16 [[TMP]], 0 @@ -66,6 +88,11 @@ define i32 @vmin_u4x16(<4 x i16> %a) nounwind ssp { ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i16 [[TMP1]], 0 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]] ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i16 [[TMP]], 0 +; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]] +; CHECK: [[BB7]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB8]]: ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]] ; CHECK: [[IF_THEN]]: ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 @@ -73,8 +100,9 @@ define i32 @vmin_u4x16(<4 x i16> %a) nounwind ssp { ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: br label %[[RETURN]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: @@ -92,11 +120,19 @@ return: ret i32 %retval.0 } -define i32 @vmin_u8x16(<8 x i16> %a) nounwind ssp { +define i32 @vmin_u8x16(<8 x i16> %a) nounwind ssp #0 { ; CHECK-LABEL: define i32 @vmin_u8x16( ; CHECK-SAME: <8 x i16> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: ; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> [[A]]) #[[ATTR3]] ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMINV_I]] to i16 ; CHECK-NEXT: [[TMP0:%.*]] = xor i16 [[TMP]], 0 @@ -104,6 +140,11 @@ define i32 @vmin_u8x16(<8 x i16> %a) nounwind ssp { ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i16 [[TMP1]], 0 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]] ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i16 [[TMP]], 0 +; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]] +; CHECK: [[BB7]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() 
#[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB8]]: ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]] ; CHECK: [[IF_THEN]]: ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 @@ -111,8 +152,9 @@ define i32 @vmin_u8x16(<8 x i16> %a) nounwind ssp { ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: br label %[[RETURN]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: @@ -130,11 +172,19 @@ return: ret i32 %retval.0 } -define i32 @vmin_u16x8(<16 x i8> %a) nounwind ssp { +define i32 @vmin_u16x8(<16 x i8> %a) nounwind ssp #0 { ; CHECK-LABEL: define i32 @vmin_u16x8( ; CHECK-SAME: <16 x i8> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to i128 +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]] +; CHECK: [[BB2]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB3]]: ; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> [[A]]) #[[ATTR3]] ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMINV_I]] to i8 ; CHECK-NEXT: [[TMP0:%.*]] = xor i8 [[TMP]], 0 @@ -142,6 +192,11 @@ define i32 @vmin_u16x8(<16 x i8> %a) nounwind ssp { ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]] ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP]], 0 +; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]] +; CHECK: [[BB7]]: +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] +; CHECK-NEXT: unreachable +; CHECK: [[BB8]]: ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]] ; CHECK: [[IF_THEN]]: ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 @@ -149,8 +204,9 @@ define i32 @vmin_u16x8(<16 x i8> %a) nounwind ssp { ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: br label %[[RETURN]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] +; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: @@ -177,9 +233,9 @@ define <8 x i8> @test_vminv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 { ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to i64 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1:![0-9]+]] +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] ; CHECK: [[BB3]]: -; CHECK-NEXT: 
call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]] +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB4]]: ; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> [[A2]]) From f1e2f2b3d1c3a924a9a68c99882600f00a375c68 Mon Sep 17 00:00:00 2001 From: Thurston Dang Date: Wed, 5 Mar 2025 00:38:45 +0000 Subject: [PATCH 3/3] Revert "Fix umax and uminv tests" This reverts commit 437f872db005592f249131ea8a68980ae032ceff. --- .../MemorySanitizer/AArch64/arm64-umaxv.ll | 92 ++++--------------- .../MemorySanitizer/AArch64/arm64-uminv.ll | 92 ++++--------------- 2 files changed, 36 insertions(+), 148 deletions(-) diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll index a2af44e1862d1..a90ed74557727 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll @@ -14,19 +14,11 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" target triple = "aarch64--linux-android9001" -define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp #0 { +define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp { ; CHECK-LABEL: define i32 @vmax_u8x8( ; CHECK-SAME: <8 x i8> [[A:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP3]] to i64 -; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1:![0-9]+]] -; CHECK: [[BB2]]: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]] -; CHECK-NEXT: unreachable -; CHECK: [[BB3]]: ; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> [[A]]) #[[ATTR3:[0-9]+]] ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMAXV_I]] to i8 ; CHECK-NEXT: [[TMP0:%.*]] = xor i8 [[TMP]], 0 @@ -34,11 +26,6 @@ define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp #0 { ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]] ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP]], 0 -; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]] -; CHECK: [[BB7]]: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] -; CHECK-NEXT: unreachable -; CHECK: [[BB8]]: ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]] ; CHECK: [[IF_THEN]]: ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 @@ -46,9 +33,8 @@ define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp #0 { ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: br label %[[RETURN]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ] -; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: @@ -68,19 +54,11 @@ return: declare i32 @bar(...) 
-define i32 @vmax_u4x16(<4 x i16> %a) nounwind ssp #0 {
+define i32 @vmax_u4x16(<4 x i16> %a) nounwind ssp {
 ; CHECK-LABEL: define i32 @vmax_u4x16(
 ; CHECK-SAME: <4 x i16> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[ENTRY:.*]]:
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to i64
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]]
-; CHECK: [[BB2]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB3]]:
 ; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> [[A]]) #[[ATTR3]]
 ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMAXV_I]] to i16
 ; CHECK-NEXT: [[TMP0:%.*]] = xor i16 [[TMP]], 0
@@ -88,11 +66,6 @@ define i32 @vmax_u4x16(<4 x i16> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i16 [[TMP1]], 0
 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i16 [[TMP]], 0
-; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]]
-; CHECK: [[BB7]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB8]]:
 ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]]
 ; CHECK: [[IF_THEN]]:
 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
@@ -100,9 +73,8 @@ define i32 @vmax_u4x16(<4 x i16> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: br label %[[RETURN]]
 ; CHECK: [[RETURN]]:
-; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: ret i32 [[RETVAL_0]]
 ;
 entry:
@@ -120,19 +92,11 @@ return:
 ret i32 %retval.0
 }

-define i32 @vmax_u8x16(<8 x i16> %a) nounwind ssp #0 {
+define i32 @vmax_u8x16(<8 x i16> %a) nounwind ssp {
 ; CHECK-LABEL: define i32 @vmax_u8x16(
 ; CHECK-SAME: <8 x i16> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[ENTRY:.*]]:
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]]
-; CHECK: [[BB2]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB3]]:
 ; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> [[A]]) #[[ATTR3]]
 ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMAXV_I]] to i16
 ; CHECK-NEXT: [[TMP0:%.*]] = xor i16 [[TMP]], 0
@@ -140,11 +104,6 @@ define i32 @vmax_u8x16(<8 x i16> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i16 [[TMP1]], 0
 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i16 [[TMP]], 0
-; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]]
-; CHECK: [[BB7]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB8]]:
 ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]]
 ; CHECK: [[IF_THEN]]:
 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
@@ -152,9 +111,8 @@ define i32 @vmax_u8x16(<8 x i16> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: br label %[[RETURN]]
 ; CHECK: [[RETURN]]:
-; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: ret i32 [[RETVAL_0]]
 ;
 entry:
@@ -172,19 +130,11 @@ return:
 ret i32 %retval.0
 }

-define i32 @vmax_u16x8(<16 x i8> %a) nounwind ssp #0 {
+define i32 @vmax_u16x8(<16 x i8> %a) nounwind ssp {
 ; CHECK-LABEL: define i32 @vmax_u16x8(
 ; CHECK-SAME: <16 x i8> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[ENTRY:.*]]:
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]]
-; CHECK: [[BB2]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB3]]:
 ; CHECK-NEXT: [[VMAXV_I:%.*]] = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> [[A]]) #[[ATTR3]]
 ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMAXV_I]] to i8
 ; CHECK-NEXT: [[TMP0:%.*]] = xor i8 [[TMP]], 0
@@ -192,11 +142,6 @@ define i32 @vmax_u16x8(<16 x i8> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0
 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP]], 0
-; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]]
-; CHECK: [[BB7]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB8]]:
 ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]]
 ; CHECK: [[IF_THEN]]:
 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
@@ -204,9 +149,8 @@ define i32 @vmax_u16x8(<16 x i8> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: br label %[[RETURN]]
 ; CHECK: [[RETURN]]:
-; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: ret i32 [[RETVAL_0]]
 ;
 entry:
@@ -233,9 +177,9 @@ define <8 x i8> @test_vmaxv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 {
 ; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to i64
 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1:![0-9]+]]
 ; CHECK: [[BB3]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
 ; CHECK-NEXT: unreachable
 ; CHECK: [[BB4]]:
 ; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> [[A2]])
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll
index 67038f495be7b..a754ceec94506 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll
@@ -14,19 +14,11 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-android9001"

-define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp #0 {
+define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp {
 ; CHECK-LABEL: define i32 @vmin_u8x8(
 ; CHECK-SAME: <8 x i8> [[A:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[ENTRY:.*]]:
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1:![0-9]+]]
-; CHECK: [[BB2]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB3]]:
 ; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> [[A]]) #[[ATTR3:[0-9]+]]
 ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMINV_I]] to i8
 ; CHECK-NEXT: [[TMP0:%.*]] = xor i8 [[TMP]], 0
@@ -34,11 +26,6 @@ define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0
 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP]], 0
-; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]]
-; CHECK: [[BB7]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB8]]:
 ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]]
 ; CHECK: [[IF_THEN]]:
 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
@@ -46,9 +33,8 @@ define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: br label %[[RETURN]]
 ; CHECK: [[RETURN]]:
-; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: ret i32 [[RETVAL_0]]
 ;
 entry:
@@ -68,19 +54,11 @@ return:
 ret i32 %retval.0
 }

 declare i32 @bar(...)

-define i32 @vmin_u4x16(<4 x i16> %a) nounwind ssp #0 {
+define i32 @vmin_u4x16(<4 x i16> %a) nounwind ssp {
 ; CHECK-LABEL: define i32 @vmin_u4x16(
 ; CHECK-SAME: <4 x i16> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[ENTRY:.*]]:
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to i64
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]]
-; CHECK: [[BB2]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB3]]:
 ; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> [[A]]) #[[ATTR3]]
 ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMINV_I]] to i16
 ; CHECK-NEXT: [[TMP0:%.*]] = xor i16 [[TMP]], 0
@@ -88,11 +66,6 @@ define i32 @vmin_u4x16(<4 x i16> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i16 [[TMP1]], 0
 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i16 [[TMP]], 0
-; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]]
-; CHECK: [[BB7]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB8]]:
 ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]]
 ; CHECK: [[IF_THEN]]:
 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
@@ -100,9 +73,8 @@ define i32 @vmin_u4x16(<4 x i16> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: br label %[[RETURN]]
 ; CHECK: [[RETURN]]:
-; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: ret i32 [[RETVAL_0]]
 ;
 entry:
@@ -120,19 +92,11 @@ return:
 ret i32 %retval.0
 }

-define i32 @vmin_u8x16(<8 x i16> %a) nounwind ssp #0 {
+define i32 @vmin_u8x16(<8 x i16> %a) nounwind ssp {
 ; CHECK-LABEL: define i32 @vmin_u8x16(
 ; CHECK-SAME: <8 x i16> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[ENTRY:.*]]:
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]]
-; CHECK: [[BB2]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB3]]:
 ; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> [[A]]) #[[ATTR3]]
 ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMINV_I]] to i16
 ; CHECK-NEXT: [[TMP0:%.*]] = xor i16 [[TMP]], 0
@@ -140,11 +104,6 @@ define i32 @vmin_u8x16(<8 x i16> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i16 [[TMP1]], 0
 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i16 [[TMP]], 0
-; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]]
-; CHECK: [[BB7]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB8]]:
 ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]]
 ; CHECK: [[IF_THEN]]:
 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
@@ -152,9 +111,8 @@ define i32 @vmin_u8x16(<8 x i16> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: br label %[[RETURN]]
 ; CHECK: [[RETURN]]:
-; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: ret i32 [[RETVAL_0]]
 ;
 entry:
@@ -172,19 +130,11 @@ return:
 ret i32 %retval.0
 }

-define i32 @vmin_u16x8(<16 x i8> %a) nounwind ssp #0 {
+define i32 @vmin_u16x8(<16 x i8> %a) nounwind ssp {
 ; CHECK-LABEL: define i32 @vmin_u16x8(
 ; CHECK-SAME: <16 x i8> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[ENTRY:.*]]:
 ; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]]
-; CHECK: [[BB2]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB3]]:
 ; CHECK-NEXT: [[VMINV_I:%.*]] = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> [[A]]) #[[ATTR3]]
 ; CHECK-NEXT: [[TMP:%.*]] = trunc i32 [[VMINV_I]] to i8
 ; CHECK-NEXT: [[TMP0:%.*]] = xor i8 [[TMP]], 0
@@ -192,11 +142,6 @@ define i32 @vmin_u16x8(<16 x i8> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0
 ; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP]], 0
-; CHECK-NEXT: br i1 [[_MSPROP_ICMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]]
-; CHECK: [[BB7]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB8]]:
 ; CHECK-NEXT: br i1 [[TOBOOL]], label %[[RETURN:.*]], label %[[IF_THEN:.*]]
 ; CHECK: [[IF_THEN]]:
 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
@@ -204,9 +149,8 @@ define i32 @vmin_u16x8(<16 x i8> %a) nounwind ssp #0 {
 ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: br label %[[RETURN]]
 ; CHECK: [[RETURN]]:
-; CHECK-NEXT: [[_MSPHI_S:%.*]] = phi i32 [ [[_MSRET]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[BB8]] ]
-; CHECK-NEXT: store i32 [[_MSPHI_S]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL1]], %[[IF_THEN]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT: ret i32 [[RETVAL_0]]
 ;
 entry:
@@ -233,9 +177,9 @@ define <8 x i8> @test_vminv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 {
 ; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to i64
 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1:![0-9]+]]
 ; CHECK: [[BB3]]:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
 ; CHECK-NEXT: unreachable
 ; CHECK: [[BB4]]:
 ; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> [[A2]])
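
A note on the two instrumentation styles visible in the hunks above: the strict-check pattern restored by this revert branches to @__msan_warning_noreturn whenever any bit of the operand's shadow is set, while a handleVectorReduceIntrinsic-style handler would instead OR-reduce the operand shadow into the result shadow and let the report fire only where the result is actually used. The following is a minimal sketch of the latter shape for the v8i8 case, under the assumption that MSan routed llvm.aarch64.neon.uminv through handleVectorReduceIntrinsic (which none of these patches do); the value names are illustrative, and the exact shadow casts MSan would emit may differ:

; Hypothetical handleVectorReduceIntrinsic-style instrumentation (sketch, not
; autogenerated CHECK output):
;   %s = load <8 x i8>, ptr @__msan_param_tls, align 8        ; operand shadow
;   %sor = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %s)   ; any poisoned lane poisons the result
;   %sw = zext i8 %sor to i32                                 ; widen shadow to the i32 result type
;   %v = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a)
;   store i32 %sw, ptr @__msan_retval_tls, align 8            ; propagate shadow instead of reporting here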