diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 47ee2ee507137..938dc3b8bc638 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1363,9 +1363,8 @@ static bool isSwiftError(const Value *V) {
 
 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
   const LoadInst &LI = cast<LoadInst>(U);
-
-  unsigned StoreSize = DL->getTypeStoreSize(LI.getType());
-  if (StoreSize == 0)
+  TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());
+  if (StoreSize.isZero())
     return true;
 
   ArrayRef<Register> Regs = getOrCreateVRegs(LI);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c3d1416ed518d..4bfd4d0386a86 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20909,7 +20909,7 @@ bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
   if (Op == Instruction::Add || Op == Instruction::Sub ||
       Op == Instruction::And || Op == Instruction::Or ||
       Op == Instruction::Xor || Op == Instruction::InsertElement ||
-      Op == Instruction::Xor || Op == Instruction::ShuffleVector)
+      Op == Instruction::ShuffleVector || Op == Instruction::Load)
     return false;
   if (Inst.getType()->isScalableTy())
     return false;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
new file mode 100644
index 0000000000000..31b3c3fe3c5be
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
@@ -0,0 +1,948 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx1i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx1i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 1 x i8>, ptr %pa
+  ret <vscale x 1 x i8> %va
+}
+
+define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 2 x i8>, ptr %pa
+  ret <vscale x 2 x i8> %va
+}
+
+define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx4i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 4 x i8>, ptr %pa
+  ret <vscale x 4 x i8> %va
+}
+
+define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx8i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx8i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 8 x i8>, ptr %pa
+  ret <vscale x 8 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx16i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx16i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 16 x i8>, ptr %pa
+  ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx32i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: vload_nx32i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+  %va = load <vscale x 32 x i8>, ptr %pa
+  ret <vscale x 32 x i8> %va
+}
+
+define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx64i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: vload_nx64i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+  %va = load <vscale x 64 x i8>, ptr %pa
+  ret <vscale x 64 x i8> %va
+}
+
+define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx1i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx1i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 1 x i16>, ptr %pa
+  ret <vscale x 1 x i16> %va
+}
+
+define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 2 x i16>, ptr %pa
+  ret <vscale x 2 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx4i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 4 x i16>, ptr %pa
+  ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx8i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx8i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 8 x i16>, ptr %pa
+  ret <vscale x 8 x i16> %va
+}
+
+define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx16i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+  ; RV32-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: vload_nx16i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+  ; RV64-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+  %va = load <vscale x 16 x i16>, ptr %pa
+  ret <vscale x 16 x i16> %va
+}
+
+define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx32i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+  ; RV32-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: vload_nx32i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+  ; RV64-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+  %va = load <vscale x 32 x i16>, ptr %pa
+  ret <vscale x 32 x i16> %va
+}
+
+define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx1i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx1i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 1 x i32>, ptr %pa
+  ret <vscale x 1 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 2 x i32>, ptr %pa
+  ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx4i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 4 x i32>, ptr %pa
+  ret <vscale x 4 x i32> %va
+}
+
+define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx8i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+  ; RV32-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: vload_nx8i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+  ; RV64-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+  %va = load <vscale x 8 x i32>, ptr %pa
+  ret <vscale x 8 x i32> %va
+}
+
+define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx16i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+  ; RV32-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: vload_nx16i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+  ; RV64-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+  %va = load <vscale x 16 x i32>, ptr %pa
+  ret <vscale x 16 x i32> %va
+}
+
+define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx1i64
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx1i64
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 1 x i64>, ptr %pa
+  ret <vscale x 1 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i64
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx2i64
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 2 x i64>, ptr %pa
+  ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i64
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+  ; RV32-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: vload_nx4i64
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+  ; RV64-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+  %va = load <vscale x 4 x i64>, ptr %pa
+  ret <vscale x 4 x i64> %va
+}
+
+define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx8i64
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+  ; RV32-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: vload_nx8i64
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+  ; RV64-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+  %va = load <vscale x 8 x i64>, ptr %pa
+  ret <vscale x 8 x i64> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8_align1(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx16i8_align1
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx16i8_align1
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 16 x i8>, ptr %pa, align 1
+  ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8_align2(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx16i8_align2
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx16i8_align2
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 16 x i8>, ptr %pa, align 2
+  ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8_align16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx16i8_align16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx16i8_align16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 16 x i8>, ptr %pa, align 16
+  ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8_align64(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx16i8_align64
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx16i8_align64
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 16 x i8>, ptr %pa, align 64
+  ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align1(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i16_align1
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 1)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx4i16_align1
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 1)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 4 x i16>, ptr %pa, align 1
+  ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align2(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i16_align2
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx4i16_align2
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 4 x i16>, ptr %pa, align 2
+  ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align4(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i16_align4
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx4i16_align4
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 4 x i16>, ptr %pa, align 4
+  ret <vscale x 4 x i16> %va
+}
+define <vscale x 4 x i16> @vload_nx4i16_align8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i16_align8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx4i16_align8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 4 x i16>, ptr %pa, align 8
+  ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16_align16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i16_align16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx4i16_align16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 4 x i16>, ptr %pa, align 16
+  ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align2(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i32_align2
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 2)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2i32_align2
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 2)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 2 x i32>, ptr %pa, align 2
+  ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align4(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i32_align4
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2i32_align4
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 2 x i32>, ptr %pa, align 4
+  ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i32_align8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2i32_align8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 2 x i32>, ptr %pa, align 8
+  ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i32_align16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2i32_align16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 2 x i32>, ptr %pa, align 16
+  ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32_align256(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i32_align256
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2i32_align256
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 2 x i32>, ptr %pa, align 256
+  ret <vscale x 2 x i32> %va
+}
+define <vscale x 2 x i64> @vload_nx2i64_align4(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i64_align4
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 4)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx2i64_align4
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 4)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 2 x i64>, ptr %pa, align 4
+  ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64_align8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i64_align8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx2i64_align8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 2 x i64>, ptr %pa, align 8
+  ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64_align16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i64_align16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx2i64_align16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 2 x i64>, ptr %pa, align 16
+  ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i64_align32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx2i64_align32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 2 x i64>, ptr %pa, align 32
+  ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx1ptr
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx1ptr
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 1 x ptr>, ptr %pa
+  ret <vscale x 1 x ptr> %va
+}
+
+define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2ptr
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2ptr
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x p0>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 2 x ptr>, ptr %pa
+  ret <vscale x 2 x ptr> %va
+}
+
+define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx8ptr
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+  ; RV32-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: vload_nx8ptr
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+  ; RV64-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 8 x p0>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+  %va = load <vscale x 8 x ptr>, ptr %pa
+  ret <vscale x 8 x ptr> %va
+}