From 0918660aa1b359cfbcc148f9dacf594c5cf126b7 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus
Date: Tue, 1 Apr 2025 09:52:35 -0700
Subject: [PATCH 1/8] Add tests

---
 .../AArch64/aarch64-sve-fill-spill-pair.ll    | 231 ++++++++++++++++++
 1 file changed, 231 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll

diff --git a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
new file mode 100644
index 0000000000000..d78e882e8a268
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
@@ -0,0 +1,231 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,ldp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-LDPALIGNEDONLY
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,stp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-STPALIGNEDONLY
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK-OFF
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s --check-prefixes=CHECK-OFF
+
+define void @nxv16i8(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1]
+; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv16i8:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0]
+; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv16i8:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0]
+; CHECK-OFF-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1]
+; CHECK-OFF-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-OFF-NEXT: ret
+  %vscale = tail call i64 @llvm.vscale.i64()
+  %vl = shl nuw nsw i64 %vscale, 4
+  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
+  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
+  %ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
+  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
+  store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
+  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
+  ret void
+}
+
+define void @nxv16i8_max_range(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8_max_range:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #-64, mul vl]
+; CHECK-NEXT: ldr z1, [x0, #-63, mul vl]
+; CHECK-NEXT: str z0, [x1, #63, mul vl]
+; CHECK-NEXT: str z1, [x1, #64, mul vl]
+; CHECK-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_max_range:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-64, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-63, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1, #63, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #64, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_max_range:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0, #-64, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #-63, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #63, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #64, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv16i8_max_range:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0, #-64, mul vl]
+; CHECK-OFF-NEXT: ldr z1, [x0, #-63, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1, #63, mul vl]
+; CHECK-OFF-NEXT: str z1, [x1, #64, mul vl]
+; CHECK-OFF-NEXT: ret
+  %vscale = tail call i64 @llvm.vscale.i64()
+  %ldoff1 = mul i64 %vscale, -1024
+  %ldoff2 = mul i64 %vscale, -1008
+  %stoff1 = mul i64 %vscale, 1008
+  %stoff2 = mul i64 %vscale, 1024
+  %ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
+  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
+  %stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
+  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
+  %ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
+  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
+  store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
+  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
+  ret void
+}
+
+define void @nxv16i8_outside_range(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8_outside_range:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #-65, mul vl]
+; CHECK-NEXT: ldr z1, [x0, #-64, mul vl]
+; CHECK-NEXT: str z0, [x1, #64, mul vl]
+; CHECK-NEXT: str z1, [x1, #65, mul vl]
+; CHECK-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_outside_range:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1, #64, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #65, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_outside_range:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #64, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #65, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv16i8_outside_range:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0, #-65, mul vl]
+; CHECK-OFF-NEXT: ldr z1, [x0, #-64, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1, #64, mul vl]
+; CHECK-OFF-NEXT: str z1, [x1, #65, mul vl]
+; CHECK-OFF-NEXT: ret
+  %vscale = tail call i64 @llvm.vscale.i64()
+  %ldoff1 = mul i64 %vscale, -1040
+  %ldoff2 = mul i64 %vscale, -1024
+  %stoff1 = mul i64 %vscale, 1024
+  %stoff2 = mul i64 %vscale, 1040
+  %ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
+  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
+  %stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
+  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
+  %ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
+  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
+  store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
+  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
+  ret void
+}
+
+define void @nxv16i8_2vl_stride(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8_2vl_stride:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1]
+; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0]
+; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv16i8_2vl_stride:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0]
+; CHECK-OFF-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1]
+; CHECK-OFF-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-OFF-NEXT: ret
+  %vscale = tail call i64 @llvm.vscale.i64()
+  %vl = shl nuw nsw i64 %vscale, 5
+  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
+  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
+  %ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
+  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
+  store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
+  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
+  ret void
+}
+
+define void @nxv2f64_32b_aligned(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv2f64_32b_aligned:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1]
+; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0]
+; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv2f64_32b_aligned:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0]
+; CHECK-OFF-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1]
+; CHECK-OFF-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-OFF-NEXT: ret
+  %vscale = tail call i64 @llvm.vscale.i64()
+  %vl = shl nuw nsw i64 %vscale, 4
+  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
+  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
+  %ld1 = load <vscale x 2 x double>, ptr %ldptr, align 32
+  %ld2 = load <vscale x 2 x double>, ptr %ldptr2, align 32
+  store <vscale x 2 x double> %ld1, ptr %stptr, align 32
+  store <vscale x 2 x double> %ld2, ptr %stptr2, align 32
+  ret void
+}

From c71d9cd776f847366a564bf314b02344a2410719 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus
Date: Tue, 25 Mar 2025 03:00:34 -0700
Subject: [PATCH 2/8] [AArch64][SVE] Pair SVE fill/spill into LDP/STP with -msve-vector-bits=128.

When compiling with -msve-vector-bits=128 or vscale_range(1, 1) and when
the offsets allow it, we can pair SVE LDR/STR instructions into Neon
LDP/STP.
For example, given:

```cpp
void foo(double const *ldp, double *stp) {
  svbool_t pg = svptrue_b64();
  svfloat64_t ld1 = svld1_f64(pg, ldp);
  svfloat64_t ld2 = svld1_f64(pg, ldp+svcntd());
  svst1_f64(pg, stp, ld1);
  svst1_f64(pg, stp+svcntd(), ld2);
}
```

When compiled with `-msve-vector-bits=128`, we currently generate:

```gas
foo:
  ldr z0, [x0]
  ldr z1, [x0, #1, mul vl]
  str z0, [x1]
  str z1, [x1, #1, mul vl]
  ret
```

With this patch, we instead generate:

```gas
foo:
  ldp q0, q1, [x0]
  stp q0, q1, [x1]
  ret
```

Loading (and to a lesser extent, storing) multiple registers from a
common base address is a commonly occurring pattern, but multi-register
SVE loads/stores are only supported starting with SVE2.1. This patch
offers an alternative for SVE 128-bit implementations.
---
 .../AArch64/AArch64LoadStoreOptimizer.cpp     | 48 ++++++++++++++++++-
 .../AArch64/aarch64-sve-fill-spill-pair.ll    | 43 ++++++-----------
 2 files changed, 62 insertions(+), 29 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index cd976790ebb6f..f1f1f66e12216 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -87,6 +87,10 @@ static cl::opt<unsigned> LdStConstLimit("aarch64-load-store-const-scan-limit",
 static cl::opt<bool> EnableRenaming("aarch64-load-store-renaming",
                                     cl::init(true), cl::Hidden);
 
+// Enable SVE fill/spill pairing for VLS 128.
+static cl::opt<bool> EnableSVEFillSpillPairing("aarch64-sve-fill-spill-pairing",
+                                               cl::init(true), cl::Hidden);
+
 #define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"
 
 namespace {
@@ -97,6 +101,9 @@ using LdStPairFlags = struct LdStPairFlags {
   // a pair-wise insn, and false if the reverse is true.
   bool MergeForward = false;
 
+  // Set to true when pairing SVE fill/spill instructions.
+  bool SVEFillSpillPair = false;
+
   // SExtIdx gives the index of the result of the load pair that must be
   // extended. The value of SExtIdx assumes that the paired load produces the
   // value in this order: (I, returned iterator), i.e., -1 means no value has
@@ -113,6 +120,9 @@ using LdStPairFlags = struct LdStPairFlags {
   void setMergeForward(bool V = true) { MergeForward = V; }
   bool getMergeForward() const { return MergeForward; }
 
+  void setSVEFillSpillPair(bool V = true) { SVEFillSpillPair = V; }
+  bool getSVEFillSpillPair() const { return SVEFillSpillPair; }
+
   void setSExtIdx(int V) { SExtIdx = V; }
   int getSExtIdx() const { return SExtIdx; }
 
@@ -300,6 +310,7 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
   case AArch64::STRXui:
   case AArch64::STRXpre:
   case AArch64::STURXi:
+  case AArch64::STR_ZXI:
   case AArch64::LDRDui:
   case AArch64::LDURDi:
   case AArch64::LDRDpre:
@@ -318,6 +329,7 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
   case AArch64::LDRSui:
   case AArch64::LDURSi:
   case AArch64::LDRSpre:
+  case AArch64::LDR_ZXI:
     return Opc;
   case AArch64::LDRSWui:
     return AArch64::LDRWui;
@@ -363,6 +375,7 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
     return AArch64::STPDpre;
   case AArch64::STRQui:
   case AArch64::STURQi:
+  case AArch64::STR_ZXI:
     return AArch64::STPQi;
   case AArch64::STRQpre:
     return AArch64::STPQpre;
@@ -388,6 +401,7 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
     return AArch64::LDPDpre;
   case AArch64::LDRQui:
   case AArch64::LDURQi:
+  case AArch64::LDR_ZXI:
     return AArch64::LDPQi;
   case AArch64::LDRQpre:
     return AArch64::LDPQpre;
@@ -833,6 +847,12 @@ static bool isMergeableIndexLdSt(MachineInstr &MI, int &Scale) {
   }
 }
 
+// Return true if MI is an SVE fill/spill instruction.
+static bool isPairableFillSpillInst(const MachineInstr &MI) {
+  auto const Opc = MI.getOpcode();
+  return Opc == AArch64::LDR_ZXI || Opc == AArch64::STR_ZXI;
+}
+
 static bool isRewritableImplicitDef(unsigned Opc) {
   switch (Opc) {
   default:
@@ -1227,6 +1247,15 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
       (void)MIBSXTW;
       LLVM_DEBUG(dbgs() << "  Extend operand:\n    ");
       LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
+    } else if (Flags.getSVEFillSpillPair()) {
+      // We are combining SVE fill/spill to LDP/STP, so we need to get the Q
+      // variant of the registers.
+      MachineOperand &MOp0 = MIB->getOperand(0);
+      MachineOperand &MOp1 = MIB->getOperand(1);
+      assert(AArch64::ZPRRegClass.contains(MOp0.getReg()) &&
+             AArch64::ZPRRegClass.contains(MOp1.getReg()) && "Invalid register.");
+      MOp0.setReg(AArch64::Q0 + (MOp0.getReg() - AArch64::Z0));
+      MOp1.setReg(AArch64::Q0 + (MOp1.getReg() - AArch64::Z0));
     } else {
       LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
     }
@@ -1829,6 +1858,9 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
 
   Flags.clearRenameReg();
 
+  if (isPairableFillSpillInst(FirstMI))
+    Flags.setSVEFillSpillPair();
+
   // Track which register units have been modified and used between the first
   // insn (inclusive) and the second insn.
   ModifiedRegUnits.clear();
@@ -2661,7 +2693,8 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   // Get the needed alignments to check them if
   // ldp-aligned-only/stp-aligned-only features are opted.
   uint64_t MemAlignment = MemOp->getAlign().value();
-  uint64_t TypeAlignment = Align(MemOp->getSize().getValue()).value();
+  uint64_t TypeAlignment =
+      Align(MemOp->getSize().getValue().getKnownMinValue()).value();
 
   if (MemAlignment < 2 * TypeAlignment) {
     NumFailedAlignmentCheck++;
@@ -2782,6 +2815,9 @@ bool AArch64LoadStoreOpt::tryToMergeIndexLdSt(MachineBasicBlock::iterator &MBBI,
 bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                         bool EnableNarrowZeroStOpt) {
   AArch64FunctionInfo &AFI = *MBB.getParent()->getInfo<AArch64FunctionInfo>();
+  bool const CanPairFillSpill = EnableSVEFillSpillPairing &&
+                                Subtarget->isSVEorStreamingSVEAvailable() &&
+                                Subtarget->getSVEVectorSizeInBits() == 128;
 
   bool Modified = false;
   // Four tranformations to do here:
@@ -2822,11 +2858,18 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
   }
   // 3) Find loads and stores that can be merged into a single load or store
   //    pair instruction.
+  //    When compiling for SVE 128, also try to combine SVE fill/spill
+  //    instructions into LDP/STP.
   //      e.g.,
   //        ldr x0, [x2]
   //        ldr x1, [x2, #8]
   //        ; becomes
   //        ldp x0, x1, [x2]
+  //      e.g.,
+  //        ldr z0, [x2]
+  //        ldr z1, [x2, #1, mul vl]
+  //        ; becomes
+  //        ldp q0, q1, [x2]
 
   if (MBB.getParent()->getRegInfo().tracksLiveness()) {
     DefinedInBB.clear();
@@ -2840,6 +2883,9 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
       updateDefinedRegisters(*MBBI, DefinedInBB, TRI);
       if (TII->isPairableLdStInst(*MBBI) && tryToPairLdStInst(MBBI))
         Modified = true;
+      else if (CanPairFillSpill && isPairableFillSpillInst(*MBBI) &&
+               tryToPairLdStInst(MBBI))
+        Modified = true;
       else
         ++MBBI;
     }
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
index d78e882e8a268..79120bc5352aa 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
@@ -4,28 +4,25 @@
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,stp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-STPALIGNEDONLY
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK-OFF
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s --check-prefixes=CHECK-OFF
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 -aarch64-sve-fill-spill-pairing=0 < %s | FileCheck %s --check-prefixes=CHECK-OFF
 
 define void @nxv16i8(ptr %ldptr, ptr %stptr) {
 ; CHECK-LABEL: nxv16i8:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: ldr z0, [x0]
-; CHECK-NEXT: ldr z1, [x0, #1, mul vl]
-; CHECK-NEXT: str z0, [x1]
-; CHECK-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: stp q0, q1, [x1]
 ; CHECK-NEXT: ret
 ;
 ; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8:
 ; CHECK-LDPALIGNEDONLY: // %bb.0:
 ; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
 ; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1]
-; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1]
 ; CHECK-LDPALIGNEDONLY-NEXT: ret
 ;
 ; CHECK-STPALIGNEDONLY-LABEL: nxv16i8:
 ; CHECK-STPALIGNEDONLY: // %bb.0:
-; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0]
-; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
 ; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
 ; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
 ; CHECK-STPALIGNEDONLY-NEXT: ret
@@ -51,24 +48,20 @@ define void @nxv16i8(ptr %ldptr, ptr %stptr) {
 define void @nxv16i8_max_range(ptr %ldptr, ptr %stptr) {
 ; CHECK-LABEL: nxv16i8_max_range:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: ldr z0, [x0, #-64, mul vl]
-; CHECK-NEXT: ldr z1, [x0, #-63, mul vl]
-; CHECK-NEXT: str z0, [x1, #63, mul vl]
-; CHECK-NEXT: str z1, [x1, #64, mul vl]
+; CHECK-NEXT: ldp q0, q1, [x0, #-1024]
+; CHECK-NEXT: stp q0, q1, [x1, #1008]
 ; CHECK-NEXT: ret
 ;
 ; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_max_range:
 ; CHECK-LDPALIGNEDONLY: // %bb.0:
 ; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-64, mul vl]
 ; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-63, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1, #63, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #64, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1, #1008]
 ; CHECK-LDPALIGNEDONLY-NEXT: ret
 ;
 ; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_max_range:
 ; CHECK-STPALIGNEDONLY: // %bb.0:
-; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0, #-64, mul vl]
-; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #-63, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0, #-1024]
 ; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #63, mul vl]
 ; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #64, mul vl]
 ; CHECK-STPALIGNEDONLY-NEXT: ret
@@ -190,26 +183,20 @@ define void @nxv16i8_2vl_stride(ptr %ldptr, ptr %stptr) {
 define void @nxv2f64_32b_aligned(ptr %ldptr, ptr %stptr) {
 ; CHECK-LABEL: nxv2f64_32b_aligned:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: ldr z0, [x0]
-; CHECK-NEXT: ldr z1, [x0, #1, mul vl]
-; CHECK-NEXT: str z0, [x1]
-; CHECK-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: stp q0, q1, [x1]
 ; CHECK-NEXT: ret
 ;
 ; CHECK-LDPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
 ; CHECK-LDPALIGNEDONLY: // %bb.0:
-; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
-; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
-; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1]
-; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
+; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1]
 ; CHECK-LDPALIGNEDONLY-NEXT: ret
 ;
 ; CHECK-STPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
 ; CHECK-STPALIGNEDONLY: // %bb.0:
-; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0]
-; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
-; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
-; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
+; CHECK-STPALIGNEDONLY-NEXT: stp q0, q1, [x1]
 ; CHECK-STPALIGNEDONLY-NEXT: ret
 ;
 ; CHECK-OFF-LABEL: nxv2f64_32b_aligned:

From 19cd626eb4f880164c536898d537aa7e0c1d1b92 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus
Date: Mon, 7 Apr 2025 06:26:53 -0700
Subject: [PATCH 3/8] Address comments

---
 .../AArch64/AArch64LoadStoreOptimizer.cpp     | 10 +--
 .../AArch64/aarch64-sve-fill-spill-pair.ll    | 68 ++++++++++++++++++-
 2 files changed, 73 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index f1f1f66e12216..b1240b3709c17 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -87,9 +87,10 @@ static cl::opt<unsigned> LdStConstLimit("aarch64-load-store-const-scan-limit",
 static cl::opt<bool> EnableRenaming("aarch64-load-store-renaming",
                                     cl::init(true), cl::Hidden);
 
-// Enable SVE fill/spill pairing for VLS 128.
-static cl::opt<bool> EnableSVEFillSpillPairing("aarch64-sve-fill-spill-pairing",
-                                               cl::init(true), cl::Hidden);
+// Disable SVE fill/spill pairing for VLS 128.
+static cl::opt<bool>
+    DisableSVEFillSpillPairing("aarch64-disable-sve-fill-spill-pairing",
+                               cl::init(false), cl::Hidden);
 
 #define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"
 
 namespace {
@@ -2815,7 +2816,8 @@ bool AArch64LoadStoreOpt::tryToMergeIndexLdSt(MachineBasicBlock::iterator &MBBI,
 bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                         bool EnableNarrowZeroStOpt) {
   AArch64FunctionInfo &AFI = *MBB.getParent()->getInfo<AArch64FunctionInfo>();
-  bool const CanPairFillSpill = EnableSVEFillSpillPairing &&
+  bool const CanPairFillSpill = !DisableSVEFillSpillPairing &&
+                                Subtarget->isLittleEndian() &&
                                 Subtarget->isSVEorStreamingSVEAvailable() &&
                                 Subtarget->getSVEVectorSizeInBits() == 128;
 
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
index 79120bc5352aa..a06af29fbc5ee 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
@@ -1,10 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=aarch64_be-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-BE
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,ldp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-LDPALIGNEDONLY
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,stp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-STPALIGNEDONLY
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK-OFF
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s --check-prefixes=CHECK-OFF
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 -aarch64-sve-fill-spill-pairing=0 < %s | FileCheck %s --check-prefixes=CHECK-OFF
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 -aarch64-disable-sve-fill-spill-pairing=1 < %s | FileCheck %s --check-prefixes=CHECK-OFF
 
 define void @nxv16i8(ptr %ldptr, ptr %stptr) {
 ; CHECK-LABEL: nxv16i8:
@@ -13,6 +14,15 @@ define void @nxv16i8(ptr %ldptr, ptr %stptr) {
 ; CHECK-NEXT: stp q0, q1, [x1]
 ; CHECK-NEXT: ret
 ;
+; CHECK-BE-LABEL: nxv16i8:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: ptrue p0.b
+; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, #1, mul vl]
+; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, #1, mul vl]
+; CHECK-BE-NEXT: ret
+;
 ; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8:
 ; CHECK-LDPALIGNEDONLY: // %bb.0:
 ; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
@@ -52,6 +62,25 @@ define void @nxv16i8_max_range(ptr %ldptr, ptr %stptr) {
 ; CHECK-NEXT: stp q0, q1, [x1, #1008]
 ; CHECK-NEXT: ret
 ;
+; CHECK-BE-LABEL: nxv16i8_max_range:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: rdvl x8, #1
+; CHECK-BE-NEXT: mov x9, #-1008 // =0xfffffffffffffc10
+; CHECK-BE-NEXT: mov x10, #-1024 // =0xfffffffffffffc00
+; CHECK-BE-NEXT: lsr x8, x8, #4
+; CHECK-BE-NEXT: mov w11, #1008 // =0x3f0
+; CHECK-BE-NEXT: mov w12, #1024 // =0x400
+; CHECK-BE-NEXT: ptrue p0.b
+; CHECK-BE-NEXT: mul x9, x8, x9
+; CHECK-BE-NEXT: mul x10, x8, x10
+; CHECK-BE-NEXT: mul x11, x8, x11
+; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, x9]
+; CHECK-BE-NEXT: mul x8, x8, x12
+; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0, x10]
+; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1, x11]
+; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, x8]
+; CHECK-BE-NEXT: ret
+;
 ; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_max_range:
 ; CHECK-LDPALIGNEDONLY: // %bb.0:
 ; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-64, mul vl]
 ; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-63, mul vl]
@@ -98,6 +127,25 @@ define void @nxv16i8_outside_range(ptr %ldptr, ptr %stptr) {
 ; CHECK-NEXT: str z1, [x1, #65, mul vl]
 ; CHECK-NEXT: ret
 ;
+; CHECK-BE-LABEL: nxv16i8_outside_range:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: rdvl x8, #1
+; CHECK-BE-NEXT: mov x9, #-1040 // =0xfffffffffffffbf0
+; CHECK-BE-NEXT: mov x10, #-1024 // =0xfffffffffffffc00
+; CHECK-BE-NEXT: lsr x8, x8, #4
+; CHECK-BE-NEXT: mov w11, #1024 // =0x400
+; CHECK-BE-NEXT: mov w12, #1040 // =0x410
+; CHECK-BE-NEXT: ptrue p0.b
+; CHECK-BE-NEXT: mul x9, x8, x9
+; CHECK-BE-NEXT: mul x10, x8, x10
+; CHECK-BE-NEXT: mul x11, x8, x11
+; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0, x9]
+; CHECK-BE-NEXT: mul x8, x8, x12
+; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, x10]
+; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1, x11]
+; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, x8]
+; CHECK-BE-NEXT: ret
+;
 ; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_outside_range:
 ; CHECK-LDPALIGNEDONLY: // %bb.0:
 ; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
 ; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
@@ -146,6 +194,15 @@ define void @nxv16i8_2vl_stride(ptr %ldptr, ptr %stptr) {
 ; CHECK-NEXT: str z1, [x1, #2, mul vl]
 ; CHECK-NEXT: ret
 ;
+; CHECK-BE-LABEL: nxv16i8_2vl_stride:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: ptrue p0.b
+; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, #2, mul vl]
+; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, #2, mul vl]
+; CHECK-BE-NEXT: ret
+;
 ; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
 ; CHECK-LDPALIGNEDONLY: // %bb.0:
 ; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
@@ -187,6 +244,15 @@ define void @nxv2f64_32b_aligned(ptr %ldptr, ptr %stptr) {
 ; CHECK-NEXT: stp q0, q1, [x1]
 ; CHECK-NEXT: ret
 ;
+; CHECK-BE-LABEL: nxv2f64_32b_aligned:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: ptrue p0.d
+; CHECK-BE-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-BE-NEXT: ld1d { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-BE-NEXT: st1d { z0.d }, p0, [x1]
+; CHECK-BE-NEXT: st1d { z1.d }, p0, [x1, #1, mul vl]
+; CHECK-BE-NEXT: ret
+;
 ; CHECK-LDPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
 ; CHECK-LDPALIGNEDONLY: // %bb.0:
 ; CHECK-LDPALIGNEDONLY-NEXT: ldp q0, q1, [x0]

From 5bb7624012c57de3ce43824260946bbba8b5d7a5 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus
Date: Mon, 7 Apr 2025 09:27:02 -0700
Subject: [PATCH 4/8] Remove DisableSVEFillSpillPairing

---
 llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp    | 8 +-------
 llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll | 1 -
 2 files changed, 1 insertion(+), 8 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index b1240b3709c17..aa6a31d79677a 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -87,11 +87,6 @@ static cl::opt<unsigned> LdStConstLimit("aarch64-load-store-const-scan-limit",
 static cl::opt<bool> EnableRenaming("aarch64-load-store-renaming",
                                     cl::init(true), cl::Hidden);
 
-// Disable SVE fill/spill pairing for VLS 128.
-static cl::opt<bool>
-    DisableSVEFillSpillPairing("aarch64-disable-sve-fill-spill-pairing",
-                               cl::init(false), cl::Hidden);
-
 #define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"
 
 namespace {
@@ -2816,8 +2811,7 @@ bool AArch64LoadStoreOpt::tryToMergeIndexLdSt(MachineBasicBlock::iterator &MBBI,
 bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                         bool EnableNarrowZeroStOpt) {
   AArch64FunctionInfo &AFI = *MBB.getParent()->getInfo<AArch64FunctionInfo>();
-  bool const CanPairFillSpill = !DisableSVEFillSpillPairing &&
-                                Subtarget->isLittleEndian() &&
+  bool const CanPairFillSpill = Subtarget->isLittleEndian() &&
                                 Subtarget->isSVEorStreamingSVEAvailable() &&
                                 Subtarget->getSVEVectorSizeInBits() == 128;
 
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
index a06af29fbc5ee..503ead4eba2db 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
@@ -5,7 +5,6 @@
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,stp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-STPALIGNEDONLY
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK-OFF
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s --check-prefixes=CHECK-OFF
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 -aarch64-disable-sve-fill-spill-pairing=1 < %s | FileCheck %s --check-prefixes=CHECK-OFF
 
 define void @nxv16i8(ptr %ldptr, ptr %stptr) {
 ; CHECK-LABEL: nxv16i8:

From fce728cd8d02d4e0a9cfdc44a9179523717e0c3e Mon Sep 17 00:00:00 2001
From: Ricardo Jesus
Date: Mon, 7 Apr 2025 10:02:33 -0700
Subject: [PATCH 5/8] Remove isPairableFillSpillInst

---
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp  | 14 +++++++++++++
 .../AArch64/AArch64LoadStoreOptimizer.cpp     | 21 +++++--------------
 2 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 9f8082b64ab18..5c1b8bbe29d63 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2759,6 +2759,9 @@ bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) {
   case AArch64::LDRXpre:
   case AArch64::LDURSWi:
   case AArch64::LDRSWpre:
+  // SVE instructions.
+  case AArch64::LDR_ZXI:
+  case AArch64::STR_ZXI:
     return true;
   }
 }
@@ -2911,6 +2914,17 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
     return false;
   }
 
+  // Pairing SVE fills/spills is only valid for little-endian targets that
+  // implement VLS 128.
+  switch (MI.getOpcode()) {
+  default:
+    break;
+  case AArch64::LDR_ZXI:
+  case AArch64::STR_ZXI:
+    return Subtarget.isLittleEndian() &&
+           Subtarget.getSVEVectorSizeInBits() == 128;
+  }
+
   // Check if this load/store has a hint to avoid pair formation.
   // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
   if (isLdStPairSuppressed(MI))
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index aa6a31d79677a..1f4881be54871 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -843,12 +843,6 @@ static bool isMergeableIndexLdSt(MachineInstr &MI, int &Scale) {
   }
 }
 
-// Return true if MI is an SVE fill/spill instruction.
-static bool isPairableFillSpillInst(const MachineInstr &MI) {
-  auto const Opc = MI.getOpcode();
-  return Opc == AArch64::LDR_ZXI || Opc == AArch64::STR_ZXI;
-}
-
 static bool isRewritableImplicitDef(unsigned Opc) {
   switch (Opc) {
   default:
@@ -1854,9 +1848,6 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
 
   Flags.clearRenameReg();
 
-  if (isPairableFillSpillInst(FirstMI))
-    Flags.setSVEFillSpillPair();
-
   // Track which register units have been modified and used between the first
   // insn (inclusive) and the second insn.
   ModifiedRegUnits.clear();
@@ -2675,6 +2666,11 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   MachineMemOperand *MemOp =
       MI.memoperands_empty() ? nullptr : MI.memoperands().front();
 
+  // If we are pairing SVE fill/spill, set the appropriate flag.
+  unsigned Opcode = MI.getOpcode();
+  if (Opcode == AArch64::LDR_ZXI || Opcode == AArch64::STR_ZXI)
+    Flags.setSVEFillSpillPair();
+
   // If a load/store arrives and ldp/stp-aligned-only feature is opted, check
   // that the alignment of the source pointer is at least double the alignment
   // of the type.
@@ -2811,10 +2807,6 @@ bool AArch64LoadStoreOpt::tryToMergeIndexLdSt(MachineBasicBlock::iterator &MBBI,
 bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                         bool EnableNarrowZeroStOpt) {
   AArch64FunctionInfo &AFI = *MBB.getParent()->getInfo<AArch64FunctionInfo>();
-  bool const CanPairFillSpill = Subtarget->isLittleEndian() &&
-                                Subtarget->isSVEorStreamingSVEAvailable() &&
-                                Subtarget->getSVEVectorSizeInBits() == 128;
-
   bool Modified = false;
   // Four tranformations to do here:
   // 1) Find loads that directly read from stores and promote them by
@@ -2879,9 +2871,6 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
       updateDefinedRegisters(*MBBI, DefinedInBB, TRI);
       if (TII->isPairableLdStInst(*MBBI) && tryToPairLdStInst(MBBI))
         Modified = true;
-      else if (CanPairFillSpill && isPairableFillSpillInst(*MBBI) &&
-               tryToPairLdStInst(MBBI))
-        Modified = true;
       else
         ++MBBI;
     }

From e5db0d376896510b577161aa1560b0454d52d281 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus
Date: Tue, 8 Apr 2025 01:09:26 -0700
Subject: [PATCH 6/8] Remove SVEFillSpillPair flag

---
 .../Target/AArch64/AArch64LoadStoreOptimizer.cpp  | 15 ++-------------
 1 file changed, 2 insertions(+), 13 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 1f4881be54871..f6b1a8ed162cc 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -97,9 +97,6 @@ using LdStPairFlags = struct LdStPairFlags {
   // a pair-wise insn, and false if the reverse is true.
   bool MergeForward = false;
 
-  // Set to true when pairing SVE fill/spill instructions.
-  bool SVEFillSpillPair = false;
-
   // SExtIdx gives the index of the result of the load pair that must be
   // extended. The value of SExtIdx assumes that the paired load produces the
   // value in this order: (I, returned iterator), i.e., -1 means no value has
@@ -116,9 +113,6 @@ using LdStPairFlags = struct LdStPairFlags {
   void setMergeForward(bool V = true) { MergeForward = V; }
   bool getMergeForward() const { return MergeForward; }
 
-  void setSVEFillSpillPair(bool V = true) { SVEFillSpillPair = V; }
-  bool getSVEFillSpillPair() const { return SVEFillSpillPair; }
-
   void setSExtIdx(int V) { SExtIdx = V; }
   int getSExtIdx() const { return SExtIdx; }
 
@@ -1237,8 +1231,8 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
       (void)MIBSXTW;
       LLVM_DEBUG(dbgs() << "  Extend operand:\n    ");
       LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
-    } else if (Flags.getSVEFillSpillPair()) {
-      // We are combining SVE fill/spill to LDP/STP, so we need to get the Q
+    } else if (Opc == AArch64::LDR_ZXI || Opc == AArch64::STR_ZXI) {
+      // We are combining SVE fill/spill to LDP/STP, so we need to use the Q
       // variant of the registers.
       MachineOperand &MOp0 = MIB->getOperand(0);
       MachineOperand &MOp1 = MIB->getOperand(1);
@@ -2666,11 +2660,6 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   MachineMemOperand *MemOp =
       MI.memoperands_empty() ? nullptr : MI.memoperands().front();
 
-  // If we are pairing SVE fill/spill, set the appropriate flag.
-  unsigned Opcode = MI.getOpcode();
-  if (Opcode == AArch64::LDR_ZXI || Opcode == AArch64::STR_ZXI)
-    Flags.setSVEFillSpillPair();
-
   // If a load/store arrives and ldp/stp-aligned-only feature is opted, check
   // that the alignment of the source pointer is at least double the alignment
   // of the type.

From 86e4dda81574733a27f40083e42e73364783c646 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus
Date: Tue, 8 Apr 2025 03:10:43 -0700
Subject: [PATCH 7/8] Restore unintentional whitespace change

---
 llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index f6b1a8ed162cc..dadb9c4a5801f 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -2796,6 +2796,7 @@ bool AArch64LoadStoreOpt::tryToMergeIndexLdSt(MachineBasicBlock::iterator &MBBI,
 bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                         bool EnableNarrowZeroStOpt) {
   AArch64FunctionInfo &AFI = *MBB.getParent()->getInfo<AArch64FunctionInfo>();
+
   bool Modified = false;
   // Four tranformations to do here:
   // 1) Find loads that directly read from stores and promote them by

From 929a226c16c330d0e14d961c9fab164997e7a748 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus
Date: Tue, 8 Apr 2025 08:10:43 -0700
Subject: [PATCH 8/8] Address comments

---
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp          | 5 +++--
 llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp | 1 +
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 5c1b8bbe29d63..cc4acf74359b5 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2921,8 +2921,9 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
     break;
   case AArch64::LDR_ZXI:
   case AArch64::STR_ZXI:
-    return Subtarget.isLittleEndian() &&
-           Subtarget.getSVEVectorSizeInBits() == 128;
+    if (!Subtarget.isLittleEndian() ||
+        Subtarget.getSVEVectorSizeInBits() != 128)
+      return false;
   }
 
   // Check if this load/store has a hint to avoid pair formation.
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index dadb9c4a5801f..3ab61ca2165fd 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -1240,6 +1240,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
              AArch64::ZPRRegClass.contains(MOp1.getReg()) && "Invalid register.");
       MOp0.setReg(AArch64::Q0 + (MOp0.getReg() - AArch64::Z0));
       MOp1.setReg(AArch64::Q0 + (MOp1.getReg() - AArch64::Z0));
+      LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
     } else {
       LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
     }
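
As a quick end-to-end check of the series in its final state, the self-contained IR below is a minimal sketch distilled from the nxv16i8 test above; the file name repro.ll and function name @repro are illustrative, and the llc flags are the ones from the test's RUN lines:

```llvm
; repro.ll -- exercise SVE fill/spill pairing at VLS 128. Run (sketch):
;   llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve \
;       -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 \
;       repro.ll -o -
define void @repro(ptr %ldptr, ptr %stptr) {
  ; Two adjacent VL-sized accesses (16 bytes each at VLS 128) off a common
  ; base, i.e. the pattern the load/store optimizer now folds into LDP/STP.
  %vscale = tail call i64 @llvm.vscale.i64()
  %vl = shl nuw nsw i64 %vscale, 4
  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
  %ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
  store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
  ret void
}
```

With the patches applied, this should lower to `ldp q0, q1, [x0]` / `stp q0, q1, [x1]`; on a big-endian triple, at other vector widths, or without the VLS 128 bounds, the unpaired `ldr`/`str` z-register forms should remain, matching the CHECK-BE and CHECK-OFF prefixes above.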