[AMDGPU] Add BFX Formation Combines to RegBankCombiner #141590


Open · wants to merge 1 commit into base: users/pierre-vh/lower-sbfe-in-rbcomb
29 changes: 29 additions & 0 deletions llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -4636,10 +4636,17 @@ bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())
return false;

const RegisterBank *RB = getRegBank(ShiftSrc);

MatchInfo = [=](MachineIRBuilder &B) {
auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
auto Cst2 = B.buildConstant(ExtractTy, Width);
B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);

if (RB) {
MRI.setRegBank(Cst1.getReg(0), *RB);
MRI.setRegBank(Cst2.getReg(0), *RB);
}
};
return true;
}
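Note on the pattern above: the RegBankCombiner runs after register bank selection, so any constant materialized by a rewrite must be assigned a bank explicitly, or later passes will see registers with no bank. The `getRegBank(ShiftSrc)` query is presumably the small helper introduced on the base branch; a minimal sketch of what it likely does, assuming it simply forwards to `MachineRegisterInfo` (hypothetical, for illustration only):

```cpp
// Hypothetical sketch of the getRegBank(Register) helper this patch calls.
// MachineRegisterInfo::getRegBankOrNull is the existing query it would
// forward to: it returns the bank already assigned to Reg, or nullptr when
// none is set (e.g. when the combine runs before regbankselect).
const RegisterBank *CombinerHelper::getRegBank(Register Reg) const {
  return MRI.getRegBankOrNull(Reg);
}
```

The null check in each apply lambda keeps the combine usable in pre-regbankselect pipelines, where there is no bank to propagate yet.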
@@ -4674,10 +4681,18 @@ bool CombinerHelper::matchBitfieldExtractFromAnd(MachineInstr &MI,
return false;

uint64_t Width = APInt(Size, AndImm).countr_one();

const RegisterBank *RB = getRegBank(ShiftSrc);

MatchInfo = [=](MachineIRBuilder &B) {
auto WidthCst = B.buildConstant(ExtractTy, Width);
auto LSBCst = B.buildConstant(ExtractTy, LSBImm);
B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});

if (RB) {
MRI.setRegBank(WidthCst.getReg(0), *RB);
MRI.setRegBank(LSBCst.getReg(0), *RB);
}
};
return true;
}
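For reference, the arithmetic this matcher relies on: a logical shift right by `LSBImm` followed by an `and` with a mask of `Width` trailing ones is exactly an unsigned bitfield extract of `Width` bits starting at bit `LSBImm`. A self-contained sketch of the equivalence (illustrative C++, not part of the patch):

```cpp
#include <cassert>
#include <cstdint>

// ubfx(X, Pos, Width): extract Width bits of X starting at bit Pos,
// zero-extended. The Width == 32 branch avoids the undefined 1u << 32.
static uint32_t ubfx(uint32_t X, unsigned Pos, unsigned Width) {
  uint32_t Mask = (Width < 32 ? (1u << Width) : 0u) - 1u;
  return (X >> Pos) & Mask;
}

int main() {
  uint32_t X = 0xDEADBEEF;
  // Matched shape: (X >> 8) & 0xFFF, where 0xFFF has 12 trailing ones,
  // so LSBImm = 8 and Width = countr_one(0xFFF) = 12.
  assert(((X >> 8) & 0xFFFu) == ubfx(X, /*Pos=*/8, /*Width=*/12));
  return 0;
}
```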
@@ -4724,10 +4739,17 @@ bool CombinerHelper::matchBitfieldExtractFromShr(
const int64_t Pos = ShrAmt - ShlAmt;
const int64_t Width = Size - ShrAmt;

const RegisterBank *RB = getRegBank(ShlSrc);

MatchInfo = [=](MachineIRBuilder &B) {
auto WidthCst = B.buildConstant(ExtractTy, Width);
auto PosCst = B.buildConstant(ExtractTy, Pos);
B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst});

if (RB) {
MRI.setRegBank(WidthCst.getReg(0), *RB);
MRI.setRegBank(PosCst.getReg(0), *RB);
}
};
return true;
}
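Worked instance of the `Pos`/`Width` computation above: with `Size = 32`, `ShlAmt = 16`, and `ShrAmt = 16`, we get `Pos = 0` and `Width = 16`, i.e. the classic sign-extend-in-register shift pair becomes `G_SBFX x, 0, 16`. A standalone check (illustrative C++; the left shift goes through unsigned to sidestep signed-overflow UB):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  int32_t X = 0x00009234; // bit 15 set, so negative when read as i16
  // (x << 16) >> 16 with an arithmetic right shift ...
  int32_t ViaShifts = (int32_t)((uint32_t)X << 16) >> 16;
  // ... equals a signed bitfield extract of 16 bits at position 0,
  // which is plain sign extension from i16.
  assert(ViaShifts == (int32_t)(int16_t)X);
  assert(ViaShifts == (int32_t)0xFFFF9234);
  return 0;
}
```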
@@ -4782,10 +4804,17 @@ bool CombinerHelper::matchBitfieldExtractFromShrAnd(
if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt == Size)
return false;

const RegisterBank *RB = getRegBank(AndSrc);

MatchInfo = [=](MachineIRBuilder &B) {
auto WidthCst = B.buildConstant(ExtractTy, Width);
auto PosCst = B.buildConstant(ExtractTy, Pos);
B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst});

if (RB) {
MRI.setRegBank(WidthCst.getReg(0), *RB);
MRI.setRegBank(PosCst.getReg(0), *RB);
}
};
return true;
}
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/AMDGPUCombine.td
@@ -210,5 +210,5 @@ def AMDGPURegBankCombiner : GICombiner<
fp_minmax_to_clamp, fp_minmax_to_med3, fmed3_intrinsic_to_clamp,
identity_combines, redundant_and, constant_fold_cast_op,
cast_of_cast_combines, sext_trunc, zext_of_shift_amount_combines,
lower_uniform_sbfx, lower_uniform_ubfx]> {
lower_uniform_sbfx, lower_uniform_ubfx, form_bitfield_extract]> {
}
119 changes: 56 additions & 63 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
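The deltas below are the interesting part: with `form_bitfield_extract` enabled, the GFX8 `s_lshr_b32` + `s_sext_i32_i16` pairs that used to extract the high half collapse into a single `s_bfe_i32 ..., 0x100010`. Assuming the usual SGPR BFE operand packing (offset in bits [5:0], width in bits [22:16]), `0x100010` encodes a 16-bit extract starting at bit 16; a tiny illustrative check (names are mine, not from the patch):

```cpp
#include <cstdint>

// Pack an SGPR BFE control operand: offset in [5:0], width in [22:16]
// (assumed encoding; helper name is hypothetical).
constexpr uint32_t sBfeOperand(uint32_t Offset, uint32_t Width) {
  return (Width << 16) | Offset;
}
static_assert(sBfeOperand(/*Offset=*/16, /*Width=*/16) == 0x100010,
              "s_bfe_i32 s0, s0, 0x100010 sign-extracts bits [31:16]");

int main() { return 0; }
```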
@@ -811,16 +811,15 @@ define amdgpu_ps i32 @s_ashr_v2i16(<2 x i16> inreg %value, <2 x i16> inreg %amou
;
; GFX8-LABEL: s_ashr_v2i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_lshr_b32 s2, s0, 16
; GFX8-NEXT: s_sext_i32_i16 s0, s0
; GFX8-NEXT: s_lshr_b32 s3, s1, 16
; GFX8-NEXT: s_ashr_i32 s0, s0, s1
; GFX8-NEXT: s_sext_i32_i16 s1, s2
; GFX8-NEXT: s_ashr_i32 s1, s1, s3
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_lshr_b32 s2, s1, 16
; GFX8-NEXT: s_sext_i32_i16 s3, s0
; GFX8-NEXT: s_bfe_i32 s0, s0, 0x100010
; GFX8-NEXT: s_ashr_i32 s0, s0, s2
; GFX8-NEXT: s_ashr_i32 s1, s3, s1
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_lshl_b32 s1, s1, 16
; GFX8-NEXT: s_or_b32 s0, s0, s1
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_lshl_b32 s0, s0, 16
; GFX8-NEXT: s_or_b32 s0, s1, s0
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_ashr_v2i16:
@@ -1014,26 +1013,24 @@ define amdgpu_ps <2 x i32> @s_ashr_v4i16(<4 x i16> inreg %value, <4 x i16> inreg
;
; GFX8-LABEL: s_ashr_v4i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_lshr_b32 s4, s0, 16
; GFX8-NEXT: s_sext_i32_i16 s0, s0
; GFX8-NEXT: s_lshr_b32 s6, s2, 16
; GFX8-NEXT: s_ashr_i32 s0, s0, s2
; GFX8-NEXT: s_sext_i32_i16 s2, s4
; GFX8-NEXT: s_lshr_b32 s5, s1, 16
; GFX8-NEXT: s_ashr_i32 s2, s2, s6
; GFX8-NEXT: s_sext_i32_i16 s1, s1
; GFX8-NEXT: s_lshr_b32 s7, s3, 16
; GFX8-NEXT: s_ashr_i32 s1, s1, s3
; GFX8-NEXT: s_sext_i32_i16 s3, s5
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_ashr_i32 s3, s3, s7
; GFX8-NEXT: s_lshr_b32 s4, s2, 16
; GFX8-NEXT: s_sext_i32_i16 s6, s0
; GFX8-NEXT: s_bfe_i32 s0, s0, 0x100010
; GFX8-NEXT: s_lshr_b32 s5, s3, 16
; GFX8-NEXT: s_ashr_i32 s0, s0, s4
; GFX8-NEXT: s_sext_i32_i16 s4, s1
; GFX8-NEXT: s_bfe_i32 s1, s1, 0x100010
; GFX8-NEXT: s_ashr_i32 s2, s6, s2
; GFX8-NEXT: s_ashr_i32 s1, s1, s5
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_lshl_b32 s2, s2, 16
; GFX8-NEXT: s_or_b32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s2, 0xffff, s3
; GFX8-NEXT: s_ashr_i32 s3, s4, s3
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_lshl_b32 s0, s0, 16
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_lshl_b32 s2, s2, 16
; GFX8-NEXT: s_or_b32 s1, s1, s2
; GFX8-NEXT: s_or_b32 s0, s2, s0
; GFX8-NEXT: s_and_b32 s2, 0xffff, s3
; GFX8-NEXT: s_lshl_b32 s1, s1, 16
; GFX8-NEXT: s_or_b32 s1, s2, s1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_ashr_v4i16:
@@ -1223,46 +1220,42 @@ define amdgpu_ps <4 x i32> @s_ashr_v8i16(<8 x i16> inreg %value, <8 x i16> inreg
;
; GFX8-LABEL: s_ashr_v8i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_lshr_b32 s8, s0, 16
; GFX8-NEXT: s_sext_i32_i16 s0, s0
; GFX8-NEXT: s_lshr_b32 s12, s4, 16
; GFX8-NEXT: s_ashr_i32 s0, s0, s4
; GFX8-NEXT: s_sext_i32_i16 s4, s8
; GFX8-NEXT: s_lshr_b32 s9, s1, 16
; GFX8-NEXT: s_ashr_i32 s4, s4, s12
; GFX8-NEXT: s_sext_i32_i16 s1, s1
; GFX8-NEXT: s_lshr_b32 s13, s5, 16
; GFX8-NEXT: s_ashr_i32 s1, s1, s5
; GFX8-NEXT: s_sext_i32_i16 s5, s9
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
; GFX8-NEXT: s_lshr_b32 s10, s2, 16
; GFX8-NEXT: s_ashr_i32 s5, s5, s13
; GFX8-NEXT: s_sext_i32_i16 s2, s2
; GFX8-NEXT: s_lshr_b32 s8, s4, 16
; GFX8-NEXT: s_sext_i32_i16 s12, s0
; GFX8-NEXT: s_bfe_i32 s0, s0, 0x100010
; GFX8-NEXT: s_lshr_b32 s9, s5, 16
; GFX8-NEXT: s_ashr_i32 s0, s0, s8
; GFX8-NEXT: s_sext_i32_i16 s8, s1
; GFX8-NEXT: s_bfe_i32 s1, s1, 0x100010
; GFX8-NEXT: s_lshr_b32 s10, s6, 16
; GFX8-NEXT: s_ashr_i32 s4, s12, s4
; GFX8-NEXT: s_ashr_i32 s5, s8, s5
; GFX8-NEXT: s_ashr_i32 s1, s1, s9
; GFX8-NEXT: s_sext_i32_i16 s8, s2
; GFX8-NEXT: s_bfe_i32 s2, s2, 0x100010
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_lshl_b32 s4, s4, 16
; GFX8-NEXT: s_lshr_b32 s14, s6, 16
; GFX8-NEXT: s_ashr_i32 s2, s2, s6
; GFX8-NEXT: s_sext_i32_i16 s6, s10
; GFX8-NEXT: s_or_b32 s0, s0, s4
; GFX8-NEXT: s_and_b32 s4, 0xffff, s5
; GFX8-NEXT: s_lshr_b32 s11, s3, 16
; GFX8-NEXT: s_ashr_i32 s6, s6, s14
; GFX8-NEXT: s_sext_i32_i16 s3, s3
; GFX8-NEXT: s_lshr_b32 s11, s7, 16
; GFX8-NEXT: s_ashr_i32 s6, s8, s6
; GFX8-NEXT: s_ashr_i32 s2, s2, s10
; GFX8-NEXT: s_sext_i32_i16 s8, s3
; GFX8-NEXT: s_bfe_i32 s3, s3, 0x100010
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
; GFX8-NEXT: s_lshl_b32 s0, s0, 16
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_lshl_b32 s4, s4, 16
; GFX8-NEXT: s_lshr_b32 s15, s7, 16
; GFX8-NEXT: s_ashr_i32 s3, s3, s7
; GFX8-NEXT: s_sext_i32_i16 s7, s11
; GFX8-NEXT: s_or_b32 s1, s1, s4
; GFX8-NEXT: s_and_b32 s4, 0xffff, s6
; GFX8-NEXT: s_ashr_i32 s7, s7, s15
; GFX8-NEXT: s_ashr_i32 s3, s3, s11
; GFX8-NEXT: s_or_b32 s0, s4, s0
; GFX8-NEXT: s_and_b32 s4, 0xffff, s5
; GFX8-NEXT: s_lshl_b32 s1, s1, 16
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_lshl_b32 s4, s4, 16
; GFX8-NEXT: s_or_b32 s2, s2, s4
; GFX8-NEXT: s_and_b32 s4, 0xffff, s7
; GFX8-NEXT: s_ashr_i32 s7, s8, s7
; GFX8-NEXT: s_or_b32 s1, s4, s1
; GFX8-NEXT: s_and_b32 s4, 0xffff, s6
; GFX8-NEXT: s_lshl_b32 s2, s2, 16
; GFX8-NEXT: s_and_b32 s3, 0xffff, s3
; GFX8-NEXT: s_lshl_b32 s4, s4, 16
; GFX8-NEXT: s_or_b32 s3, s3, s4
; GFX8-NEXT: s_or_b32 s2, s4, s2
; GFX8-NEXT: s_and_b32 s4, 0xffff, s7
; GFX8-NEXT: s_lshl_b32 s3, s3, 16
; GFX8-NEXT: s_or_b32 s3, s4, s3
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_ashr_v8i16: