@@ -766,7 +766,7 @@ multiclass RVVUnitStridedSegLoadTuple<string op> {
         IntrinsicTypes = {ResultType, Ops[0]->getType(), Ops.back()->getType()};
       else
         IntrinsicTypes = {ResultType, Ops.back()->getType()};
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 6> Operands;
 
       bool NoPassthru =
         (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
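This hunk and the six like it below only right-size each SmallVector's inline capacity to the worst-case operand count the surrounding codegen actually pushes. The template argument is a stack-space hint, not a size cap, so shrinking 12 to 5-7 cannot change behavior. A minimal standalone sketch (not from this patch; plain LLVM ADT usage) of that growth behavior:

    // Standalone sketch: SmallVector's inline capacity is only a hint.
    #include "llvm/ADT/SmallVector.h"
    #include <cassert>

    int main() {
      llvm::SmallVector<int, 6> V;  // room for 6 elements in-place
      for (int I = 0; I < 7; ++I)
        V.push_back(I);             // the 7th push spills to the heap
      assert(V.size() == 7);        // correctness is unchanged either way
      return 0;
    }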
@@ -836,7 +836,7 @@ multiclass RVVUnitStridedSegStoreTuple<string op> {
       // Intrinsic: (tuple, ptr, vl)
       unsigned Offset = IsMasked ? 1 : 0;
 
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 5> Operands;
       Operands.push_back(Ops[Offset + 1]); // tuple
       Operands.push_back(Ops[Offset]); // Ptr
       if (IsMasked)
@@ -886,7 +886,7 @@ multiclass RVVUnitStridedSegLoadFFTuple<string op> {
         IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[0]->getType()};
       else
         IntrinsicTypes = {ResultType, Ops.back()->getType()};
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 6> Operands;
 
       bool NoPassthru =
         (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
@@ -961,7 +961,7 @@ multiclass RVVStridedSegLoadTuple<string op> {
         IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[0]->getType()};
       else
         IntrinsicTypes = {ResultType, Ops.back()->getType()};
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 7> Operands;
 
       bool NoPassthru =
         (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
@@ -1033,7 +1033,7 @@ multiclass RVVStridedSegStoreTuple<string op> {
       // Intrinsic: (tuple, ptr, stride, vl)
       unsigned Offset = IsMasked ? 1 : 0;
 
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 6> Operands;
       Operands.push_back(Ops[Offset + 2]); // tuple
       Operands.push_back(Ops[Offset]); // Ptr
       Operands.push_back(Ops[Offset + 1]); // Stride
@@ -1075,7 +1075,7 @@ multiclass RVVIndexedSegLoadTuple<string op> {
                                 []<string>)),
       ManualCodegen = [{
     {
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 7> Operands;
 
       bool NoPassthru =
         (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
@@ -1150,7 +1150,7 @@ multiclass RVVIndexedSegStoreTuple<string op> {
       // Intrinsic: (tuple, ptr, index, vl)
       unsigned Offset = IsMasked ? 1 : 0;
 
-      SmallVector<llvm::Value*, 12> Operands;
+      SmallVector<llvm::Value*, 6> Operands;
       Operands.push_back(Ops[Offset + 2]); // tuple
       Operands.push_back(Ops[Offset]); // Ptr
       Operands.push_back(Ops[Offset + 1]); // Idx
@@ -2536,24 +2536,22 @@ let HasMasked = false, HasVL = false, IRName = "" in {
       ManualCodegen = [{
       {
         auto *VecTy = cast<ScalableVectorType>(ResultType);
-        // Mask to only valid indices.
-        Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
         if (auto *OpVecTy = dyn_cast<ScalableVectorType>(Ops[0]->getType())) {
           unsigned MaxIndex = OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
           assert(isPowerOf2_32(MaxIndex));
+          // Mask to only valid indices.
+          Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
           Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
           Ops[1] = Builder.CreateMul(Ops[1],
                                      ConstantInt::get(Ops[1]->getType(),
                                                       VecTy->getMinNumElements()));
           return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
         }
 
-        bool IsRISCV64 = getTarget().getTriple().isRISCV64();
-        llvm::Type *XLenTy = IsRISCV64 ? Builder.getInt64Ty() :
-                                         Builder.getInt32Ty();
         return Builder.CreateIntrinsic(Intrinsic::riscv_vector_extract,
-                                       {ResultType, Ops[0]->getType(), XLenTy},
-                                       {Ops[0], Ops[1]});
+                                       {ResultType, Ops[0]->getType()},
+                                       {Ops[0], Builder.CreateZExt(Ops[1],
+                                                Builder.getInt32Ty())});
       }
       }] in {
     foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
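The net effect of this hunk (and the symmetric vset_v hunk below) is that the fallback path no longer derives an index type from the target's XLEN: riscv_vector_extract now takes a fixed i32 index, so the triple query and the extra overload type drop out. A hedged sketch of the resulting call shape, using a hypothetical helper name and assuming the incoming index is i32 or narrower (IRBuilder's CreateZExt folds a same-type cast to a no-op):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/IntrinsicsRISCV.h"

    // Hypothetical helper (illustration only); mirrors the '+' lines above.
    llvm::Value *emitTupleExtract(llvm::IRBuilderBase &Builder,
                                  llvm::Type *ResultType, llvm::Value *Tuple,
                                  llvm::Value *Index) {
      // Overload list shrinks to {ResultType, TupleTy}; the index is always i32.
      return Builder.CreateIntrinsic(
          llvm::Intrinsic::riscv_vector_extract,
          {ResultType, Tuple->getType()},
          {Tuple, Builder.CreateZExt(Index, Builder.getInt32Ty())});
    }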
@@ -2574,25 +2572,23 @@ let HasMasked = false, HasVL = false, IRName = "" in {
   let Name = "vset_v", MaskedPolicyScheme = NonePolicy,
       ManualCodegen = [{
       {
-        auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
-        // Mask to only valid indices.
-        Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
         if (auto *ResVecTy = dyn_cast<ScalableVectorType>(ResultType)) {
+          auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
           unsigned MaxIndex = ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
           assert(isPowerOf2_32(MaxIndex));
+          // Mask to only valid indices.
+          Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
           Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
           Ops[1] = Builder.CreateMul(Ops[1],
                                      ConstantInt::get(Ops[1]->getType(),
                                                       VecTy->getMinNumElements()));
           return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
         }
 
-        bool IsRISCV64 = getTarget().getTriple().isRISCV64();
-        llvm::Type *XLenTy = IsRISCV64 ? Builder.getInt64Ty() :
-                                         Builder.getInt32Ty();
         return Builder.CreateIntrinsic(Intrinsic::riscv_vector_insert,
-                                       {ResultType, Ops[2]->getType(), XLenTy},
-                                       {Ops[0], Ops[2], Ops[1]});
+                                       {ResultType, Ops[2]->getType()},
+                                       {Ops[0], Ops[2],
+                                        Builder.CreateZExt(Ops[1], Builder.getInt32Ty())});
       }
       }] in {
     foreach dst_lmul = ["(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
@@ -2618,23 +2614,19 @@ let HasMasked = false, HasVL = false, IRName = "" in {
     {
       llvm::Value *ReturnVector = llvm::PoisonValue::get(ResultType);
       auto *VecTy = cast<ScalableVectorType>(Ops[0]->getType());
-      bool IsRISCV64 = getTarget().getTriple().isRISCV64();
-      llvm::Type *XLenTy = IsRISCV64 ? Builder.getInt64Ty() :
-                                       Builder.getInt32Ty();
       for (unsigned I = 0, N = Ops.size(); I < N; ++I) {
-        llvm::Value *Idx =
-          ConstantInt::get(Builder.getInt64Ty(),
-                           isa<ScalableVectorType>(ResultType) ?
-                           VecTy->getMinNumElements() * I : I);
-
-        if (isa<ScalableVectorType>(ResultType))
+        if (isa<ScalableVectorType>(ResultType)) {
+          llvm::Value *Idx = ConstantInt::get(Builder.getInt64Ty(),
+                                              VecTy->getMinNumElements() * I);
           ReturnVector =
             Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx);
-        else
+        } else {
+          llvm::Value *Idx = ConstantInt::get(Builder.getInt32Ty(), I);
           ReturnVector =
             Builder.CreateIntrinsic(Intrinsic::riscv_vector_insert,
-                                    {ResultType, Ops[I]->getType(), XLenTy},
+                                    {ResultType, Ops[I]->getType()},
                                     {ReturnVector, Ops[I], Idx});
+        }
 
       }
       return ReturnVector;
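After this restructuring, each branch of the vcreate loop computes its own index with the type its target operation expects: llvm.vector.insert (via CreateInsertVector) takes an i64 element offset, while the riscv_vector_insert tuple path now takes a fixed i32 field index. A hedged, self-contained sketch of one loop iteration (hypothetical helper and parameter names, not from the patch):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/IntrinsicsRISCV.h"

    llvm::Value *insertPiece(llvm::IRBuilderBase &Builder,
                             llvm::Type *ResultType, llvm::Value *Acc,
                             llvm::Value *Piece, unsigned I,
                             unsigned MinEltsPerPiece) {
      if (llvm::isa<llvm::ScalableVectorType>(ResultType)) {
        // Scalable result: plain subvector insert at an i64 element offset.
        llvm::Value *Idx =
            llvm::ConstantInt::get(Builder.getInt64Ty(), MinEltsPerPiece * I);
        return Builder.CreateInsertVector(ResultType, Acc, Piece, Idx);
      }
      // Tuple result: the RISC-V intrinsic takes a fixed i32 field index.
      llvm::Value *Idx = llvm::ConstantInt::get(Builder.getInt32Ty(), I);
      return Builder.CreateIntrinsic(llvm::Intrinsic::riscv_vector_insert,
                                     {ResultType, Piece->getType()},
                                     {Acc, Piece, Idx});
    }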