[CVP] Narrow SDiv/SRem to the smallest power-of-2 that's sufficient to contain its operands

This is practically identical to what we already do for UDiv/URem:
  https://rise4fun.com/Alive/04K

Name: narrow udiv
Pre: C0 u<= 255 && C1 u<= 255
%r = udiv i16 C0, C1
  =>
%t0 = trunc i16 C0 to i8
%t1 = trunc i16 C1 to i8
%t2 = udiv i8 %t0, %t1
%r = zext i8 %t2 to i16

Name: narrow exact udiv
Pre: C0 u<= 255 && C1 u<= 255
%r = udiv exact i16 C0, C1
  =>
%t0 = trunc i16 C0 to i8
%t1 = trunc i16 C1 to i8
%t2 = udiv exact i8 %t0, %t1
%r = zext i8 %t2 to i16

Name: narrow urem
Pre: C0 u<= 255 && C1 u<= 255
%r = urem i16 C0, C1
  =>
%t0 = trunc i16 C0 to i8
%t1 = trunc i16 C1 to i8
%t2 = urem i8 %t0, %t1
%r = zext i8 %t2 to i16

... only here we need to look for 'min signed bits', not 'active bits',
and there's UB to be aware of:
  https://rise4fun.com/Alive/KG86
  https://rise4fun.com/Alive/LwR

Name: narrow sdiv
Pre: C0 <= 127 && C1 <= 127 && C0 >= -128 && C1 >= -128
%r = sdiv i16 C0, C1
  =>
%t0 = trunc i16 C0 to i9
%t1 = trunc i16 C1 to i9
%t2 = sdiv i9 %t0, %t1
%r = sext i9 %t2 to i16

Name: narrow exact sdiv
Pre: C0 <= 127 && C1 <= 127 && C0 >= -128 && C1 >= -128
%r = sdiv exact i16 C0, C1
  =>
%t0 = trunc i16 C0 to i9
%t1 = trunc i16 C1 to i9
%t2 = sdiv exact i9 %t0, %t1
%r = sext i9 %t2 to i16

Name: narrow srem
Pre: C0 <= 127 && C1 <= 127 && C0 >= -128 && C1 >= -128
%r = srem i16 C0, C1
  =>
%t0 = trunc i16 C0 to i9
%t1 = trunc i16 C1 to i9
%t2 = srem i9 %t0, %t1
%r = sext i9 %t2 to i16


Name: narrow sdiv
Pre: C0 <= 127 && C1 <= 127 && C0 >= -128 && C1 >= -128 && !(C0 == -128 && C1 == -1)
%r = sdiv i16 C0, C1
  =>
%t0 = trunc i16 C0 to i8
%t1 = trunc i16 C1 to i8
%t2 = sdiv i8 %t0, %t1
%r = sext i8 %t2 to i16

Name: narrow exact sdiv
Pre: C0 <= 127 && C1 <= 127 && C0 >= -128 && C1 >= -128 && !(C0 == -128 && C1 == -1)
%r = sdiv exact i16 C0, C1
  =>
%t0 = trunc i16 C0 to i8
%t1 = trunc i16 C1 to i8
%t2 = sdiv exact i8 %t0, %t1
%r = sext i8 %t2 to i16

Name: narrow srem
Pre: C0 <= 127 && C1 <= 127 && C0 >= -128 && C1 >= -128 && !(C0 == -128 && C1 == -1)
%r = srem i16 C0, C1
  =>
%t0 = trunc i16 C0 to i8
%t1 = trunc i16 C1 to i8
%t2 = srem i8 %t0, %t1
%r = sext i8 %t2 to i16


The ConstantRangeTest.losslessSignedTruncationSignext test sanity-checks
the logic, that we can losslessly truncate ConstantRange to
`getMinSignedBits()` and signext it back, and it will be identical
to the original CR.

On vanilla llvm test-suite + RawSpeed, this fires 1262 times,
while the same fold for UDiv/URem only fires 384 times. Sic!

Additionally, this causes +606.18% (+1079) extra cases of
aggressive-instcombine.NumDAGsReduced, and +473.14% (+1145)
of aggressive-instcombine.NumInstrsReduced folds.
This commit is contained in:
Roman Lebedev 2020-09-22 16:33:18 +03:00
parent cb10d5d714
commit b289dc5306
No known key found for this signature in database
GPG key ID: 083C3EBB4A1689E0
3 changed files with 171 additions and 41 deletions

View file

@ -58,6 +58,8 @@ STATISTIC(NumMemAccess, "Number of memory access targets propagated");
STATISTIC(NumCmps, "Number of comparisons propagated"); STATISTIC(NumCmps, "Number of comparisons propagated");
STATISTIC(NumReturns, "Number of return values propagated"); STATISTIC(NumReturns, "Number of return values propagated");
STATISTIC(NumDeadCases, "Number of switch cases removed"); STATISTIC(NumDeadCases, "Number of switch cases removed");
STATISTIC(NumSDivSRemsNarrowed,
"Number of sdivs/srems whose width was decreased");
STATISTIC(NumSDivs, "Number of sdiv converted to udiv"); STATISTIC(NumSDivs, "Number of sdiv converted to udiv");
STATISTIC(NumUDivURemsNarrowed, STATISTIC(NumUDivURemsNarrowed,
"Number of udivs/urems whose width was decreased"); "Number of udivs/urems whose width was decreased");
@ -624,6 +626,60 @@ Domain getDomain(Value *V, LazyValueInfo *LVI, Instruction *CxtI) {
return Domain::Unknown; return Domain::Unknown;
}; };
/// Try to shrink a sdiv/srem's width down to the smallest power of two that's
/// sufficient to contain its operands, as reported by LazyValueInfo.
/// The narrowed operation is sign-extended back to the original type, so all
/// existing users are preserved. Returns true if the instruction was replaced.
static bool narrowSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
  assert(Instr->getOpcode() == Instruction::SDiv ||
         Instr->getOpcode() == Instruction::SRem);
  // NOTE(review): vectors are skipped — presumably because LVI only provides
  // ranges for scalar integers; confirm before extending.
  if (Instr->getType()->isVectorTy())
    return false;

  // Find the smallest power of two bitwidth that's sufficient to hold Instr's
  // operands.
  unsigned OrigWidth = Instr->getType()->getIntegerBitWidth();

  // What is the smallest bit width that can accommodate the entire value ranges
  // of both of the operands? (Signed bits, not active bits, since the operands
  // of sdiv/srem may be negative.)
  std::array<Optional<ConstantRange>, 2> CRs;
  unsigned MinSignedBits = 0;
  for (auto I : zip(Instr->operands(), CRs)) {
    std::get<1>(I) = LVI->getConstantRange(std::get<0>(I), Instr->getParent());
    MinSignedBits = std::max(std::get<1>(I)->getMinSignedBits(), MinSignedBits);
  }

  // sdiv/srem is UB if divisor is -1 and dividend is INT_MIN, so unless we can
  // prove that such a combination is impossible, we need to bump the bitwidth.
  // (INT_MIN here is the signed minimum of the *narrow* width: that pair would
  // be a well-defined division at OrigWidth but immediate UB after narrowing.)
  if (CRs[1]->contains(APInt::getAllOnesValue(OrigWidth)) &&
      CRs[0]->contains(
          APInt::getSignedMinValue(MinSignedBits).sextOrSelf(OrigWidth)))
    ++MinSignedBits;

  // Don't shrink below 8 bits wide — narrower integer types are rarely legal.
  unsigned NewWidth = std::max<unsigned>(PowerOf2Ceil(MinSignedBits), 8);

  // NewWidth might be greater than OrigWidth if OrigWidth is not a power of
  // two.
  if (NewWidth >= OrigWidth)
    return false;

  ++NumSDivSRemsNarrowed;
  IRBuilder<> B{Instr};
  auto *TruncTy = Type::getIntNTy(Instr->getContext(), NewWidth);
  auto *LHS = B.CreateTruncOrBitCast(Instr->getOperand(0), TruncTy,
                                     Instr->getName() + ".lhs.trunc");
  auto *RHS = B.CreateTruncOrBitCast(Instr->getOperand(1), TruncTy,
                                     Instr->getName() + ".rhs.trunc");
  auto *BO = B.CreateBinOp(Instr->getOpcode(), LHS, RHS, Instr->getName());
  auto *Sext = B.CreateSExt(BO, Instr->getType(), Instr->getName() + ".sext");
  // Only sdiv carries the 'exact' flag; srem has no such flag in LLVM IR.
  // CreateBinOp may have constant-folded, hence the dyn_cast.
  if (auto *BinOp = dyn_cast<BinaryOperator>(BO))
    if (BinOp->getOpcode() == Instruction::SDiv)
      BinOp->setIsExact(Instr->isExact());

  Instr->replaceAllUsesWith(Sext);
  Instr->eraseFromParent();
  return true;
}
/// Try to shrink a udiv/urem's width down to the smallest power of two that's /// Try to shrink a udiv/urem's width down to the smallest power of two that's
/// sufficient to contain its operands. /// sufficient to contain its operands.
static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) { static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
@ -669,6 +725,7 @@ static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
} }
static bool processSRem(BinaryOperator *SDI, LazyValueInfo *LVI) { static bool processSRem(BinaryOperator *SDI, LazyValueInfo *LVI) {
assert(SDI->getOpcode() == Instruction::SRem);
if (SDI->getType()->isVectorTy()) if (SDI->getType()->isVectorTy())
return false; return false;
@ -724,6 +781,7 @@ static bool processSRem(BinaryOperator *SDI, LazyValueInfo *LVI) {
/// conditions, this can sometimes prove conditions instcombine can't by /// conditions, this can sometimes prove conditions instcombine can't by
/// exploiting range information. /// exploiting range information.
static bool processSDiv(BinaryOperator *SDI, LazyValueInfo *LVI) { static bool processSDiv(BinaryOperator *SDI, LazyValueInfo *LVI) {
assert(SDI->getOpcode() == Instruction::SDiv);
if (SDI->getType()->isVectorTy()) if (SDI->getType()->isVectorTy())
return false; return false;
@ -774,6 +832,23 @@ static bool processSDiv(BinaryOperator *SDI, LazyValueInfo *LVI) {
return true; return true;
} }
/// Common entry point for scalar sdiv/srem: first attempt the opcode-specific
/// range-based fold, and if that does not fire, fall back to narrowing the
/// operation's bitwidth. Returns true if any transform was applied.
static bool processSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
  assert(Instr->getOpcode() == Instruction::SDiv ||
         Instr->getOpcode() == Instruction::SRem);
  if (Instr->getType()->isVectorTy())
    return false;

  // Opcode-specific fold first; if it rewrote the instruction we are done.
  bool IsSDiv = Instr->getOpcode() == Instruction::SDiv;
  if (IsSDiv ? processSDiv(Instr, LVI) : processSRem(Instr, LVI))
    return true;

  // Otherwise, try to shrink the operation to a narrower bitwidth.
  return narrowSDivOrSRem(Instr, LVI);
}
static bool processAShr(BinaryOperator *SDI, LazyValueInfo *LVI) { static bool processAShr(BinaryOperator *SDI, LazyValueInfo *LVI) {
if (SDI->getType()->isVectorTy()) if (SDI->getType()->isVectorTy())
return false; return false;
@ -935,10 +1010,8 @@ static bool runImpl(Function &F, LazyValueInfo *LVI, DominatorTree *DT,
BBChanged |= processCallSite(cast<CallBase>(*II), LVI); BBChanged |= processCallSite(cast<CallBase>(*II), LVI);
break; break;
case Instruction::SRem: case Instruction::SRem:
BBChanged |= processSRem(cast<BinaryOperator>(II), LVI);
break;
case Instruction::SDiv: case Instruction::SDiv:
BBChanged |= processSDiv(cast<BinaryOperator>(II), LVI); BBChanged |= processSDivOrSRem(cast<BinaryOperator>(II), LVI);
break; break;
case Instruction::UDiv: case Instruction::UDiv:
case Instruction::URem: case Instruction::URem:

View file

@ -271,8 +271,11 @@ define i64 @test11_i15_i15(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
; CHECK-NEXT: [[DIV1:%.*]] = sdiv i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 16383 %c0 = icmp sle i64 %x, 16383
@ -306,8 +309,11 @@ define i64 @test12_i16_i16(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
; CHECK-NEXT: [[DIV1:%.*]] = sdiv i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 32767 %c0 = icmp sle i64 %x, 32767
@ -338,8 +344,11 @@ define i64 @test13_i16_u15(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C2]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C2]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
; CHECK-NEXT: [[DIV1:%.*]] = sdiv i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 32767 %c0 = icmp sle i64 %x, 32767
@ -371,8 +380,11 @@ define i64 @test14_i16safe_i16(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
; CHECK-NEXT: [[DIV1:%.*]] = sdiv i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 32767 %c0 = icmp sle i64 %x, 32767
@ -403,8 +415,11 @@ define i64 @test15_i16safe_u15(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C2]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C2]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
; CHECK-NEXT: [[DIV1:%.*]] = sdiv i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 32767 %c0 = icmp sle i64 %x, 32767
@ -435,8 +450,11 @@ define i64 @test16_i4_i4(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i8
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i8
; CHECK-NEXT: [[DIV1:%.*]] = sdiv i8 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i8 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 3 %c0 = icmp sle i64 %x, 3
@ -469,8 +487,11 @@ define i64 @test17_i9_i9(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
; CHECK-NEXT: [[DIV1:%.*]] = sdiv i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 255 %c0 = icmp sle i64 %x, 255
@ -569,8 +590,11 @@ define i64 @test20_i16_i18(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
; CHECK-NEXT: [[DIV1:%.*]] = sdiv i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 16383 %c0 = icmp sle i64 %x, 16383
@ -601,8 +625,11 @@ define i64 @test21_i18_i16(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
; CHECK-NEXT: [[DIV1:%.*]] = sdiv i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 65535 %c0 = icmp sle i64 %x, 65535
@ -635,8 +662,11 @@ define i64 @test22_i16_i16(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = sdiv exact i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
; CHECK-NEXT: [[DIV1:%.*]] = sdiv exact i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 32767 %c0 = icmp sle i64 %x, 32767

View file

@ -184,8 +184,11 @@ define i64 @test11_i15_i15(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = srem i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
; CHECK-NEXT: [[DIV1:%.*]] = srem i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 16383 %c0 = icmp sle i64 %x, 16383
@ -219,8 +222,11 @@ define i64 @test12_i16_i16(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = srem i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
; CHECK-NEXT: [[DIV1:%.*]] = srem i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 32767 %c0 = icmp sle i64 %x, 32767
@ -251,8 +257,11 @@ define i64 @test13_i16_u15(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C2]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C2]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = srem i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
; CHECK-NEXT: [[DIV1:%.*]] = srem i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 32767 %c0 = icmp sle i64 %x, 32767
@ -284,8 +293,11 @@ define i64 @test14_i16safe_i16(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = srem i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
; CHECK-NEXT: [[DIV1:%.*]] = srem i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 32767 %c0 = icmp sle i64 %x, 32767
@ -316,8 +328,11 @@ define i64 @test15_i16safe_u15(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C2]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C2]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = srem i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
; CHECK-NEXT: [[DIV1:%.*]] = srem i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 32767 %c0 = icmp sle i64 %x, 32767
@ -348,8 +363,11 @@ define i64 @test16_i4_i4(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = srem i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i8
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i8
; CHECK-NEXT: [[DIV1:%.*]] = srem i8 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i8 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 3 %c0 = icmp sle i64 %x, 3
@ -382,8 +400,11 @@ define i64 @test17_i9_i9(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = srem i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
; CHECK-NEXT: [[DIV1:%.*]] = srem i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 255 %c0 = icmp sle i64 %x, 255
@ -482,8 +503,11 @@ define i64 @test20_i16_i18(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = srem i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
; CHECK-NEXT: [[DIV1:%.*]] = srem i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 16383 %c0 = icmp sle i64 %x, 16383
@ -514,8 +538,11 @@ define i64 @test21_i18_i16(i64 %x, i64 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[C3]]) ; CHECK-NEXT: call void @llvm.assume(i1 [[C3]])
; CHECK-NEXT: br label [[END:%.*]] ; CHECK-NEXT: br label [[END:%.*]]
; CHECK: end: ; CHECK: end:
; CHECK-NEXT: [[DIV:%.*]] = srem i64 [[X]], [[Y]] ; CHECK-NEXT: [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT: ret i64 [[DIV]] ; CHECK-NEXT: [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
; CHECK-NEXT: [[DIV1:%.*]] = srem i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
; CHECK-NEXT: [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
; CHECK-NEXT: ret i64 [[DIV_SEXT]]
; ;
entry: entry:
%c0 = icmp sle i64 %x, 65535 %c0 = icmp sle i64 %x, 65535