
Improve morphing of bit comparisons to constant 0/1 #73120

Merged · 12 commits · Sep 6, 2022
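The change touches two phases: morph canonicalizes shift-and-test patterns into mask tests, and lowering folds bit tests compared against constant 0/1. As a source-level illustration (a hypothetical C++ sketch, not code from this PR; the JIT operates on the GenTree/IL equivalents), these two functions express the same bit test and should reduce to the same machine code once the shift form is canonicalized:

    #include <cstdint>

    // Two ways to test bit 'y' of 'x' (0 <= y < 32). After this change the JIT
    // canonicalizes the shift-based form into the mask-based form, so both
    // reduce to a single bit-test.
    bool TestBitViaShift(uint32_t x, int y) { return ((x >> y) & 1) != 0; }
    bool TestBitViaMask(uint32_t x, int y) { return (x & (1u << y)) != 0; }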
47 changes: 21 additions & 26 deletions src/coreclr/jit/lower.cpp
@@ -2850,38 +2850,33 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
         GenTree* andOp1 = op1->gtGetOp1();
         GenTree* andOp2 = op1->gtGetOp2();
 
-        if (op2Value != 0)
+        //
+        // If we don't have a 0 compare we can get one by transforming ((x AND mask) EQ|NE mask)
+        // into ((x AND mask) NE|EQ 0) when mask is a single bit.
+        //
+        if ((op2Value != 0) && isPow2(static_cast<target_size_t>(op2Value)) && andOp2->IsIntegralConst(op2Value))
         {
-            // Optimizes (X & 1) == 1 to (X & 1)
-            // The compiler requires jumps to have relop operands, so we do not fold that case.
-            LIR::Use cmpUse;
-            if ((op2Value == 1) && cmp->OperIs(GT_EQ))
-            {
-                if (andOp2->IsIntegralConst(1) && (genActualType(op1) == cmp->TypeGet()) &&
-                    BlockRange().TryGetUse(cmp, &cmpUse) && !cmpUse.User()->OperIs(GT_JTRUE) &&
-                    !cmpUse.User()->OperIsConditional())
-                {
-                    GenTree* next = cmp->gtNext;
-
-                    cmpUse.ReplaceWith(op1);
-
-                    BlockRange().Remove(cmp->gtGetOp2());
-                    BlockRange().Remove(cmp);
-
-                    return next;
-                }
-            }
-
-            //
-            // If we don't have a 0 compare we can get one by transforming ((x AND mask) EQ|NE mask)
-            // into ((x AND mask) NE|EQ 0) when mask is a single bit.
-            //
-            if (isPow2<target_size_t>(static_cast<target_size_t>(op2Value)) && andOp2->IsIntegralConst(op2Value))
-            {
-                op2Value = 0;
-                op2->SetIconValue(0);
-                cmp->SetOperRaw(GenTree::ReverseRelop(cmp->OperGet()));
-            }
+            op2Value = 0;
+            op2->SetIconValue(0);
+            cmp->SetOperRaw(GenTree::ReverseRelop(cmp->OperGet()));
+        }
+
+        // Transform ((x AND 1) NE 0) into (x AND 1)
+        // The compiler requires jumps to have relop operands, so we do not fold that case.
+        if ((op2Value == 0) && cmp->OperIs(GT_NE) && andOp2->IsIntegralConst(1))
+        {
+            LIR::Use cmpUse;
+            if ((genActualType(op1) == cmp->TypeGet()) && BlockRange().TryGetUse(cmp, &cmpUse) &&
+                !cmpUse.User()->OperIs(GT_JTRUE) && !cmpUse.User()->OperIsConditional())
+            {
+                GenTree* next = cmp->gtNext;
+
+                cmpUse.ReplaceWith(op1);
+
+                BlockRange().Remove(cmp->gtGetOp2());
+                BlockRange().Remove(cmp);
+
+                return next;
+            }
         }
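The two lowering rewrites above rest on simple bit identities: for a single-bit mask, (x AND mask) EQ mask holds exactly when (x AND mask) NE 0, and (x AND 1) NE 0 already equals the value of (x AND 1), so the compare node can be deleted for non-jump uses. A minimal exhaustive check of both identities (an illustrative sketch, not part of the change):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        for (uint32_t x = 0; x < (1u << 16); x++)
        {
            // ((x AND mask) EQ mask) <=> ((x AND mask) NE 0) for a single-bit mask.
            for (int bit = 0; bit < 16; bit++)
            {
                uint32_t mask = 1u << bit;
                assert(((x & mask) == mask) == ((x & mask) != 0));
            }

            // ((x AND 1) NE 0) is already the value of (x AND 1), so the compare
            // itself is redundant when the result feeds a value use.
            assert(static_cast<uint32_t>((x & 1) != 0) == (x & 1));
        }
        return 0;
    }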

83 changes: 45 additions & 38 deletions src/coreclr/jit/morph.cpp
@@ -11978,77 +11978,84 @@ GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp)
     //
     // Now we check for a compare with the result of an '&' operator
     //
-    // Here we look for the following transformation:
+    // Here we look for the following transformation (canonicalization):
     //
     //                  EQ/NE                    EQ/NE
     //                 /     \                  /     \.
     //              AND      CNS 0/1  ->     AND     CNS 0
     //             /   \                    /   \.
-    //        RSZ/RSH   CNS 1              x     CNS (1 << y)
-    //          /  \.
-    //         x    CNS_INT +y
+    //        RSZ/RSH   CNS 1              x     LSH  (folded if 'y' is constant)
+    //          /  \                            /   \.
+    //         x    y                          1     y
 
     if (fgGlobalMorph && op1->OperIs(GT_AND) && op1->AsOp()->gtGetOp1()->OperIs(GT_RSZ, GT_RSH))
     {
         GenTreeOp* andOp    = op1->AsOp();
         GenTreeOp* rshiftOp = andOp->gtGetOp1()->AsOp();
 
-        if (!rshiftOp->gtGetOp2()->IsCnsIntOrI())
-        {
-            goto SKIP;
-        }
-
-        ssize_t shiftAmount = rshiftOp->gtGetOp2()->AsIntCon()->IconValue();
-
-        if (shiftAmount < 0)
-        {
-            goto SKIP;
-        }
-
         if (!andOp->gtGetOp2()->IsIntegralConst(1))
         {
             goto SKIP;
         }
 
-        GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon();
-
-        if (andOp->TypeIs(TYP_INT))
+        // If the shift is constant, we can fold the mask and delete the shift node:
+        // -> AND(x, CNS(1 << y)) EQ/NE 0
+        if (rshiftOp->gtGetOp2()->IsCnsIntOrI())
         {
-            if (shiftAmount > 31)
+            ssize_t shiftAmount = rshiftOp->gtGetOp2()->AsIntCon()->IconValue();
+
+            if (shiftAmount < 0)
             {
                 goto SKIP;
             }
 
-            andMask->SetIconValue(static_cast<int32_t>(1 << shiftAmount));
+            GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon();
 
-            // Reverse the condition if necessary.
-            if (op2Value == 1)
+            if (andOp->TypeIs(TYP_INT) && shiftAmount < 32)
             {
-                gtReverseCond(cmp);
-                op2->SetIconValue(0);
+                andMask->SetIconValue(static_cast<int32_t>(1 << shiftAmount));
             }
-        }
-        else if (andOp->TypeIs(TYP_LONG))
-        {
-            if (shiftAmount > 63)
+            else if (andOp->TypeIs(TYP_LONG) && shiftAmount < 64)
             {
-                goto SKIP;
+                andMask->SetLngValue(1LL << shiftAmount);
             }
-
-            andMask->SetLngValue(1ll << shiftAmount);
-
-            // Reverse the cond if necessary
-            if (op2Value == 1)
+            else
             {
-                gtReverseCond(cmp);
-                op2->SetLngValue(0);
+                goto SKIP; // Unsupported type or invalid shift amount.
             }
+            andOp->gtOp1 = rshiftOp->gtGetOp1();
+
+            DEBUG_DESTROY_NODE(rshiftOp->gtGetOp2());
+            DEBUG_DESTROY_NODE(rshiftOp);
         }
+        // Otherwise, if the shift is not constant, just rewire the nodes and reverse the shift op:
+        // AND(RSH(x, y), 1) -> AND(x, LSH(1, y))
+        //
+        // On ARM/BMI2 the original pattern should result in smaller code when comparing to non-zero;
+        // the other case where this transform is worthwhile is if the compare is being used by a jump.
+        //
+        else
+        {
+            if (!(cmp->gtFlags & GTF_RELOP_JMP_USED) &&
+                ((op2Value == 0 && cmp->OperIs(GT_NE)) || (op2Value == 1 && cmp->OperIs(GT_EQ))))
+            {
+                goto SKIP;
+            }
 
-        andOp->gtOp1 = rshiftOp->gtGetOp1();
+            andOp->gtOp1    = rshiftOp->gtGetOp1();
+            rshiftOp->gtOp1 = andOp->gtGetOp2();
+            andOp->gtOp2    = rshiftOp;
 
-        DEBUG_DESTROY_NODE(rshiftOp->gtGetOp2());
-        DEBUG_DESTROY_NODE(rshiftOp);
+            rshiftOp->SetAllEffectsFlags(rshiftOp->gtGetOp1(), rshiftOp->gtGetOp2());
+            rshiftOp->SetOper(GT_LSH);
+        }
+
+        // Reverse the condition if necessary.
+        if (op2Value == 1)
+        {
+            gtReverseCond(cmp);
+            op2->SetIntegralValue(0);
+        }
     }
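The canonicalization is justified by the identity ((x >> y) & 1) != 0 == (x & (1 << y)) != 0, with the EQ-1 form differing only by a reversed relop. For the variable-shift rewire AND(RSH(x, y), 1) -> AND(x, LSH(1, y)) the same identity applies, assuming (as for the IL the JIT consumes) shift counts are masked to the operand width. A small sanity check of the unsigned case (an illustrative sketch, not part of the change):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        for (uint32_t x = 0; x < (1u << 12); x++)
        {
            for (int y = 0; y < 32; y++)
            {
                // ((x RSZ y) AND 1) NE 0  <=>  (x AND (1 << y)) NE 0
                assert((((x >> y) & 1u) != 0u) == ((x & (1u << y)) != 0u));

                // Comparing the extracted bit against 1 instead of 0 only
                // reverses the relop:
                // ((x RSZ y) AND 1) EQ 1  <=>  (x AND (1 << y)) NE 0
                assert((((x >> y) & 1u) == 1u) == ((x & (1u << y)) != 0u));
            }
        }
        return 0;
    }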
