Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Rename StoreVectorMxNAndZip to StoreVectorAndZip #103638

Merged
74 changes: 45 additions & 29 deletions src/coreclr/jit/hwintrinsicarm64.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -389,13 +389,7 @@ void HWIntrinsicInfo::lookupImmBounds(
case NI_AdvSimd_Arm64_LoadAndInsertScalarVector128x3:
case NI_AdvSimd_Arm64_LoadAndInsertScalarVector128x4:
case NI_AdvSimd_StoreSelectedScalar:
case NI_AdvSimd_StoreSelectedScalarVector64x2:
case NI_AdvSimd_StoreSelectedScalarVector64x3:
case NI_AdvSimd_StoreSelectedScalarVector64x4:
case NI_AdvSimd_Arm64_StoreSelectedScalar:
case NI_AdvSimd_Arm64_StoreSelectedScalarVector128x2:
case NI_AdvSimd_Arm64_StoreSelectedScalarVector128x3:
case NI_AdvSimd_Arm64_StoreSelectedScalarVector128x4:
case NI_AdvSimd_Arm64_DuplicateSelectedScalarToVector128:
case NI_AdvSimd_Arm64_InsertSelectedScalar:
case NI_Sve_FusedMultiplyAddBySelectedScalar:
Expand Down Expand Up @@ -2042,12 +2036,50 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
break;
}

case NI_AdvSimd_StoreVector64x2AndZip:
case NI_AdvSimd_StoreVector64x3AndZip:
case NI_AdvSimd_StoreVector64x4AndZip:
case NI_AdvSimd_Arm64_StoreVector128x2AndZip:
case NI_AdvSimd_Arm64_StoreVector128x3AndZip:
case NI_AdvSimd_Arm64_StoreVector128x4AndZip:
case NI_AdvSimd_StoreVectorAndZip:
case NI_AdvSimd_Arm64_StoreVectorAndZip:
{
assert(sig->numArgs == 2);
assert(retType == TYP_VOID);

CORINFO_ARG_LIST_HANDLE arg1 = sig->args;
CORINFO_ARG_LIST_HANDLE arg2 = info.compCompHnd->getArgNext(arg1);
var_types argType = TYP_UNKNOWN;
CORINFO_CLASS_HANDLE argClass = NO_CLASS_HANDLE;

argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg2, &argClass)));
op2 = impPopStack().val;
unsigned fieldCount = info.compCompHnd->getClassNumInstanceFields(argClass);
argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass)));
op1 = getArgForHWIntrinsic(argType, argClass);

assert(op2->TypeGet() == TYP_STRUCT);
if (op1->OperIs(GT_CAST))
{
// Although the API specifies a pointer, if what we have is a BYREF, that's what
// we really want, so throw away the cast.
if (op1->gtGetOp1()->TypeGet() == TYP_BYREF)
{
op1 = op1->gtGetOp1();
}
}

if (!op2->OperIs(GT_LCL_VAR))
{
unsigned tmp = lvaGrabTemp(true DEBUGARG("StoreVectorNx2 temp tree"));

impStoreToTemp(tmp, op2, CHECK_SPILL_NONE);
op2 = gtNewLclvNode(tmp, argType);
}
op2 = gtConvertTableOpToFieldList(op2, fieldCount);

intrinsic = simdSize == 8 ? NI_AdvSimd_StoreVectorAndZip : NI_AdvSimd_Arm64_StoreVectorAndZip;

info.compNeedsConsecutiveRegisters = true;
retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize);
break;
}

case NI_AdvSimd_StoreVector64x2:
case NI_AdvSimd_StoreVector64x3:
case NI_AdvSimd_StoreVector64x4:
Expand Down Expand Up @@ -2123,23 +2155,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
if (op2->TypeGet() == TYP_STRUCT)
{
info.compNeedsConsecutiveRegisters = true;
switch (fieldCount)
{
case 2:
intrinsic = simdSize == 8 ? NI_AdvSimd_StoreSelectedScalarVector64x2
: NI_AdvSimd_Arm64_StoreSelectedScalarVector128x2;
break;
case 3:
intrinsic = simdSize == 8 ? NI_AdvSimd_StoreSelectedScalarVector64x3
: NI_AdvSimd_Arm64_StoreSelectedScalarVector128x3;
break;
case 4:
intrinsic = simdSize == 8 ? NI_AdvSimd_StoreSelectedScalarVector64x4
: NI_AdvSimd_Arm64_StoreSelectedScalarVector128x4;
break;
default:
assert("unsupported");
}
intrinsic = simdSize == 8 ? NI_AdvSimd_StoreSelectedScalar : NI_AdvSimd_Arm64_StoreSelectedScalar;

if (!op2->OperIs(GT_LCL_VAR))
{
Expand Down
115 changes: 84 additions & 31 deletions src/coreclr/jit/hwintrinsiccodegenarm64.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1225,37 +1225,52 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
GetEmitter()->emitIns_R_R_R(ins, emitTypeSize(intrin.baseType), op2Reg, op3Reg, op1Reg);
break;

case NI_AdvSimd_StoreSelectedScalarVector64x2:
case NI_AdvSimd_StoreSelectedScalarVector64x3:
case NI_AdvSimd_StoreSelectedScalarVector64x4:
case NI_AdvSimd_Arm64_StoreSelectedScalarVector128x2:
case NI_AdvSimd_Arm64_StoreSelectedScalarVector128x3:
case NI_AdvSimd_Arm64_StoreSelectedScalarVector128x4:
case NI_AdvSimd_StoreSelectedScalar:
case NI_AdvSimd_Arm64_StoreSelectedScalar:
{
assert(intrin.op2->OperIsFieldList());
GenTreeFieldList* fieldList = intrin.op2->AsFieldList();
GenTree* firstField = fieldList->Uses().GetHead()->GetNode();
op2Reg = firstField->GetRegNum();
unsigned regCount = 0;
if (intrin.op2->OperIsFieldList())
{
GenTreeFieldList* fieldList = intrin.op2->AsFieldList();
GenTree* firstField = fieldList->Uses().GetHead()->GetNode();
op2Reg = firstField->GetRegNum();

regNumber argReg = op2Reg;
for (GenTreeFieldList::Use& use : fieldList->Uses())
{
regCount++;
#ifdef DEBUG
unsigned regCount = 0;
regNumber argReg = op2Reg;
for (GenTreeFieldList::Use& use : fieldList->Uses())
GenTree* argNode = use.GetNode();
assert(argReg == argNode->GetRegNum());
argReg = getNextSIMDRegWithWraparound(argReg);
#endif
}
}
else
{
regCount++;
regCount = 1;
}

GenTree* argNode = use.GetNode();
assert(argReg == argNode->GetRegNum());
argReg = getNextSIMDRegWithWraparound(argReg);
switch (regCount)
{
case 2:
ins = INS_st2;
break;

case 3:
ins = INS_st3;
break;

case 4:
ins = INS_st4;
break;

default:
assert(regCount == 1);
ins = INS_st1;
break;
}
assert((ins == INS_st2 && regCount == 2) || (ins == INS_st3 && regCount == 3) ||
(ins == INS_st4 && regCount == 4));
#endif
FALLTHROUGH;
}
case NI_AdvSimd_StoreSelectedScalar:
case NI_AdvSimd_Arm64_StoreSelectedScalar:
{

HWIntrinsicImmOpHelper helper(this, intrin.op3, node);

for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
Expand All @@ -1267,12 +1282,6 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
break;
}

case NI_AdvSimd_StoreVector64x2AndZip:
case NI_AdvSimd_StoreVector64x3AndZip:
case NI_AdvSimd_StoreVector64x4AndZip:
case NI_AdvSimd_Arm64_StoreVector128x2AndZip:
case NI_AdvSimd_Arm64_StoreVector128x3AndZip:
case NI_AdvSimd_Arm64_StoreVector128x4AndZip:
case NI_AdvSimd_StoreVector64x2:
case NI_AdvSimd_StoreVector64x3:
case NI_AdvSimd_StoreVector64x4:
Expand Down Expand Up @@ -1307,6 +1316,50 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
break;
}

case NI_AdvSimd_StoreVectorAndZip:
case NI_AdvSimd_Arm64_StoreVectorAndZip:
{
unsigned regCount = 0;

assert(intrin.op2->OperIsFieldList());

GenTreeFieldList* fieldList = intrin.op2->AsFieldList();
GenTree* firstField = fieldList->Uses().GetHead()->GetNode();
op2Reg = firstField->GetRegNum();

regNumber argReg = op2Reg;
for (GenTreeFieldList::Use& use : fieldList->Uses())
{
regCount++;
#ifdef DEBUG
GenTree* argNode = use.GetNode();
kunalspathak marked this conversation as resolved.
Show resolved Hide resolved
assert(argReg == argNode->GetRegNum());
argReg = getNextSIMDRegWithWraparound(argReg);
#endif
}

switch (regCount)
{
case 2:
ins = INS_st2;
break;

case 3:
ins = INS_st3;
break;

case 4:
ins = INS_st4;
break;

default:
unreached();
}

GetEmitter()->emitIns_R_R(ins, emitSize, op2Reg, op1Reg, opt);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

can you make sure emitSize == EA_8BYTE or emitSize == EA_16BYTE, or do we verify that already by this point?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sure, it's asserted in genGetSimdInsOpt() on line 322 above.
We aren't asserting it explicitly in the other cases — is there any chance of getting other values there?
For HW_Category_MemoryStore, the emitSize seemed to be pulled from the table in hwintrinsiclistarm64.h.
For HW_Category_MemoryStore, the emitSize seemed to be pulled from the table in hwintrinsiclistarm64.h.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yes, that's fine.

break;
}

case NI_Vector64_CreateScalarUnsafe:
case NI_Vector128_CreateScalarUnsafe:
if (intrin.op1->isContainedFltOrDblImmed())
Expand Down
Loading
Loading