Implement ARM32 atomic intrinsics #97792

Open: wants to merge 19 commits into main

Changes from 7 commits
@@ -51,7 +51,7 @@ public static long Decrement(ref long location) =>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static byte Exchange(ref byte location1, byte value)
{
-#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64
+#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM
return Exchange(ref location1, value); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
@@ -72,7 +72,7 @@ public static byte Exchange(ref byte location1, byte value)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static short Exchange(ref short location1, short value)
{
-#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64
+#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM
return Exchange(ref location1, value); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
@@ -93,7 +93,7 @@ public static short Exchange(ref short location1, short value)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int Exchange(ref int location1, int value)
{
-#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_RISCV64
+#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM || TARGET_RISCV64
return Exchange(ref location1, value); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
@@ -172,7 +172,7 @@ public static T Exchange<T>([NotNullIfNotNull(nameof(value))] ref T location1, T value)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static byte CompareExchange(ref byte location1, byte value, byte comparand)
{
-#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64
+#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM
return CompareExchange(ref location1, value, comparand); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
@@ -194,7 +194,7 @@ public static byte CompareExchange(ref byte location1, byte value, byte comparand)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static short CompareExchange(ref short location1, short value, short comparand)
{
-#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64
+#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM
return CompareExchange(ref location1, value, comparand); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
@@ -216,7 +216,7 @@ public static short CompareExchange(ref short location1, short value, short comparand)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int CompareExchange(ref int location1, int value, int comparand)
{
-#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_RISCV64
+#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM || TARGET_RISCV64
return CompareExchange(ref location1, value, comparand); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
@@ -315,7 +315,7 @@ public static long Add(ref long location1, long value) =>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static int ExchangeAdd(ref int location1, int value)
{
-#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_RISCV64
+#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM || TARGET_RISCV64
return ExchangeAdd(ref location1, value); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
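
Note: the overloads above rely on the JIT recognizing the self-recursive call marked "Must expand intrinsic" and replacing it with a hardware atomic sequence; adding TARGET_ARM routes 32-bit ARM through that intrinsic path instead of the software fallback in the #else branches. Below is a minimal, illustrative C# sketch of the affected public Interlocked APIs and the compare-exchange retry shape they map to. The AtomicCounterSketch type and its members are invented for illustration; only the Interlocked and Volatile calls are real APIs.

using System.Threading;

public static class AtomicCounterSketch
{
    private static int s_count;

    // Same effect as Interlocked.Increment, written as an explicit compare-exchange
    // retry loop -- conceptually the same shape as the ldrex/strex loop the JIT
    // emits directly for these intrinsics on ARM32 (see codegenarm.cpp below).
    public static int IncrementViaCas()
    {
        int observed, updated;
        do
        {
            observed = Volatile.Read(ref s_count);
            updated = observed + 1;
        }
        while (Interlocked.CompareExchange(ref s_count, updated, observed) != observed);
        return updated;
    }

    // Direct uses of overloads touched in this diff (int shown; the byte and short
    // overloads follow the same pattern).
    public static int SwapThenAdd(int newValue, int delta)
    {
        int old = Interlocked.Exchange(ref s_count, newValue); // atomic swap, full fence
        Interlocked.Add(ref s_count, delta);                   // atomic add, full fence
        return old;
    }
}
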
253 changes: 253 additions & 0 deletions src/coreclr/jit/codegenarm.cpp
@@ -61,6 +61,11 @@ bool CodeGen::genInstrWithConstant(
{
case INS_add:
case INS_sub:
if (imm < 0)
{
imm = -imm;
ins = (ins == INS_add) ? INS_sub : INS_add;
}
immFitsInIns = validImmForInstr(ins, (target_ssize_t)imm, flags);
break;

@@ -675,6 +680,254 @@ void CodeGen::genJumpTable(GenTree* treeNode)
genProduceReg(treeNode);
}

//------------------------------------------------------------------------
// genLockedInstructions: Generate code for a GT_XADD or GT_XCHG node.
//
// Arguments:
// treeNode - the GT_XADD/XCHG node
//
void CodeGen::genLockedInstructions(GenTreeOp* treeNode)
{
GenTree* data = treeNode->AsOp()->gtOp2;
GenTree* addr = treeNode->AsOp()->gtOp1;
regNumber targetReg = treeNode->GetRegNum();
regNumber dataReg = data->GetRegNum();
regNumber addrReg = addr->GetRegNum();

genConsumeAddress(addr);
genConsumeRegs(data);

assert(!treeNode->OperIs(GT_XORR, GT_XAND));
assert(treeNode->OperIs(GT_XCHG) || !varTypeIsSmall(treeNode->TypeGet()));

emitAttr dataSize = emitActualTypeSize(data);

regNumber tempReg = treeNode->ExtractTempReg(RBM_ALLINT);
regNumber storeReg = (treeNode->OperGet() == GT_XCHG) ? dataReg : treeNode->ExtractTempReg(RBM_ALLINT);
regNumber loadReg = (targetReg != REG_NA) ? targetReg : storeReg;

// Check allocator assumptions
//
// The register allocator should have extended the lifetimes of all input and internal registers so that
// none interfere with the target.
noway_assert(addrReg != targetReg);

noway_assert(addrReg != loadReg);
noway_assert(dataReg != loadReg);

noway_assert((treeNode->OperGet() == GT_XCHG) || (addrReg != dataReg));

assert(addr->isUsedFromReg());
noway_assert(tempReg != REG_NA);
noway_assert(tempReg != targetReg);
noway_assert((targetReg != REG_NA) || (treeNode->OperGet() != GT_XCHG));

// Store exclusive unpredictable cases must be avoided
noway_assert(tempReg != addrReg);

// NOTE: `genConsumeAddress` marks the consumed register as not a GC pointer, as it assumes that the input
// registers die at the first instruction generated by the node. This is not the case for these atomics, as
// the input registers are multiply-used. As such, we need to mark the addr register as containing a GC
// pointer until we are finished generating the code for this node.

gcInfo.gcMarkRegPtrVal(addrReg, addr->TypeGet());

Review comment (Member): I know this is done for other platforms as well, but I am not sure why this cannot be done in lower. The codegen should just emit the code instead of inserting the loop-like code here.

Reply (Author): I assume the reason is that we'd need new nodes for the atomic loads and stores to create the loop there. I feel like changing that should be a separate PR from this, if desired.

// Emit code like this:
// retry:
// ldrex loadReg, [addrReg]
// add storeReg, loadReg, dataReg # Only for GT_XADD
// # GT_XCHG storeReg === dataReg
// strex tempReg, storeReg, [addrReg]
// cmp tempReg, 0
// bne retry
// dmb ish

instruction insLd = INS_ldrex;
instruction insSt = INS_strex;
if (varTypeIsByte(treeNode->TypeGet()))
{
insLd = INS_ldrexb;
insSt = INS_strexb;
}
else if (varTypeIsShort(treeNode->TypeGet()))
{
insLd = INS_ldrexh;
insSt = INS_strexh;
}

instGen_MemoryBarrier();

BasicBlock* labelRetry = genCreateTempLabel();
genDefineTempLabel(labelRetry);

// The following instruction includes an acquire half barrier
GetEmitter()->emitIns_R_R(insLd, dataSize, loadReg, addrReg);

if (treeNode->OperGet() == GT_XADD)
{
if (data->isContainedIntOrIImmed())
{
genInstrWithConstant(INS_add, dataSize, storeReg, loadReg, data->AsIntConCommon()->IconValue(),
INS_FLAGS_DONT_CARE, tempReg);
}
else
{
GetEmitter()->emitIns_R_R_R(INS_add, dataSize, storeReg, loadReg, dataReg);
}
}

// The following instruction includes a release half barrier
GetEmitter()->emitIns_R_R_R(insSt, dataSize, tempReg, storeReg, addrReg);

GetEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, tempReg, 0);
GetEmitter()->emitIns_J(INS_bne, labelRetry);

instGen_MemoryBarrier();

gcInfo.gcMarkRegSetNpt(addr->gtGetRegMask());

if (targetReg != REG_NA)
{
if (varTypeIsSmall(treeNode->TypeGet()) && varTypeIsSigned(treeNode->TypeGet()))
{
instruction mov = varTypeIsShort(treeNode->TypeGet()) ? INS_sxth : INS_sxtb;
GetEmitter()->emitIns_Mov(mov, EA_4BYTE, targetReg, targetReg, /* canSkip */ false);
}

genProduceReg(treeNode);
}
}
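
Note: one subtlety in the sequence above is the final sxtb/sxth. The ldrexb/ldrexh instructions zero-extend the loaded value, so for a signed small type the old value has to be sign-extended before GT_XCHG returns it. A hedged C# sketch of the observable behaviour this preserves; the type and method names here are invented, while the short Exchange overload is the one shown in the Interlocked.cs diff above.

using System.Diagnostics;
using System.Threading;

public static class ExchangeSignExtensionSketch
{
    private static short s_value = -5; // stored halfword bits are 0xFFFB

    public static void Demo()
    {
        // The returned old value must be the sign-extended -5, not the raw
        // zero-extended halfword; the sxth emitted after the retry loop
        // guarantees this for the ARM32 intrinsic path.
        short old = Interlocked.Exchange(ref s_value, 7);
        Debug.Assert(old == -5);
    }
}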

//------------------------------------------------------------------------
// genCodeForCmpXchg: Produce code for a GT_CMPXCHG node.
//
// Arguments:
// tree - the GT_CMPXCHG node
//
void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* treeNode)
{
assert(treeNode->OperIs(GT_CMPXCHG));

GenTree* addr = treeNode->Addr(); // arg1
GenTree* data = treeNode->Data(); // arg2
GenTree* comparand = treeNode->Comparand(); // arg3

regNumber targetReg = treeNode->GetRegNum();
regNumber dataReg = data->GetRegNum();
regNumber addrReg = addr->GetRegNum();
regNumber comparandReg = comparand->GetRegNum();

genConsumeAddress(addr);
genConsumeRegs(data);
genConsumeRegs(comparand);

emitAttr dataSize = emitActualTypeSize(data);

regNumber exResultReg = treeNode->ExtractTempReg(RBM_ALLINT);

// Check allocator assumptions
//
// The register allocator should have extended the lifetimes of all input and internal registers so that
// none interfere with the target.
noway_assert(addrReg != targetReg);
noway_assert(dataReg != targetReg);
noway_assert(comparandReg != targetReg);
noway_assert(addrReg != dataReg);
noway_assert(targetReg != REG_NA);
noway_assert(exResultReg != REG_NA);
noway_assert(exResultReg != targetReg);

assert(addr->isUsedFromReg());
assert(data->isUsedFromReg());
assert(!comparand->isUsedFromMemory());

// Store exclusive unpredictable cases must be avoided
noway_assert(exResultReg != dataReg);
noway_assert(exResultReg != addrReg);

// NOTE: `genConsumeAddress` marks the consumed register as not a GC pointer, as it assumes that the input
// registers die at the first instruction generated by the node. This is not the case for these atomics, as
// the input registers are multiply-used. As such, we need to mark the addr register as containing a GC
// pointer until we are finished generating the code for this node.

gcInfo.gcMarkRegPtrVal(addrReg, addr->TypeGet());

// Emit code like this:
// retry:
// ldrex targetReg, [addrReg]
// cmp targetReg, comparandReg
// bne compareFail
// strex exResult, dataReg, [addrReg]
// cmp exResult, 0
// bne retry
// compareFail:
// dmb ish

instruction insLd = INS_ldrex;
instruction insSt = INS_strex;
if (varTypeIsByte(treeNode->TypeGet()))
{
insLd = INS_ldrexb;
insSt = INS_strexb;
}
else if (varTypeIsShort(treeNode->TypeGet()))
{
insLd = INS_ldrexh;
insSt = INS_strexh;
}

instGen_MemoryBarrier();

BasicBlock* labelRetry = genCreateTempLabel();
BasicBlock* labelCompareFail = genCreateTempLabel();
genDefineTempLabel(labelRetry);

// The following instruction includes an acquire half barrier
GetEmitter()->emitIns_R_R(insLd, dataSize, targetReg, addrReg);

if (varTypeIsSmall(treeNode->TypeGet()) && varTypeIsSigned(treeNode->TypeGet()))
{
instruction mov = varTypeIsShort(treeNode->TypeGet()) ? INS_sxth : INS_sxtb;
GetEmitter()->emitIns_Mov(mov, EA_4BYTE, targetReg, targetReg, /* canSkip */ false);
}

if (comparand->isContainedIntOrIImmed())
{
if (comparand->IsIntegralConst(0) && emitter::isLowRegister(targetReg))
{
GetEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, labelCompareFail, targetReg);
}
else
{
assert(comparand->AsIntConCommon()->IconValue() <= INT32_MAX);
GetEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, targetReg,
(target_ssize_t)comparand->AsIntConCommon()->IconValue());
GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
}
}
else
{
GetEmitter()->emitIns_R_R(INS_cmp, EA_4BYTE, targetReg, comparandReg);
GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
}

// The following instruction includes a release half barrier
GetEmitter()->emitIns_R_R_R(insSt, dataSize, exResultReg, dataReg, addrReg);

GetEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, exResultReg, 0);
GetEmitter()->emitIns_J(INS_bne, labelRetry);

genDefineTempLabel(labelCompareFail);

instGen_MemoryBarrier();

gcInfo.gcMarkRegSetNpt(addr->gtGetRegMask());

genProduceReg(treeNode);
}
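
Note: to summarize the control flow above, only a failed store-exclusive retries the loop; a comparand mismatch branches straight to compareFail, and when the comparand is the constant 0 and the target is a low register a single cbnz replaces the cmp/bne pair. The sign extension after the ldrex matters because, for small signed types, the 32-bit comparand register already holds a sign-extended value. Below is a non-atomic C# model of the semantics the emitted sequence implements, for orientation only; the type and method names are invented and this is not how the runtime implements CompareExchange.

static class CompareExchangeModelSketch
{
    // Semantic model only: the real operation is a single atomic step bracketed by
    // full barriers (dmb ish), implemented by the ldrex/cmp/strex loop above.
    public static int CompareExchangeModel(ref int location, int value, int comparand)
    {
        int observed = location;    // ldrex (then sxtb/sxth for small signed types)
        if (observed == comparand)  // cmp / bne compareFail (or cbnz when comparand is 0)
        {
            location = value;       // strex, retried if the exclusive store fails
        }
        return observed;            // the old value is always returned
    }
}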

//------------------------------------------------------------------------
// genGetInsForOper: Return instruction encoding of the operation tree.
//
4 changes: 2 additions & 2 deletions src/coreclr/jit/codegenarmarch.cpp
@@ -429,17 +429,17 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
}

#ifdef TARGET_ARM64
-case GT_XCHG:
case GT_XORR:
case GT_XAND:
+#endif // TARGET_ARM64
+case GT_XCHG:
case GT_XADD:
genLockedInstructions(treeNode->AsOp());
break;

case GT_CMPXCHG:
genCodeForCmpXchg(treeNode->AsCmpXchg());
break;
-#endif // TARGET_ARM64

case GT_RELOAD:
// do nothing - reload is just a marker.