[scudo/standalone] Use .arch_extension memtag, not mte

GNU binutils accepts only `.arch_extension memtag` while Clang
accepts either that or `.arch_extension mte` to mean the same thing.
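
For reference, a minimal sketch of the pattern the diff below standardizes on. Because `.arch_extension` is an assembler directive, it enables the MTE mnemonics for the assembler only, so the compiler does not need an MTE-enabled `-march` for the whole translation unit (the wrapper name is hypothetical):

```cpp
// AArch64-only sketch: the directive lets the assembler accept the MTE
// MSR form below; `memtag` is the spelling both assemblers understand.
inline void setTagCheckOverride() { // hypothetical name, not from the diff
  __asm__ __volatile__(".arch_extension memtag; msr tco, #1");
}
```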

Reviewed By: pcc

Differential Revision: https://reviews.llvm.org/D95996
commit 4c9adbb287
parent bdf3ad582e
Author: Roland McGrath
Date:   2021-02-03 19:28:29 -08:00


@@ -88,11 +88,11 @@ inline bool systemDetectsMemoryTagFaultsTestOnly() { return false; }
 #endif // SCUDO_LINUX
 
 inline void disableMemoryTagChecksTestOnly() {
-  __asm__ __volatile__(".arch_extension mte; msr tco, #1");
+  __asm__ __volatile__(".arch_extension memtag; msr tco, #1");
 }
 
 inline void enableMemoryTagChecksTestOnly() {
-  __asm__ __volatile__(".arch_extension mte; msr tco, #0");
+  __asm__ __volatile__(".arch_extension memtag; msr tco, #0");
 }
 
 class ScopedDisableMemoryTagChecks {
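
For context, not part of the change: `tco` is the PSTATE Tag Check Override bit, and while it is set, loads and stores are not tag-checked. A hedged sketch of how the test-only toggles pair up; the probe helper is hypothetical:

```cpp
disableMemoryTagChecksTestOnly(); // PSTATE.TCO = 1: tag faults suppressed
probeMistaggedMemoryForTest();    // hypothetical helper; might fault otherwise
enableMemoryTagChecksTestOnly();  // PSTATE.TCO = 0: checks enforced again
```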
@@ -100,19 +100,21 @@ class ScopedDisableMemoryTagChecks {
 public:
   ScopedDisableMemoryTagChecks() {
-    __asm__ __volatile__(".arch_extension mte; mrs %0, tco; msr tco, #1"
+    __asm__ __volatile__(".arch_extension memtag; mrs %0, tco; msr tco, #1"
                          : "=r"(PrevTCO));
   }
 
   ~ScopedDisableMemoryTagChecks() {
-    __asm__ __volatile__(".arch_extension mte; msr tco, %0" : : "r"(PrevTCO));
+    __asm__ __volatile__(".arch_extension memtag; msr tco, %0"
+                         :
+                         : "r"(PrevTCO));
   }
 };
 
 inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
   uptr TaggedPtr;
   __asm__ __volatile__(
-      ".arch_extension mte; irg %[TaggedPtr], %[Ptr], %[ExcludeMask]"
+      ".arch_extension memtag; irg %[TaggedPtr], %[Ptr], %[ExcludeMask]"
       : [TaggedPtr] "=r"(TaggedPtr)
       : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
   return TaggedPtr;
 }
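
A hedged usage sketch of the RAII guard above: the constructor saves the prior TCO value via `mrs` before setting it, and the destructor restores it, so scopes nest safely. (`irg` in `selectRandomTag` inserts a random allocation tag into `Ptr`, skipping any tag whose bit is set in `ExcludeMask`.)

```cpp
{
  scudo::ScopedDisableMemoryTagChecks Guard; // saves TCO, then sets it
  // ... inspect memory whose tags may not match the pointer ...
} // destructor restores the saved TCO value
```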
@@ -123,7 +125,7 @@ inline uptr storeTags(uptr Begin, uptr End) {
   if (Begin != End) {
     __asm__ __volatile__(
         R"(
-    .arch_extension mte
+    .arch_extension memtag
 
   1:
     stzg %[Cur], [%[Cur]], #16
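
For context: each `stzg` zeroes one 16-byte granule, sets its allocation tag from the tag bits of `%[Cur]`, and post-increments by 16. A hedged C++ model of the loop's effect; the tag store has no C++ equivalent and is elided:

```cpp
#include <cstdint>
#include <cstring>

// Hedged model: storeTags zeroes [Begin, End) in 16-byte granules and
// also gives each granule the allocation tag carried in Begin's top
// bits; only the zeroing is modeled here.
inline void storeTagsModel(uintptr_t Begin, uintptr_t End) {
  for (uintptr_t Cur = Begin; Cur != End; Cur += 16)
    std::memset(reinterpret_cast<void *>(Cur), 0, 16); // stzg also tags
}
```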
@@ -144,7 +146,7 @@ inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
   // chunk holding a low alignment allocation is reused for a higher alignment
   // allocation, the chunk may already have a non-zero tag from the previous
   // allocation.
-  __asm__ __volatile__(".arch_extension mte; stg %0, [%0, #-16]"
+  __asm__ __volatile__(".arch_extension memtag; stg %0, [%0, #-16]"
                        :
                        : "r"(Ptr)
                        : "memory");
@@ -161,7 +163,7 @@ inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
   // purpose of catching linear overflows in this case.
   uptr UntaggedEnd = untagPointer(TaggedEnd);
   if (UntaggedEnd != BlockEnd)
-    __asm__ __volatile__(".arch_extension mte; stg %0, [%0]"
+    __asm__ __volatile__(".arch_extension memtag; stg %0, [%0]"
                          :
                          : "r"(UntaggedEnd)
                          : "memory");
@@ -175,7 +177,7 @@ inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
   // of the allocation to 0. See explanation in prepareTaggedChunk above.
   uptr RoundNewPtr = untagPointer(roundUpTo(NewPtr, 16));
   if (RoundNewPtr != BlockEnd)
-    __asm__ __volatile__(".arch_extension mte; stg %0, [%0]"
+    __asm__ __volatile__(".arch_extension memtag; stg %0, [%0]"
                          :
                          : "r"(RoundNewPtr)
                          : "memory");
@@ -183,7 +185,7 @@ inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
   }
 
   __asm__ __volatile__(R"(
-    .arch_extension mte
+    .arch_extension memtag
 
     // Set the memory tag of the region
     // [roundUpTo(OldPtr, 16), roundUpTo(NewPtr, 16))
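
For context: per the range comment above, the grow path retags the granules in `[roundUpTo(OldPtr, 16), roundUpTo(NewPtr, 16))`, while the shrink path only resets the new end granule to tag 0, as in `prepareTaggedChunk`. A hedged, self-contained model of the retag range (`roundUpTo` is redefined here only for the sketch):

```cpp
#include <cstdint>

constexpr uintptr_t roundUpTo(uintptr_t X, uintptr_t Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

// How many 16-byte granules the grow path retags with OldPtr's tag;
// the real code walks this range with stg.
constexpr uintptr_t granulesToRetag(uintptr_t OldPtr, uintptr_t NewPtr) {
  return (roundUpTo(NewPtr, 16) - roundUpTo(OldPtr, 16)) / 16;
}
```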
@@ -208,7 +210,7 @@ inline uptr loadTag(uptr Ptr) {
 
 inline uptr loadTag(uptr Ptr) {
   uptr TaggedPtr = Ptr;
-  __asm__ __volatile__(".arch_extension mte; ldg %0, [%0]"
+  __asm__ __volatile__(".arch_extension memtag; ldg %0, [%0]"
                        : "+r"(TaggedPtr)
                        :
                        : "memory");