Revert "Rebase: [Facebook] [MC] Introduce NeverAlign fragment type"

This reverts commit 6d0528636a.
spupyrev 2022-07-11 09:48:22 -07:00
parent b444358126
commit eecd41aa09
10 changed files with 37 additions and 363 deletions

View file

@@ -453,7 +453,6 @@ void BinaryEmitter::emitFunctionBody(BinaryFunction &BF, bool EmitColdPart,
// This assumes the second instruction in the macro-op pair will get
// assigned to its own MCRelaxableFragment. Since all JCC instructions
// are relaxable, we should be safe.
Streamer.emitNeverAlignCodeAtEnd(/*Alignment to avoid=*/64, *BC.STI);
}
if (!EmitCodeOnly && opts::UpdateDebugSections && BF.getDWARFUnit()) {

View file

@@ -33,7 +33,6 @@ class MCFragment : public ilist_node_with_parent<MCFragment, MCSection> {
public:
enum FragmentType : uint8_t {
FT_Align,
FT_NeverAlign,
FT_Data,
FT_CompactEncodedInst,
FT_Fill,
@@ -341,27 +340,6 @@ public:
}
};
class MCNeverAlignFragment : public MCFragment {
/// The alignment the end of the next fragment should avoid.
unsigned Alignment;
/// When emitting Nops some subtargets have specific nop encodings.
const MCSubtargetInfo &STI;
public:
MCNeverAlignFragment(unsigned Alignment, const MCSubtargetInfo &STI,
MCSection *Sec = nullptr)
: MCFragment(FT_NeverAlign, false, Sec), Alignment(Alignment), STI(STI) {}
unsigned getAlignment() const { return Alignment; }
const MCSubtargetInfo &getSubtargetInfo() const { return STI; }
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_NeverAlign;
}
};
class MCFillFragment : public MCFragment {
uint8_t ValueSize;
/// Value to use for filling bytes.

View file

@@ -157,8 +157,6 @@ public:
unsigned MaxBytesToEmit = 0) override;
void emitCodeAlignment(unsigned ByteAlignment, const MCSubtargetInfo *STI,
unsigned MaxBytesToEmit = 0) override;
void emitNeverAlignCodeAtEnd(unsigned ByteAlignment,
const MCSubtargetInfo &STI) override;
void emitValueToOffset(const MCExpr *Offset, unsigned char Value,
SMLoc Loc) override;
void emitDwarfLocDirective(unsigned FileNo, unsigned Line, unsigned Column,

View file

@@ -872,12 +872,6 @@ public:
const MCSubtargetInfo *STI,
unsigned MaxBytesToEmit = 0);
/// If the end of the fragment following this NeverAlign fragment ever gets
/// aligned to \p ByteAlignment, this fragment emits a single nop before the
/// following fragment to break this end-alignment.
virtual void emitNeverAlignCodeAtEnd(unsigned ByteAlignment,
const MCSubtargetInfo &STI);
/// Emit some number of copies of \p Value until the byte offset \p
/// Offset is reached.
///
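A caller-side sketch of the streamer hook being deleted above (hedged illustration; `Streamer` and `STI` stand for whatever MCObjectStreamer and MCSubtargetInfo the caller already holds, mirroring the BOLT call site in the first hunk of this diff):
// Guarantee that the fragment emitted next never has its end land exactly on
// a 64-byte boundary; at most one nop is inserted to break that alignment.
Streamer.emitNeverAlignCodeAtEnd(/*ByteAlignment=*/64, STI);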

View file

@@ -290,43 +290,6 @@ bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout,
return IsResolved;
}
/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch crosses the boundary.
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
Align BoundaryAlignment) {
uint64_t EndAddr = StartAddr + Size;
return (StartAddr >> Log2(BoundaryAlignment)) !=
((EndAddr - 1) >> Log2(BoundaryAlignment));
}
/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary.
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
Align BoundaryAlignment) {
uint64_t EndAddr = StartAddr + Size;
return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}
/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
Align BoundaryAlignment) {
return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}
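A worked illustration of these three helpers (editor's sketch, assuming the surrounding MCAssembler.cpp context; the addresses are hypothetical and chosen to match the 64-byte boundaries used in the deleted test at the end of this diff):
// A 2-byte branch starting at 0x3e ends exactly at the 0x40 boundary, while a
// 6-byte branch starting there spills past it; either way padding is needed.
Align Boundary(64);
assert(!mayCrossBoundary(0x3e, 2, Boundary)); // 0x3e..0x3f stays below 0x40
assert(isAgainstBoundary(0x3e, 2, Boundary)); // ends exactly at 0x40
assert(needPadding(0x3e, 2, Boundary));       // against the boundary => pad
assert(mayCrossBoundary(0x3e, 6, Boundary));  // 0x3e..0x43 straddles 0x40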
uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
const MCFragment &F) const {
assert(getBackendPtr() && "Requires assembler backend");
@@ -387,41 +350,6 @@ uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
return Size;
}
case MCFragment::FT_NeverAlign: {
// Disclaimer: NeverAlign fragment size depends on the size of its immediate
// successor, but NeverAlign need not be a MCRelaxableFragment.
// NeverAlign fragment size is recomputed if the successor is relaxed:
// - If RelaxableFragment is relaxed, it gets invalidated by marking its
// predecessor as LastValidFragment.
// - This forces the assembler to call MCAsmLayout::layoutFragment on that
// relaxable fragment, which in turn will always ask the predecessor to
// compute its size (see "computeFragmentSize(prev)" in layoutFragment).
//
// In short, the simplest way to ensure that computeFragmentSize() is sane
// is to establish the following rule: it should never examine fragments
// after the current fragment in the section. If we logically need to
// examine any fragment after the current fragment, we need to do that using
// relaxation, inside MCAssembler::layoutSectionOnce.
const MCNeverAlignFragment &NAF = cast<MCNeverAlignFragment>(F);
const MCFragment *NF = F.getNextNode();
uint64_t Offset = Layout.getFragmentOffset(&NAF);
size_t NextFragSize = 0;
if (const auto *NextFrag = dyn_cast<MCRelaxableFragment>(NF)) {
NextFragSize = NextFrag->getContents().size();
} else if (const auto *NextFrag = dyn_cast<MCDataFragment>(NF)) {
NextFragSize = NextFrag->getContents().size();
} else {
llvm_unreachable("Didn't find the expected fragment after NeverAlign");
}
// Check if the next fragment ends at the alignment we want to avoid.
if (isAgainstBoundary(Offset, NextFragSize, Align(NAF.getAlignment()))) {
// Avoid this alignment by introducing minimum nop.
assert(getBackend().getMinimumNopSize() != NAF.getAlignment());
return getBackend().getMinimumNopSize();
}
return 0;
}
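A concrete trace of this case (editor's illustration; the offsets mirror the first checked insertion in the deleted test at the end of this diff):
// NeverAlign avoiding 64 at Offset 0x3e, followed by a 2-byte MCDataFragment
// holding "testl %eax, %eax": Offset + NextFragSize == 0x40 is against the
// boundary, so the fragment sizes itself to getMinimumNopSize() (one byte on
// x86) and a single nop is written; at any other end offset its size stays 0.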
case MCFragment::FT_Org: {
const MCOrgFragment &OF = cast<MCOrgFragment>(F);
MCValue Value;
@@ -646,15 +574,6 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
break;
}
case MCFragment::FT_NeverAlign: {
const MCNeverAlignFragment &NAF = cast<MCNeverAlignFragment>(F);
if (!Asm.getBackend().writeNopData(OS, FragmentSize,
&NAF.getSubtargetInfo()))
report_fatal_error("unable to write nop sequence of " +
Twine(FragmentSize) + " bytes");
break;
}
case MCFragment::FT_Data:
++stats::EmittedDataFragments;
OS << cast<MCDataFragment>(F).getContents();
@@ -1108,6 +1027,43 @@ bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
return OldSize != LF.getContents().size();
}
/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch crosses the boundary.
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
Align BoundaryAlignment) {
uint64_t EndAddr = StartAddr + Size;
return (StartAddr >> Log2(BoundaryAlignment)) !=
((EndAddr - 1) >> Log2(BoundaryAlignment));
}
/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary.
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
Align BoundaryAlignment) {
uint64_t EndAddr = StartAddr + Size;
return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}
/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
Align BoundaryAlignment) {
return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}
bool MCAssembler::relaxBoundaryAlign(MCAsmLayout &Layout,
MCBoundaryAlignFragment &BF) {
// BoundaryAlignFragment that doesn't need to align any fragment should not be

View file

@@ -274,9 +274,6 @@ void MCFragment::destroy() {
case FT_Align:
delete cast<MCAlignFragment>(this);
return;
case FT_NeverAlign:
delete cast<MCNeverAlignFragment>(this);
return;
case FT_Data:
delete cast<MCDataFragment>(this);
return;
@@ -345,9 +342,6 @@ LLVM_DUMP_METHOD void MCFragment::dump() const {
OS << "<";
switch (getKind()) {
case MCFragment::FT_Align: OS << "MCAlignFragment"; break;
case MCFragment::FT_NeverAlign:
OS << "MCNeverAlignFragment";
break;
case MCFragment::FT_Data: OS << "MCDataFragment"; break;
case MCFragment::FT_CompactEncodedInst:
OS << "MCCompactEncodedInstFragment"; break;
@@ -387,12 +381,6 @@ LLVM_DUMP_METHOD void MCFragment::dump() const {
<< " MaxBytesToEmit:" << AF->getMaxBytesToEmit() << ">";
break;
}
case MCFragment::FT_NeverAlign: {
const MCNeverAlignFragment *NAF = cast<MCNeverAlignFragment>(this);
OS << "\n ";
OS << " Alignment:" << NAF->getAlignment() << ">";
break;
}
case MCFragment::FT_Data: {
const auto *DF = cast<MCDataFragment>(this);
OS << "\n ";

View file

@@ -663,11 +663,6 @@ void MCObjectStreamer::emitCodeAlignment(unsigned ByteAlignment,
cast<MCAlignFragment>(getCurrentFragment())->setEmitNops(true, STI);
}
void MCObjectStreamer::emitNeverAlignCodeAtEnd(unsigned ByteAlignment,
const MCSubtargetInfo &STI) {
insert(new MCNeverAlignFragment(ByteAlignment, STI));
}
void MCObjectStreamer::emitValueToOffset(const MCExpr *Offset,
unsigned char Value,
SMLoc Loc) {

View file

@@ -1215,8 +1215,6 @@ void MCStreamer::emitValueToAlignment(unsigned ByteAlignment, int64_t Value,
void MCStreamer::emitCodeAlignment(unsigned ByteAlignment,
const MCSubtargetInfo *STI,
unsigned MaxBytesToEmit) {}
void MCStreamer::emitNeverAlignCodeAtEnd(unsigned ByteAlignment,
const MCSubtargetInfo &STI) {}
void MCStreamer::emitValueToOffset(const MCExpr *Offset, unsigned char Value,
SMLoc Loc) {}
void MCStreamer::emitBundleAlignMode(unsigned AlignPow2) {}

View file

@@ -1145,7 +1145,6 @@ private:
bool parseDirectiveArch();
bool parseDirectiveNops(SMLoc L);
bool parseDirectiveEven(SMLoc L);
bool parseDirectiveAvoidEndAlign(SMLoc L);
bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
/// CodeView FPO data directives.
@@ -4634,8 +4633,6 @@ bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
return false;
} else if (IDVal == ".nops")
return parseDirectiveNops(DirectiveID.getLoc());
else if (IDVal == ".avoid_end_align")
return parseDirectiveAvoidEndAlign(DirectiveID.getLoc());
else if (IDVal == ".even")
return parseDirectiveEven(DirectiveID.getLoc());
else if (IDVal == ".cv_fpo_proc")
@@ -4730,27 +4727,6 @@ bool X86AsmParser::parseDirectiveEven(SMLoc L) {
return false;
}
/// Directive for NeverAlign fragment testing, not for general usage!
/// parseDirectiveAvoidEndAlign
/// ::= .avoid_end_align alignment
bool X86AsmParser::parseDirectiveAvoidEndAlign(SMLoc L) {
int64_t Alignment = 0;
SMLoc AlignmentLoc;
AlignmentLoc = getTok().getLoc();
if (getParser().checkForValidSection() ||
getParser().parseAbsoluteExpression(Alignment))
return true;
if (getParser().parseEOL("unexpected token in directive"))
return true;
if (Alignment <= 0)
return Error(AlignmentLoc, "expected a positive alignment");
getParser().getStreamer().emitNeverAlignCodeAtEnd(Alignment, getSTI());
return false;
}
/// ParseDirectiveCode
/// ::= .code16 | .code32 | .code64
bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {

View file

@@ -1,208 +0,0 @@
# RUN: llvm-mc -triple=x86_64 -filetype=obj %s | llvm-objdump --no-show-raw-insn -d - | FileCheck %s
# RUN: not llvm-mc -triple=x86_64 --defsym ERR=1 %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=ERR
# avoid_end_align has no effect since `test` doesn't end at an alignment boundary:
.avoid_end_align 64
# CHECK-NOT: nop
testl %eax, %eax
# CHECK: testl %eax, %eax
je .LBB0
.fill 58, 1, 0x00
# NeverAlign followed by MCDataFragment:
# avoid_end_align inserts a nop because `test` would end at an alignment boundary:
.avoid_end_align 64
# CHECK: 3e: nop
testl %eax, %eax
# CHECK-NEXT: 3f: testl %eax, %eax
je .LBB0
# CHECK-NEXT: 41: je
.LBB0:
retq
.p2align 6
.L0:
.nops 57
int3
# NeverAlign followed by RelaxableFragment:
.avoid_end_align 64
# CHECK: ba: nop
cmpl $(.L1-.L0), %eax
# CHECK-NEXT: bb: cmpl
je .L0
# CHECK-NEXT: c1: je
.nops 65
.L1:
###############################################################################
# Experiment A:
# Check that NeverAlign doesn't introduce infinite loops in layout.
# Control:
# 1. NeverAlign fragment is not added,
# 2. Short formats of cmp and jcc are used (3 and 2 bytes respectively),
# 3. cmp and jcc are placed so that they are split by a 64B alignment boundary.
# 4. jcc would be relaxed to a longer format if at least one byte is added
# between .L10 and je itself, e.g. by adding a NeverAlign padding byte,
# or relaxing cmp instruction.
# 5. cmp would be relaxed to a longer format if at least one byte is added
# between .L11 and .L12, e.g. due to relaxing jcc instruction.
.p2align 6
# CHECK: 140: int3
.fill 2, 1, 0xcc
.L10:
.nops 122
int3
# CHECK: 1bc: int3
# no avoid_end_align here
# CHECK-NOT: nop
cmp $(.L12-.L11), %eax
# CHECK: 1bd: cmpl
.L11:
je .L10
# CHECK-NEXT: 1c0: je
.nops 125
.L12:
# Experiment:
# Same setup as control, except NeverAlign fragment is added before cmp.
# Expected effect:
# 1. NeverAlign pads cmp+jcc by one byte since cmp and jcc are split by a 64B
# alignment boundary,
# 2. This extra byte forces jcc relaxation to a longer format (Control rule #4),
# 3. This results in a cmp relaxation (Control rule #5),
# 4. Which in turn makes NeverAlign fragment unnecessary as cmp and jcc
# are no longer split by an alignment boundary (cmp crosses the boundary).
# 5. NeverAlign padding is removed.
# 6. cmp and jcc instructions remain in relaxed form.
# 7. Relaxation converges, layout succeeds.
.p2align 6
# CHECK: 240: int3
.fill 2, 1, 0xcc
.L20:
.nops 122
int3
# CHECK: 2bc: int3
.avoid_end_align 64
# CHECK-NOT: nop
cmp $(.L22-.L21), %eax
# CHECK-NEXT: 2bd: cmpl
.L21:
je .L20
# CHECK-NEXT: 2c3: je
.nops 125
.L22:
###############################################################################
# Experiment B: similar to exp A, but we check that once NeverAlign padding is
# removed from the layout (exp A, experiment step 5), the increased distance
# between the symbols L33 and L34 triggers the relaxation of instruction at
# label L32.
#
# Control 1: using a one-byte instruction at L33 (the site of NeverAlign) leads
# to steps 2-3 of exp A's experiment:
# 2. This extra byte forces jcc relaxation to a longer format (Control rule #4),
# 3. This results in a cmp relaxation (Control rule #5),
# => short cmp under L32
.p2align 6
# CHECK: 380: int3
.fill 2, 1, 0xcc
.L30:
.nops 122
int3
# CHECK: 3fc: int3
hlt
#.avoid_end_align 64
.L33:
cmp $(.L32-.L31), %eax
# CHECK: 3fe: cmpl
.L31:
je .L30
# CHECK-NEXT: 404: je
.nops 114
.p2align 1
int3
int3
# CHECK: 47c: int3
.L34:
.nops 9
.L32:
cmp $(.L33-.L34), %eax
# CHECK: 487: cmp
# note that the size of cmp is 48a-487 == 3 bytes (distance is exactly -128)
int3
# CHECK-NEXT: 48a: int3
# Control 2: leaving out a byte at L43 (site of NeverAlign), plus
# relaxed jcc and cmp leads to a relaxed cmp under L42 (-129 as cmp's immediate)
.p2align 6
# CHECK: 4c0: int3
.fill 2, 1, 0xcc
.L40:
.nops 122
int3
# CHECK: 53c: int3
# int3
#.avoid_end_align 64
.L43:
cmp $(.L42-.L41+0x100), %eax
# CHECK: 53d: cmpl
.L41:
je .L40+0x100
# CHECK-NEXT: 543: je
.nops 114
.p2align 1
int3
int3
# CHECK: 5bc: int3
.L44:
.nops 9
.L42:
cmp $(.L43-.L44), %eax
# CHECK: 5c7: cmp
# note that the size of cmp is 5cd-5c7 == 6 bytes (distance is exactly -129)
int3
# CHECK-NEXT: 5cd: int3
# Experiment
# Check that removing the NeverAlign padding at L53 (as a result of the
# alignment and relaxation of the cmp and jcc following it, see exp A), which
# reproduces the case in Control 2 (a relaxed cmp under L52), is handled
# correctly.
.p2align 6
# CHECK: 600: int3
.fill 2, 1, 0xcc
.L50:
.nops 122
int3
# CHECK: 67c: int3
.avoid_end_align 64
.L53:
# CHECK-NOT: nop
cmp $(.L52-.L51), %eax
# CHECK-NEXT: 67d: cmpl
.L51:
je .L50
# CHECK-NEXT: 683: je
.nops 114
.p2align 1
int3
int3
# CHECK: 6fc: int3
.L54:
.nops 9
.L52:
cmp $(.L53-.L54), %eax
# CHECK: 707: cmp
# note that the size of cmp is 70d-707 == 6 bytes (distance is exactly -129)
int3
# CHECK-NEXT: 70d: int3
.ifdef ERR
# ERR: {{.*}}.s:[[#@LINE+1]]:17: error: unknown token in expression
.avoid_end_align
# ERR: {{.*}}.s:[[#@LINE+1]]:18: error: expected absolute expression
.avoid_end_align x
# ERR: {{.*}}.s:[[#@LINE+1]]:18: error: expected a positive alignment
.avoid_end_align 0
# ERR: {{.*}}.s:[[#@LINE+1]]:20: error: unexpected token in directive
.avoid_end_align 64, 0
.endif