[MIRParser] Diagnose too large align values in MachineMemOperands
When parsing MachineMemOperands, MIRParser treated the "align" keyword the same as "basealign". Really "basealign" should specify the alignment of the MachinePointerInfo base value, and "align" should specify the alignment of that base value plus the offset.

This worked OK when the specified alignment was no larger than the alignment of the offset, but in cases like this it just caused confusion:

    STW killed %18, 4, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i + 4, align 8)

MIRPrinter would never have printed this, with an offset of 4 but an align of 8, so it must have been written by hand. MIRParser would interpret "align 8" as "basealign 8", but I think it is better to give an error and force the user to write "basealign 8" if that is what they really meant.

Differential Revision: https://reviews.llvm.org/D120400

Change-Id: I7eeeefc55c2df3554ba8d89f8809a2f45ada32d8
This commit is contained in:
parent
1fa1251116
commit
719bac55df
|
@ -250,7 +250,7 @@ static MIToken::TokenKind getIdentifierKind(StringRef Identifier) {
|
|||
.Case("dereferenceable", MIToken::kw_dereferenceable)
|
||||
.Case("invariant", MIToken::kw_invariant)
|
||||
.Case("align", MIToken::kw_align)
|
||||
.Case("basealign", MIToken::kw_align)
|
||||
.Case("basealign", MIToken::kw_basealign)
|
||||
.Case("addrspace", MIToken::kw_addrspace)
|
||||
.Case("stack", MIToken::kw_stack)
|
||||
.Case("got", MIToken::kw_got)
|
||||
|
|
|
@ -3281,11 +3281,21 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
|
|||
MDNode *Range = nullptr;
|
||||
while (consumeIfPresent(MIToken::comma)) {
|
||||
switch (Token.kind()) {
|
||||
case MIToken::kw_align:
|
||||
case MIToken::kw_align: {
|
||||
// align is printed if it is different than size.
|
||||
if (parseAlignment(BaseAlignment))
|
||||
uint64_t Alignment;
|
||||
if (parseAlignment(Alignment))
|
||||
return true;
|
||||
if (Ptr.Offset & (Alignment - 1)) {
|
||||
// MachineMemOperand::getAlign never returns a value greater than the
|
||||
// alignment of offset, so this just guards against hand-written MIR
|
||||
// that specifies a large "align" value when it should probably use
|
||||
// "basealign" instead.
|
||||
return error("specified alignment is more aligned than offset");
|
||||
}
|
||||
BaseAlignment = Alignment;
|
||||
break;
|
||||
}
|
||||
case MIToken::kw_basealign:
|
||||
// basealign is printed if it is different than align.
|
||||
if (parseAlignment(BaseAlignment))
|
||||
|
|
|
@ -410,7 +410,7 @@ body: |
|
|||
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_]].sub0
|
||||
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
|
||||
%0:vreg_64_align2 = IMPLICIT_DEF
|
||||
%1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef` + 4, align 8, addrspace 1)
|
||||
%1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef` + 4, basealign 8, addrspace 1)
|
||||
%2:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, implicit $exec :: (load (s32) from `float addrspace(1)* undef`, align 4, addrspace 1)
|
||||
S_NOP 0, implicit %1, implicit %2
|
||||
...
|
||||
|
|
10
llvm/test/CodeGen/MIR/Generic/aligned-memoperands-err.mir
Normal file
10
llvm/test/CodeGen/MIR/Generic/aligned-memoperands-err.mir
Normal file
|
@ -0,0 +1,10 @@
|
|||
# RUN: not llc -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
|
||||
|
||||
---
|
||||
name: aligned_memoperands
|
||||
body: |
|
||||
bb.0:
|
||||
%0:_(p0) = IMPLICIT_DEF
|
||||
; CHECK: :[[@LINE+1]]:73: specified alignment is more aligned than offset
|
||||
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, align 8)
|
||||
...
|
28
llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir
Normal file
28
llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir
Normal file
|
@ -0,0 +1,28 @@
|
|||
# RUN: llc -run-pass none -o - %s | FileCheck %s
|
||||
|
||||
---
|
||||
name: aligned_memoperands
|
||||
body: |
|
||||
bb.0:
|
||||
; CHECK-LABEL: name: aligned_memoperands
|
||||
; CHECK: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
|
||||
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`)
|
||||
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`, align 2)
|
||||
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`)
|
||||
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`, align 8)
|
||||
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, align 2)
|
||||
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12)
|
||||
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, align 2)
|
||||
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12)
|
||||
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, basealign 8)
|
||||
%0:_(p0) = IMPLICIT_DEF
|
||||
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`)
|
||||
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 2)
|
||||
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 4) ; redundant
|
||||
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 8)
|
||||
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, align 2)
|
||||
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, align 4) ; redundant
|
||||
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 2) ; printed as "align"
|
||||
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 4) ; redundant
|
||||
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 8)
|
||||
...
|
|
@ -214,8 +214,8 @@ body: |
|
|||
ST_B killed $w0, %stack.4.b.addr, 0 :: (store (s128) into %ir.b.addr)
|
||||
$w0 = LD_B %stack.4.b.addr, 0 :: (dereferenceable load (s128) from %ir.b.addr)
|
||||
ST_B killed $w0, %stack.0.retval, 0 :: (store (s128) into %ir.retval)
|
||||
$v0_64 = LD %stack.0.retval, 0 :: (dereferenceable load (s64) from %ir.20, align 16)
|
||||
$v1_64 = LD %stack.0.retval, 8 :: (dereferenceable load (s64) from %ir.20 + 8, align 16)
|
||||
$v0_64 = LD %stack.0.retval, 0 :: (dereferenceable load (s64) from %ir.20, basealign 16)
|
||||
$v1_64 = LD %stack.0.retval, 8 :: (dereferenceable load (s64) from %ir.20 + 8, basealign 16)
|
||||
RetRA implicit $v0_64, implicit $v1_64
|
||||
|
||||
...
|
||||
|
|
|
@ -152,11 +152,11 @@ body: |
|
|||
bb.1.if.then6.i.i:
|
||||
LIFETIME_START %stack.1.ap2.i.i
|
||||
%17:gprc = LWZ 8, $zero :: (load (s32), align 8)
|
||||
STW killed %17, 8, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i + 8, align 8)
|
||||
STW killed %17, 8, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i + 8, basealign 8)
|
||||
%18:gprc = LWZ 4, $zero :: (load (s32))
|
||||
STW killed %18, 4, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i + 4, align 8)
|
||||
STW killed %18, 4, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i + 4, basealign 8)
|
||||
%19:gprc = LWZ 0, $zero :: (load (s32), align 8)
|
||||
STW killed %19, 0, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i, align 8)
|
||||
STW killed %19, 0, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i, basealign 8)
|
||||
BLR implicit $lr, implicit $rm
|
||||
|
||||
bb.2.format_reason_ap.exit.i:
|
||||
|
|
|
@ -434,9 +434,9 @@ body: |
|
|||
renamable $x14 = ADDXri renamable $x10, 64, 0, debug-location !35
|
||||
$x15 = MOVZXi 35507, 0, debug-location !35
|
||||
$x15 = MOVKXi $x15, 16821, 16, debug-location !35
|
||||
STRXui killed renamable $x15, $sp, 24, debug-location !35 :: (store (s768) into %stack.3.MyAlloca, align 32)
|
||||
STRXui killed renamable $x9, $sp, 25, debug-location !35 :: (store (s768) into %stack.3.MyAlloca + 1, align 32)
|
||||
STRXui killed renamable $x8, $sp, 26, debug-location !35 :: (store (s768) into %stack.3.MyAlloca + 2, align 32)
|
||||
STRXui killed renamable $x15, $sp, 24, debug-location !35 :: (store (s768) into %stack.3.MyAlloca, basealign 32)
|
||||
STRXui killed renamable $x9, $sp, 25, debug-location !35 :: (store (s768) into %stack.3.MyAlloca + 1, basealign 32)
|
||||
STRXui killed renamable $x8, $sp, 26, debug-location !35 :: (store (s768) into %stack.3.MyAlloca + 2, basealign 32)
|
||||
renamable $x8 = UBFMXri renamable $x10, 3, 63, debug-location !35
|
||||
renamable $x9 = ADDXrs renamable $x11, renamable $x10, 67, debug-location !35
|
||||
$x15 = MOVZXi 61937, 0, debug-location !35
|
||||
|
|
Loading…
Reference in a new issue