[mlir][Affine] Support affine vector loads/stores in LICM

Make use of affine memory op interfaces in AffineLoopInvariantCodeMotion so
that it can also work on affine.vector_load and affine.vector_store ops.
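
As an illustration, a minimal sketch of what becomes hoistable (the
function and buffer names below are hypothetical, not part of the patch):
a vector load whose indices do not depend on the loop IV, and whose memref
is not written inside the loop, can now be moved above the affine.for:

  func @hoistable_vector_load(%out: memref<10xvector<4xf32>>) {
    %m = alloc() : memref<4xf32>
    affine.for %i = 0 to 10 {
      // The indices below do not use %i and nothing in the loop writes to
      // %m, so LICM can hoist this affine.vector_load out of the loop.
      %v = affine.vector_load %m[0] : memref<4xf32>, vector<4xf32>
      affine.store %v, %out[%i] : memref<10xvector<4xf32>>
    }
    return
  }

Running mlir-opt -affine-loop-invariant-code-motion on this sketch should
leave only the affine.store (which depends on %i) inside the loop.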

Reviewed By: bondhugula

Differential Revision: https://reviews.llvm.org/D86986
Author: Diego Caballero
Date:   2020-09-03 00:29:04 +03:00
Parent: f09ccf89fb
Commit: 553bfc8fa1

2 changed files with 54 additions and 6 deletions

AffineLoopInvariantCodeMotion.cpp

@@ -63,7 +63,7 @@ areAllOpsInTheBlockListInvariant(Region &blockList, Value indVar,
 static bool isMemRefDereferencingOp(Operation &op) {
   // TODO: Support DMA Ops.
-  return isa<AffineLoadOp, AffineStoreOp>(op);
+  return isa<AffineReadOpInterface, AffineWriteOpInterface>(op);
 }
 
 // Returns true if the individual op is loop invariant.
@@ -90,9 +90,9 @@ bool isOpLoopInvariant(Operation &op, Value indVar,
   definedOps.insert(&op);
 
   if (isMemRefDereferencingOp(op)) {
-    Value memref = isa<AffineLoadOp>(op)
-                       ? cast<AffineLoadOp>(op).getMemRef()
-                       : cast<AffineStoreOp>(op).getMemRef();
+    Value memref = isa<AffineReadOpInterface>(op)
+                       ? cast<AffineReadOpInterface>(op).getMemRef()
+                       : cast<AffineWriteOpInterface>(op).getMemRef();
     for (auto *user : memref.getUsers()) {
       // If this memref has a user that is a DMA, give up because these
       // operations write to this memref.
@@ -102,8 +102,9 @@ bool isOpLoopInvariant(Operation &op, Value indVar,
       // If the memref used by the load/store is used in a store elsewhere in
       // the loop nest, we do not hoist. Similarly, if the memref used in a
      // load is also being stored to, we do not hoist the load.
-      if (isa<AffineStoreOp>(user) ||
-          (isa<AffineLoadOp>(user) && isa<AffineStoreOp>(op))) {
+      if (isa<AffineWriteOpInterface>(user) ||
+          (isa<AffineReadOpInterface>(user) &&
+           isa<AffineWriteOpInterface>(op))) {
         if (&op != user) {
           SmallVector<AffineForOp, 8> userIVs;
           getLoopIVs(*user, &userIVs);
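
Both the scalar and the vector affine memory ops implement these
interfaces, which is what lets the single isa<>/cast<> checks above cover
affine.load/affine.store as well as affine.vector_load/affine.vector_store.
A minimal sketch of the two read forms now matched uniformly (the function
name is illustrative):

  func @reads_matched_by_interface(%m: memref<40xf32>, %i: index) {
    // Both ops implement AffineReadOpInterface, so
    // isMemRefDereferencingOp() treats them identically.
    %s = affine.load %m[%i] : memref<40xf32>
    %v = affine.vector_load %m[%i] : memref<40xf32>, vector<4xf32>
    return
  }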

affine-loop-invariant-code-motion.mlir (test)

@@ -566,3 +566,50 @@ func @do_not_hoist_dependent_side_effect_free_op(%arg0: memref<10x512xf32>) {
 // CHECK-NEXT: affine.load
 // CHECK-NEXT: mulf
 // CHECK-NEXT: }
+
+// -----
+
+// CHECK-LABEL: func @vector_loop_nothing_invariant
+func @vector_loop_nothing_invariant() {
+  %m1 = alloc() : memref<40xf32>
+  %m2 = alloc() : memref<40xf32>
+  affine.for %arg0 = 0 to 10 {
+    %v0 = affine.vector_load %m1[%arg0*4] : memref<40xf32>, vector<4xf32>
+    %v1 = affine.vector_load %m2[%arg0*4] : memref<40xf32>, vector<4xf32>
+    %v2 = addf %v0, %v1 : vector<4xf32>
+    affine.vector_store %v2, %m1[%arg0*4] : memref<40xf32>, vector<4xf32>
+  }
+  return
+}
+
+// CHECK:      affine.for
+// CHECK-NEXT:   affine.vector_load
+// CHECK-NEXT:   affine.vector_load
+// CHECK-NEXT:   addf
+// CHECK-NEXT:   affine.vector_store
+// CHECK-NEXT: }
+
+// -----
+
+// CHECK-LABEL: func @vector_loop_all_invariant
+func @vector_loop_all_invariant() {
+  %m1 = alloc() : memref<4xf32>
+  %m2 = alloc() : memref<4xf32>
+  %m3 = alloc() : memref<4xf32>
+  affine.for %arg0 = 0 to 10 {
+    %v0 = affine.vector_load %m1[0] : memref<4xf32>, vector<4xf32>
+    %v1 = affine.vector_load %m2[0] : memref<4xf32>, vector<4xf32>
+    %v2 = addf %v0, %v1 : vector<4xf32>
+    affine.vector_store %v2, %m3[0] : memref<4xf32>, vector<4xf32>
+  }
+  return
+}
+
+// CHECK:      alloc()
+// CHECK-NEXT: alloc()
+// CHECK-NEXT: alloc()
+// CHECK-NEXT: affine.vector_load
+// CHECK-NEXT: affine.vector_load
+// CHECK-NEXT: addf
+// CHECK-NEXT: affine.vector_store
+// CHECK-NEXT: affine.for
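
The hunk does not show the test file's RUN line; given the // ----- split
markers and the FileCheck directives above, it presumably resembles:

  // RUN: mlir-opt %s -affine-loop-invariant-code-motion -split-input-file | FileCheck %s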