[mlir] Update FuncOp conversion passes to Pass/InterfacePass<FunctionOpInterface>

These passes generally don't rely on any special aspects of FuncOp, and the move allows
them to be used in many more situations. The passes that clearly weren't
relying on invariants guaranteed by a "function" were updated to be generic passes; the
rest were updated to be FunctionOpInterface InterfacePasses.
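
As a rough illustration of the interface-based pattern (the pass below is hypothetical and not part of this change; InterfacePass and FunctionOpInterface are the MLIR classes this commit switches to):

#include "mlir/IR/FunctionInterfaces.h"
#include "mlir/Pass/Pass.h"

using namespace mlir;

namespace {
/// A hypothetical pass that runs on any operation implementing
/// FunctionOpInterface (e.g. func.func, gpu.func), rather than being
/// hard-wired to FuncOp.
struct ExampleFunctionPass
    : public PassWrapper<ExampleFunctionPass,
                         InterfacePass<FunctionOpInterface>> {
  void runOnOperation() override {
    // getOperation() yields the interface, not a concrete FuncOp.
    FunctionOpInterface func = getOperation();
    func.emitRemark() << "visiting a function-like operation";
  }
};
} // namespace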

The test updates are NFC, switching from the implicit nesting form (-pass -pass2) to
the -pass-pipeline form (generic passes do not implicitly nest the way op-specific passes do).
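
For example, a RUN line that previously relied on implicit nesting (both lines are taken verbatim from the updated tests below):

// RUN: mlir-opt %s -split-input-file -convert-math-to-llvm | FileCheck %s

becomes an explicit pipeline:

// RUN: mlir-opt %s -split-input-file -pass-pipeline="builtin.func(convert-math-to-llvm)" | FileCheck %s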

Differential Revision: https://reviews.llvm.org/D121190
River Riddle 2022-03-07 13:56:38 -08:00
parent f8d5c73c82
commit 47f175b09b
59 changed files with 126 additions and 204 deletions

@@ -12,10 +12,6 @@
#include "mlir/Pass/Pass.h"
namespace mlir {
-class FuncOp;
-template <typename T>
-class OperationPass;
/// Populates patterns for the lowering of Arm NEON 2D ops to intrinsics.
/// See createConvertArmNeon2dToIntrPass.
void populateConvertArmNeon2dToIntrPatterns(RewritePatternSet &patterns);
@@ -23,7 +19,7 @@ void populateConvertArmNeon2dToIntrPatterns(RewritePatternSet &patterns);
/// Creates a pass to lower Arm NEON 2D ops to intrinsics, i.e.
/// equivalent ops operating on flattened 1D vectors and mapping more
/// directly to the corresponding Arm NEON instruction.
-std::unique_ptr<OperationPass<FuncOp>> createConvertArmNeon2dToIntrPass();
+std::unique_ptr<Pass> createConvertArmNeon2dToIntrPass();
} // namespace mlir

@@ -10,19 +10,15 @@
#include <memory>
#include "mlir/Transforms/DialectConversion.h"
namespace mlir {
-class FuncOp;
class RewritePatternSet;
-template <typename T>
-class OperationPass;
+class Pass;
/// Populate the given list with patterns that convert from Complex to Standard.
void populateComplexToStandardConversionPatterns(RewritePatternSet &patterns);
/// Create a pass to convert Complex operations to the Standard dialect.
-std::unique_ptr<OperationPass<FuncOp>> createConvertComplexToStandardPass();
+std::unique_ptr<Pass> createConvertComplexToStandardPass();
} // namespace mlir

@@ -77,7 +77,7 @@ def ConvertAffineToStandard : Pass<"lower-affine"> {
// ArithmeticToLLVM
//===----------------------------------------------------------------------===//
-def ConvertArithmeticToLLVM : Pass<"convert-arith-to-llvm", "FuncOp"> {
+def ConvertArithmeticToLLVM : Pass<"convert-arith-to-llvm"> {
let summary = "Convert Arithmetic dialect to LLVM dialect";
let description = [{
This pass converts supported Arithmetic ops to LLVM dialect instructions.
@@ -107,6 +107,16 @@ def ConvertArithmeticToSPIRV : Pass<"convert-arith-to-spirv", "ModuleOp"> {
];
}
+//===----------------------------------------------------------------------===//
+// ArmNeon2dToIntr
+//===----------------------------------------------------------------------===//
+def ConvertArmNeon2dToIntr : Pass<"arm-neon-2d-to-intr"> {
+let summary = "Convert Arm NEON structured ops to intrinsics";
+let constructor = "mlir::createConvertArmNeon2dToIntrPass()";
+let dependentDialects = ["arm_neon::ArmNeonDialect", "vector::VectorDialect"];
+}
//===----------------------------------------------------------------------===//
// AsyncToLLVM
//===----------------------------------------------------------------------===//
@@ -174,7 +184,7 @@ def ConvertComplexToLLVM : Pass<"convert-complex-to-llvm", "ModuleOp"> {
// ComplexToStandard
//===----------------------------------------------------------------------===//
-def ConvertComplexToStandard : Pass<"convert-complex-to-standard", "FuncOp"> {
+def ConvertComplexToStandard : Pass<"convert-complex-to-standard"> {
let summary = "Convert Complex dialect to standard dialect";
let constructor = "mlir::createConvertComplexToStandardPass()";
let dependentDialects = ["math::MathDialect"];
@@ -444,7 +454,7 @@ def ConvertMathToLibm : Pass<"convert-math-to-libm", "ModuleOp"> {
// MathToLLVM
//===----------------------------------------------------------------------===//
-def ConvertMathToLLVM : Pass<"convert-math-to-llvm", "FuncOp"> {
+def ConvertMathToLLVM : Pass<"convert-math-to-llvm"> {
let summary = "Convert Math dialect to LLVM dialect";
let description = [{
This pass converts supported Math ops to LLVM dialect intrinsics.
@@ -605,7 +615,8 @@ def SCFToSPIRV : Pass<"convert-scf-to-spirv", "ModuleOp"> {
// SCFToGPU
//===----------------------------------------------------------------------===//
-def ConvertAffineForToGPU : Pass<"convert-affine-for-to-gpu", "FuncOp"> {
+def ConvertAffineForToGPU
+    : InterfacePass<"convert-affine-for-to-gpu", "FunctionOpInterface"> {
let summary = "Convert top-level AffineFor Ops to GPU kernels";
let constructor = "mlir::createAffineForToGPUPass()";
let dependentDialects = ["gpu::GPUDialect"];
@@ -636,7 +647,7 @@ def ConvertShapeToStandard : Pass<"convert-shape-to-std", "ModuleOp"> {
];
}
-def ConvertShapeConstraints: Pass<"convert-shape-constraints", "FuncOp"> {
+def ConvertShapeConstraints : Pass<"convert-shape-constraints"> {
let summary = "Convert shape constraint operations to the standard dialect";
let description = [{
This pass eliminates shape constraints from the program, converting them to
@@ -685,7 +696,8 @@ def ConvertTensorToSPIRV : Pass<"convert-tensor-to-spirv", "ModuleOp"> {
// TosaToLinalg
//===----------------------------------------------------------------------===//
-def TosaToLinalg : Pass<"tosa-to-linalg", "FuncOp"> {
+def TosaToLinalg
+    : InterfacePass<"tosa-to-linalg", "FunctionOpInterface"> {
let summary = "Lower TOSA to LinAlg on tensors";
let description = [{
Pass that converts TOSA operations to the equivalent operations using the
@@ -699,7 +711,8 @@ def TosaToLinalg : Pass<"tosa-to-linalg", "FuncOp"> {
// TosaToLinalgNamed
//===----------------------------------------------------------------------===//
-def TosaToLinalgNamed : Pass<"tosa-to-linalg-named", "FuncOp"> {
+def TosaToLinalgNamed
+    : InterfacePass<"tosa-to-linalg-named", "FunctionOpInterface"> {
let summary = "Lower TOSA to LinAlg named operations";
let description = [{
Pass that converts TOSA operations to the equivalent operations using the
@@ -746,7 +759,7 @@ def TosaToStandard : Pass<"tosa-to-standard"> {
// VectorToGPU
//===----------------------------------------------------------------------===//
-def ConvertVectorToGPU : Pass<"convert-vector-to-gpu", "FuncOp"> {
+def ConvertVectorToGPU : Pass<"convert-vector-to-gpu"> {
let summary = "Lower the operations from the vector dialect into the GPU "
"dialect";
let constructor = "mlir::createConvertVectorToGPUPass()";
@@ -760,7 +773,7 @@ def ConvertVectorToGPU : Pass<"convert-vector-to-gpu", "FuncOp"> {
// VectorToSCF
//===----------------------------------------------------------------------===//
-def ConvertVectorToSCF : Pass<"convert-vector-to-scf", "FuncOp"> {
+def ConvertVectorToSCF : Pass<"convert-vector-to-scf"> {
let summary = "Lower the operations from the vector dialect into the SCF "
"dialect";
let constructor = "mlir::createConvertVectorToSCFPass()";
@@ -850,15 +863,4 @@ def ConvertVectorToSPIRV : Pass<"convert-vector-to-spirv", "ModuleOp"> {
let dependentDialects = ["spirv::SPIRVDialect"];
}
-//===----------------------------------------------------------------------===//
-// ArmNeon2dToIntr
-//===----------------------------------------------------------------------===//
-def ConvertArmNeon2dToIntr : Pass<"arm-neon-2d-to-intr", "FuncOp"> {
-let summary = "Convert Arm NEON structured ops to intrinsics";
-let constructor = "mlir::createConvertArmNeon2dToIntrPass()";
-let dependentDialects = ["arm_neon::ArmNeonDialect", "vector::VectorDialect"];
-}
#endif // MLIR_CONVERSION_PASSES

@@ -13,9 +13,9 @@
#include <memory>
namespace mlir {
-class FuncOp;
+class FunctionOpInterface;
template <typename T>
-class OperationPass;
+class InterfacePass;
class Pass;
/// Create a pass that converts loop nests into GPU kernels. It considers
@@ -26,9 +26,9 @@ class Pass;
/// parallelization is performed, it is under the responsibility of the caller
/// to strip-mine the loops and to perform the dependence analysis before
/// calling the conversion.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<InterfacePass<FunctionOpInterface>>
createAffineForToGPUPass(unsigned numBlockDims, unsigned numThreadDims);
-std::unique_ptr<OperationPass<FuncOp>> createAffineForToGPUPass();
+std::unique_ptr<InterfacePass<FunctionOpInterface>> createAffineForToGPUPass();
/// Creates a pass that converts scf.parallel operations into a gpu.launch
/// operation. The mapping of loop dimensions to launch dimensions is derived

@@ -13,8 +13,8 @@
namespace mlir {
-class FuncOp;
class ModuleOp;
+class Pass;
template <typename T>
class OperationPass;
class RewritePatternSet;
@@ -26,7 +26,7 @@ std::unique_ptr<OperationPass<ModuleOp>> createConvertShapeToStandardPass();
void populateConvertShapeConstraintsConversionPatterns(
RewritePatternSet &patterns);
-std::unique_ptr<OperationPass<FuncOp>> createConvertShapeConstraintsPass();
+std::unique_ptr<Pass> createConvertShapeConstraintsPass();
} // namespace mlir

@@ -14,17 +14,16 @@
namespace mlir {
class MLIRContext;
class Pass;
-class FuncOp;
class RewritePatternSet;
/// Patterns to transform vector ops into a canonical form to convert to MMA
/// matrix operations.
void populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns);
-/// Convert vector ops to MMA matrix operations. This will convert slice of
-/// operations that can be legally converted to MMA operations. The rest of the
-/// vector operations are left untouched.
-void convertVectorToMMAOps(FuncOp funcOp);
+/// Convert vector ops to MMA matrix operations nested under `rootOp`. This will
+/// convert slice of operations that can be legally converted to MMA operations.
+/// The rest of the vector operations are left untouched.
+void convertVectorToMMAOps(Operation *rootOp);
/// Convert from vector to GPU ops.
std::unique_ptr<Pass> createConvertVectorToGPUPass();

@@ -49,27 +49,23 @@ public:
class ConvertArmNeon2dToIntr
: public ConvertArmNeon2dToIntrBase<ConvertArmNeon2dToIntr> {
void runOnOperation() override {
-auto func = getOperation();
auto *context = &getContext();
RewritePatternSet patterns(context);
populateConvertArmNeon2dToIntrPatterns(patterns);
-if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns))))
+if (failed(
+applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
return signalPassFailure();
}
};
} // namespace
-namespace mlir {
-void populateConvertArmNeon2dToIntrPatterns(RewritePatternSet &patterns) {
+void mlir::populateConvertArmNeon2dToIntrPatterns(RewritePatternSet &patterns) {
patterns.add<Sdot2dLoweringPattern>(patterns.getContext());
}
-std::unique_ptr<OperationPass<FuncOp>> createConvertArmNeon2dToIntrPass() {
+std::unique_ptr<Pass> mlir::createConvertArmNeon2dToIntrPass() {
return std::make_unique<ConvertArmNeon2dToIntr>();
}
-} // namespace mlir

@@ -644,8 +644,6 @@ struct ConvertComplexToStandardPass
};
void ConvertComplexToStandardPass::runOnOperation() {
-auto function = getOperation();
// Convert to the Standard dialect using the converter defined above.
RewritePatternSet patterns(&getContext());
populateComplexToStandardConversionPatterns(patterns);
@@ -653,12 +651,12 @@ void ConvertComplexToStandardPass::runOnOperation() {
ConversionTarget target(getContext());
target.addLegalDialect<arith::ArithmeticDialect, math::MathDialect>();
target.addLegalOp<complex::CreateOp, complex::ImOp, complex::ReOp>();
-if (failed(applyPartialConversion(function, target, std::move(patterns))))
+if (failed(
+applyPartialConversion(getOperation(), target, std::move(patterns))))
signalPassFailure();
}
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
-mlir::createConvertComplexToStandardPass() {
+std::unique_ptr<Pass> mlir::createConvertComplexToStandardPass() {
return std::make_unique<ConvertComplexToStandardPass>();
}

@@ -15,6 +15,7 @@
namespace mlir {
class AffineDialect;
+class FunctionOpInterface;
// Forward declaration from Dialect.h
template <typename ConcreteDialect>

@@ -34,7 +34,8 @@ struct ForLoopMapper : public ConvertAffineForToGPUBase<ForLoopMapper> {
}
void runOnOperation() override {
-for (Operation &op : llvm::make_early_inc_range(getOperation().getOps())) {
+for (Operation &op :
+llvm::make_early_inc_range(getOperation().getBody().getOps())) {
if (auto forOp = dyn_cast<AffineForOp>(&op)) {
if (failed(convertAffineLoopNestToGPULaunch(forOp, numBlockDims,
numThreadDims)))
@@ -61,11 +62,12 @@ struct ParallelLoopToGpuPass
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<InterfacePass<FunctionOpInterface>>
mlir::createAffineForToGPUPass(unsigned numBlockDims, unsigned numThreadDims) {
return std::make_unique<ForLoopMapper>(numBlockDims, numThreadDims);
}
-std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineForToGPUPass() {
+std::unique_ptr<InterfacePass<FunctionOpInterface>>
+mlir::createAffineForToGPUPass() {
return std::make_unique<ForLoopMapper>();
}

@@ -63,7 +63,6 @@ class ConvertShapeConstraints
};
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
-mlir::createConvertShapeConstraintsPass() {
+std::unique_ptr<Pass> mlir::createConvertShapeConstraintsPass() {
return std::make_unique<ConvertShapeConstraints>();
}

@@ -53,7 +53,7 @@ public:
target.markUnknownOpDynamicallyLegal([](Operation *) { return true; });
-FuncOp func = getOperation();
+FunctionOpInterface func = getOperation();
mlir::tosa::populateTosaToLinalgNamedConversionPatterns(&patterns);
if (failed(applyFullConversion(func, target, std::move(patterns))))
signalPassFailure();

@@ -54,7 +54,7 @@ public:
target.markUnknownOpDynamicallyLegal([](Operation *) { return true; });
-FuncOp func = getOperation();
+FunctionOpInterface func = getOperation();
mlir::tosa::populateTosaToLinalgConversionPatterns(&patterns);
if (failed(applyFullConversion(func, target, std::move(patterns))))
signalPassFailure();

@@ -503,15 +503,13 @@ static void convertElementwiseOp(Operation *op, gpu::MMAElementwiseOp opType,
valueMapping[op->getResult(0)] = newOp;
}
-namespace mlir {
-void populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) {
+void mlir::populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) {
patterns.add<PrepareContractToGPUMMA, CombineTransferReadOpTranspose>(
patterns.getContext());
}
-void convertVectorToMMAOps(FuncOp funcOp) {
-SetVector<Operation *> ops = getOpToConvert(funcOp);
+void mlir::convertVectorToMMAOps(Operation *rootOp) {
+SetVector<Operation *> ops = getOpToConvert(rootOp);
llvm::DenseMap<Value, Value> valueMapping;
for (Operation *op : ops) {
if (auto transferRead = dyn_cast<vector::TransferReadOp>(op)) {
@@ -534,13 +532,12 @@ void convertVectorToMMAOps(FuncOp funcOp) {
}
}
-} // namespace mlir
namespace {
struct ConvertVectorToGPUPass
: public ConvertVectorToGPUBase<ConvertVectorToGPUPass> {
void runOnOperation() override {
-RewritePatternSet patterns(getOperation().getContext());
+RewritePatternSet patterns(&getContext());
populatePrepareVectorToMMAPatterns(patterns);
(void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));

@@ -1254,9 +1254,7 @@ struct TransferOp1dConversion : public VectorToSCFPattern<OpTy> {
} // namespace lowering_1_d
} // namespace
-namespace mlir {
-void populateVectorToSCFConversionPatterns(
+void mlir::populateVectorToSCFConversionPatterns(
RewritePatternSet &patterns, const VectorTransferToSCFOptions &options) {
if (options.unroll) {
patterns.add<lowering_n_d_unrolled::UnrollTransferReadConversion,
@@ -1277,8 +1275,6 @@ void populateVectorToSCFConversionPatterns(
}
}
-} // namespace mlir
namespace {
struct ConvertVectorToSCFPass
@@ -1300,14 +1296,14 @@ struct ConvertVectorToSCFPass
// Lower permutation maps first.
if (lowerPermutationMaps) {
-RewritePatternSet lowerTransferPatterns(getOperation().getContext());
+RewritePatternSet lowerTransferPatterns(&getContext());
mlir::vector::populateVectorTransferPermutationMapLoweringPatterns(
lowerTransferPatterns);
(void)applyPatternsAndFoldGreedily(getOperation(),
std::move(lowerTransferPatterns));
}
-RewritePatternSet patterns(getOperation().getContext());
+RewritePatternSet patterns(&getContext());
populateVectorToSCFConversionPatterns(patterns, options);
(void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}

@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-arith-to-llvm %s -split-input-file | FileCheck %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-arith-to-llvm)" %s -split-input-file | FileCheck %s
// CHECK-LABEL: @vector_ops
func @vector_ops(%arg0: vector<4xf32>, %arg1: vector<4xi1>, %arg2: vector<4xi64>, %arg3: vector<4xi64>) -> vector<4xf32> {

@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-arith-to-llvm %s -split-input-file | FileCheck %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-arith-to-llvm)" %s -split-input-file | FileCheck %s
// CHECK-LABEL: @vec_bin
func @vec_bin(%arg0: vector<2x2x2xf32>) -> vector<2x2x2xf32> {

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-complex-to-standard | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-complex-to-standard)" | FileCheck %s
// CHECK-LABEL: func @complex_abs
// CHECK-SAME: %[[ARG:.*]]: complex<f32>

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-complex-to-standard -convert-complex-to-llvm -convert-math-to-llvm -convert-arith-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-complex-to-standard),convert-complex-to-llvm,builtin.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" | FileCheck %s
// CHECK-LABEL: llvm.func @complex_abs
// CHECK-SAME: %[[ARG:.*]]: ![[C_TY:.*]])

@@ -1,5 +1,5 @@
-// RUN: mlir-opt -convert-arith-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts -split-input-file %s | FileCheck %s
-// RUN: mlir-opt -convert-arith-to-llvm -convert-func-to-llvm='use-bare-ptr-memref-call-conv=1' -reconcile-unrealized-casts -split-input-file %s | FileCheck %s --check-prefix=BAREPTR
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" -split-input-file %s | FileCheck %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-arith-to-llvm),convert-func-to-llvm{use-bare-ptr-memref-call-conv=1},reconcile-unrealized-casts" -split-input-file %s | FileCheck %s --check-prefix=BAREPTR
// BAREPTR-LABEL: func @check_noalias
// BAREPTR-SAME: %{{.*}}: !llvm.ptr<f32> {llvm.noalias}, %{{.*}}: !llvm.ptr<f32> {llvm.noalias}

@@ -1,5 +1,5 @@
-// RUN: mlir-opt -convert-math-to-llvm -convert-arith-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts %s -split-input-file | FileCheck %s
-// RUN: mlir-opt -convert-math-to-llvm -convert-arith-to-llvm='index-bitwidth=32' -convert-func-to-llvm='index-bitwidth=32' -reconcile-unrealized-casts %s -split-input-file | FileCheck --check-prefix=CHECK32 %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" %s -split-input-file | FileCheck %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-math-to-llvm,convert-arith-to-llvm{index-bitwidth=32}),convert-func-to-llvm{index-bitwidth=32},reconcile-unrealized-casts" %s -split-input-file | FileCheck --check-prefix=CHECK32 %s
// CHECK-LABEL: func @empty() {
// CHECK-NEXT: llvm.return

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -convert-math-to-llvm | FileCheck %s
+// RUN: mlir-opt %s -split-input-file -pass-pipeline="builtin.func(convert-math-to-llvm)" | FileCheck %s
// CHECK-LABEL: @ops
func @ops(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32, %arg4: f64) {

@@ -1,5 +1,5 @@
-// RUN: mlir-opt -convert-affine-for-to-gpu="gpu-block-dims=0 gpu-thread-dims=1" %s | FileCheck --check-prefix=CHECK-THREADS %s
-// RUN: mlir-opt -convert-affine-for-to-gpu="gpu-block-dims=1 gpu-thread-dims=0" %s | FileCheck --check-prefix=CHECK-BLOCKS %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=0 gpu-thread-dims=1})" %s | FileCheck --check-prefix=CHECK-THREADS %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=0})" %s | FileCheck --check-prefix=CHECK-BLOCKS %s
// CHECK-THREADS-LABEL: @one_d_loop
// CHECK-BLOCKS-LABEL: @one_d_loop

@@ -1,5 +1,5 @@
-// RUN: mlir-opt -convert-affine-for-to-gpu="gpu-block-dims=1 gpu-thread-dims=1" %s | FileCheck --check-prefix=CHECK-11 %s
-// RUN: mlir-opt -convert-affine-for-to-gpu="gpu-block-dims=2 gpu-thread-dims=2" %s | FileCheck --check-prefix=CHECK-22 %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck --check-prefix=CHECK-11 %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=2 gpu-thread-dims=2})" %s | FileCheck --check-prefix=CHECK-22 %s
// CHECK-11-LABEL: @step_1
// CHECK-22-LABEL: @step_1

@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-affine-for-to-gpu="gpu-block-dims=1 gpu-thread-dims=1" %s | FileCheck %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck %s
// CHECK-LABEL: @step_var
func @step_var(%A : memref<?x?xf32>, %B : memref<?x?xf32>) {

@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-shape-constraints <%s | FileCheck %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-shape-constraints)" <%s | FileCheck %s
// There's not very much useful to check here other than pasting the output.
// CHECK-LABEL: func @cstr_broadcastable(

@@ -1,4 +1,4 @@
-// RUN: mlir-opt --split-input-file --tosa-to-linalg-named %s -verify-diagnostics -o -| FileCheck %s
+// RUN: mlir-opt --split-input-file -pass-pipeline="builtin.func(tosa-to-linalg-named)" %s -verify-diagnostics -o -| FileCheck %s
// CHECK-LABEL: @matmul
func @matmul(%arg0: tensor<1x5x3xf32>, %arg1: tensor<1x3x6xf32>) -> (tensor<1x5x6xf32>) {

@@ -1,4 +1,4 @@
-// RUN: mlir-opt --split-input-file --tosa-to-linalg %s -verify-diagnostics -o -| FileCheck %s
+// RUN: mlir-opt --split-input-file -pass-pipeline="builtin.func(tosa-to-linalg)" %s -verify-diagnostics -o -| FileCheck %s
// CHECK: #[[$MAP0:.*]] = affine_map<() -> ()>

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-vector-to-gpu -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-gpu)" -canonicalize | FileCheck %s
#map0 = affine_map<(d0, d1) -> (d1, d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-vector-to-scf='lower-tensors=true' -split-input-file -allow-unregistered-dialect | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-tensors=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s
// CHECK-LABEL: func @transfer_read_2d(
// CHECK: %[[ALLOC:.*]] = memref.alloca() : memref<vector<4x9xf32>>

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-tensors=true' -split-input-file -allow-unregistered-dialect | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-tensors=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s
// CHECK-LABEL: func @transfer_read_2d(
// CHECK: %[[V_INIT:.*]] = arith.constant dense<-4.200000e+01> : vector<4x9xf32>

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -split-input-file -allow-unregistered-dialect | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s
// CHECK-LABEL: func @transfer_read_inbounds
func @transfer_read_inbounds(%A : memref<?x?x?xf32>) -> (vector<2x3x4xf32>) {

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true})" -split-input-file | FileCheck %s
// Ensure that the permutation map is lowered (by inserting a transpose op)
// before lowering the vector.transfer_read.

@@ -1,5 +1,5 @@
-// RUN: mlir-opt %s -convert-vector-to-scf -split-input-file -allow-unregistered-dialect | FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -split-input-file -allow-unregistered-dialect | FileCheck %s --check-prefix=FULL-UNROLL
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf)" -split-input-file -allow-unregistered-dialect | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s --check-prefix=FULL-UNROLL
// CHECK-LABEL: func @vector_transfer_ops_0d(
func @vector_transfer_ops_0d(%M: memref<f32>) {

@@ -4,8 +4,8 @@
// RUN: mlir-opt -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.fill register-tile-sizes=4,32 vectorize" | \
// RUN: mlir-opt -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=memref.copy register-tile-sizes=4,32 vectorize" | \
-// RUN: mlir-opt -canonicalize -convert-vector-to-scf -lower-affine -convert-linalg-to-loops | \
-// RUN: mlir-opt -canonicalize -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt -pass-pipeline="builtin.func(canonicalize,convert-vector-to-scf,lower-affine,convert-linalg-to-loops)" | \
+// RUN: mlir-opt -pass-pipeline="builtin.func(canonicalize,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -O3 -e main -entry-point-result=void \
// Activate to dump assembly
// R_UN: -dump-object-file -object-filename=/tmp/a.o \

@@ -1,6 +1,6 @@
-// RUN: mlir-opt %s -canonicalize -cse -linalg-comprehensive-module-bufferize |\
-// RUN: mlir-opt -buffer-deallocation -convert-vector-to-scf -lower-affine -convert-linalg-to-loops |\
-// RUN: mlir-opt -canonicalize -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(canonicalize,cse),linalg-comprehensive-module-bufferize" |\
+// RUN: mlir-opt -pass-pipeline="builtin.func(buffer-deallocation,convert-vector-to-scf,lower-affine,convert-linalg-to-loops)" |\
+// RUN: mlir-opt -pass-pipeline="builtin.func(canonicalize,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -O3 -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext |\

@@ -1,7 +1,4 @@
-// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf \
-// RUN: -memref-expand -arith-expand -convert-vector-to-llvm \
-// RUN: -convert-memref-to-llvm -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf,memref-expand,arith-expand),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

@@ -1,19 +1,19 @@
-// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

@@ -1,19 +1,19 @@
-// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

@@ -1,19 +1,19 @@
-// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

@@ -1,9 +1,9 @@
-// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

@@ -1,9 +1,9 @@
-// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

@@ -1,16 +1,14 @@
-// RUN: mlir-opt %s -test-vector-to-forloop -convert-vector-to-scf \
-// RUN: -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-vector-to-forloop,convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine \
-// RUN: -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e main \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e main \
// RUN: -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext | \
// RUN: FileCheck %s
-// RUN: mlir-opt %s -test-vector-to-forloop | FileCheck %s -check-prefix=TRANSFORM
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-vector-to-forloop)" | FileCheck %s -check-prefix=TRANSFORM
func private @print_memref_f32(memref<*xf32>)

@@ -1,4 +1,4 @@
-// RUN: mlir-opt -arm-neon-2d-to-intr %s | FileCheck %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(arm-neon-2d-to-intr)" %s | FileCheck %s
// CHECK-LABEL: arm_neon_sdot2d_4x4_i8i8
func @arm_neon_sdot2d_4x4_i8i8(%a: vector<4xi32>, %b: vector<4x4xi8>, %c: vector<4x4xi8>) -> vector<4xi32> {

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-vector-to-llvm -convert-arith-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-translate -mlir-to-llvmir | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="convert-vector-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" | mlir-translate -mlir-to-llvmir | FileCheck %s
func @genbool_1d() -> vector<8xi1> {
%0 = vector.constant_mask [4] : vector<8xi1>

@@ -1,14 +1,4 @@
-// RUN: mlir-opt %s -async-to-async-runtime \
-// RUN: -async-runtime-ref-counting \
-// RUN: -async-runtime-ref-counting-opt \
-// RUN: -convert-async-to-llvm \
-// RUN: -convert-linalg-to-loops \
-// RUN: -convert-scf-to-cf \
-// RUN: -convert-linalg-to-llvm \
-// RUN: -convert-vector-to-llvm \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-vector-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
// RUN: | mlir-cpu-runner \
// RUN: -e main -entry-point-result=void -O0 \
// RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \

@@ -1,10 +1,4 @@
-// RUN: mlir-opt %s -async-to-async-runtime \
-// RUN: -async-runtime-ref-counting \
-// RUN: -async-runtime-ref-counting-opt \
-// RUN: -convert-async-to-llvm \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
// RUN: | mlir-cpu-runner \
// RUN: -e main -entry-point-result=void -O0 \
// RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \

@@ -1,12 +1,4 @@
-// RUN: mlir-opt %s -async-to-async-runtime \
-// RUN: -async-runtime-ref-counting \
-// RUN: -async-runtime-ref-counting-opt \
-// RUN: -convert-async-to-llvm \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-vector-to-llvm \
-// RUN: -convert-memref-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-arith-to-llvm),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" \
// RUN: | mlir-cpu-runner \
// RUN: -e main -entry-point-result=void -O0 \
// RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \

@@ -1,14 +1,4 @@
-// RUN: mlir-opt %s -async-to-async-runtime \
-// RUN: -async-runtime-ref-counting \
-// RUN: -async-runtime-ref-counting-opt \
-// RUN: -convert-async-to-llvm \
-// RUN: -convert-linalg-to-loops \
-// RUN: -convert-scf-to-cf \
-// RUN: -convert-linalg-to-llvm \
-// RUN: -convert-memref-to-llvm \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-memref-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
// RUN: | mlir-cpu-runner \
// RUN: -e main -entry-point-result=void -O0 \
// RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-scf-to-cf -convert-arith-to-llvm -convert-memref-to-llvm -convert-func-to-llvm='use-bare-ptr-memref-call-conv=1' -reconcile-unrealized-casts | mlir-cpu-runner -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext -entry-point-result=void | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-scf-to-cf,convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm{use-bare-ptr-memref-call-conv=1}" -reconcile-unrealized-casts | mlir-cpu-runner -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext -entry-point-result=void | FileCheck %s
// Verify bare pointer memref calling convention. `simple_add1_add2_test`
// gets two 2xf32 memrefs, adds 1.0f to the first one and 2.0f to the second

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-scf-to-cf -convert-arith-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-scf-to-cf,convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" \
// RUN: | mlir-cpu-runner -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \
// RUN: | FileCheck %s

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-arith-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s
func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }
func private @print_memref_i32(memref<*xi32>) attributes { llvm.emit_c_interface }

@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s -test-math-polynomial-approximation \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-vector-to-llvm \
-// RUN: -convert-math-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-math-polynomial-approximation,convert-arith-to-llvm),convert-vector-to-llvm,builtin.func(convert-math-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
// RUN: | mlir-cpu-runner \
// RUN: -e main -entry-point-result=void -O0 \
// RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-scf-to-cf -convert-memref-to-llvm -convert-arith-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-scf-to-cf),convert-memref-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
// RUN: | mlir-cpu-runner -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \
// RUN: | FileCheck %s

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-scf-to-cf -memref-expand -convert-arith-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-scf-to-cf,memref-expand,convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" \
// RUN: | mlir-cpu-runner -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \
// RUN: | FileCheck %s

@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-linalg-to-loops -lower-affine -convert-scf-to-cf -convert-arith-to-llvm -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts %s | mlir-cpu-runner -O3 -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-linalg-to-loops,lower-affine,convert-scf-to-cf,convert-arith-to-llvm),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" %s | mlir-cpu-runner -O3 -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s
func @main() {
%A = memref.alloc() : memref<16x16xf32>

@@ -1,10 +1,4 @@
-// RUN: mlir-opt %s -convert-linalg-to-loops \
-// RUN: -convert-scf-to-cf \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-linalg-to-llvm \
-// RUN: -convert-memref-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s

@@ -1,7 +1,7 @@
-// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e print_0d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-0D
-// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e print_1d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-1D
-// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e print_3d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-3D
-// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e vector_splat_2d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-VECTOR-SPLAT-2D
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e print_0d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-0D
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e print_1d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-1D
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e print_3d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-3D
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e vector_splat_2d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-VECTOR-SPLAT-2D
func @print_0d() {
%f = arith.constant 2.00000e+00 : f32

@@ -1,16 +1,6 @@
// Check if mlir marks the corresponding function with required coroutine attribute.
//
-// RUN: mlir-opt %s -async-to-async-runtime \
-// RUN: -async-runtime-ref-counting \
-// RUN: -async-runtime-ref-counting-opt \
-// RUN: -convert-async-to-llvm \
-// RUN: -convert-linalg-to-loops \
-// RUN: -convert-scf-to-cf \
-// RUN: -convert-linalg-to-llvm \
-// RUN: -convert-memref-to-llvm \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-memref-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
// RUN: | FileCheck %s
// CHECK: llvm.func @async_execute_fn{{.*}}attributes{{.*}}"coroutine.presplit", "0"