Normalize lowering of MemRef types

The RFC for unifying Linalg and Affine compilation passes into an end-to-end flow with a predictable ABI and linkage to external function calls raised the question of why memref descriptors vary in size depending on whether the memref has static or dynamic dimensions (https://groups.google.com/a/tensorflow.org/forum/#!topic/mlir/MaL8m2nXuio).

This CL standardizes the ABI so that it depends only on the rank of the memref.
The LLVM struct for a memref becomes equivalent to:
```
template <typename Elem, size_t Rank>
struct {
  Elem *ptr;
  int64_t sizes[Rank]; // omitted when rank == 0
};
```
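
One payoff of the fixed layout is that externally linked functions can consume memrefs directly. A minimal sketch of such a callee, assuming a rank-1 float memref and that the descriptor is passed by value with a C-compatible layout (the struct and function names are illustrative, not part of this change):

```
#include <cstdint>
#include <cstdio>

// Hypothetical C++ mirror of the rank-1 descriptor described above.
struct MemRef1D {
  float *ptr;
  int64_t sizes[1];
};

// Hypothetical external callee; sizes[0] holds the element count whether
// the MLIR-side dimension was static or dynamic.
extern "C" void print_memref_1d(MemRef1D m) {
  for (int64_t i = 0; i < m.sizes[0]; ++i)
    std::printf("%f\n", m.ptr[i]);
}
```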

PiperOrigin-RevId: 270947276
Nicolas Vasilache, 2019-09-24 11:21:04 -07:00 (committed by A. Unique TensorFlower)
parent 74cdbf5909
commit 42d8fa667b
9 changed files with 177 additions and 258 deletions

View file

@@ -181,8 +181,8 @@ public:
// Helper function to obtain the size of the given `memref` along the
// dimension `dim`. For static dimensions, emits a constant; for dynamic
// dimensions, extracts the size from the memref descriptor.
auto memrefSize = [int64Ty, pos, i64cst](MemRefType type, Value *memref,
int dim) -> Value * {
auto memrefSize = [&rewriter, int64Ty, i64cst](
MemRefType type, Value *memref, int dim) -> Value * {
assert(dim < type.getRank());
if (type.getShape()[dim] != -1) {
return i64cst(type.getShape()[dim]);
@@ -191,14 +191,12 @@ public:
for (int i = 0; i < dim; ++i)
if (type.getShape()[i] == -1)
++dynamicDimPos;
return intrinsics::extractvalue(int64Ty, memref, pos(1 + dynamicDimPos));
return intrinsics::extractvalue(
int64Ty, memref, rewriter.getI64ArrayAttr({1, dynamicDimPos}));
};
// Helper function to obtain the data pointer of the given `memref`.
auto memrefPtr = [pos](MemRefType type, Value *memref) -> Value * {
if (type.hasStaticShape())
return memref;
auto elementTy = linalg::convertLinalgType(type.getElementType())
.cast<LLVM::LLVMType>()
.getPointerTo();
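
As a reading aid, the two-level positions used with extractvalue above map onto the normalized descriptor like this (an illustrative C++ mirror for rank 2, not code from this patch):

```
#include <cstdint>

// Position [0] addresses `ptr`; position [1, d] addresses `sizes[d]`,
// which is what rewriter.getI64ArrayAttr({1, dynamicDimPos}) encodes.
struct Descriptor2D {
  float *ptr;        // extractvalue %d[0]
  int64_t sizes[2];  // extractvalue %d[1, 0] and %d[1, 1]
};
```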

View file

@@ -149,6 +149,7 @@ public:
// Create our loop nest now
using namespace edsc;
using extractvalue = intrinsics::ValueBuilder<LLVM::ExtractValueOp>;
using llvmCall = intrinsics::ValueBuilder<LLVM::CallOp>;
ScopedContext scope(rewriter, loc);
ValueHandle zero = intrinsics::constant_index(0);
@@ -157,26 +158,36 @@ public:
IndexedValue iOp(operand);
IndexHandle i, j, M(vOp.ub(0));
auto *dialect = op->getContext()->getRegisteredDialect<LLVM::LLVMDialect>();
auto i8PtrTy = LLVM::LLVMType::getInt8Ty(dialect).getPointerTo();
ValueHandle fmtEol(getConstantCharBuffer(rewriter, loc, "\n"));
if (vOp.rank() == 1) {
// clang-format off
LoopBuilder(&i, zero, M, 1)([&]{
llvmCall(retTy,
rewriter.getSymbolRefAttr(printfFunc),
{fmtCst, iOp(i)});
{extractvalue(i8PtrTy, fmtCst, rewriter.getIndexArrayAttr(0)),
iOp(i)});
});
llvmCall(retTy, rewriter.getSymbolRefAttr(printfFunc), {fmtEol});
llvmCall(retTy, rewriter.getSymbolRefAttr(printfFunc),
{extractvalue(i8PtrTy, fmtEol, rewriter.getIndexArrayAttr(0))});
// clang-format on
} else {
IndexHandle N(vOp.ub(1));
// clang-format off
LoopBuilder(&i, zero, M, 1)([&]{
LoopBuilder(&j, zero, N, 1)([&]{
llvmCall(retTy,
rewriter.getSymbolRefAttr(printfFunc),
{fmtCst, iOp(i, j)});
llvmCall(
retTy,
rewriter.getSymbolRefAttr(printfFunc),
{extractvalue(i8PtrTy, fmtCst, rewriter.getIndexArrayAttr(0)),
iOp(i, j)});
});
llvmCall(retTy, rewriter.getSymbolRefAttr(printfFunc), {fmtEol});
llvmCall(
retTy,
rewriter.getSymbolRefAttr(printfFunc),
{extractvalue(i8PtrTy, fmtEol, rewriter.getIndexArrayAttr(0))});
});
// clang-format on
}
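
In C terms, the lowered rank-2 branch behaves roughly like the sketch below; v, M, N and printMatrix are stand-ins for the SSA values in the generated IR, and fmt plays the role of the pointer extracted from the constant format buffer:

```
#include <cstdint>
#include <cstdio>

// Illustrative equivalent of the generated print loops; the real lowering
// emits one llvm.call @printf per element plus one per row for fmtEol.
void printMatrix(const float *v, int64_t M, int64_t N, const char *fmt) {
  for (int64_t i = 0; i < M; ++i) {
    for (int64_t j = 0; j < N; ++j)
      std::printf(fmt, v[i * N + j]); // fmtCst: one element per call
    std::printf("\n");                // fmtEol: newline after each row
  }
}
```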

View file

@@ -52,36 +52,27 @@ For example, `vector<4 x f32>` converts to `!llvm.type<"<4 x float>">` and
Memref types in MLIR have both static and dynamic information associated with
them. The dynamic information comprises the buffer pointer as well as sizes of
any dynamically sized dimensions. Memref types are converted into either LLVM IR
pointer types if they are fully statically shaped; or to LLVM IR structure types
if they contain dynamic sizes. In the latter case, the first element of the
structure is a pointer to the converted (using these rules) memref element type,
followed by as many elements as the memref has dynamic sizes. The type of each
of these size arguments will be the LLVM type that results from converting the
MLIR `index` type. Zero-dimensional memrefs are treated as pointers to the
elemental type.
any dynamically sized dimensions. Memref types are normalized and converted to a
descriptor that is only dependent on the rank of the memref. The descriptor
contains the pointer to the data buffer followed by an array containing as many
64-bit integers as the rank of the memref. Each array entry is the size, in
number of elements, of the memref along that dimension. For constant memref
dimensions, the corresponding size entry is a constant whose runtime value
matches the static value. This normalization serves as an ABI for the memref
type to interoperate with externally linked functions. In the particular case of
rank `0` memrefs, the size array is omitted, resulting in a wrapped pointer.
Examples:
```mlir {.mlir}
// All of the following are converted to just a pointer type because
// of fully static sizes.
memref<f32>
memref<1 x f32>
memref<10x42x42x43x123 x f32>
// resulting type
!llvm.type<"float*">
// All of the following are converted to a three-element structure
memref<?x? x f32>
memref<42x?x10x35x1x? x f32>
// resulting type assuming 64-bit pointers
!llvm.type<"{float*, i64, i64}">
memref<f32> -> !llvm.type<"{ float* }">
memref<1 x f32> -> !llvm.type<"{ float*, [1 x i64] }">
memref<? x f32> -> !llvm.type<"{ float*, [1 x i64] }">
memref<10x42x42x43x123 x f32> -> !llvm.type<"{ float*, [5 x i64] }">
memref<10x?x42x?x123 x f32> -> !llvm.type<"{ float*, [5 x i64] }">
// Memref types can have vectors as element types
memref<1x? x vector<4xf32>>
// which get converted as well
!llvm.type<"{<4 x float>*, i64}">
memref<1x? x vector<4xf32>> -> !llvm.type<"{ <4 x float>*, [1 x i64] }">
```
### Function Types

View file

@@ -122,27 +122,38 @@ Type LLVMTypeConverter::convertFunctionType(FunctionType type) {
.getPointerTo();
}
// Convert a MemRef to an LLVM type. If the memref is statically-shaped, then
// we return a pointer to the converted element type. Otherwise we return an
// LLVM structure type, where the first element of the structure type is a
// pointer to the elemental type of the MemRef and the following N elements are
// values of the Index type, one for each of N dynamic dimensions of the MemRef.
// Convert a MemRef to an LLVM type. The result is a MemRef descriptor which
// contains:
// 1. the pointer to the data buffer, followed by
// 2. an array containing as many 64-bit integers as the rank of the MemRef:
// each entry is the size, in number of elements, of the memref along
// the given dimension. For constant MemRef dimensions, the corresponding size
// entry is a constant whose runtime value must match the static value.
// TODO(ntv, zinenko): add assertions for the static cases.
//
// template <typename Elem, size_t Rank>
// struct {
// Elem *ptr;
// int64_t sizes[Rank]; // omitted when rank == 0
// };
static unsigned kPtrPosInMemRefDescriptor = 0;
static unsigned kSizePosInMemRefDescriptor = 1;
Type LLVMTypeConverter::convertMemRefType(MemRefType type) {
assert((type.getAffineMaps().empty() ||
(type.getAffineMaps().size() == 1 &&
type.getAffineMaps().back().isIdentity())) &&
"Non-identity layout maps must have been normalized away");
LLVM::LLVMType elementType = unwrap(convertType(type.getElementType()));
if (!elementType)
return {};
auto ptrType = elementType.getPointerTo(type.getMemorySpace());
// Extra value for the memory space.
unsigned numDynamicSizes = type.getNumDynamicDims();
// If memref is statically-shaped we return the underlying pointer type.
if (numDynamicSizes == 0)
return ptrType;
SmallVector<LLVM::LLVMType, 8> types(numDynamicSizes + 1, getIndexType());
types.front() = ptrType;
return LLVM::LLVMType::getStructTy(llvmDialect, types);
auto ptrTy = elementType.getPointerTo(type.getMemorySpace());
auto indexTy = getIndexType();
auto rank = type.getRank();
if (rank > 0) {
auto arrayTy = LLVM::LLVMType::getArrayTy(indexTy, type.getRank());
return LLVM::LLVMType::getStructTy(ptrTy, arrayTy);
}
return LLVM::LLVMType::getStructTy(ptrTy);
}
// Convert an n-D vector type to an LLVM vector type via (n-1)-D array type when
@@ -600,10 +611,6 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
allocated = rewriter.create<LLVM::BitcastOp>(op->getLoc(), elementPtrType,
ArrayRef<Value *>(allocated));
// Deal with static memrefs
if (numOperands == 0)
return rewriter.replaceOp(op, allocated);
// Create the MemRef descriptor.
auto structType = lowering.convertType(type);
Value *memRefDescriptor = rewriter.create<LLVM::UndefOp>(
@@ -611,14 +618,15 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
memRefDescriptor = rewriter.create<LLVM::InsertValueOp>(
op->getLoc(), structType, memRefDescriptor, allocated,
rewriter.getIndexArrayAttr(0));
rewriter.getIndexArrayAttr(kPtrPosInMemRefDescriptor));
// Store dynamically allocated sizes in the descriptor. Dynamic sizes are
// passed in as operands.
for (auto indexedSize : llvm::enumerate(operands)) {
// Store dynamically allocated sizes in the descriptor. Static and dynamic
// sizes are all passed in as operands.
for (auto indexedSize : llvm::enumerate(sizes)) {
int64_t index = indexedSize.index();
memRefDescriptor = rewriter.create<LLVM::InsertValueOp>(
op->getLoc(), structType, memRefDescriptor, indexedSize.value(),
rewriter.getIndexArrayAttr(1 + indexedSize.index()));
rewriter.getI64ArrayAttr({kSizePosInMemRefDescriptor, index}));
}
// Return the final value of the descriptor.
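
The descriptor this lowering builds for, say, memref<?x42x?xf32> corresponds to the following C++ analogue (a sketch with illustrative names; the real code emits llvm.insertvalue operations):

```
#include <cstdint>
#include <cstdlib>

// Hypothetical mirror of the lowered alloc for memref<?x42x?xf32>.
struct MemRef3D {
  float *ptr;
  int64_t sizes[3];
};

MemRef3D allocDescriptor(int64_t d0, int64_t d2) {
  MemRef3D m;
  m.ptr = static_cast<float *>(std::malloc(d0 * 42 * d2 * sizeof(float)));
  m.sizes[0] = d0; // dynamic size, from an operand
  m.sizes[1] = 42; // static size, materialized as a constant
  m.sizes[2] = d2; // dynamic size, from an operand
  return m;
}
```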
@@ -679,60 +687,12 @@ struct MemRefCastOpLowering : public LLVMLegalizationPattern<MemRefCastOp> {
ConversionPatternRewriter &rewriter) const override {
auto memRefCastOp = cast<MemRefCastOp>(op);
OperandAdaptor<MemRefCastOp> transformed(operands);
auto targetType = memRefCastOp.getType();
auto sourceType = memRefCastOp.getOperand()->getType().cast<MemRefType>();
// Copy the data buffer pointer.
auto elementTypePtr = getMemRefElementPtrType(targetType, lowering);
Value *buffer =
extractMemRefElementPtr(rewriter, op->getLoc(), transformed.source(),
elementTypePtr, sourceType.hasStaticShape());
// Account for static memrefs as target types
if (targetType.hasStaticShape())
return rewriter.replaceOp(op, buffer);
// Create the new MemRef descriptor.
auto structType = lowering.convertType(targetType);
Value *newDescriptor = rewriter.create<LLVM::UndefOp>(
op->getLoc(), structType, ArrayRef<Value *>{});
// Otherwise target type is dynamic memref, so create a proper descriptor.
newDescriptor = rewriter.create<LLVM::InsertValueOp>(
op->getLoc(), structType, newDescriptor, buffer,
rewriter.getIndexArrayAttr(0));
// Fill in the dynamic sizes of the new descriptor. If the size was
// dynamic, copy it from the old descriptor. If the size was static, insert
// the constant. Note that the positions of dynamic sizes in the
// descriptors start from 1 (the buffer pointer is at position zero).
int64_t sourceDynamicDimIdx = 1;
int64_t targetDynamicDimIdx = 1;
for (int i = 0, e = sourceType.getRank(); i < e; ++i) {
// Ignore new static sizes (they will be known from the type). If the
// size was dynamic, update the index of dynamic types.
if (targetType.getShape()[i] != -1) {
if (sourceType.getShape()[i] == -1)
++sourceDynamicDimIdx;
continue;
}
auto sourceSize = sourceType.getShape()[i];
Value *size =
sourceSize == -1
? rewriter.create<LLVM::ExtractValueOp>(
op->getLoc(), getIndexType(),
transformed.source(), // NB: dynamic memref
rewriter.getIndexArrayAttr(sourceDynamicDimIdx++))
: createIndexConstant(rewriter, op->getLoc(), sourceSize);
newDescriptor = rewriter.create<LLVM::InsertValueOp>(
op->getLoc(), structType, newDescriptor, size,
rewriter.getIndexArrayAttr(targetDynamicDimIdx++));
}
assert(sourceDynamicDimIdx - 1 == sourceType.getNumDynamicDims() &&
"source dynamic dimensions were not processed");
assert(targetDynamicDimIdx - 1 == targetType.getNumDynamicDims() &&
"target dynamic dimensions were not set up");
rewriter.replaceOp(op, newDescriptor);
// memref_cast is defined for source and destination memref types with the
// same element type, same mappings, same address space and same rank.
// Therefore a simple bitcast suffices; otherwise the behavior is undefined.
auto targetStructType = lowering.convertType(memRefCastOp.getType());
rewriter.replaceOpWithNewOp<LLVM::BitcastOp>(op, targetStructType,
transformed.source());
}
};
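
A sketch of why the bitcast suffices: the converted type now depends only on the rank, so every rank-2 memref shares one descriptor layout (hypothetical C++ mirror):

```
#include <cstdint>

// memref<10x42xf32>, memref<?x42xf32> and memref<?x?xf32> all lower to
// this one struct, so memref_cast needs no data movement.
struct Rank2Descriptor {
  float *ptr;
  int64_t sizes[2];
};
```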
@@ -754,25 +714,16 @@ struct DimOpLowering : public LLVMLegalizationPattern<DimOp> {
MemRefType type = dimOp.getOperand()->getType().cast<MemRefType>();
auto shape = type.getShape();
uint64_t index = dimOp.getIndex();
int64_t index = dimOp.getIndex();
// Extract dynamic size from the memref descriptor and define static size
// as a constant.
if (shape[index] == -1) {
// Find the position of the dynamic dimension in the list of dynamic sizes
// by counting the number of preceding dynamic dimensions. Start from 1
// because the buffer pointer is at position zero.
int64_t position = 1;
for (uint64_t i = 0; i < index; ++i) {
if (shape[i] == -1)
++position;
}
if (ShapedType::isDynamic(shape[index]))
rewriter.replaceOpWithNewOp<LLVM::ExtractValueOp>(
op, getIndexType(), transformed.memrefOrTensor(),
rewriter.getIndexArrayAttr(position));
} else {
rewriter.getI64ArrayAttr({kSizePosInMemRefDescriptor, index}));
else
rewriter.replaceOp(
op, createIndexConstant(rewriter, op->getLoc(), shape[index]));
}
}
};
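
The simplification is that the descriptor now stores every size, so the lowering reads sizes[index] directly instead of counting preceding dynamic dimensions. A sketch contrasting the two addressing schemes (the helper is illustrative, mirroring the removed loop above):

```
#include <cstdint>

// Old scheme: one descriptor slot per *dynamic* dimension only, after the
// pointer at slot 0, so the position had to be computed by counting.
int64_t oldPosition(const int64_t *shape, int64_t index) {
  int64_t pos = 1;
  for (int64_t i = 0; i < index; ++i)
    if (shape[i] == -1)
      ++pos;
  return pos;
}
// New scheme: the size lives at descriptor position {1, index}, always.
```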
@@ -829,61 +780,41 @@ struct LoadStoreOpLowering : public LLVMLegalizationPattern<Derived> {
// Dynamic sizes are extracted from the MemRef descriptor, where they start
// from the position 1 (the buffer is at position 0).
SmallVector<Value *, 4> sizes;
unsigned dynamicSizeIdx = 1;
for (int64_t s : shape) {
for (auto en : llvm::enumerate(shape)) {
int64_t s = en.value();
int64_t index = en.index();
if (s == -1) {
Value *size = rewriter.create<LLVM::ExtractValueOp>(
loc, this->getIndexType(), memRefDescriptor,
rewriter.getIndexArrayAttr(dynamicSizeIdx++));
rewriter.getI64ArrayAttr({kSizePosInMemRefDescriptor, index}));
sizes.push_back(size);
} else {
sizes.push_back(this->createIndexConstant(rewriter, loc, s));
// TODO(ntv, zinenko): assert dynamic descriptor size is constant.
}
}
// The second and subsequent operands are access subscripts. Obtain the
// linearized address in the buffer.
Value *subscript = linearizeSubscripts(rewriter, loc, indices, sizes);
Value *subscript = indices.empty()
? nullptr
: linearizeSubscripts(rewriter, loc, indices, sizes);
Value *dataPtr = rewriter.create<LLVM::ExtractValueOp>(
loc, elementTypePtr, memRefDescriptor, rewriter.getIndexArrayAttr(0));
return rewriter.create<LLVM::GEPOp>(loc, elementTypePtr,
ArrayRef<Value *>{dataPtr, subscript},
loc, elementTypePtr, memRefDescriptor,
rewriter.getIndexArrayAttr(kPtrPosInMemRefDescriptor));
SmallVector<Value *, 2> gepSubValues(1, dataPtr);
if (subscript)
gepSubValues.push_back(subscript);
return rewriter.create<LLVM::GEPOp>(loc, elementTypePtr, gepSubValues,
ArrayRef<NamedAttribute>{});
}
// This is a getElementPtr variant, where the value is a direct raw pointer.
// If a shape is empty, we are dealing with a zero-dimensional memref. Return
// the pointer unmodified in this case. Otherwise, linearize subscripts to
// obtain the offset with respect to the base pointer. Use this offset to
// compute and return the element pointer.
Value *getRawElementPtr(Location loc, Type elementTypePtr,
ArrayRef<int64_t> shape, Value *rawDataPtr,
ArrayRef<Value *> indices,
ConversionPatternRewriter &rewriter) const {
if (shape.empty())
return rawDataPtr;
SmallVector<Value *, 4> sizes;
for (int64_t s : shape) {
sizes.push_back(this->createIndexConstant(rewriter, loc, s));
}
Value *subscript = linearizeSubscripts(rewriter, loc, indices, sizes);
return rewriter.create<LLVM::GEPOp>(
loc, elementTypePtr, ArrayRef<Value *>{rawDataPtr, subscript},
ArrayRef<NamedAttribute>{});
}
Value *getDataPtr(Location loc, MemRefType type, Value *dataPtr,
ArrayRef<Value *> indices,
ConversionPatternRewriter &rewriter,
llvm::Module &module) const {
auto ptrType = getMemRefElementPtrType(type, this->lowering);
auto shape = type.getShape();
if (type.hasStaticShape()) {
// NB: If memref was statically-shaped, dataPtr is pointer to raw data.
return getRawElementPtr(loc, ptrType, shape, dataPtr, indices, rewriter);
}
return getElementPtr(loc, ptrType, shape, dataPtr, indices, rewriter);
}
};
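
For reference, linearizeSubscripts computes a row-major offset from the indices and sizes gathered above; conceptually it behaves like this sketch (not the exact helper):

```
#include <cstdint>
#include <vector>

// Row-major linearization: offset = (i0 * s1 + i1) * s2 + i2, and so on.
// For rank 0 both vectors are empty and no subscript is produced, matching
// the GEP-with-no-indices path above.
int64_t linearize(const std::vector<int64_t> &indices,
                  const std::vector<int64_t> &sizes) {
  int64_t offset = 0;
  for (size_t k = 0; k < indices.size(); ++k)
    offset = offset * sizes[k] + indices[k];
  return offset;
}
```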

View file

@@ -1,7 +1,7 @@
// RUN: mlir-opt -lower-to-llvm %s | FileCheck %s
// CHECK-LABEL: func @check_attributes(%arg0: !llvm<"float*"> {dialect.a = true, dialect.b = 4 : i64}) {
// CHECK-LABEL: func @check_attributes(%arg0: !llvm<"{ float*, [2 x i64] }"> {dialect.a = true, dialect.b = 4 : i64}) {
func @check_attributes(%static: memref<10x20xf32> {dialect.a = true, dialect.b = 4 : i64 }) {
return
}

View file

@@ -1,18 +1,18 @@
// RUN: mlir-opt -lower-to-llvm %s | FileCheck %s
// CHECK-LABEL: func @check_arguments(%arg0: !llvm<"float*">, %arg1: !llvm<"{ float*, i64, i64 }">, %arg2: !llvm<"{ float*, i64 }">)
// CHECK-LABEL: func @check_arguments(%arg0: !llvm<"{ float*, [2 x i64] }">, %arg1: !llvm<"{ float*, [2 x i64] }">, %arg2: !llvm<"{ float*, [2 x i64] }">)
func @check_arguments(%static: memref<10x20xf32>, %dynamic : memref<?x?xf32>, %mixed : memref<10x?xf32>) {
return
}
// CHECK-LABEL: func @check_static_return(%arg0: !llvm<"float*">) -> !llvm<"float*"> {
// CHECK-LABEL: func @check_static_return(%arg0: !llvm<"{ float*, [2 x i64] }">) -> !llvm<"{ float*, [2 x i64] }"> {
func @check_static_return(%static : memref<32x18xf32>) -> memref<32x18xf32> {
// CHECK-NEXT: llvm.return %arg0 : !llvm<"float*">
// CHECK-NEXT: llvm.return %arg0 : !llvm<"{ float*, [2 x i64] }">
return %static : memref<32x18xf32>
}
// CHECK-LABEL: func @zero_d_alloc() -> !llvm<"float*"> {
// CHECK-LABEL: func @zero_d_alloc() -> !llvm<"{ float* }"> {
func @zero_d_alloc() -> memref<f32> {
// CHECK-NEXT: %0 = llvm.mlir.constant(1 : index) : !llvm.i64
// CHECK-NEXT: %1 = llvm.mlir.constant(4 : index) : !llvm.i64
@@ -23,15 +23,16 @@ func @zero_d_alloc() -> memref<f32> {
return %0 : memref<f32>
}
// CHECK-LABEL: func @zero_d_dealloc(%arg0: !llvm<"float*">) {
// CHECK-LABEL: func @zero_d_dealloc(%arg0: !llvm<"{ float* }">) {
func @zero_d_dealloc(%arg0: memref<f32>) {
// CHECK-NEXT: %0 = llvm.bitcast %arg0 : !llvm<"float*"> to !llvm<"i8*">
// CHECK-NEXT: llvm.call @free(%0) : (!llvm<"i8*">) -> ()
// CHECK-NEXT: %[[ptr:.*]] = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float* }">
// CHECK-NEXT: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm<"float*"> to !llvm<"i8*">
// CHECK-NEXT: llvm.call @free(%[[bc]]) : (!llvm<"i8*">) -> ()
dealloc %arg0 : memref<f32>
return
}
// CHECK-LABEL: func @mixed_alloc(%arg0: !llvm.i64, %arg1: !llvm.i64) -> !llvm<"{ float*, i64, i64 }"> {
// CHECK-LABEL: func @mixed_alloc(%arg0: !llvm.i64, %arg1: !llvm.i64) -> !llvm<"{ float*, [3 x i64] }"> {
func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
// CHECK-NEXT: %0 = llvm.mlir.constant(42 : index) : !llvm.i64
// CHECK-NEXT: %1 = llvm.mul %arg0, %0 : !llvm.i64
@@ -40,17 +41,18 @@ func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
// CHECK-NEXT: %4 = llvm.mul %2, %3 : !llvm.i64
// CHECK-NEXT: %5 = llvm.call @malloc(%4) : (!llvm.i64) -> !llvm<"i8*">
// CHECK-NEXT: %6 = llvm.bitcast %5 : !llvm<"i8*"> to !llvm<"float*">
// CHECK-NEXT: %7 = llvm.mlir.undef : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %8 = llvm.insertvalue %6, %7[0 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %9 = llvm.insertvalue %arg0, %8[1 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %10 = llvm.insertvalue %arg1, %9[2 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %7 = llvm.mlir.undef : !llvm<"{ float*, [3 x i64] }">
// CHECK-NEXT: %8 = llvm.insertvalue %6, %7[0 : index] : !llvm<"{ float*, [3 x i64] }">
// CHECK-NEXT: %9 = llvm.insertvalue %arg0, %8[1, 0] : !llvm<"{ float*, [3 x i64] }">
// CHECK-NEXT: %10 = llvm.insertvalue %0, %9[1, 1] : !llvm<"{ float*, [3 x i64] }">
// CHECK-NEXT: %11 = llvm.insertvalue %arg1, %10[1, 2] : !llvm<"{ float*, [3 x i64] }">
%0 = alloc(%arg0, %arg1) : memref<?x42x?xf32>
return %0 : memref<?x42x?xf32>
}
// CHECK-LABEL: func @mixed_dealloc(%arg0: !llvm<"{ float*, i64, i64 }">) {
// CHECK-LABEL: func @mixed_dealloc(%arg0: !llvm<"{ float*, [3 x i64] }">) {
func @mixed_dealloc(%arg0: memref<?x42x?xf32>) {
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, [3 x i64] }">
// CHECK-NEXT: %1 = llvm.bitcast %0 : !llvm<"float*"> to !llvm<"i8*">
// CHECK-NEXT: llvm.call @free(%1) : (!llvm<"i8*">) -> ()
dealloc %arg0 : memref<?x42x?xf32>
@@ -58,31 +60,31 @@ func @mixed_dealloc(%arg0: memref<?x42x?xf32>) {
return
}
// CHECK-LABEL: func @dynamic_alloc(%arg0: !llvm.i64, %arg1: !llvm.i64) -> !llvm<"{ float*, i64, i64 }"> {
// CHECK-LABEL: func @dynamic_alloc(%arg0: !llvm.i64, %arg1: !llvm.i64) -> !llvm<"{ float*, [2 x i64] }"> {
func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
// CHECK-NEXT: %0 = llvm.mul %arg0, %arg1 : !llvm.i64
// CHECK-NEXT: %1 = llvm.mlir.constant(4 : index) : !llvm.i64
// CHECK-NEXT: %2 = llvm.mul %0, %1 : !llvm.i64
// CHECK-NEXT: %3 = llvm.call @malloc(%2) : (!llvm.i64) -> !llvm<"i8*">
// CHECK-NEXT: %4 = llvm.bitcast %3 : !llvm<"i8*"> to !llvm<"float*">
// CHECK-NEXT: %5 = llvm.mlir.undef : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %6 = llvm.insertvalue %4, %5[0 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %7 = llvm.insertvalue %arg0, %6[1 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %8 = llvm.insertvalue %arg1, %7[2 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %5 = llvm.mlir.undef : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %6 = llvm.insertvalue %4, %5[0 : index] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %7 = llvm.insertvalue %arg0, %6[1, 0] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %8 = llvm.insertvalue %arg1, %7[1, 1] : !llvm<"{ float*, [2 x i64] }">
%0 = alloc(%arg0, %arg1) : memref<?x?xf32>
return %0 : memref<?x?xf32>
}
// CHECK-LABEL: func @dynamic_dealloc(%arg0: !llvm<"{ float*, i64, i64 }">) {
// CHECK-LABEL: func @dynamic_dealloc(%arg0: !llvm<"{ float*, [2 x i64] }">) {
func @dynamic_dealloc(%arg0: memref<?x?xf32>) {
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %1 = llvm.bitcast %0 : !llvm<"float*"> to !llvm<"i8*">
// CHECK-NEXT: llvm.call @free(%1) : (!llvm<"i8*">) -> ()
dealloc %arg0 : memref<?x?xf32>
return
}
// CHECK-LABEL: func @static_alloc() -> !llvm<"float*"> {
// CHECK-LABEL: func @static_alloc() -> !llvm<"{ float*, [2 x i64] }"> {
func @static_alloc() -> memref<32x18xf32> {
// CHECK-NEXT: %0 = llvm.mlir.constant(32 : index) : !llvm.i64
// CHECK-NEXT: %1 = llvm.mlir.constant(18 : index) : !llvm.i64
@@ -95,17 +97,20 @@ func @static_alloc() -> memref<32x18xf32> {
return %0 : memref<32x18xf32>
}
// CHECK-LABEL: func @static_dealloc(%arg0: !llvm<"float*">) {
// CHECK-LABEL: func @static_dealloc(%arg0: !llvm<"{ float*, [2 x i64] }">) {
func @static_dealloc(%static: memref<10x8xf32>) {
// CHECK-NEXT: %0 = llvm.bitcast %arg0 : !llvm<"float*"> to !llvm<"i8*">
// CHECK-NEXT: llvm.call @free(%0) : (!llvm<"i8*">) -> ()
// CHECK-NEXT: %[[ptr:.*]] = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm<"float*"> to !llvm<"i8*">
// CHECK-NEXT: llvm.call @free(%[[bc]]) : (!llvm<"i8*">) -> ()
dealloc %static : memref<10x8xf32>
return
}
// CHECK-LABEL: func @zero_d_load(%arg0: !llvm<"float*">) -> !llvm.float {
// CHECK-LABEL: func @zero_d_load(%arg0: !llvm<"{ float* }">) -> !llvm.float {
func @zero_d_load(%arg0: memref<f32>) -> f32 {
// CHECK-NEXT: %0 = llvm.load %arg0 : !llvm<"float*">
// CHECK-NEXT: %[[ptr:.*]] = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float* }">
// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][] : (!llvm<"float*">) -> !llvm<"float*">
// CHECK-NEXT: %2 = llvm.load %[[addr]] : !llvm<"float*">
%0 = load %arg0[] : memref<f32>
return %0 : f32
}
@@ -116,8 +121,9 @@ func @static_load(%static : memref<10x42xf32>, %i : index, %j : index) {
// CHECK-NEXT: %1 = llvm.mlir.constant(42 : index) : !llvm.i64
// CHECK-NEXT: %2 = llvm.mul %arg1, %1 : !llvm.i64
// CHECK-NEXT: %3 = llvm.add %2, %arg2 : !llvm.i64
// CHECK-NEXT: %4 = llvm.getelementptr %arg0[%3] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
// CHECK-NEXT: %5 = llvm.load %4 : !llvm<"float*">
// CHECK-NEXT: %[[ptr:.*]] = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%3] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
// CHECK-NEXT: llvm.load %[[addr]] : !llvm<"float*">
%0 = load %static[%i, %j] : memref<10x42xf32>
return
}
@@ -125,10 +131,10 @@ func @static_load(%static : memref<10x42xf32>, %i : index, %j : index) {
// CHECK-LABEL: func @mixed_load
func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
// CHECK-NEXT: %0 = llvm.mlir.constant(42 : index) : !llvm.i64
// CHECK-NEXT: %1 = llvm.extractvalue %arg0[1 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %1 = llvm.extractvalue %arg0[1, 1] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %2 = llvm.mul %arg1, %1 : !llvm.i64
// CHECK-NEXT: %3 = llvm.add %2, %arg2 : !llvm.i64
// CHECK-NEXT: %4 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %4 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %5 = llvm.getelementptr %4[%3] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
// CHECK-NEXT: %6 = llvm.load %5 : !llvm<"float*">
%0 = load %mixed[%i, %j] : memref<42x?xf32>
@@ -137,20 +143,22 @@ func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
// CHECK-LABEL: func @dynamic_load
func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[1 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %1 = llvm.extractvalue %arg0[2 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[1, 0] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %1 = llvm.extractvalue %arg0[1, 1] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %2 = llvm.mul %arg1, %1 : !llvm.i64
// CHECK-NEXT: %3 = llvm.add %2, %arg2 : !llvm.i64
// CHECK-NEXT: %4 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %4 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %5 = llvm.getelementptr %4[%3] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
// CHECK-NEXT: %6 = llvm.load %5 : !llvm<"float*">
%0 = load %dynamic[%i, %j] : memref<?x?xf32>
return
}
// CHECK-LABEL: func @zero_d_store(%arg0: !llvm<"float*">, %arg1: !llvm.float) {
// CHECK-LABEL: func @zero_d_store(%arg0: !llvm<"{ float* }">, %arg1: !llvm.float) {
func @zero_d_store(%arg0: memref<f32>, %arg1: f32) {
// CHECK-NEXT: llvm.store %arg1, %arg0 : !llvm<"float*">
// CHECK-NEXT: %[[ptr:.*]] = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float* }">
// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][] : (!llvm<"float*">) -> !llvm<"float*">
// CHECK-NEXT: llvm.store %arg1, %[[addr]] : !llvm<"float*">
store %arg1, %arg0[] : memref<f32>
return
}
@@ -161,19 +169,20 @@ func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %val : f
// CHECK-NEXT: %1 = llvm.mlir.constant(42 : index) : !llvm.i64
// CHECK-NEXT: %2 = llvm.mul %arg1, %1 : !llvm.i64
// CHECK-NEXT: %3 = llvm.add %2, %arg2 : !llvm.i64
// CHECK-NEXT: %4 = llvm.getelementptr %arg0[%3] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
// CHECK-NEXT: llvm.store %arg3, %4 : !llvm<"float*">
// CHECK-NEXT: %[[ptr:.*]] = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%3] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
// CHECK-NEXT: llvm.store %arg3, %[[addr]] : !llvm<"float*">
store %val, %static[%i, %j] : memref<10x42xf32>
return
}
// CHECK-LABEL: func @dynamic_store
func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f32) {
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[1 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %1 = llvm.extractvalue %arg0[2 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[1, 0] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %1 = llvm.extractvalue %arg0[1, 1] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %2 = llvm.mul %arg1, %1 : !llvm.i64
// CHECK-NEXT: %3 = llvm.add %2, %arg2 : !llvm.i64
// CHECK-NEXT: %4 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %4 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %5 = llvm.getelementptr %4[%3] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
// CHECK-NEXT: llvm.store %arg3, %5 : !llvm<"float*">
store %val, %dynamic[%i, %j] : memref<?x?xf32>
@@ -183,10 +192,10 @@ func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f
// CHECK-LABEL: func @mixed_store
func @mixed_store(%mixed : memref<42x?xf32>, %i : index, %j : index, %val : f32) {
// CHECK-NEXT: %0 = llvm.mlir.constant(42 : index) : !llvm.i64
// CHECK-NEXT: %1 = llvm.extractvalue %arg0[1 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %1 = llvm.extractvalue %arg0[1, 1] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %2 = llvm.mul %arg1, %1 : !llvm.i64
// CHECK-NEXT: %3 = llvm.add %2, %arg2 : !llvm.i64
// CHECK-NEXT: %4 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %4 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, [2 x i64] }">
// CHECK-NEXT: %5 = llvm.getelementptr %4[%3] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
// CHECK-NEXT: llvm.store %arg3, %5 : !llvm<"float*">
store %val, %mixed[%i, %j] : memref<42x?xf32>
@@ -195,91 +204,69 @@ func @mixed_store(%mixed : memref<42x?xf32>, %i : index, %j : index, %val : f32)
// CHECK-LABEL: func @memref_cast_static_to_dynamic
func @memref_cast_static_to_dynamic(%static : memref<10x42xf32>) {
// CHECK-NEXT: %0 = llvm.mlir.undef : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %1 = llvm.insertvalue %arg0, %0[0 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %2 = llvm.mlir.constant(10 : index) : !llvm.i64
// CHECK-NEXT: %3 = llvm.insertvalue %2, %1[1 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %4 = llvm.mlir.constant(42 : index) : !llvm.i64
// CHECK-NEXT: %5 = llvm.insertvalue %4, %3[2 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: llvm.bitcast %arg0 : !llvm<"{ float*, [2 x i64] }"> to !llvm<"{ float*, [2 x i64] }">
%0 = memref_cast %static : memref<10x42xf32> to memref<?x?xf32>
return
}
// CHECK-LABEL: func @memref_cast_static_to_mixed
func @memref_cast_static_to_mixed(%static : memref<10x42xf32>) {
// CHECK-NEXT: %0 = llvm.mlir.undef : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %1 = llvm.insertvalue %arg0, %0[0 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %2 = llvm.mlir.constant(10 : index) : !llvm.i64
// CHECK-NEXT: %3 = llvm.insertvalue %2, %1[1 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: llvm.bitcast %arg0 : !llvm<"{ float*, [2 x i64] }"> to !llvm<"{ float*, [2 x i64] }">
%0 = memref_cast %static : memref<10x42xf32> to memref<?x42xf32>
return
}
// CHECK-LABEL: func @memref_cast_dynamic_to_static
func @memref_cast_dynamic_to_static(%dynamic : memref<?x?xf32>) {
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: llvm.bitcast %arg0 : !llvm<"{ float*, [2 x i64] }"> to !llvm<"{ float*, [2 x i64] }">
%0 = memref_cast %dynamic : memref<?x?xf32> to memref<10x12xf32>
return
}
// CHECK-LABEL: func @memref_cast_dynamic_to_mixed
func @memref_cast_dynamic_to_mixed(%dynamic : memref<?x?xf32>) {
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %1 = llvm.mlir.undef : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %2 = llvm.insertvalue %0, %1[0 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %3 = llvm.extractvalue %arg0[1 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %4 = llvm.insertvalue %3, %2[1 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: llvm.bitcast %arg0 : !llvm<"{ float*, [2 x i64] }"> to !llvm<"{ float*, [2 x i64] }">
%0 = memref_cast %dynamic : memref<?x?xf32> to memref<?x12xf32>
return
}
// CHECK-LABEL: func @memref_cast_mixed_to_dynamic
func @memref_cast_mixed_to_dynamic(%mixed : memref<42x?xf32>) {
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %1 = llvm.mlir.undef : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %2 = llvm.insertvalue %0, %1[0 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %3 = llvm.mlir.constant(42 : index) : !llvm.i64
// CHECK-NEXT: %4 = llvm.insertvalue %3, %2[1 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: %5 = llvm.extractvalue %arg0[1 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %6 = llvm.insertvalue %5, %4[2 : index] : !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: llvm.bitcast %arg0 : !llvm<"{ float*, [2 x i64] }"> to !llvm<"{ float*, [2 x i64] }">
%0 = memref_cast %mixed : memref<42x?xf32> to memref<?x?xf32>
return
}
// CHECK-LABEL: func @memref_cast_mixed_to_static
func @memref_cast_mixed_to_static(%mixed : memref<42x?xf32>) {
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: llvm.bitcast %arg0 : !llvm<"{ float*, [2 x i64] }"> to !llvm<"{ float*, [2 x i64] }">
%0 = memref_cast %mixed : memref<42x?xf32> to memref<42x1xf32>
return
}
// CHECK-LABEL: func @memref_cast_mixed_to_mixed
func @memref_cast_mixed_to_mixed(%mixed : memref<42x?xf32>) {
// CHECK-NEXT: %0 = llvm.extractvalue %arg0[0 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %1 = llvm.mlir.undef : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %2 = llvm.insertvalue %0, %1[0 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: %3 = llvm.mlir.constant(42 : index) : !llvm.i64
// CHECK-NEXT: %4 = llvm.insertvalue %3, %2[1 : index] : !llvm<"{ float*, i64 }">
// CHECK-NEXT: llvm.bitcast %arg0 : !llvm<"{ float*, [2 x i64] }"> to !llvm<"{ float*, [2 x i64] }">
%0 = memref_cast %mixed : memref<42x?xf32> to memref<?x1xf32>
return
}
// CHECK-LABEL: func @mixed_memref_dim(%arg0: !llvm<"{ float*, i64, i64, i64 }">)
// CHECK-LABEL: func @mixed_memref_dim(%arg0: !llvm<"{ float*, [5 x i64] }">)
func @mixed_memref_dim(%mixed : memref<42x?x?x13x?xf32>) {
// CHECK-NEXT: %0 = llvm.mlir.constant(42 : index) : !llvm.i64
%0 = dim %mixed, 0 : memref<42x?x?x13x?xf32>
// CHECK-NEXT: %1 = llvm.extractvalue %arg0[1 : index] : !llvm<"{ float*, i64, i64, i64 }">
// CHECK-NEXT: %1 = llvm.extractvalue %arg0[1, 1] : !llvm<"{ float*, [5 x i64] }">
%1 = dim %mixed, 1 : memref<42x?x?x13x?xf32>
// CHECK-NEXT: %2 = llvm.extractvalue %arg0[2 : index] : !llvm<"{ float*, i64, i64, i64 }">
// CHECK-NEXT: %2 = llvm.extractvalue %arg0[1, 2] : !llvm<"{ float*, [5 x i64] }">
%2 = dim %mixed, 2 : memref<42x?x?x13x?xf32>
// CHECK-NEXT: %3 = llvm.mlir.constant(13 : index) : !llvm.i64
%3 = dim %mixed, 3 : memref<42x?x?x13x?xf32>
// CHECK-NEXT: %4 = llvm.extractvalue %arg0[3 : index] : !llvm<"{ float*, i64, i64, i64 }">
// CHECK-NEXT: %4 = llvm.extractvalue %arg0[1, 4] : !llvm<"{ float*, [5 x i64] }">
%4 = dim %mixed, 4 : memref<42x?x?x13x?xf32>
return
}
// CHECK-LABEL: func @static_memref_dim(%arg0: !llvm<"float*">)
// CHECK-LABEL: func @static_memref_dim(%arg0: !llvm<"{ float*, [5 x i64] }">)
func @static_memref_dim(%static : memref<42x32x15x13x27xf32>) {
// CHECK-NEXT: %0 = llvm.mlir.constant(42 : index) : !llvm.i64
%0 = dim %static, 0 : memref<42x32x15x13x27xf32>

View file

@@ -317,23 +317,23 @@ func @more_imperfectly_nested_loops() {
func @get_i64() -> (i64)
// CHECK-LABEL: func @get_f32() -> !llvm.float
func @get_f32() -> (f32)
// CHECK-LABEL: func @get_memref() -> !llvm<"{ float*, i64, i64 }">
// CHECK-LABEL: func @get_memref() -> !llvm<"{ float*, [4 x i64] }">
func @get_memref() -> (memref<42x?x10x?xf32>)
// CHECK-LABEL: func @multireturn() -> !llvm<"{ i64, float, { float*, i64, i64 } }"> {
// CHECK-LABEL: func @multireturn() -> !llvm<"{ i64, float, { float*, [4 x i64] } }"> {
func @multireturn() -> (i64, f32, memref<42x?x10x?xf32>) {
^bb0:
// CHECK-NEXT: {{.*}} = llvm.call @get_i64() : () -> !llvm.i64
// CHECK-NEXT: {{.*}} = llvm.call @get_f32() : () -> !llvm.float
// CHECK-NEXT: {{.*}} = llvm.call @get_memref() : () -> !llvm<"{ float*, i64, i64 }">
// CHECK-NEXT: {{.*}} = llvm.call @get_memref() : () -> !llvm<"{ float*, [4 x i64] }">
%0 = call @get_i64() : () -> (i64)
%1 = call @get_f32() : () -> (f32)
%2 = call @get_memref() : () -> (memref<42x?x10x?xf32>)
// CHECK-NEXT: {{.*}} = llvm.mlir.undef : !llvm<"{ i64, float, { float*, i64, i64 } }">
// CHECK-NEXT: {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[0 : index] : !llvm<"{ i64, float, { float*, i64, i64 } }">
// CHECK-NEXT: {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[1 : index] : !llvm<"{ i64, float, { float*, i64, i64 } }">
// CHECK-NEXT: {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[2 : index] : !llvm<"{ i64, float, { float*, i64, i64 } }">
// CHECK-NEXT: llvm.return {{.*}} : !llvm<"{ i64, float, { float*, i64, i64 } }">
// CHECK-NEXT: {{.*}} = llvm.mlir.undef : !llvm<"{ i64, float, { float*, [4 x i64] } }">
// CHECK-NEXT: {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[0 : index] : !llvm<"{ i64, float, { float*, [4 x i64] } }">
// CHECK-NEXT: {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[1 : index] : !llvm<"{ i64, float, { float*, [4 x i64] } }">
// CHECK-NEXT: {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[2 : index] : !llvm<"{ i64, float, { float*, [4 x i64] } }">
// CHECK-NEXT: llvm.return {{.*}} : !llvm<"{ i64, float, { float*, [4 x i64] } }">
return %0, %1, %2 : i64, f32, memref<42x?x10x?xf32>
}
@@ -341,10 +341,10 @@ func @multireturn() -> (i64, f32, memref<42x?x10x?xf32>) {
// CHECK-LABEL: func @multireturn_caller() {
func @multireturn_caller() {
^bb0:
// CHECK-NEXT: {{.*}} = llvm.call @multireturn() : () -> !llvm<"{ i64, float, { float*, i64, i64 } }">
// CHECK-NEXT: {{.*}} = llvm.extractvalue {{.*}}[0 : index] : !llvm<"{ i64, float, { float*, i64, i64 } }">
// CHECK-NEXT: {{.*}} = llvm.extractvalue {{.*}}[1 : index] : !llvm<"{ i64, float, { float*, i64, i64 } }">
// CHECK-NEXT: {{.*}} = llvm.extractvalue {{.*}}[2 : index] : !llvm<"{ i64, float, { float*, i64, i64 } }">
// CHECK-NEXT: {{.*}} = llvm.call @multireturn() : () -> !llvm<"{ i64, float, { float*, [4 x i64] } }">
// CHECK-NEXT: {{.*}} = llvm.extractvalue {{.*}}[0 : index] : !llvm<"{ i64, float, { float*, [4 x i64] } }">
// CHECK-NEXT: {{.*}} = llvm.extractvalue {{.*}}[1 : index] : !llvm<"{ i64, float, { float*, [4 x i64] } }">
// CHECK-NEXT: {{.*}} = llvm.extractvalue {{.*}}[2 : index] : !llvm<"{ i64, float, { float*, [4 x i64] } }">
%0:3 = call @multireturn() : () -> (i64, f32, memref<42x?x10x?xf32>)
%1 = constant 42 : i64
// CHECK: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64

View file

@@ -1,6 +1,7 @@
// RUN: mlir-opt %s -lower-to-llvm | FileCheck %s
// CHECK-LABEL: func @address_space(%{{.*}}: !llvm<"float addrspace(7)*">)
// CHECK-LABEL: func @address_space(
// CHECK: %{{.*}}: !llvm<"{ float addrspace(7)*, [1 x i64] }">)
func @address_space(%arg0 : memref<32xf32, (d0) -> (d0), 7>) {
%0 = alloc() : memref<32xf32, (d0) -> (d0), 5>
%1 = constant 7 : index

View file

@@ -64,9 +64,9 @@ func @viewRangeConversion(%arg0: memref<?x?xf32>, %arg1: !linalg.range, %arg2: !
}
// LLVM-LABEL: @viewRangeConversion
// LLVM-NEXT: %0 = llvm.mlir.undef : !llvm<"{ float*, i64, [2 x i64], [2 x i64] }">
// LLVM-NEXT: %1 = llvm.extractvalue %arg0[0] : !llvm<"{ float*, i64, i64 }">
// LLVM-NEXT: %1 = llvm.extractvalue %arg0[0] : !llvm<"{ float*, [2 x i64] }">
// LLVM-NEXT: %2 = llvm.insertvalue %1, %0[0] : !llvm<"{ float*, i64, [2 x i64], [2 x i64] }">
// LLVM-NEXT: %3 = llvm.extractvalue %arg0[2] : !llvm<"{ float*, i64, i64 }">
// LLVM-NEXT: %3 = llvm.extractvalue %arg0[1, 1] : !llvm<"{ float*, [2 x i64] }">
// LLVM-NEXT: %4 = llvm.mlir.constant(1 : index) : !llvm.i64
// LLVM-NEXT: %5 = llvm.mul %4, %3 : !llvm.i64
// LLVM-NEXT: %6 = llvm.mlir.constant(0 : index) : !llvm.i64
@@ -98,9 +98,9 @@ func @viewNonRangeConversion(%arg0: memref<?x?xf32>, %arg1: !linalg.range, %arg2
}
// LLVM-LABEL: @viewNonRangeConversion
// LLVM-NEXT: %0 = llvm.mlir.undef : !llvm<"{ float*, i64, [1 x i64], [1 x i64] }">
// LLVM-NEXT: %1 = llvm.extractvalue %arg0[0] : !llvm<"{ float*, i64, i64 }">
// LLVM-NEXT: %1 = llvm.extractvalue %arg0[0] : !llvm<"{ float*, [2 x i64] }">
// LLVM-NEXT: %2 = llvm.insertvalue %1, %0[0] : !llvm<"{ float*, i64, [1 x i64], [1 x i64] }">
// LLVM-NEXT: %3 = llvm.extractvalue %arg0[2] : !llvm<"{ float*, i64, i64 }">
// LLVM-NEXT: %3 = llvm.extractvalue %arg0[1, 1] : !llvm<"{ float*, [2 x i64] }">
// LLVM-NEXT: %4 = llvm.mlir.constant(1 : index) : !llvm.i64
// LLVM-NEXT: %5 = llvm.mul %4, %3 : !llvm.i64
// LLVM-NEXT: %6 = llvm.mlir.constant(0 : index) : !llvm.i64