From bfde17834dd9bd30da8f56166cd545f566f64895 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrzej=20Warzy=C5=84ski?= Date: Mon, 30 Sep 2024 14:53:50 +0100 Subject: [PATCH] [mlir] Update the return type of `getNum{Dynamic|Scalable}Dims` (#110472) Updates the return type of `getNumDynamicDims` and `getNumScalableDims` from `int64_t` to `size_t`. This is for consistency with other helpers/methods that return "size" and to reduce the number of `static_cast`s in various places. --- .../mlir/Dialect/SparseTensor/IR/SparseTensorType.h | 4 ++-- mlir/include/mlir/IR/BuiltinTypeInterfaces.td | 2 +- mlir/include/mlir/IR/BuiltinTypes.td | 2 +- mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp | 3 +-- mlir/lib/Dialect/GPU/IR/GPUDialect.cpp | 3 +-- mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp | 6 ++---- mlir/lib/Dialect/Tensor/IR/TensorOps.cpp | 9 +++------ 7 files changed, 11 insertions(+), 18 deletions(-) diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h index a154d7fa5fb6e5..620fd7c63146dd 100644 --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h @@ -293,7 +293,7 @@ class SparseTensorType { /// Returns the number of dimensions which have dynamic sizes. - /// The return type is `int64_t` to maintain consistency with + /// The return type is `size_t` to maintain consistency with /// `ShapedType::Trait::getNumDynamicDims`. 
- int64_t getNumDynamicDims() const { return rtp.getNumDynamicDims(); } + size_t getNumDynamicDims() const { return rtp.getNumDynamicDims(); } ArrayRef<LevelType> getLvlTypes() const { return enc.getLvlTypes(); } LevelType getLvlType(Level l) const { diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td index db38e2e1bce22a..c9dcd546cf67c2 100644 --- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td +++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td @@ -166,7 +166,7 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> { /// If this is a ranked type, return the number of dimensions with dynamic /// size. Otherwise, abort. - int64_t getNumDynamicDims() const { + size_t getNumDynamicDims() const { return llvm::count_if($_type.getShape(), ::mlir::ShapedType::isDynamic); } diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td index c738a8a3becc16..b2b41b16beec29 100644 --- a/mlir/include/mlir/IR/BuiltinTypes.td +++ b/mlir/include/mlir/IR/BuiltinTypes.td @@ -1253,7 +1253,7 @@ def Builtin_Vector : Builtin_Type<"Vector", "vector", } /// Get the number of scalable dimensions. 
- int64_t getNumScalableDims() const { + size_t getNumScalableDims() const { return llvm::count(getScalableDims(), true); } diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp index 04a8ff30ee946b..f1841b860ff81a 100644 --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp @@ -249,8 +249,7 @@ AllocTensorOp::getBufferType(Value value, const BufferizationOptions &options, LogicalResult AllocTensorOp::verify() { if (getCopy() && !getDynamicSizes().empty()) return emitError("dynamic sizes not needed when copying a tensor"); - if (!getCopy() && getType().getNumDynamicDims() != - static_cast<int64_t>(getDynamicSizes().size())) + if (!getCopy() && getType().getNumDynamicDims() != getDynamicSizes().size()) return emitError("expected ") << getType().getNumDynamicDims() << " dynamic sizes"; if (getCopy() && getCopy().getType() != getType()) diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp index f822c11aeec008..956877497d9338 100644 --- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp +++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp @@ -2045,8 +2045,7 @@ void WaitOp::getCanonicalizationPatterns(RewritePatternSet &results, LogicalResult AllocOp::verify() { auto memRefType = llvm::cast<MemRefType>(getMemref().getType()); - if (static_cast<int64_t>(getDynamicSizes().size()) != - memRefType.getNumDynamicDims()) + if (getDynamicSizes().size() != memRefType.getNumDynamicDims()) return emitOpError("dimension operand count does not equal memref " "dynamic dimension count"); diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp index 75b9729e63648c..d579a27359dfa0 100644 --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -205,8 +205,7 @@ static LogicalResult verifyAllocLikeOp(AllocLikeOp op) { if (!memRefType) return op.emitOpError("result must be a memref");
- if (static_cast<int64_t>(op.getDynamicSizes().size()) != - memRefType.getNumDynamicDims()) + if (op.getDynamicSizes().size() != memRefType.getNumDynamicDims()) return op.emitOpError("dimension operand count does not equal memref " "dynamic dimension count"); @@ -283,8 +282,7 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> { // Create new memref type (which will have fewer dynamic dimensions). MemRefType newMemRefType = MemRefType::Builder(memrefType).setShape(newShapeConstants); - assert(static_cast<int64_t>(dynamicSizes.size()) == - newMemRefType.getNumDynamicDims()); + assert(dynamicSizes.size() == newMemRefType.getNumDynamicDims()); // Create and insert the alloc op for the new memref. auto newAlloc = rewriter.create<AllocLikeOp>( diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp index 1ac96756e22b5e..defac8308b9092 100644 --- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp @@ -179,8 +179,7 @@ static RankedTensorType foldDynamicToStaticDimSizes(RankedTensorType type, ValueRange dynamicSizes, SmallVector<Value> &foldedDynamicSizes) { SmallVector<int64_t> staticShape(type.getShape()); - assert(type.getNumDynamicDims() == - static_cast<int64_t>(dynamicSizes.size()) && + assert(type.getNumDynamicDims() == dynamicSizes.size() && "incorrect number of dynamic sizes"); // Compute new static and dynamic sizes. 
@@ -894,8 +893,7 @@ void EmptyOp::build(OpBuilder &builder, OperationState &result, } LogicalResult EmptyOp::verify() { - if (getType().getNumDynamicDims() != - static_cast<int64_t>(getDynamicSizes().size())) + if (getType().getNumDynamicDims() != getDynamicSizes().size()) return emitOpError("incorrect number of dynamic sizes, has ") << getDynamicSizes().size() << ", expected " << getType().getNumDynamicDims(); @@ -3672,8 +3670,7 @@ void SplatOp::getAsmResultNames( } LogicalResult SplatOp::verify() { - if (getType().getNumDynamicDims() != - static_cast<int64_t>(getDynamicSizes().size())) + if (getType().getNumDynamicDims() != getDynamicSizes().size()) return emitOpError("incorrect number of dynamic sizes, has ") << getDynamicSizes().size() << ", expected " << getType().getNumDynamicDims();