
Commit

Merge pull request #271 from Xilinx/bump_to_513d89c1

[AutoBump] Merge with 513d89c (May 17, needs LLVM bump) (38)

jorickert authored Sep 6, 2024
2 parents 78194ef + c25e4a7 commit a51723a
Showing 32 changed files with 2,756 additions and 250 deletions.
62 changes: 36 additions & 26 deletions docs/development.md
@@ -53,42 +53,52 @@ Two setups are possible to build: in-tree and out-of-tree. The in-tree setup is

The following command generates configuration files to build the project *in-tree*, that is, using llvm/llvm-project as the main build. This will build LLVM as well as torch-mlir and its subprojects. On Windows, use the "Developer PowerShell for Visual Studio" to ensure that the compiler and linker binaries are in the `PATH` variable.

This requires `lld`, `clang`, `ccache`, and other dependencies for building `libtorch` / `PyTorch` wheels from source. If you run into issues because of these, try the [simplified build command](#simplified-build).

```shell
cmake -GNinja -Bbuild \
externals/llvm-project/llvm \
-DCMAKE_BUILD_TYPE=Release \
-DLLVM_ENABLE_ASSERTIONS=ON \
-DPython3_FIND_VIRTUALENV=ONLY \
-DLLVM_ENABLE_PROJECTS=mlir \
-DLLVM_EXTERNAL_PROJECTS="torch-mlir" \
-DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR="$PWD" \
-DMLIR_ENABLE_BINDINGS_PYTHON=ON \
-DLLVM_TARGETS_TO_BUILD=host \
`# use clang` \
-DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ \
`# use ccache to cache build results` \
-DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
`# use LLD to link in seconds, rather than minutes` \
`# if using clang <= 13, replace --ld-path=lld with -fuse-ld=lld` \
-DCMAKE_EXE_LINKER_FLAGS_INIT="--ld-path=lld" \
-DCMAKE_MODULE_LINKER_FLAGS_INIT="--ld-path=lld" \
-DCMAKE_SHARED_LINKER_FLAGS_INIT="--ld-path=lld" \
`# use a local libtorch cache instead of downloading the latest libtorch every time;` \
`# testing against a mismatched version of libtorch may cause failures` \
-DLIBTORCH_CACHE=ON \
`# enable an experimental path to build libtorch (and PyTorch wheels) from source,` \
`# instead of downloading them` \
-DLIBTORCH_SRC_BUILD=ON \
`# set the variant of libtorch to build / link against (shared|static, optionally with cxxabi11)` \
-DLIBTORCH_VARIANT=shared
```

#### Simplified build

If you're running into issues with the above build command, consider using the following:

```shell
cmake -GNinja -Bbuild \
-DCMAKE_BUILD_TYPE=Release \
-DPython3_FIND_VIRTUALENV=ONLY \
-DLLVM_ENABLE_PROJECTS=mlir \
-DLLVM_EXTERNAL_PROJECTS="torch-mlir" \
-DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR="$PWD" \
-DMLIR_ENABLE_BINDINGS_PYTHON=ON \
-DLLVM_TARGETS_TO_BUILD=host \
externals/llvm-project/llvm
```

#### Flags to enable MLIR debugging:
25 changes: 25 additions & 0 deletions include/torch-mlir/Conversion/TorchOnnxToTorch/Patterns.h
@@ -97,6 +97,31 @@ struct OpBinder {
return success();
}

// Operand matches of different arities.
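// Bind the op's single operand if its type is a list of valid tensor types.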
ParseResult tensorListOperand(Value &value0) {
if (op->getNumOperands() != 1)
return failure();
value0 = op->getOperand(0);
auto tt = dyn_cast<Torch::ListType>(value0.getType());
if (!tt)
return failure();
if (!toValidTensorType(tt.getContainedType()))
return failure();
return success();
}

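// Bind the op's single result type if it is a list of valid tensor types.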
ParseResult tensorListResultType(Torch::ListType &type0) {
if (op->getNumResults() != 1)
return failure();
auto tt = dyn_cast<Torch::ListType>(op->getResult(0).getType());
if (!tt)
return failure();
if (!toValidTensorType(tt.getContainedType()))
return failure();
type0 = tt;
return success();
}

ParseResult tensorResultTypes(llvm::SmallVector<mlir::Type> &typeList) {
for (auto result : op->getResults()) {
auto t = toValidTensorType(result.getType());
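
For orientation, here is a minimal sketch of how these new list-typed binder helpers might be used in a TorchOnnxToTorch pattern. The op name and the trivial rewrite are purely illustrative, not part of this commit:

```cpp
// Illustrative sketch only: the op name and rewrite body are hypothetical.
patterns.onOp(
    "SequenceIdentity", 1,
    [](OpBinder binder, ConversionPatternRewriter &rewriter) {
      Torch::ListType resultType;
      Value operand;
      // Bind the single list-of-tensor operand and result type.
      if (binder.tensorListOperand(operand) ||
          binder.tensorListResultType(resultType))
        return failure();
      // A real pattern would build replacement ops here; the sketch just
      // forwards the operand unchanged.
      rewriter.replaceOp(binder.op, operand);
      return success();
    });
```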
25 changes: 20 additions & 5 deletions include/torch-mlir/Dialect/Torch/IR/TorchTypes.h
@@ -53,6 +53,9 @@ class BaseTensorType : public Type {
/// convenient API.
Type getOptionalDtype() const;

/// Get the raw optional sparse tensor encoding.
Attribute getOptionalSparsity() const;

/// Return true if this type has a list of sizes.
bool hasSizes() const { return getOptionalSizes().has_value(); }

@@ -93,6 +96,10 @@
Type getWithSizesAndDtype(std::optional<ArrayRef<int64_t>> optionalSizes,
Type optionalDtype) const;

Type getWithSizesAndDtypeAndSparsity(
std::optional<ArrayRef<int64_t>> optionalSizes, Type optionalDtype,
Attribute optionalSparsity) const;

/// Return a type with the same shape and dtype as this one, but with
/// value semantics.
ValueTensorType getWithValueSemantics() const;
@@ -129,23 +136,31 @@ namespace Torch {

inline std::optional<ArrayRef<int64_t>>
BaseTensorType::getOptionalSizes() const {
if (auto tensor = mlir::dyn_cast<NonValueTensorType>(*this))
return tensor.getOptionalSizes();
if (auto tensor = mlir::dyn_cast<ValueTensorType>(*this))
return tensor.getOptionalSizes();
llvm_unreachable("not a BaseTensorType!");
}

inline Type BaseTensorType::getOptionalDtype() const {
if (auto tensor = mlir::dyn_cast<NonValueTensorType>(*this))
return tensor.getOptionalDtype();
if (auto tensor = mlir::dyn_cast<ValueTensorType>(*this))
return tensor.getOptionalDtype();
llvm_unreachable("not a BaseTensorType!");
}

inline Attribute BaseTensorType::getOptionalSparsity() const {
if (auto tensor = mlir::dyn_cast<NonValueTensorType>(*this))
return tensor.getOptionalSparsity();
if (auto tensor = mlir::dyn_cast<ValueTensorType>(*this))
return tensor.getOptionalSparsity();
llvm_unreachable("not a BaseTensorType!");
}

inline bool BaseTensorType::classof(Type type) {
return mlir::isa<NonValueTensorType, ValueTensorType>(type);
}

} // namespace Torch
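
As a hedged illustration (not from this commit), the new sparsity accessor and type builder might be combined like this when deriving a result type that preserves the input's sparse encoding; `input` and `newSizes` are assumed to be in scope:

```cpp
// Illustrative sketch only: `input` is a Value of tensor type and
// `newSizes` is an ArrayRef<int64_t> of the derived shape.
auto tensorType = mlir::cast<Torch::BaseTensorType>(input.getType());
// Carry the sparse encoding (if any) over to the derived type.
Attribute sparsity = tensorType.getOptionalSparsity();
Type resultType = tensorType.getWithSizesAndDtypeAndSparsity(
    newSizes, tensorType.getOptionalDtype(), sparsity);
```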
28 changes: 28 additions & 0 deletions include/torch-mlir/Dialect/Torch/Utils/SparsityUtils.h
@@ -0,0 +1,28 @@
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//
#ifndef TORCHMLIR_DIALECT_TORCH_SPARSITY_UTILS_H
#define TORCHMLIR_DIALECT_TORCH_SPARSITY_UTILS_H

#include "mlir/IR/Attributes.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LogicalResult.h"

namespace mlir {
namespace torch {
namespace Torch {

// Create a new SparseTensorEncodingAttr based on the provided `attr`, but with
// a new dense level inserted at `dim`.
FailureOr<Attribute> getSparsityWithDenseLTAtDim(Attribute attr, Value dim);

} // namespace Torch
} // namespace torch
} // namespace mlir

#endif // TORCHMLIR_DIALECT_TORCH_SPARSITY_UTILS_H
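
A minimal sketch of the intended call pattern, assuming `inputType` is a `Torch::BaseTensorType` and `dim` is a `Value` holding the insertion index (illustrative, not from this commit):

```cpp
// Illustrative sketch only.
FailureOr<Attribute> sparsity =
    getSparsityWithDenseLTAtDim(inputType.getOptionalSparsity(), dim);
if (failed(sparsity))
  return failure(); // the encoding could not be rewritten for this dim
// *sparsity now carries a dense level at `dim` and can be attached to the
// result tensor type.
```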
3 changes: 3 additions & 0 deletions include/torch-mlir/Dialect/Torch/Utils/Utils.h
@@ -86,6 +86,9 @@ bool isBuiltInType(Type type);
// std::nullopt is returned if the tensorRank can't be determined.
std::optional<unsigned> getTensorRank(Value tensor);

// Helper function to get the total number of elements in a tensor.
// std::nullopt is returned if the element count can't be determined.
std::optional<int64_t> getTensorNumel(Value tensor);

bool isViewLikeOp(Operation *op);

Value getConstantWithGivenDtypeAndValue(PatternRewriter &rewriter, Location loc,
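
A hedged example of how a caller might guard on the new helper (names are illustrative):

```cpp
// Illustrative sketch only: bail out of a rewrite when the element count
// is not statically known.
std::optional<int64_t> numel = Torch::getTensorNumel(tensor);
if (!numel)
  return failure();
bool isSingleElement = (*numel == 1);
```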
77 changes: 77 additions & 0 deletions lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp
@@ -1266,6 +1266,83 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
}
return failure();
});
patterns.onOp(
"GlobalMaxPool", 1,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
Value operand;
if (binder.tensorOperand(operand) ||
binder.tensorResultType(resultType))
return failure();

auto inputTensorType =
mlir::dyn_cast<Torch::ValueTensorType>(operand.getType());
if (!inputTensorType || !inputTensorType.hasSizes()) {
return rewriter.notifyMatchFailure(
binder.op, "Expected input type having sizes");
}
ArrayRef<int64_t> inputShape = inputTensorType.getSizes();
unsigned inputRank = inputShape.size();
if (!resultType || !resultType.hasSizes()) {
return rewriter.notifyMatchFailure(
binder.op, "Expected result type having sizes");
}
SmallVector<Value> cstKernel, cstPadding, cstStrides, cstDilations;
Value cstZero = rewriter.create<Torch::ConstantIntOp>(
binder.getLoc(), rewriter.getI64IntegerAttr(0));
Value cstOne = rewriter.create<Torch::ConstantIntOp>(
binder.getLoc(), rewriter.getI64IntegerAttr(1));
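// GlobalMaxPool reduces every spatial dimension (dims 2..rank-1) to size
// 1, so each kernel extent equals the spatial extent, with stride and
// dilation 1 and no padding. Dynamic extents are read back at runtime
// via AtenSizeIntOp.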
for (unsigned i = 2; i < inputRank; i++) {
if (inputShape[i] == Torch::kUnknownSize) {
Value dim = rewriter.create<Torch::ConstantIntOp>(
binder.getLoc(), rewriter.getI64IntegerAttr(i));
Value inputDimSize = rewriter.create<Torch::AtenSizeIntOp>(
binder.getLoc(), operand, dim);
cstKernel.push_back(inputDimSize);
} else {
cstKernel.push_back(rewriter.create<Torch::ConstantIntOp>(
binder.getLoc(), rewriter.getI64IntegerAttr(inputShape[i])));
}
cstPadding.push_back(cstZero);
cstDilations.push_back(cstOne);
cstStrides.push_back(cstOne);
}
Value kernelSizeList = rewriter.create<Torch::PrimListConstructOp>(
binder.getLoc(),
Torch::ListType::get(Torch::IntType::get(binder.op->getContext())),
cstKernel);
Value paddingList = rewriter.create<Torch::PrimListConstructOp>(
binder.getLoc(),
Torch::ListType::get(Torch::IntType::get(binder.op->getContext())),
cstPadding);
Value dilationsList = rewriter.create<Torch::PrimListConstructOp>(
binder.getLoc(),
Torch::ListType::get(Torch::IntType::get(binder.op->getContext())),
cstDilations);
Value stridesList = rewriter.create<Torch::PrimListConstructOp>(
binder.getLoc(),
Torch::ListType::get(Torch::IntType::get(binder.op->getContext())),
cstStrides);
Value cstCeilMode =
rewriter.create<Torch::ConstantBoolOp>(binder.getLoc(), false);

if (inputRank == 3) {
rewriter.replaceOpWithNewOp<Torch::AtenMaxPool1dOp>(
binder.op, resultType, operand, kernelSizeList, stridesList,
paddingList, dilationsList, cstCeilMode);
return success();
} else if (inputRank == 4) {
rewriter.replaceOpWithNewOp<Torch::AtenMaxPool2dOp>(
binder.op, resultType, operand, kernelSizeList, stridesList,
paddingList, dilationsList, cstCeilMode);
return success();
} else if (inputRank == 5) {
rewriter.replaceOpWithNewOp<Torch::AtenMaxPool3dOp>(
binder.op, resultType, operand, kernelSizeList, stridesList,
paddingList, dilationsList, cstCeilMode);
return success();
}
return failure();
});
patterns.onOp(
"LayerNormalization", 17,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {