[FXML-3548] Bump torch-mlir
Bump torch-mlir to ff7f8b2 and llvm to d13da15.

For now, llvm points to the upstream commit; this will change again once xilinx/llvm-project itself is bumped. The test failure in `Conversion/TorchToTosa/torch-backend-to-tosa-backend-pipeline.mlir` is expected, as it requires changes from the Xilinx llvm fork.
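
For reference, a rough sketch of how the llvm-project submodule gets re-pointed at upstream; the exact commands are an assumption, only the commit hash and the `.gitmodules` change below come from this commit.

```shell
# Sketch only: re-point externals/llvm-project at upstream llvm/llvm-project
# and pin it to d13da15 (hash from the commit message). The concrete steps
# used for the bump are not recorded here and are assumed.
git submodule sync externals/llvm-project   # pick up the new url from .gitmodules
git -C externals/llvm-project fetch origin
git -C externals/llvm-project checkout d13da15
git add .gitmodules externals/llvm-project
```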
TinaAMD committed Nov 13, 2023
1 parent 0953522 commit 15acd5d
Showing 142 changed files with 10,362 additions and 2,328 deletions.
19 changes: 15 additions & 4 deletions .github/workflows/RollPyTorch.yml
@@ -24,9 +24,21 @@ jobs:
- name: Get torch-mlir
uses: actions/checkout@v3
with:
submodules: 'true'
submodules: 'false'
token: ${{ secrets.WORKFLOW_INVOCATION_TOKEN }}

- name: Get LLVM and StableHlo submodules
run: |
set -eo pipefail
cd ${GITHUB_WORKSPACE}
# Fetching the submodules concurrently may cause problems, so we fetch
# them one after another.
rm -f .git/modules/externals/llvm-project/index.lock
rm -f .git/modules/externals/stablehlo/index.lock
git submodule update --init --recursive externals/llvm-project
git submodule update --init --recursive externals/stablehlo
- name: Setup ccache
uses: ./.github/actions/setup-build
with:
@@ -71,15 +83,14 @@ jobs:
echo "PTVISION_RELEASE=${VISION_RELEASE}" >> ${GITHUB_ENV}
echo "PT_HASH_CHANGED=${PT_HASH_CHANGED}" >> ${GITHUB_ENV}
- name: Build and test (in-tree), also update ODS and abstract interpretation library
- name: Build and test (out-of-tree), also update ODS and abstract interpretation library
if: env.PT_HASH_CHANGED != '0'
run: |
cd ${GITHUB_WORKSPACE}
TM_PACKAGES="in-tree" TM_USE_PYTORCH_BINARY="OFF" \
TM_PACKAGES="out-of-tree" TM_USE_PYTORCH_BINARY="OFF" \
TORCH_MLIR_SRC_PYTORCH_BRANCH="${{ env.PT_HASH }}" \
TORCH_MLIR_SRC_PYTORCH_RELEASE="${{ env.PT_RELEASE }}" \
TM_UPDATE_ODS_AND_ABSTRACT_INTERP_LIB="ON" \
TM_PYTHON_VERSIONS="cp311-cp311" \
./build_tools/python_deploy/build_linux_packages.sh
- name: Post issue comment on build failure
20 changes: 10 additions & 10 deletions .github/workflows/bazelBuildAndTest.yml
@@ -58,33 +58,33 @@ jobs:
-t torch-mlir:ci \
.
- name: Bazel build torch-mlir
- name: Verify buildifier was run (bazel lint)
run: |
docker run --rm \
-v "$(pwd)":"/opt/src/torch-mlir" \
-v "${HOME}/.cache/bazel":"/root/.cache/bazel" \
torch-mlir:ci \
bazel build @torch-mlir//:torch-mlir-opt
bazel run @torch-mlir//:buildifier
if [ -n "$(git status --porcelain)" ]; then
echo "Please 'bazel run @torch-mlir//:buildifier' and commit changes."
exit 1
fi
- name: Bazel test torch-mlir (lit tests)
- name: Bazel build torch-mlir
run: |
docker run --rm \
-v "$(pwd)":"/opt/src/torch-mlir" \
-v "${HOME}/.cache/bazel":"/root/.cache/bazel" \
torch-mlir:ci \
bazel test @torch-mlir//test/...
bazel build @torch-mlir//:torch-mlir-opt
- name: Verify buildifier was run (bazel lint)
- name: Bazel test torch-mlir (lit tests)
run: |
docker run --rm \
-v "$(pwd)":"/opt/src/torch-mlir" \
-v "${HOME}/.cache/bazel":"/root/.cache/bazel" \
torch-mlir:ci \
bazel run @torch-mlir//:buildifier
if [ -n "$(git status --porcelain)" ]; then
echo "Please 'bazel run @torch-mlir//:buildifier' and commit changes."
exit 1
fi
bazel test @torch-mlir//test/...
# Switch back bazel cache directory to user ownership
# to allow GHA post-cache step to save cache without
2 changes: 1 addition & 1 deletion .github/workflows/merge-rollpytorch.yml
@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
if: |
github.repository == 'llvm/torch-mlir' &&
github.event.workflow_run.actor.login == 'silvasean' &&
github.event.workflow_run.actor.login == 'stellaraccident' &&
github.event.workflow_run.conclusion == 'success'
steps:
9 changes: 4 additions & 5 deletions .gitmodules
@@ -1,7 +1,6 @@
[submodule "externals/llvm-project"]
path = externals/llvm-project
url = https://github.com/Xilinx/llvm-project.git
branch = misc_fixes
[submodule "externals/mlir-hlo"]
path = externals/mlir-hlo
url = https://github.com/tensorflow/mlir-hlo.git
url = https://github.com/llvm/llvm-project.git
[submodule "externals/stablehlo"]
path = externals/stablehlo
url = https://github.com/openxla/stablehlo.git
19 changes: 19 additions & 0 deletions CITATION.cff
@@ -0,0 +1,19 @@
cff-version: 1.2.0
title: Torch-MLIR
message: >-
If you use this software, please cite it using the
metadata from this file.
type: software
authors:
- name: LLVM
repository-code: 'https://github.com/llvm/torch-mlir'
abstract: >-
The Torch-MLIR project aims to provide first class support
from the PyTorch ecosystem to the MLIR ecosystem.
keywords:
- Compiler
- PyTorch
- MLIR
license:
- Apache-2.0 with LLVM Exceptions
- BSD
24 changes: 16 additions & 8 deletions CMakeLists.txt
@@ -118,14 +118,7 @@ else()
endif()

if (TORCH_MLIR_ENABLE_STABLEHLO)
set(STABLEHLO_BUILD_EMBEDDED ON)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/externals/mlir-hlo
${CMAKE_CURRENT_BINARY_DIR}/mlir-hlo
EXCLUDE_FROM_ALL)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/externals/mlir-hlo/include)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/externals/mlir-hlo)
include_directories(${CMAKE_CURRENT_BINARY_DIR}/mlir-hlo/include)
include_directories(${CMAKE_CURRENT_BINARY_DIR}/mlir-hlo)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/externals/stablehlo)
endif()

set(TORCH_MLIR_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
@@ -229,3 +222,18 @@ if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
COMPONENT torch-mlir-headers)
endif()
endif()

# Important: If loading StableHLO in this fashion, it must come last,
# after all of our libraries and test targets have been defined.
# It seems that they both abuse upstream CMake macros that accumulate
# properties.
# Getting this wrong results in building large parts of the stablehlo
# project that we don't actually depend on. Further some of those parts
# do not even compile on all platforms.
if (TORCH_MLIR_ENABLE_STABLEHLO)
set(STABLEHLO_BUILD_EMBEDDED ON)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/externals/stablehlo
${CMAKE_CURRENT_BINARY_DIR}/stablehlo
EXCLUDE_FROM_ALL)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/externals/stablehlo)
endif()
12 changes: 6 additions & 6 deletions README.md
@@ -43,25 +43,25 @@ We have few paths to lower down to the Torch MLIR Dialect.

## Install torch-mlir snapshot

At the time of writing, we release pre-built snapshot of torch-mlir for Python 3.10 on Linux and macOS.
At the time of writing, we release pre-built snapshot of torch-mlir for Python 3.11 on Linux and macOS.

If you have Python 3.10, the following commands initialize a virtual environment.
If you have Python 3.11, the following commands initialize a virtual environment.
```shell
python3.10 -m venv mlir_venv
python3.11 -m venv mlir_venv
source mlir_venv/bin/activate
```

Or, if you want to switch over multiple versions of Python using conda, you can create a conda environment with Python 3.10.
Or, if you want to switch over multiple versions of Python using conda, you can create a conda environment with Python 3.11.
```shell
conda create -n torch-mlir python=3.10
conda create -n torch-mlir python=3.11
conda activate torch-mlir
python -m pip install --upgrade pip
```

Then, we can install torch-mlir with the corresponding torch and torchvision nightlies.
```
pip install --pre torch-mlir torchvision \
-f https://llvm.github.io/torch-mlir/package-index/
-f https://llvm.github.io/torch-mlir/package-index/ \
--extra-index-url https://download.pytorch.org/whl/nightly/cpu
```
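
A quick way to sanity-check the installed snapshot is to lower a trivial module. This is a sketch that assumes the `torch_mlir.compile` entry point shown in the project's examples is available in the installed wheel.

```python
# Smoke test (sketch): lower a trivial module to the linalg-on-tensors
# backend contract. Assumes the torch_mlir.compile API from the examples.
import torch
import torch_mlir

class AddOne(torch.nn.Module):
    def forward(self, x):
        return x + 1

compiled = torch_mlir.compile(AddOne(), torch.ones(3), output_type="linalg-on-tensors")
print(compiled)
```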

3 changes: 2 additions & 1 deletion build_tools/autogen_ltc_backend.py
@@ -467,7 +467,8 @@ def gen_fallback_code(*args, **kwargs):
node_base="torch::lazy::TorchMlirNode",
node_base_hdr=str(self.backend_path.joinpath("mlir_node.h")),
tensor_class=self.tensor_class,
tensor_class_hdr="torch/csrc/lazy/core/tensor.h",
tensor_class_hdr="torch_mlir/csrc/base_lazy_backend/tensor.h",
create_aten_from_ltc_tensor="CreateFunctionalizedAtenFromLtcTensor",
shape_inference_hdr=str(self.generated_path.joinpath("shape_inference.h")),
lazy_ir_generator=GenMlirLazyIr,
)
56 changes: 17 additions & 39 deletions build_tools/autogen_ltc_backend.yaml
@@ -1,16 +1,7 @@
blacklist:
# List of unsupported ops in LTC autogen because of some error
- _index_put_impl_ # Error: TODO not sure if there are other valid types to handle here
- _index_put_impl # Error: TODO not sure if there are other valid types to handle here
- empty_like # Error: TODO add support for type BaseType(name=<BaseTy.MemoryFormat: 12>)
- index.Tensor # Error: TODO not sure if there are other valid types to handle here
- index_put # Error: TODO not sure if there are other valid types to handle here
- index_put_ # Error: TODO not sure if there are other valid types to handle here

# Ops with list of tensors output
- split.Tensor
- unbind.int
- chunk
# Disabled in favour of `aten::index_put` which supports optional indices via `hacked_twin` JIT hack.
# It also doesn't have confusing `unsafe` argument.
- _index_put_impl

# Additional ops which autogen is supported for but don't compile yet
- _convolution
@@ -21,48 +12,34 @@ blacklist:

# Disabled for consistency with TS backend
- lift_fresh_copy
- new_empty
- rsub
- slice.Tensor # Disabled in favour of slice_copy.Tensor
- zeros
- ones
- arange
- arange.start
- arange.start_step
- fill.Scalar
- scalar_tensor

# Disabled in favour of functionalized alternatives
- _reshape_alias
- expand
- permute
- select.int
- squeeze
- squeeze.dim
- t
- transpose.int
- expand
- squeeze
- unsqueeze
- view
- slice.Tensor
- split.Tensor
- split_with_sizes
- unbind.int

whitelist:
# Enabled for consistency with TS backend
- arange.start_out

# List of ops to autogen even if not supported by Torch-MLIR explicitly
#- split_copy.Tensor
#- split_with_sizes_copy
#- unbind_copy.int

# List of supported ops that we don't want to do the full codegen for
supported:
# - bernoulli
# - bernoulli_
- _to_copy
- clone
- empty.memory_format
- empty_strided
- fill_.Scalar
- _unsafe_view
- unbind_copy.int
- split_copy.Tensor
- split_with_sizes_copy
- index.Tensor
- index_put

# ops required for functionalization
- lift
@@ -83,20 +60,21 @@ supported:
- _trilinear
- linalg_pinv.atol_rtol_tensor
- logsumexp.out
- t

# List of ops that will take in symints for the size instead of ints
symint:
- empty.memory_format
- new_empty_strided
- expand_copy
- narrow_copy
- slice_backward
- slice_copy.Tensor
- split_copy.Tensor
- slice_scatter
- view
- view_copy
- as_strided_copy
- as_strided_scatter
- split_with_sizes_copy


additional_ops: