Pass bundles
Separates the existing passes into two distinct types:
  - passes - standard passes that transform IR
  - bundles - passes that have no custom logic and only run pass
              pipelines

Additionally, the existing bundles are moved to individual source files
to make repo navigation easier. Plus some minor cleanup.

The goal is to improve pipeline modularization and make it easier to
navigate available tools within TPP-MLIR.
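For illustration, the distinction can be sketched with plain MLIR pass infrastructure; this snippet is not part of the commit, and ExampleTransformPass / ExampleBundle are hypothetical names:

#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

// A standard pass: carries custom logic that transforms the IR itself.
struct ExampleTransformPass
    : PassWrapper<ExampleTransformPass, OperationPass<ModuleOp>> {
  void runOnOperation() override {
    getOperation()->walk([](Operation *) { /* custom rewriting goes here */ });
  }
};

// A bundle: no custom logic of its own, it only schedules existing passes.
struct ExampleBundle : PassWrapper<ExampleBundle, OperationPass<ModuleOp>> {
  void runOnOperation() override {
    OpPassManager pipeline("builtin.module");
    pipeline.addPass(createCanonicalizerPass());
    pipeline.addPass(createCSEPass());
    if (failed(runPipeline(pipeline, getOperation())))
      signalPassFailure();
  }
};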
adam-smnk committed Jun 4, 2024
1 parent 761f011 commit 146088a
Showing 23 changed files with 674 additions and 447 deletions.
25 changes: 25 additions & 0 deletions include/TPP/Bundles.h
@@ -0,0 +1,25 @@
//===- Bundles.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef TPP_BUNDLES_H
#define TPP_BUNDLES_H

#include "TPP/Passes.h"
#include "mlir/Pass/Pass.h"

namespace mlir {
namespace tpp {
#define GEN_PASS_DECL
#include "TPP/Bundles.h.inc"

#define GEN_PASS_REGISTRATION
#include "TPP/Bundles.h.inc"
} // namespace tpp
} // namespace mlir

#endif // TPP_BUNDLES_H
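
For context, Bundles.h.inc is the standard tablegen pass-declaration output, so a tool typically pulls in the registration hook before parsing pipelines. A minimal sketch; registerTppBundlePasses() assumes the usual register<GroupName>Passes() naming derived from the "-name TppBundle" group and is not verified against this commit:

#include "TPP/Bundles.h"

// Hypothetical driver setup: expose the bundle pass names (e.g. "tpp-mapping",
// "default-tpp-passes") to textual pipeline parsing and the pass registry.
static void registerBundlesForTool() {
  mlir::tpp::registerTppBundlePasses(); // generated name is an assumption
}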
163 changes: 163 additions & 0 deletions include/TPP/Bundles.td
@@ -0,0 +1,163 @@
//===- Bundles.td ------------------------------------------*- Tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef TPP_DIALECT_TPP_BUNDLES
#define TPP_DIALECT_TPP_BUNDLES

include "mlir/Pass/PassBase.td"

def DefaultPipeline : Pass<"default-pipeline", "ModuleOp"> {
let summary = "The default compiler lowering pipeline";
let description = [{
A collection of passes that lower everything to MLIR LLVM IR.
}];
let options = [
Option<"gpuBackend", "gpu", "std::string",
/*default=*/"\"\"",
"Optional target GPU backend.">,
];
}

def DefaultTppPasses : Pass<"default-tpp-passes", "ModuleOp"> {
let summary = "Collection of default TPP passes";
let description = [{
A collection of passes that lower everything TPP-related
to standard low-level dialects.
}];
let options= [
Option<"linalgToLoops", "linalg-to-loops",
"bool", /*default=*/"false",
"Skip all TPP transformations. Lower linalg directly to loops.">,
ListOption<"parallelTaskGrid", "parallel-task-grid",
"unsigned", "Grid-sizes for parallel tasks.">

];
}

def TppMapping : Pass<"tpp-mapping", "ModuleOp"> {
let summary = "Map operations to be TPP compatible";
let description = [{
Apply a collection of TPP rewriting passes to map eligible operations
into equivalent TPP-compatible forms.
}];
let dependentDialects = ["linalg::LinalgDialect",
"memref::MemRefDialect",
"scf::SCFDialect",
"tensor::TensorDialect"];
}

def LinalgLowering : Pass<"linalg-lowering", "func::FuncOp"> {
let summary = "Lower Linalg operations to XSMM operations.";
let dependentDialects = ["xsmm::XsmmDialect",
"scf::SCFDialect",
"memref::MemRefDialect"];
}

def LowLevelParallelization : Pass<"low-level-parallel", "ModuleOp"> {
let summary = "Low level parallelization (multi-threading, AMX config).";
let dependentDialects = ["affine::AffineDialect",
"arith::ArithDialect",
"func::FuncDialect",
"memref::MemRefDialect",
"scf::SCFDialect",
"xsmm::XsmmDialect",
"LLVM::LLVMDialect"];
let options = [
ListOption<"parallelTaskGrid", "parallel-task-grid",
"unsigned", "Grid-sizes for parallel tasks.">

];
}

def LocalDialectsLowering : Pass<"lower-local-dialects", "ModuleOp"> {
let summary = "Lower all local dialects (XSMM, check etc.).";
let dependentDialects = ["affine::AffineDialect",
"arith::ArithDialect",
"func::FuncDialect",
"memref::MemRefDialect",
"check::CheckDialect",
"perf::PerfDialect",
"scf::SCFDialect",
"tensor::TensorDialect",
"xsmm::XsmmDialect",
"LLVM::LLVMDialect"];
}

def Postprocessing : Pass<"postprocess", "func::FuncOp"> {
let summary = "IR postprocessing pass";
let description = [{
Apply various postprocessing passes such as parallel loop fusion,
buffer deallocation, general cleanup etc.
}];
let dependentDialects = ["bufferization::BufferizationDialect",
"scf::SCFDialect",
"memref::MemRefDialect"];
}

def Cleanup : Pass<"cleanup"> {
let summary = "General IR cleanup e.g., canonicalization, CSE etc.";
}

def GpuPipeline : Pass<"gpu-pipeline", "ModuleOp"> {
let summary = "Lower all eligible operations into GPU compatible IR";
let options = [
Option<"gpuBackend", "gpu", "std::string",
/*default=*/"\"cuda\"",
"Target GPU backend for lowering (cuda).">,
];
}

def GpuConversion : Pass<"gpu-conversion", "ModuleOp"> {
let summary = "Convert operations to GPU";
let description = [{
Convert all eligible operations into generic GPU operations.
}];
let options = [
Option<"useWmma", "wmma",
"bool", /*default=*/"false",
"Use WMMA operations">,
ListOption<"warpTile", "warp-tile", "int64_t", "Warp tile sizes MxNxK">,
];
let dependentDialects = ["linalg::LinalgDialect",
"scf::SCFDialect",
"memref::MemRefDialect",
"gpu::GPUDialect"];
}

def GpuToCuda : Pass<"gpu-to-cuda", "ModuleOp"> {
let summary = "Lower generic GPU operations to CUDA backend";
let dependentDialects = ["affine::AffineDialect",
"arith::ArithDialect",
"memref::MemRefDialect",
"scf::SCFDialect",
"gpu::GPUDialect",
"NVVM::NVVMDialect",
"nvgpu::NVGPUDialect"];
let options = [
Option<"gpuTriple", "triple", "std::string",
/*default=*/"\"nvptx64-nvidia-cuda\"",
"GPU target triple.">,
Option<"gpuChip", "chip", "std::string",
/*default=*/"\"sm_70\"",
"GPU target architecture.">,
Option<"gpuFeatures", "features", "std::string",
/*default=*/"\"+ptx60\"",
"GPU target features.">,
];
}

def GpuToVulkan : Pass<"gpu-to-vulkan", "ModuleOp"> {
let summary = "Lower generic GPU operations to Vulkan backend";
let dependentDialects = ["arith::ArithDialect",
"memref::MemRefDialect",
"func::FuncDialect",
"gpu::GPUDialect",
"spirv::SPIRVDialect"];
}

#endif // TPP_DIALECT_TPP_BUNDLES
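
As a usage sketch (not taken from this commit), each definition above yields a pass factory and the quoted command-line name, so bundles compose like ordinary passes. The create* factory names below follow common tablegen conventions and are assumptions to verify against the generated Bundles.h.inc:

#include "TPP/Bundles.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassManager.h"

// Hypothetical pipeline assembly from the bundle definitions above.
static void buildExampleTppPipeline(mlir::PassManager &pm) {
  // Map eligible ops into TPP-compatible forms at module scope...
  pm.addPass(mlir::tpp::createTppMapping());
  // ...then lower Linalg to XSMM within each function.
  pm.addNestedPass<mlir::func::FuncOp>(mlir::tpp::createLinalgLowering());
}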
5 changes: 5 additions & 0 deletions include/TPP/CMakeLists.txt
@@ -4,3 +4,8 @@ set(LLVM_TARGET_DEFINITIONS Passes.td)
mlir_tablegen(Passes.h.inc -gen-pass-decls -name TppCompiler)
add_public_tablegen_target(TPPCompilerPassIncGen)
add_mlir_doc(Passes TppCompilerPasses ./ -gen-pass-doc)

set(LLVM_TARGET_DEFINITIONS Bundles.td)
mlir_tablegen(Bundles.h.inc -gen-pass-decls -name TppBundle)
add_public_tablegen_target(TPPCompilerBundleIncGen)
add_mlir_doc(Bundles TppCompilerBundles ./ -gen-pass-doc)
21 changes: 17 additions & 4 deletions include/TPP/PassUtils.h
@@ -18,13 +18,26 @@
namespace mlir {
namespace tpp {

// Helper base class for passes that call and manage combination of other
// Helper base class for bundle passes that call and manage combination of other
// existing passes.
template <typename OpT> class UtilityPassBase {
template <typename OpT = void> class PassBundle {
public:
UtilityPassBase()
PassBundle()
: pm(OpT::getOperationName(), mlir::OpPassManager::Nesting::Implicit){};
virtual ~UtilityPassBase() = default;
virtual ~PassBundle() = default;

protected:
OpPassManager pm;

// Create the pass processing pipeline.
virtual void constructPipeline() = 0;
};

// Pass bundle specialization without anchor operation type.
template <> class PassBundle<void> {
public:
PassBundle() : pm(mlir::OpPassManager::Nesting::Implicit){};
virtual ~PassBundle() = default;

protected:
OpPassManager pm;
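To show how the renamed PassBundle helper is meant to be used, here is a hypothetical bundle that derives from it (the real bundles in this commit combine it with their tablegen-generated pass bases; ExampleCleanupBundle and its PassWrapper base are illustrative only):

#include "TPP/PassUtils.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

// Hypothetical bundle: no rewriting logic of its own, it only builds and
// replays the cached member pipeline `pm` inherited from PassBundle.
struct ExampleCleanupBundle
    : PassWrapper<ExampleCleanupBundle, OperationPass<ModuleOp>>,
      tpp::PassBundle<ModuleOp> {
  void runOnOperation() override {
    if (pm.size() == 0)
      constructPipeline();
    if (failed(runPipeline(pm, getOperation())))
      signalPassFailure();
  }

private:
  void constructPipeline() override {
    pm.addPass(createCanonicalizerPass());
    pm.addPass(createCSEPass());
  }
};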
126 changes: 0 additions & 126 deletions include/TPP/Passes.td
@@ -194,23 +194,6 @@ def CombineXsmmOpPass : Pass<"combine-xsmm-op-optimization", "func::FuncOp"> {

}


def DefaultTppPasses : Pass<"default-tpp-passes", "ModuleOp"> {
let summary = "Collection of default TPP passes";
let description = [{
A collection of passes that lower everything TPP-related
to standard low-level dialects.
}];
let options= [
Option<"linalgToLoops", "linalg-to-loops",
"bool", /*default=*/"false",
"Skip all TPP transformations. Lower linalg directly to loops.">,
ListOption<"parallelTaskGrid", "parallel-task-grid",
"unsigned", "Grid-sizes for parallel tasks.">

];
}

def PropagatePackUnPack : Pass<"propagate-pack-and-unpack", "func::FuncOp"> {
let summary = "Propagate tensor.pack and tensor.unpack";
let description = [{
@@ -276,62 +259,6 @@ def DuplicateFill : Pass<"duplicate-fill", "func::FuncOp"> {
let dependentDialects = [ "linalg::LinalgDialect" ];
}

def Cleanup : Pass<"cleanup", "func::FuncOp"> {
let summary = "General IR cleanup e.g., canonicalization, CSE etc.";
}

def LowLevelParallelization : Pass<"low-level-parallel", "ModuleOp"> {
let summary = "Low level parallelization (multi-threading, AMX config).";
let dependentDialects = ["affine::AffineDialect",
"arith::ArithDialect",
"func::FuncDialect",
"memref::MemRefDialect",
"scf::SCFDialect",
"xsmm::XsmmDialect",
"LLVM::LLVMDialect"];
let options = [
ListOption<"parallelTaskGrid", "parallel-task-grid",
"unsigned", "Grid-sizes for parallel tasks.">

];
}

def LocalDialectsLowering : Pass<"lower-local-dialects", "ModuleOp"> {
let summary = "Lower all local dialects (XSMM, check etc.).";
let dependentDialects = ["affine::AffineDialect",
"arith::ArithDialect",
"func::FuncDialect",
"memref::MemRefDialect",
"check::CheckDialect",
"perf::PerfDialect",
"scf::SCFDialect",
"tensor::TensorDialect",
"xsmm::XsmmDialect",
"LLVM::LLVMDialect"];
}

def Postprocessing : Pass<"postprocess", "func::FuncOp"> {
let summary = "IR postprocessing pass";
let description = [{
Apply various postprocessing passes such parallel loop fusion,
buffer deallocation, general cleanup etc.
}];
}

def TppMapping : Pass<"tpp-mapping", "ModuleOp"> {
let summary = "Map operations to be TPP compatible";
let description = [{
Apply collection of TPP rewriting passes to map eligble operations
into equivalent TPP-compatible forms.
}];
}

def LinalgLowering : Pass<"linalg-lowering", "func::FuncOp"> {
let summary = "Lower Linalg operations to XSMM operations.";
let dependentDialects = ["xsmm::XsmmDialect", "scf::SCFDialect",
"memref::MemRefDialect"];
}

def ConvertForAllToParallelOp : Pass<"convert-forall-to-parallel",
"func::FuncOp"> {
let summary = "Convert scf.forall to scf.parallel";
@@ -340,64 +267,11 @@ def ConvertForAllToParallelOp : Pass<"convert-forall-to-parallel",
}];
}

def GpuPipeline : Pass<"gpu-pipeline", "ModuleOp"> {
let summary = "Lower all eligible operations into GPU compatible IR";
let options = [
Option<"gpuBackend", "gpu", "std::string",
/*default=*/"\"cuda\"",
"Target GPU backend for lowering (cuda).">,
];
}

def GpuConversion : Pass<"gpu-conversion", "ModuleOp"> {
let summary = "Convert operations to GPU";
let description = [{
Convert all eligble operations into generic GPU operations.
}];
let options = [
Option<"useWmma", "wmma",
"bool", /*default=*/"false",
"Use WMMA operations">,
ListOption<"warpTile", "warp-tile", "int64_t", "Warp tile sizes MxNxK">,
];
}

def GpuToCuda : Pass<"gpu-to-cuda", "ModuleOp"> {
let summary = "Lower generic GPU operations to CUDA backend";
let options = [
Option<"gpuTriple", "triple", "std::string",
/*default=*/"\"nvptx64-nvidia-cuda\"",
"GPU target triple.">,
Option<"gpuChip", "chip", "std::string",
/*default=*/"\"sm_70\"",
"GPU target architecture.">,
Option<"gpuFeatures", "features", "std::string",
/*default=*/"\"+ptx60\"",
"GPU target features.">,
];
}

def GpuToVulkan : Pass<"gpu-to-vulkan", "ModuleOp"> {
let summary = "Lower generic GPU operations to Vulkan backend";
}

def LinalgDeGeneralize : Pass<"linalg-degeneralize-generic-ops", "func::FuncOp"> {
let summary = "Convert generic ops into named ops";
let dependentDialects = ["linalg::LinalgDialect"];
}

def DefaultPipeline : Pass<"default-pipeline", "ModuleOp"> {
let summary = "The default compiler lowering pipeline";
let description = [{
A collection of passes that lower everything to MLIR LLVM IR.
}];
let options = [
Option<"gpuBackend", "gpu", "std::string",
/*default=*/"\"\"",
"Optional target GPU backend.">,
];
}

def SetSPIRVCapabilities : Pass<"tpp-set-spirv-capabilities", "ModuleOp"> {
let summary = "Set SPIR-V capabilities.";
let options = [