Merge main to release #182

Merged: 27 commits, Jun 13, 2024
de52921
Remove emails
laurettaSchubert May 31, 2024
0858624
Add test for contract check skipping
cferry-AMD May 31, 2024
f43da4d
Add check for use-mlprogram=0
cferry-AMD Jun 3, 2024
24c1d2b
Review comments on no-mlprogram test
cferry-AMD Jun 3, 2024
da2d75d
Lower sin and cos to TOSA Ops
jorickert May 17, 2024
6771e04
Add tests for Torch to Linalg backend pipeline
cferry-AMD Jun 4, 2024
36e0151
Merge pull request #167 from Xilinx/jrickert.lower_sin_and_cos_to_TOSA
jorickert Jun 4, 2024
b59d267
Merge pull request #165 from Xilinx/lauretta.removeEmails
mgehre-amd Jun 4, 2024
7ecee2a
[MLIR][Torch] Fix OnnxToLinalg lowering for AvgPool op (#3076)
vivekkhandelwal1 Apr 1, 2024
d820b8b
Fix onnx.Pad lowering to torch
josel-amd Jun 7, 2024
f7fb950
Run CI on all PRs
mgehre-amd Jun 7, 2024
c5492c0
Merge pull request #173 from Xilinx/matthias.enable_ci_main
mgehre-amd Jun 7, 2024
bea5271
Merge branch 'feature/backport_ea1_ops' into matthias.onnx_resize
mgehre-amd Jun 7, 2024
e2f68bb
Merge pull request #175 from Xilinx/matthias.onnx_resize
mgehre-amd Jun 10, 2024
c4ccde3
Merge pull request #172 from Xilinx/jose.fix_onnx_pad
mgehre-amd Jun 10, 2024
4fceed9
Merge pull request #170 from Xilinx/jose.cherry_pick_upstream_avgpool…
mgehre-amd Jun 10, 2024
95537fd
Converts all Adaptive Pooling Ops to Linalg (#2808)
zjgarvey Mar 22, 2024
ce7d715
[MLIR][Torch] Add TorchToLinalg lowering for AtenAvgPool3dOp (#3030)
vivekkhandelwal1 Jun 4, 2024
862fcca
Disable AvgPool3d for StableHLO
josel-amd Jun 7, 2024
0f85717
Merge pull request #171 from Xilinx/jose.fix_avg_pool_3d
josel-amd Jun 10, 2024
487b277
Explicit error for onnx.Pad in reflect mode
josel-amd Jun 10, 2024
01658e1
Move the error to DecomposeComplexOps
josel-amd Jun 11, 2024
ce9edf9
Update test that was left behind
josel-amd Jun 11, 2024
34e9921
Merge release_rai_1_2 into HEAD
ljfitz Jun 11, 2024
281def6
Merge pull request #179 from Xilinx/liamf.release_rai_1_2_to_nightly_…
ljfitz Jun 11, 2024
90239fc
Merge pull request #176 from Xilinx/jose.explicit_error_for_reflect
mgehre-amd Jun 12, 2024
d7a881c
Merge pull request #181 from Xilinx/release_rai_1_2
mgehre-amd Jun 12, 2024
4 changes: 4 additions & 0 deletions include/torch-mlir/Conversion/TorchToLinalg/Utils.h
@@ -97,6 +97,10 @@ getBackendTypeForScalarType(MLIRContext *context,

bool isUnsignedTorchType(Type type);

LogicalResult permuteTensor(Operation *op, PatternRewriter &rewriter,
Location loc, SmallVector<int64_t> dimensions,
Value input, Value &result);

} // namespace torch_to_linalg
} // namespace torch
} // namespace mlir
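The `permuteTensor` declaration added above is the helper that the reworked `ConvertAtenPermuteOp` pattern in `lib/Conversion/TorchToLinalg/DataMovement.cpp` (further down in this diff) delegates to. A minimal sketch of a call site, assuming a conversion pattern with an `op`, a `rewriter`, and an already-converted input value; the permutation `{0, 2, 1}` is purely illustrative:

```cpp
// Sketch only: mirrors the call made in DataMovement.cpp below.
// `dims` is a hypothetical permutation; real callers pass the constant
// dimensions extracted from the aten.permute operand.
SmallVector<int64_t> dims = {0, 2, 1};
Value permuted;
if (failed(torch_to_linalg::permuteTensor(op, rewriter, op->getLoc(), dims,
                                          adaptor.getSelf(), permuted)))
  return rewriter.notifyMatchFailure(op,
                                     "failed to perform permutation of tensor");
// On success, `permuted` holds the transposed tensor produced by the
// linalg.generic that the helper emits.
```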
50 changes: 50 additions & 0 deletions include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
@@ -7181,6 +7181,31 @@ def Torch_Aten_AdaptiveAvgPool3dBackwardOp : Torch_Op<"aten._adaptive_avg_pool3d
}];
}

def Torch_AtenAdaptiveMaxPool1dOp : Torch_Op<"aten.adaptive_max_pool1d", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::adaptive_max_pool1d : (Tensor, int[]) -> (Tensor, Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self,
AnyTorchListOfTorchIntType:$output_size
);
let results = (outs
AnyTorchTensorType:$result0,
AnyTorchTensorType:$result1
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenAdaptiveMaxPool1dOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 2, 2);
}
void AtenAdaptiveMaxPool1dOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 2, 2);
}
}];
}

def Torch_AtenAdaptiveMaxPool2dOp : Torch_Op<"aten.adaptive_max_pool2d", [
AllowsTypeRefinement,
HasValueSemantics,
@@ -7206,6 +7231,31 @@ def Torch_AtenAdaptiveMaxPool2dOp : Torch_Op<"aten.adaptive_max_pool2d", [
}];
}

def Torch_AtenAdaptiveMaxPool3dOp : Torch_Op<"aten.adaptive_max_pool3d", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::adaptive_max_pool3d : (Tensor, int[]) -> (Tensor, Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self,
AnyTorchListOfTorchIntType:$output_size
);
let results = (outs
AnyTorchTensorType:$result0,
AnyTorchTensorType:$result1
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenAdaptiveMaxPool3dOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 2, 2);
}
void AtenAdaptiveMaxPool3dOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 2, 2);
}
}];
}

def Torch_AtenTopkOp : Torch_Op<"aten.topk", [
AllowsTypeRefinement,
HasValueSemantics,
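The two new op definitions above make `AtenAdaptiveMaxPool1dOp` and `AtenAdaptiveMaxPool3dOp` available as Torch dialect C++ classes with ODS-generated builders. A hedged sketch of how a lowering might construct the 1-D variant; the `loc`, `self`, result types, and the single output-size value below are assumptions for illustration, not code from this PR:

```cpp
// Hypothetical builder call. ODS-generated builders take the result types
// first, then the operands declared under `arguments` (self, output_size).
Value outputSizes = rewriter.create<Torch::PrimListConstructOp>(
    loc, Torch::ListType::get(Torch::IntType::get(op->getContext())),
    SmallVector<Value>{outputSizeDim});
auto pool = rewriter.create<Torch::AtenAdaptiveMaxPool1dOp>(
    loc, valuesType, indicesType, self, outputSizes);
Value pooledValues = pool->getResult(0); // pooled values
Value maxIndices = pool->getResult(1);   // indices of the maxima
```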
5 changes: 3 additions & 2 deletions lib/Conversion/TorchOnnxToTorch/DefaultDomainAtoF.cpp
@@ -306,7 +306,7 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
return success();
});
patterns.onOp(
"AveragePool", 19,
"AveragePool", 11,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
std::string autoPad;
SmallVector<int64_t> dilation;
@@ -357,7 +357,8 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
binder.op,
"padding list size does not match twice the number of axes");
}
if (binder.s64IntegerArrayAttr(strides, "strides", {1})) {
if (binder.s64IntegerArrayAttr(
strides, "strides", llvm::SmallVector<int64_t>(rank - 2, 1))) {
return failure();
}
if (strides.size() != 1 && strides.size() != rank - 2) {
13 changes: 2 additions & 11 deletions lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp
@@ -970,17 +970,8 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
}

if (!constantValue) {
auto dataTensorType = data.getType().cast<Torch::ValueTensorType>();
if (dataTensorType.getDtype().isa<IntegerType>())
constantValue = rewriter.create<Torch::ConstantIntOp>(
loc, rewriter.getI64IntegerAttr(0));
if (dataTensorType.getDtype().isa<FloatType>())
constantValue = rewriter.create<Torch::ConstantFloatOp>(
loc, rewriter.getF64FloatAttr(0.0f));

if (!constantValue)
return rewriter.notifyMatchFailure(
binder.op, "expected integer or float data tensor");
constantValue = rewriter.create<Torch::ConstantFloatOp>(
loc, rewriter.getF64FloatAttr(0.0f));
}

// Extract all the values of 1-D pad tensor and create a list of all
55 changes: 7 additions & 48 deletions lib/Conversion/TorchToLinalg/DataMovement.cpp
@@ -1457,56 +1457,15 @@ class ConvertAtenPermuteOp : public OpConversionPattern<AtenPermuteOp> {
return rewriter.notifyMatchFailure(op, "all dimensions must be constant");

Value inVector = adaptor.getSelf();
auto inType = inVector.getType().cast<RankedTensorType>();
int64_t inputRank = inType.getRank();
auto outType = getTypeConverter()
->convertType(op->getResult(0).getType())
.cast<RankedTensorType>();
Type elementType = inType.getElementType();

// Check if the dimensions are a valid constants.
int64_t numDimensions = dimensions.size();
if (inputRank != numDimensions)
Value result;
if (failed(torch_to_linalg::permuteTensor(op, rewriter, op->getLoc(),
dimensions, inVector, result)))
return rewriter.notifyMatchFailure(
op, "size of `dims` must be equal to the rank of the input");
for (unsigned i = 0; i < numDimensions; i++) {
if (dimensions[i] < 0)
dimensions[i] = toPositiveDim(dimensions[i], inputRank);
if (!isValidDim(dimensions[i], inputRank))
return rewriter.notifyMatchFailure(op, "dimension out of range");
}

Location loc = op.getLoc();

SmallVector<Value> outputDims;
for (unsigned i = 0; i < inputRank; i++)
outputDims.push_back(getDimOp(rewriter, loc, inVector, dimensions[i]));
op, "failed to perform permutation of tensor");

Value outVector = rewriter.create<tensor::EmptyOp>(
loc, getAsOpFoldResult(outputDims), elementType);
SmallVector<AffineExpr> idExprs;
SmallVector<AffineExpr> swapExprs;
for (unsigned i = 0; i < inputRank; i++)
idExprs.push_back(getAffineDimExpr(i, rewriter.getContext()));
for (unsigned i = 0; i < inputRank; i++)
swapExprs.push_back(idExprs[dimensions[i]]);

AffineMap inputMap =
AffineMap::get(inputRank, /*symbolCount=*/0, idExprs, op->getContext());
AffineMap outputMap = AffineMap::get(inputRank, /*symbolCount=*/0,
swapExprs, op->getContext());
SmallVector<AffineMap> indexingMaps{inputMap, outputMap};
SmallVector<utils::IteratorType> iteratorTypes(
inputRank, utils::IteratorType::parallel);
auto transpose = rewriter
.create<linalg::GenericOp>(
loc, outVector.getType(), inVector, outVector,
indexingMaps, iteratorTypes,
[](OpBuilder &b, Location loc, ValueRange args) {
b.create<linalg::YieldOp>(loc, args[0]);
})
.getResult(0);
rewriter.replaceOpWithNewOp<tensor::CastOp>(op, outType, transpose);
auto outType = cast<RankedTensorType>(
getTypeConverter()->convertType(op->getResult(0).getType()));
rewriter.replaceOpWithNewOp<tensor::CastOp>(op, outType, result);
return success();
}
};