Skip to content

Commit

Permalink
Merge pull request #170 from Xilinx/jose.cherry_pick_upstream_avgpool…
Browse files Browse the repository at this point in the history
…_fix

Fix lowering of torch.aten.avg_pool1d to linalg
  • Loading branch information
mgehre-amd authored Jun 10, 2024
2 parents c4ccde3 + 7ecee2a commit 4fceed9
Show file tree
Hide file tree
Showing 3 changed files with 19 additions and 14 deletions.
5 changes: 3 additions & 2 deletions lib/Conversion/TorchOnnxToTorch/DefaultDomainAtoF.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -306,7 +306,7 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
return success();
});
patterns.onOp(
"AveragePool", 19,
"AveragePool", 11,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
std::string autoPad;
SmallVector<int64_t> dilation;
Expand Down Expand Up @@ -357,7 +357,8 @@ void mlir::torch::onnx_c::populateDefaultDomainAtoF(
binder.op,
"padding list size does not match twice the number of axes");
}
if (binder.s64IntegerArrayAttr(strides, "strides", {1})) {
if (binder.s64IntegerArrayAttr(
strides, "strides", llvm::SmallVector<int64_t>(rank - 2, 1))) {
return failure();
}
if (strides.size() != 1 && strides.size() != rank - 2) {
Expand Down
18 changes: 16 additions & 2 deletions lib/Conversion/TorchToLinalg/Pooling.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -114,8 +114,22 @@ static Value padInputTensor(Operation *op, ConversionPatternRewriter &rewriter,
SmallVectorImpl<int64_t> &paddingInts,
Value initValue) {
SmallVector<int64_t> lowPaddingIncludingNC = {0, 0};
lowPaddingIncludingNC.append(paddingInts);
SmallVector<int64_t> highPaddingIncludingNC = lowPaddingIncludingNC;
SmallVector<int64_t> highPaddingIncludingNC = {0, 0};

unsigned selfRank = self.getType().cast<RankedTensorType>().getRank();
unsigned paddingIntsSize = paddingInts.size();

if (paddingIntsSize == 2 * (selfRank - 2)) {
// This condition being true means that the `paddingInts` contain separate
// values for low padding and high padding.
for (unsigned i = 0; i < paddingIntsSize / 2; i++)
lowPaddingIncludingNC.push_back(paddingInts[i]);
for (unsigned i = paddingIntsSize / 2; i < paddingIntsSize; i++)
highPaddingIncludingNC.push_back(paddingInts[i]);
} else {
lowPaddingIncludingNC.append(paddingInts);
highPaddingIncludingNC = lowPaddingIncludingNC;
}

if (ceilMode) {
for (int64_t i = 0; i < dimensionality; ++i) {
Expand Down
10 changes: 0 additions & 10 deletions projects/pt1/e2e_testing/xfail_sets.py
Original file line number Diff line number Diff line change
Expand Up @@ -2060,17 +2060,7 @@
"LinalgNormModule_basic",

# Failure - onnx_lowering: onnx.AveragePool
"AdaptiveAvgPool1dNonUnitOutputSizeStaticModule_basic",
"AdaptiveAvgPool1dStaticEvenMultiple_basic",
"AdaptiveAvgPool2dNonUnitOutputSizeStaticModule_basic",
"AvgPool1dFloatModule_basic",
"AvgPool1dIntModule_basic",
"AvgPool1dStaticModule_basic",
"AvgPool2dCeilModeTrueModule_basic",
"AvgPool2dDivisorOverrideModule_basic",
"AvgPool2dFloatModule_basic",
"AvgPool2dIntModule_basic",
"AvgPool2dStaticModule_basic",

# Failure - onnx_lowering: onnx.Cast
"BucketizeTensorOutInt32RightModule_basic",
Expand Down

0 comments on commit 4fceed9

Please sign in to comment.