From 7371c8df2b2e1c72fb79bc67a95f8be1b9b028f3 Mon Sep 17 00:00:00 2001
From: "Rickert, Jonas"
Date: Tue, 7 Jan 2025 16:02:44 +0000
Subject: [PATCH] Fix typos

---
 src/Dialect/ONNX/Transforms/Decompose.cpp | 6 +++---
 test/mlir/onnx/onnx_decompose.mlir        | 8 ++++----
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/Dialect/ONNX/Transforms/Decompose.cpp b/src/Dialect/ONNX/Transforms/Decompose.cpp
index 4cb6ba8a92..38a0576f48 100644
--- a/src/Dialect/ONNX/Transforms/Decompose.cpp
+++ b/src/Dialect/ONNX/Transforms/Decompose.cpp
@@ -856,7 +856,7 @@ class IndicesContiguousCounter {
 } // namespace
 
 // Decomposes ScatterNDs into a single Split and Concat.
-// We can always split an ScatterNDs by splitting the input tensor together with
+// We can always split ScatterNDs by splitting the input tensor together with
 // the indices and their updates belonging to that part of the input tensor,
 // performing the ScatterNDs on each split, and the concatenating the result.
 // Here, we handle certain ScatterNDs where after splitting them into three,
@@ -998,8 +998,8 @@ struct DecomposeScatterNDPattern : public OpRewritePattern {
   // -- The expected index is calculated the following way:
   // --- The expected index is initialized with the first index in indices and
   // then always incremented by one.
-  // --- The increment works like an manual addition, the least significant
-  // digit/subindex gets incremented by one. If an digit overflows, it
+  // --- The increment works like a manual addition, the least significant
+  // digit/subindex gets incremented by one. If a digit overflows, it
   // gets reset to the first index and the addition carries to the next,
   // more significant digit. The addition overflows, if the index for an
   // axis is equal to the size of this axis in updates/indices.
diff --git a/test/mlir/onnx/onnx_decompose.mlir b/test/mlir/onnx/onnx_decompose.mlir
index 9bd8b3ffe4..b6ec4282e7 100644
--- a/test/mlir/onnx/onnx_decompose.mlir
+++ b/test/mlir/onnx/onnx_decompose.mlir
@@ -1148,21 +1148,21 @@ func.func @test_scatter_nd_dynamic(%data : tensor<*xf32>, %updates : tensor<1x1x
 // CHECK: onnx.ScatterND
 
 // -----
-func.func @test_scatter_nd_mulit_dim_differ(%data : tensor<2x6x10x12xf32>, %updates : tensor<1x1x10x12xf32> ) -> tensor<2x6x10x12xf32> {
+func.func @test_scatter_nd_multi_dim_differ(%data : tensor<2x6x10x12xf32>, %updates : tensor<1x1x10x12xf32> ) -> tensor<2x6x10x12xf32> {
   %indices = onnx.Constant dense<[[[[0, 1, 0], [0, 1, 1], [0, 1, 2], [0, 1, 3], [0, 1, 4], [0, 1, 5], [0, 1, 6], [0, 1, 7], [0, 1, 8], [0, 1, 9]]]]> : tensor<1x1x10x3xi64>
   %0 = "onnx.ScatterND"(%data, %indices, %updates) {reduction = "none"} : (tensor<2x6x10x12xf32>, tensor<1x1x10x3xi64>, tensor<1x1x10x12xf32>) -> tensor<2x6x10x12xf32>
   onnx.Return %0 : tensor<2x6x10x12xf32>
 }
-// CHECK-LABEL: func.func @test_scatter_nd_mulit_dim_differ
+// CHECK-LABEL: func.func @test_scatter_nd_multi_dim_differ
 // CHECK: onnx.ScatterND
 
 // -----
-func.func @test_scatter_nd_mulit_dim_differ_multi_shift(%data : tensor<2x6x10x12xf32>, %updates : tensor<1x1x10x12xf32> ) -> tensor<2x6x10x12xf32> {
+func.func @test_scatter_nd_multi_dim_differ_multi_shift(%data : tensor<2x6x10x12xf32>, %updates : tensor<1x1x10x12xf32> ) -> tensor<2x6x10x12xf32> {
   %indices = onnx.Constant dense<[[[[1, 1, 0], [1, 1, 1], [1, 1, 2], [1, 1, 3], [1, 1, 4], [1, 1, 5], [1, 1, 6], [1, 1, 7], [1, 1, 8], [1, 1, 9]]]]> : tensor<1x1x10x3xi64>
   %0 = "onnx.ScatterND"(%data, %indices, %updates) {reduction = "none"} : (tensor<2x6x10x12xf32>, tensor<1x1x10x3xi64>, tensor<1x1x10x12xf32>) -> tensor<2x6x10x12xf32>
   onnx.Return %0 : tensor<2x6x10x12xf32>
 }
-// CHECK-LABEL: func.func @test_scatter_nd_mulit_dim_differ_multi_shift
+// CHECK-LABEL: func.func @test_scatter_nd_multi_dim_differ_multi_shift
 // CHECK: onnx.ScatterND
 
 // -----
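
For reference, the carry-style increment described in the Decompose.cpp comment above can be sketched in a few lines of C++. This is a hypothetical illustration, not the onnx-mlir implementation: the name incrementIndex and the axisSizes parameter are invented, and for simplicity an overflowing digit is reset to zero rather than to the first index of that axis as the comment describes.

#include <cstdint>
#include <vector>

// Bump the least significant digit/subindex by one; when a digit reaches the
// size of its axis it overflows, is reset, and the carry moves to the next,
// more significant digit. Returns false once the most significant digit
// overflows, i.e. the whole index space has been walked.
static bool incrementIndex(
    std::vector<int64_t> &index, const std::vector<int64_t> &axisSizes) {
  for (size_t axis = index.size(); axis-- > 0;) {
    if (++index[axis] < axisSizes[axis])
      return true;   // No overflow on this digit; the increment is done.
    index[axis] = 0; // Digit overflowed (simplified: reset to zero).
  }
  return false;      // Carry out of the most significant digit.
}

Checking contiguity then amounts to comparing each index read from the indices tensor against this running expected index, which is roughly what the IndicesContiguousCounter comments above describe.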