From d9574cd2884945c5d7d2eb64ce8197baef00023f Mon Sep 17 00:00:00 2001
From: Matthias Gehre
Date: Tue, 17 Dec 2024 00:20:54 +0100
Subject: [PATCH 1/2] ci.yml: Really disable fail-fast

---
 .github/workflows/ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index b64a7f31bac2..694a1e49d2f9 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -19,7 +19,7 @@ jobs:
   build-test-linux:
     strategy:
       # AMD: Disable fail-fast to see whether failures are different between stable & nightly
-      # fail-fast: true
+      fail-fast: false
       matrix:
         torch-version: [nightly, stable]
     name: Build and Test (Linux, torch-${{ matrix.torch-version }}, assertions)

From bf6d39e4fdf5906859bc0233089626864aefb395 Mon Sep 17 00:00:00 2001
From: Matthias Gehre
Date: Tue, 17 Dec 2024 00:28:00 +0100
Subject: [PATCH 2/2] Fix xfail on feature branch

Tests started failing due to logical merge conflict
---
 projects/pt1/e2e_testing/xfail_sets.py | 23 +++++++----------------
 1 file changed, 7 insertions(+), 16 deletions(-)

diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py
index faea02d78352..694dbe8a3746 100644
--- a/projects/pt1/e2e_testing/xfail_sets.py
+++ b/projects/pt1/e2e_testing/xfail_sets.py
@@ -36,13 +36,6 @@
     "PowIntIntModule_basic",
 }
 
-if torch_version_for_comparison() < version.parse("2.5.0.dev"):
-    # AttributeError: '_OpNamespace' 'aten' object has no attribute '_safe_softmax'
-    LINALG_XFAIL_SET = LINALG_XFAIL_SET | {
-        "SafeSoftmaxModule_basic",
-        "SafeSoftmaxNonNoneDtypeModule_basic",
-    }
-
 if torch_version_for_comparison() < version.parse("2.5.0.dev"):
     LINALG_XFAIL_SET = LINALG_XFAIL_SET | {
         # Error: 'torch.aten.scaled_dot_product_attention' op expected 8 operands, but found 7
@@ -2390,6 +2383,9 @@
     TOSA_PASS_SET
     | {
         ### Tests additionally passing in make_fx_tosa
+        "AdaptiveAvgPool1dStaticEvenMultiple_basic",
+        "ScaledDotProductAttentionBoolMaskModule_basic",
+        "ScaledDotProductAttentionDifferentDynamicCausalModule_basic",
         "ArgminIntModule_basic",
         "ArgminIntModule_multiple_mins",
         "ArgminModule_basic",
@@ -2444,7 +2440,6 @@
         "MaxPool1dStaticModule_basic",
         "AdaptiveAvgPool1dNonUnitOutputSizeStaticModule_basic",
         "AdaptiveAvgPool1dUnitOutputSizeStaticModule_basic",
-        "AdaptiveAvgPool1dStaticEvenMultiple_basic",
         "CosineSimilarityModule_basic",
         "NativeGroupNormBackwardModule_basic",
         "ReduceFrobeniusNormKeepDimModule_basic",
@@ -2452,6 +2447,7 @@
         "SliceWholeTensorModule_basic",
         "TensorFloatModule_basic",
         "TensorIntModule_basic",
+        "RepeatInterleaveSelfIntModule_basic",
         "AdaptiveAvgPool1dNonUnitOutputSizeStaticModule_basic",
         "AdaptiveAvgPool1dUnitOutputSizeStaticModule_basic",
         "TorchPrimLoopForLikeTensorArgModule_basic",
@@ -2469,14 +2465,12 @@
         "NormalizeModule_basic",
         "ReduceFrobeniusNormKeepDimModule_basic",
         "ReduceFrobeniusNormModule_basic",
-        "ScaledDotProductAttentionBoolMaskModule_basic",
         "SliceEndSleStartStaticModule_basic",
         "ViewSizeDimFollowedByCollapsedOnesModule_basic",
         "ViewSizeDimFollowedByExpandedOnesModule_basic",
         "ViewSizeDimLedAndFollowedByCollapsedOnesModule_basic",
         "ViewSizeDimLedByCollapsedOnesModule_basic",
         "ViewSizeFromOtherTensor_basic",
-        "RepeatInterleaveSelfIntModule_basic",
         "RenormModuleFloat32NegativeDim_basic",
         "RenormModuleFloat32_basic",
     }
@@ -2484,6 +2478,9 @@
    ### Test failing in make_fx_tosa but not in tosa
    # Dynamic shape, has extra unsupported broadcast ops
    "Matmul_3d",
+   # Unimplemented operator 'aten._index_put_impl_.hacked_twin'
+   "IndexPutImpl1DFloatNonAccumulateModule_basic",
+   "IndexPutImpl1DIntNonAccumulateModule_basic",
    # RuntimeError: The size of tensor a (7) must match the size of tensor b (3) at non-singleton dimension 1
    "Add_Module_basic",
    "Conv2dBiasNoPaddingModule_basic",
@@ -2494,8 +2491,6 @@
    "ElementwisePreluModule_basic",
    "ElementwisePreluStaticModule_basic",
    "ElementwiseLogSigmoidModule_basic",
-   "IndexPutImpl1DFloatNonAccumulateModule_basic",
-   "IndexPutImpl1DIntNonAccumulateModule_basic",
    # It appears that you're trying to get value out of a tracing tensor
    # failed to legalize operation 'torch.aten.rrelu_with_noise'
    "ElementwiseRreluEvalModule_basic",
@@ -2524,15 +2519,11 @@
     MAKE_FX_TOSA_PASS_SET = MAKE_FX_TOSA_PASS_SET | {
         "ScaledDotProductAttentionBoolMaskModule_basic",
         "ScaledDotProductAttentionDifferentModule_basic",
-        "ScaledDotProductAttentionDifferentDynamicCausalModule_basic",
         "ScaledDotProductAttentionMaskModule_basic",
         "ScaledDotProductAttentionSameModule_basic",
     }
 
 if torch_version_for_comparison() > version.parse("2.6.0.dev"):
-    MAKE_FX_TOSA_PASS_SET = MAKE_FX_TOSA_PASS_SET | {
-        "ScaledDotProductAttentionDifferentDynamicCausalModule_basic",
-    }
     MAKE_FX_TOSA_PASS_SET = MAKE_FX_TOSA_PASS_SET - {
         "ChunkListUnpackUneven_Module_basic",
         "ChunkListUnpack_Module_basic",