From b66f3205367e4660e3619f25dff1b42f16701d02 Mon Sep 17 00:00:00 2001
From: Yi LIU
Date: Mon, 16 Feb 2026 23:40:23 +0800
Subject: [PATCH] Precompute: Add missing memory order check to visitArrayGet

visitStructGet correctly checks the MemoryOrder before allowing
precomputation of immutable fields (blocking SeqCst gets, and AcqRel gets
on shared data), but visitArrayGet skips this check entirely. As a result,
an array.atomic.get of an immutable element with SeqCst ordering, or with
AcqRel ordering on a shared array, could be folded to a constant, silently
dropping its synchronization semantics.

Add the same MemoryOrder switch to visitArrayGet, matching the pattern in
visitStructGet, and extend the existing precompute-gc-atomics test with
array versions of all the struct atomic get test cases.
---
Note for reviewers: a minimal standalone reproduction sketch follows the
diff.

 src/passes/Precompute.cpp                  | 29 +++++--
 test/lit/passes/precompute-gc-atomics.wast | 99 ++++++++++++++++++++++
 2 files changed, 122 insertions(+), 6 deletions(-)

diff --git a/src/passes/Precompute.cpp b/src/passes/Precompute.cpp
index 02cf27a26b6..274689c0167 100644
--- a/src/passes/Precompute.cpp
+++ b/src/passes/Precompute.cpp
@@ -185,12 +185,29 @@ class PrecomputingExpressionRunner
   }
   Flow visitArraySet(ArraySet* curr) { return Flow(NONCONSTANT_FLOW); }
   Flow visitArrayGet(ArrayGet* curr) {
-    if (curr->ref->type != Type::unreachable && !curr->ref->type.isNull()) {
-      // See above with struct.get
-      auto element = curr->ref->type.getHeapType().getArray().element;
-      if (element.mutable_ == Immutable) {
-        return Super::visitArrayGet(curr);
-      }
+    if (curr->ref->type == Type::unreachable || curr->ref->type.isNull()) {
+      return Flow(NONCONSTANT_FLOW);
+    }
+    switch (curr->order) {
+      case MemoryOrder::Unordered:
+        // This can always be precomputed.
+        break;
+      case MemoryOrder::SeqCst:
+        // This can never be precomputed away because it synchronizes with other
+        // threads.
+        return Flow(NONCONSTANT_FLOW);
+      case MemoryOrder::AcqRel:
+        // This synchronizes only with writes to the same data, so it can still
+        // be precomputed if the data is not shared with other threads.
+        if (curr->ref->type.getHeapType().isShared()) {
+          return Flow(NONCONSTANT_FLOW);
+        }
+        break;
+    }
+    // See above with struct.get
+    auto element = curr->ref->type.getHeapType().getArray().element;
+    if (element.mutable_ == Immutable) {
+      return Super::visitArrayGet(curr);
     }
     // Otherwise, we've failed to precompute.
     return Flow(NONCONSTANT_FLOW);
diff --git a/test/lit/passes/precompute-gc-atomics.wast b/test/lit/passes/precompute-gc-atomics.wast
index 1f2d07753b4..37e1fd4ad3a 100644
--- a/test/lit/passes/precompute-gc-atomics.wast
+++ b/test/lit/passes/precompute-gc-atomics.wast
@@ -5,9 +5,13 @@
 (module
   ;; CHECK:      (type $shared (shared (struct (field i32))))
   (type $shared (shared (struct (field i32))))
+  ;; CHECK:      (type $shared-array (shared (array i32)))
+
   ;; CHECK:      (type $unshared (struct (field i32)))
   (type $unshared (struct (field i32)))
+  ;; CHECK:      (type $unshared-array (array i32))
+
   ;; CHECK:      (func $get-unordered-unshared (type $0) (result i32)
   ;; CHECK-NEXT:  (i32.const 0)
   ;; CHECK-NEXT: )
@@ -69,4 +73,99 @@
       (struct.new_default $shared)
     )
   )
+
+  ;; Array versions of the same tests. These should behave the same as the
+  ;; struct versions above.
+
+  (type $shared-array (shared (array i32)))
+  (type $unshared-array (array i32))
+
+  ;; CHECK:      (func $array-get-unordered-unshared (type $0) (result i32)
+  ;; CHECK-NEXT:  (i32.const 0)
+  ;; CHECK-NEXT: )
+  (func $array-get-unordered-unshared (result i32)
+    (array.get $unshared-array
+      (array.new_default $unshared-array
+        (i32.const 1)
+      )
+      (i32.const 0)
+    )
+  )
+
+  ;; CHECK:      (func $array-get-unordered-shared (type $0) (result i32)
+  ;; CHECK-NEXT:  (i32.const 0)
+  ;; CHECK-NEXT: )
+  (func $array-get-unordered-shared (result i32)
+    (array.get $shared-array
+      (array.new_default $shared-array
+        (i32.const 1)
+      )
+      (i32.const 0)
+    )
+  )
+
+  ;; CHECK:      (func $array-get-seqcst-unshared (type $0) (result i32)
+  ;; CHECK-NEXT:  (array.atomic.get $unshared-array
+  ;; CHECK-NEXT:   (array.new_default $unshared-array
+  ;; CHECK-NEXT:    (i32.const 1)
+  ;; CHECK-NEXT:   )
+  ;; CHECK-NEXT:   (i32.const 0)
+  ;; CHECK-NEXT:  )
+  ;; CHECK-NEXT: )
+  (func $array-get-seqcst-unshared (result i32)
+    (array.atomic.get seqcst $unshared-array
+      (array.new_default $unshared-array
+        (i32.const 1)
+      )
+      (i32.const 0)
+    )
+  )
+
+  ;; CHECK:      (func $array-get-seqcst-shared (type $0) (result i32)
+  ;; CHECK-NEXT:  (array.atomic.get $shared-array
+  ;; CHECK-NEXT:   (array.new_default $shared-array
+  ;; CHECK-NEXT:    (i32.const 1)
+  ;; CHECK-NEXT:   )
+  ;; CHECK-NEXT:   (i32.const 0)
+  ;; CHECK-NEXT:  )
+  ;; CHECK-NEXT: )
+  (func $array-get-seqcst-shared (result i32)
+    (array.atomic.get seqcst $shared-array
+      (array.new_default $shared-array
+        (i32.const 1)
+      )
+      (i32.const 0)
+    )
+  )
+
+  ;; CHECK:      (func $array-get-acqrel-unshared (type $0) (result i32)
+  ;; CHECK-NEXT:  (i32.const 0)
+  ;; CHECK-NEXT: )
+  (func $array-get-acqrel-unshared (result i32)
+    ;; We can optimize this because acquire-release on unshared data does not
+    ;; synchronize with anything.
+    (array.atomic.get acqrel $unshared-array
+      (array.new_default $unshared-array
+        (i32.const 1)
+      )
+      (i32.const 0)
+    )
+  )
+
+  ;; CHECK:      (func $array-get-acqrel-shared (type $0) (result i32)
+  ;; CHECK-NEXT:  (array.atomic.get acqrel $shared-array
+  ;; CHECK-NEXT:   (array.new_default $shared-array
+  ;; CHECK-NEXT:    (i32.const 1)
+  ;; CHECK-NEXT:   )
+  ;; CHECK-NEXT:   (i32.const 0)
+  ;; CHECK-NEXT:  )
+  ;; CHECK-NEXT: )
+  (func $array-get-acqrel-shared (result i32)
+    (array.atomic.get acqrel $shared-array
+      (array.new_default $shared-array
+        (i32.const 1)
+      )
+      (i32.const 0)
+    )
+  )
 )
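
Reviewer note (not part of the patch): a minimal standalone reproduction
sketch. The file name, export name, and exact wasm-opt flags below are
illustrative, and it assumes a build with the shared-everything feature
available (enabled here via -all). Before this change, --precompute could
fold the shared acqrel read below into (i32.const 0); with the change,
Precompute sees that the array type is shared and conservatively leaves the
read intact, exactly as the new $array-get-acqrel-shared test checks.

  ;; repro.wat (illustrative)
  (module
    ;; A shared array type with immutable i32 elements, mirroring the test.
    (type $a (shared (array i32)))

    (func (export "read") (result i32)
      ;; An acquire-release read of element 0 of a freshly allocated shared
      ;; array. The array type is shared, so Precompute must not fold this.
      (array.atomic.get acqrel $a
        (array.new_default $a
          (i32.const 1)
        )
        (i32.const 0)
      )
    )
  )

Running something like "wasm-opt repro.wat -all --precompute -S -o -"
should now print the array.atomic.get unchanged, while the unordered and
unshared acqrel variants in the test file are still folded to constants.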