From f12ac4c11bdb879bf0c21b813d1eee1a523ae7a5 Mon Sep 17 00:00:00 2001
From: Erik Eckstein
Date: Wed, 3 Feb 2021 15:46:32 +0100
Subject: [PATCH 1/4] SIL: add two utility functions to AccessPath

---
 include/swift/SIL/MemAccessUtils.h | 10 ++++++++++
 lib/SIL/Utils/MemAccessUtils.cpp   |  7 +++++++
 2 files changed, 17 insertions(+)

diff --git a/include/swift/SIL/MemAccessUtils.h b/include/swift/SIL/MemAccessUtils.h
index d5f504e57b310..111238a2f5bbd 100644
--- a/include/swift/SIL/MemAccessUtils.h
+++ b/include/swift/SIL/MemAccessUtils.h
@@ -821,6 +821,10 @@ class AccessPath {
   /// access, thus not within an access scope.
   static AccessPath computeInScope(SILValue address);
 
+  /// Creates an AccessPath, which identifies the first tail-element of the
+  /// object \p rootReference.
+  static AccessPath forTailStorage(SILValue rootReference);
+
   // Encode a dynamic index_addr as an UnknownOffset.
   static constexpr int UnknownOffset = std::numeric_limits<int>::min() >> 1;
 
@@ -984,6 +988,12 @@ class AccessPath {
       SILFunction *function,
       unsigned useLimit = std::numeric_limits<unsigned>::max()) const;
 
+  /// Returns a new AccessPath, identical to this AccessPath, except that the
+  /// offset is replaced with \p newOffset.
+  AccessPath withOffset(int newOffset) const {
+    return AccessPath(storage, pathNode, newOffset);
+  }
+
   void printPath(raw_ostream &os) const;
   void print(raw_ostream &os) const;
   void dump() const;
diff --git a/lib/SIL/Utils/MemAccessUtils.cpp b/lib/SIL/Utils/MemAccessUtils.cpp
index 775b6cdcff351..7cf4d16755f32 100644
--- a/lib/SIL/Utils/MemAccessUtils.cpp
+++ b/lib/SIL/Utils/MemAccessUtils.cpp
@@ -763,6 +763,13 @@ AccessedStorage AccessedStorage::computeInScope(SILValue sourceAddress) {
 // MARK: AccessPath
 //===----------------------------------------------------------------------===//
 
+AccessPath AccessPath::forTailStorage(SILValue rootReference) {
+  return AccessPath(
+      AccessedStorage::forClass(rootReference, AccessedStorage::TailIndex),
+      PathNode(rootReference->getModule()->getIndexTrieRoot()),
+      /*offset*/ 0);
+}
+
 bool AccessPath::contains(AccessPath subPath) const {
   if (!isValid() || !subPath.isValid()) {
     return false;

From 34ae1d911a6171ceecf580ed044d52c2d29fe2d2 Mon Sep 17 00:00:00 2001
From: Erik Eckstein
Date: Wed, 3 Feb 2021 16:03:26 +0100
Subject: [PATCH 2/4] SILCombine: remove release_value of a value_to_bridge_object

This is needed to remove redundant reference counting for String literals.
---
 .../SILCombiner/SILCombinerMiscVisitors.cpp | 27 ++++++++++++++++-
 test/SILOptimizer/sil_combine.sil           | 30 +++++++++++++++++++
 2 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/lib/SILOptimizer/SILCombiner/SILCombinerMiscVisitors.cpp b/lib/SILOptimizer/SILCombiner/SILCombinerMiscVisitors.cpp
index 41b439391cb4a..daa175a4a31e3 100644
--- a/lib/SILOptimizer/SILCombiner/SILCombinerMiscVisitors.cpp
+++ b/lib/SILOptimizer/SILCombiner/SILCombinerMiscVisitors.cpp
@@ -997,6 +997,31 @@ SILInstruction *SILCombiner::visitIndexAddrInst(IndexAddrInst *IA) {
   return Builder.createIndexAddr(IA->getLoc(), base2, newIndex);
 }
 
+/// Walks over all fields of an aggregate and checks if a reference count
+/// operation for \p value is required. This differs from a simple `isTrivial`
+/// check, because it treats a value_to_bridge_object instruction as "trivial".
+static bool isTrivial(SILValue value, SILFunction *function) {
+  SmallVector<SILValue, 8> workList;
+  SmallPtrSet<SILValue, 8> visited;
+  workList.push_back(value);
+  while (!workList.empty()) {
+    SILValue v = workList.pop_back_val();
+    if (v->getType().isTrivial(*function))
+      continue;
+    if (isa<ValueToBridgeObjectInst>(v))
+      continue;
+    if (isa<StructInst>(v) || isa<TupleInst>(v)) {
+      for (SILValue op : cast<SingleValueInstruction>(v)->getOperandValues()) {
+        if (visited.insert(op).second)
+          workList.push_back(op);
+      }
+      continue;
+    }
+    return false;
+  }
+  return true;
+}
+
 SILInstruction *SILCombiner::visitReleaseValueInst(ReleaseValueInst *RVI) {
   assert(!RVI->getFunction()->hasOwnership());
 
@@ -1031,7 +1056,7 @@ SILInstruction *SILCombiner::visitReleaseValueInst(ReleaseValueInst *RVI) {
                                      RVI->getAtomicity());
 
   // ReleaseValueInst of a trivial type is a no-op.
-  if (OperandTy.isTrivial(*RVI->getFunction()))
+  if (isTrivial(Operand, RVI->getFunction()))
     return eraseInstFromFunction(*RVI);
 
   // Do nothing for non-trivial non-reference types.
diff --git a/test/SILOptimizer/sil_combine.sil b/test/SILOptimizer/sil_combine.sil
index 4bdf761703b61..fa821eaa276c6 100644
--- a/test/SILOptimizer/sil_combine.sil
+++ b/test/SILOptimizer/sil_combine.sil
@@ -3825,6 +3825,36 @@ bb0(%0 : $Builtin.Int64):
   return %2 : $Builtin.BridgeObject
 }
 
+struct MyStringObj {
+  @_hasStorage var a: UInt64
+  @_hasStorage var b: Builtin.BridgeObject
+}
+
+struct MyStringGuts {
+  @_hasStorage var o: MyStringObj
+}
+
+struct MyString {
+  @_hasStorage var g: MyStringGuts
+}
+
+// CHECK-LABEL: sil @optimize_arc_with_value_to_bridge_object2
+// CHECK: bb0(%0 : $UInt64):
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
+// CHECK: } // end sil function 'optimize_arc_with_value_to_bridge_object2'
+sil @optimize_arc_with_value_to_bridge_object2 : $@convention(thin) (UInt64) -> () {
+bb0(%0 : $UInt64):
+  %1 = integer_literal $Builtin.Int64, -1945555039024054272
+  %2 = value_to_bridge_object %1 : $Builtin.Int64
+  %3 = struct $MyStringObj (%0 : $UInt64, %2 : $Builtin.BridgeObject)
+  %4 = struct $MyStringGuts (%3 : $MyStringObj)
+  %5 = struct $MyString (%4 : $MyStringGuts)
+  release_value %5 : $MyString
+  %7 = tuple ()
+  return %7 : $()
+}
+
 // CHECK-LABEL: sil @optimize_stringObject_bit_operations
 // CHECK: bb0:
 // CHECK-NEXT: %0 = integer_literal $Builtin.Int64, 4611686018427387904

From 028afc0fb20f653b690556497aef316baaa59ff3 Mon Sep 17 00:00:00 2001
From: Erik Eckstein
Date: Wed, 3 Feb 2021 16:06:42 +0100
Subject: [PATCH 3/4] DeadObjectElimination: delete dead arrays for which the "destroyArray" builtin is inlined.

This is needed for non-OSSA SIL: when we see a destroyArray builtin, we can
safely remove the otherwise dead array. This optimization kicks in later in
the pipeline, after the array semantics calls have already been inlined (in
early SIL, DeadObjectElimination can remove dead arrays based on the semantics
calls).
rdar://73569282
https://bugs.swift.org/browse/SR-14100
---
 .../Transforms/DeadObjectElimination.cpp | 124 ++++++++++-
 test/SILOptimizer/dead_alloc_elim.sil    | 199 ++++++++++++++++++
 test/SILOptimizer/dead_array_elim.swift  |  23 ++
 3 files changed, 338 insertions(+), 8 deletions(-)

diff --git a/lib/SILOptimizer/Transforms/DeadObjectElimination.cpp b/lib/SILOptimizer/Transforms/DeadObjectElimination.cpp
index 49cdaf95715eb..1819899b80068 100644
--- a/lib/SILOptimizer/Transforms/DeadObjectElimination.cpp
+++ b/lib/SILOptimizer/Transforms/DeadObjectElimination.cpp
@@ -36,6 +36,7 @@
 #include "swift/SIL/SILInstruction.h"
 #include "swift/SIL/SILModule.h"
 #include "swift/SIL/SILUndef.h"
+#include "swift/SIL/MemAccessUtils.h"
 #include "swift/SILOptimizer/Analysis/ArraySemantic.h"
 #include "swift/SILOptimizer/PassManager/Passes.h"
 #include "swift/SILOptimizer/PassManager/Transforms.h"
@@ -248,18 +249,112 @@ static bool canZapInstruction(SILInstruction *Inst, bool acceptRefCountInsts,
   return false;
 }
 
+/// Returns true if all stores in \p users store to the tail elements of
+/// \p allocRef, which are destroyed by the \p destroyArray builtin.
+static bool onlyStoresToTailObjects(BuiltinInst *destroyArray,
+                                    const UserList &users,
+                                    AllocRefInst *allocRef) {
+  // Get the number of destroyed elements.
+  auto *literal = dyn_cast<IntegerLiteralInst>(destroyArray->getArguments()[2]);
+  if (!literal || literal->getValue().getMinSignedBits() > 32)
+    return false;
+  int numDestroyed = literal->getValue().getSExtValue();
+
+  SILFunction *func = destroyArray->getFunction();
+  SILBasicBlock *storesBlock = nullptr;
+
+  // Check if the destroyArray destroys the tail elements of allocRef.
+  auto destroyPath = AccessPath::compute(destroyArray->getArguments()[1]);
+  if (destroyPath != AccessPath::forTailStorage(allocRef))
+    return false;
+
+  SmallVector<AccessPath, 8> pathsToCheck;
+
+  // Check all stores to the tail elements.
+  for (SILInstruction *user : users) {
+    auto *store = dyn_cast<StoreInst>(user);
+    if (!store)
+      continue;
+
+    assert(users.count(store->getSrc()->getDefiningInstruction()) == 0 &&
+           "Storing a use of an array (that would mean the array escapes)?");
+
+    // All stores must be in the same block. This ensures that the stores
+    // dominate the destroyArray (which may be in a different block).
+    if (storesBlock && store->getParent() != storesBlock)
+      return false;
+    storesBlock = store->getParent();
+
+    AccessPath storePath = AccessPath::compute(store->getDest());
+    if (!storePath.isValid())
+      return false;
+
+    // We don't care about trivial stores.
+    if (store->getSrc()->getType().isTrivial(*func))
+      continue;
+
+    // Check if it's a store to the tail elements.
+    if (!destroyPath.contains(storePath.withOffset(0)))
+      return false;
+
+    // Check if the store is within the range of the destroyed array. In OSSA
+    // we would not need this check, because an out-of-range store would be a
+    // memory lifetime failure.
+    if (storePath.getOffset() < 0 || storePath.getOffset() >= numDestroyed)
+      return false;
+
+    pathsToCheck.push_back(storePath);
+  }
+
+  // In non-OSSA we have to check if two paths overlap, because we could end up
+  // over-releasing the stored objects.
+  // Group the paths by tail-element index, so that we only have to check
+  // within a tail-element group.
+  std::sort(pathsToCheck.begin(), pathsToCheck.end(),
+            [](AccessPath p1, AccessPath p2) {
+    return p1.getOffset() < p2.getOffset();
+  });
+  for (unsigned i = 0, n = pathsToCheck.size(); i < n; ++i) {
+    for (unsigned j = i + 1;
+         j < n && pathsToCheck[i].getOffset() == pathsToCheck[j].getOffset();
+         ++j) {
+      if (pathsToCheck[i].mayOverlap(pathsToCheck[j]))
+        return false;
+      // Limit the number of checks to avoid quadratic complexity.
+      if (j > i + 8)
+        return false;
+    }
+  }
+  return true;
+}
+
+static bool isDestroyArray(SILInstruction *inst) {
+  BuiltinInst *bi = dyn_cast<BuiltinInst>(inst);
+  return bi && bi->getBuiltinInfo().ID == BuiltinValueKind::DestroyArray;
+}
+
+/// Inserts releases of all stores in \p users.
+static void insertCompensatingReleases(SILInstruction *before,
+                                       const UserList &users) {
+  for (SILInstruction *user : users) {
+    if (auto *store = dyn_cast<StoreInst>(user)) {
+      createDecrementBefore(store->getSrc(), before);
+    }
+  }
+}
+
 /// Analyze the use graph of AllocRef for any uses that would prevent us from
 /// zapping it completely.
 static bool
-hasUnremovableUsers(SILInstruction *AllocRef, UserList *Users,
+hasUnremovableUsers(SILInstruction *allocation, UserList *Users,
                     bool acceptRefCountInsts, bool onlyAcceptTrivialStores) {
   SmallVector<SILInstruction *, 16> Worklist;
-  Worklist.push_back(AllocRef);
+  Worklist.push_back(allocation);
 
   LLVM_DEBUG(llvm::dbgs() << "    Analyzing Use Graph.");
 
   SmallVector<RefElementAddrInst *, 8> refElementAddrs;
   bool deallocationMaybeInlined = false;
+  BuiltinInst *destroyArray = nullptr;
+  AllocRefInst *allocRef = dyn_cast<AllocRefInst>(allocation);
 
   while (!Worklist.empty()) {
     SILInstruction *I = Worklist.pop_back_val();
@@ -273,17 +368,19 @@ hasUnremovableUsers(SILInstruction *AllocRef, UserList *Users,
       continue;
     }
 
-    // If we can't zap this instruction... bail...
-    if (!canZapInstruction(I, acceptRefCountInsts, onlyAcceptTrivialStores)) {
-      LLVM_DEBUG(llvm::dbgs() << "        Found instruction we can't zap...\n");
-      return true;
-    }
-
     if (auto *rea = dyn_cast<RefElementAddrInst>(I)) {
       if (!rea->getType().isTrivial(*rea->getFunction()))
         refElementAddrs.push_back(rea);
     } else if (isa<SetDeallocatingInst>(I)) {
       deallocationMaybeInlined = true;
+    } else if (allocRef && Users && isDestroyArray(I)) {
+      if (destroyArray)
+        return true;
+      destroyArray = cast<BuiltinInst>(I);
+    } else if (!canZapInstruction(I, acceptRefCountInsts,
+                                  onlyAcceptTrivialStores)) {
+      LLVM_DEBUG(llvm::dbgs() << "        Found instruction we can't zap...\n");
+      return true;
     }
 
     // At this point, we can remove the instruction as long as all of its users
@@ -309,6 +406,12 @@ hasUnremovableUsers(SILInstruction *AllocRef, UserList *Users,
     }
   }
 
+  // In OSSA, we don't have to do this check. We can always accept a
+  // destroyArray and insert the compensating destroys right at the store
+  // instructions.
+  if (destroyArray)
+    return !onlyStoresToTailObjects(destroyArray, *Users, allocRef);
+
   if (deallocationMaybeInlined) {
     // The alloc_ref is not destructed by a strong_release which is calling the
     // deallocator (destroying all stored properties).
@@ -767,6 +870,11 @@ bool DeadObjectElimination::processAllocRef(AllocRefInst *ARI) {
     return false;
   }
 
+  auto iter = std::find_if(UsersToRemove.begin(), UsersToRemove.end(),
+                           isDestroyArray);
+  if (iter != UsersToRemove.end())
+    insertCompensatingReleases(*iter, UsersToRemove);
+
   // Remove the AllocRef and all of its users.
   removeInstructions(
     ArrayRef<SILInstruction*>(UsersToRemove.begin(), UsersToRemove.end()));
diff --git a/test/SILOptimizer/dead_alloc_elim.sil b/test/SILOptimizer/dead_alloc_elim.sil
index 7497ba996c7ee..d0a38e853b8a0 100644
--- a/test/SILOptimizer/dead_alloc_elim.sil
+++ b/test/SILOptimizer/dead_alloc_elim.sil
@@ -2,6 +2,7 @@
 
 import Swift
 import Builtin
+import SwiftShims
 
 //////////
 // Data //
@@ -22,6 +23,11 @@ class NontrivialDestructor {
   init()
 }
 
+class ArrayStorage {
+  @_hasStorage var bodyField : Kl
+  init()
+}
+
 sil @$s4main17TrivialDestructorCfD : $@convention(method) (@owned TrivialDestructor) -> () {
 bb0(%0 : $TrivialDestructor):
   // Alloc/Dealloc stack should not disrupt elimination of the
@@ -445,3 +451,196 @@ bb0(%0 : $X):
   return %10 : $()
 }
 
+// CHECK-LABEL: sil @remove_dead_array_with_destroy_simple
+// CHECK-NOT: alloc_ref
+// CHECK-NOT: dealloc_ref
+// CHECK: strong_release %0 : $Kl
+// CHECK-NEXT: strong_release %0 : $Kl
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
+// CHECK: } // end sil function 'remove_dead_array_with_destroy_simple'
+sil @remove_dead_array_with_destroy_simple : $@convention(thin) (@owned Kl) -> () {
+bb0(%0 : $Kl):
+  %3 = integer_literal $Builtin.Word, 2
+  %4 = alloc_ref [tail_elems $Kl * %3 : $Builtin.Word] $ArrayStorage
+  %11 = ref_tail_addr %4 : $ArrayStorage, $Kl
+  store %0 to %11 : $*Kl
+  %27 = integer_literal $Builtin.Word, 1
+  %28 = index_addr %11 : $*Kl, %27 : $Builtin.Word
+  retain_value %0 : $Kl
+  store %0 to %28 : $*Kl
+  set_deallocating %4 : $ArrayStorage
+  %65 = address_to_pointer %11 : $*Kl to $Builtin.RawPointer
+  %66 = metatype $@thick Kl.Type
+  %67 = builtin "destroyArray"<Kl>(%66 : $@thick Kl.Type, %65 : $Builtin.RawPointer, %3 : $Builtin.Word) : $()
+  dealloc_ref %4 : $ArrayStorage
+  %10 = tuple ()
+  return %10 : $()
+}
+
+// CHECK-LABEL: sil @remove_dead_array_with_destroy_complex
+// CHECK-NOT: alloc_ref
+// CHECK-NOT: dealloc_ref
+// CHECK: release_value %0 : $String
+// CHECK-NEXT: release_value %0 : $String
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
+// CHECK: } // end sil function 'remove_dead_array_with_destroy_complex'
+sil @remove_dead_array_with_destroy_complex : $@convention(thin) (@guaranteed String, Int, UInt) -> () {
+bb0(%0 : $String, %1 : $Int, %2 : $UInt):
+  %3 = integer_literal $Builtin.Word, 2
+  %4 = alloc_ref [stack] [tail_elems $(Int, String) * %3 : $Builtin.Word] $_ContiguousArrayStorage<(Int, String)>
+  %5 = upcast %4 : $_ContiguousArrayStorage<(Int, String)> to $__ContiguousArrayStorageBase
+  %6 = struct $_SwiftArrayBodyStorage (%1 : $Int, %2 : $UInt)
+  %7 = struct $_ArrayBody (%6 : $_SwiftArrayBodyStorage)
+  %9 = ref_element_addr %5 : $__ContiguousArrayStorageBase, #__ContiguousArrayStorageBase.countAndCapacity
+  store %7 to %9 : $*_ArrayBody
+  %11 = ref_tail_addr %5 : $__ContiguousArrayStorageBase, $(Int, String)
+  %12 = tuple_element_addr %11 : $*(Int, String), 0
+  %13 = tuple_element_addr %11 : $*(Int, String), 1
+  retain_value %0 : $String
+  store %1 to %12 : $*Int
+  store %0 to %13 : $*String
+  %27 = integer_literal $Builtin.Word, 1
+  %28 = index_addr %11 : $*(Int, String), %27 : $Builtin.Word
+  %29 = tuple_element_addr %28 : $*(Int, String), 0
+  %30 = tuple_element_addr %28 : $*(Int, String), 1
+  retain_value %0 : $String
+  store %1 to %29 : $*Int
+  store %0 to %30 : $*String
+  set_deallocating %4 : $_ContiguousArrayStorage<(Int, String)>
+  %64 = ref_tail_addr %4 : $_ContiguousArrayStorage<(Int, String)>, $(Int, String)
+  %65 = address_to_pointer %64 : $*(Int, String) to $Builtin.RawPointer
+  %66 = metatype $@thick (Int, String).Type
+  %67 = builtin "destroyArray"<(Int, String)>(%66 : $@thick (Int, String).Type, %65 : $Builtin.RawPointer, %3 : $Builtin.Word) : $()
+  fix_lifetime %4 : $_ContiguousArrayStorage<(Int, String)>
+  dealloc_ref %4 : $_ContiguousArrayStorage<(Int, String)>
+  dealloc_ref [stack] %4 : $_ContiguousArrayStorage<(Int, String)>
+  %10 = tuple ()
+  return %10 : $()
+}
+
+// CHECK-LABEL: sil @dont_remove_dead_array_overlapping_stores
+// CHECK: alloc_ref
+// CHECK: dealloc_ref
+// CHECK: } // end sil function 'dont_remove_dead_array_overlapping_stores'
+sil @dont_remove_dead_array_overlapping_stores : $@convention(thin) (@owned Kl) -> () {
+bb0(%0 : $Kl):
+  %3 = integer_literal $Builtin.Word, 2
+  %4 = alloc_ref [tail_elems $Kl * %3 : $Builtin.Word] $ArrayStorage
+  %11 = ref_tail_addr %4 : $ArrayStorage, $Kl
+  %27 = integer_literal $Builtin.Word, 1
+  %28 = index_addr %11 : $*Kl, %27 : $Builtin.Word
+  retain_value %0 : $Kl
+  store %0 to %28 : $*Kl
+  store %0 to %11 : $*Kl
+  %30 = index_addr %11 : $*Kl, %27 : $Builtin.Word
+  retain_value %0 : $Kl
+  store %0 to %30 : $*Kl // Overwrites the first store
+  set_deallocating %4 : $ArrayStorage
+  %65 = address_to_pointer %11 : $*Kl to $Builtin.RawPointer
+  %66 = metatype $@thick Kl.Type
+  %67 = builtin "destroyArray"<Kl>(%66 : $@thick Kl.Type, %65 : $Builtin.RawPointer, %3 : $Builtin.Word) : $()
+  dealloc_ref %4 : $ArrayStorage
+  %10 = tuple ()
+  return %10 : $()
+}
+
+// CHECK-LABEL: sil @dont_remove_dead_array_store_to_body
+// CHECK: alloc_ref
+// CHECK: dealloc_ref
+// CHECK: } // end sil function 'dont_remove_dead_array_store_to_body'
+sil @dont_remove_dead_array_store_to_body : $@convention(thin) (@owned Kl) -> () {
+bb0(%0 : $Kl):
+  %3 = integer_literal $Builtin.Word, 2
+  %4 = alloc_ref [tail_elems $Kl * %3 : $Builtin.Word] $ArrayStorage
+  %5 = ref_element_addr %4 : $ArrayStorage, #ArrayStorage.bodyField
+  %11 = ref_tail_addr %4 : $ArrayStorage, $Kl
+  store %0 to %5 : $*Kl // Store non-trivial type to body of ArrayStorage
+  %27 = integer_literal $Builtin.Word, 1
+  %28 = index_addr %11 : $*Kl, %27 : $Builtin.Word
+  retain_value %0 : $Kl
+  store %0 to %28 : $*Kl
+  set_deallocating %4 : $ArrayStorage
+  %65 = address_to_pointer %11 : $*Kl to $Builtin.RawPointer
+  %66 = metatype $@thick Kl.Type
+  %67 = builtin "destroyArray"<Kl>(%66 : $@thick Kl.Type, %65 : $Builtin.RawPointer, %3 : $Builtin.Word) : $()
+  dealloc_ref %4 : $ArrayStorage
+  %10 = tuple ()
+  return %10 : $()
+}
+
+// CHECK-LABEL: sil @dont_remove_dead_array_out_of_bounds_store
+// CHECK: alloc_ref
+// CHECK: dealloc_ref
+// CHECK: } // end sil function 'dont_remove_dead_array_out_of_bounds_store'
+sil @dont_remove_dead_array_out_of_bounds_store: $@convention(thin) (@owned Kl) -> () {
+bb0(%0 : $Kl):
+  %2 = integer_literal $Builtin.Word, 1
+  %3 = integer_literal $Builtin.Word, 2
+  %4 = alloc_ref [tail_elems $Kl * %3 : $Builtin.Word] $ArrayStorage
+  %11 = ref_tail_addr %4 : $ArrayStorage, $Kl
+  store %0 to %11 : $*Kl
+  %28 = index_addr %11 : $*Kl, %2 : $Builtin.Word // out-of-bounds store
+  retain_value %0 : $Kl
+  store %0 to %28 : $*Kl
+  set_deallocating %4 : $ArrayStorage
+  %65 = address_to_pointer %11 : $*Kl to $Builtin.RawPointer
+  %66 = metatype $@thick Kl.Type
+  %67 = builtin "destroyArray"<Kl>(%66 : $@thick Kl.Type, %65 : $Builtin.RawPointer, %2 : $Builtin.Word) : $()
+  dealloc_ref %4 : $ArrayStorage
+  %10 = tuple ()
+  return %10 : $()
+}
+
+// CHECK-LABEL: sil @dont_remove_dead_array_unknown_bounds
+// CHECK: alloc_ref
+// CHECK: dealloc_ref
+// CHECK: } // end sil function 'dont_remove_dead_array_unknown_bounds'
+sil @dont_remove_dead_array_unknown_bounds : $@convention(thin) (@owned Kl, Builtin.Word) -> () {
+bb0(%0 : $Kl, %1 : $Builtin.Word):
+  %3 = integer_literal $Builtin.Word, 2
+  %4 = alloc_ref [tail_elems $Kl * %3 : $Builtin.Word] $ArrayStorage
+  %11 = ref_tail_addr %4 : $ArrayStorage, $Kl
+  store %0 to %11 : $*Kl
+  %27 = integer_literal $Builtin.Word, 1
+  %28 = index_addr %11 : $*Kl, %27 : $Builtin.Word
+  retain_value %0 : $Kl
+  store %0 to %28 : $*Kl
+  set_deallocating %4 : $ArrayStorage
+  %65 = address_to_pointer %11 : $*Kl to $Builtin.RawPointer
+  %66 = metatype $@thick Kl.Type
+  %67 = builtin "destroyArray"<Kl>(%66 : $@thick Kl.Type, %65 : $Builtin.RawPointer, %1 : $Builtin.Word) : $()
+  dealloc_ref %4 : $ArrayStorage
+  %10 = tuple ()
+  return %10 : $()
+}
+
+// CHECK-LABEL: sil @dont_remove_dead_array_wrong_blocks
+// CHECK: alloc_ref
+// CHECK: dealloc_ref
+// CHECK: } // end sil function 'dont_remove_dead_array_wrong_blocks'
+sil @dont_remove_dead_array_wrong_blocks : $@convention(thin) (@owned Kl) -> () {
+bb0(%0 : $Kl):
+  %3 = integer_literal $Builtin.Word, 2
+  %4 = alloc_ref [tail_elems $Kl * %3 : $Builtin.Word] $ArrayStorage
+  %11 = ref_tail_addr %4 : $ArrayStorage, $Kl
+  cond_br undef, bb1, bb2
+bb1:
+  store %0 to %11 : $*Kl
+  br bb3
+bb2:
+  %27 = integer_literal $Builtin.Word, 1
+  %28 = index_addr %11 : $*Kl, %27 : $Builtin.Word
+  store %0 to %28 : $*Kl
+  br bb3
+bb3:
+  set_deallocating %4 : $ArrayStorage
+  %65 = address_to_pointer %11 : $*Kl to $Builtin.RawPointer
+  %66 = metatype $@thick Kl.Type
+  %67 = builtin "destroyArray"<Kl>(%66 : $@thick Kl.Type, %65 : $Builtin.RawPointer, %3 : $Builtin.Word) : $()
+  dealloc_ref %4 : $ArrayStorage
+  %10 = tuple ()
+  return %10 : $()
+}
+
diff --git a/test/SILOptimizer/dead_array_elim.swift b/test/SILOptimizer/dead_array_elim.swift
index 2a87d86c3d583..e67d25e82648e 100644
--- a/test/SILOptimizer/dead_array_elim.swift
+++ b/test/SILOptimizer/dead_array_elim.swift
@@ -51,3 +51,26 @@ func testDeadArrayElimWithFixLifetimeUse() {
 func testDeadArrayElimWithAddressOnlyValues<T>(x: T, y: T) {
   _ = [x, y]
 }
+
+// CHECK-LABEL: sil hidden @$s15dead_array_elim31testDeadArrayAfterOptimizationsySiSSF
+// CHECK: bb0(%0 : $String):
+// CHECK-NEXT: debug_value {{.*}} name "stringParameter"
+// CHECK-NEXT: integer_literal $Builtin.Int{{[0-9]+}}, 21
+// CHECK-NEXT: struct $Int
+// CHECK-NEXT: return
+// CHECK: } // end sil function '$s15dead_array_elim31testDeadArrayAfterOptimizationsySiSSF'
+func testDeadArrayAfterOptimizations(_ stringParameter: String) -> Int {
+  var sum = 0
+  for x in [(1, "hello"),
+            (2, "a larger string which does not fit into a small string"),
+            (3, stringParameter),
+            (4, "hello"),
+            (5, "hello"),
+            (6, "hello"),
+           ] {
+    sum += x.0
+  }
+  return sum
+}
+

From 1655e22f625f37be053fd6aae13b089a0882133b Mon Sep 17 00:00:00 2001
From: Erik Eckstein
Date: Wed, 3 Feb 2021 19:30:54 +0100
Subject: [PATCH 4/4] SIL: fix the == operator for AccessPath

The offset was not compared.
--- include/swift/SIL/MemAccessUtils.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/swift/SIL/MemAccessUtils.h b/include/swift/SIL/MemAccessUtils.h index 111238a2f5bbd..c177802864781 100644 --- a/include/swift/SIL/MemAccessUtils.h +++ b/include/swift/SIL/MemAccessUtils.h @@ -943,7 +943,8 @@ class AccessPath { bool operator==(AccessPath other) const { return - storage.hasIdenticalBase(other.storage) && pathNode == other.pathNode; + storage.hasIdenticalBase(other.storage) && pathNode == other.pathNode && + offset == other.offset; } bool operator!=(AccessPath other) const { return !(*this == other); }
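
The following sketch is illustrative only and not part of the patch series. It
shows, under stated assumptions, how the utilities added in PATCH 1 interact
with the operator== fix in PATCH 4. Here `arrayRef` (an alloc_ref result) and
`storeInst` (a store into its tail storage) are hypothetical placeholders, not
identifiers from the diffs.

  // Access path of the first tail element of the hypothetical array object.
  AccessPath tailPath = AccessPath::forTailStorage(arrayRef);

  // Access path of a hypothetical store into that object.
  AccessPath storePath = AccessPath::compute(storeInst->getDest());

  // With the fixed operator==, two paths that differ only in their index_addr
  // offset no longer compare equal ...
  assert(tailPath.withOffset(1) != tailPath);

  // ... while stripping the offset is how a client asks "does this store
  // target the tail elements at all?", as onlyStoresToTailObjects does in
  // PATCH 3.
  bool storesToTail = tailPath.contains(storePath.withOffset(0));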