//===--- StackAllocator.h - A stack allocator -----------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// A bump-pointer allocator that obeys a stack discipline.
//
//===----------------------------------------------------------------------===//

#include "swift/Runtime/Debug.h"
#include "llvm/Support/Alignment.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>

namespace swift {

/// A bump-pointer allocator that obeys a stack discipline.
///
/// StackAllocator performs fast allocation and deallocation of memory by
/// implementing a bump-pointer allocation strategy.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// In contrast to a pure bump-pointer allocator, it's possible to free memory.
/// Allocations and deallocations must follow a strict stack discipline. In
/// general, slabs which become unused are _not_ freed, but reused for
/// subsequent allocations.
///
/// It's possible to place the first slab into pre-allocated memory.
///
/// \p SlabCapacity specifies the capacity of newly allocated slabs.
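///
/// As a usage sketch (illustrative only; the 1024-byte slab capacity and the
/// allocation sizes below are arbitrary example values, not requirements):
///
/// \code
///   StackAllocator<1024> allocator;
///   void *a = allocator.alloc(16);
///   void *b = allocator.alloc(32);
///   // Deallocation must happen in reverse (stack) order.
///   allocator.dealloc(b);
///   allocator.dealloc(a);
/// \endcode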
template <size_t SlabCapacity>
class StackAllocator {
private:

  struct Allocation;
  struct Slab;

  /// The last active allocation.
  ///
  /// The next call to dealloc() must free this allocation.
  Allocation *lastAllocation = nullptr;

  /// The first slab.
  Slab *firstSlab;

  /// The number of currently allocated slabs. Used for unit testing.
  int32_t numAllocatedSlabs = 0;

  /// True if the first slab is pre-allocated.
  bool firstSlabIsPreallocated;

  /// The minimum alignment of allocated memory.
  static constexpr size_t alignment = alignof(std::max_align_t);

  /// If set to true, memory allocations are checked for buffer overflows and
  /// use-after-free, similar to guard-malloc.
  static constexpr bool guardAllocations =
#ifdef NDEBUG
      false;
#else
      true;
#endif

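  /// Magic values used by the guardAllocations checks: magicEndOfAllocation is
  /// written directly past a live allocation so buffer overflows can be
  /// detected, and deallocated memory is overwritten with magicUninitialized
  /// to catch use-after-free.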
  static constexpr uintptr_t magicUninitialized =
      (uintptr_t)0xcdcdcdcdcdcdcdcdull;
  static constexpr uintptr_t magicEndOfAllocation =
      (uintptr_t)0xdeadbeafdeadbeafull;

  /// A memory slab holding multiple allocations.
  ///
  /// This struct is actually just the slab header. The slab buffer is
  /// tail-allocated after Slab.
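  ///
  /// Illustrative in-memory layout; the payload buffer starts at getAddr(0),
  /// i.e. headerSize() bytes after the start of the header:
  ///
  ///   [ next | capacity | currentOffset | padding | payload ... ]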
  struct Slab {
    /// A singly linked list of all allocated slabs.
    Slab *next = nullptr;

    // Capacity and offset do not include these header fields.
    uint32_t capacity;
    uint32_t currentOffset = 0;

    // Here starts the tail-allocated memory buffer of the slab.

    Slab(size_t newCapacity) : capacity(newCapacity) {
      assert((size_t)capacity == newCapacity && "capacity overflow");
    }

    /// The size of the slab header.
    static size_t headerSize() {
      return llvm::alignTo(sizeof(Slab), llvm::Align(alignment));
    }

    /// Return \p size with the added overhead of the slab header.
    static size_t includingHeader(size_t size) {
      return headerSize() + size;
    }

    /// Return the payload buffer address at \p atOffset.
    ///
    /// Note: it's valid to call this function on a not-yet-constructed slab.
    char *getAddr(size_t atOffset) {
      return (char *)this + headerSize() + atOffset;
    }

    /// Return true if this slab can fit an allocation of \p size.
    ///
    /// \p size does not include the allocation header, but must include the
    /// overhead for guardAllocations (if enabled).
    inline bool canAllocate(size_t size) const {
      return currentOffset + Allocation::includingHeader(size) <= capacity;
    }

    /// Return true if no memory is allocated in this slab.
    bool isEmpty() const { return currentOffset == 0; }

    /// Allocate \p alignedSize bytes in this slab.
    ///
    /// \p alignedSize does not include the allocation header, but must include
    /// the overhead for guardAllocations (if enabled).
    ///
    /// Precondition: \p alignedSize must be aligned up to
    /// StackAllocator::alignment.
    /// Precondition: there must be enough space in this slab to fit the
    /// allocation.
    Allocation *allocate(size_t alignedSize, Allocation *lastAllocation) {
      assert(llvm::isAligned(llvm::Align(alignment), alignedSize));
      assert(canAllocate(alignedSize));
      void *buffer = getAddr(currentOffset);
      auto *allocation = new (buffer) Allocation(lastAllocation, this);
      currentOffset += Allocation::includingHeader(alignedSize);
      if (guardAllocations) {
        uintptr_t *endOfCurrentAllocation = (uintptr_t *)getAddr(currentOffset);
        endOfCurrentAllocation[-1] = magicEndOfAllocation;
      }
      return allocation;
    }

    /// Deallocate \p allocation.
    ///
    /// Precondition: \p allocation must be the last (most recent) allocation
    /// in this slab.
    void deallocate(Allocation *allocation) {
      assert(allocation->slab == this);
      if (guardAllocations) {
        auto *endOfAllocation = (uintptr_t *)getAddr(currentOffset);
        if (endOfAllocation[-1] != magicEndOfAllocation)
          fatalError(0, "Buffer overflow in StackAllocator");
        for (auto *p = (uintptr_t *)allocation; p < endOfAllocation; ++p)
          *p = magicUninitialized;
      }
      currentOffset = (char *)allocation - getAddr(0);
    }
  };

  /// A single memory allocation.
  ///
  /// This struct is actually just the allocation header. The allocated
  /// memory buffer is located after Allocation.
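  ///
  /// The usable memory starts at getAllocatedMemory(), i.e. at the first
  /// StackAllocator::alignment-aligned address after these header fields.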
  struct Allocation {
    /// A singly linked list of previous allocations.
    Allocation *previous;
    /// The containing slab.
    Slab *slab;

    // Here starts the tail-allocated memory.

    Allocation(Allocation *previous, Slab *slab) :
      previous(previous), slab(slab) {}

    void *getAllocatedMemory() {
      return (char *)this + headerSize();
    }

    /// The size of the allocation header.
    static size_t headerSize() {
      return llvm::alignTo(sizeof(Allocation), llvm::Align(alignment));
    }

    /// Return \p size with the added overhead of the allocation header.
    static size_t includingHeader(size_t size) {
      return headerSize() + size;
    }
  };

  /// Return a slab which is suitable for an allocation of \p size bytes.
  Slab *getSlabForAllocation(size_t size) {
    Slab *slab = (lastAllocation ? lastAllocation->slab : firstSlab);
    if (slab) {
      // Is there enough space in the current slab?
      if (slab->canAllocate(size))
        return slab;

      // Is there a successor slab, which we allocated before (and which became
      // free in the meantime)?
      if (Slab *nextSlab = slab->next) {
        assert(nextSlab->isEmpty());
        if (nextSlab->canAllocate(size))
          return nextSlab;

        // No space in the next slab: although it's empty, the requested size
        // exceeds its capacity.
        // As we have to allocate a new slab anyway, free all successor slabs
        // and allocate a new one with the accumulated capacity.
        size_t alreadyAllocatedCapacity = freeAllSlabs(slab->next);
        size = std::max(size, alreadyAllocatedCapacity);
      }
    }
    size_t capacity = std::max(SlabCapacity,
                               Allocation::includingHeader(size));
    void *slabBuffer = malloc(Slab::includingHeader(capacity));
    Slab *newSlab = new (slabBuffer) Slab(capacity);
    if (slab)
      slab->next = newSlab;
    else
      firstSlab = newSlab;
    numAllocatedSlabs++;
    return newSlab;
  }

  /// Deallocate \p first and all subsequent slabs, and set \p first to null.
  ///
  /// Returns the total capacity of the freed slabs.
  size_t freeAllSlabs(Slab *&first) {
    size_t freedCapacity = 0;
    Slab *slab = first;
    first = nullptr;
    while (slab) {
      Slab *next = slab->next;
      freedCapacity += slab->capacity;
      free(slab);
      numAllocatedSlabs--;
      slab = next;
    }
    return freedCapacity;
  }

public:
  /// Construct a StackAllocator without a pre-allocated first slab.
  StackAllocator() : firstSlab(nullptr), firstSlabIsPreallocated(false) { }

  /// Construct a StackAllocator with a pre-allocated first slab.
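  ///
  /// For example (a sketch; the buffer and capacity values are arbitrary):
  ///
  /// \code
  ///   char buffer[256];
  ///   StackAllocator<1024> allocator(buffer, sizeof(buffer));
  /// \endcode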
  StackAllocator(void *firstSlabBuffer, size_t bufferCapacity) {
    char *start = (char *)llvm::alignAddr(firstSlabBuffer,
                                          llvm::Align(alignment));
    char *end = (char *)firstSlabBuffer + bufferCapacity;
    assert(start + Slab::headerSize() <= end &&
           "buffer for first slab too small");
    firstSlab = new (start) Slab(end - start - Slab::headerSize());
    firstSlabIsPreallocated = true;
  }

  ~StackAllocator() {
    if (lastAllocation)
      fatalError(0, "not all allocations are deallocated");
    (void)freeAllSlabs(firstSlabIsPreallocated ? firstSlab->next : firstSlab);
    assert(getNumAllocatedSlabs() == 0);
  }

  /// Allocate a memory buffer of \p size bytes.
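  ///
  /// The returned pointer is aligned to StackAllocator::alignment.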
  void *alloc(size_t size) {
    if (guardAllocations)
      size += sizeof(uintptr_t);
    size_t alignedSize = llvm::alignTo(size, llvm::Align(alignment));
    Slab *slab = getSlabForAllocation(alignedSize);
    Allocation *allocation = slab->allocate(alignedSize, lastAllocation);
    lastAllocation = allocation;
    assert(llvm::isAddrAligned(llvm::Align(alignment),
                               allocation->getAllocatedMemory()));
    return allocation->getAllocatedMemory();
  }

  /// Deallocate memory \p ptr.
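  ///
  /// \p ptr must be the most recently allocated and not yet deallocated
  /// buffer (stack discipline); otherwise a fatal error is reported.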
  void dealloc(void *ptr) {
    if (!lastAllocation || lastAllocation->getAllocatedMemory() != ptr)
      fatalError(0, "freed pointer was not the last allocation");

    Allocation *prev = lastAllocation->previous;
    lastAllocation->slab->deallocate(lastAllocation);
    lastAllocation = prev;
  }

  /// For unit testing.
  int getNumAllocatedSlabs() { return numAllocatedSlabs; }
};

} // namespace swift