12 #include "allocator_base.hpp"
14 #include "internal/dynastic.hpp"
15 #include "internal/reallocator.hpp"
16 #include "internal/heap_helpers.hpp"
30 inline namespace v_100 {
// Fragment: template parameter list of the heap allocator. The class
// declaration between this header and the constants below is not visible in
// this chunk. NumberOfChunks / ChunkSize presumably allow compile-time sizing
// (a dynastic-style static-or-runtime value) — TODO confirm against the full file.
39 template <
class Allocator,
size_t NumberOfChunks,
size_t ChunkSize>
// Bitmap helper constants for the 64-bit control registers. Convention in this
// file: a SET bit marks a free chunk (deallocate_all() fills with all_set and
// allocation searches for set bits before clearing them).
51 static const uint64_t all_set = std::numeric_limits<uint64_t>::max();
52 static const uint64_t all_zero = uint64_t(0);
// Releases both backing buffers (the control bitmap and the chunk storage)
// back to the parent allocator. Any outstanding blocks become dangling.
59 void shrink() noexcept {
60 allocator_.deallocate(controlBuffer_);
61 allocator_.deallocate(buffer_);
// Copy assignment is deleted: the heap owns raw buffers and a copy would
// double-free them. Only move assignment (below) is provided.
66 heap &operator=(
const heap &) =
delete;
// The parent allocator used for the control bitmap and the chunk buffer.
69 using allocator = Allocator;
// Deallocating only a leading portion of an allocation is supported (see
// reallocate(), which frees trailing chunks on shrink).
71 static constexpr
bool supports_truncated_deallocation =
true;
// Alignment is inherited from the parent allocator.
72 static constexpr
unsigned alignment = Allocator::alignment;
// Runtime-sized constructor; the body (presumably storing the arguments and
// calling init()) is not visible in this chunk — TODO confirm.
78 heap(
size_t numberOfChunks,
size_t chunkSize) noexcept {
// Fragment: body of the move-assignment operator (its signature and the
// self-assignment/cleanup preamble are not visible here). Steals all state
// from x; the members are trivially movable values/pointers.
93 numberOfChunks_ = std::move(x.numberOfChunks_);
94 chunk_size_ = std::move(x.chunk_size_);
95 buffer_ = std::move(x.buffer_);
96 controlBuffer_ = std::move(x.controlBuffer_);
97 control_ = std::move(x.control_);
98 controlSize_ = std::move(x.controlSize_);
99 allocator_ = std::move(x.allocator_);
// Null out the source's control pointer so its destructor/shrink cannot
// touch the now-transferred buffers.
101 x.control_ =
nullptr;
// Returns the configured number of chunks. (Name is singular in the public
// interface; kept as-is for compatibility.)
110 size_t number_of_chunk()
const noexcept {
111 return numberOfChunks_.value();
// Returns the size in bytes of a single chunk.
114 size_t chunk_size()
const noexcept {
115 return chunk_size_.value();
// True iff b is non-empty and b.ptr lies inside this heap's chunk buffer,
// i.e. buffer_.ptr <= b.ptr < buffer_.ptr + buffer_.length.
118 bool owns(
const block &b)
const noexcept {
119 return b && buffer_.
ptr <= b.
ptr &&
120 b.
ptr < (
static_cast<char *
>(buffer_.
ptr) + buffer_.
length);
// Allocates n bytes, rounded up to whole chunks. Dispatches to one of four
// strategies based on how the chunk count maps onto 64-bit control registers.
// Several interior lines (early-outs, returns) are not visible in this chunk.
123 block allocate(
size_t n) noexcept {
// Request larger than the entire heap: fail (the return is outside this view).
130 if (n > chunk_size_.value() * numberOfChunks_.value()) {
// numberOfAlignedBytes is computed in lines not visible here — presumably
// n rounded up to the chunk/alignment boundary; TODO confirm.
135 size_t numberOfBlocks = numberOfAlignedBytes / chunk_size_.value();
136 numberOfBlocks = std::max(
size_t(1), numberOfBlocks);
// Fits strictly inside one 64-bit control register.
138 if (numberOfBlocks < 64) {
139 result = allocate_within_single_control_register(numberOfBlocks);
// Exactly one full control register (64 chunks).
144 else if (numberOfBlocks == 64) {
145 result = allocate_within_complete_control_register(numberOfBlocks);
// A whole number of full control registers.
150 else if ((numberOfBlocks % 64) == 0) {
151 result = allocate_multiple_complete_control_registers(numberOfBlocks);
// General case: the run of chunks straddles register boundaries.
157 result = allocate_with_register_overlap(numberOfBlocks);
// Returns b's chunks to the free bitmap (set bits = free). Guard clauses
// (null/ownership checks) are in lines not visible in this chunk.
161 void deallocate(
block &b) noexcept {
170 const auto context = block_to_context(b);
// Entire run fits in one control register: set the bits in place.
172 if (context.subIndex + context.usedChunks <= 64) {
173 set_within_single_register<true>(context);
// Whole registers only: blanket-fill them with all_set.
175 else if ((context.usedChunks % 64) == 0) {
176 deallocate_for_multiple_complete_control_register(context);
// Run straddles register boundaries.
179 deallocate_with_control_register_overlap(context);
// Marks every chunk free by setting every bit of the control bitmap.
// Outstanding blocks are invalidated wholesale.
184 void deallocate_all() noexcept {
185 std::fill(control_, control_ + controlSize_, all_set);
// Resizes b in place where possible. Trivial cases (null block, n == 0, etc.)
// are delegated to the shared default handler first. Only the shrink path is
// visible in this chunk; the grow path lies in lines not shown here.
188 bool reallocate(
block &b,
size_t n) noexcept {
189 if (internal::is_reallocation_handled_default(*
this, b, n)) {
193 const auto numberOfBlocks =
static_cast<int>(b.
length / chunk_size_.value());
194 const auto numberOfNewNeededBlocks =
// Same chunk count after rounding: nothing to do.
197 if (numberOfBlocks == numberOfNewNeededBlocks) {
// Shrink: free only the trailing chunks (supports_truncated_deallocation).
201 auto context = block_to_context(b);
202 if (context.subIndex + context.usedChunks <= 64) {
203 set_within_single_register<true>(BlockContext{ context.registerIndex,
204 context.subIndex + numberOfNewNeededBlocks,
205 context.usedChunks - numberOfNewNeededBlocks });
208 deallocate_with_control_register_overlap(
209 BlockContext{ context.registerIndex, context.subIndex + numberOfNewNeededBlocks,
210 context.usedChunks - numberOfNewNeededBlocks });
212 b.
length = numberOfNewNeededBlocks * chunk_size_.value();
// Tries to grow b in place by claiming the chunks immediately following it.
// Returns via b.length on success; failure paths are in lines not visible here.
218 bool expand(
block &b,
size_t delta) noexcept {
223 const auto context = block_to_context(b);
// Rounded-up chunk count for delta (the rounding expression continues in a
// line not visible here).
224 const auto numberOfAdditionalNeededBlocks =
static_cast<int>(
// The needed tail still fits inside the block's last control register.
227 if (context.subIndex + context.usedChunks + numberOfAdditionalNeededBlocks <= 64) {
228 if (test_and_set_within_single_register<false>(
229 BlockContext{ context.registerIndex, context.subIndex + context.usedChunks,
230 numberOfAdditionalNeededBlocks })) {
231 b.
length += numberOfAdditionalNeededBlocks * chunk_size_.value();
// Tail spans register boundaries: atomic-style check-then-commit over all of
// them (<false> marks the bits used).
237 if (test_and_set_over_multiple_registers<false>(
238 BlockContext{ context.registerIndex, context.subIndex + context.usedChunks,
239 numberOfAdditionalNeededBlocks })) {
240 b.
length += numberOfAdditionalNeededBlocks * chunk_size_.value();
// One-time setup: validates the chunk geometry, then allocates the control
// bitmap and the chunk buffer from the parent allocator.
248 void init() noexcept {
// NOTE(review): '>' rejects chunk_size == alignment; confirm whether '>=' was
// intended.
249 assert(chunk_size_.value() > alignment);
250 assert(chunk_size_.value() % alignment == 0);
// Integer division: silently truncates unless numberOfChunks_ is a multiple
// of 64 — presumably asserted in a line not visible here; TODO confirm.
252 controlSize_ = numberOfChunks_.value() / 64;
253 controlBuffer_ = allocator_.allocate(
sizeof(uint64_t) * controlSize_);
254 assert((
bool)controlBuffer_);
256 control_ =
static_cast<uint64_t *
>(controlBuffer_.ptr);
257 buffer_ = allocator_.allocate(chunk_size_.value() * numberOfChunks_.value());
258 assert((
bool)buffer_);
// Maps a block back to bitmap coordinates: which 64-bit control register
// (blockIndex / 64), which bit within it (blockIndex % 64), and how many
// chunks the block occupies (length / chunk_size).
270 BlockContext block_to_context(
const block &b) noexcept {
271 const auto blockIndex =
static_cast<int>(
272 (
static_cast<char *
>(b.
ptr) - static_cast<char *>(buffer_.
ptr)) / chunk_size_.value());
274 return{ blockIndex / 64, blockIndex % 64,
static_cast<int>(b.
length / chunk_size_.value()) };
// Check-then-commit on one control register: succeeds only if every chunk in
// [subIndex, subIndex + usedChunks) is currently free (its bit set), then
// flips the run to the Used state. The template-parameter line and the
// early-return are outside this view.
278 bool test_and_set_within_single_register(
const BlockContext &context) noexcept {
279 assert(context.subIndex + context.usedChunks <= 64);
// Special-case usedChunks == 64: a 64-bit shift by 64 would be UB.
281 uint64_t mask = (context.usedChunks == 64)
283 : (((uint64_t(1) << context.usedChunks) - 1) << context.subIndex);
285 uint64_t currentRegister, newRegister;
286 currentRegister = control_[context.registerIndex];
// Not all requested bits are set (free): the run is unavailable.
287 if ((currentRegister & mask) != mask) {
290 newRegister = helpers::set_used<Used>(currentRegister, mask);
291 control_[context.registerIndex] = newRegister;
// Unconditionally writes the Used/free state for a run of chunks that may
// straddle several control registers, masking per register inside a do-while
// (the 'do {' and some loop bookkeeping lines are outside this view).
296 void set_over_multiple_registers(
const BlockContext &context) noexcept {
297 size_t chunksToTest = context.usedChunks;
298 size_t subIndexStart = context.subIndex;
299 size_t registerIndex = context.registerIndex;
// First (possibly partial) register: bits from subIndexStart to the top.
302 if (subIndexStart > 0)
303 mask = ((uint64_t(1) << (64 - subIndexStart)) - 1) << subIndexStart;
// Subsequent registers start at bit 0; full register when >= 64 chunks remain.
305 mask = (chunksToTest >= 64) ? all_set : ((uint64_t(1) << chunksToTest) - 1);
307 assert(registerIndex < controlSize_);
309 uint64_t currentRegister, newRegister;
311 currentRegister = control_[registerIndex];
312 newRegister = helpers::set_used<Used>(currentRegister, mask);
313 control_[registerIndex] = newRegister;
// Consume the chunks covered by this register and advance.
315 if (subIndexStart + chunksToTest > 64) {
316 chunksToTest = subIndexStart + chunksToTest - 64;
323 }
while (chunksToTest > 0);
// Two-phase claim across register boundaries: first scan every touched
// register to confirm the whole run is free, then commit via
// set_over_multiple_registers. Not thread-safe as written (scan and commit
// are separate passes). Several loop-bookkeeping lines are outside this view.
327 bool test_and_set_over_multiple_registers(
const BlockContext &context) noexcept {
330 size_t chunksToTest = context.usedChunks;
331 size_t subIndexStart = context.subIndex;
332 size_t registerIndex = context.registerIndex;
336 if (subIndexStart > 0)
337 mask = ((uint64_t(1) << (64 - subIndexStart)) - 1) << subIndexStart;
339 mask = (chunksToTest >= 64) ? all_set : ((uint64_t(1) << chunksToTest) - 1);
341 auto currentRegister = control_[registerIndex];
// Any cleared (used) bit in the run: fail without modifying anything.
343 if ((currentRegister & mask) != mask) {
347 if (subIndexStart + chunksToTest > 64) {
348 chunksToTest = subIndexStart + chunksToTest - 64;
// NOTE(review): '>' lets registerIndex == controlSize_ continue, and the next
// iteration would read control_[controlSize_] (out of bounds). Compare the
// assert(registerIndex < controlSize_) in set_over_multiple_registers —
// verify whether this should be '>='.
355 if (registerIndex > controlSize_) {
358 }
while (chunksToTest > 0);
// All registers free: commit the whole run.
360 set_over_multiple_registers<Used>(context);
// Unconditional single-register variant: writes the Used/free state for a run
// known to lie inside one control register (no availability check).
366 void set_within_single_register(
const BlockContext &context) noexcept {
367 assert(context.subIndex + context.usedChunks <= 64);
// Special-case usedChunks == 64: a shift by 64 would be UB.
369 uint64_t mask = (context.usedChunks == 64)
371 : (((uint64_t(1) << context.usedChunks) - 1) << context.subIndex);
373 uint64_t currentRegister, newRegister;
374 currentRegister = control_[context.registerIndex];
375 newRegister = helpers::set_used<Used>(currentRegister, mask);
376 control_[context.registerIndex] = newRegister;
// Strategy for runs of fewer than 64 chunks: scan each control register and
// slide a numberOfBlocks-wide mask across it looking for a run of set (free)
// bits. The mask-shift/advance lines are outside this view.
379 block allocate_within_single_control_register(
size_t numberOfBlocks) noexcept {
383 size_t controlIndex = 0;
384 while (controlIndex < controlSize_) {
385 auto currentControlRegister = control_[controlIndex];
// Any set bit at all means this register has at least one free chunk.
388 if (currentControlRegister != 0) {
389 uint64_t mask = (numberOfBlocks == 64) ?
390 all_set : ((uint64_t(1) << numberOfBlocks) - 1);
// Slide the mask over bit positions 0..64-numberOfBlocks.
394 while (i <= 64 - numberOfBlocks) {
395 if ((currentControlRegister & mask) == mask) {
// Claim: clear the run's bits (<false> == mark used).
396 auto newControlRegister = helpers::set_used<false>(currentControlRegister, mask);
398 control_[controlIndex] = newControlRegister;
// Chunk index = controlIndex * 64 + bit offset i.
400 size_t ptrOffset = (controlIndex * 64 + i) * chunk_size_.value();
402 result.
ptr =
static_cast<char *
>(buffer_.
ptr) + ptrOffset;
403 result.
length = numberOfBlocks * chunk_size_.value();
// Strategy for exactly 64 chunks: find one fully-free control register
// (value == all_set) and claim it whole. The line clearing the register is
// outside this view.
417 block allocate_within_complete_control_register(
size_t numberOfBlocks) noexcept {
421 auto freeChunk = std::find_if(control_, control_ + controlSize_,
422 [
this](
const uint64_t &v) {
return v == all_set; });
// No completely free register: out of contiguous space for this strategy.
424 if (freeChunk == control_ + controlSize_) {
429 size_t ptrOffset = ((freeChunk - control_) * 64) * chunk_size_.value();
431 result.
ptr =
static_cast<char *
>(buffer_.
ptr) + ptrOffset;
432 result.
length = numberOfBlocks * chunk_size_.value();
// Strategy for whole multiples of 64 chunks: std::search_n for neededChunks
// consecutive fully-free registers, then claim each (the loop body that
// clears *p — presumably to all_zero — is outside this view; TODO confirm).
437 block allocate_multiple_complete_control_registers(
size_t numberOfBlocks) noexcept {
440 const auto neededChunks =
static_cast<int>(numberOfBlocks / 64);
441 auto freeFirstChunk =
442 std::search_n(control_, control_ + controlSize_, neededChunks, all_set,
443 [](uint64_t
const &v, uint64_t
const &p) {
return v == p; });
445 if (freeFirstChunk == control_ + controlSize_) {
448 auto p = freeFirstChunk;
449 while (p < freeFirstChunk + neededChunks) {
454 size_t ptrOffset = ((freeFirstChunk - control_) * 64) * chunk_size_.value();
456 result.
ptr =
static_cast<char *
>(buffer_.
ptr) + ptrOffset;
457 result.
length = numberOfBlocks * chunk_size_.value();
// General strategy for runs that straddle register boundaries: walk the
// control bitmap byte by byte, accumulating fully-free bytes (8 chunks each)
// into freeBlocksCount until the run is long enough. The scan loop's header,
// the byte test, and the 'lastp' declaration are outside this view, so the
// exact matching granularity (byte-aligned start?) cannot be confirmed here.
462 block allocate_with_register_overlap(
size_t numberOfBlocks) noexcept {
466 auto p =
reinterpret_cast<unsigned char *
>(control_);
468 reinterpret_cast<unsigned char *
>(control_) + controlSize_ *
sizeof(uint64_t);
470 size_t freeBlocksCount(0);
// chunkStart remembers where the current free run began (byte granularity).
471 unsigned char *chunkStart =
nullptr;
479 freeBlocksCount += 8;
480 if (freeBlocksCount >= numberOfBlocks) {
// Run broken: reset and keep scanning.
486 chunkStart =
nullptr;
491 if (p != lastp && freeBlocksCount >= numberOfBlocks) {
// Each control byte covers 8 chunks, hence the * 8.
493 (chunkStart -
reinterpret_cast<unsigned char *
>(control_)) * 8 * chunk_size_.value();
495 result.
ptr =
static_cast<char *
>(buffer_.
ptr) + ptrOffset;
496 result.
length = numberOfBlocks * chunk_size_.value();
// Commit: clear the claimed run's bits (<false> == mark used).
498 set_over_multiple_registers<false>(block_to_context(result));
// Frees a run that occupies whole control registers: blanket-set each of the
// usedChunks/64 registers starting at registerIndex back to all-free.
505 void deallocate_for_multiple_complete_control_register(
const BlockContext &context) noexcept
507 const auto registerToFree = context.registerIndex + context.usedChunks / 64;
508 for (
auto i = context.registerIndex; i < registerToFree; ++i) {
509 control_[i] = all_set;
// Frees a run that straddles register boundaries by setting its bits back to
// free (<true>) across all touched registers.
513 void deallocate_with_control_register_overlap(
const BlockContext &context) noexcept
515 set_over_multiple_registers<true>(context);
519 using namespace v_100;
size_t length
The length, in bytes, of the reserved memory block.
bool reallocate_with_copy(OldAllocator &oldAllocator, NewAllocator &newAllocator, block &b, size_t n) noexcept
constexpr size_t round_to_alignment(size_t basis, size_t n) noexcept
void * ptr
Pointer to the start address of the described memory block.