From 8fceb9a5ef723aa0374dacb39987b1a678ce7224 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonathan=20M=C3=BCller?=
Date: Sat, 16 Apr 2016 17:35:29 +0200
Subject: [PATCH 1/5] Fix crash when calling memory_stack::allocate() on a moved-from object

Also add a test case to prevent further bugs.
---
 include/foonathan/memory/memory_stack.hpp |  2 +-
 test/memory_stack.cpp                     | 14 ++++++++++++++
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/include/foonathan/memory/memory_stack.hpp b/include/foonathan/memory/memory_stack.hpp
index 2854c80c..95582931 100644
--- a/include/foonathan/memory/memory_stack.hpp
+++ b/include/foonathan/memory/memory_stack.hpp
@@ -83,7 +83,7 @@ namespace foonathan { namespace memory
 
             auto fence  = detail::debug_fence_size;
             auto offset = detail::align_offset(stack_.top() + fence, alignment);
-            if (fence + offset + size + fence <= std::size_t(block_end() - stack_.top()))
+            if (stack_.top() && fence + offset + size + fence <= std::size_t(block_end() - stack_.top()))
             {
                 stack_.bump(fence, debug_magic::fence_memory);
                 stack_.bump(offset, debug_magic::alignment_memory);
diff --git a/test/memory_stack.cpp b/test/memory_stack.cpp
index 07cc8428..94fd03ff 100644
--- a/test/memory_stack.cpp
+++ b/test/memory_stack.cpp
@@ -74,5 +74,19 @@ TEST_CASE("memory_stack", "[stack]")
         REQUIRE(alloc.no_allocated() == 1u);
         REQUIRE(alloc.no_deallocated() == 1u);
     }
+    SECTION("move")
+    {
+        auto other = detail::move(stack);
+        auto m = other.top();
+        other.allocate(10, 1);
+        REQUIRE(alloc.no_allocated() == 1u);
+
+        stack.allocate(10, 1);
+        REQUIRE(alloc.no_allocated() == 2u);
+
+        stack = detail::move(other);
+        REQUIRE(alloc.no_allocated() == 1u);
+        stack.unwind(m);
+    }
 }

From c1e39ac392368e3a933887fa244655c529e83422 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonathan=20M=C3=BCller?=
Date: Sat, 16 Apr 2016 17:49:51 +0200
Subject: [PATCH 2/5] Fix problems with alignment in memory_stack::allocate() on growth

When the memory_stack grows, alignment for over-aligned types is not
guaranteed. This is fixed. Also restructure the allocate() function and
add more robust checking for unsupported sizes.
---
 include/foonathan/memory/memory_stack.hpp | 23 ++++++++++-------------
 1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/include/foonathan/memory/memory_stack.hpp b/include/foonathan/memory/memory_stack.hpp
index 95582931..32fa57ca 100644
--- a/include/foonathan/memory/memory_stack.hpp
+++ b/include/foonathan/memory/memory_stack.hpp
@@ -78,27 +78,24 @@ namespace foonathan { namespace memory
         /// \requires \c size and \c alignment must be valid.
         void* allocate(std::size_t size, std::size_t alignment)
         {
-            detail::check_allocation_size(size, next_capacity(), info());
-
             auto fence  = detail::debug_fence_size;
             auto offset = detail::align_offset(stack_.top() + fence, alignment);
-            if (stack_.top() && fence + offset + size + fence <= std::size_t(block_end() - stack_.top()))
-            {
-                stack_.bump(fence, debug_magic::fence_memory);
-                stack_.bump(offset, debug_magic::alignment_memory);
-            }
-            else
+            if (!stack_.top() || fence + offset + size + fence > std::size_t(block_end() - stack_.top()))
             {
+                // need to grow
                 auto block = arena_.allocate_block();
-                FOONATHAN_MEMORY_ASSERT_MSG(fence + size + fence <= block.size, "new block size not big enough");
-
                 stack_ = detail::fixed_memory_stack(block.memory);
-                // no need to align, block should be aligned for maximum
-                stack_.bump(fence, debug_magic::fence_memory);
+
+                // new alignment required for over-aligned types
+                offset = detail::align_offset(stack_.top() + fence, alignment);
+
+                auto needed = fence + offset + size + fence;
+                detail::check_allocation_size(needed, block.size, info());
             }
-            FOONATHAN_MEMORY_ASSERT(detail::is_aligned(stack_.top(), alignment));
 
+            stack_.bump(fence, debug_magic::fence_memory);
+            stack_.bump(offset, debug_magic::alignment_memory);
             auto mem = stack_.bump_return(size);
             stack_.bump(fence, debug_magic::fence_memory);
             return mem;

From 09a09f60b81bdba4ddba04ffb8fab2be476bfa9f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonathan=20M=C3=BCller?=
Date: Sat, 16 Apr 2016 19:52:52 +0200
Subject: [PATCH 3/5] Fix next_capacity() so that it returns exact results

---
 include/foonathan/memory/detail/free_list.hpp | 14 ++++++++++++++
 .../memory/detail/small_free_list.hpp         |  4 ++++
 include/foonathan/memory/memory_arena.hpp     | 22 +++++++++++++++++++---
 include/foonathan/memory/memory_pool.hpp      |  7 +++----
 .../memory/memory_pool_collection.hpp         | 13 +++++--------
 include/foonathan/memory/memory_stack.hpp     |  6 +++---
 src/detail/small_free_list.cpp                | 11 +++++++++++
 src/memory_arena.cpp                          |  2 ++
 8 files changed, 61 insertions(+), 18 deletions(-)

diff --git a/include/foonathan/memory/detail/free_list.hpp b/include/foonathan/memory/detail/free_list.hpp
index 5d53274a..3559718a 100644
--- a/include/foonathan/memory/detail/free_list.hpp
+++ b/include/foonathan/memory/detail/free_list.hpp
@@ -48,6 +48,13 @@ namespace foonathan { namespace memory
             // pre: size != 0
             void insert(void *mem, std::size_t size) FOONATHAN_NOEXCEPT;
 
+            // returns the usable size
+            // i.e. how much memory will actually be inserted and usable after a call to insert()
+            std::size_t usable_size(std::size_t size) const FOONATHAN_NOEXCEPT
+            {
+                return size;
+            }
+
             // returns a single block from the list
             // pre: !empty()
             void* allocate() FOONATHAN_NOEXCEPT;
@@ -131,6 +138,13 @@ namespace foonathan { namespace memory
             // pre: size != 0
             void insert(void *mem, std::size_t size) FOONATHAN_NOEXCEPT;
 
+            // returns the usable size
+            // i.e. how much memory will actually be inserted and usable after a call to insert()
+            std::size_t usable_size(std::size_t size) const FOONATHAN_NOEXCEPT
+            {
+                return size;
+            }
+
             // returns a single block from the list
             // pre: !empty()
             void* allocate() FOONATHAN_NOEXCEPT;
diff --git a/include/foonathan/memory/detail/small_free_list.hpp b/include/foonathan/memory/detail/small_free_list.hpp
index d7a0bd62..e3057a27 100644
--- a/include/foonathan/memory/detail/small_free_list.hpp
+++ b/include/foonathan/memory/detail/small_free_list.hpp
@@ -69,6 +69,10 @@ namespace foonathan { namespace memory
             // mem must be aligned for maximum alignment
             void insert(void *mem, std::size_t size) FOONATHAN_NOEXCEPT;
 
+            // returns the usable size
+            // i.e. how much memory will actually be inserted and usable after a call to insert()
+            std::size_t usable_size(std::size_t size) const FOONATHAN_NOEXCEPT;
+
             // allocates a node big enough for the node size
             // pre: !empty()
             void* allocate() FOONATHAN_NOEXCEPT;
diff --git a/include/foonathan/memory/memory_arena.hpp b/include/foonathan/memory/memory_arena.hpp
index 5ca461ae..919fc1aa 100644
--- a/include/foonathan/memory/memory_arena.hpp
+++ b/include/foonathan/memory/memory_arena.hpp
@@ -111,6 +111,9 @@ namespace foonathan { namespace memory
             // the inserted block slightly smaller to allow for the fixup value
             using inserted_mb = memory_block;
 
+            // how much an inserted block is smaller
+            static const std::size_t implementation_offset;
+
             // pushes a memory block
             void push(allocated_mb block) FOONATHAN_NOEXCEPT;
 
@@ -169,6 +172,12 @@ namespace foonathan { namespace memory
            {
                return cached_.size();
            }
+
+            std::size_t cached_block_size() const FOONATHAN_NOEXCEPT
+            {
+                return cached_.top().size;
+            }
+
            bool take_from_cache(detail::memory_block_stack &used) FOONATHAN_NOEXCEPT
            {
                if (cached_.empty())
@@ -214,6 +223,11 @@ namespace foonathan { namespace memory
                return 0u;
            }
 
+            std::size_t cached_block_size() const FOONATHAN_NOEXCEPT
+            {
+                return 0u;
+            }
+
            bool take_from_cache(detail::memory_block_stack &) FOONATHAN_NOEXCEPT
            {
                return false;
@@ -362,11 +376,13 @@ namespace foonathan { namespace memory
         }
 
         /// \returns The size of the next memory block,
-        /// i.e. of the next call to \ref allocate_block() if it does not use the cache.
-        /// This simply forwards to the \concept{concept_blockallocator,BlockAllocator}.
+        /// i.e. of the next call to \ref allocate_block().
+        /// If there are blocks in the cache, returns the size of the next one.
+        /// Otherwise forwards to the \concept{concept_blockallocator,BlockAllocator} and subtracts an implementation offset.
         std::size_t next_block_size() const FOONATHAN_NOEXCEPT
         {
-            return allocator_type::next_block_size();
+            return this->cache_empty() ? allocator_type::next_block_size() - detail::memory_block_stack::implementation_offset
+                                       : this->cached_block_size();
         }
 
         /// \returns A reference of the \concept{concept_blockallocator,BlockAllocator} object.
diff --git a/include/foonathan/memory/memory_pool.hpp b/include/foonathan/memory/memory_pool.hpp
index ea6d213f..86843e59 100644
--- a/include/foonathan/memory/memory_pool.hpp
+++ b/include/foonathan/memory/memory_pool.hpp
@@ -153,12 +153,11 @@ namespace foonathan { namespace memory
         }
 
         /// \returns The size of the next memory block after the free list gets empty and the arena grows.
-        /// This function just forwards to the \ref memory_arena.
-        /// \note Due to fence memory, alignment buffers and the like this may not be the exact result \ref capacity_left() will return,
-        /// but it is an upper bound to it.
+        /// \ref capacity_left() will increase by this amount.
+        /// \note Due to fence memory in debug mode, this cannot simply be divided by \ref node_size() to get the number of nodes.
         std::size_t next_capacity() const FOONATHAN_NOEXCEPT
         {
-            return arena_.next_block_size();
+            return free_list_.usable_size(arena_.next_block_size());
         }
 
         /// \returns A reference to the \concept{concept_blockallocator,BlockAllocator} used for managing the arena.
diff --git a/include/foonathan/memory/memory_pool_collection.hpp b/include/foonathan/memory/memory_pool_collection.hpp
index 69dca7fa..769984d0 100644
--- a/include/foonathan/memory/memory_pool_collection.hpp
+++ b/include/foonathan/memory/memory_pool_collection.hpp
@@ -139,9 +139,7 @@ namespace foonathan { namespace memory
         void* allocate_array(std::size_t count, std::size_t node_size)
         {
             detail::check_allocation_size(node_size, [&]{return max_node_size();}, info());
-            detail::check_allocation_size(count * node_size,
-                                          [&]{return PoolType::value ? next_capacity() : 0u;},
-                                          info());
+
             auto& pool = pools_.get(node_size);
             if (pool.empty())
                 reserve_impl(pool, def_capacity());
@@ -208,16 +206,15 @@ namespace foonathan { namespace memory
         /// \returns The amount of memory available in the arena not inside the free lists.
         /// This is the number of bytes that can be inserted into the free lists
         /// without requesting more memory from the \concept{concept_blockallocator,BlockAllocator}.
-        /// \note Array allocations may lead to a growth even if the capacity_left is big enough.
+        /// \note Array allocations may lead to a growth even if the capacity is big enough.
         std::size_t capacity() const FOONATHAN_NOEXCEPT
         {
             return std::size_t(block_end() - stack_.top());
         }
 
-        /// \returns The size of the next memory block after the free list gets empty and the arena grows.
-        /// This function just forwards to the \ref memory_arena.
-        /// \note Due to fence memory, alignment buffers and the like this may not be the exact result \ref capacity() will return,
-        /// but it is an upper bound to it.
+        /// \returns The size of the next memory block after \ref capacity() is exhausted and the arena grows.
+        /// This is the amount of memory that can be distributed in the pools.
+        /// \note If the `PoolType` is \ref small_node_pool, the exact usable memory is lower than that.
         std::size_t next_capacity() const FOONATHAN_NOEXCEPT
         {
             return arena_.next_block_size();
diff --git a/include/foonathan/memory/memory_stack.hpp b/include/foonathan/memory/memory_stack.hpp
index 32fa57ca..0107e2ce 100644
--- a/include/foonathan/memory/memory_stack.hpp
+++ b/include/foonathan/memory/memory_stack.hpp
@@ -169,10 +169,10 @@ namespace foonathan { namespace memory
             return std::size_t(block_end() - stack_.top());
         }
 
-        /// \returns The size of the next memory block after the free list gets empty and the arena grows.
+        /// \returns The size of the next memory block after the current block is exhausted and the arena grows.
         /// This function just forwards to the \ref memory_arena.
-        /// \note Due to fence memory, alignment buffers and the like this may not be the exact result \ref capacity_left() will return,
-        /// but it is an upper bound to it.
+        /// \note All of it is available for the stack to use, but due to fences and alignment buffers,
+        /// this may not be the exact amount of memory usable for the user.
         std::size_t next_capacity() const FOONATHAN_NOEXCEPT
         {
             return arena_.next_block_size();
diff --git a/src/detail/small_free_list.cpp b/src/detail/small_free_list.cpp
index c81e2683..79d45e37 100644
--- a/src/detail/small_free_list.cpp
+++ b/src/detail/small_free_list.cpp
@@ -277,6 +277,17 @@ void small_free_memory_list::insert(void *mem, std::size_t size) FOONATHAN_NOEXC
     capacity_ += new_nodes;
 }
 
+std::size_t small_free_memory_list::usable_size(std::size_t size) const FOONATHAN_NOEXCEPT
+{
+    auto actual_size = node_size_ + 2 * fence_size();
+    auto total_chunk_size = chunk::memory_offset + actual_size * chunk::max_nodes;
+    auto no_chunks = size / total_chunk_size;
+    auto remainder = size % total_chunk_size;
+
+    return no_chunks * chunk::max_nodes * actual_size +
+           (remainder > chunk::memory_offset ? remainder - chunk::memory_offset : 0u);
+}
+
 void* small_free_memory_list::allocate() FOONATHAN_NOEXCEPT
 {
     auto chunk = find_chunk_impl(1);
diff --git a/src/memory_arena.cpp b/src/memory_arena.cpp
index 55950ad9..225ebe69 100644
--- a/src/memory_arena.cpp
+++ b/src/memory_arena.cpp
@@ -15,6 +15,8 @@ const std::size_t memory_block_stack::node::div_alignment = sizeof(memory_block_
 const std::size_t memory_block_stack::node::mod_offset = sizeof(memory_block_stack::node) % max_alignment != 0u;
 const std::size_t memory_block_stack::node::offset = (div_alignment + mod_offset) * max_alignment;
 
+const std::size_t memory_block_stack::implementation_offset = memory_block_stack::node::offset;
+
 void memory_block_stack::push(allocated_mb block) FOONATHAN_NOEXCEPT
 {
     FOONATHAN_MEMORY_ASSERT(is_aligned(block.memory, max_alignment));

From 7286cb6706e13f544cc9b4dc7e0d3bb9d4057053 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonathan=20M=C3=BCller?=
Date: Sat, 16 Apr 2016 20:04:35 +0200
Subject: [PATCH 4/5] Make naming of capacity functions in memory_pool_collection consistent

Now it uses the proper capacity_left().
---
 include/foonathan/memory/memory_pool_collection.hpp | 6 +++---
 test/memory_pool_collection.cpp                     | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/foonathan/memory/memory_pool_collection.hpp b/include/foonathan/memory/memory_pool_collection.hpp
index 769984d0..78fc5ae8 100644
--- a/include/foonathan/memory/memory_pool_collection.hpp
+++ b/include/foonathan/memory/memory_pool_collection.hpp
@@ -197,7 +197,7 @@ namespace foonathan { namespace memory
         /// as defined over the \c BucketDistribution.
         /// This is the number of nodes that can be allocated without the free list requesting more memory from the arena.
         /// \note Array allocations may lead to a growth even if the capacity_left is big enough.
-        std::size_t pool_capacity(std::size_t node_size) const FOONATHAN_NOEXCEPT
+        std::size_t pool_capacity_left(std::size_t node_size) const FOONATHAN_NOEXCEPT
         {
             FOONATHAN_MEMORY_ASSERT_MSG(node_size <= max_node_size(), "node_size too big");
             return pools_.get(node_size).capacity();
@@ -207,12 +207,12 @@ namespace foonathan { namespace memory
         /// \returns The amount of memory available in the arena not inside the free lists.
         /// This is the number of bytes that can be inserted into the free lists
         /// without requesting more memory from the \concept{concept_blockallocator,BlockAllocator}.
         /// \note Array allocations may lead to a growth even if the capacity is big enough.
-        std::size_t capacity() const FOONATHAN_NOEXCEPT
+        std::size_t capacity_left() const FOONATHAN_NOEXCEPT
         {
             return std::size_t(block_end() - stack_.top());
         }
 
-        /// \returns The size of the next memory block after \ref capacity() is exhausted and the arena grows.
+        /// \returns The size of the next memory block after \ref capacity_left() is exhausted and the arena grows.
         /// This is the amount of memory that can be distributed in the pools.
         /// \note If the `PoolType` is \ref small_node_pool, the exact usable memory is lower than that.
         std::size_t next_capacity() const FOONATHAN_NOEXCEPT
diff --git a/test/memory_pool_collection.cpp b/test/memory_pool_collection.cpp
index 2bdb4d34..9fa1f722 100644
--- a/test/memory_pool_collection.cpp
+++ b/test/memory_pool_collection.cpp
@@ -24,12 +24,12 @@ TEST_CASE("memory_pool_collection", "[pool]")
     const auto max_size = 16u;
     pools pool(max_size, 1000, alloc);
     REQUIRE(pool.max_node_size() == max_size);
-    REQUIRE(pool.capacity() <= 1000u);
+    REQUIRE(pool.capacity_left() <= 1000u);
     REQUIRE(pool.next_capacity() >= 1000u);
     REQUIRE(alloc.no_allocated() == 1u);
 
     for (auto i = 0u; i != max_size; ++i)
-        REQUIRE(pool.pool_capacity(i) == 0u);
+        REQUIRE(pool.pool_capacity_left(i) == 0u);
 
     SECTION("normal alloc/dealloc")
     {
@@ -40,7 +40,7 @@ TEST_CASE("memory_pool_collection", "[pool]")
             b.push_back(pool.allocate_node(5));
         }
         REQUIRE(alloc.no_allocated() == 1u);
-        REQUIRE(pool.capacity() <= 1000u);
+        REQUIRE(pool.capacity_left() <= 1000u);
 
         std::shuffle(a.begin(), a.end(), std::mt19937{});
         std::shuffle(b.begin(), b.end(), std::mt19937{});

From a555ddfd1dbb9449bffdcad7e3e8b50abc14973e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonathan=20M=C3=BCller?=
Date: Sat, 16 Apr 2016 20:44:56 +0200
Subject: [PATCH 5/5] Improve array allocations in memory_pool_collection

Now it uses the shared stack directly and supports free lists that do not
support array allocations natively. Also change internal requirements for
those free lists.
---
 .../memory/detail/small_free_list.hpp |  9 ++++++---
 .../memory/memory_pool_collection.hpp | 38 ++++++++++++++++++++++++--------------
 2 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/include/foonathan/memory/detail/small_free_list.hpp b/include/foonathan/memory/detail/small_free_list.hpp
index e3057a27..cce655d3 100644
--- a/include/foonathan/memory/detail/small_free_list.hpp
+++ b/include/foonathan/memory/detail/small_free_list.hpp
@@ -77,7 +77,7 @@ namespace foonathan { namespace memory
             // pre: !empty()
             void* allocate() FOONATHAN_NOEXCEPT;
 
-            // must not be called
+            // always returns nullptr, because array allocations are not supported
             void* allocate(std::size_t) FOONATHAN_NOEXCEPT
             {
                 return nullptr;
@@ -86,8 +86,11 @@ namespace foonathan { namespace memory
             // deallocates the node previously allocated via allocate()
             void deallocate(void *node) FOONATHAN_NOEXCEPT;
 
-            // must not be called
-            void deallocate(void *, std::size_t) FOONATHAN_NOEXCEPT {}
+            // forwards to insert()
+            void deallocate(void *mem, std::size_t size) FOONATHAN_NOEXCEPT
+            {
+                insert(mem, size);
+            }
 
             // hint for allocate() to be prepared to allocate n nodes
             // it searches for a chunk that has n nodes free
diff --git a/include/foonathan/memory/memory_pool_collection.hpp b/include/foonathan/memory/memory_pool_collection.hpp
index 78fc5ae8..7eb58930 100644
--- a/include/foonathan/memory/memory_pool_collection.hpp
+++ b/include/foonathan/memory/memory_pool_collection.hpp
@@ -123,8 +123,14 @@ namespace foonathan { namespace memory
             detail::check_allocation_size(node_size, [&]{return max_node_size();}, info());
             auto& pool = pools_.get(node_size);
             if (pool.empty())
-                reserve_impl(pool, def_capacity());
-            return pool.allocate();
+            {
+                auto block = reserve_memory(pool, def_capacity());
+                pool.insert(block.memory, block.size);
+            }
+
+            auto mem = pool.allocate();
+
FOONATHAN_MEMORY_ASSERT(mem); + return mem; } /// \effects Allocates an \concept{concept_array,array} of nodes by searching for \c n continuous nodes on the appropriate free list and removing them. @@ -141,16 +147,20 @@ namespace foonathan { namespace memory detail::check_allocation_size(node_size, [&]{return max_node_size();}, info()); auto& pool = pools_.get(node_size); - if (pool.empty()) - reserve_impl(pool, def_capacity()); - auto mem = pool.allocate(count * node_size); + + // try allocating if not empty + // for pools without array allocation support, allocate() will always return nullptr + auto mem = pool.empty() ? nullptr : pool.allocate(count * node_size); if (!mem) { - reserve_impl(pool, count * node_size); - mem = pool.allocate(count * node_size); - if (!mem) - FOONATHAN_THROW(bad_array_size(info(), count * node_size, next_capacity())); + // use stack for allocation + detail::check_allocation_size(count * node_size, + [&]{return next_capacity() - pool.alignment() + 1;}, + info()); + mem = reserve_memory(pool, count * node_size).memory; + FOONATHAN_MEMORY_ASSERT(mem); } + return mem; } @@ -167,8 +177,9 @@ namespace foonathan { namespace memory /// i.e. either this allocator object or a new object created by moving this to it. void deallocate_array(void *ptr, std::size_t count, std::size_t node_size) FOONATHAN_NOEXCEPT { - FOONATHAN_MEMORY_ASSERT_MSG(PoolType::value, "array allocations not supported"); auto& pool = pools_.get(node_size); + // deallocate array + // for pools without array allocation support, this will simply forward to insert() pool.deallocate(ptr, count * node_size); } @@ -183,7 +194,7 @@ namespace foonathan { namespace memory { FOONATHAN_MEMORY_ASSERT_MSG(node_size <= max_node_size(), "node_size too big"); auto& pool = pools_.get(node_size); - reserve_impl(pool, capacity); + reserve_memory(pool, capacity); } /// \returns The maximum node size for which is a free list. @@ -249,7 +260,7 @@ namespace foonathan { namespace memory return static_cast(block.memory) + block.size; } - void reserve_impl(typename pool_type::type &pool, std::size_t capacity) + memory_block reserve_memory(typename pool_type::type &pool, std::size_t capacity) { auto mem = stack_.allocate(block_end(), capacity, detail::max_alignment); if (!mem) @@ -272,8 +283,7 @@ namespace foonathan { namespace memory mem = stack_.allocate(block_end(), capacity, detail::max_alignment); FOONATHAN_MEMORY_ASSERT(mem); } - // insert new - pool.insert(mem, capacity); + return {mem, capacity}; } memory_arena arena_;
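
For readers who want to see the user-facing behaviour that patch 1 establishes, here is a minimal usage sketch. It mirrors the SECTION("move") test added in that patch; the include path, the memory_stack<> default template argument, and the constructor taking an initial block size are assumptions about the library's public interface at the time, not something spelled out in the patches.

```cpp
// Minimal sketch of the moved-from scenario fixed by patch 1 (assumed public API).
#include <utility>

#include <foonathan/memory/memory_stack.hpp>

int main()
{
    using foonathan::memory::memory_stack;

    memory_stack<> stack(4096); // arena starts with one block of 4096 bytes
    stack.allocate(10, 1);      // served from the current block

    auto other = std::move(stack); // 'stack' is now moved-from: its top pointer is null
    other.allocate(10, 1);         // fine, 'other' owns the block now

    // Before the fix this call crashed, because the moved-from stack has no current block.
    // With the added stack_.top() check, allocate() takes the growth path and requests
    // a fresh block from the arena instead.
    stack.allocate(10, 1);
}
```

This is the same sequence the new test exercises with a tracking allocator, which is why the test expects a second block allocation after calling allocate() on the moved-from stack.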