Index: boost/pool/pool.hpp
===================================================================
--- boost/pool/pool.hpp	(revision 60124)
+++ boost/pool/pool.hpp	(working copy)
@@ -33,6 +33,10 @@
 // boost::simple_segregated_storage
 #include <boost/pool/simple_segregated_storage.hpp>
 
+#ifdef VALGRIND
+#include <valgrind/memcheck.h>
+#endif // VALGRIND
+
 #ifdef BOOST_NO_STDC_NAMESPACE
 namespace std { using ::malloc; using ::free; }
 #endif
@@ -194,9 +198,18 @@
     explicit pool(const size_type nrequested_size,
         const size_type nnext_size = 32)
     :list(0, 0), requested_size(nrequested_size), next_size(nnext_size), start_size(nnext_size)
-    { }
+    {
+#ifdef VALGRIND
+      VALGRIND_CREATE_MEMPOOL(this, 0, 0);
+#endif // VALGRIND
+    }
 
-    ~pool() { purge_memory(); }
+    ~pool() {
+      purge_memory();
+#ifdef VALGRIND
+      VALGRIND_DESTROY_MEMPOOL(this);
+#endif // VALGRIND
+    }
 
     // Releases memory blocks that don't have chunks allocated
     // pre: lists are ordered
@@ -218,18 +231,32 @@
     // Returns 0 if out-of-memory
     void * malloc()
     {
+      void * result = 0;
       // Look for a non-empty storage
       if (!store().empty())
-        return store().malloc();
-      return malloc_need_resize();
+        result = store().malloc();
+      else
+        result = malloc_need_resize();
+#ifdef VALGRIND
+      if (result != 0)
+        VALGRIND_MEMPOOL_ALLOC(this, result, requested_size);
+#endif // VALGRIND
+      return result;
     }
 
     void * ordered_malloc()
     {
+      void * result = 0;
       // Look for a non-empty storage
       if (!store().empty())
-        return store().malloc();
-      return ordered_malloc_need_resize();
+        result = store().malloc();
+      else
+        result = ordered_malloc_need_resize();
+#ifdef VALGRIND
+      if (result != 0)
+        VALGRIND_MEMPOOL_ALLOC(this, result, requested_size);
+#endif // VALGRIND
+      return result;
     }
 
     // Returns 0 if out-of-memory
@@ -239,12 +266,22 @@
     // pre: 'chunk' must have been previously
     //        returned by *this.malloc().
     void free(void * const chunk)
-    { store().free(chunk); }
+    {
+      store().free(chunk);
+#ifdef VALGRIND
+      VALGRIND_MEMPOOL_FREE(this, chunk);
+#endif // VALGRIND
+    }
 
     // pre: 'chunk' must have been previously
     //        returned by *this.malloc().
     void ordered_free(void * const chunk)
-    { store().ordered_free(chunk); }
+    {
+      store().ordered_free(chunk);
+#ifdef VALGRIND
+      VALGRIND_MEMPOOL_FREE(this, chunk);
+#endif // VALGRIND
+    }
 
     // pre: 'chunk' must have been previously
     //        returned by *this.malloc(n).
@@ -256,6 +293,9 @@
           ((total_req_size % partition_size) ? true : false);
 
       store().free_n(chunks, num_chunks, partition_size);
+#ifdef VALGRIND
+      VALGRIND_MEMPOOL_FREE(this, chunks);
+#endif // VALGRIND
     }
 
     // pre: 'chunk' must have been previously
@@ -268,6 +308,9 @@
           ((total_req_size % partition_size) ?
         true : false);
       store().ordered_free_n(chunks, num_chunks, partition_size);
+#ifdef VALGRIND
+      VALGRIND_MEMPOOL_FREE(this, chunks);
+#endif // VALGRIND
     }
 
     // is_from() tests a chunk to determine if it was allocated from *this
@@ -424,6 +467,10 @@
   this->first = 0;
   next_size = start_size;
 
+#ifdef VALGRIND
+  VALGRIND_MEMPOOL_TRIM(this, 0, 0);
+#endif // VALGRIND
+
   return true;
 }
 
@@ -510,8 +557,12 @@
 
   void * ret = store().malloc_n(num_chunks, partition_size);
 
-  if (ret != 0)
+  if (ret != 0) {
+#ifdef VALGRIND
+    VALGRIND_MEMPOOL_ALLOC(this, ret, total_req_size);
+#endif // VALGRIND
     return ret;
+  }
 
   // Not enough memory in our storages; make a new storage,
   BOOST_USING_STD_MAX();
@@ -521,6 +572,11 @@
   char * const ptr = UserAllocator::malloc(POD_size);
   if (ptr == 0)
     return 0;
+
+#ifdef VALGRIND
+  VALGRIND_MAKE_MEM_NOACCESS(ptr, POD_size);
+#endif // VALGRIND
+
   const details::PODptr<size_type> node(ptr, POD_size);
 
   // Split up block so we can use what wasn't requested
@@ -559,6 +615,10 @@
     prev.next(node);
   }
 
+#ifdef VALGRIND
+  VALGRIND_MEMPOOL_ALLOC(this, node.begin(), total_req_size);
+#endif // VALGRIND
+
   // and return it.
   return node.begin();
 }
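
For reference, a minimal smoke test of the instrumentation; this is not part of the patch, and the file name and build flags are illustrative. It assumes the patch above is applied, Valgrind's headers are on the include path, and the code is compiled with -DVALGRIND so the client-request macros are enabled:

    // test_pool_valgrind.cpp -- hypothetical example, not from the patch.
    // Build: g++ -DVALGRIND test_pool_valgrind.cpp -o test_pool
    // Run:   valgrind ./test_pool
    #include <boost/pool/pool.hpp>

    int main()
    {
      boost::pool<> p(sizeof(int));   // ctor issues VALGRIND_CREATE_MEMPOOL

      int * i = static_cast<int *>(p.malloc());  // VALGRIND_MEMPOOL_ALLOC
      *i = 42;                        // OK: chunk is addressable

      p.free(i);                      // VALGRIND_MEMPOOL_FREE marks the chunk noaccess
      *i = 43;                        // use-after-free: Memcheck reports an
                                      // invalid write into the mempool

      return 0;                       // ~pool() purges memory, then
                                      // VALGRIND_DESTROY_MEMPOOL
    }

Without -DVALGRIND the client-request macros compile away entirely, so an uninstrumented build behaves as before.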