Ticket #3976: pool_valgrind.patch

File pool_valgrind.patch, 4.2 KB (added by Jeremiah Willcock, 13 years ago)

Patch for Valgrind support
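
The patch wraps boost::pool<>'s chunk allocation and release paths in Memcheck's memory-pool client requests (VALGRIND_CREATE_MEMPOOL, VALGRIND_MEMPOOL_ALLOC, VALGRIND_MEMPOOL_FREE, VALGRIND_MEMPOOL_TRIM, VALGRIND_MAKE_MEM_NOACCESS), guarded by a user-defined VALGRIND macro so that unpatched builds compile exactly as before. Below is a minimal sketch of that annotation pattern on a deliberately simplified bump allocator; "ToyPool" and its members are hypothetical and exist only to illustrate the client requests, they are not part of Boost.Pool or of this patch.

// Sketch only: the same Memcheck annotation pattern the patch applies to
// boost::pool<>, shown on a hypothetical fixed-size bump allocator.
#include <cstddef>
#include <cstdlib>

#ifdef VALGRIND
#include <valgrind/memcheck.h>
#endif // VALGRIND

class ToyPool
{
  public:
    ToyPool(std::size_t chunk_size, std::size_t chunk_count)
      : chunk_size_(chunk_size),
        block_(static_cast<char *>(std::malloc(chunk_size * chunk_count))),
        end_(block_ + chunk_size * chunk_count),
        next_(block_)
    {
#ifdef VALGRIND
      // Register the pool; any unique address serves as the anchor (the patch uses `this`).
      VALGRIND_CREATE_MEMPOOL(this, 0 /* redzone */, 0 /* not zeroed */);
      // From the client's point of view nothing in the superblock is allocated yet.
      VALGRIND_MAKE_MEM_NOACCESS(block_, end_ - block_);
#endif // VALGRIND
    }

    ~ToyPool()
    {
#ifdef VALGRIND
      VALGRIND_DESTROY_MEMPOOL(this);
#endif // VALGRIND
      std::free(block_);
    }

    void * malloc()
    {
      if (static_cast<std::size_t>(end_ - next_) < chunk_size_)
        return 0;                      // out of memory, like pool::malloc()
      void * const result = next_;
      next_ += chunk_size_;
#ifdef VALGRIND
      // Memcheck now tracks the chunk as an allocated, uninitialised block of this pool.
      VALGRIND_MEMPOOL_ALLOC(this, result, chunk_size_);
#endif // VALGRIND
      return result;
    }

    void free(void * const chunk)
    {
#ifdef VALGRIND
      // Any later access to the chunk is reported as an error.
      VALGRIND_MEMPOOL_FREE(this, chunk);
#endif // VALGRIND
      (void) chunk; // unused when the instrumentation is compiled out
      // (A real pool would put the chunk back on a free list; omitted here.)
    }

  private:
    std::size_t chunk_size_;
    char * block_;
    char * end_;
    char * next_;
};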

  • boost/pool/pool.hpp

     
@@ -33,6 +33,10 @@
 // boost::simple_segregated_storage
 #include <boost/pool/simple_segregated_storage.hpp>
 
+#ifdef VALGRIND
+#include <valgrind/memcheck.h>
+#endif // VALGRIND
+
 #ifdef BOOST_NO_STDC_NAMESPACE
  namespace std { using ::malloc; using ::free; }
 #endif
     
@@ -194,9 +198,18 @@
     explicit pool(const size_type nrequested_size,
         const size_type nnext_size = 32)
     :list(0, 0), requested_size(nrequested_size), next_size(nnext_size), start_size(nnext_size)
-    { }
+    {
+#ifdef VALGRIND
+      VALGRIND_CREATE_MEMPOOL(this, 0, 0);
+#endif // VALGRIND
+    }
 
-    ~pool() { purge_memory(); }
+    ~pool() {
+      purge_memory();
+#ifdef VALGRIND
+      VALGRIND_DESTROY_MEMPOOL(this);
+#endif // VALGRIND
+    }
 
     // Releases memory blocks that don't have chunks allocated
     // pre: lists are ordered
     
@@ -218,18 +231,32 @@
     // Returns 0 if out-of-memory
     void * malloc()
     {
+      void * result = 0;
       // Look for a non-empty storage
       if (!store().empty())
-        return store().malloc();
-      return malloc_need_resize();
+        result = store().malloc();
+      else
+        result = malloc_need_resize();
+#ifdef VALGRIND
+      if (result != 0)
+        VALGRIND_MEMPOOL_ALLOC(this, result, requested_size);
+#endif // VALGRIND
+      return result;
     }
 
     void * ordered_malloc()
     {
+      void * result = 0;
       // Look for a non-empty storage
       if (!store().empty())
-        return store().malloc();
-      return ordered_malloc_need_resize();
+        result = store().malloc();
+      else
+        result = ordered_malloc_need_resize();
+#ifdef VALGRIND
+      if (result != 0)
+        VALGRIND_MEMPOOL_ALLOC(this, result, requested_size);
+#endif // VALGRIND
+      return result;
     }
 
     // Returns 0 if out-of-memory
     
@@ -239,12 +266,22 @@
     // pre: 'chunk' must have been previously
     //        returned by *this.malloc().
     void free(void * const chunk)
-    { store().free(chunk); }
+    {
+      store().free(chunk);
+#ifdef VALGRIND
+      VALGRIND_MEMPOOL_FREE(this, chunk);
+#endif // VALGRIND
+    }
 
     // pre: 'chunk' must have been previously
     //        returned by *this.malloc().
     void ordered_free(void * const chunk)
-    { store().ordered_free(chunk); }
+    {
+      store().ordered_free(chunk);
+#ifdef VALGRIND
+      VALGRIND_MEMPOOL_FREE(this, chunk);
+#endif // VALGRIND
+    }
 
     // pre: 'chunk' must have been previously
     //        returned by *this.malloc(n).
     
@@ -256,6 +293,9 @@
           ((total_req_size % partition_size) ? true : false);
 
       store().free_n(chunks, num_chunks, partition_size);
+#ifdef VALGRIND
+      VALGRIND_MEMPOOL_FREE(this, chunks);
+#endif // VALGRIND
     }
 
     // pre: 'chunk' must have been previously
     
@@ -268,6 +308,9 @@
           ((total_req_size % partition_size) ? true : false);
 
       store().ordered_free_n(chunks, num_chunks, partition_size);
+#ifdef VALGRIND
+      VALGRIND_MEMPOOL_FREE(this, chunks);
+#endif // VALGRIND
     }
 
     // is_from() tests a chunk to determine if it was allocated from *this
     
@@ -424,6 +467,10 @@
   this->first = 0;
   next_size = start_size;
 
+#ifdef VALGRIND
+  VALGRIND_MEMPOOL_TRIM(this, 0, 0);
+#endif // VALGRIND
+
   return true;
 }
 
     
@@ -510,8 +557,12 @@
 
   void * ret = store().malloc_n(num_chunks, partition_size);
 
-  if (ret != 0)
+  if (ret != 0) {
+#ifdef VALGRIND
+    VALGRIND_MEMPOOL_ALLOC(this, ret, total_req_size);
+#endif // VALGRIND
     return ret;
+  }
 
   // Not enough memory in our storages; make a new storage,
   BOOST_USING_STD_MAX();
     
@@ -521,6 +572,11 @@
   char * const ptr = UserAllocator::malloc(POD_size);
   if (ptr == 0)
     return 0;
+
+#ifdef VALGRIND
+  VALGRIND_MAKE_MEM_NOACCESS(ptr, POD_size);
+#endif // VALGRIND
+
   const details::PODptr<size_type> node(ptr, POD_size);
 
   // Split up block so we can use what wasn't requested
     
@@ -559,6 +615,10 @@
     prev.next(node);
   }
 
+#ifdef VALGRIND
+    VALGRIND_MEMPOOL_ALLOC(this, node.begin(), total_req_size);
+#endif // VALGRIND
+
   //  and return it.
   return node.begin();
 }
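
With the patch applied and a translation unit built with VALGRIND defined (the macro introduced by the patch), misuse of pool chunks becomes visible to Memcheck: in particular, a chunk accessed after being returned via pool::free() is reported, because VALGRIND_MEMPOOL_FREE marks it inaccessible. The small demonstration below is illustrative only; the file name and the build/run commands in the comments are assumptions, not part of the ticket.

// pool_valgrind_demo.cpp -- illustrative only, not part of the patch.
// One possible way to build and run it (assumed, not from the ticket):
//   g++ -g -DVALGRIND -I<path to patched Boost headers> pool_valgrind_demo.cpp -o pool_valgrind_demo
//   valgrind ./pool_valgrind_demo
#include <boost/pool/pool.hpp>

int main()
{
  boost::pool<> p(sizeof(int));   // pool of int-sized chunks

  void * a = p.malloc();          // reported to Memcheck via VALGRIND_MEMPOOL_ALLOC
  p.free(a);                      // reported via VALGRIND_MEMPOOL_FREE

  // With the instrumentation enabled, Memcheck flags this write,
  // since the chunk was marked inaccessible when it went back to the pool.
  *static_cast<int *>(a) = 42;

  return 0;                       // ~pool(): purge_memory(), then VALGRIND_DESTROY_MEMPOOL
}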