Ticket #3976: pool_valgrind.patch
File pool_valgrind.patch, 4.2 KB (attachment added 13 years ago; author field missing in the original ticket metadata)
---
boost/pool/pool.hpp
33 33 // boost::simple_segregated_storage 34 34 #include <boost/pool/simple_segregated_storage.hpp> 35 35 36 #ifdef VALGRIND 37 #include <valgrind/memcheck.h> 38 #endif // VALGRIND 39 36 40 #ifdef BOOST_NO_STDC_NAMESPACE 37 41 namespace std { using ::malloc; using ::free; } 38 42 #endif … … 194 198 explicit pool(const size_type nrequested_size, 195 199 const size_type nnext_size = 32) 196 200 :list(0, 0), requested_size(nrequested_size), next_size(nnext_size), start_size(nnext_size) 197 { } 201 { 202 #ifdef VALGRIND 203 VALGRIND_CREATE_MEMPOOL(this, 0, 0); 204 #endif // VALGRIND 205 } 198 206 199 ~pool() { purge_memory(); } 207 ~pool() { 208 purge_memory(); 209 #ifdef VALGRIND 210 VALGRIND_DESTROY_MEMPOOL(this); 211 #endif // VALGRIND 212 } 200 213 201 214 // Releases memory blocks that don't have chunks allocated 202 215 // pre: lists are ordered … … 218 231 // Returns 0 if out-of-memory 219 232 void * malloc() 220 233 { 234 void * result = 0; 221 235 // Look for a non-empty storage 222 236 if (!store().empty()) 223 return store().malloc(); 224 return malloc_need_resize(); 237 result = store().malloc(); 238 else 239 result = malloc_need_resize(); 240 #ifdef VALGRIND 241 if (result != 0) 242 VALGRIND_MEMPOOL_ALLOC(this, result, requested_size); 243 #endif // VALGRIND 244 return result; 225 245 } 226 246 227 247 void * ordered_malloc() 228 248 { 249 void * result = 0; 229 250 // Look for a non-empty storage 230 251 if (!store().empty()) 231 return store().malloc(); 232 return ordered_malloc_need_resize(); 252 result = store().malloc(); 253 else 254 result = ordered_malloc_need_resize(); 255 #ifdef VALGRIND 256 if (result != 0) 257 VALGRIND_MEMPOOL_ALLOC(this, result, requested_size); 258 #endif // VALGRIND 259 return result; 233 260 } 234 261 235 262 // Returns 0 if out-of-memory … … 239 266 // pre: 'chunk' must have been previously 240 267 // returned by *this.malloc(). 
241 268 void free(void * const chunk) 242 { store().free(chunk); } 269 { 270 store().free(chunk); 271 #ifdef VALGRIND 272 VALGRIND_MEMPOOL_FREE(this, chunk); 273 #endif // VALGRIND 274 } 243 275 244 276 // pre: 'chunk' must have been previously 245 277 // returned by *this.malloc(). 246 278 void ordered_free(void * const chunk) 247 { store().ordered_free(chunk); } 279 { 280 store().ordered_free(chunk); 281 #ifdef VALGRIND 282 VALGRIND_MEMPOOL_FREE(this, chunk); 283 #endif // VALGRIND 284 } 248 285 249 286 // pre: 'chunk' must have been previously 250 287 // returned by *this.malloc(n). … … 256 293 ((total_req_size % partition_size) ? true : false); 257 294 258 295 store().free_n(chunks, num_chunks, partition_size); 296 #ifdef VALGRIND 297 VALGRIND_MEMPOOL_FREE(this, chunks); 298 #endif // VALGRIND 259 299 } 260 300 261 301 // pre: 'chunk' must have been previously … … 268 308 ((total_req_size % partition_size) ? true : false); 269 309 270 310 store().ordered_free_n(chunks, num_chunks, partition_size); 311 #ifdef VALGRIND 312 VALGRIND_MEMPOOL_FREE(this, chunks); 313 #endif // VALGRIND 271 314 } 272 315 273 316 // is_from() tests a chunk to determine if it was allocated from *this … … 424 467 this->first = 0; 425 468 next_size = start_size; 426 469 470 #ifdef VALGRIND 471 VALGRIND_MEMPOOL_TRIM(this, 0, 0); 472 #endif // VALGRIND 473 427 474 return true; 428 475 } 429 476 … … 510 557 511 558 void * ret = store().malloc_n(num_chunks, partition_size); 512 559 513 if (ret != 0) 560 if (ret != 0) { 561 #ifdef VALGRIND 562 VALGRIND_MEMPOOL_ALLOC(this, ret, total_req_size); 563 #endif // VALGRIND 514 564 return ret; 565 } 515 566 516 567 // Not enougn memory in our storages; make a new storage, 517 568 BOOST_USING_STD_MAX(); … … 521 572 char * const ptr = UserAllocator::malloc(POD_size); 522 573 if (ptr == 0) 523 574 return 0; 575 576 #ifdef VALGRIND 577 VALGRIND_MAKE_MEM_NOACCESS(ptr, POD_size); 578 #endif // VALGRIND 579 524 580 const details::PODptr<size_type> node(ptr, 
POD_size); 525 581 526 582 // Split up block so we can use what wasn't requested … … 559 615 prev.next(node); 560 616 } 561 617 618 #ifdef VALGRIND 619 VALGRIND_MEMPOOL_ALLOC(this, node.begin(), total_req_size); 620 #endif // VALGRIND 621 562 622 // and return it. 563 623 return node.begin(); 564 624 }