unrealircd

- supernets unrealircd source & configuration
git clone git://git.acid.vegas/unrealircd.git

mempool.c (21672B)

      1 /*
      2  * Copyright (c) 2007-2012, The Tor Project, Inc.
      3  *
      4  * Redistribution and use in source and binary forms, with or without
      5  * modification, are permitted provided that the following conditions are
      6  * met:
      7  *
      8  *   * Redistributions of source code must retain the above copyright
      9  *     notice, this list of conditions and the following disclaimer.
     10  *
     11  *   * Redistributions in binary form must reproduce the above
     12  *     copyright notice, this list of conditions and the following disclaimer
     13  *     in the documentation and/or other materials provided with the
     14  *     distribution.
     15  *
     16  *   * Neither the names of the copyright owners nor the names of its
     17  *     contributors may be used to endorse or promote products derived from
     18  *     this software without specific prior written permission.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     23  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     24  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     25  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     26  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     30  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 /* Imported from hybrid svn:
     34  * \file mempool.c
     35  * \brief A pooling allocator
     36  * \version $Id: mempool.c 1967 2013-05-08 14:33:22Z michael $
     37  */
     38 
     39 #include "unrealircd.h"
     40 
     41 #if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
     42 /* When running with AddressSanitizer, memory pools would likely hide
     43  * various kinds of misuse from the sanitizer. (This is a known problem.)
     44  * Therefore, if ASan is enabled, we don't use memory pools and
     45  * use malloc/free instead, so ASan can do its job much better.
     46  * Theoretically we could still use ASan + memory pools if we manually
     47  * designate and mark red zones. However, that still does not fix the
     48  * case of quick memory re-use. Mempool re-uses a freed area quickly
     49  * to be efficient, while ASan does the exact opposite (putting
     50  * freed items in a 256MB quarantine buffer), since quick re-use
     51  * will hide use-after-free bugs. -- Syzop
     52  */
     53 void mp_pool_init(void)
     54 {
     55 }
     56 
     57 mp_pool_t *mp_pool_new(size_t sz, size_t ignored)
     58 {
     59     mp_pool_t *m = safe_alloc(sizeof(mp_pool_t));
     60     /* We (mis)use the item_alloc_size. It has a slightly different
     61      * meaning in the real mempool code where it's aligned, rounded, etc.
     62      * That is something we don't want as it would hide small overflows.
     63      */
     64     m->item_alloc_size = sz;
     65     return m;
     66 }
     67 
     68 void *mp_pool_get(mp_pool_t *pool)
     69 {
     70     return malloc(pool->item_alloc_size);
     71 }
     72 
     73 void mp_pool_release(void *item)
     74 {
     75     safe_free(item);
     76 }
     77 #else
     78 
     79 /** Returns floor(log2(u64)).  If u64 is 0, (incorrectly) returns 0. */
     80 static int
     81 tor_log2(uint64_t u64)
     82 {
     83   int r = 0;
     84 
     85   if (u64 >= (1ULL << 32))
     86   {
     87     u64 >>= 32;
     88     r = 32;
     89   }
     90   if (u64 >= (1ULL << 16))
     91   {
     92     u64 >>= 16;
     93     r += 16;
     94   }
     95   if (u64 >= (1ULL <<  8))
     96   {
     97     u64 >>= 8;
     98     r += 8;
     99   }
    100   if (u64 >= (1ULL <<  4))
    101   {
    102     u64 >>= 4;
    103     r += 4;
    104   }
    105   if (u64 >= (1ULL <<  2))
    106   {
    107     u64 >>= 2;
    108     r += 2;
    109   }
    110   if (u64 >= (1ULL <<  1))
    111   {
    112     u64 >>= 1;
    113     r += 1;
    114   }
    115 
    116   return r;
    117 }
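        /* A few worked values for tor_log2(), as a sanity check of the
         * bit-shift cascade above:
         *   tor_log2(1)          == 0   (floor(log2(1)))
         *   tor_log2(5)          == 2   (4 <= 5 < 8)
         *   tor_log2(1ULL << 32) == 32
         *   tor_log2(0)          == 0   (the documented incorrect case)
         */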
    118 
    119 /** Return the power of 2 in range [1,UINT64_MAX] closest to <b>u64</b>.  If
    120  * there are two powers of 2 equally close, round down. */
    121 static uint64_t
    122 round_to_power_of_2(uint64_t u64)
    123 {
    124   int lg2;
    125   uint64_t low;
    126   uint64_t high;
    127 
    128   if (u64 == 0)
    129     return 1;
    130 
    131   lg2 = tor_log2(u64);
    132   low = 1ULL << lg2;
    133 
    134   if (lg2 == 63)
    135     return low;
    136 
    137   high = 1ULL << (lg2 + 1);
    138   if (high - u64 < u64 - low)
    139     return high;
    140   else
    141     return low;
    142 }
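        /* Worked values for round_to_power_of_2(), following the code above
         * (0 maps to 1, exact ties round down):
         *   round_to_power_of_2(0) == 1
         *   round_to_power_of_2(5) == 4    (|5-4| = 1 < |8-5| = 3)
         *   round_to_power_of_2(6) == 4    (tie between 4 and 8, round down)
         *   round_to_power_of_2(7) == 8    (|8-7| = 1 < |7-4| = 3)
         *   round_to_power_of_2(1ULL << 63) == 1ULL << 63
         */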
    143 
    144 /* OVERVIEW:
    145  *
    146  *     This is an implementation of memory pools for Tor cells.  It may be
    147  *     useful for you too.
    148  *
    149  *     Generally, a memory pool is an allocation strategy optimized for large
    150  *     numbers of identically-sized objects.  Rather than the elaborate arena
    151  *     and coalescing strategies you need to get good performance for a
    152  *     general-purpose malloc(), pools use a series of large memory "chunks",
    153  *     each of which is carved into a bunch of smaller "items" or
    154  *     "allocations".
    155  *
    156  *     To get decent performance, you need to:
    157  *        - Minimize the number of times you hit the underlying allocator.
    158  *        - Try to keep accesses as local in memory as possible.
    159  *        - Try to keep the common case fast.
    160  *
    161  *     Our implementation uses three lists of chunks per pool.  Each chunk can
    162  *     be either "full" (no more room for items); "empty" (no items); or
    163  *     "used" (not full, not empty).  There are independent doubly-linked
    164  *     lists for each state.
    165  *
    166  * CREDIT:
    167  *
    168  *     I wrote this after looking at 3 or 4 other pooling allocators, but
    169  *     without copying.  The strategy this most resembles (which is funny,
    170  *     since that's the one I looked at longest ago) is the pool allocator
    171  *     underlying Python's obmalloc code.  Major differences from obmalloc's
    172  *     pools are:
    173  *       - We don't even try to be threadsafe.
    174  *       - We only handle objects of one size.
    175  *       - Our list of empty chunks is doubly-linked, not singly-linked.
    176  *         (This could change pretty easily; it's only doubly-linked for
    177  *         consistency.)
    178  *       - We keep a list of full chunks (so we can have a "nuke everything"
    179  *         function).  Obmalloc's pools leave full chunks to float unanchored.
    180  *
    181  * LIMITATIONS:
    182  *   - Not even slightly threadsafe.
    183  *   - Likes to have lots of items per chunk.
    184  *   - One pointer overhead per allocated thing.  (The alternative is
    185  *     something like glib's use of an RB-tree to keep track of what
    186  *     chunk any given piece of memory is in.)
    187  *   - Only aligns allocated things to void* level: redefine ALIGNMENT_TYPE
    188  *     if you need doubles.
    189  *   - Could probably be optimized a bit; the representation contains
    190  *     a bit more info than it really needs to have.
    191  */
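        /* A minimal usage sketch of the pool API implemented below. The item
         * type and the sizes here are hypothetical; only the mp_pool_* calls
         * are part of this file:
         *
         *   struct example_item { int id; char name[32]; };
         *
         *   mp_pool_t *pool = mp_pool_new(sizeof(struct example_item),
         *                                 512 * 1024);   // aim for ~512KB chunks
         *   struct example_item *it = mp_pool_get(pool);  // take one item
         *   it->id = 1;
         *   mp_pool_release(it);                          // give it back
         *
         * Items are not guaranteed to be zeroed by mp_pool_get(), and memory
         * only goes back to the OS once a whole chunk becomes empty and is
         * freed by mp_pool_clean() (called periodically via the garbage
         * collect event registered in mp_pool_init()).
         */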
    192 
    193 /* Tuning parameters */
    194 /** Largest type that we need to ensure returned memory items are aligned to.
    195  * Change this to "double" if we need to be safe for structs with doubles. */
    196 #define ALIGNMENT_TYPE void *
    197 /** Increment to which we need to align allocated memory. */
    198 #define ALIGNMENT sizeof(ALIGNMENT_TYPE)
    199 /** Largest memory chunk that we should allocate. */
    200 #define MAX_CHUNK (8 *(1L << 20))
    201 /** Smallest memory chunk size that we should allocate. */
    202 #define MIN_CHUNK 4096
    203 
    204 typedef struct mp_allocated_t mp_allocated_t;
    205 typedef struct mp_chunk_t mp_chunk_t;
    206 
    207 /** Holds a single allocated item, allocated as part of a chunk. */
    208 struct mp_allocated_t {
    209   /** The chunk that this item is allocated in.  This adds overhead to each
    210    * allocated item, thus making this implementation inappropriate for
    211    * very small items. */
    212   mp_chunk_t *in_chunk;
    213 
    214   union {
    215     /** If this item is free, the next item on the free list. */
    216     mp_allocated_t *next_free;
    217 
    218     /** If this item is not free, the actual memory contents of this item.
    219      * (The declared size of 1 is not the actual size.) */
    220     char mem[1];
    221 
    222     /** An extra element to the union to ensure correct alignment. */
    223     ALIGNMENT_TYPE dummy_;
    224   } u;
    225 };
    226 
    227 /** 'Magic' value used to detect memory corruption. */
    228 #define MP_CHUNK_MAGIC 0x09870123
    229 
    230 /** A chunk of memory.  Chunks come from malloc; we carve them into items. */
    231 struct mp_chunk_t {
    232   uint32_t magic; /**< Must be MP_CHUNK_MAGIC if this chunk is valid. */
    233   mp_chunk_t *next; /**< The next free, used, or full chunk in sequence. */
    234   mp_chunk_t *prev; /**< The previous free, used, or full chunk in sequence. */
    235   mp_pool_t *pool; /**< The pool that this chunk is part of. */
    236 
    237   /** First free item in the freelist for this chunk.  Note that this may be
    238    * NULL even if this chunk is not at capacity: if so, the free memory at
    239    * next_mem has not yet been carved into items.
    240    */
    241   mp_allocated_t *first_free;
    242   int n_allocated; /**< Number of currently allocated items in this chunk. */
    243   int capacity; /**< Number of items that can be fit into this chunk. */
    244   size_t mem_size; /**< Number of usable bytes in mem. */
    245   char *next_mem; /**< Pointer into part of <b>mem</b> not yet carved up. */
    246   char mem[]; /**< Storage for this chunk. */
    247 };
    248 
    249 static mp_pool_t *mp_allocated_pools = NULL;
    250 
    251 /** Number of extra bytes needed beyond mem_size to allocate a chunk. */
    252 #define CHUNK_OVERHEAD offsetof(mp_chunk_t, mem[0])
    253 
    254 /** Given a pointer to a mp_allocated_t, return a pointer to the memory
    255  * item it holds. */
    256 #define A2M(a) (&(a)->u.mem)
    257 /** Given a pointer to a memory item, return a pointer to its enclosing
    258  * mp_allocated_t. */
    259 #define M2A(p) (((char *)p) - offsetof(mp_allocated_t, u.mem))
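        /* Rough layout behind A2M()/M2A(), assuming a 64-bit build where
         * offsetof(mp_allocated_t, u.mem) == sizeof(void *) == 8:
         *
         *   [ in_chunk (8 bytes) ][ u.mem ... item_size bytes ... ]
         *   ^                      ^
         *   M2A(item)              A2M(allocated) == item
         *
         * Every live item therefore carries one pointer of overhead in front
         * of it, which is how mp_pool_release() finds the owning chunk.
         */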
    260 
    261 void
    262 mp_pool_init(void)
    263 {
    264   EventAdd(NULL, "mp_pool_garbage_collect", &mp_pool_garbage_collect, NULL, 119*1000, 0);
    265 }
    266 
    267 /** Helper: Allocate and return a new memory chunk for <b>pool</b>.  Does not
    268  * link the chunk into any list. */
    269 static mp_chunk_t *
    270 mp_chunk_new(mp_pool_t *pool)
    271 {
    272   size_t sz = pool->new_chunk_capacity * pool->item_alloc_size;
    273   mp_chunk_t *chunk = safe_alloc(CHUNK_OVERHEAD + sz);
    274 
    275 #ifdef MEMPOOL_STATS
    276   ++pool->total_chunks_allocated;
    277 #endif
    278   chunk->magic = MP_CHUNK_MAGIC;
    279   chunk->capacity = pool->new_chunk_capacity;
    280   chunk->mem_size = sz;
    281   chunk->next_mem = chunk->mem;
    282   chunk->pool = pool;
    283   return chunk;
    284 }
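        /* Sketch of the chunk just allocated above (sizes per this pool):
         *
         *   [ mp_chunk_t header (CHUNK_OVERHEAD bytes) | mem: capacity * item_alloc_size bytes ]
         *                                                ^
         *                                                next_mem starts at mem and advances by
         *                                                item_alloc_size as items are carved off
         */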
    285 
    286 /** Take a <b>chunk</b> that has just been allocated or removed from
    287  * <b>pool</b>'s empty chunk list, and add it to the head of the used chunk
    288  * list. */
    289 static void
    290 add_newly_used_chunk_to_used_list(mp_pool_t *pool, mp_chunk_t *chunk)
    291 {
    292   chunk->next = pool->used_chunks;
    293   if (chunk->next)
    294     chunk->next->prev = chunk;
    295   pool->used_chunks = chunk;
    296   assert(!chunk->prev);
    297 }
    298 
    299 /** Return a newly allocated item from <b>pool</b>. */
    300 void *
    301 mp_pool_get(mp_pool_t *pool)
    302 {
    303   mp_chunk_t *chunk;
    304   mp_allocated_t *allocated;
    305 
    306   if (pool->used_chunks != NULL) {
    307     /*
    308      * Common case: there is some chunk that is neither full nor empty. Use
    309      * that one. (We can't use the full ones, obviously, and we should fill
    310      * up the used ones before we start on any empty ones.)
    311      */
    312     chunk = pool->used_chunks;
    313 
    314   } else if (pool->empty_chunks) {
    315     /*
    316      * We have no used chunks, but we have an empty chunk that we haven't
    317      * freed yet: use that. (We pull from the front of the list, which should
    318      * get us the most recently emptied chunk.)
    319      */
    320     chunk = pool->empty_chunks;
    321 
    322     /* Remove the chunk from the empty list. */
    323     pool->empty_chunks = chunk->next;
    324     if (chunk->next)
    325       chunk->next->prev = NULL;
    326 
    327     /* Put the chunk on the 'used' list. */
    328     add_newly_used_chunk_to_used_list(pool, chunk);
    329 
    330     assert(!chunk->prev);
    331     --pool->n_empty_chunks;
    332     if (pool->n_empty_chunks < pool->min_empty_chunks)
    333       pool->min_empty_chunks = pool->n_empty_chunks;
    334   } else {
    335     /* We have no used or empty chunks: allocate a new chunk. */
    336     chunk = mp_chunk_new(pool);
    337 
    338     /* Add the new chunk to the used list. */
    339     add_newly_used_chunk_to_used_list(pool, chunk);
    340   }
    341 
    342   assert(chunk->n_allocated < chunk->capacity);
    343 
    344   if (chunk->first_free) {
    345     /* If there's anything on the chunk's freelist, unlink it and use it. */
    346     allocated = chunk->first_free;
    347     chunk->first_free = allocated->u.next_free;
    348     allocated->u.next_free = NULL; /* For debugging; not really needed. */
    349     assert(allocated->in_chunk == chunk);
    350   } else {
    351     /* Otherwise, the chunk had better have some free space left on it. */
    352     assert(chunk->next_mem + pool->item_alloc_size <=
    353            chunk->mem + chunk->mem_size);
    354 
    355     /* Good, it did.  Let's carve off a bit of that free space, and use
    356      * that. */
    357     allocated = (void *)chunk->next_mem;
    358     chunk->next_mem += pool->item_alloc_size;
    359     allocated->in_chunk = chunk;
    360     allocated->u.next_free = NULL; /* For debugging; not really needed. */
    361   }
    362 
    363   ++chunk->n_allocated;
    364 #ifdef MEMPOOL_STATS
    365   ++pool->total_items_allocated;
    366 #endif
    367 
    368   if (chunk->n_allocated == chunk->capacity) {
    369     /* This chunk just became full. */
    370     assert(chunk == pool->used_chunks);
    371     assert(chunk->prev == NULL);
    372 
    373     /* Take it off the used list. */
    374     pool->used_chunks = chunk->next;
    375     if (chunk->next)
    376       chunk->next->prev = NULL;
    377 
    378     /* Put it on the full list. */
    379     chunk->next = pool->full_chunks;
    380     if (chunk->next)
    381       chunk->next->prev = chunk;
    382     pool->full_chunks = chunk;
    383   }
    384   /* And return the memory portion of the mp_allocated_t. */
    385   return A2M(allocated);
    386 }
    387 
    388 /** Return an allocated memory item to its memory pool. */
    389 void
    390 mp_pool_release(void *item)
    391 {
    392   mp_allocated_t *allocated = (void *)M2A(item);
    393   mp_chunk_t *chunk = allocated->in_chunk;
    394 
    395   assert(chunk);
    396   assert(chunk->magic == MP_CHUNK_MAGIC);
    397   assert(chunk->n_allocated > 0);
    398 
    399   allocated->u.next_free = chunk->first_free;
    400   chunk->first_free = allocated;
    401 
    402   if (chunk->n_allocated == chunk->capacity) {
    403     /* This chunk was full and is about to be used. */
    404     mp_pool_t *pool = chunk->pool;
    405     /* unlink from the full list  */
    406     if (chunk->prev)
    407       chunk->prev->next = chunk->next;
    408     if (chunk->next)
    409       chunk->next->prev = chunk->prev;
    410     if (chunk == pool->full_chunks)
    411       pool->full_chunks = chunk->next;
    412 
    413     /* link to the used list. */
    414     chunk->next = pool->used_chunks;
    415     chunk->prev = NULL;
    416     if (chunk->next)
    417       chunk->next->prev = chunk;
    418     pool->used_chunks = chunk;
    419   } else if (chunk->n_allocated == 1) {
    420     /* This was used and is about to be empty. */
    421     mp_pool_t *pool = chunk->pool;
    422 
    423     /* Unlink from the used list */
    424     if (chunk->prev)
    425       chunk->prev->next = chunk->next;
    426     if (chunk->next)
    427       chunk->next->prev = chunk->prev;
    428     if (chunk == pool->used_chunks)
    429       pool->used_chunks = chunk->next;
    430 
    431     /* Link to the empty list */
    432     chunk->next = pool->empty_chunks;
    433     chunk->prev = NULL;
    434     if (chunk->next)
    435       chunk->next->prev = chunk;
    436     pool->empty_chunks = chunk;
    437 
    438     /* Reset the guts of this chunk to defragment it, in case it gets
    439      * used again. */
    440     chunk->first_free = NULL;
    441     chunk->next_mem = chunk->mem;
    442 
    443     ++pool->n_empty_chunks;
    444   }
    445 
    446   --chunk->n_allocated;
    447 }
    448 
    449 /** Allocate a new memory pool to hold items of size <b>item_size</b>. We'll
    450  * try to fit about <b>chunk_capacity</b> bytes in each chunk. */
    451 mp_pool_t *
    452 mp_pool_new(size_t item_size, size_t chunk_capacity)
    453 {
    454   mp_pool_t *pool;
    455   size_t alloc_size, new_chunk_cap;
    456 
    457 /*  assert(item_size < SIZE_T_CEILING);
    458   assert(chunk_capacity < SIZE_T_CEILING);
    459   assert(SIZE_T_CEILING / item_size > chunk_capacity);
    460 */
    461   pool = safe_alloc(sizeof(mp_pool_t));
    462   /*
    463    * First, we figure out how much space to allow per item. We'll want to
    464    * make sure we have enough for the overhead plus the item size.
    465    */
    466   alloc_size = (size_t)(offsetof(mp_allocated_t, u.mem) + item_size);
    467   /*
    468    * If the item_size is less than sizeof(next_free), we need to make
    469    * the allocation bigger.
    470    */
    471   if (alloc_size < sizeof(mp_allocated_t))
    472     alloc_size = sizeof(mp_allocated_t);
    473 
    474   /* If we're not an even multiple of ALIGNMENT, round up. */
    475   if (alloc_size % ALIGNMENT) {
    476     alloc_size = alloc_size + ALIGNMENT - (alloc_size % ALIGNMENT);
    477   }
    478   if (alloc_size < ALIGNMENT)
    479     alloc_size = ALIGNMENT;
    480   assert((alloc_size % ALIGNMENT) == 0);
    481 
    482   /*
    483    * Now we figure out how many items fit in each chunk. We need to fit at
    484    * least 2 items per chunk. No chunk can be more than MAX_CHUNK bytes long,
    485    * or less than MIN_CHUNK.
    486    */
    487   if (chunk_capacity > MAX_CHUNK)
    488     chunk_capacity = MAX_CHUNK;
    489 
    490   /*
    491    * Try to be around a power of 2 in size, since that's what allocators like
    492    * handing out. 512K-1 byte is a lot better than 512K+1 byte.
    493    */
    494   chunk_capacity = (size_t) round_to_power_of_2(chunk_capacity);
    495   while (chunk_capacity < alloc_size * 2 + CHUNK_OVERHEAD)
    496     chunk_capacity *= 2;
    497   if (chunk_capacity < MIN_CHUNK)
    498     chunk_capacity = MIN_CHUNK;
    499 
    500   new_chunk_cap = (chunk_capacity-CHUNK_OVERHEAD) / alloc_size;
    501   assert(new_chunk_cap < INT_MAX);
    502   pool->new_chunk_capacity = (int)new_chunk_cap;
    503 
    504   pool->item_alloc_size = alloc_size;
    505 
    506   pool->next = mp_allocated_pools;
    507   mp_allocated_pools = pool;
    508 
    509   return pool;
    510 }
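        /* A worked example of the sizing math above, assuming a 64-bit build
         * (ALIGNMENT == 8, CHUNK_OVERHEAD == 64 bytes with typical padding)
         * and a hypothetical call mp_pool_new(512, 256 * 1024):
         *
         *   alloc_size     = 8 + 512 = 520     (per-item pointer overhead + item;
         *                                       already a multiple of ALIGNMENT)
         *   chunk_capacity = 262144            (256K is already a power of 2)
         *   new_chunk_cap  = (262144 - 64) / 520 = 504 items per chunk
         *
         * Each chunk from mp_chunk_new() is then 64 + 504 * 520 = 262144 bytes,
         * i.e. exactly the requested 256KB.
         */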
    511 
    512 /** Helper function for qsort: used to sort pointers to mp_chunk_t into
    513  * descending order of fullness. */
    514 static int
    515 mp_pool_sort_used_chunks_helper(const void *_a, const void *_b)
    516 {
    517   mp_chunk_t *a = *(mp_chunk_t * const *)_a;
    518   mp_chunk_t *b = *(mp_chunk_t * const *)_b;
    519   return b->n_allocated - a->n_allocated;
    520 }
    521 
    522 /** Sort the used chunks in <b>pool</b> into descending order of fullness,
    523  * so that we preferentially fill up mostly full chunks before we make
    524  * nearly empty chunks less nearly empty. */
    525 static void
    526 mp_pool_sort_used_chunks(mp_pool_t *pool)
    527 {
    528   int i, n = 0, inverted = 0;
    529   mp_chunk_t **chunks, *chunk;
    530 
    531   for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    532     ++n;
    533     if (chunk->next && chunk->next->n_allocated > chunk->n_allocated)
    534       ++inverted;
    535   }
    536 
    537   if (!inverted)
    538     return;
    539 
    540   chunks = safe_alloc(sizeof(mp_chunk_t *) * n);
    541 
    542   for (i = 0, chunk = pool->used_chunks; chunk; chunk = chunk->next)
    543     chunks[i++] = chunk;
    544 
    545   qsort(chunks, n, sizeof(mp_chunk_t *), mp_pool_sort_used_chunks_helper);
    546   pool->used_chunks = chunks[0];
    547   chunks[0]->prev = NULL;
    548 
    549   for (i = 1; i < n; ++i) {
    550     chunks[i - 1]->next = chunks[i];
    551     chunks[i]->prev = chunks[i - 1];
    552   }
    553 
    554   chunks[n - 1]->next = NULL;
    555   safe_free(chunks);
    556   mp_pool_assert_ok(pool);
    557 }
    558 
    559 /** If there are more than <b>n_to_keep</b> empty chunks in <b>pool</b>, free the
    560  * excess ones that have been empty for the longest. If
    561  * <b>keep_recently_used</b> is true, do not free chunks unless they have been
    562  * empty since the last call to this function.
    563  **/
    564 void
    565 mp_pool_clean(mp_pool_t *pool, int n_to_keep, int keep_recently_used)
    566 {
    567   mp_chunk_t *chunk, **first_to_free;
    568 
    569   mp_pool_sort_used_chunks(pool);
    570   assert(n_to_keep >= 0);
    571 
    572   if (keep_recently_used) {
    573     int n_recently_used = pool->n_empty_chunks - pool->min_empty_chunks;
    574     if (n_to_keep < n_recently_used)
    575       n_to_keep = n_recently_used;
    576   }
    577 
    578   assert(n_to_keep >= 0);
    579 
    580   first_to_free = &pool->empty_chunks;
    581   while (*first_to_free && n_to_keep > 0) {
    582     first_to_free = &(*first_to_free)->next;
    583     --n_to_keep;
    584   }
    585   if (!*first_to_free) {
    586     pool->min_empty_chunks = pool->n_empty_chunks;
    587     return;
    588   }
    589 
    590   chunk = *first_to_free;
    591   while (chunk) {
    592     mp_chunk_t *next = chunk->next;
    593     chunk->magic = 0xdeadbeef;
    594     safe_free(chunk);
    595 #ifdef MEMPOOL_STATS
    596     ++pool->total_chunks_freed;
    597 #endif
    598     --pool->n_empty_chunks;
    599     chunk = next;
    600   }
    601 
    602   pool->min_empty_chunks = pool->n_empty_chunks;
    603   *first_to_free = NULL;
    604 }
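        /* Example calls, following the semantics described above:
         *   mp_pool_clean(pool, 0, 0);  // free every empty chunk right now
         *   mp_pool_clean(pool, 5, 0);  // keep at most 5 empty chunks around
         *   mp_pool_clean(pool, 0, 1);  // free only chunks that have stayed
         *                               // empty since the last call (this is
         *                               // what the garbage collector does)
         */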
    605 
    606 /** Helper: Given a list of chunks, free all the chunks in the list. */
    607 static void destroy_chunks(mp_chunk_t *chunk)
    608 {
    609   mp_chunk_t *next;
    610 
    611   while (chunk) {
    612     chunk->magic = 0xd3adb33f;
    613     next = chunk->next;
    614     safe_free(chunk);
    615     chunk = next;
    616   }
    617 }
    618 
    619 /** Helper: make sure that a given chunk list is not corrupt. */
    620 static int
    621 assert_chunks_ok(mp_pool_t *pool, mp_chunk_t *chunk, int empty, int full)
    622 {
    623   mp_allocated_t *allocated;
    624   int n = 0;
    625 
    626   if (chunk)
    627     assert(chunk->prev == NULL);
    628 
    629   while (chunk) {
    630     n++;
    631     assert(chunk->magic == MP_CHUNK_MAGIC);
    632     assert(chunk->pool == pool);
    633     for (allocated = chunk->first_free; allocated;
    634          allocated = allocated->u.next_free) {
    635       assert(allocated->in_chunk == chunk);
    636     }
    637     if (empty)
    638       assert(chunk->n_allocated == 0);
    639     else if (full)
    640       assert(chunk->n_allocated == chunk->capacity);
    641     else
    642       assert(chunk->n_allocated > 0 && chunk->n_allocated < chunk->capacity);
    643 
    644     assert(chunk->capacity == pool->new_chunk_capacity);
    645 
    646     assert(chunk->mem_size ==
    647            pool->new_chunk_capacity * pool->item_alloc_size);
    648 
    649     assert(chunk->next_mem >= chunk->mem &&
    650            chunk->next_mem <= chunk->mem + chunk->mem_size);
    651 
    652     if (chunk->next)
    653       assert(chunk->next->prev == chunk);
    654 
    655     chunk = chunk->next;
    656   }
    657 
    658   return n;
    659 }
    660 
    661 /** Fail with an assertion if <b>pool</b> is not internally consistent. */
    662 void
    663 mp_pool_assert_ok(mp_pool_t *pool)
    664 {
    665   int n_empty;
    666 
    667   n_empty = assert_chunks_ok(pool, pool->empty_chunks, 1, 0);
    668   assert_chunks_ok(pool, pool->full_chunks, 0, 1);
    669   assert_chunks_ok(pool, pool->used_chunks, 0, 0);
    670 
    671   assert(pool->n_empty_chunks == n_empty);
    672 }
    673 
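        /** Event callback, scheduled from mp_pool_init(): free chunks that have
         * stayed empty since the previous run, for every registered pool. */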
    674 void
    675 mp_pool_garbage_collect(void *arg)
    676 {
    677   mp_pool_t *pool = mp_allocated_pools;
    678 
    679   for (; pool; pool = pool->next)
    680     mp_pool_clean(pool, 0, 1);
    681 }
    682 
    683 /** Tally <b>pool</b>'s memory usage: bytes used and bytes allocated across
    684  * the empty, used, and full chunk lists. (Currently only computed, not reported.) */
    685 void
    686 mp_pool_log_status(mp_pool_t *pool)
    687 {
    688   unsigned long long bytes_used = 0;
    689   unsigned long long bytes_allocated = 0;
    690   unsigned long long bu = 0, ba = 0;
    691   mp_chunk_t *chunk;
    692   int n_full = 0, n_used = 0;
    693 
    694   assert(pool);
    695 
    696   for (chunk = pool->empty_chunks; chunk; chunk = chunk->next)
    697     bytes_allocated += chunk->mem_size;
    698 
    699   for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    700     ++n_used;
    701     bu += chunk->n_allocated * pool->item_alloc_size;
    702     ba += chunk->mem_size;
    703   }
    704 
    705   bytes_used += bu;
    706   bytes_allocated += ba;
    707   bu = ba = 0;
    708 
    709   for (chunk = pool->full_chunks; chunk; chunk = chunk->next) {
    710     ++n_full;
    711     bu += chunk->n_allocated * pool->item_alloc_size;
    712     ba += chunk->mem_size;
    713   }
    714 
    715   bytes_used += bu;
    716   bytes_allocated += ba;
    717 }
    718 #endif