📄 mempool.c

📁 Source code for Tor anonymous communication
💻 C
📖 Page 1 of 2

    /* This was used and is about to be empty. */
    mp_pool_t *pool = chunk->pool;

    /* Unlink from the used list */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->used_chunks)
      pool->used_chunks = chunk->next;

    /* Link to the empty list */
    chunk->next = pool->empty_chunks;
    chunk->prev = NULL;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->empty_chunks = chunk;

    /* Reset the guts of this chunk to defragment it, in case it gets
     * used again. */
    chunk->first_free = NULL;
    chunk->next_mem = chunk->mem;

    ++pool->n_empty_chunks;
  }
  --chunk->n_allocated;
}
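
#if 0
/* Illustrative sketch (not part of the original file): the intended
 * lifecycle of one pool item.  mp_pool_get() hands out an item_size-byte
 * slot; mp_pool_release() (whose tail appears above) hands it back, and
 * moves the owning chunk onto the empty list once its last item is freed. */
static void
example_item_lifecycle(mp_pool_t *pool)
{
  void *item = mp_pool_get(pool);   /* Acquire one slot from the pool. */
  /* ... use the memory ... */
  mp_pool_release(item);            /* Return the slot to its chunk. */
}
#endif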

/** Allocate a new memory pool to hold items of size <b>item_size</b>. We'll
 * try to fit about <b>chunk_capacity</b> bytes in each chunk. */
mp_pool_t *
mp_pool_new(size_t item_size, size_t chunk_capacity)
{
  mp_pool_t *pool;
  size_t alloc_size, new_chunk_cap;

  pool = ALLOC(sizeof(mp_pool_t));
  CHECK_ALLOC(pool);
  memset(pool, 0, sizeof(mp_pool_t));

  /* First, we figure out how much space to allow per item.  We'll want to
   * make sure we have enough room for the overhead plus the item size. */
  alloc_size = (size_t)(STRUCT_OFFSET(mp_allocated_t, u.mem) + item_size);
  /* If the item_size is less than sizeof(next_free), we need to make
   * the allocation bigger. */
  if (alloc_size < sizeof(mp_allocated_t))
    alloc_size = sizeof(mp_allocated_t);

  /* If we're not an even multiple of ALIGNMENT, round up. */
  if (alloc_size % ALIGNMENT) {
    alloc_size = alloc_size + ALIGNMENT - (alloc_size % ALIGNMENT);
  }
  if (alloc_size < ALIGNMENT)
    alloc_size = ALIGNMENT;
  ASSERT((alloc_size % ALIGNMENT) == 0);
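  /* Worked example (illustrative): with ALIGNMENT == 16 and alloc_size == 21,
   * alloc_size % ALIGNMENT == 5, so the rounding above yields
   * 21 + 16 - 5 == 32, the next multiple of 16. */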

  /* Now we figure out how many items fit in each chunk.  We need to fit at
   * least 2 items per chunk. No chunk can be more than MAX_CHUNK bytes long,
   * or less than MIN_CHUNK. */
  if (chunk_capacity > MAX_CHUNK)
    chunk_capacity = MAX_CHUNK;
  /* Try to be around a power of 2 in size, since that's what allocators like
   * handing out. 512K-1 byte is a lot better than 512K+1 byte. */
  chunk_capacity = (size_t) round_to_power_of_2(chunk_capacity);
  while (chunk_capacity < alloc_size * 2 + CHUNK_OVERHEAD)
    chunk_capacity *= 2;
  if (chunk_capacity < MIN_CHUNK)
    chunk_capacity = MIN_CHUNK;

  new_chunk_cap = (chunk_capacity-CHUNK_OVERHEAD) / alloc_size;
  ASSERT(new_chunk_cap < INT_MAX);
  pool->new_chunk_capacity = (int)new_chunk_cap;

  pool->item_alloc_size = alloc_size;

  log_debug(LD_MM, "Capacity is %lu items, item alloc size is %lu, "
            "chunk mem size is %lu",
            (unsigned long)pool->new_chunk_capacity,
            (unsigned long)pool->item_alloc_size,
            (unsigned long)(pool->new_chunk_capacity*pool->item_alloc_size));

  return pool;
}
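
#if 0
/* Illustrative sketch (not part of the original file): build a pool of
 * 64-byte items packed into roughly 1MB chunks.  Both figures are
 * arbitrary example values, not taken from Tor. */
static mp_pool_t *
example_make_pool(void)
{
  mp_pool_t *pool = mp_pool_new(64, 1<<20);
  mp_pool_assert_ok(pool);   /* A fresh pool has empty chunk lists. */
  return pool;
}
#endif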

#ifdef LAZY_CHUNK_SORT
/** Helper function for qsort: used to sort pointers to mp_chunk_t into
 * descending order of fullness. */
static int
mp_pool_sort_used_chunks_helper(const void *_a, const void *_b)
{
  mp_chunk_t *a = *(mp_chunk_t**)_a;
  mp_chunk_t *b = *(mp_chunk_t**)_b;
  return b->n_allocated - a->n_allocated;
}
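
/* Illustrative note (added): given chunks with n_allocated values {3, 7, 5},
 * qsort() with the comparator above orders them {7, 5, 3}, fullest first.
 * The subtraction cannot overflow, since n_allocated is bounded by the chunk
 * capacity, which mp_pool_new() keeps below INT_MAX. */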

/** Sort the used chunks in <b>pool</b> into descending order of fullness,
 * so that we preferentially fill up mostly full chunks before we make
 * nearly empty chunks less nearly empty. */
static void
mp_pool_sort_used_chunks(mp_pool_t *pool)
{
  int i, n=0, inverted=0;
  mp_chunk_t **chunks, *chunk;
  for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    ++n;
    if (chunk->next && chunk->next->n_allocated > chunk->n_allocated)
      ++inverted;
  }
  if (!inverted)
    return;
  //printf("Sort %d/%d\n",inverted,n);
  chunks = ALLOC(sizeof(mp_chunk_t *)*n);
#ifdef ALLOC_CAN_RETURN_NULL
  if (PREDICT_UNLIKELY(!chunks)) return;
#endif
  for (i=0,chunk = pool->used_chunks; chunk; chunk = chunk->next)
    chunks[i++] = chunk;
  qsort(chunks, n, sizeof(mp_chunk_t *), mp_pool_sort_used_chunks_helper);
  pool->used_chunks = chunks[0];
  chunks[0]->prev = NULL;
  for (i=1;i<n;++i) {
    chunks[i-1]->next = chunks[i];
    chunks[i]->prev = chunks[i-1];
  }
  chunks[n-1]->next = NULL;
  FREE(chunks);
#if 0
  inverted = 0;
  for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    if (chunk->next) {
      ASSERT(chunk->next->n_allocated <= chunk->n_allocated);
    }
  }
#endif
  mp_pool_assert_ok(pool);
}
#endif

/** If there are more than <b>n_to_keep</b> empty chunks in <b>pool</b>, free
 * the excess ones that have been empty for the longest.  If
 * <b>keep_recently_used</b> is true, do not free chunks unless they have been
 * empty since the last call to this function.
 **/
void
mp_pool_clean(mp_pool_t *pool, int n_to_keep, int keep_recently_used)
{
  mp_chunk_t *chunk, **first_to_free;

#ifdef LAZY_CHUNK_SORT
  mp_pool_sort_used_chunks(pool);
#endif
  ASSERT(n_to_keep >= 0);

  if (keep_recently_used) {
    int n_recently_used = pool->n_empty_chunks - pool->min_empty_chunks;
    if (n_to_keep < n_recently_used)
      n_to_keep = n_recently_used;
  }

  ASSERT(n_to_keep >= 0);

  first_to_free = &pool->empty_chunks;
  while (*first_to_free && n_to_keep > 0) {
    first_to_free = &(*first_to_free)->next;
    --n_to_keep;
  }
  if (!*first_to_free) {
    pool->min_empty_chunks = pool->n_empty_chunks;
    return;
  }

  chunk = *first_to_free;
  while (chunk) {
    mp_chunk_t *next = chunk->next;
    chunk->magic = 0xdeadbeef;
    FREE(chunk);
#ifdef MEMPOOL_STATS
    ++pool->total_chunks_freed;
#endif
    --pool->n_empty_chunks;
    chunk = next;
  }

  pool->min_empty_chunks = pool->n_empty_chunks;
  *first_to_free = NULL;
}
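
#if 0
/* Illustrative sketch (not part of the original file): periodic
 * housekeeping.  With n_to_keep == 0 and keep_recently_used == 1, only
 * chunks that have sat empty since the previous call are freed, so a
 * brief dip in usage does not immediately discard warm chunks. */
static void
example_periodic_cleanup(mp_pool_t *pool)
{
  mp_pool_clean(pool, 0, 1);
}
#endif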

/** Helper: Given a list of chunks, free all the chunks in the list. */
static void
destroy_chunks(mp_chunk_t *chunk)
{
  mp_chunk_t *next;
  while (chunk) {
    chunk->magic = 0xd3adb33f;
    next = chunk->next;
    FREE(chunk);
    chunk = next;
  }
}

/** Free all space held in <b>pool</b>.  This makes all pointers returned from
 * mp_pool_get(<b>pool</b>) invalid. */
void
mp_pool_destroy(mp_pool_t *pool)
{
  destroy_chunks(pool->empty_chunks);
  destroy_chunks(pool->used_chunks);
  destroy_chunks(pool->full_chunks);
  memset(pool, 0xe0, sizeof(mp_pool_t));
  FREE(pool);
}

/** Helper: make sure that a given chunk list is not corrupt. */
static int
assert_chunks_ok(mp_pool_t *pool, mp_chunk_t *chunk, int empty, int full)
{
  mp_allocated_t *allocated;
  int n = 0;
  if (chunk)
    ASSERT(chunk->prev == NULL);

  while (chunk) {
    n++;
    ASSERT(chunk->magic == MP_CHUNK_MAGIC);
    ASSERT(chunk->pool == pool);
    for (allocated = chunk->first_free; allocated;
         allocated = allocated->u.next_free) {
      ASSERT(allocated->in_chunk == chunk);
    }
    if (empty)
      ASSERT(chunk->n_allocated == 0);
    else if (full)
      ASSERT(chunk->n_allocated == chunk->capacity);
    else
      ASSERT(chunk->n_allocated > 0 && chunk->n_allocated < chunk->capacity);

    ASSERT(chunk->capacity == pool->new_chunk_capacity);

    ASSERT(chunk->mem_size ==
           pool->new_chunk_capacity * pool->item_alloc_size);

    ASSERT(chunk->next_mem >= chunk->mem &&
           chunk->next_mem <= chunk->mem + chunk->mem_size);

    if (chunk->next)
      ASSERT(chunk->next->prev == chunk);

    chunk = chunk->next;
  }
  return n;
}

/** Fail with an assertion if <b>pool</b> is not internally consistent. */
void
mp_pool_assert_ok(mp_pool_t *pool)
{
  int n_empty;

  n_empty = assert_chunks_ok(pool, pool->empty_chunks, 1, 0);
  assert_chunks_ok(pool, pool->full_chunks, 0, 1);
  assert_chunks_ok(pool, pool->used_chunks, 0, 0);

  ASSERT(pool->n_empty_chunks == n_empty);
}

#ifdef TOR
/** Dump information about <b>pool</b>'s memory usage to the Tor log at level
 * <b>severity</b>. */
/*FFFF uses Tor logging functions. */
void
mp_pool_log_status(mp_pool_t *pool, int severity)
{
  uint64_t bytes_used = 0;
  uint64_t bytes_allocated = 0;
  uint64_t bu = 0, ba = 0;
  mp_chunk_t *chunk;
  int n_full = 0, n_used = 0;

  ASSERT(pool);

  for (chunk = pool->empty_chunks; chunk; chunk = chunk->next) {
    bytes_allocated += chunk->mem_size;
  }
  log_fn(severity, LD_MM, U64_FORMAT" bytes in %d empty chunks",
         U64_PRINTF_ARG(bytes_allocated), pool->n_empty_chunks);
  for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    ++n_used;
    bu += chunk->n_allocated * pool->item_alloc_size;
    ba += chunk->mem_size;
    log_fn(severity, LD_MM, "   used chunk: %d items allocated",
           chunk->n_allocated);
  }
  log_fn(severity, LD_MM, U64_FORMAT"/"U64_FORMAT
         " bytes in %d partially full chunks",
         U64_PRINTF_ARG(bu), U64_PRINTF_ARG(ba), n_used);
  bytes_used += bu;
  bytes_allocated += ba;
  bu = ba = 0;
  for (chunk = pool->full_chunks; chunk; chunk = chunk->next) {
    ++n_full;
    bu += chunk->n_allocated * pool->item_alloc_size;
    ba += chunk->mem_size;
  }
  log_fn(severity, LD_MM, U64_FORMAT"/"U64_FORMAT
         " bytes in %d full chunks",
         U64_PRINTF_ARG(bu), U64_PRINTF_ARG(ba), n_full);
  bytes_used += bu;
  bytes_allocated += ba;

  log_fn(severity, LD_MM, "Total: "U64_FORMAT"/"U64_FORMAT" bytes allocated "
         "for cell pools are full.",
         U64_PRINTF_ARG(bytes_used), U64_PRINTF_ARG(bytes_allocated));

#ifdef MEMPOOL_STATS
  log_fn(severity, LD_MM, U64_FORMAT" cell allocations ever; "
         U64_FORMAT" chunk allocations ever; "
         U64_FORMAT" chunk frees ever.",
         U64_PRINTF_ARG(pool->total_items_allocated),
         U64_PRINTF_ARG(pool->total_chunks_allocated),
         U64_PRINTF_ARG(pool->total_chunks_freed));
#endif
}
#endif
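
#if 0
/* Illustrative sketch (not part of the original file; Tor builds only):
 * dump per-chunk-list statistics at debug severity.  LOG_DEBUG is Tor's
 * most verbose severity; the call walks every chunk list, so keep it off
 * hot paths. */
static void
example_log_pool(mp_pool_t *pool)
{
  mp_pool_log_status(pool, LOG_DEBUG);
}
#endif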

