📄 slab.c
	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
		kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (rl3->shared)
			transfer_objects(rl3->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
	int node = __get_cpu_var(reap_node);

	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}
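
/*
 * Flush every per-node alien cache of @cachep, returning the cached
 * objects to their home nodes.
 */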
static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	struct slab *slabp = virt_to_slab(objp);
	int nodeid = slabp->nodeid;
	struct kmem_list3 *l3;
	struct array_cache *alien = NULL;
	int node;

	node = numa_node_id();

	/*
	 * Make sure we are not freeing a object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(slabp->nodeid == node))
		return 0;

	l3 = cachep->nodelists[node];
	STATS_INC_NODEFREES(cachep);
	if (l3->alien && l3->alien[nodeid]) {
		alien = l3->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		alien->entry[alien->avail++] = objp;
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
	}
	return 1;
}
#endif

static void __cpuinit cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_node(cpu);
	node_to_cpumask_ptr(mask, node);

	list_for_each_entry(cachep, &cache_chain, next) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct array_cache **alien;

		/* cpu is dead; no one can alloc from it. */
		nc = cachep->array[cpu];
		cachep->array[cpu] = NULL;
		l3 = cachep->nodelists[node];

		if (!l3)
			goto free_array_cache;

		spin_lock_irq(&l3->list_lock);

		/* Free limit for this kmem_list3 */
		l3->free_limit -= cachep->batchcount;
		if (nc)
			free_block(cachep, nc->entry, nc->avail, node);

		if (!cpus_empty(*mask)) {
			spin_unlock_irq(&l3->list_lock);
			goto free_array_cache;
		}

		shared = l3->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node);
			l3->shared = NULL;
		}

		alien = l3->alien;
		l3->alien = NULL;

		spin_unlock_irq(&l3->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}
free_array_cache:
		kfree(nc);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs, now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &cache_chain, next) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;
		drain_freelist(cachep, l3, l3->free_objects);
	}
}
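
/*
 * Prepare @cpu for coming online: make sure its node has a kmem_list3
 * for every cache, then allocate the per-cpu array cache plus the
 * shared and alien arrays the cpu will use.
 */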
static int __cpuinit cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_node(cpu);
	const int memsize = sizeof(struct kmem_list3);

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_list3 and not this cpu's kmem_list3
	 */
	list_for_each_entry(cachep, &cache_chain, next) {
		/*
		 * Set up the size64 kmemlist for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
		if (!cachep->nodelists[node]) {
			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!l3)
				goto bad;
			kmem_list3_init(l3);
			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

			/*
			 * The l3s don't come and go as CPUs come and
			 * go.  cache_chain_mutex is sufficient
			 * protection here.
			 */
			cachep->nodelists[node] = l3;
		}

		spin_lock_irq(&cachep->nodelists[node]->list_lock);
		cachep->nodelists[node]->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
	}

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &cache_chain, next) {
		struct array_cache *nc;
		struct array_cache *shared = NULL;
		struct array_cache **alien = NULL;

		nc = alloc_arraycache(node, cachep->limit,
					cachep->batchcount);
		if (!nc)
			goto bad;
		if (cachep->shared) {
			shared = alloc_arraycache(node,
				cachep->shared * cachep->batchcount,
				0xbaadf00d);
			if (!shared) {
				kfree(nc);
				goto bad;
			}
		}
		if (use_alien_caches) {
			alien = alloc_alien_cache(node, cachep->limit);
			if (!alien) {
				kfree(shared);
				kfree(nc);
				goto bad;
			}
		}
		cachep->array[cpu] = nc;
		l3 = cachep->nodelists[node];
		BUG_ON(!l3);

		spin_lock_irq(&l3->list_lock);
		if (!l3->shared) {
			/*
			 * We are serialised from CPU_DEAD or
			 * CPU_UP_CANCELLED by the cpucontrol lock
			 */
			l3->shared = shared;
			shared = NULL;
		}
#ifdef CONFIG_NUMA
		if (!l3->alien) {
			l3->alien = alien;
			alien = NULL;
		}
#endif
		spin_unlock_irq(&l3->list_lock);
		kfree(shared);
		free_alien_cache(alien);
	}
	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}
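
/*
 * CPU hotplug callback: sets the per-cpu caches up before a cpu comes
 * online, starts/stops its cache reaper, and tears the caches down
 * again when bring-up is cancelled or the cpu dies.
 */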
static int __cpuinit cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&cache_chain_mutex);
		err = cpuup_prepare(cpu);
		mutex_unlock(&cache_chain_mutex);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Shutdown cache reaper. Note that the cache_chain_mutex is
		 * held so that if cache_reap() is invoked it cannot do
		 * anything expensive but will only modify reap_work
		 * and reschedule the timer.
		 */
		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
		/* Now the cache_reaper is guaranteed to be not running. */
		per_cpu(reap_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * Even if all the cpus of a node are down, we don't free the
		 * kmem_list3 of any cache. This to avoid a race between
		 * cpu_down, and a kmalloc allocation from another cpu for
		 * memory from the node of the cpu going down.  The list3
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
		/* fall through */
#endif
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		mutex_lock(&cache_chain_mutex);
		cpuup_canceled(cpu);
		mutex_unlock(&cache_chain_mutex);
		break;
	}
	return err ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpucache_notifier = {
	&cpuup_callback, NULL, 0
};

/*
 * swap the static kmem_list3 with kmalloced memory
 */
static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
			int nodeid)
{
	struct kmem_list3 *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
	BUG_ON(!ptr);

	local_irq_disable();
	memcpy(ptr, list, sizeof(struct kmem_list3));

	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->nodelists[nodeid] = ptr;
	local_irq_enable();
}

/*
 * For setting up all the kmem_list3s for cache whose buffer_size is same as
 * size of kmem_list3.
 */
static void __init set_up_list3s(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->nodelists[node] = &initkmem_list3[index + node];
		cachep->nodelists[node]->next_reap = jiffies +
		    REAPTIMEOUT_LIST3 +
		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
	}
}

/*
 * Initialisation.  Called after the page allocator have been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	size_t left_over;
	struct cache_sizes *sizes;
	struct cache_names *names;
	int i;
	int order;