+			ptr = c->ptr;
+
+			ch = avl_ptr_to_chunk(ptr, osize);
+			atomic_set(&ch->refcnt, 1);
+			ch->canary = AVL_CANARY;
+			ch->size = osize;
+
+			avl_update_node(c, i, osize);
+			break;
+		}
+	}
+	local_irq_restore(flags);
+#if 1
+	if (!ptr && !try) {
+		struct avl_node_entry *entry;
+
+		try = 1;
+
+		entry = avl_node_entry_alloc(gfp_mask, get_order(size));
+		if (entry) {
+			local_irq_save(flags);
+			avl_node_entry_commit(entry, smp_processor_id());
+			goto repeat;
+		}
+	}
+#endif
+	if (unlikely(!ptr && try))
+		if (net_ratelimit())
+			printk("%s: Failed to allocate %u bytes.\n", __func__, size);
+
+	return ptr;
+}
+
+/*
+ * Remove a free chunk from the free list.
+ */
+static inline struct avl_container *avl_search_container(void *ptr, unsigned int idx, int cpu)
+{
+	struct avl_container *c = ptr;
+
+	list_del(&c->centry);
+	c->ptr = ptr;
+
+	return c;
+}
+
+/*
+ * Combine neighbouring free chunks into a single bigger one
+ * and put the new chunk into the list of free chunks of the appropriate size.
+ */
+static void avl_combine(struct avl_node *node, void *lp, unsigned int lbits, void *rp, unsigned int rbits,
+		void *cur_ptr, unsigned int cur_bits, int cpu)
+{
+	struct avl_container *lc, *rc, *c;
+	unsigned int idx;
+	void *ptr;
+
+	lc = rc = c = NULL;
+	idx = cur_bits - 1;
+	ptr = cur_ptr;
+
+	c = (struct avl_container *)cur_ptr;
+	c->ptr = cur_ptr;
+
+	if (rp) {
+		rc = avl_search_container(rp, rbits-1, cpu);
+		if (!rc) {
+			printk(KERN_ERR "%p.%p: Failed to find a container for right pointer %p, rbits: %u.\n",
+					node, cur_ptr, rp, rbits);
+			BUG();
+		}
+
+		c = rc;
+		idx += rbits;
+		ptr = c->ptr;
+	}
+
+	if (lp) {
+		lc = avl_search_container(lp, lbits-1, cpu);
+		if (!lc) {
+			printk(KERN_ERR "%p.%p: Failed to find a container for left pointer %p, lbits: %u.\n",
+					node, cur_ptr, lp, lbits);
+			BUG();
+		}
+
+		idx += lbits;
+		ptr = c->ptr;
+	}
+	avl_container_insert(c, idx, cpu);
+}
+
+/*
+ * Free a memory region of the given size.
+ * Must be called on the CPU where the allocation happened,
+ * with interrupts disabled.
+ */
+static void __avl_free_local(void *ptr, unsigned int size)
+{
+	unsigned long val = avl_ptr_to_value(ptr);
+	unsigned int pos, idx, sbits = AVL_ALIGN(size)/AVL_MIN_SIZE;
+	unsigned int rbits, lbits, cpu = avl_get_cpu_ptr(val);
+	struct avl_node *node;
+	unsigned long p;
+	void *lp, *rp;
+
+	node = avl_get_node_ptr((unsigned long)ptr);
+
+	pos = avl_ptr_to_offset(ptr);
+	idx = pos/BITS_PER_LONG;
+
+	p = node->mask[idx] >> (pos%BITS_PER_LONG);
+
+	if (p & 1) {
+		if (net_ratelimit())
+			printk(KERN_ERR "%p.%p: Broken pointer: value: %lx, pos: %u, idx: %u, mask: %lx, p: %lx.\n",
+					node, ptr, val, pos, idx, node->mask[idx], p);
+		return;
+	}
+
+	avl_fill_bits(node->mask, ARRAY_SIZE(node->mask), pos, sbits, 1);
+
+	lp = rp = NULL;
+	rbits = lbits = 0;
+
+	idx = (pos+sbits)/BITS_PER_LONG;
+	p = (pos+sbits)%BITS_PER_LONG;
+
+	if ((node->mask[idx] >> p) & 1) {
+		lbits = avl_count_set_up(node->mask, ARRAY_SIZE(node->mask), pos+sbits);
+		if (lbits)
+			lp = (void *)(val + (pos + sbits)*AVL_MIN_SIZE);
+	}
+
+	if (pos) {
+		idx = (pos-1)/BITS_PER_LONG;
+		p = (pos-1)%BITS_PER_LONG;
+		if ((node->mask[idx] >> p) & 1) {
+			rbits = avl_count_set_down(node->mask, pos-1);
+			if (rbits)
+				rp = (void *)(val + (pos-rbits)*AVL_MIN_SIZE);
+		}
+	}
+
+	avl_combine(node, lp, lbits, rp, rbits, ptr, sbits, cpu);
+}
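The free path above coalesces a freed chunk with its free neighbours by scanning the node's bitmask in both directions. avl_count_set_up() and avl_count_set_down() are not part of this fragment, so the standalone sketch below only assumes their behaviour (counting contiguous set, i.e. free, bits above and below a position) and demonstrates the merge arithmetic in plain userspace C:

/* A minimal sketch, not part of the patch: count_set_up()/count_set_down()
 * are assumed stand-ins for avl_count_set_up()/avl_count_set_down(), whose
 * definitions are outside this fragment. */
#include <stdio.h>

#define CHUNKS 16	/* bits in the toy mask: 1 - free, 0 - used */

/* Count contiguous set bits starting at pos and going up. */
static unsigned int count_set_up(unsigned long mask, unsigned int pos)
{
	unsigned int n = 0;

	while (pos < CHUNKS && ((mask >> pos) & 1)) {
		n++;
		pos++;
	}
	return n;
}

/* Count contiguous set bits starting at pos and going down. */
static unsigned int count_set_down(unsigned long mask, unsigned int pos)
{
	unsigned int n = 0;

	while ((mask >> pos) & 1) {
		n++;
		if (pos-- == 0)
			break;
	}
	return n;
}

int main(void)
{
	unsigned long mask = 0x0387;		/* chunks 0-2 and 7-9 are free */
	unsigned int pos = 3, sbits = 4;	/* now freeing chunks 3-6 */
	unsigned int up, down;

	mask |= ((1UL << sbits) - 1) << pos;	/* avl_fill_bits() analogue */

	up = count_set_up(mask, pos + sbits);	/* free run above: 3 chunks */
	down = count_set_down(mask, pos - 1);	/* free run below: 3 chunks */

	/* The three runs merge into one free region, as avl_combine() does. */
	printf("merged free run: %u chunks\n", down + sbits + up);
	return 0;
}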
+
+/*
+ * Free a memory region of the given size.
+ * If the freeing CPU is not the allocation CPU, the chunk is placed
+ * into the list of to-be-freed objects on the allocation CPU,
+ * otherwise the chunk is freed immediately and combined with its neighbours.
+ * Must be called with interrupts disabled.
+ */
+static void __avl_free(void *ptr, unsigned int size)
+{
+	int cpu = avl_get_cpu_ptr((unsigned long)ptr);
+
+	if (cpu != smp_processor_id()) {
+		struct avl_free_list *l, *this = ptr;
+		struct avl_allocator_data *alloc = &avl_allocator[cpu];
+
+		this->cpu = smp_processor_id();
+		this->size = size;
+
+		spin_lock(&alloc->avl_free_lock);
+		l = alloc->avl_free_list_head;
+		alloc->avl_free_list_head = this;
+		this->next = l;
+		spin_unlock(&alloc->avl_free_lock);
+		return;
+	}
+
+	__avl_free_local(ptr, size);
+}
+
+/*
+ * Free a memory region of the given size without a sniffer data update.
+ */
+void avl_free_no_zc(void *ptr, unsigned int size)
+{
+	unsigned long flags;
+	struct avl_free_list *l;
+	struct avl_allocator_data *alloc;
+	struct avl_chunk *ch = avl_ptr_to_chunk(ptr, size);
+
+	if (unlikely((ch->canary != AVL_CANARY) || ch->size != size)) {
+		printk("Freeing destroyed object: ptr: %p, size: %u, canary: %x, must be %x, refcnt: %d, saved size: %u.\n",
+				ptr, size, ch->canary, AVL_CANARY, atomic_read(&ch->refcnt), ch->size);
+		return;
+	}
+
+	if (atomic_dec_and_test(&ch->refcnt)) {
+		local_irq_save(flags);
+		__avl_free(ptr, size);
+
+		alloc = &avl_allocator[smp_processor_id()];
+
+		while (alloc->avl_free_list_head) {
+			spin_lock(&alloc->avl_free_lock);
+			l = alloc->avl_free_list_head;
+			alloc->avl_free_list_head = l->next;
+			spin_unlock(&alloc->avl_free_lock);
+			__avl_free_local(l, l->size);
+		}
+		local_irq_restore(flags);
+	}
+}
+
+/*
+ * Free a memory region of the given size.
+ */
+void avl_free(void *ptr, unsigned int size)
+{
+	struct avl_chunk *ch = avl_ptr_to_chunk(ptr, size);
+
+	if (unlikely((ch->canary != AVL_CANARY) || ch->size != size)) {
+		printk("Freeing destroyed object: ptr: %p, size: %u, canary: %x, must be %x, refcnt: %d, saved size: %u.\n",
+				ptr, size, ch->canary, AVL_CANARY, atomic_read(&ch->refcnt), ch->size);
+		return;
+	}
+	avl_update_zc(avl_get_node_ptr((unsigned long)ptr), ptr, size);
+	avl_free_no_zc(ptr, size);
+}
+
+/*
+ * Initialize per-cpu allocator data.
+ */
+static int avl_init_cpu(int cpu)
+{
+	unsigned int i;
+	struct avl_allocator_data *alloc = &avl_allocator[cpu];
+	struct avl_node_entry *entry;
+
+	spin_lock_init(&alloc->avl_free_lock);
+	spin_lock_init(&alloc->avl_node_lock);
+	INIT_LIST_HEAD(&alloc->avl_node_list);
+
+	alloc->avl_container_array = kzalloc(sizeof(struct list_head) * AVL_CONTAINER_ARRAY_SIZE, GFP_KERNEL);
+	if (!alloc->avl_container_array)
+		goto err_out_exit;
+
+	for (i=0; i<AVL_CONTAINER_ARRAY_SIZE; ++i)
+		INIT_LIST_HEAD(&alloc->avl_container_array[i]);
+
+	entry = avl_node_entry_alloc(GFP_KERNEL, AVL_ORDER);
+	if (!entry)
+		goto err_out_free_container;
+
+	avl_node_entry_commit(entry, cpu);
+
+	return 0;
+
+err_out_free_container:
+	kfree(alloc->avl_container_array);
+err_out_exit:
+	return -ENOMEM;
+}
+
+/*
+ * Initialize the network allocator.
+ */
+int avl_init(void)
+{
+	int err, cpu;
+
+	for_each_possible_cpu(cpu) {
+		err = avl_init_cpu(cpu);
+		if (err)
+			goto err_out;
+	}
+
+	err = avl_init_zc();
+	if (err)
+		goto err_out;
+
+	printk(KERN_INFO "Network tree allocator has been initialized.\n");
+	return 0;
+
+err_out:
+	panic("Failed to initialize network allocator.\n");
+
+	return -ENOMEM;
+}
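That completes avl.c. For orientation, a minimal usage sketch of the exported API follows; example_use() and its error handling are hypothetical, but the requirement that avl_free() receive the exact avl_alloc() size follows from the struct avl_chunk checks above:

/* A sketch, not part of the patch: expected use of the allocator API. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include "avl.h"

static int example_use(void)
{
	unsigned int size = 512;
	void *buf;

	buf = avl_alloc(size, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	/* ... use buf as packet data storage ... */

	/* The size must match the allocation size: it is checked against
	 * the value saved in the trailing struct avl_chunk. */
	avl_free(buf, size);
	return 0;
}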
diff --git a/net/core/alloc/avl.h b/net/core/alloc/avl.h
new file mode 100644
index 0000000..044d6a2
--- /dev/null
+++ b/net/core/alloc/avl.h
@@ -0,0 +1,223 @@
+/*
+ * avl.h
+ *
+ * 2006 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __AVL_H
+#define __AVL_H
+
+/*
+ * Zero-copy allocation control block.
+ * @ptr - pointer to allocated data.
+ * @off - offset inside given @avl_node_entry pages (absolute number of bytes)
+ * @size - size of the appropriate object
+ * @entry - number of the @avl_node_entry which holds the allocated object
+ * @cpu - CPU the object was allocated on
+ */
+struct zc_data
+{
+	union {
+		__u32		data[2];
+		void		*ptr;
+	} data;
+	__u32			off;
+	__u32			size;
+
+	__u32			entry;
+	__u32			cpu;
+};
+
+#define ZC_MAX_ENTRY_NUM	1000
+
+/*
+ * Zero-copy allocation request.
+ * @type - type of the message - ipv4/ipv6/...
+ * @res_len - length of the reserved area at the beginning.
+ * @zc - allocation control block.
+ */
+struct zc_alloc_ctl
+{
+	__u16			type;
+	__u16			res_len;
+	struct zc_data		zc;
+};
+
+struct zc_entry_status
+{
+	__u16			node_order, node_num;
+};
+
+struct zc_status
+{
+	unsigned int		entry_num;
+	struct zc_entry_status	entry[ZC_MAX_ENTRY_NUM];
+};
+
+#define ZC_ALLOC	_IOWR('Z', 1, struct zc_alloc_ctl)
+#define ZC_COMMIT	_IOR('Z', 2, struct zc_alloc_ctl)
+#define ZC_SET_CPU	_IOR('Z', 3, int)
+#define ZC_STATUS	_IOWR('Z', 4, struct zc_status)
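The ZC_* numbers above are the complete userspace-visible ioctl ABI of this fragment. Below is a minimal, hypothetical userspace consumer; the device node name "/dev/zc" and the exact calling conventions are assumptions, since the character device registration is not visible in this fragment:

/* A sketch, not part of the patch: driving the zero-copy ioctl ABI. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "avl.h"	/* ZC_* ioctl numbers, struct zc_status */

int main(void)
{
	struct zc_status st;
	int fd, cpu = 0;

	fd = open("/dev/zc", O_RDWR);	/* hypothetical device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Bind subsequent requests to the allocator data of CPU 0. */
	if (ioctl(fd, ZC_SET_CPU, &cpu) < 0)
		perror("ZC_SET_CPU");

	/* Query per-entry node order/number for the bound CPU. */
	memset(&st, 0, sizeof(st));
	if (ioctl(fd, ZC_STATUS, &st) == 0)
		printf("allocator holds %u node entries\n", st.entry_num);

	close(fd);
	return 0;
}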
+
+#define AVL_ORDER	2	/* Maximum allocation order */
+#define AVL_BITS	7	/* Must cover maximum number of pages used for allocation pools */
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <asm/page.h>
+
+//#define AVL_DEBUG
+
+#ifdef AVL_DEBUG
+#define ulog(f, a...) printk(f, ##a)
+#else
+#define ulog(f, a...)
+#endif
+
+/*
+ * Network tree allocator variables.
+ */
+
+#define AVL_CANARY	0xc0d0e0f0
+
+#define AVL_ALIGN_SIZE	L1_CACHE_BYTES
+#define AVL_ALIGN(x)	ALIGN(x, AVL_ALIGN_SIZE)
+
+#define AVL_NODES_ON_PAGE	(PAGE_SIZE/sizeof(struct avl_node))
+#define AVL_NODE_NUM		(1UL<<AVL_BITS)
+#define AVL_NODE_PAGES		((AVL_NODE_NUM+AVL_NODES_ON_PAGE-1)/AVL_NODES_ON_PAGE)
+
+#define AVL_MIN_SIZE	AVL_ALIGN_SIZE
+#define AVL_MAX_SIZE	((1<<AVL_ORDER) << PAGE_SHIFT)
+
+#define AVL_CONTAINER_ARRAY_SIZE	(AVL_MAX_SIZE/AVL_MIN_SIZE)
+
+struct avl_node_entry;
+
+/*
+ * Meta-information container for each contiguous block used in allocation.
+ * @value - start address of the contiguous block.
+ * @mask - bitmask of free and used chunks [1 - free, 0 - used].
+ * @entry - pointer to the parent node entry.
+ */
+struct avl_node
+{
+	unsigned long		value;
+	DECLARE_BITMAP(mask, AVL_MAX_SIZE/AVL_MIN_SIZE);
+	struct avl_node_entry	*entry;
+};
+
+/*
+ * Free chunks are dereferenced into this structure and placed into a LIFO list.
+ */
+struct avl_container
+{
+	void			*ptr;
+	struct list_head	centry;
+};
+
+/*
+ * When freeing happens on a different CPU than the allocation one,
+ * the chunk is dereferenced into this structure and placed into a
+ * single-linked list in the allocation CPU's private area.
+ */
+struct avl_free_list
+{
+	struct avl_free_list	*next;
+	unsigned int		size;
+	unsigned int		cpu;
+};
+
+/*
+ * This structure is placed after each allocated chunk and contains:
+ * @canary - used to detect memory overflows.
+ * @size - used to check that the freed size is exactly the size of the object.
+ * @refcnt - reference counter for the given memory region, used for example
+ * for zero-copy access.
+ */
+struct avl_chunk
+{
+	unsigned int		canary, size;
+	atomic_t		refcnt;
+};
+
+/*
+ * Each array of nodes is placed into a dynamically grown list.
+ * @avl_node_array - array of nodes (linked into pages)
+ * @node_entry - entry in avl_allocator_data.avl_node_list.
+ * @avl_node_order - allocation order for each node in @avl_node_array
+ * @avl_node_num - number of nodes in @avl_node_array
+ * @avl_entry_num - number of this entry inside the allocator
+ */
+struct avl_node_entry
+{
+	struct avl_node		**avl_node_array;
+	struct list_head	node_entry;
+	u32			avl_entry_num;
+	u16			avl_node_order, avl_node_num;
+};
+
+/*
+ * Main per-cpu allocator structure.
+ * @avl_container_array - array of lists of free chunks, indexed by element size
+ * @avl_free_list_head - single-linked list of objects whose freeing was started on a different CPU
+ * @avl_free_map_list_head - single-linked list of objects whose map update was started on a different CPU
+ * @avl_free_lock - lock protecting @avl_free_list_head
+ * @avl_node_list - list of avl_node_entry'es
+ * @avl_node_lock - lock protecting @avl_node_list from access by zero-copy devices.
+ * @avl_entry_num - number of entries inside the allocator.
+ */
+struct avl_allocator_data
+{
+	struct list_head	*avl_container_array;
+	struct avl_free_list	*avl_free_list_head;
+	struct avl_free_list	*avl_free_map_list_head;
+	spinlock_t		avl_free_lock;
+	struct list_head	avl_node_list;
+	spinlock_t		avl_node_lock;
+	u32			avl_entry_num;
+};
+
+void *avl_alloc(unsigned int size, gfp_t gfp_mask);
+void avl_free(void *ptr, unsigned int size);
+void avl_free_no_zc(void *ptr, unsigned int size);
+
+int avl_init_zc(void);
+int avl_init(void);
+void avl_fill_zc(struct zc_data *zc, void *ptr, unsigned int size);
+
+struct zc_control
+{
+	struct zc_data		*zcb;
+	unsigned int		zc_num, zc_used, zc_pos;
+	spinlock_t		zc_lock;
+	wait_queue_head_t	zc_wait;
+};
+
+extern struct zc_control zc_sniffer;
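/*
 * avl_ptr_to_chunk() is not shown in this fragment; the helper below is a
 * plausible reconstruction only, assuming the struct avl_chunk header
 * simply trails the user data at the cache-line-aligned size offset:
 *
 *   ptr                  ptr + AVL_ALIGN(size)
 *    |<---- user data ---->|<- struct avl_chunk: canary, size, refcnt ->|
 */
static inline struct avl_chunk *example_ptr_to_chunk(void *ptr, unsigned int size)
{
	return (struct avl_chunk *)((char *)ptr + AVL_ALIGN(size));
}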
+extern struct avl_allocator_data avl_allocator[NR_CPUS];
+
+#endif /* __KERNEL__ */
+#endif /* __AVL_H */
diff --git a/net/core/alloc/zc.c b/net/core/alloc/zc.c
new file mode 100644
index 0000000..fcb386a
--- /dev/null
+++ b/net/core/alloc/zc.c
@@ -0,0 +1,483 @@
+/*
+ * zc.c
+ *
+ * 2006 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>