+	}
+
+	return bits;
+}
+
+static void avl_fill_bits(struct avl_node *node, unsigned long *mask, unsigned int mask_size,
+		unsigned int pos, unsigned int num, unsigned int bit)
+{
+	unsigned int idx, start;
+
+	idx = pos/BITS_PER_LONG;
+	start = pos%BITS_PER_LONG;
+
+	while (num && idx < mask_size) {
+		unsigned long m = ((~0UL)>>start)<<start;
+
+		if (start + num <= BITS_PER_LONG) {
+			unsigned long upper_bits = BITS_PER_LONG - (start+num);
+
+			m = (m<<upper_bits)>>upper_bits;
+		}
+
+		if (bit)
+			mask[idx] |= m;
+		else
+			mask[idx] &= ~m;
+
+		if (start + num <= BITS_PER_LONG)
+			num = 0;
+		else {
+			num -= BITS_PER_LONG - start;
+			start = 0;
+			idx++;
+		}
+	}
+}
+
+static inline void avl_container_insert(struct avl_container *c, unsigned int pos, int cpu)
+{
+	list_add_tail(&c->centry, &avl_container_array[cpu][pos]);
+}
+
+static void avl_update_node(struct avl_container *c, unsigned int cpos, unsigned int size)
+{
+	struct avl_node *node = avl_get_node_ptr((unsigned long)c->ptr);
+	unsigned int num = AVL_ALIGN(size)/AVL_MIN_SIZE;
+
+	BUG_ON(cpos < num - 1);
+
+	avl_fill_bits(node, node->mask, ARRAY_SIZE(node->mask), avl_ptr_to_offset(c->ptr), num, 0);
+
+	if (cpos != num-1) {
+		void *ptr = c->ptr + size;
+		c = ptr;
+		c->ptr = ptr;
+
+		cpos -= num;
+
+		avl_container_insert(c, cpos, smp_processor_id());
+	}
+}
+
+static int avl_container_add(void *ptr, unsigned int size, gfp_t gfp_mask, int cpu)
+{
+	struct avl_container *c = ptr;
+	unsigned int pos = AVL_ALIGN(size)/AVL_MIN_SIZE-1;
+
+	if (!size)
+		return -EINVAL;
+
+	c->ptr = ptr;
+	avl_container_insert(c, pos, cpu);
+
+	return 0;
+}
+
+static inline struct avl_container *avl_dequeue(struct list_head *head)
+{
+	struct avl_container *cnt;
+
+	cnt = list_entry(head->next, struct avl_container, centry);
+	list_del(&cnt->centry);
+
+	return cnt;
+}
+
+void *avl_alloc(unsigned int size, gfp_t gfp_mask)
+{
+	unsigned int i;
+	void *ptr = NULL;
+	unsigned long flags;
+
+	size = AVL_ALIGN(size);
+
+	if (size > AVL_MAX_SIZE || size < AVL_MIN_SIZE)
+		return NULL;
+
+	local_irq_save(flags);
+	for (i=size/AVL_MIN_SIZE-1; i<AVL_CONTAINER_ARRAY_SIZE; ++i) {
+		struct list_head *head = &avl_container_array[smp_processor_id()][i];
+		struct avl_container *c;
+
+		if (!list_empty(head)) {
+			c = avl_dequeue(head);
+			ptr = c->ptr;
+			avl_update_node(c, i, size);
+			break;
+		}
+	}
+	local_irq_restore(flags);
+
+	if (!ptr) {
+		printk("%s: Failed to allocate %u bytes with %02x mode.\n", __func__, size, gfp_mask);
+		WARN_ON(1);
+	}
+
+	return ptr;
+}
+
+static inline struct avl_container *avl_search_container(void *ptr, unsigned int idx, int cpu)
+{
+	struct avl_container *c = ptr;
+
+	list_del(&c->centry);
+	c->ptr = ptr;
+
+	return c;
+}
+
+static void avl_combine(struct avl_node *node, void *lp, unsigned int lbits, void *rp, unsigned int rbits,
+		void *cur_ptr, unsigned int cur_bits, gfp_t gfp_mask, int cpu)
+{
+	struct avl_container *lc, *rc, *c;
+	unsigned int idx;
+	void *ptr;
+
+	lc = rc = c = NULL;
+	idx = cur_bits - 1;
+	ptr = cur_ptr;
+
+	c = (struct avl_container *)cur_ptr;
+	c->ptr = cur_ptr;
+
+	if (rp) {
+		rc = avl_search_container(rp, rbits-1, cpu);
+		if (!rc) {
+			printk(KERN_ERR "%p.%p: Failed to find a container for right pointer %p, rbits: %u.\n",
+					node, cur_ptr, rp, rbits);
+			BUG();
+		}
+
+		c = rc;
+		idx += rbits;
+		ptr = c->ptr;
+	}
+
+	if (lp) {
+		lc = avl_search_container(lp, lbits-1, cpu);
+		if (!lc) {
+			printk(KERN_ERR "%p.%p: Failed to find a container for left pointer %p, lbits: %u.\n",
+					node, cur_ptr, lp, lbits);
+			BUG();
+		}
+
+		if (!c) {
+			c = lc;
+			c->ptr = cur_ptr;
+		}
+		idx += lbits;
+		ptr = c->ptr;
+	}
+	if (!c) {
+		if (avl_container_add(cur_ptr, cur_bits*AVL_MIN_SIZE, gfp_mask, cpu))
+			return;
+	} else {
+		avl_container_insert(c, idx, cpu);
+	}
+}
+
+static void __avl_free(void *ptr, unsigned int size, gfp_t gfp_mask)
+{
+	value_t val = avl_ptr_to_value(ptr);
+	unsigned int pos, idx, sbits = AVL_ALIGN(size)/AVL_MIN_SIZE;
+	unsigned int rbits, lbits, cpu = avl_get_cpu_ptr(val);
+	struct avl_node *node;
+	unsigned long p;
+	void *lp, *rp;
+
+	if (cpu != smp_processor_id()) {
+		struct avl_free_list *l, *this = ptr;
+
+		this->cpu = smp_processor_id();
+		this->size = size;
+
+		spin_lock(&avl_free_lock[cpu]);
+		l = avl_free_list_head[cpu];
+		avl_free_list_head[cpu] = this;
+		this->next = l;
+		spin_unlock(&avl_free_lock[cpu]);
+		return;
+	}
+
+	node = avl_get_node_ptr((unsigned long)ptr);
+
+	pos = avl_ptr_to_offset(ptr);
+	idx = pos/BITS_PER_LONG;
+
+	p = node->mask[idx] >> (pos%BITS_PER_LONG);
+
+	if ((p & 1)) {
+		printk(KERN_ERR "%p.%p: Broken pointer: value: %lx, pos: %u, idx: %u, mask: %lx, p: %lx.\n",
+				node, ptr, val, pos, idx, node->mask[idx], p);
+		BUG();
+	}
+
+	avl_fill_bits(node, node->mask, ARRAY_SIZE(node->mask), pos, sbits, 1);
+
+	lp = rp = NULL;
+	rbits = lbits = 0;
+
+	idx = (pos+sbits)/BITS_PER_LONG;
+	p = (pos+sbits)%BITS_PER_LONG;
+
+	if ((node->mask[idx] >> p) & 1) {
+		lbits = avl_count_set_up(node->mask, ARRAY_SIZE(node->mask), pos+sbits);
+		if (lbits) {
+			lp = (void *)(val + (pos + sbits)*AVL_MIN_SIZE);
+		}
+	}
+
+	if (pos) {
+		idx = (pos-1)/BITS_PER_LONG;
+		p = (pos-1)%BITS_PER_LONG;
+		if ((node->mask[idx] >> p) & 1) {
+			rbits = avl_count_set_down(node->mask, pos-1);
+			if (rbits) {
+				rp = (void *)(val + (pos-rbits)*AVL_MIN_SIZE);
+			}
+		}
+	}
+
+	avl_combine(node, lp, lbits, rp, rbits, ptr, sbits, gfp_mask, cpu);
+}
+
+void avl_free(void *ptr, unsigned int size)
+{
+	unsigned long flags;
+	struct avl_free_list *l;
+	int cpu;
+
+	local_irq_save(flags);
+	__avl_free(ptr, size, GFP_ATOMIC);
+
+	cpu = smp_processor_id();
+
+	while (avl_free_list_head[cpu]) {
+		spin_lock(&avl_free_lock[cpu]);
+		l = avl_free_list_head[cpu];
+		avl_free_list_head[cpu] = l->next;
+		spin_unlock(&avl_free_lock[cpu]);
+		__avl_free(l, l->size, GFP_ATOMIC);
+	}
+	local_irq_restore(flags);
+}
+
+static void avl_fini_cpu(int cpu)
+{
+	int i;
+
+	for (i=0; i<AVL_NODE_NUM; ++i) {
+		struct avl_node *node = avl_get_node(i, cpu);
+		if (node->value)
+			free_page(node->value);
+		node->value = 0;
+	}
+
+	kfree(avl_container_array[cpu]);
+
+	for (i=0; i<AVL_NODE_PAGES; ++i)
+		kfree(avl_node_array[cpu][i]);
+}
+
+static int avl_init_cpu(int cpu)
+{
+	unsigned int i;
+	int err;
+	unsigned long ptr;
+
+	spin_lock_init(&avl_free_lock[cpu]);
+
+	avl_node_array[cpu] = kzalloc(AVL_NODE_PAGES * sizeof(void *), GFP_KERNEL);
+	if (!avl_node_array[cpu])
+		return -ENOMEM;
+
+	for (i=0; i<AVL_NODE_PAGES; ++i) {
+		avl_node_array[cpu][i] = (struct avl_node *)__get_free_page(GFP_KERNEL);
+		if (!avl_node_array[cpu][i])
+			goto err_out_free_node;
+	}
+
+	avl_container_array[cpu] = kzalloc(sizeof(struct list_head) * AVL_CONTAINER_ARRAY_SIZE, GFP_KERNEL);
+	if (!avl_container_array[cpu])
+		goto err_out_free_node;
+
+	for (i=0; i<AVL_CONTAINER_ARRAY_SIZE; ++i)
+		INIT_LIST_HEAD(&avl_container_array[cpu][i]);
+
+	/*
+	 * NTA steals pages and never returns them back to the system.
+	 */
+	ptr = __get_free_pages(GFP_KERNEL, AVL_ORDER);
+	if (!ptr)
+		goto err_out_free_container;
+
+	avl_root[cpu] = avl_node_alloc(ptr, cpu);
+	avl_set_cpu_ptr(ptr, cpu, AVL_ORDER);
+	avl_set_node_ptr(ptr, avl_root[cpu], AVL_ORDER);
+
+	avl_insert(avl_root[cpu], avl_node_alloc(ptr, cpu), cpu);
+	err = avl_container_add((void *)ptr, PAGE_SIZE*(1<<AVL_ORDER), GFP_KERNEL, cpu);
+	if (err) {
+		printk(KERN_ERR "Failed to add new container: ptr: %lx, size: %lu, err: %d.\n", ptr, PAGE_SIZE, err);
+		goto err_out_free_values;
+	}
+
+	for (i=0; i<AVL_NODE_NUM-2; ++i) {
+		ptr = __get_free_pages(GFP_KERNEL, AVL_ORDER);
+		if (!ptr) {
+			printk(KERN_ERR "Failed to allocate %d'th page.\n", i);
+			goto err_out_free_values;
+		}
+
+		avl_node_search_alloc(ptr, cpu);
+		err = avl_container_add((void *)ptr, PAGE_SIZE*(1<<AVL_ORDER), GFP_KERNEL, cpu);
+		if (err) {
+			printk(KERN_ERR "Failed to add new container: ptr: %lx, size: %lu, err: %d.\n", ptr, PAGE_SIZE, err);
+			goto err_out_free_values;
+		}
+	}
+
+	return 0;
+
+err_out_free_values:
+	for (i=0; i<AVL_NODE_NUM - 1; ++i) {
+		struct avl_node *node = avl_get_node(i, cpu);
+		if (node->value)
+			free_page(node->value);
+		node->value = 0;
+	}
+
+err_out_free_container:
+	kfree(avl_container_array[cpu]);
+err_out_free_node:
+	for (i=0; i<AVL_NODE_PAGES; ++i)
+		kfree(avl_node_array[cpu][i]);
+
+	kfree(avl_node_array[cpu]);
+
+	return -ENOMEM;
+}
+
+int avl_init(void)
+{
+	int err, cpu;
+
+	for_each_possible_cpu(cpu) {
+		err = avl_init_cpu(cpu);
+		if (err)
+			goto err_out;
+	}
+
+	printk(KERN_INFO "Network tree allocator has been initialized.\n");
+	return 0;
+
+err_out:
+	for_each_possible_cpu(cpu)
+		avl_fini_cpu(cpu);
+
+	return -ENOMEM;
+}
diff --git a/net/core/alloc/avl.h b/net/core/alloc/avl.h
new file mode 100644
index 0000000..e6f8b6c
--- /dev/null
+++ b/net/core/alloc/avl.h
@@ -0,0 +1,84 @@
+/*
+ * avl.h
+ *
+ * 2006 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __AVL_H
+#define __AVL_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <asm/page.h>
+
+//#define AVL_DEBUG
+
+#ifdef AVL_DEBUG
+#define ulog(f, a...) printk(f, ##a)
+#else
+#define ulog(f, a...)
+#endif
+
+/*
+ * Network tree allocator variables.
+ */
+
+typedef unsigned long value_t;
+typedef u16 avl_t;
+
+#define AVL_ALIGN_SIZE	32
+#define AVL_ALIGN(x)	ALIGN(x, AVL_ALIGN_SIZE)
+
+#define AVL_ORDER	2
+#define AVL_BITS	10	/* Must cover maximum number of pages used for allocation pools */
+#define AVL_NODE_EMPTY	((1UL<<AVL_BITS) - 1)
+
+#define AVL_MIN_SIZE	AVL_ALIGN_SIZE
+#define AVL_MAX_SIZE	(AVL_MIN_SIZE * AVL_NODE_EMPTY)
+#define AVL_NODE_NUM	(AVL_NODE_EMPTY - 1)
+
+enum avl_balance {
+	AVL_LEFT = 0,
+	AVL_RIGHT,
+	AVL_BALANCED,
+};
+
+struct avl_node
+{
+	u64	left:AVL_BITS,
+		right:AVL_BITS,
+		parent:AVL_BITS,
+		id:AVL_BITS,
+		balance:2,
+		pos:1,
+		res:(64-4*AVL_BITS)-3;
+	value_t	value;
+	DECLARE_BITMAP(mask, (1<<AVL_ORDER)*PAGE_SIZE/AVL_MIN_SIZE);
+};
+
+struct avl_container
+{
+	void *ptr;
+	struct list_head centry;
+};
+
+struct avl_container *avl_container_alloc(gfp_t gfp_mask);
+void avl_container_free(struct avl_container *);
+int avl_container_cache_init(void);
+
+#endif /* __AVL_H */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 022d889..7239208 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -156,7 +156,7 @@ struct sk_buff *__alloc_skb(unsigned int
 	/* Get the DATA. Size must match skb_add_mtu().
 	 */
 	size = SKB_DATA_ALIGN(size);
-	data = ____kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+	data = avl_alloc(size + sizeof(struct skb_shared_info), gfp_mask);
 	if (!data)
 		goto nodata;
 
@@ -176,6 +176,8 @@ struct sk_buff *__alloc_skb(unsigned int
 	shinfo->gso_type = 0;
 	shinfo->ip6_frag_id = 0;
 	shinfo->frag_list = NULL;
+
+	skb->__tsize = size + sizeof(struct skb_shared_info);
 
 	if (fclone) {
 		struct sk_buff *child = skb + 1;
@@ -223,7 +225,7 @@ struct sk_buff *alloc_skb_from_cache(kme
 
 	/* Get the DATA. */
 	size = SKB_DATA_ALIGN(size);
-	data = kmem_cache_alloc(cp, gfp_mask);
+	data = avl_alloc(size, gfp_mask);
 	if (!data)
 		goto nodata;
 
@@ -234,6 +236,7 @@ struct sk_buff *alloc_skb_from_cache(kme
 	skb->data = data;
 	skb->tail = data;
 	skb->end = data + size;
+	skb->__tsize = size;
 
 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
 	skb_shinfo(skb)->nr_frags = 0;
@@ -313,7 +316,7 @@ static void skb_release_data(struct sk_b
 		if (skb_shinfo(skb)->frag_list)
 			skb_drop_fraglist(skb);
 
-		kfree(skb->head);
+		avl_free(skb->head, skb->__tsize);
 	}
 }
 
@@ -495,6 +498,7 @@ #endif
 	skb_copy_secmark(n, skb);
 #endif
 	C(truesize);
+	C(__tsize);
 	atomic_set(&n->users, 1);
 	C(head);
 	C(data);
@@ -688,7 +692,7 @@ int pskb_expand_head(struct sk_buff *skb
 
 	size = SKB_DATA_ALIGN(size);
 
-	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+	data = avl_alloc(size + sizeof(struct skb_shared_info), gfp_mask);
 	if (!data)
 		goto nodata;
 
@@ -707,6 +711,7 @@ int pskb_expand_head(struct sk_buff *skb
 
 	off = (data + nhead) - skb->head;
 
+	skb->__tsize = size + sizeof(struct skb_shared_info);
 	skb->head = data;
 	skb->end = data + size;
 	skb->data += off;
@@ -2057,6 +2062,9 @@ void __init skb_init(void)
 						NULL, NULL);
 	if (!skbuff_fclone_cache)
 		panic("cannot create skbuff cache");
+
+	if (avl_init())
+		panic("Failed to initialize network tree allocator.\n");
 }
 
 EXPORT_SYMBOL(___pskb_trim);
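
The heart of the allocator's bookkeeping is the avl_fill_bits() loop in the patch above: it sets or clears a run of num bits starting at bit pos in a per-node bitmap, one word at a time, handling runs that cross word boundaries. Allocation clears the bits for the chunks it hands out (avl_update_node() passes bit = 0) and freeing sets them back (bit = 1), so a set bit marks a free AVL_MIN_SIZE chunk. What follows is a minimal, standalone userspace sketch of that same loop, assuming a 64-bit unsigned long; the fill_bits() name and the local BITS_PER_LONG definition are illustrative stand-ins, not part of the patch.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Set (bit = 1) or clear (bit = 0) `num` consecutive bits starting at
 * bit `pos` in a bitmap of `mask_size` words; mirrors avl_fill_bits(). */
static void fill_bits(unsigned long *mask, unsigned int mask_size,
		      unsigned int pos, unsigned int num, unsigned int bit)
{
	unsigned int idx = pos / BITS_PER_LONG;
	unsigned int start = pos % BITS_PER_LONG;

	while (num && idx < mask_size) {
		/* All bits from `start` upward within the current word... */
		unsigned long m = ((~0UL) >> start) << start;

		if (start + num <= BITS_PER_LONG) {
			/* ...trimmed when the run ends inside this word. */
			unsigned int upper = BITS_PER_LONG - (start + num);
			m = (m << upper) >> upper;
		}

		if (bit)
			mask[idx] |= m;
		else
			mask[idx] &= ~m;

		if (start + num <= BITS_PER_LONG)
			num = 0;	/* run finished in this word */
		else {
			num -= BITS_PER_LONG - start;
			start = 0;	/* continue at the next word */
			idx++;
		}
	}
}

int main(void)
{
	unsigned long mask[2] = { 0, 0 };

	/* Set bits 60..67: the run crosses the word boundary. */
	fill_bits(mask, 2, 60, 8, 1);
	printf("%016lx %016lx\n", mask[0], mask[1]); /* f000000000000000 000000000000000f */

	/* Clear bits 62..65 in the middle of that run. */
	fill_bits(mask, 2, 62, 4, 0);
	printf("%016lx %016lx\n", mask[0], mask[1]); /* 3000000000000000 000000000000000c */
	return 0;
}

With this picture in mind, __avl_free() reads naturally: after setting the freed run, it scans for adjacent set bits above (avl_count_set_up) and below (avl_count_set_down) the run and hands the neighbours to avl_combine(), which merges them into one larger free container.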