nta.6
+	if (!ptr && !try) {
+		struct avl_node_entry *entry;
+
+		try = 1;
+
+		entry = avl_node_entry_alloc(gfp_mask, get_order(size));
+		if (entry) {
+			local_irq_save(flags);
+			avl_node_entry_commit(entry, smp_processor_id());
+			goto repeat;
+		}
+
+	}
+#endif
+	if (unlikely(!ptr && try))
+		if (net_ratelimit())
+			printk("%s: Failed to allocate %u bytes.\n", __func__, size);
+
+	return ptr;
+}
+
+/*
+ * Remove free chunk from the list.
+ */
+static inline struct avl_container *avl_search_container(void *ptr, unsigned int idx, int cpu)
+{
+	struct avl_container *c = ptr;
+
+	list_del(&c->centry);
+	c->ptr = ptr;
+
+	return c;
+}
+
+/*
+ * Combine neighbour free chunks into the one with bigger size
+ * and put new chunk into list of free chunks with appropriate size.
+ */
+static void avl_combine(struct avl_node *node, void *lp, unsigned int lbits, void *rp, unsigned int rbits,
+		void *cur_ptr, unsigned int cur_bits, int cpu)
+{
+	struct avl_container *lc, *rc, *c;
+	unsigned int idx;
+	void *ptr;
+
+	lc = rc = c = NULL;
+	idx = cur_bits - 1;
+	ptr = cur_ptr;
+
+	c = (struct avl_container *)cur_ptr;
+	c->ptr = cur_ptr;
+
+	if (rp) {
+		rc = avl_search_container(rp, rbits-1, cpu);
+		if (!rc) {
+			printk(KERN_ERR "%p.%p: Failed to find a container for right pointer %p, rbits: %u.\n",
+					node, cur_ptr, rp, rbits);
+			BUG();
+		}
+
+		c = rc;
+		idx += rbits;
+		ptr = c->ptr;
+	}
+
+	if (lp) {
+		lc = avl_search_container(lp, lbits-1, cpu);
+		if (!lc) {
+			printk(KERN_ERR "%p.%p: Failed to find a container for left pointer %p, lbits: %u.\n",
+					node, cur_ptr, lp, lbits);
+			BUG();
+		}
+
+		idx += lbits;
+		ptr = c->ptr;
+	}
+	avl_container_insert(c, idx, cpu);
+}
+
+/*
+ * Free memory region of given size.
+ * Must be called on the same CPU where allocation happened
+ * with disabled interrupts.
+ */
+static void __avl_free_local(void *ptr, unsigned int size)
+{
+	unsigned long val = avl_ptr_to_value(ptr);
+	unsigned int pos, idx, sbits = AVL_ALIGN(size)/AVL_MIN_SIZE;
+	unsigned int rbits, lbits, cpu = avl_get_cpu_ptr(val);
+	struct avl_node *node;
+	unsigned long p;
+	void *lp, *rp;
+
+	node = avl_get_node_ptr((unsigned long)ptr);
+
+	pos = avl_ptr_to_offset(ptr);
+	idx = pos/BITS_PER_LONG;
+
+	p = node->mask[idx] >> (pos%BITS_PER_LONG);
+
+	if ((p & 1)) {
+		if (net_ratelimit())
+			printk(KERN_ERR "%p.%p: Broken pointer: value: %lx, pos: %u, idx: %u, mask: %lx, p: %lx.\n",
+					node, ptr, val, pos, idx, node->mask[idx], p);
+		return;
+	}
+
+	avl_fill_bits(node->mask, ARRAY_SIZE(node->mask), pos, sbits, 1);
+
+	lp = rp = NULL;
+	rbits = lbits = 0;
+
+	idx = (pos+sbits)/BITS_PER_LONG;
+	p = (pos+sbits)%BITS_PER_LONG;
+
+	if ((node->mask[idx] >> p) & 1) {
+		lbits = avl_count_set_up(node->mask, ARRAY_SIZE(node->mask), pos+sbits);
+		if (lbits) {
+			lp = (void *)(val + (pos + sbits)*AVL_MIN_SIZE);
+		}
+	}
+
+	if (pos) {
+		idx = (pos-1)/BITS_PER_LONG;
+		p = (pos-1)%BITS_PER_LONG;
+		if ((node->mask[idx] >> p) & 1) {
+			rbits = avl_count_set_down(node->mask, pos-1);
+			if (rbits) {
+				rp = (void *)(val + (pos-rbits)*AVL_MIN_SIZE);
+			}
+		}
+	}
+
+	avl_combine(node, lp, lbits, rp, rbits, ptr, sbits, cpu);
+}
+
+/*
+ * Free memory region of given size.
+ * If freeing CPU is not the same as allocation one, chunk will
+ * be placed into list of to-be-freed objects on allocation CPU,
+ * otherwise chunk will be freed and combined with neighbours.
+ * Must be called with disabled interrupts.
+ */
+static void __avl_free(void *ptr, unsigned int size)
+{
+	int cpu = avl_get_cpu_ptr((unsigned long)ptr);
+
+	if (cpu != smp_processor_id()) {
+		struct avl_free_list *l, *this = ptr;
+		struct avl_allocator_data *alloc = &avl_allocator[cpu];
+
+		this->cpu = smp_processor_id();
+		this->size = size;
+
+		spin_lock(&alloc->avl_free_lock);
+		l = alloc->avl_free_list_head;
+		alloc->avl_free_list_head = this;
+		this->next = l;
+		spin_unlock(&alloc->avl_free_lock);
+		return;
+	}
+
+	__avl_free_local(ptr, size);
+}
+
+/*
+ * Free memory region of given size without sniffer data update.
+ */
+void avl_free_no_zc(void *ptr, unsigned int size)
+{
+	unsigned long flags;
+	struct avl_free_list *l;
+	struct avl_allocator_data *alloc;
+	struct avl_chunk *ch = avl_ptr_to_chunk(ptr, size);
+
+	if (unlikely((ch->canary != AVL_CANARY) || ch->size != size)) {
+		printk("Freeing destroyed object: ptr: %p, size: %u, canary: %x, must be %x, refcnt: %d, saved size: %u.\n",
+				ptr, size, ch->canary, AVL_CANARY, atomic_read(&ch->refcnt), ch->size);
+		return;
+	}
+
+	if (atomic_dec_and_test(&ch->refcnt)) {
+		local_irq_save(flags);
+		__avl_free(ptr, size);
+
+		alloc = &avl_allocator[smp_processor_id()];
+
+		while (alloc->avl_free_list_head) {
+			spin_lock(&alloc->avl_free_lock);
+			l = alloc->avl_free_list_head;
+			alloc->avl_free_list_head = l->next;
+			spin_unlock(&alloc->avl_free_lock);
+			__avl_free_local(l, l->size);
+		}
+		local_irq_restore(flags);
+	}
+}
+
+/*
+ * Free memory region of given size.
+ */
+void avl_free(void *ptr, unsigned int size)
+{
+	struct avl_chunk *ch = avl_ptr_to_chunk(ptr, size);
+
+	if (unlikely((ch->canary != AVL_CANARY) || ch->size != size)) {
+		printk("Freeing destroyed object: ptr: %p, size: %u, canary: %x, must be %x, refcnt: %d, saved size: %u.\n",
+				ptr, size, ch->canary, AVL_CANARY, atomic_read(&ch->refcnt), ch->size);
+		return;
+	}
+	avl_update_zc(avl_get_node_ptr((unsigned long)ptr), ptr, size);
+	avl_free_no_zc(ptr, size);
+}
+
+/*
+ * Initialize per-cpu allocator data.
+ */
+static int avl_init_cpu(int cpu)
+{
+	unsigned int i;
+	struct avl_allocator_data *alloc = &avl_allocator[cpu];
+	struct avl_node_entry *entry;
+
+	spin_lock_init(&alloc->avl_free_lock);
+	spin_lock_init(&alloc->avl_node_lock);
+	INIT_LIST_HEAD(&alloc->avl_node_list);
+
+	alloc->avl_container_array = kzalloc(sizeof(struct list_head) * AVL_CONTAINER_ARRAY_SIZE, GFP_KERNEL);
+	if (!alloc->avl_container_array)
+		goto err_out_exit;
+
+	for (i=0; i<AVL_CONTAINER_ARRAY_SIZE; ++i)
+		INIT_LIST_HEAD(&alloc->avl_container_array[i]);
+
+	entry = avl_node_entry_alloc(GFP_KERNEL, AVL_ORDER);
+	if (!entry)
+		goto err_out_free_container;
+
+	avl_node_entry_commit(entry, cpu);
+
+	return 0;
+
+err_out_free_container:
+	kfree(alloc->avl_container_array);
+err_out_exit:
+	return -ENOMEM;
+}
+
+/*
+ * Initialize network allocator.
+ */
+int avl_init(void)
+{
+	int err, cpu;
+
+	for_each_possible_cpu(cpu) {
+		err = avl_init_cpu(cpu);
+		if (err)
+			goto err_out;
+	}
+
+	err = avl_init_zc();
+
+	printk(KERN_INFO "Network tree allocator has been initialized.\n");
+	return 0;
+
+err_out:
+	panic("Failed to initialize network allocator.\n");
+
+	return -ENOMEM;
+}
diff --git a/net/core/alloc/zc.c b/net/core/alloc/zc.c
new file mode 100644
index 0000000..8be4d7d
--- /dev/null
+++ b/net/core/alloc/zc.c
@@ -0,0 +1,487 @@
+/*
+ * zc.c
+ *
+ * 2006 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/ioctl.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/ip.h>
+#include <net/flow.h>
+#include <net/dst.h>
+#include <net/route.h>
+#include <asm/uaccess.h>
+#include <linux/avl.h>
+
+struct zc_private
+{
+	struct zc_data *zcb;
+	struct mutex lock;
+	int cpu;
+};
+
+static char zc_name[] = "zc";
+static int zc_major;
+struct zc_control zc_sniffer;
+
+static int zc_release(struct inode *inode, struct file *file)
+{
+	struct zc_private *priv = file->private_data;
+
+	kfree(priv);
+	return 0;
+}
+
+static int zc_open(struct inode *inode, struct file *file)
+{
+	struct zc_private *priv;
+	struct zc_control *ctl = &zc_sniffer;
+
+	priv = kzalloc(sizeof(struct zc_private) + ctl->zc_num * sizeof(struct zc_data), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->zcb = (struct zc_data *)(priv+1);
+	priv->cpu = 0; /* Use CPU0 by default */
+	mutex_init(&priv->lock);
+
+	file->private_data = priv;
+
+	return 0;
+}
+
+static int zc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct zc_private *priv = file->private_data;
+	struct avl_allocator_data *alloc = &avl_allocator[priv->cpu];
+	struct avl_node_entry *e;
+	unsigned long flags, start = vma->vm_start;
+	int err = 0, idx, off;
+	unsigned int i, j, st, num, total_num;
+
+	st = vma->vm_pgoff;
+	total_num = (vma->vm_end - vma->vm_start)/PAGE_SIZE;
+
+	printk("%s: start: %lx, end: %lx, total_num: %u, st: %u.\n", __func__, start, vma->vm_end, total_num, st);
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_flags |= VM_RESERVED;
+	vma->vm_file = file;
+
+	spin_lock_irqsave(&alloc->avl_node_lock, flags);
+	list_for_each_entry(e, &alloc->avl_node_list, node_entry) {
+		if (st >= e->avl_node_num*(1U<<e->avl_node_order)) {
+#if 0
+			printk("%s: continue on cpu: %d, e: %p, total_num: %u, node_num: %u, node_order: %u, pages_in_node: %u, st: %u.\n",
+					__func__, priv->cpu, e, total_num, e->avl_node_num, e->avl_node_order,
+					e->avl_node_num*(1U<<e->avl_node_order), st);
+#endif
+			st -= e->avl_node_num*(1U<<e->avl_node_order);
+			continue;
+		}
+		num = min_t(unsigned int, total_num, e->avl_node_num*(1<<e->avl_node_order));
+
+		printk("%s: cpu: %d, e: %p, total_num: %u, node_num: %u, node_order: %u, st: %u, num: %u.\n",
+				__func__, priv->cpu, e, total_num, e->avl_node_num, e->avl_node_order, st, num);
+
+		idx = 0;
+		off = st;
+		for (i=st; i<num;) {
+			struct avl_node *node = &e->avl_node_array[idx][off];
+
+			if (++off >= AVL_NODES_ON_PAGE) {
+				idx++;
+				off = 0;
+			}
+
+			for (j=0; (j<(1<<e->avl_node_order)) && (i<num); ++j, ++i) {
+				unsigned long virt = node->value + (j<<PAGE_SHIFT);
+				err = vm_insert_page(vma, start, virt_to_page(virt));
+				if (err) {
+					printk("\n%s: Failed to insert page for addr %lx into %lx, err: %d.\n",
+							__func__, virt, start, err);
+					break;
+				}
+				start += PAGE_SIZE;
+			}
+		}
+		if (err)
+			break;
+		total_num -= num;
+
+		if (total_num == 0)
+			break;
+	}
+	spin_unlock_irqrestore(&alloc->avl_node_lock, flags);
+
+	return err;
+}
+
+static ssize_t zc_write(struct file *file, const char __user *buf, size_t size, loff_t *off)
+{
+	ssize_t sz = 0;
+	struct zc_private *priv = file->private_data;
+	unsigned long flags;
+	unsigned int req_num = size/sizeof(struct zc_data), cnum, csize, i;
+	struct zc_control *ctl = &zc_sniffer;
+
+	while (size) {
+		cnum = min_t(unsigned int, req_num, ctl->zc_num);
+		csize = cnum*sizeof(struct zc_data);
+
+		if (copy_from_user(priv->zcb, buf, csize)) {
+			printk("%s: copy_from_user() failed.\n", __func__);
+			break;
+		}
+
+		spin_lock_irqsave(&ctl->zc_lock, flags);
+		for (i=0; i<cnum; ++i)
+			avl_free_no_zc(priv->zcb[i].data.ptr, priv->zcb[i].size);
+		ctl->zc_used -= cnum;
+		spin_unlock_irqrestore(&ctl->zc_lock, flags);
+
+		sz += csize;
+		size -= csize;
+		buf += csize;
+	}
+
+	return sz;
+}
+
+static ssize_t zc_read(struct file *file, char __user *buf, size_t size, loff_t *off)
+{
+	ssize_t sz = 0;
+	struct zc_private *priv = file->private_data;
+	unsigned long flags;
+	unsigned int pos, req_num = size/sizeof(struct zc_data), cnum, csize;
+	struct zc_control *ctl = &zc_sniffer;
+
+	wait_event_interruptible(ctl->zc_wait, ctl->zc_used > 0);
+
+	spin_lock_irqsave(&ctl->zc_lock, flags);
+	cnum = min_t(unsigned int, req_num, ctl->zc_used);
+	csize = cnum*sizeof(struct zc_data);
+	if (ctl->zc_used) {
+		if (ctl->zc_pos >= ctl->zc_used) {
+			pos = ctl->zc_pos - ctl->zc_used;
+			memcpy(priv->zcb, &ctl->zcb[pos], csize);
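
The zc.c half of the patch exposes the allocator's node pages to userspace through the "zc" character device: zc_mmap() maps the pages, zc_read() hands out an array of struct zc_data descriptors once zc_used becomes non-zero, and zc_write() returns processed descriptors so the kernel can drop the chunks via avl_free_no_zc(). The fragment below is a hypothetical userspace sketch of that loop, not part of the patch; the /dev/zc node path, the mapping length and the struct zc_data layout shown here are assumptions inferred from the handlers, not the real definitions from linux/avl.h.

/*
 * zc_usage.c - hypothetical userspace sketch (not part of the patch).
 * Opens the zc character device, maps the allocator's node pages and
 * cycles descriptors through read()/write().
 */
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

struct zc_data {			/* assumed layout: pointer/offset union plus chunk size */
	union {
		void		*ptr;
		uint64_t	off;
	} data;
	uint32_t		size;
};

int main(void)
{
	struct zc_data zcb[64];
	/* Assumed device node created for the dynamically allocated zc_major. */
	int fd = open("/dev/zc", O_RDWR);
	if (fd == -1)
		return 1;

	/* Map the allocator's node pages (zc_mmap) so packet data can be
	 * inspected in place; 1 MiB is an arbitrary example length. */
	size_t len = 1024 * 1024;
	void *area = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	if (area == MAP_FAILED) {
		close(fd);
		return 1;
	}

	for (;;) {
		/* zc_read() sleeps until the kernel has queued used chunks,
		 * then copies an array of zc_data descriptors to us. */
		ssize_t n = read(fd, zcb, sizeof(zcb));
		if (n <= 0)
			break;

		/* ... examine the described packet data through the mapping ... */

		/* Returning the descriptors through zc_write() lets the kernel
		 * call avl_free_no_zc() on each chunk. */
		if (write(fd, zcb, n) != n)
			break;
	}

	munmap(area, len);
	close(fd);
	return 0;
}

Returning descriptors through write() keeps the free path batched: a single system call can release as many chunks as fit into the caller's buffer, matching the per-zc_num batching done inside zc_write().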