
nta.3

A software package implementing zero-copy technology on the Linux platform.
+
+#define AVL_CONTAINER_ARRAY_SIZE	(AVL_MAX_SIZE/AVL_MIN_SIZE)
+
+struct avl_node_entry;
+
+/*
+ * Meta-information container for each contiguous block used in allocation.
+ * @value - start address of the contiguous block.
+ * @mask - bitmask of free and empty chunks [1 - free, 0 - used].
+ * @entry - pointer to parent node entry.
+ */
+struct avl_node
+{
+	unsigned long		value;
+	DECLARE_BITMAP(mask, AVL_MAX_SIZE/AVL_MIN_SIZE);
+	struct avl_node_entry	*entry;
+};
+
+/*
+ * Free chunks are dereferenced into this structure and placed into LIFO list.
+ */
+
+struct avl_container
+{
+	void			*ptr;
+	struct list_head	centry;
+};
+
+/*
+ * When freeing happens on different than allocation CPU,
+ * chunk is dereferenced into this structure and placed into
+ * single-linked list in allocation CPU private area.
+ */
+
+struct avl_free_list
+{
+	struct avl_free_list		*next;
+	unsigned int			size;
+	unsigned int			cpu;
+};
+
+/*
+ * This structure is placed after each allocated chunk and contains
+ * @canary used to check memory overflow and reference counter for
+ * given memory region, which is used for example for zero-copy access.
+ */
+
+struct avl_chunk
+{
+	unsigned int			canary;
+	atomic_t			refcnt;
+};
+
+/*
+ * Each array of nodes is places into dynamically grown list.
+ * @avl_node_array - array of nodes (linked into pages)
+ * @node_entry - entry in avl_allocator_data.avl_node_list.
+ * @avl_node_order - allocation order for each node in @avl_node_array
+ * @avl_node_num - number of nodes in @avl_node_array
+ * @avl_entry_num - number of this entry inside allocator
+ */
+
+struct avl_node_entry
+{
+	struct avl_node 	**avl_node_array;
+	struct list_head	node_entry;
+	u32			avl_entry_num;
+	u16 			avl_node_order, avl_node_num;
+};
+
+/*
+ * Main per-cpu allocator structure.
+ * @avl_container_array - array of lists of free chunks indexed by size of the elements
+ * @avl_free_list_head - single-linked list of objects, which were started to be freed on different CPU
+ * @avl_free_list_map_head - single-linked list of objects, which map update was started on different CPU
+ * @avl_free_lock - lock protecting avl_free_list_head
+ * @avl_node_list - list of avl_node_entry'es
+ * @avl_node_lock - lock used to protect avl_node_list from access from zero-copy devices.
+ * @entry_num - number of entries inside allocator.
+ */
+struct avl_allocator_data
+{
+	struct list_head 	*avl_container_array;
+	struct avl_free_list 	*avl_free_list_head;
+	struct avl_free_list 	*avl_free_map_list_head;
+	spinlock_t 		avl_free_lock;
+	struct list_head 	avl_node_list;
+	spinlock_t 		avl_node_lock;
+	u32			avl_entry_num;
+};
+
+int avl_init_zc(void);
+void *avl_alloc(unsigned int size, gfp_t gfp_mask);
+void avl_free(void *ptr, unsigned int size);
+int avl_init(void);
+
+struct zc_control
+{
+	struct zc_data		*zcb;
+	unsigned int		zc_num, zc_used, zc_pos;
+	spinlock_t		zc_lock;
+	wait_queue_head_t	zc_wait;
+};
+
+extern struct zc_control zc_sniffer;
+extern struct avl_allocator_data avl_allocator[NR_CPUS];
+
+#endif /* __KERNEL__ */
+#endif /* __AVL_H */
diff --git a/net/core/alloc/zc.c b/net/core/alloc/zc.c
new file mode 100644
index 0000000..d024d33
--- /dev/null
+++ b/net/core/alloc/zc.c
@@ -0,0 +1,258 @@
+/*
+ * 	avl.c
+ *
+ * 2006 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <asm/uaccess.h>
+
+#include "avl.h"
+
+struct zc_private
+{
+	struct zc_data	*zcb;
+};
+
+static char zc_name[] = "zc";
+static int zc_major;
+struct zc_control zc_sniffer;
+
+static int zc_release(struct inode *inode, struct file *file)
+{
+	struct zc_private *priv = file->private_data;
+
+	kfree(priv);
+	return 0;
+}
+
+static int zc_open(struct inode *inode, struct file *file)
+{
+	struct zc_private *priv;
+	struct zc_control *ctl = &zc_sniffer;
+
+	priv = kzalloc(sizeof(struct zc_private) + ctl->zc_num * sizeof(struct zc_data), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->zcb = (struct zc_data *)(priv+1);
+
+	file->private_data = priv;
+
+	return 0;
+}
+
+static int zc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long flags, start = vma->vm_start;
+	int cpu, err = 0;
+	unsigned int i, j, st, num, total_num;
+
+	st = vma->vm_pgoff;
+	total_num = (vma->vm_end - vma->vm_start)/PAGE_SIZE;
+
+	printk("%s: start: %lx, end: %lx, total_num: %u, st: %u.\n", __func__, start, vma->vm_end, total_num, st);
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_flags |= VM_RESERVED;
+	vma->vm_file = file;
+
+	for_each_possible_cpu(cpu) {
+		struct avl_allocator_data *alloc = &avl_allocator[cpu];
+		struct avl_node_entry *e;
+		int idx, off;
+
+		spin_lock_irqsave(&alloc->avl_node_lock, flags);
+		list_for_each_entry(e, &alloc->avl_node_list, node_entry) {
+
+			if (st >= e->avl_node_num*(1U<<e->avl_node_order)) {
+#if 0
+				printk("%s: continue on cpu: %d, e: %p, total_num: %u, node_num: %u, node_order: %u, pages_in_node: %u, st: %u.\n",
+						__func__, cpu, e, total_num, e->avl_node_num, e->avl_node_order,
+						e->avl_node_num*(1U<<e->avl_node_order), st);
+#endif
+				st -= e->avl_node_num*(1U<<e->avl_node_order);
+				continue;
+			}
+			num = min_t(unsigned int, total_num, e->avl_node_num*(1<<e->avl_node_order));
+
+			printk("%s: cpu: %d, e: %p, total_num: %u, node_num: %u, node_order: %u, st: %u, num: %u, pages: ",
+					__func__, cpu, e, total_num, e->avl_node_num, e->avl_node_order, st, num);
+
+			idx = 0;
+			off = st;
+			for (i=st; i<num; ++i) {
+				struct avl_node *node = &e->avl_node_array[idx][off];
+
+				if (++off >= AVL_NODES_ON_PAGE) {
+					idx++;
+					off = 0;
+				}
+
+				for (j=0; (j<(1<<e->avl_node_order)) && (i<num); ++j, ++i) {
+					unsigned long virt = node->value + (j<<PAGE_SHIFT);
+					err = vm_insert_page(vma, start, virt_to_page(virt));
+					if (err) {
+						printk("\n%s: Failed to insert page for addr %lx into %lx, err: %d.\n",
+								__func__, virt, start, err);
+						break;
+					}
+					printk("%lx [%lx - %p] ", node->value, virt, virt_to_page(virt));
+					start += PAGE_SIZE;
+				}
+			}
+			printk("\n");
+			if (err)
+				break;
+			total_num -= num;
+
+			if (total_num == 0)
+				break;
+		}
+		spin_unlock_irqrestore(&alloc->avl_node_lock, flags);
+
+		if (err)
+			break;
+		if (total_num == 0)
+			break;
+	}
+
+	return err;
+}
+
+static ssize_t zc_write(struct file *file, const char __user *buf, size_t size, loff_t *off)
+{
+	ssize_t sz = 0;
+	struct zc_private *priv = file->private_data;
+	unsigned long flags;
+	unsigned int req_num = size/sizeof(struct zc_data), cnum, csize, i;
+	struct zc_control *ctl = &zc_sniffer;
+
+	while (size) {
+		cnum = min_t(unsigned int, req_num, ctl->zc_num);
+		csize = cnum*sizeof(struct zc_data);
+
+		if (copy_from_user(priv->zcb, buf, csize)) {
+			printk("%s: copy_from_user() failed.\n", __func__);
+			break;
+		}
+
+		spin_lock_irqsave(&ctl->zc_lock, flags);
+		for (i=0; i<cnum; ++i)
+			avl_free(priv->zcb[i].data.ptr, priv->zcb[i].size);
+		ctl->zc_used -= cnum;
+		spin_unlock_irqrestore(&ctl->zc_lock, flags);
+
+		sz += csize;
+		size -= csize;
+		buf += csize;
+	}
+
+	return sz;
+}
+
+static ssize_t zc_read(struct file *file, char __user *buf, size_t size, loff_t *off)
+{
+	ssize_t sz = 0;
+	struct zc_private *priv = file->private_data;
+	unsigned long flags;
+	unsigned int pos, req_num = size/sizeof(struct zc_data), cnum, csize;
+	struct zc_control *ctl = &zc_sniffer;
+
+	wait_event_interruptible(ctl->zc_wait, ctl->zc_used > 0);
+
+	spin_lock_irqsave(&ctl->zc_lock, flags);
+	cnum = min_t(unsigned int, req_num, ctl->zc_used);
+	csize = cnum*sizeof(struct zc_data);
+	if (ctl->zc_used) {
+		if (ctl->zc_pos >= ctl->zc_used) {
+			pos = ctl->zc_pos - ctl->zc_used;
+			memcpy(priv->zcb, &ctl->zcb[pos], csize);
+		} else {
+			memcpy(priv->zcb, &ctl->zcb[0], csize);
+			pos = ctl->zc_num - (ctl->zc_used - ctl->zc_pos);
+			memcpy(&priv->zcb[ctl->zc_pos], &ctl->zcb[pos],
+					(ctl->zc_used - ctl->zc_pos)*sizeof(struct zc_data));
+		}
+	}
+	spin_unlock_irqrestore(&ctl->zc_lock, flags);
+
+	sz = csize;
+
+	if (copy_to_user(buf, priv->zcb, cnum*sizeof(struct zc_data)))
+		sz = -EFAULT;
+
+	return sz;
+}
+
+static unsigned int zc_poll(struct file *file, struct poll_table_struct *wait)
+{
+	struct zc_control *ctl = &zc_sniffer;
+	unsigned int poll_flags = 0;
+
+	poll_wait(file, &ctl->zc_wait, wait);
+
+	if (ctl->zc_used)
+		poll_flags = POLLIN | POLLRDNORM;
+
+	return poll_flags;
+}
+
+static struct file_operations zc_ops = {
+	.poll		= &zc_poll,
+	.open 		= &zc_open,
+	.release 	= &zc_release,
+	.read		= &zc_read,
+	.write		= &zc_write,
+	.mmap 		= &zc_mmap,
+	.owner 		= THIS_MODULE,
+};
+
+int avl_init_zc(void)
+{
+	struct zc_control *ctl = &zc_sniffer;
+
+	ctl->zc_num = 1024;
+	init_waitqueue_head(&ctl->zc_wait);
+	spin_lock_init(&ctl->zc_lock);
+	ctl->zcb = kmalloc(ctl->zc_num * sizeof(struct zc_data), GFP_KERNEL);
+	if (!ctl->zcb)
+		return -ENOMEM;
+
+	zc_major = register_chrdev(0, zc_name, &zc_ops);
+       	if (zc_major < 0) {
+		printk(KERN_ERR "Failed to register %s char device: err=%d. Zero-copy is disabled.\n",
+				zc_name, zc_major);
+		return -EINVAL;
+	}
+
+	printk(KERN_INFO "Network zero-copy sniffer has been enabled with %d major number.\n", zc_major);
+
+	return 0;
+}
+
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 022d889..d10af88 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -156,7 +156,7 @@ struct sk_buff *__alloc_skb(unsigned int
 
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
-	data = ____kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+	data = avl_alloc(size + sizeof(struct skb_shared_info), gfp_mask);
 	if (!data)
 		goto nodata;
 
@@ -223,7 +223,7 @@ struct sk_buff *alloc_skb_from_cache(kme
 
 	/* Get the DATA. */
 	size = SKB_DATA_ALIGN(size);
-	data = kmem_cache_alloc(cp, gfp_mask);
+	data = avl_alloc(size, gfp_mask);
 	if (!data)
 		goto nodata;
 
@@ -313,7 +313,7 @@ static void skb_release_data(struct sk_b
 		if (skb_shinfo(skb)->frag_list)
 			skb_drop_fraglist(skb);
 
-		kfree(skb->head);
+		avl_free(skb->head, skb->end - skb->head + sizeof(struct skb_shared_info));
 	}
 }
 
@@ -688,7 +688,7 @@ int pskb_expand_head(struct sk_buff *skb
 
 	size = SKB_DATA_ALIGN(size);
 
-	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+	data = avl_alloc(size + sizeof(struct skb_shared_info), gfp_mask);
 	if (!data)
 		goto nodata;
 
@@ -2057,6 +2057,9 @@ void __init skb_init(void)
 						NULL, NULL);
 	if (!skbuff_fclone_cache)
 		panic("cannot create skbuff cache");
+
+	if (avl_init())
+		panic("Failed to initialize network tree allocator.\n");
 }
 
 EXPORT_SYMBOL(___pskb_trim);
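For illustration only, here is a minimal userspace sketch of how the zc character device registered by avl_init_zc() above might be driven: poll/read to receive an array of zc_data descriptors, mmap to view the allocator pages, and write the descriptors back so zc_write() can call avl_free() on each chunk. This is not part of the patch. The device node path /dev/zc, the mapping length, and the struct zc_data layout below are assumptions; the real zc_data definition lives in avl.h (on the part of the patch not shown on this page), and only the data.ptr and size members that zc_write() dereferences are relied on here.

/*
 * Hypothetical userspace consumer for the zc sniffer device (sketch).
 * Assumes a /dev/zc node created by hand from the dynamic major that
 * avl_init_zc() prints, and a simplified stand-in for struct zc_data.
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/mman.h>

struct zc_data {			/* simplified stand-in, see avl.h in the patch */
	union {
		uint32_t	data[2];
		void		*ptr;
	} data;
	uint32_t		off;
	uint32_t		size;
};

#define ZC_BATCH	128

int main(void)
{
	struct zc_data zcb[ZC_BATCH];
	struct pollfd pfd;
	ssize_t n;
	int i, fd;

	fd = open("/dev/zc", O_RDWR);	/* node created from the dynamic major (assumption) */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Map a window of allocator pages; zc_mmap() walks the per-CPU node
	 * lists starting at the page offset and inserts the backing pages.
	 * The 1024-page length here is arbitrary. */
	void *area = mmap(NULL, 1024UL * 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (area == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("mapped allocator window at %p\n", area);

	pfd.fd = fd;
	pfd.events = POLLIN;

	for (;;) {
		if (poll(&pfd, 1, -1) <= 0)
			continue;

		/* read() fills an array of zc_data descriptors describing
		 * allocator chunks held for zero-copy access. */
		n = read(fd, zcb, sizeof(zcb));
		if (n <= 0)
			break;

		for (i = 0; i < n / (int)sizeof(struct zc_data); ++i)
			printf("chunk %p, size %u\n", zcb[i].data.ptr,
					(unsigned int)zcb[i].size);

		/* Returning the descriptors lets zc_write() release each
		 * chunk back to the allocator via avl_free(). */
		if (write(fd, zcb, n) != n)
			perror("write");
	}

	close(fd);
	return 0;
}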
