⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 nta.5

📁 实现linux平台下零拷贝技术的软件包。
💻 5
📖 第 1 页 / 共 4 页
字号:
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80e8ca0..4aba97b 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1680,7 +1680,7 @@ vortex_open(struct net_device *dev)
 			vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
 			vp->rx_ring[i].status = 0;	/* Clear complete bit. */
 			vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
-			skb = dev_alloc_skb(PKT_BUF_SZ);
+			skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
 			vp->rx_skbuff[i] = skb;
 			if (skb == NULL)
 				break;			/* Bad news!  */
@@ -2405,7 +2405,7 @@ static int vortex_rx(struct net_device *
 			int pkt_len = rx_status & 0x1fff;
 			struct sk_buff *skb;
 
-			skb = dev_alloc_skb(pkt_len + 5);
+			skb = netdev_alloc_skb(dev, pkt_len + 5);
 			if (vortex_debug > 4)
 				printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
 					   pkt_len, rx_status);
@@ -2486,7 +2486,7 @@ boomerang_rx(struct net_device *dev)
 
 			/* Check if the packet is long enough to just accept without
 			   copying to a properly sized skbuff. */
-			if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
+			if (pkt_len < rx_copybreak && (skb = netdev_alloc_skb(dev, pkt_len + 2)) != 0) {
 				skb->dev = dev;
 				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 				pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
@@ -2525,7 +2525,7 @@ boomerang_rx(struct net_device *dev)
 		struct sk_buff *skb;
 		entry = vp->dirty_rx % RX_RING_SIZE;
 		if (vp->rx_skbuff[entry] == NULL) {
-			skb = dev_alloc_skb(PKT_BUF_SZ);
+			skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
 			if (skb == NULL) {
 				static unsigned long last_jif;
 				if (time_after(jiffies, last_jif + 10 * HZ)) {
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 19c96d4..bb1018c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -282,7 +282,8 @@ struct sk_buff {
 				nfctinfo:3;
 	__u8			pkt_type:3,
 				fclone:2,
-				ipvs_property:1;
+				ipvs_property:1,
+				nta:1;
 	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
@@ -327,22 +328,43 @@ #include <linux/slab.h>
 
 #include <asm/system.h>
 
+extern void *avl_alloc(unsigned int size, gfp_t gfp_mask);
+extern void avl_free(void *ptr, unsigned int size);
+extern int avl_init(void);
+
 extern void kfree_skb(struct sk_buff *skb);
 extern void	       __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
 				   gfp_t priority, int fclone);
+extern struct sk_buff *__alloc_skb_emtpy(unsigned int size,
+				   gfp_t priority);
+extern struct sk_buff *__alloc_skb_nta(unsigned int size, gfp_t gfp_mask,
+			    int fclone);
+
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
 	return __alloc_skb(size, priority, 0);
 }
 
+static inline struct sk_buff *alloc_skb_empty(unsigned int size,
+					gfp_t priority)
+{
+	return __alloc_skb_emtpy(size, priority);
+}
+
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 					       gfp_t priority)
 {
 	return __alloc_skb(size, priority, 1);
 }
 
+static inline struct sk_buff *alloc_skb_nta(unsigned int size,
+					gfp_t priority, int fclone)
+{
+	return __alloc_skb_nta(size, priority, fclone);
+}
+
 extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
 					    unsigned int size,
 					    gfp_t priority);
diff --git a/include/net/sock.h b/include/net/sock.h
index 324b3ea..6af3198 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1178,7 +1178,7 @@ static inline struct sk_buff *sk_stream_
 	int hdr_len;
 
 	hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
-	skb = alloc_skb_fclone(size + hdr_len, gfp);
+	skb = alloc_skb_nta(size + hdr_len, gfp, 1);
 	if (skb) {
 		skb->truesize += mem;
 		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
diff --git a/net/core/Makefile b/net/core/Makefile
index 2645ba4..d86d468 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.
 obj-y		     += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
 			neighbour.o rtnetlink.o utils.o link_watch.o filter.o
 
+obj-y += alloc/
+
 obj-$(CONFIG_XFRM) += flow.o
 obj-$(CONFIG_SYSFS) += net-sysfs.o
 obj-$(CONFIG_NET_DIVERT) += dv.o
diff --git a/net/core/alloc/Makefile b/net/core/alloc/Makefile
new file mode 100644
index 0000000..779eba2
--- /dev/null
+++ b/net/core/alloc/Makefile
@@ -0,0 +1,3 @@
+obj-y		:= allocator.o
+
+allocator-y	:= avl.o zc.o
diff --git a/net/core/alloc/avl.c b/net/core/alloc/avl.c
new file mode 100644
index 0000000..8fe7016
--- /dev/null
+++ b/net/core/alloc/avl.c
@@ -0,0 +1,767 @@
+/*
+ * 	avl.c
+ * 
+ * 2006 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/skbuff.h>
+
+#include "avl.h"
+
+struct avl_allocator_data avl_allocator[NR_CPUS];
+
+#define avl_ptr_to_chunk(ptr, size)	(struct avl_chunk *)(ptr + size)
+
+/*
+ * Get node pointer from address.
+ */
+static inline struct avl_node *avl_get_node_ptr(unsigned long ptr)
+{
+	struct page *page = virt_to_page(ptr);
+	struct avl_node *node = (struct avl_node *)(page->lru.next);
+
+	return node;
+}
+
+/*
+ * Set node pointer for page for given address.
+ */
+static void avl_set_node_ptr(unsigned long ptr, struct avl_node *node, int order)
+{
+	int nr_pages = 1<<order, i;
+	struct page *page = virt_to_page(ptr);
+	
+	for (i=0; i<nr_pages; ++i) {
+		page->lru.next = (void *)node;
+		page++;
+	}
+}
+
+/*
+ * Get allocation CPU from address.
+ */
+static inline int avl_get_cpu_ptr(unsigned long ptr)
+{
+	struct page *page = virt_to_page(ptr);
+	int cpu = (int)(unsigned long)(page->lru.prev);
+
+	return cpu;
+}
+
+/*
+ * Set allocation cpu for page for given address.
+ */
+static void avl_set_cpu_ptr(unsigned long ptr, int cpu, int order)
+{
+	int nr_pages = 1<<order, i;
+	struct page *page = virt_to_page(ptr);
+			
+	for (i=0; i<nr_pages; ++i) {
+		page->lru.prev = (void *)(unsigned long)cpu;
+		page++;
+	}
+}
+
+/*
+ * Convert pointer to node's value.
+ * Node's value is a start address for contiguous chunk bound to given node.
+ */
+static inline unsigned long avl_ptr_to_value(void *ptr)
+{
+	struct avl_node *node = avl_get_node_ptr((unsigned long)ptr);
+	return node->value;
+}
+
+/*
+ * Convert pointer into offset from start address of the contiguous chunk
+ * allocated for appropriate node.
+ */
+static inline int avl_ptr_to_offset(void *ptr)
+{
+	return ((unsigned long)ptr - avl_ptr_to_value(ptr))/AVL_MIN_SIZE;
+}
+
+/*
+ * Count number of bits set down (until first unset is met in a mask) 
+ * to the smaller addresses including bit at @pos in @mask.
+ */
+unsigned int avl_count_set_down(unsigned long *mask, unsigned int pos)
+{
+	unsigned int stop, bits = 0;
+	int idx;
+	unsigned long p, m;
+
+	idx = pos/BITS_PER_LONG;
+	pos = pos%BITS_PER_LONG;
+
+	while (idx >= 0) {
+		m = (~0UL>>pos)<<pos;
+		p = mask[idx] | m;
+
+		if (!(mask[idx] & m))
+			break;
+
+		stop = fls(~p);
+
+		if (!stop) {
+			bits += pos + 1;
+			pos = BITS_PER_LONG - 1;
+			idx--;
+		} else {
+			bits += pos - stop + 1;
+			break;
+		}
+	}
+
+	return bits;
+}
+
+/*
+ * Count number of bits set up (until first unset is met in a mask) 
+ * to the bigger addresses including bit at @pos in @mask.
+ */
+unsigned int avl_count_set_up(unsigned long *mask, unsigned int mask_num, 
+		unsigned int pos)
+{
+	unsigned int idx, stop, bits = 0;
+	unsigned long p, m;
+
+	idx = pos/BITS_PER_LONG;
+	pos = pos%BITS_PER_LONG;
+
+	while (idx < mask_num) {
+		if (!pos)
+			m = 0;
+		else
+			m = (~0UL<<(BITS_PER_LONG-pos))>>(BITS_PER_LONG-pos);
+		p = mask[idx] | m;
+
+		if (!(mask[idx] & ~m))
+			break;
+
+		stop = ffs(~p);
+
+		if (!stop) {
+			bits += BITS_PER_LONG - pos;
+			pos = 0;
+			idx++;
+		} else {
+			bits += stop - pos - 1;
+			break;
+		}
+	}
+
+	return bits;
+}
+
+/*
+ * Fill @num bits from position @pos up with bit value @bit in a @mask.
+ */
+
+static void avl_fill_bits(unsigned long *mask, unsigned int mask_size, 
+		unsigned int pos, unsigned int num, unsigned int bit)
+{
+	unsigned int idx, start;
+
+	idx = pos/BITS_PER_LONG;
+	start = pos%BITS_PER_LONG;
+
+	while (num && idx < mask_size) {
+		unsigned long m = ((~0UL)>>start)<<start;
+
+		if (start + num <= BITS_PER_LONG) {
+			unsigned long upper_bits = BITS_PER_LONG - (start+num);
+
+			m = (m<<upper_bits)>>upper_bits;
+		}
+
+		if (bit)
+			mask[idx] |= m;
+		else
+			mask[idx] &= ~m;
+
+		if (start + num <= BITS_PER_LONG)
+			num = 0;
+		else {
+			num -= BITS_PER_LONG - start;
+			start = 0;
+			idx++;
+		}
+	}
+}
+
+/*
+ * Add free chunk into array.
+ */
+static inline void avl_container_insert(struct avl_container *c, unsigned int pos, int cpu)
+{
+	list_add_tail(&c->centry, &avl_allocator[cpu].avl_container_array[pos]);
+}
+
+/*
+ * Fill zc_data structure for given pointer and node.
+ */
+static void __avl_fill_zc(struct zc_data *zc, void *ptr, unsigned int size, struct avl_node *node)
+{
+	u32 off;
+	
+	off = ((unsigned long)node & ~PAGE_MASK)/sizeof(struct avl_node)*((1U<<node->entry->avl_node_order)<<PAGE_SHIFT);
+	
+	zc->off = off+avl_ptr_to_offset(ptr)*AVL_MIN_SIZE;
+	zc->data.ptr = ptr;
+	zc->size = size;
+	zc->entry = node->entry->avl_entry_num;
+	zc->cpu = avl_get_cpu_ptr((unsigned long)ptr);
+}
+
+void avl_fill_zc(struct zc_data *zc, void *ptr, unsigned int size)
+{
+	struct avl_node *node = avl_get_node_ptr((unsigned long)ptr);
+
+	__avl_fill_zc(zc, ptr, size, node);
+
+	printk("%s: ptr: %p, size: %u, node: entry: %u, order: %u, number: %u.\n",
+			__func__, ptr, size, node->entry->avl_entry_num, 
+			node->entry->avl_node_order, node->entry->avl_node_num);
+}
+
+/*
+ * Update zero-copy information in given @node.
+ * @node - node where given pointer @ptr lives
+ * @num - number of @AVL_MIN_SIZE chunks given pointer @ptr embeds
+ */
+static void avl_update_zc(struct avl_node *node, void *ptr, unsigned int size)
+{
+	struct zc_control *ctl = &zc_sniffer;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctl->zc_lock, flags);
+	if (ctl->zc_used < ctl->zc_num) {
+		struct zc_data *zc = &ctl->zcb[ctl->zc_pos];
+		struct avl_chunk *ch = avl_ptr_to_chunk(ptr, size);
+
+		if (++ctl->zc_pos >= ctl->zc_num)
+			ctl->zc_pos = 0;
+	
+		atomic_inc(&ch->refcnt);
+
+		__avl_fill_zc(zc, ptr, size, node);
+
+		ctl->zc_used++;
+		wake_up(&ctl->zc_wait);
+
+		ulog("%s: used: %u, pos: %u, num: %u, ptr: %p, size: %u.\n",
+				__func__, ctl->zc_used, ctl->zc_pos, ctl->zc_num, ptr, zc->size);
+	}
+	spin_unlock_irqrestore(&ctl->zc_lock, flags);
+}
+
+/*
+ * Update node's bitmask of free/used chunks.
+ * If processed chunk size is bigger than requested one, 
+ * split it and add the rest into list of free chunks with appropriate size.
+ */
+static void avl_update_node(struct avl_container *c, unsigned int cpos, unsigned int size)
+{
+	struct avl_node *node = avl_get_node_ptr((unsigned long)c->ptr);
+	unsigned int num = AVL_ALIGN(size + sizeof(struct avl_chunk))/AVL_MIN_SIZE;
+
+	BUG_ON(cpos < num - 1);
+
+	avl_fill_bits(node->mask, ARRAY_SIZE(node->mask), avl_ptr_to_offset(c->ptr), num, 0);
+
+	if (cpos != num-1) {
+		void *ptr = c->ptr + AVL_ALIGN(size + sizeof(struct avl_chunk));
+
+		c = ptr;
+		c->ptr = ptr;
+
+		cpos -= num;
+
+		avl_container_insert(c, cpos, smp_processor_id());
+	}
+}
+
+/*
+ * Dereference free chunk into container and add it into list of free
+ * chunks with appropriate size.
+ */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -