
nta.6

A software package implementing zero-copy technology on the Linux platform. (Page 1 of 4.)
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/avl.h>

struct avl_allocator_data avl_allocator[NR_CPUS];

#define avl_ptr_to_chunk(ptr, size)	(struct avl_chunk *)(ptr + size)

/*
 * Get node pointer from address.
 */
static inline struct avl_node *avl_get_node_ptr(unsigned long ptr)
{
	struct page *page = virt_to_page(ptr);
	struct avl_node *node = (struct avl_node *)(page->lru.next);

	return node;
}

/*
 * Set node pointer for all pages backing the given address.
 */
static void avl_set_node_ptr(unsigned long ptr, struct avl_node *node, int order)
{
	int nr_pages = 1<<order, i;
	struct page *page = virt_to_page(ptr);

	for (i=0; i<nr_pages; ++i) {
		page->lru.next = (void *)node;
		page++;
	}
}

/*
 * Get allocation CPU from address.
 */
static inline int avl_get_cpu_ptr(unsigned long ptr)
{
	struct page *page = virt_to_page(ptr);
	int cpu = (int)(unsigned long)(page->lru.prev);

	return cpu;
}

/*
 * Set allocation CPU for all pages backing the given address.
 */
static void avl_set_cpu_ptr(unsigned long ptr, int cpu, int order)
{
	int nr_pages = 1<<order, i;
	struct page *page = virt_to_page(ptr);

	for (i=0; i<nr_pages; ++i) {
		page->lru.prev = (void *)(unsigned long)cpu;
		page++;
	}
}

/*
 * Convert pointer to node's value.
 * Node's value is the start address of the contiguous chunk bound to given node.
 */
static inline unsigned long avl_ptr_to_value(void *ptr)
{
	struct avl_node *node = avl_get_node_ptr((unsigned long)ptr);
	return node->value;
}

/*
 * Convert pointer into offset from start address of the contiguous chunk
 * allocated for appropriate node.
 */
static inline int avl_ptr_to_offset(void *ptr)
{
	return ((unsigned long)ptr - avl_ptr_to_value(ptr))/AVL_MIN_SIZE;
}

/*
 * Count number of bits set down (until first unset is met in a mask)
 * towards the smaller addresses, including bit at @pos in @mask.
 */
unsigned int avl_count_set_down(unsigned long *mask, unsigned int pos)
{
	unsigned int stop, bits = 0;
	int idx;
	unsigned long p, m;

	idx = pos/BITS_PER_LONG;
	pos = pos%BITS_PER_LONG;

	while (idx >= 0) {
		m = (~0UL>>pos)<<pos;
		p = mask[idx] | m;

		if (!(mask[idx] & m))
			break;

		stop = fls(~p);

		if (!stop) {
			bits += pos + 1;
			pos = BITS_PER_LONG - 1;
			idx--;
		} else {
			bits += pos - stop + 1;
			break;
		}
	}

	return bits;
}

/*
 * Count number of bits set up (until first unset is met in a mask)
 * towards the bigger addresses, including bit at @pos in @mask.
 */
unsigned int avl_count_set_up(unsigned long *mask, unsigned int mask_num,
		unsigned int pos)
{
	unsigned int idx, stop, bits = 0;
	unsigned long p, m;

	idx = pos/BITS_PER_LONG;
	pos = pos%BITS_PER_LONG;

	while (idx < mask_num) {
		if (!pos)
			m = 0;
		else
			m = (~0UL<<(BITS_PER_LONG-pos))>>(BITS_PER_LONG-pos);
		p = mask[idx] | m;

		if (!(mask[idx] & ~m))
			break;

		stop = ffs(~p);

		if (!stop) {
			bits += BITS_PER_LONG - pos;
			pos = 0;
			idx++;
		} else {
			bits += stop - pos - 1;
			break;
		}
	}

	return bits;
}

/*
 * Fill @num bits from position @pos up with bit value @bit in a @mask.
 */
static void avl_fill_bits(unsigned long *mask, unsigned int mask_size,
		unsigned int pos, unsigned int num, unsigned int bit)
{
	unsigned int idx, start;

	idx = pos/BITS_PER_LONG;
	start = pos%BITS_PER_LONG;

	while (num && idx < mask_size) {
		unsigned long m = ((~0UL)>>start)<<start;

		if (start + num <= BITS_PER_LONG) {
			unsigned long upper_bits = BITS_PER_LONG - (start+num);

			m = (m<<upper_bits)>>upper_bits;
		}

		if (bit)
			mask[idx] |= m;
		else
			mask[idx] &= ~m;

		if (start + num <= BITS_PER_LONG)
			num = 0;
		else {
			num -= BITS_PER_LONG - start;
			start = 0;
			idx++;
		}
	}
}
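/*
 * Illustrative sketch (not part of the original patch): how the three
 * bitmask helpers above cooperate.  The AVL_BITMASK_SELFTEST guard is
 * hypothetical; the expected counts follow from the semantics that
 * both counters include the bit at @pos itself.
 */
#ifdef AVL_BITMASK_SELFTEST
static void avl_bitmask_selftest(void)
{
	unsigned long mask[2] = {0, 0};

	/* Mark six bits, positions 3..8, as free. */
	avl_fill_bits(mask, ARRAY_SIZE(mask), 3, 6, 1);

	/* Bits 5,6,7,8 are set, so this prints 4. */
	printk(KERN_DEBUG "up: %u\n",
			avl_count_set_up(mask, ARRAY_SIZE(mask), 5));

	/* Bits 5,4,3 are set, so this prints 3. */
	printk(KERN_DEBUG "down: %u\n", avl_count_set_down(mask, 5));
}
#endif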
/*
 * Add free chunk into array.
 */
static inline void avl_container_insert(struct avl_container *c, unsigned int pos, int cpu)
{
	list_add_tail(&c->centry, &avl_allocator[cpu].avl_container_array[pos]);
}

#ifdef CONFIG_ZCSNIFF
/*
 * Fill zc_data structure for given pointer and node.
 */
static void __avl_fill_zc(struct zc_data *zc, void *ptr, unsigned int size, struct avl_node *node)
{
	u32 off;

	off = ((unsigned long)node & ~PAGE_MASK)/sizeof(struct avl_node)*((1U<<node->entry->avl_node_order)<<PAGE_SHIFT);

	zc->off = off+avl_ptr_to_offset(ptr)*AVL_MIN_SIZE;
	zc->data.ptr = ptr;
	zc->size = size;
	zc->entry = node->entry->avl_entry_num;
	zc->cpu = avl_get_cpu_ptr((unsigned long)ptr);
}

void avl_fill_zc(struct zc_data *zc, void *ptr, unsigned int size)
{
	struct avl_node *node = avl_get_node_ptr((unsigned long)ptr);

	__avl_fill_zc(zc, ptr, size, node);

	printk("%s: ptr: %p, size: %u, node: entry: %u, order: %u, number: %u.\n",
			__func__, ptr, size, node->entry->avl_entry_num,
			node->entry->avl_node_order, node->entry->avl_node_num);
}

/*
 * Update zero-copy information in given @node.
 * @node - node where given pointer @ptr lives
 * @size - size in bytes of the chunk @ptr embeds
 */
static void avl_update_zc(struct avl_node *node, void *ptr, unsigned int size)
{
	struct zc_control *ctl = &zc_sniffer;
	unsigned long flags;

	spin_lock_irqsave(&ctl->zc_lock, flags);
	if (ctl->zc_used < ctl->zc_num) {
		struct zc_data *zc = &ctl->zcb[ctl->zc_pos];
		struct avl_chunk *ch = avl_ptr_to_chunk(ptr, size);

		if (++ctl->zc_pos >= ctl->zc_num)
			ctl->zc_pos = 0;

		atomic_inc(&ch->refcnt);

		__avl_fill_zc(zc, ptr, size, node);

		ctl->zc_used++;
		wake_up(&ctl->zc_wait);

		ulog("%s: used: %u, pos: %u, num: %u, ptr: %p, size: %u.\n",
				__func__, ctl->zc_used, ctl->zc_pos, ctl->zc_num, ptr, zc->size);
	}
	spin_unlock_irqrestore(&ctl->zc_lock, flags);
}
#else
static void avl_update_zc(struct avl_node *node, void *ptr, unsigned int size)
{
}
#endif

/*
 * Update node's bitmask of free/used chunks.
 * If the processed chunk is bigger than the requested one,
 * split it and add the rest into the list of free chunks of appropriate size.
 */
static void avl_update_node(struct avl_container *c, unsigned int cpos, unsigned int size)
{
	struct avl_node *node = avl_get_node_ptr((unsigned long)c->ptr);
	unsigned int num = AVL_ALIGN(size + sizeof(struct avl_chunk))/AVL_MIN_SIZE;

	BUG_ON(cpos < num - 1);

	avl_fill_bits(node->mask, ARRAY_SIZE(node->mask), avl_ptr_to_offset(c->ptr), num, 0);

	if (cpos != num-1) {
		void *ptr = c->ptr + AVL_ALIGN(size + sizeof(struct avl_chunk));

		c = ptr;
		c->ptr = ptr;

		cpos -= num;

		avl_container_insert(c, cpos, smp_processor_id());
	}
}

/*
 * Cast free chunk to a container and add it into the list of free
 * chunks of appropriate size.
 */
static int avl_container_add(void *ptr, unsigned int size, int cpu)
{
	struct avl_container *c = ptr;
	unsigned int pos = AVL_ALIGN(size)/AVL_MIN_SIZE-1;

	if (!size)
		return -EINVAL;

	c->ptr = ptr;
	avl_container_insert(c, pos, cpu);

	return 0;
}

/*
 * Dequeue first free chunk from the list.
 */
static inline struct avl_container *avl_dequeue(struct list_head *head)
{
	struct avl_container *cnt;

	cnt = list_entry(head->next, struct avl_container, centry);
	list_del(&cnt->centry);

	return cnt;
}
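/*
 * Memory layout sketch, inferred from avl_ptr_to_chunk() and the
 * AVL_ALIGN(size + sizeof(struct avl_chunk)) arithmetic above: the
 * chunk metadata (canary, size, refcnt) trails the user data rather
 * than preceding it.
 *
 *	ptr                              ptr + size
 *	|---------- user data ----------|- struct avl_chunk -|
 *
 * An allocation of @size bytes therefore consumes
 * AVL_ALIGN(size + sizeof(struct avl_chunk))/AVL_MIN_SIZE bitmask slots.
 */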
/*
 * Add new node entry into network allocator.
 * Must be called with preemption disabled.
 */
static void avl_node_entry_commit(struct avl_node_entry *entry, int cpu)
{
	int i, idx, off;

	idx = off = 0;
	for (i=0; i<entry->avl_node_num; ++i) {
		struct avl_node *node;

		node = &entry->avl_node_array[idx][off];

		if (++off >= AVL_NODES_ON_PAGE) {
			idx++;
			off = 0;
		}
#ifdef CONFIG_ZCSNIFF
		node->entry = entry;
#endif
		avl_set_cpu_ptr(node->value, cpu, entry->avl_node_order);
		avl_set_node_ptr(node->value, node, entry->avl_node_order);
		avl_container_add((void *)node->value, (1<<entry->avl_node_order)<<PAGE_SHIFT, cpu);
	}

	spin_lock(&avl_allocator[cpu].avl_node_lock);
	entry->avl_entry_num = avl_allocator[cpu].avl_entry_num;
	list_add_tail(&entry->node_entry, &avl_allocator[cpu].avl_node_list);
	avl_allocator[cpu].avl_entry_num++;
	spin_unlock(&avl_allocator[cpu].avl_node_lock);

	printk("Network allocator cache has grown: entry: %u, number: %u, order: %u.\n",
			entry->avl_entry_num, entry->avl_node_num, entry->avl_node_order);
}

/*
 * Simple cache growing function - allocate as much as possible,
 * but no more than @AVL_NODE_NUM pages when there is a need for that.
 */
static struct avl_node_entry *avl_node_entry_alloc(gfp_t gfp_mask, int order)
{
	struct avl_node_entry *entry;
	int i, num = 0, idx, off, j;
	unsigned long ptr;

	entry = kzalloc(sizeof(struct avl_node_entry), gfp_mask);
	if (!entry)
		return NULL;

	entry->avl_node_array = kzalloc(AVL_NODE_PAGES * sizeof(void *), gfp_mask);
	if (!entry->avl_node_array)
		goto err_out_free_entry;

	for (i=0; i<AVL_NODE_PAGES; ++i) {
		entry->avl_node_array[i] = (struct avl_node *)__get_free_page(gfp_mask);
		if (!entry->avl_node_array[i]) {
			num = i;
			goto err_out_free;
		}
	}

	idx = off = 0;

	for (i=0; i<AVL_NODE_NUM; ++i) {
		struct avl_node *node;

		ptr = __get_free_pages(gfp_mask | __GFP_ZERO, order);
		if (!ptr)
			break;

		node = &entry->avl_node_array[idx][off];

		if (++off >= AVL_NODES_ON_PAGE) {
			idx++;
			off = 0;
		}

		for (j=0; j<(1<<order); ++j)
			get_page(virt_to_page(ptr + (j<<PAGE_SHIFT)));

		node->value = ptr;
		memset(node->mask, 0, sizeof(node->mask));
		avl_fill_bits(node->mask, ARRAY_SIZE(node->mask), 0, ((1<<order)<<PAGE_SHIFT)/AVL_MIN_SIZE, 1);
	}

	ulog("%s: entry: %p, node: %u, node_pages: %lu, node_num: %lu, order: %d, allocated: %d, container: %u, max_size: %u, min_size: %u, bits: %u.\n",
		__func__, entry, sizeof(struct avl_node), AVL_NODE_PAGES, AVL_NODE_NUM, order,
		i, AVL_CONTAINER_ARRAY_SIZE, AVL_MAX_SIZE, AVL_MIN_SIZE, ((1<<order)<<PAGE_SHIFT)/AVL_MIN_SIZE);

	if (i == 0)
		goto err_out_free;

	entry->avl_node_num = i;
	entry->avl_node_order = order;

	return entry;

err_out_free:
	for (i=0; i<AVL_NODE_PAGES; ++i)
		free_page((unsigned long)entry->avl_node_array[i]);
err_out_free_entry:
	kfree(entry);
	return NULL;
}
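/*
 * Hedged usage sketch of the two functions above (the caller shown is
 * hypothetical; the real call sites are not on this page).  Per the
 * comment on avl_node_entry_commit(), preemption must be disabled when
 * committing:
 *
 *	struct avl_node_entry *entry;
 *
 *	entry = avl_node_entry_alloc(GFP_ATOMIC, AVL_ORDER);
 *	if (entry) {
 *		preempt_disable();
 *		avl_node_entry_commit(entry, smp_processor_id());
 *		preempt_enable();
 *	}
 */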
/*
 * Allocate memory region with given size and mode.
 * If the size maps to an unsupported order, fail; otherwise, when no
 * free chunk is found, allocate a new node entry with given mode and
 * try again.  Cache growing happens only with 0-order allocations.
 */
void *avl_alloc(unsigned int size, gfp_t gfp_mask)
{
	unsigned int i, try = 0, osize = size;
	void *ptr = NULL;
	unsigned long flags;

	size = AVL_ALIGN(size + sizeof(struct avl_chunk));

	if (size > AVL_MAX_SIZE || size < AVL_MIN_SIZE) {
		/*
		 * Print info about unsupported order so user could send a "bug report"
		 * or increase initial allocation order.
		 */
		if (get_order(size) > AVL_ORDER && net_ratelimit()) {
			printk(KERN_INFO "%s: Failed to allocate %u bytes with %02x mode, order %u, max order %u.\n",
					__func__, size, gfp_mask, get_order(size), AVL_ORDER);
			WARN_ON(1);
		}

		return NULL;
	}

	local_irq_save(flags);
repeat:
	for (i=size/AVL_MIN_SIZE-1; i<AVL_CONTAINER_ARRAY_SIZE; ++i) {
		struct list_head *head = &avl_allocator[smp_processor_id()].avl_container_array[i];
		struct avl_container *c;

		if (!list_empty(head)) {
			struct avl_chunk *ch;

			c = avl_dequeue(head);
			ptr = c->ptr;

			ch = avl_ptr_to_chunk(ptr, osize);
			atomic_set(&ch->refcnt, 1);
			ch->canary = AVL_CANARY;
			ch->size = osize;

			avl_update_node(c, i, osize);
			break;
		}
	}
	local_irq_restore(flags);
#if 1
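/*
 * Hedged caller sketch for avl_alloc() (hypothetical; assumes the
 * package pairs it with an avl_free(ptr, size)-style release helper,
 * which is not visible on this page):
 *
 *	void *buf = avl_alloc(1500, GFP_ATOMIC);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	avl_free(buf, 1500);
 *
 * Note that sizeof(struct avl_chunk) bytes of trailing metadata are
 * reserved per allocation, so requests close to AVL_MAX_SIZE are
 * rejected with NULL.
 */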
