
nta.5

A software package implementing zero-copy technology on the Linux platform.
Page 1 of 4
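Below is the first part of the allocator source. Its public entry points, defined further down in the listing, are avl_init(), avl_alloc(), avl_free() and avl_free_no_zc(). The following sketch shows how a caller might use them once avl_init() has run; the function name, buffer size and error handling are illustrative assumptions, not part of the package, and in a real caller the avl_* prototypes would come from the package's own header.

#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Prototypes as they appear in the listing below; in a real caller they
 * would come from the allocator's own header. */
void *avl_alloc(unsigned int size, gfp_t gfp_mask);
void avl_free(void *ptr, unsigned int size);

/* Hypothetical caller: allocate a packet-sized buffer from the network
 * allocator, use it, and release it.  The size handed back to avl_free()
 * must match the allocation size, because the per-chunk header records it
 * and the free path verifies it before releasing the memory. */
static int nta_example_use(void)
{
	void *data;

	data = avl_alloc(1500, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	memset(data, 0, 1500);	/* fill with packet payload, etc. */

	avl_free(data, 1500);	/* same size as passed to avl_alloc() */
	return 0;
}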
/*
 * Queue a free chunk of @size bytes into the per-CPU array of free lists.
 */
static int avl_container_add(void *ptr, unsigned int size, int cpu)
{
	struct avl_container *c = ptr;
	unsigned int pos = AVL_ALIGN(size)/AVL_MIN_SIZE-1;

	if (!size)
		return -EINVAL;

	c->ptr = ptr;
	avl_container_insert(c, pos, cpu);

	return 0;
}

/*
 * Dequeue first free chunk from the list.
 */
static inline struct avl_container *avl_dequeue(struct list_head *head)
{
	struct avl_container *cnt;

	cnt = list_entry(head->next, struct avl_container, centry);
	list_del(&cnt->centry);

	return cnt;
}

/*
 * Add new node entry into the network allocator.
 * Must be called with preemption disabled.
 */
static void avl_node_entry_commit(struct avl_node_entry *entry, int cpu)
{
	int i, idx, off;

	idx = off = 0;
	for (i=0; i<entry->avl_node_num; ++i) {
		struct avl_node *node;

		node = &entry->avl_node_array[idx][off];

		if (++off >= AVL_NODES_ON_PAGE) {
			idx++;
			off = 0;
		}

		node->entry = entry;

		avl_set_cpu_ptr(node->value, cpu, entry->avl_node_order);
		avl_set_node_ptr(node->value, node, entry->avl_node_order);
		avl_container_add((void *)node->value, (1<<entry->avl_node_order)<<PAGE_SHIFT, cpu);
	}

	spin_lock(&avl_allocator[cpu].avl_node_lock);
	entry->avl_entry_num = avl_allocator[cpu].avl_entry_num;
	list_add_tail(&entry->node_entry, &avl_allocator[cpu].avl_node_list);
	avl_allocator[cpu].avl_entry_num++;
	spin_unlock(&avl_allocator[cpu].avl_node_lock);

	printk("Network allocator cache has grown: entry: %u, number: %u, order: %u.\n",
			entry->avl_entry_num, entry->avl_node_num, entry->avl_node_order);
}

/*
 * Simple cache growing function - allocate as much as possible,
 * but no more than @AVL_NODE_NUM blocks when there is a need for that.
 */
static struct avl_node_entry *avl_node_entry_alloc(gfp_t gfp_mask, int order)
{
	struct avl_node_entry *entry;
	int i, num = 0, idx, off, j;
	unsigned long ptr;

	entry = kzalloc(sizeof(struct avl_node_entry), gfp_mask);
	if (!entry)
		return NULL;

	entry->avl_node_array = kzalloc(AVL_NODE_PAGES * sizeof(void *), gfp_mask);
	if (!entry->avl_node_array)
		goto err_out_free_entry;

	for (i=0; i<AVL_NODE_PAGES; ++i) {
		entry->avl_node_array[i] = (struct avl_node *)__get_free_page(gfp_mask);
		if (!entry->avl_node_array[i]) {
			num = i;
			goto err_out_free;
		}
	}

	idx = off = 0;

	for (i=0; i<AVL_NODE_NUM; ++i) {
		struct avl_node *node;

		ptr = __get_free_pages(gfp_mask | __GFP_ZERO, order);
		if (!ptr)
			break;

		node = &entry->avl_node_array[idx][off];

		if (++off >= AVL_NODES_ON_PAGE) {
			idx++;
			off = 0;
		}

		for (j=0; j<(1<<order); ++j)
			get_page(virt_to_page(ptr + (j<<PAGE_SHIFT)));

		node->value = ptr;
		memset(node->mask, 0, sizeof(node->mask));
		avl_fill_bits(node->mask, ARRAY_SIZE(node->mask), 0, ((1<<order)<<PAGE_SHIFT)/AVL_MIN_SIZE, 1);
	}

	ulog("%s: entry: %p, node: %u, node_pages: %lu, node_num: %lu, order: %d, allocated: %d, container: %u, max_size: %u, min_size: %u, bits: %u.\n",
		__func__, entry, sizeof(struct avl_node), AVL_NODE_PAGES, AVL_NODE_NUM, order,
		i, AVL_CONTAINER_ARRAY_SIZE, AVL_MAX_SIZE, AVL_MIN_SIZE, ((1<<order)<<PAGE_SHIFT)/AVL_MIN_SIZE);

	if (i == 0)
		goto err_out_free;

	entry->avl_node_num = i;
	entry->avl_node_order = order;

	return entry;

err_out_free:
	for (i=0; i<AVL_NODE_PAGES; ++i)
		free_page((unsigned long)entry->avl_node_array[i]);
err_out_free_entry:
	kfree(entry);
	return NULL;
}
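/*
 * For orientation while reading avl_alloc() and the free paths below: the
 * per-chunk header reached through avl_ptr_to_chunk() is accessed through
 * three fields only: a canary checked against AVL_CANARY, the saved
 * allocation size, and a reference count dropped by avl_free_no_zc().
 * The sketch below merely restates those accesses for the reader; the real
 * declaration (field order, exact types, padding) lives in the allocator's
 * header, not in this listing:
 *
 *	struct avl_chunk {
 *		unsigned int	canary;		checked against AVL_CANARY
 *		unsigned int	size;		size passed to avl_alloc()
 *		atomic_t	refcnt;		chunk is freed when it hits zero
 *	};
 */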
/*
 * Allocate a memory region with the given size and mode.
 * If no suitable free chunk is found on the first pass, grow the cache by
 * allocating a new node entry with the given mode and try again.
 * Cache growing happens only with 0-order allocations.
 */
void *avl_alloc(unsigned int size, gfp_t gfp_mask)
{
	unsigned int i, try = 0, osize = size;
	void *ptr = NULL;
	unsigned long flags;

	size = AVL_ALIGN(size + sizeof(struct avl_chunk));

	if (size > AVL_MAX_SIZE || size < AVL_MIN_SIZE) {
		/*
		 * Print info about unsupported order so user could send a "bug report"
		 * or increase initial allocation order.
		 */
		if (get_order(size) > AVL_ORDER && net_ratelimit()) {
			printk(KERN_INFO "%s: Failed to allocate %u bytes with %02x mode, order %u, max order %u.\n",
					__func__, size, gfp_mask, get_order(size), AVL_ORDER);
			WARN_ON(1);
		}

		return NULL;
	}

	local_irq_save(flags);
repeat:
	for (i=size/AVL_MIN_SIZE-1; i<AVL_CONTAINER_ARRAY_SIZE; ++i) {
		struct list_head *head = &avl_allocator[smp_processor_id()].avl_container_array[i];
		struct avl_container *c;

		if (!list_empty(head)) {
			struct avl_chunk *ch;

			c = avl_dequeue(head);
			ptr = c->ptr;

			ch = avl_ptr_to_chunk(ptr, osize);
			atomic_set(&ch->refcnt, 1);
			ch->canary = AVL_CANARY;
			ch->size = osize;

			avl_update_node(c, i, osize);
			break;
		}
	}
	local_irq_restore(flags);
#if 1
	if (!ptr && !try) {
		struct avl_node_entry *entry;

		try = 1;

		entry = avl_node_entry_alloc(gfp_mask, get_order(size));
		if (entry) {
			local_irq_save(flags);
			avl_node_entry_commit(entry, smp_processor_id());
			goto repeat;
		}
	}
#endif
	if (unlikely(!ptr && try))
		if (net_ratelimit())
			printk("%s: Failed to allocate %u bytes.\n", __func__, size);

	return ptr;
}

/*
 * Remove free chunk from the list.
 */
static inline struct avl_container *avl_search_container(void *ptr, unsigned int idx, int cpu)
{
	struct avl_container *c = ptr;

	list_del(&c->centry);
	c->ptr = ptr;

	return c;
}

/*
 * Combine neighbouring free chunks into one bigger chunk
 * and put it into the list of free chunks of the appropriate size.
 */
static void avl_combine(struct avl_node *node, void *lp, unsigned int lbits, void *rp, unsigned int rbits,
		void *cur_ptr, unsigned int cur_bits, int cpu)
{
	struct avl_container *lc, *rc, *c;
	unsigned int idx;
	void *ptr;

	lc = rc = c = NULL;
	idx = cur_bits - 1;
	ptr = cur_ptr;

	c = (struct avl_container *)cur_ptr;
	c->ptr = cur_ptr;

	if (rp) {
		rc = avl_search_container(rp, rbits-1, cpu);
		if (!rc) {
			printk(KERN_ERR "%p.%p: Failed to find a container for right pointer %p, rbits: %u.\n",
					node, cur_ptr, rp, rbits);
			BUG();
		}

		c = rc;
		idx += rbits;
		ptr = c->ptr;
	}

	if (lp) {
		lc = avl_search_container(lp, lbits-1, cpu);
		if (!lc) {
			printk(KERN_ERR "%p.%p: Failed to find a container for left pointer %p, lbits: %u.\n",
					node, cur_ptr, lp, lbits);
			BUG();
		}

		idx += lbits;
		ptr = c->ptr;
	}
	avl_container_insert(c, idx, cpu);
}

/*
 * Free memory region of given size.
 * Must be called on the same CPU where the allocation happened,
 * with interrupts disabled.
 */
static void __avl_free_local(void *ptr, unsigned int size)
{
	unsigned long val = avl_ptr_to_value(ptr);
	unsigned int pos, idx, sbits = AVL_ALIGN(size)/AVL_MIN_SIZE;
	unsigned int rbits, lbits, cpu = avl_get_cpu_ptr(val);
	struct avl_node *node;
	unsigned long p;
	void *lp, *rp;

	node = avl_get_node_ptr((unsigned long)ptr);

	pos = avl_ptr_to_offset(ptr);
	idx = pos/BITS_PER_LONG;

	p = node->mask[idx] >> (pos%BITS_PER_LONG);

	if ((p & 1)) {
		if (net_ratelimit())
			printk(KERN_ERR "%p.%p: Broken pointer: value: %lx, pos: %u, idx: %u, mask: %lx, p: %lx.\n",
				node, ptr, val, pos, idx, node->mask[idx], p);
		return;
	}

	avl_fill_bits(node->mask, ARRAY_SIZE(node->mask), pos, sbits, 1);

	lp = rp = NULL;
	rbits = lbits = 0;

	idx = (pos+sbits)/BITS_PER_LONG;
	p = (pos+sbits)%BITS_PER_LONG;

	if ((node->mask[idx] >> p) & 1) {
		lbits = avl_count_set_up(node->mask, ARRAY_SIZE(node->mask), pos+sbits);
		if (lbits) {
			lp = (void *)(val + (pos + sbits)*AVL_MIN_SIZE);
		}
	}

	if (pos) {
		idx = (pos-1)/BITS_PER_LONG;
		p = (pos-1)%BITS_PER_LONG;
		if ((node->mask[idx] >> p) & 1) {
			rbits = avl_count_set_down(node->mask, pos-1);
			if (rbits) {
				rp = (void *)(val + (pos-rbits)*AVL_MIN_SIZE);
			}
		}
	}

	avl_combine(node, lp, lbits, rp, rbits, ptr, sbits, cpu);
}

/*
 * Free memory region of given size.
 * If the freeing CPU is not the same as the allocating one, the chunk is
 * placed on the allocating CPU's list of to-be-freed objects; otherwise
 * the chunk is freed immediately and combined with its neighbours.
 * Must be called with interrupts disabled.
 */
static void __avl_free(void *ptr, unsigned int size)
{
	int cpu = avl_get_cpu_ptr((unsigned long)ptr);

	if (cpu != smp_processor_id()) {
		struct avl_free_list *l, *this = ptr;
		struct avl_allocator_data *alloc = &avl_allocator[cpu];

		this->cpu = smp_processor_id();
		this->size = size;

		spin_lock(&alloc->avl_free_lock);
		l = alloc->avl_free_list_head;
		alloc->avl_free_list_head = this;
		this->next = l;
		spin_unlock(&alloc->avl_free_lock);
		return;
	}

	__avl_free_local(ptr, size);
}

/*
 * Free memory region of given size without sniffer data update.
 */
void avl_free_no_zc(void *ptr, unsigned int size)
{
	unsigned long flags;
	struct avl_free_list *l;
	struct avl_allocator_data *alloc;
	struct avl_chunk *ch = avl_ptr_to_chunk(ptr, size);

	if (unlikely((ch->canary != AVL_CANARY) || ch->size != size)) {
		printk("Freeing destroyed object: ptr: %p, size: %u, canary: %x, must be %x, refcnt: %d, saved size: %u.\n",
				ptr, size, ch->canary, AVL_CANARY, atomic_read(&ch->refcnt), ch->size);
		return;
	}

	if (atomic_dec_and_test(&ch->refcnt)) {
		local_irq_save(flags);
		__avl_free(ptr, size);

		alloc = &avl_allocator[smp_processor_id()];

		while (alloc->avl_free_list_head) {
			spin_lock(&alloc->avl_free_lock);
			l = alloc->avl_free_list_head;
			alloc->avl_free_list_head = l->next;
			spin_unlock(&alloc->avl_free_lock);
			__avl_free_local(l, l->size);
		}
		local_irq_restore(flags);
	}
}

/*
 * Free memory region of given size.
 */
void avl_free(void *ptr, unsigned int size)
{
	struct avl_chunk *ch = avl_ptr_to_chunk(ptr, size);

	if (unlikely((ch->canary != AVL_CANARY) || ch->size != size)) {
		printk("Freeing destroyed object: ptr: %p, size: %u, canary: %x, must be %x, refcnt: %d, saved size: %u.\n",
				ptr, size, ch->canary, AVL_CANARY, atomic_read(&ch->refcnt), ch->size);
		return;
	}
	avl_update_zc(avl_get_node_ptr((unsigned long)ptr), ptr, size);
	avl_free_no_zc(ptr, size);
}
/*
 * Initialize per-cpu allocator data.
 */
static int avl_init_cpu(int cpu)
{
	unsigned int i;
	struct avl_allocator_data *alloc = &avl_allocator[cpu];
	struct avl_node_entry *entry;

	spin_lock_init(&alloc->avl_free_lock);
	spin_lock_init(&alloc->avl_node_lock);
	INIT_LIST_HEAD(&alloc->avl_node_list);

	alloc->avl_container_array = kzalloc(sizeof(struct list_head) * AVL_CONTAINER_ARRAY_SIZE, GFP_KERNEL);
	if (!alloc->avl_container_array)
		goto err_out_exit;

	for (i=0; i<AVL_CONTAINER_ARRAY_SIZE; ++i)
		INIT_LIST_HEAD(&alloc->avl_container_array[i]);

	entry = avl_node_entry_alloc(GFP_KERNEL, AVL_ORDER);
	if (!entry)
		goto err_out_free_container;

	avl_node_entry_commit(entry, cpu);

	return 0;

err_out_free_container:
	kfree(alloc->avl_container_array);
err_out_exit:
	return -ENOMEM;
}

/*
 * Initialize network allocator.
 */
int avl_init(void)
{
	int err, cpu;

	for_each_possible_cpu(cpu) {
		err = avl_init_cpu(cpu);
		if (err)
			goto err_out;
	}

	err = avl_init_zc();

	printk(KERN_INFO "Network tree allocator has been initialized.\n");
	return 0;

err_out:
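
A note on the size classes used above: free chunks are kept on per-CPU lists indexed by the chunk size in AVL_MIN_SIZE units minus one (see avl_container_add() and the scan loop in avl_alloc(), which also reserves room for the chunk header and aligns the total). The standalone sketch below only mirrors that arithmetic; EX_MIN_SIZE, EX_MAX_SIZE and EX_CHUNK_HDR are made-up stand-ins for AVL_MIN_SIZE, AVL_MAX_SIZE and sizeof(struct avl_chunk), whose real values are defined elsewhere in the package, and it assumes AVL_ALIGN() rounds up to a multiple of AVL_MIN_SIZE, as the index arithmetic implies.

/* Standalone illustration of the free-list index computation; the
 * constants are assumptions for the example, not the package's values. */
#include <stdio.h>

#define EX_MIN_SIZE	64u			/* stand-in for AVL_MIN_SIZE */
#define EX_MAX_SIZE	(32u * 1024u)		/* stand-in for AVL_MAX_SIZE */
#define EX_CHUNK_HDR	12u			/* stand-in for sizeof(struct avl_chunk) */
#define EX_ALIGN(x)	(((x) + EX_MIN_SIZE - 1) & ~(EX_MIN_SIZE - 1))

int main(void)
{
	unsigned int request = 1500;	/* e.g. an MTU-sized packet */
	unsigned int size = EX_ALIGN(request + EX_CHUNK_HDR);
	unsigned int index = size / EX_MIN_SIZE - 1;

	if (size > EX_MAX_SIZE) {
		printf("request too large for the allocator\n");
		return 1;
	}

	/* avl_alloc() starts scanning the per-CPU free lists at this index
	 * and walks towards larger size classes until a chunk is found. */
	printf("request %u -> padded size %u -> free-list index %u\n",
	       request, size, index);
	return 0;
}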
