
arp_tables.c

Linux kernel source code (C)
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
		duprintf("Looping hook\n");
		return -ELOOP;
	}

	/* Finally, each sanity check must pass */
	i = 0;
	ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
				 check_entry, name, size, &i);

	if (ret != 0) {
		ARPT_ENTRY_ITERATE(entry0, newinfo->size,
				cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

/* Gets counters. */
static inline int add_entry_to_counter(const struct arpt_entry *e,
				       struct xt_counters total[],
				       unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int set_entry_to_counter(const struct arpt_entry *e,
				       struct xt_counters total[],
				       unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static void get_counters(const struct xt_table_info *t,
			 struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 * We dont care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	ARPT_ENTRY_ITERATE(t->entries[curcpu],
			   t->size,
			   set_entry_to_counter,
			   counters,
			   &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		ARPT_ENTRY_ITERATE(t->entries[cpu],
				   t->size,
				   add_entry_to_counter,
				   counters,
				   &i);
	}
}

static int copy_entries_to_user(unsigned int total_size,
				struct arpt_table *table,
				void __user *userptr)
{
	unsigned int off, num, countersize;
	struct arpt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	/* We need atomic snapshot of counters: rest doesn't change
	 * (other than comefrom, which userspace doesn't care
	 * about).
	 */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return -ENOMEM;

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		struct arpt_entry_target *t;

		e = (struct arpt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct arpt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		t = arpt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct arpt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

static int get_entries(const struct arpt_get_entries *entries,
		       struct arpt_get_entries __user *uptr)
{
	int ret;
	struct arpt_table *t;

	t = xt_find_table_lock(NF_ARP, entries->name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n",
			 private->number);
		if (entries->size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, entries->size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

static int do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct arpt_replace tmp;
	struct arpt_table *t;
	struct xt_table_info *newinfo, *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_entry, *loc_cpu_old_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo_counters;

	duprintf("arp_tables: Translated table\n");

	t = try_then_request_module(xt_find_table_lock(NF_ARP, tmp.name),
				    "arptable_%s", tmp.name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (tmp.valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 tmp.valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);

	xt_free_table_info(oldinfo);
	if (copy_to_user(tmp.counters, counters,
			 sizeof(struct xt_counters) * tmp.num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo_counters:
	vfree(counters);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static inline int add_counter_to_entry(struct arpt_entry *e,
				       const struct xt_counters addme[],
				       unsigned int *i)
{
	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}

static int do_add_counters(void __user *user, unsigned int len)
{
	unsigned int i;
	struct xt_counters_info tmp, *paddc;
	struct arpt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user, len) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(NF_ARP, tmp.name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[smp_processor_id()];
	ARPT_ENTRY_ITERATE(loc_cpu_entry,
			   private->size,
			   add_counter_to_entry,
			   paddc->counters,
			   &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_SET_REPLACE:
		ret = do_replace(user, len);
		break;

	case ARPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len);
		break;

	default:
		duprintf("do_arpt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO: {
		char name[ARPT_TABLE_MAXNAMELEN];
		struct arpt_table *t;

		if (*len != sizeof(struct arpt_getinfo)) {
			duprintf("length %u != %Zu\n", *len,
				 sizeof(struct arpt_getinfo));
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(name, user, sizeof(name)) != 0) {
			ret = -EFAULT;
			break;
		}
		name[ARPT_TABLE_MAXNAMELEN-1] = '\0';

		t = try_then_request_module(xt_find_table_lock(NF_ARP, name),
					    "arptable_%s", name);
		if (t && !IS_ERR(t)) {
			struct arpt_getinfo info;
			struct xt_table_info *private = t->private;

			info.valid_hooks = t->valid_hooks;
			memcpy(info.hook_entry, private->hook_entry,
			       sizeof(info.hook_entry));
			memcpy(info.underflow, private->underflow,
			       sizeof(info.underflow));
			info.num_entries = private->number;
			info.size = private->size;
			strcpy(info.name, name);

			if (copy_to_user(user, &info, *len) != 0)
				ret = -EFAULT;
			else
				ret = 0;
			xt_table_unlock(t);
			module_put(t->me);
		} else
			ret = t ? PTR_ERR(t) : -ENOENT;
	}
	break;

	case ARPT_SO_GET_ENTRIES: {
		struct arpt_get_entries get;

		if (*len < sizeof(get)) {
			duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));
			ret = -EINVAL;
		} else if (copy_from_user(&get, user, sizeof(get)) != 0) {
			ret = -EFAULT;
		} else if (*len != sizeof(struct arpt_get_entries) + get.size) {
			duprintf("get_entries: %u != %Zu\n", *len,
				 sizeof(struct arpt_get_entries) + get.size);
			ret = -EINVAL;
		} else
			ret = get_entries(&get, user);
		break;
	}

	case ARPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		try_then_request_module(xt_find_revision(NF_ARP, rev.name,
							 rev.revision, 1, &ret),
					"arpt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_arpt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

int arpt_register_table(struct arpt_table *table,
			const struct arpt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	static struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		return ret;
	}

	/* choose the copy on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);

	duprintf("arpt_register_table: translate table gives %d\n", ret);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}

void arpt_unregister_table(struct arpt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size,
			   cleanup_entry, NULL);
	xt_free_table_info(private);
}

/* The built-in targets: standard (NULL) and error. */
static struct arpt_target arpt_standard_target __read_mostly = {
	.name		= ARPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= NF_ARP,
};

static struct arpt_target arpt_error_target __read_mostly = {
	.name		= ARPT_ERROR_TARGET,
	.target		= arpt_error,
	.targetsize	= ARPT_FUNCTION_MAXNAMELEN,
	.family		= NF_ARP,
};

static struct nf_sockopt_ops arpt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= ARPT_BASE_CTL,
	.set_optmax	= ARPT_SO_SET_MAX+1,
	.set		= do_arpt_set_ctl,
	.get_optmin	= ARPT_BASE_CTL,
	.get_optmax	= ARPT_SO_GET_MAX+1,
	.get		= do_arpt_get_ctl,
	.owner		= THIS_MODULE,
};

static int __init arp_tables_init(void)
{
	int ret;

	ret = xt_proto_init(NF_ARP);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&arpt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&arpt_error_target);
	if (ret < 0)
		goto err3;

	/* Register setsockopt */
	ret = nf_register_sockopt(&arpt_sockopts);
	if (ret < 0)
		goto err4;

	printk(KERN_INFO "arp_tables: (C) 2002 David S. Miller\n");
	return 0;

err4:
	xt_unregister_target(&arpt_error_target);
err3:
	xt_unregister_target(&arpt_standard_target);
err2:
	xt_proto_fini(NF_ARP);
err1:
	return ret;
}

static void __exit arp_tables_fini(void)
{
	nf_unregister_sockopt(&arpt_sockopts);
	xt_unregister_target(&arpt_error_target);
	xt_unregister_target(&arpt_standard_target);
	xt_proto_fini(NF_ARP);
}

EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);

module_init(arp_tables_init);
module_exit(arp_tables_fini);
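
For context, the sockopt handlers registered through arpt_sockopts above (do_arpt_set_ctl / do_arpt_get_ctl) are the interface the arptables userspace library talks to. The following is a minimal userspace sketch, separate from arp_tables.c, showing how table metadata can be queried with ARPT_SO_GET_INFO in the style libarptc typically uses: a raw AF_INET socket, level SOL_IP, and an exact-sized struct arpt_getinfo. It assumes the <linux/netfilter_arp/arp_tables.h> userspace header is installed, that the built-in "filter" table (arptable_filter) is loaded, and that the caller has CAP_NET_ADMIN; the socket type and constants are common arptables usage, not guarantees taken from this file.

/*
 * Hypothetical userspace example (not part of arp_tables.c): query the
 * "filter" arptable's metadata via ARPT_SO_GET_INFO, serviced by
 * do_arpt_get_ctl() in the kernel code above.  Run as root.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_arp/arp_tables.h>

int main(void)
{
	struct arpt_getinfo info;
	socklen_t len = sizeof(info);
	int fd;

	/* arptables traditionally opens a raw AF_INET socket; the request
	 * is dispatched to the NF_ARP sockopt range by option number. */
	fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&info, 0, sizeof(info));
	/* "filter" is the table name registered by arptable_filter (assumed). */
	strncpy(info.name, "filter", sizeof(info.name) - 1);

	/* The kernel handler rejects any *len != sizeof(struct arpt_getinfo). */
	if (getsockopt(fd, SOL_IP, ARPT_SO_GET_INFO, &info, &len) < 0) {
		perror("getsockopt(ARPT_SO_GET_INFO)");
		return 1;
	}

	printf("table %s: %u entries, %u bytes, valid_hooks 0x%08x\n",
	       info.name, info.num_entries, info.size, info.valid_hooks);
	return 0;
}

On success this prints the same fields do_arpt_get_ctl() fills in from the table's xt_table_info (entry count, blob size, valid hook mask); a failing getsockopt with EPERM or ENOENT corresponds directly to the capability check and table lookup in the handler.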
