📄 ip_tables.c
static inline int
check_entry(struct ipt_entry *e, const char *name)
{
        struct ipt_entry_target *t;

        if (!ip_checkentry(&e->ip)) {
                duprintf("ip_tables: ip check failed %p %s.\n", e, name);
                return -EINVAL;
        }

        if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
                return -EINVAL;

        t = ipt_get_target(e);
        if (e->target_offset + t->u.target_size > e->next_offset)
                return -EINVAL;

        return 0;
}

static inline int check_match(struct ipt_entry_match *m, const char *name,
                              const struct ipt_ip *ip, unsigned int hookmask,
                              unsigned int *i)
{
        struct xt_match *match;
        int ret;

        match = m->u.kernel.match;
        ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
                             name, hookmask, ip->proto,
                             ip->invflags & IPT_INV_PROTO);
        if (!ret && m->u.kernel.match->checkentry
            && !m->u.kernel.match->checkentry(name, ip, match, m->data,
                                              hookmask)) {
                duprintf("ip_tables: check failed for `%s'.\n",
                         m->u.kernel.match->name);
                ret = -EINVAL;
        }
        if (!ret)
                (*i)++;
        return ret;
}

static inline int
find_check_match(struct ipt_entry_match *m, const char *name,
                 const struct ipt_ip *ip, unsigned int hookmask,
                 unsigned int *i)
{
        struct xt_match *match;
        int ret;

        match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
                                                      m->u.user.revision),
                                        "ipt_%s", m->u.user.name);
        if (IS_ERR(match) || !match) {
                duprintf("find_check_match: `%s' not found\n", m->u.user.name);
                return match ? PTR_ERR(match) : -ENOENT;
        }
        m->u.kernel.match = match;

        ret = check_match(m, name, ip, hookmask, i);
        if (ret)
                goto err;

        return 0;
err:
        module_put(m->u.kernel.match->me);
        return ret;
}

static inline int check_target(struct ipt_entry *e, const char *name)
{
        struct ipt_entry_target *t;
        struct xt_target *target;
        int ret;

        t = ipt_get_target(e);
        target = t->u.kernel.target;
        ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
                              name, e->comefrom, e->ip.proto,
                              e->ip.invflags & IPT_INV_PROTO);
        if (!ret && t->u.kernel.target->checkentry
            && !t->u.kernel.target->checkentry(name, e, target, t->data,
                                               e->comefrom)) {
                duprintf("ip_tables: check failed for `%s'.\n",
                         t->u.kernel.target->name);
                ret = -EINVAL;
        }
        return ret;
}

static inline int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
                 unsigned int *i)
{
        struct ipt_entry_target *t;
        struct xt_target *target;
        int ret;
        unsigned int j;

        ret = check_entry(e, name);
        if (ret)
                return ret;

        j = 0;
        ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
                                e->comefrom, &j);
        if (ret != 0)
                goto cleanup_matches;

        t = ipt_get_target(e);
        target = try_then_request_module(xt_find_target(AF_INET,
                                                        t->u.user.name,
                                                        t->u.user.revision),
                                         "ipt_%s", t->u.user.name);
        if (IS_ERR(target) || !target) {
                duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
                ret = target ? PTR_ERR(target) : -ENOENT;
                goto cleanup_matches;
        }
        t->u.kernel.target = target;

        ret = check_target(e, name);
        if (ret)
                goto err;

        (*i)++;
        return 0;
err:
        module_put(t->u.kernel.target->me);
cleanup_matches:
        IPT_MATCH_ITERATE(e, cleanup_match, &j);
        return ret;
}

static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
                           struct xt_table_info *newinfo,
                           unsigned char *base,
                           unsigned char *limit,
                           const unsigned int *hook_entries,
                           const unsigned int *underflows,
                           unsigned int *i)
{
        unsigned int h;

        if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
            || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
                duprintf("Bad offset %p\n", e);
                return -EINVAL;
        }

        if (e->next_offset
            < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
                duprintf("checking: element %p size %u\n",
                         e, e->next_offset);
                return -EINVAL;
        }

        /* Check hooks & underflows */
        for (h = 0; h < NF_IP_NUMHOOKS; h++) {
                if ((unsigned char *)e - base == hook_entries[h])
                        newinfo->hook_entry[h] = hook_entries[h];
                if ((unsigned char *)e - base == underflows[h])
                        newinfo->underflow[h] = underflows[h];
        }

        /* FIXME: underflows must be unconditional, standard verdicts
           < 0 (not IPT_RETURN). --RR */

        /* Clear counters and comefrom */
        e->counters = ((struct xt_counters) { 0, 0 });
        e->comefrom = 0;

        (*i)++;
        return 0;
}

static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
        struct ipt_entry_target *t;

        if (i && (*i)-- == 0)
                return 1;

        /* Cleanup all matches */
        IPT_MATCH_ITERATE(e, cleanup_match, NULL);
        t = ipt_get_target(e);
        if (t->u.kernel.target->destroy)
                t->u.kernel.target->destroy(t->u.kernel.target, t->data);
        module_put(t->u.kernel.target->me);
        return 0;
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
                unsigned int valid_hooks,
                struct xt_table_info *newinfo,
                void *entry0,
                unsigned int size,
                unsigned int number,
                const unsigned int *hook_entries,
                const unsigned int *underflows)
{
        unsigned int i;
        int ret;

        newinfo->size = size;
        newinfo->number = number;

        /* Init all hooks to impossible value. */
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = 0xFFFFFFFF;
                newinfo->underflow[i] = 0xFFFFFFFF;
        }

        duprintf("translate_table: size %u\n", newinfo->size);
        i = 0;
        /* Walk through entries, checking offsets. */
        ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                check_entry_size_and_hooks,
                                newinfo, entry0, entry0 + size,
                                hook_entries, underflows, &i);
        if (ret != 0)
                return ret;

        if (i != number) {
                duprintf("translate_table: %u not %u entries\n",
                         i, number);
                return -EINVAL;
        }

        /* Check hooks all assigned */
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                /* Only hooks which are valid */
                if (!(valid_hooks & (1 << i)))
                        continue;
                if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
                        duprintf("Invalid hook entry %u %u\n",
                                 i, hook_entries[i]);
                        return -EINVAL;
                }
                if (newinfo->underflow[i] == 0xFFFFFFFF) {
                        duprintf("Invalid underflow %u %u\n",
                                 i, underflows[i]);
                        return -EINVAL;
                }
        }

        if (!mark_source_chains(newinfo, valid_hooks, entry0))
                return -ELOOP;

        /* Finally, each sanity check must pass */
        i = 0;
        ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                find_check_entry, name, size, &i);

        if (ret != 0) {
                IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                  cleanup_entry, &i);
                return ret;
        }

        /* And one copy for every other CPU */
        for_each_possible_cpu(i) {
                if (newinfo->entries[i] && newinfo->entries[i] != entry0)
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }

        return ret;
}
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
                     struct xt_counters total[],
                     unsigned int *i)
{
        ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

        (*i)++;
        return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
                     struct ipt_counters total[],
                     unsigned int *i)
{
        SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

        (*i)++;
        return 0;
}

static void
get_counters(const struct xt_table_info *t,
             struct xt_counters counters[])
{
        unsigned int cpu;
        unsigned int i;
        unsigned int curcpu;

        /* Instead of clearing (by a previous call to memset())
         * the counters and using adds, we set the counters
         * with data used by 'current' CPU
         * We dont care about preemption here.
         */
        curcpu = raw_smp_processor_id();

        i = 0;
        IPT_ENTRY_ITERATE(t->entries[curcpu],
                          t->size,
                          set_entry_to_counter,
                          counters,
                          &i);

        for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                i = 0;
                IPT_ENTRY_ITERATE(t->entries[cpu],
                                  t->size,
                                  add_entry_to_counter,
                                  counters,
                                  &i);
        }
}

static inline struct xt_counters * alloc_counters(struct xt_table *table)
{
        unsigned int countersize;
        struct xt_counters *counters;
        struct xt_table_info *private = table->private;

        /* We need atomic snapshot of counters: rest doesn't change
           (other than comefrom, which userspace doesn't care about). */
        countersize = sizeof(struct xt_counters) * private->number;
        counters = vmalloc_node(countersize, numa_node_id());

        if (counters == NULL)
                return ERR_PTR(-ENOMEM);

        /* First, sum counters... */
        write_lock_bh(&table->lock);
        get_counters(private, counters);
        write_unlock_bh(&table->lock);

        return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
                     struct xt_table *table,
                     void __user *userptr)
{
        unsigned int off, num;
        struct ipt_entry *e;
        struct xt_counters *counters;
        struct xt_table_info *private = table->private;
        int ret = 0;
        void *loc_cpu_entry;

        counters = alloc_counters(table);
        if (IS_ERR(counters))
                return PTR_ERR(counters);

        /* choose the copy that is on our node/cpu, ...
         * This choice is lazy (because current thread is
         * allowed to migrate to another cpu)
         */
        loc_cpu_entry = private->entries[raw_smp_processor_id()];
        /* ... then copy entire thing ... */
        if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
                ret = -EFAULT;
                goto free_counters;
        }

        /* FIXME: use iterator macros --RR */
        /* ... then go back and fix counters and names */
        for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
                unsigned int i;
                struct ipt_entry_match *m;
                struct ipt_entry_target *t;

                e = (struct ipt_entry *)(loc_cpu_entry + off);
                if (copy_to_user(userptr + off
                                 + offsetof(struct ipt_entry, counters),
                                 &counters[num],
                                 sizeof(counters[num])) != 0) {
                        ret = -EFAULT;
                        goto free_counters;
                }

                for (i = sizeof(struct ipt_entry);
                     i < e->target_offset;
                     i += m->u.match_size) {
                        m = (void *)e + i;

                        if (copy_to_user(userptr + off + i
                                         + offsetof(struct ipt_entry_match,
                                                    u.user.name),
                                         m->u.kernel.match->name,
                                         strlen(m->u.kernel.match->name)+1)
                            != 0) {
                                ret = -EFAULT;
                                goto free_counters;
                        }
                }

                t = ipt_get_target(e);
                if (copy_to_user(userptr + off + e->target_offset
                                 + offsetof(struct ipt_entry_target,
                                            u.user.name),
                                 t->u.kernel.target->name,
                                 strlen(t->u.kernel.target->name)+1) != 0) {
                        ret = -EFAULT;
                        goto free_counters;
                }
        }

free_counters:
        vfree(counters);
        return ret;
}

#ifdef CONFIG_COMPAT
struct compat_delta {
        struct compat_delta *next;
        unsigned int offset;
        short delta;
};

static struct compat_delta *compat_offsets = NULL;

static int compat_add_offset(unsigned int offset, short delta)
{
        struct compat_delta *tmp;

        tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;
        tmp->offset = offset;
        tmp->delta = delta;
        if (compat_offsets) {
                tmp->next = compat_offsets->next;
                compat_offsets->next = tmp;
        } else {
                compat_offsets = tmp;
                tmp->next = NULL;
        }
        return 0;
}

static void compat_flush_offsets(void)
{
        struct compat_delta *tmp, *next;

        if (compat_offsets) {
                for (tmp = compat_offsets; tmp; tmp = next) {
                        next = tmp->next;
                        kfree(tmp);
                }
                compat_offsets = NULL;
        }
}

static short compat_calc_jump(unsigned int offset)
{
        struct compat_delta *tmp;
        short delta;

        for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
                if (tmp->offset < offset)
                        delta += tmp->delta;
        return delta;
}

static void compat_standard_from_user(void *dst, void *src)
{
        int v = *(compat_int_t *)src;

        if (v > 0)
                v += compat_calc_jump(v);
        memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
        compat_int_t cv = *(int *)src;

        if (cv > 0)
                cv -= compat_calc_jump(cv);
        return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
        *size += xt_compat_match_offset(m->u.kernel.match);
        return 0;
}

static int compat_calc_entry(struct ipt_entry *e,
                             struct xt_table_info *info,
                             void *base, struct xt_table_info *newinfo)
{
        struct ipt_entry_target *t;
        unsigned int entry_offset;
        int off, i, ret;

        off = 0;
        entry_offset = (void *)e - base;
        IPT_MATCH_ITERATE(e, compat_calc_match, &off);
        t = ipt_get_target(e);
        off += xt_compat_target_offset(t->u.kernel.target);
        newinfo->size -= off;
        ret = compat_add_offset(entry_offset, off);
        if (ret)
                return ret;

        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                if (info->hook_entry[i] && (e < (struct ipt_entry *)
                                (base + info->hook_entry[i])))
                        newinfo->hook_entry[i] -= off;
                if (info->underflow[i] && (e < (struct ipt_entry *)
                                (base + info->underflow[i])))
                        newinfo->underflow[i] -= off;
        }
        return 0;
}

static int compat_table_info(struct xt_table_info *info,
                             struct xt_table_info *newinfo)
{
        void *loc_cpu_entry;
        int i;

        if (!newinfo || !info)
                return -EINVAL;

        memset(newinfo, 0, sizeof(struct xt_table_info));
        newinfo->size = info->size;
        newinfo->number = info->number;
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = info->hook_entry[i];
                newinfo->underflow[i] = info->underflow[i];
        }
        loc_cpu_entry = info->entries[raw_smp_processor_id()];
        return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
                        compat_calc_entry, info, loc_cpu_entry, newinfo);
}
#endif

static int get_info(void __user *user, int *len, int compat)
{
        char name[IPT_TABLE_MAXNAMELEN];
        struct xt_table *t;
        int ret;

        if (*len != sizeof(struct ipt_getinfo)) {
                duprintf("length %u != %u\n", *len,
                        (unsigned int)sizeof(struct ipt_getinfo));
                return -EINVAL;
        }

        if (copy_from_user(name, user, sizeof(name)) != 0)
                return -EFAULT;

        name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
        if (compat)
                xt_compat_lock(AF_INET);
#endif
        t = try_then_request_module(xt_find_table_lock(AF_INET, name),
                        "iptable_%s", name);
        if (t && !IS_ERR(t)) {
                struct ipt_getinfo info;
                struct xt_table_info *private = t->private;
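Note on the traversal pattern used above: the fixup loop in copy_entries_to_user() never follows pointers between rules. An iptables table is one flat blob in which every struct ipt_entry records where its matches end (target_offset) and where the next entry begins (next_offset); IPT_ENTRY_ITERATE and IPT_MATCH_ITERATE walk the blob by those offsets, after check_entry_size_and_hooks() has validated them. The fragment below is a minimal user-space sketch of that offset walk. It is illustrative only: struct entry_hdr, struct match_hdr, walk_entries() and the demo blob are simplified stand-ins invented for this sketch, not the real kernel definitions.

/*
 * Illustrative sketch only -- NOT part of ip_tables.c.
 * entry_hdr/match_hdr are simplified stand-ins for struct ipt_entry
 * and struct ipt_entry_match: just enough fields to show how a flat
 * rule blob is walked by offsets alone.
 */
#include <stdio.h>
#include <string.h>

struct match_hdr {                    /* stand-in for struct ipt_entry_match */
        unsigned short match_size;    /* total size of this match block */
        char name[16];
};

struct entry_hdr {                    /* stand-in for struct ipt_entry */
        unsigned short target_offset; /* offset from entry start to its target */
        unsigned short next_offset;   /* offset from entry start to next entry */
};

static void walk_entries(const unsigned char *blob, unsigned int total_size)
{
        unsigned int off, num;

        for (off = 0, num = 0; off < total_size; num++) {
                const struct entry_hdr *e = (const void *)(blob + off);
                unsigned int i;

                printf("entry %u at offset %u\n", num, off);

                /* Matches live between the entry header and target_offset. */
                for (i = sizeof(*e); i < e->target_offset; ) {
                        const struct match_hdr *m =
                                (const void *)(blob + off + i);

                        printf("  match `%s' (%u bytes)\n",
                               m->name, (unsigned int)m->match_size);
                        i += m->match_size;
                }

                /* The target starts at target_offset; the next entry
                   begins next_offset bytes after this one. */
                off += e->next_offset;
        }
}

int main(void)
{
        /* Build a tiny demo blob: one entry carrying a single match. */
        _Alignas(struct entry_hdr) unsigned char blob[64];
        struct entry_hdr e;
        struct match_hdr m;

        memset(blob, 0, sizeof(blob));
        memset(&m, 0, sizeof(m));
        strcpy(m.name, "demo");
        m.match_size = sizeof(m);

        e.target_offset = sizeof(e) + m.match_size; /* matches end here */
        e.next_offset = e.target_offset;            /* no target payload in this sketch */

        memcpy(blob, &e, sizeof(e));
        memcpy(blob + sizeof(e), &m, sizeof(m));

        walk_entries(blob, e.next_offset);
        return 0;
}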