ip_tables.c
	t->u.kernel.target = target;

	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (ret)
		goto err;

	if (t->u.kernel.target == &ipt_standard_target) {
		if (!standard_check(t, size)) {
			ret = -EINVAL;
			goto cleanup_matches;
		}
	} else if (t->u.kernel.target->checkentry
		   && !t->u.kernel.target->checkentry(name, e, target, t->data,
						      t->u.target_size - sizeof(*t),
						      e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
		goto err;
	}

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}

static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data,
					    t->u.target_size - sizeof(*t));
	module_put(t->u.kernel.target->me);
	return 0;
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo, entry0, entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 * We dont care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}

static inline struct xt_counters *alloc_counters(struct ipt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
		     struct ipt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_delta {
	struct compat_delta *next;
	u_int16_t offset;
	short delta;
};

static struct compat_delta *compat_offsets = NULL;

static int compat_add_offset(u_int16_t offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->offset = offset;
	tmp->delta = delta;
	if (compat_offsets) {
		tmp->next = compat_offsets->next;
		compat_offsets->next = tmp;
	} else {
		compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}

static void compat_flush_offsets(void)
{
	struct compat_delta *tmp, *next;

	if (compat_offsets) {
		for (tmp = compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		compat_offsets = NULL;
	}
}

static short compat_calc_jump(u_int16_t offset)
{
	struct compat_delta *tmp;
	short delta;

	for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}

struct compat_ipt_standard_target
{
	struct compat_xt_entry_target target;
	compat_int_t verdict;
};

struct compat_ipt_standard
{
	struct compat_ipt_entry entry;
	struct compat_ipt_standard_target target;
};

#define IPT_ST_LEN		XT_ALIGN(sizeof(struct ipt_standard_target))
#define IPT_ST_COMPAT_LEN	COMPAT_XT_ALIGN(sizeof(struct compat_ipt_standard_target))
#define IPT_ST_OFFSET		(IPT_ST_LEN - IPT_ST_COMPAT_LEN)

static int compat_ipt_standard_fn(void *target,
				  void **dstptr, int *size, int convert)
{
	struct compat_ipt_standard_target compat_st, *pcompat_st;
	struct ipt_standard_target st, *pst;
	int ret;

	ret = 0;
	switch (convert) {
	case COMPAT_TO_USER:
		pst = target;
		memcpy(&compat_st.target, &pst->target,
		       sizeof(compat_st.target));
		compat_st.verdict = pst->verdict;
		if (compat_st.verdict > 0)
			compat_st.verdict -=
				compat_calc_jump(compat_st.verdict);
		compat_st.target.u.user.target_size = IPT_ST_COMPAT_LEN;
		if (copy_to_user(*dstptr, &compat_st, IPT_ST_COMPAT_LEN))
			ret = -EFAULT;
		*size -= IPT_ST_OFFSET;
		*dstptr += IPT_ST_COMPAT_LEN;
		break;
	case COMPAT_FROM_USER:
		pcompat_st = target;
		memcpy(&st.target, &pcompat_st->target, IPT_ST_COMPAT_LEN);
		st.verdict = pcompat_st->verdict;
		if (st.verdict > 0)
			st.verdict += compat_calc_jump(st.verdict);
		st.target.u.user.target_size = IPT_ST_LEN;
		memcpy(*dstptr, &st, IPT_ST_LEN);
		*size += IPT_ST_OFFSET;
		*dstptr += IPT_ST_LEN;
		break;
	case COMPAT_CALC_SIZE:
		*size += IPT_ST_OFFSET;
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	return ret;
}

static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	if (m->u.kernel.match->compat)
		m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
	else
		xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
	return 0;
}

static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	u_int16_t entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	if (t->u.kernel.target->compat)
		t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
	else
		xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;
	int i;

	if (!newinfo || !info)
		return -EINVAL;

	memset(newinfo, 0, sizeof(struct xt_table_info));
	newinfo->size = info->size;
	newinfo->number = info->number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
#endif

static int get_info(void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct ipt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %u\n", *len,
			 (unsigned int)sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			compat_flush_offsets();
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
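The per-CPU counter path above (set_entry_to_counter, add_entry_to_counter, get_counters) seeds the snapshot with the current CPU's counters and then accumulates every other CPU on top, so no separate memset pass is needed. The standalone userspace sketch below mirrors only that aggregation pattern; the array sizes, sample data, and names stand in for t->entries[cpu] and the SET_COUNTER/ADD_COUNTER macros and are illustrative assumptions, not kernel code.

/* Hedged sketch of the get_counters() aggregation strategy: seed from
 * one CPU (SET), then add the rest (ADD).  All data here is made up. */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS    4
#define NR_ENTRIES 3

struct counter {
	uint64_t bcnt;	/* byte count   */
	uint64_t pcnt;	/* packet count */
};

/* Stand-in for the per-CPU rule blobs t->entries[cpu]. */
static struct counter percpu[NR_CPUS][NR_ENTRIES] = {
	{ {100, 1}, {200, 2}, {0, 0} },
	{ { 50, 1}, {  0, 0}, {9, 1} },
	{ {  0, 0}, { 10, 1}, {0, 0} },
	{ { 25, 1}, {  5, 1}, {1, 1} },
};

static void sum_counters(struct counter total[], unsigned int curcpu)
{
	unsigned int cpu, i;

	/* Seed with the current CPU's values (the SET_COUNTER pass). */
	for (i = 0; i < NR_ENTRIES; i++)
		total[i] = percpu[curcpu][i];

	/* Accumulate every other CPU (the ADD_COUNTER pass). */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu == curcpu)
			continue;
		for (i = 0; i < NR_ENTRIES; i++) {
			total[i].bcnt += percpu[cpu][i].bcnt;
			total[i].pcnt += percpu[cpu][i].pcnt;
		}
	}
}

int main(void)
{
	struct counter total[NR_ENTRIES];
	unsigned int i;

	sum_counters(total, 0);
	for (i = 0; i < NR_ENTRIES; i++)
		printf("rule %u: %llu bytes, %llu packets\n", i,
		       (unsigned long long)total[i].bcnt,
		       (unsigned long long)total[i].pcnt);
	return 0;
}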
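Similarly, compat_add_offset() records how many bytes each entry shrinks by when converted to the 32-bit layout, and compat_calc_jump() sums the deltas of all entries that sit before a given jump target so standard-target verdict offsets can be rebased. The sketch below reproduces just that bookkeeping in userspace; the list handling is simplified to a plain prepend, and the function names and sample offsets are illustrative assumptions.

/* Hedged sketch of the compat offset/jump rebasing bookkeeping. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct delta_rec {
	struct delta_rec *next;
	uint16_t offset;	/* offset of the entry in the native layout */
	short delta;		/* bytes saved in the compat layout */
};

static struct delta_rec *deltas;

static int add_offset(uint16_t offset, short delta)
{
	struct delta_rec *tmp = malloc(sizeof(*tmp));

	if (!tmp)
		return -1;
	tmp->offset = offset;
	tmp->delta = delta;
	tmp->next = deltas;	/* a simple LIFO list is enough here */
	deltas = tmp;
	return 0;
}

static short calc_jump(uint16_t target)
{
	struct delta_rec *tmp;
	short delta = 0;

	/* Sum the shrinkage of every entry located before the target. */
	for (tmp = deltas; tmp; tmp = tmp->next)
		if (tmp->offset < target)
			delta += tmp->delta;
	return delta;
}

static void flush_offsets(void)
{
	struct delta_rec *tmp, *next;

	for (tmp = deltas; tmp; tmp = next) {
		next = tmp->next;
		free(tmp);
	}
	deltas = NULL;
}

int main(void)
{
	/* Two entries, each 8 bytes smaller in the compat layout. */
	add_offset(0, 8);
	add_offset(112, 8);

	/* A jump to offset 224 must be reduced by the 16 bytes saved
	 * by the two entries in front of it. */
	printf("compat jump target: %d\n", 224 - calc_jump(224));

	flush_offsets();
	return 0;
}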