📄 ip6_tables.c
duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last big jump. */ do { e->comefrom ^= (1<<NF_IP6_NUMHOOKS);#ifdef DEBUG_IP_FIREWALL_USER if (e->comefrom & (1 << NF_IP6_NUMHOOKS)) { duprintf("Back unset " "on hook %u " "rule %u\n", hook, pos); }#endif oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = (struct ip6t_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct ip6t_entry *) (entry0 + pos + size); e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, IP6T_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct ip6t_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; } e = (struct ip6t_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1;}static inline intcleanup_match(struct ip6t_entry_match *m, unsigned int *i){ if (i && (*i)-- == 0) return 1; if (m->u.kernel.match->destroy) m->u.kernel.match->destroy(m->u.kernel.match, m->data); module_put(m->u.kernel.match->me); return 0;}static inline intcheck_match(struct ip6t_entry_match *m, const char *name, const struct ip6t_ip6 *ipv6, unsigned int hookmask, unsigned int *i){ struct xt_match *match; int ret; match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name, m->u.user.revision), "ip6t_%s", m->u.user.name); if (IS_ERR(match) || !match) { duprintf("check_match: `%s' not found\n", m->u.user.name); return match ? PTR_ERR(match) : -ENOENT; } m->u.kernel.match = match; ret = xt_check_match(match, AF_INET6, m->u.match_size - sizeof(*m), name, hookmask, ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); if (ret) goto err; if (m->u.kernel.match->checkentry && !m->u.kernel.match->checkentry(name, ipv6, match, m->data, hookmask)) { duprintf("ip_tables: check failed for `%s'.\n", m->u.kernel.match->name); ret = -EINVAL; goto err; } (*i)++; return 0;err: module_put(m->u.kernel.match->me); return ret;}static struct xt_target ip6t_standard_target;static inline intcheck_entry(struct ip6t_entry *e, const char *name, unsigned int size, unsigned int *i){ struct ip6t_entry_target *t; struct xt_target *target; int ret; unsigned int j; if (!ip6_checkentry(&e->ipv6)) { duprintf("ip_tables: ip check failed %p %s.\n", e, name); return -EINVAL; } if (e->target_offset + sizeof(struct ip6t_entry_target) > e->next_offset) return -EINVAL; j = 0; ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6, e->comefrom, &j); if (ret != 0) goto cleanup_matches; t = ip6t_get_target(e); ret = -EINVAL; if (e->target_offset + t->u.target_size > e->next_offset) goto cleanup_matches; target = try_then_request_module(xt_find_target(AF_INET6, t->u.user.name, t->u.user.revision), "ip6t_%s", t->u.user.name); if (IS_ERR(target) || !target) { duprintf("check_entry: `%s' not found\n", t->u.user.name); ret = target ? 
PTR_ERR(target) : -ENOENT; goto cleanup_matches; } t->u.kernel.target = target; ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t), name, e->comefrom, e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO); if (ret) goto err; if (t->u.kernel.target->checkentry && !t->u.kernel.target->checkentry(name, e, target, t->data, e->comefrom)) { duprintf("ip_tables: check failed for `%s'.\n", t->u.kernel.target->name); ret = -EINVAL; goto err; } (*i)++; return 0; err: module_put(t->u.kernel.target->me); cleanup_matches: IP6T_MATCH_ITERATE(e, cleanup_match, &j); return ret;}static inline intcheck_entry_size_and_hooks(struct ip6t_entry *e, struct xt_table_info *newinfo, unsigned char *base, unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int *i){ unsigned int h; if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* Check hooks & underflows */ for (h = 0; h < NF_IP6_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* FIXME: underflows must be unconditional, standard verdicts < 0 (not IP6T_RETURN). --RR */ /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; (*i)++; return 0;}static inline intcleanup_entry(struct ip6t_entry *e, unsigned int *i){ struct ip6t_entry_target *t; if (i && (*i)-- == 0) return 1; /* Cleanup all matches */ IP6T_MATCH_ITERATE(e, cleanup_match, NULL); t = ip6t_get_target(e); if (t->u.kernel.target->destroy) t->u.kernel.target->destroy(t->u.kernel.target, t->data); module_put(t->u.kernel.target->me); return 0;}/* Checks and translates the user-supplied table segment (held in newinfo) */static inttranslate_table(const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, void *entry0, unsigned int size, unsigned int number, const unsigned int *hook_entries, const unsigned int *underflows){ unsigned int i; int ret; newinfo->size = size; newinfo->number = number; /* Init all hooks to impossible value. */ for (i = 0; i < NF_IP6_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. 
*/ ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, check_entry_size_and_hooks, newinfo, entry0, entry0 + size, hook_entries, underflows, &i); if (ret != 0) return ret; if (i != number) { duprintf("translate_table: %u not %u entries\n", i, number); return -EINVAL; } /* Check hooks all assigned */ for (i = 0; i < NF_IP6_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(valid_hooks & (1 << i))) continue; if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, hook_entries[i]); return -EINVAL; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, underflows[i]); return -EINVAL; } } if (!mark_source_chains(newinfo, valid_hooks, entry0)) return -ELOOP; /* Finally, each sanity check must pass */ i = 0; ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, check_entry, name, size, &i); if (ret != 0) { IP6T_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i); return ret; } /* And one copy for every other CPU */ for_each_possible_cpu(i) { if (newinfo->entries[i] && newinfo->entries[i] != entry0) memcpy(newinfo->entries[i], entry0, newinfo->size); } return 0;}/* Gets counters. */static inline intadd_entry_to_counter(const struct ip6t_entry *e, struct xt_counters total[], unsigned int *i){ ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt); (*i)++; return 0;}static inline intset_entry_to_counter(const struct ip6t_entry *e, struct ip6t_counters total[], unsigned int *i){ SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt); (*i)++; return 0;}static voidget_counters(const struct xt_table_info *t, struct xt_counters counters[]){ unsigned int cpu; unsigned int i; unsigned int curcpu; /* Instead of clearing (by a previous call to memset()) * the counters and using adds, we set the counters * with data used by 'current' CPU * We dont care about preemption here. */ curcpu = raw_smp_processor_id(); i = 0; IP6T_ENTRY_ITERATE(t->entries[curcpu], t->size, set_entry_to_counter, counters, &i); for_each_possible_cpu(cpu) { if (cpu == curcpu) continue; i = 0; IP6T_ENTRY_ITERATE(t->entries[cpu], t->size, add_entry_to_counter, counters, &i); }}static intcopy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr){ unsigned int off, num, countersize; struct ip6t_entry *e; struct xt_counters *counters; struct xt_table_info *private = table->private; int ret = 0; void *loc_cpu_entry; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vmalloc(countersize); if (counters == NULL) return -ENOMEM; /* First, sum counters... */ write_lock_bh(&table->lock); get_counters(private, counters); write_unlock_bh(&table->lock); /* choose the copy that is on ourc node/cpu */ loc_cpu_entry = private->entries[raw_smp_processor_id()]; if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { ret = -EFAULT; goto free_counters; } /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ unsigned int i; struct ip6t_entry_match *m; struct ip6t_entry_target *t; e = (struct ip6t_entry *)(loc_cpu_entry + off); if (copy_to_user(userptr + off + offsetof(struct ip6t_entry, counters), &counters[num], sizeof(counters[num])) != 0) { ret = -EFAULT; goto free_counters; } for (i = sizeof(struct ip6t_entry); i < e->target_offset; i += m->u.match_size) { m = (void *)e + i; if (copy_to_user(userptr + off + i + offsetof(struct ip6t_entry_match, u.user.name), m->u.kernel.match->name, strlen(m->u.kernel.match->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } t = ip6t_get_target(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct ip6t_entry_target, u.user.name), t->u.kernel.target->name, strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } free_counters: vfree(counters); return ret;}static intget_entries(const struct ip6t_get_entries *entries, struct ip6t_get_entries __user *uptr){ int ret; struct xt_table *t; t = xt_find_table_lock(AF_INET6, entries->name); if (t && !IS_ERR(t)) { struct xt_table_info *private = t->private; duprintf("t->private->number = %u\n", private->number); if (entries->size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else { duprintf("get_entries: I've got %u not %u!\n", private->size, entries->size); ret = -EINVAL; } module_put(t->me); xt_table_unlock(t); } else ret = t ? PTR_ERR(t) : -ENOENT; return ret;}static intdo_replace(void __user *user, unsigned int len){ int ret; struct ip6t_replace tmp; struct xt_table *t; struct xt_table_info *newinfo, *oldinfo; struct xt_counters *counters; void *loc_cpu_entry, *loc_cpu_old_entry; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS - SMP_CACHE_BYTES) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; /* choose the copy that is on our node/cpu */ loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto free_newinfo; } ret = translate_table(tmp.name, tmp.valid_hooks, newinfo, loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo_counters;