nf_conntrack_netlink.c
	if (!tb[CTA_TUPLE_IP])
		return -EINVAL;

	tuple->src.l3num = l3num;

	err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
	if (err < 0)
		return err;

	if (!tb[CTA_TUPLE_PROTO])
		return -EINVAL;

	err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
	if (err < 0)
		return err;

	/* orig and expect tuples get DIR_ORIGINAL */
	if (type == CTA_TUPLE_REPLY)
		tuple->dst.dir = IP_CT_DIR_REPLY;
	else
		tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return 0;
}

#ifdef CONFIG_NF_NAT_NEEDED
static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	struct nf_nat_protocol *npt;
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
	if (err < 0)
		return err;

	npt = nf_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);

	if (!npt->nlattr_to_range) {
		nf_nat_proto_put(npt);
		return 0;
	}

	/* nlattr_to_range returns 1 if it parsed, 0 if not, neg. on error */
	if (npt->nlattr_to_range(tb, range) > 0)
		range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;

	nf_nat_proto_put(npt);

	return 0;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_MINIP]		= { .type = NLA_U32 },
	[CTA_NAT_MAXIP]		= { .type = NLA_U32 },
};

static inline int
nfnetlink_parse_nat(struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
	if (err < 0)
		return err;

	if (tb[CTA_NAT_MINIP])
		range->min_ip = *(__be32 *)nla_data(tb[CTA_NAT_MINIP]);

	if (!tb[CTA_NAT_MAXIP])
		range->max_ip = range->min_ip;
	else
		range->max_ip = *(__be32 *)nla_data(tb[CTA_NAT_MAXIP]);

	if (range->min_ip)
		range->flags |= IP_NAT_RANGE_MAP_IPS;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
	if (err < 0)
		return err;

	return 0;
}
#endif

static inline int
ctnetlink_parse_help(struct nlattr *attr, char **helper_name)
{
	struct nlattr *tb[CTA_HELP_MAX+1];

	nla_parse_nested(tb, CTA_HELP_MAX, attr, NULL);

	if (!tb[CTA_HELP_NAME])
		return -EINVAL;

	*helper_name = nla_data(tb[CTA_HELP_NAME]);

	return 0;
}

static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_STATUS]		= { .type = NLA_U32 },
	[CTA_TIMEOUT]		= { .type = NLA_U32 },
	[CTA_MARK]		= { .type = NLA_U32 },
	[CTA_USE]		= { .type = NLA_U32 },
	[CTA_ID]		= { .type = NLA_U32 },
};

static int
ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
			struct nlmsghdr *nlh, struct nlattr *cda[])
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	int err = 0;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else {
		/* Flush the whole table */
		nf_conntrack_flush();
		return 0;
	}

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(&tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	if (cda[CTA_ID]) {
		u_int32_t id = ntohl(*(__be32 *)nla_data(cda[CTA_ID]));
		if (id != (u32)(unsigned long)ct) {
			nf_ct_put(ct);
			return -ENOENT;
		}
	}
	if (del_timer(&ct->timeout))
		ct->timeout.function((unsigned long)ct);

	nf_ct_put(ct);

	return 0;
}

static int
ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
			struct nlmsghdr *nlh, struct nlattr *cda[])
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct sk_buff *skb2 = NULL;
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	int err = 0;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
#ifndef CONFIG_NF_CT_ACCT
		if (NFNL_MSG_TYPE(nlh->nlmsg_type) == IPCTNL_MSG_CT_GET_CTRZERO)
			return -ENOTSUPP;
#endif
		return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
					  ctnetlink_done);
	}

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(&tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	err = -ENOMEM;
	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb2) {
		nf_ct_put(ct);
		return -ENOMEM;
	}

	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
				  IPCTNL_MSG_CT_NEW, 1, ct);
	nf_ct_put(ct);
	if (err <= 0)
		goto free;

	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
	if (err < 0)
		goto out;

	return 0;

free:
	kfree_skb(skb2);
out:
	return err;
}

static inline int
ctnetlink_change_status(struct nf_conn *ct, struct nlattr *cda[])
{
	unsigned long d;
	unsigned int status = ntohl(*(__be32 *)nla_data(cda[CTA_STATUS]));
	d = ct->status ^ status;

	if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
		/* unchangeable */
		return -EINVAL;

	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
		/* SEEN_REPLY bit can only be set */
		return -EINVAL;

	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
		/* ASSURED bit can only be set */
		return -EINVAL;

	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
#ifndef CONFIG_NF_NAT_NEEDED
		return -EINVAL;
#else
		struct nf_nat_range range;

		if (cda[CTA_NAT_DST]) {
			if (nfnetlink_parse_nat(cda[CTA_NAT_DST], ct,
						&range) < 0)
				return -EINVAL;
			if (nf_nat_initialized(ct,
					       HOOK2MANIP(NF_IP_PRE_ROUTING)))
				return -EEXIST;
			nf_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
		}
		if (cda[CTA_NAT_SRC]) {
			if (nfnetlink_parse_nat(cda[CTA_NAT_SRC], ct,
						&range) < 0)
				return -EINVAL;
			if (nf_nat_initialized(ct,
					       HOOK2MANIP(NF_IP_POST_ROUTING)))
				return -EEXIST;
			nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
		}
#endif
	}

	/* Be careful here, modifying NAT bits can screw up things,
	 * so don't let users modify them directly if they don't pass
	 * nf_nat_range. */
	ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
	return 0;
}

static inline int
ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);
	char *helpname;
	int err;

	/* don't change helper of sibling connections */
	if (ct->master)
		return -EINVAL;

	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
	if (err < 0)
		return err;

	if (!strcmp(helpname, "")) {
		if (help && help->helper) {
			/* we had a helper before ... */
			nf_ct_remove_expectations(ct);
			rcu_assign_pointer(help->helper, NULL);
		}

		return 0;
	}

	helper = __nf_conntrack_helper_find_byname(helpname);
	if (helper == NULL)
		return -EINVAL;

	if (help) {
		if (help->helper == helper)
			return 0;
		if (help->helper)
			return -EBUSY;
		/* need to zero data of old helper */
		memset(&help->help, 0, sizeof(help->help));
	} else {
		help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
		if (help == NULL)
			return -ENOMEM;
	}

	rcu_assign_pointer(help->helper, helper);

	return 0;
}

static inline int
ctnetlink_change_timeout(struct nf_conn *ct, struct nlattr *cda[])
{
	u_int32_t timeout = ntohl(*(__be32 *)nla_data(cda[CTA_TIMEOUT]));

	if (!del_timer(&ct->timeout))
		return -ETIME;

	ct->timeout.expires = jiffies + timeout * HZ;
	add_timer(&ct->timeout);

	return 0;
}

static inline int
ctnetlink_change_protoinfo(struct nf_conn *ct, struct nlattr *cda[])
{
	struct nlattr *tb[CTA_PROTOINFO_MAX+1], *attr = cda[CTA_PROTOINFO];
	struct nf_conntrack_l4proto *l4proto;
	u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
	u_int16_t l3num = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
	int err = 0;

	nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL);

	l4proto = nf_ct_l4proto_find_get(l3num, npt);

	if (l4proto->from_nlattr)
		err = l4proto->from_nlattr(tb, ct);

	nf_ct_l4proto_put(l4proto);

	return err;
}

static int
ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[])
{
	int err;

	if (cda[CTA_HELP]) {
		err = ctnetlink_change_helper(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TIMEOUT]) {
		err = ctnetlink_change_timeout(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			return err;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(*(__be32 *)nla_data(cda[CTA_MARK]));
#endif

	return 0;
}

static int
ctnetlink_create_conntrack(struct nlattr *cda[],
			   struct nf_conntrack_tuple *otuple,
			   struct nf_conntrack_tuple *rtuple,
			   struct nf_conn *master_ct)
{
	struct nf_conn *ct;
	int err = -EINVAL;
	struct nf_conn_help *help;
	struct nf_conntrack_helper *helper;

	ct = nf_conntrack_alloc(otuple, rtuple);
	if (ct == NULL || IS_ERR(ct))
		return -ENOMEM;

	if (!cda[CTA_TIMEOUT])
		goto err;
	ct->timeout.expires = ntohl(*(__be32 *)nla_data(cda[CTA_TIMEOUT]));

	ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
	ct->status |= IPS_CONFIRMED;

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			goto err;
	}

	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			goto err;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(*(__be32 *)nla_data(cda[CTA_MARK]));
#endif

	helper = nf_ct_helper_find_get(rtuple);
	if (helper) {
		help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
		if (help == NULL) {
			nf_ct_helper_put(helper);
			err = -ENOMEM;
			goto err;
		}
		/* not in hash table yet so not strictly necessary */
		rcu_assign_pointer(help->helper, helper);
	}

	/* setup master conntrack: this is a confirmed expectation */
	if (master_ct) {
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = master_ct;
	}

	add_timer(&ct->timeout);
	nf_conntrack_hash_insert(ct);

	if (helper)
		nf_ct_helper_put(helper);

	return 0;

err:
	nf_conntrack_free(ct);
	return err;
}

static int
ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
			struct nlmsghdr *nlh, struct nlattr *cda[])
{
	struct nf_conntrack_tuple otuple, rtuple;
	struct nf_conntrack_tuple_hash *h = NULL;
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	int err = 0;

	if (cda[CTA_TUPLE_ORIG]) {
		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TUPLE_REPLY]) {
		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
		if (err < 0)
			return err;
	}

	write_lock_bh(&nf_conntrack_lock);
	if (cda[CTA_TUPLE_ORIG])
		h = __nf_conntrack_find(&otuple, NULL);
	else if (cda[CTA_TUPLE_REPLY])
		h = __nf_conntrack_find(&rtuple, NULL);

	if (h == NULL) {
		struct nf_conntrack_tuple master;
		struct nf_conntrack_tuple_hash *master_h = NULL;
		struct nf_conn *master_ct = NULL;

		if (cda[CTA_TUPLE_MASTER]) {
			err = ctnetlink_parse_tuple(cda, &master,
						    CTA_TUPLE_MASTER, u3);
			/* must not return with nf_conntrack_lock still held */
			if (err < 0)
				goto out_unlock;

			master_h = __nf_conntrack_find(&master, NULL);
			if (master_h == NULL) {
				err = -ENOENT;
				goto out_unlock;
			}
			master_ct = nf_ct_tuplehash_to_ctrack(master_h);
			atomic_inc(&master_ct->ct_general.use);
		}

		write_unlock_bh(&nf_conntrack_lock);
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE)
			err = ctnetlink_create_conntrack(cda, &otuple, &rtuple,
							 master_ct);
		if (err < 0 && master_ct)
			nf_ct_put(master_ct);

		return err;
	}
	/* implicit 'else' */

	/* We manipulate the conntrack inside the global conntrack table lock,
	 * so there's no need to increase the refcount */
	err = -EEXIST;
	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
		/* we only allow nat config for new conntracks */
		if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
			err = -EINVAL;
			goto out_unlock;
		}
		/* can't link an existing conntrack to a master */
		if (cda[CTA_TUPLE_MASTER]) {
			err = -EINVAL;
			goto out_unlock;
		}
		err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h),
						 cda);
	}

out_unlock:
	write_unlock_bh(&nf_conntrack_lock);
	return err;
}

/***********************************************************************
 * EXPECT
 ***********************************************************************/

static inline int
ctnetlink_exp_dump_tuple(struct sk_buff *skb,
			 const struct nf_conntrack_tuple *tuple,
			 enum ctattr_expect type)
{
	struct nlattr *nest_parms;
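The handlers above (ctnetlink_get_conntrack in particular) service requests that userspace sends over the nfnetlink "conntrack" subsystem. As a rough, standalone illustration of the request side only (not part of this file), the following sketch issues a table-dump request over a raw netlink socket, which exercises the NLM_F_DUMP / netlink_dump_start() path above. It assumes the standard uapi headers and constants from this era of the code (NETLINK_NETFILTER, NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET); error handling is trimmed for brevity.

/* ctdump.c -- minimal conntrack dump request over raw nfnetlink (sketch) */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct nfgenmsg nfmsg;
	} req;
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_NETFILTER);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct nfgenmsg));
	/* nfnetlink subsystem id in the high byte, message type in the low byte */
	req.nlh.nlmsg_type = (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_GET;
	/* NLM_F_DUMP selects the netlink_dump_start() branch in the kernel */
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.nfmsg.nfgen_family = AF_INET;	/* l3 family to dump */
	req.nfmsg.version = NFNETLINK_V0;

	send(fd, &req, req.nlh.nlmsg_len, 0);

	/* the dump arrives as IPCTNL_MSG_CT_NEW messages, ended by NLMSG_DONE */
	for (;;) {
		ssize_t n = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (n <= 0 || nlh->nlmsg_type == NLMSG_DONE ||
		    nlh->nlmsg_type == NLMSG_ERROR)
			break;
		printf("got %zd bytes of conntrack dump data\n", n);
	}

	close(fd);
	return 0;
}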