📄 nfnetlink_queue.c
		if (net_ratelimit())
			printk(KERN_ERR
			       "nf_queue: OOM in nfqnl_enqueue_packet()\n");
		status = -ENOMEM;
		goto err_out_put;
	}

	entry->info = info;
	entry->skb = skb;
	entry->id = atomic_inc_return(&queue->id_sequence);

	nskb = nfqnl_build_packet_message(queue, entry, &status);
	if (nskb == NULL)
		goto err_out_free;

	spin_lock_bh(&queue->lock);

	if (!queue->peer_pid)
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		queue->queue_dropped++;
		status = -ENOSPC;
		if (net_ratelimit())
			printk(KERN_WARNING "nf_queue: full at %d entries, "
			       "dropping packets(s). Dropped: %d\n",
			       queue->queue_total, queue->queue_dropped);
		goto err_out_free_nskb;
	}

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
	if (status < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	instance_put(queue);
	return status;

err_out_free_nskb:
	kfree_skb(nskb);

err_out_unlock:
	spin_unlock_bh(&queue->lock);

err_out_free:
	kfree(entry);
err_out_put:
	instance_put(queue);
	return status;
}

static int
nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
{
	int diff;
	int err;

	diff = data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			err = pskb_expand_head(e->skb, 0,
					       diff - skb_tailroom(e->skb),
					       GFP_ATOMIC);
			if (err) {
				printk(KERN_WARNING "nf_queue: OOM "
				       "in mangle, dropping packet\n");
				return err;
			}
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

static inline int
id_cmp(struct nfqnl_queue_entry *e, unsigned long id)
{
	return (id == e->id);
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status;

	spin_lock_bh(&queue->lock);
	status = __nfqnl_set_mode(queue, mode, range);
	spin_unlock_bh(&queue->lock);

	return status;
}

static int
dev_cmp(struct nfqnl_queue_entry *entry, unsigned long ifindex)
{
	struct nf_info *entinf = entry->info;

	if (entinf->indev)
		if (entinf->indev->ifindex == ifindex)
			return 1;
	if (entinf->outdev)
		if (entinf->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
	int i;

	QDEBUG("entering for ifindex %u\n", ifindex);

	/* this only looks like we have to hold the readlock for a way too long
	 * time, issue_verdict(), nf_reinject(), ... - but we always only
	 * issue NF_DROP, which is processed directly in nf_reinject() */
	read_lock_bh(&instances_lock);

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct hlist_node *tmp;
		struct nfqnl_instance *inst;
		struct hlist_head *head = &instance_table[i];

		hlist_for_each_entry(inst, tmp, head, hlist) {
			struct nfqnl_queue_entry *entry;
			while ((entry = find_dequeue_entry(inst, dev_cmp,
							   ifindex)) != NULL)
				issue_verdict(entry, NF_DROP);
		}
	}

	read_unlock_bh(&instances_lock);
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_NETFILTER && n->pid) {
		int i;

		/* destroy all instances for this pid */
		write_lock_bh(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if ((n->net == &init_net) &&
				    (n->pid == inst->peer_pid))
					__instance_destroy(inst);
			}
		}
		write_unlock_bh(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
};

static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nfqnl_queue_entry *entry;
	int err;

	queue = instance_lookup_get(queue_num);
	if (!queue)
		return -ENODEV;

	if (queue->peer_pid != NETLINK_CB(skb).pid) {
		err = -EPERM;
		goto err_out_put;
	}

	if (!nfqa[NFQA_VERDICT_HDR]) {
		err = -EINVAL;
		goto err_out_put;
	}

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict);

	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
		err = -EINVAL;
		goto err_out_put;
	}

	entry = find_dequeue_entry(queue, id_cmp, ntohl(vhdr->id));
	if (entry == NULL) {
		err = -ENOENT;
		goto err_out_put;
	}

	if (nfqa[NFQA_PAYLOAD]) {
		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
			verdict = NF_DROP;
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(*(__be32 *)
					 nla_data(nfqa[NFQA_MARK]));

	issue_verdict(entry, verdict);
	instance_put(queue);
	return 0;

err_out_put:
	instance_put(queue);
	return err;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
};

static struct nf_queue_handler nfqh = {
	.name	= "nf_queue",
	.outfn	= &nfqnl_enqueue_packet,
};

static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	int ret = 0;

	QDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));

	queue = instance_lookup_get(queue_num);
	if (nfqa[NFQA_CFG_CMD]) {
		struct nfqnl_msg_config_cmd *cmd;
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);
		QDEBUG("found CFG_CMD\n");

		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue)
				return -EBUSY;

			queue = instance_create(queue_num, NETLINK_CB(skb).pid);
			if (!queue)
				return -EINVAL;
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue)
				return -ENODEV;

			if (queue->peer_pid != NETLINK_CB(skb).pid) {
				ret = -EPERM;
				goto out_put;
			}

			instance_destroy(queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
			QDEBUG("registering queue handler for pf=%u\n",
				ntohs(cmd->pf));
			ret = nf_register_queue_handler(ntohs(cmd->pf), &nfqh);
			break;
		case NFQNL_CFG_CMD_PF_UNBIND:
			QDEBUG("unregistering queue handler for pf=%u\n",
				ntohs(cmd->pf));
			ret = nf_unregister_queue_handler(ntohs(cmd->pf), &nfqh);
			break;
		default:
			ret = -EINVAL;
			break;
		}
	} else {
		if (!queue) {
			QDEBUG("no config command, and no instance ENOENT\n");
			ret = -ENOENT;
			goto out_put;
		}

		if (queue->peer_pid != NETLINK_CB(skb).pid) {
			QDEBUG("no config command, and wrong pid\n");
			ret = -EPERM;
			goto out_put;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params;

		if (!queue) {
			ret = -ENOENT;
			goto out_put;
		}
		params = nla_data(nfqa[NFQA_CFG_PARAMS]);
		nfqnl_set_mode(queue, params->copy_mode,
				ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen;
		queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

out_put:
	instance_put(queue);
	return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;

	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;

	h = h->next;
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock_bh(&instances_lock);
	return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
{
	read_unlock_bh(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_pid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  atomic_read(&inst->id_sequence),
			  atomic_read(&inst->use));
}

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &nfqnl_seq_ops,
			sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* PROC_FS */

static int __init nfnetlink_queue_init(void)
{
	int i, status = -ENOMEM;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc_nfqueue;
#endif

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
					 proc_net_netfilter);
	if (!proc_nfqueue)
		goto cleanup_subsys;
	proc_nfqueue->proc_fops = &nfqnl_file_ops;
#endif

	register_netdevice_notifier(&nfqnl_dev_notifier);
	return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);
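For context: the configuration and verdict handlers above (nfqnl_recv_config() and nfqnl_recv_verdict()) are normally driven from userspace through libnetfilter_queue rather than by hand-built netlink messages. The sketch below is only an illustration of that interaction, not part of this file; the queue number 0, the unconditional NF_ACCEPT verdict, and the buffer size are arbitrary choices for the example.

#include <stdio.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netfilter.h>		/* NF_ACCEPT */
#include <libnetfilter_queue/libnetfilter_queue.h>

/* Called for every NFQNL_MSG_PACKET the kernel side delivers. */
static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
	      struct nfq_data *nfa, void *data)
{
	u_int32_t id = 0;
	struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);

	if (ph)
		id = ntohl(ph->packet_id);

	/* Sends an NFQNL_MSG_VERDICT; nfqnl_recv_verdict() matches it
	 * against the queued entry by id and reinjects the packet. */
	return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
}

int main(void)
{
	struct nfq_handle *h = nfq_open();
	struct nfq_q_handle *qh;
	char buf[4096];
	int fd, rv;

	if (!h)
		exit(1);

	/* Correspond to NFQNL_CFG_CMD_PF_UNBIND / _PF_BIND. */
	nfq_unbind_pf(h, AF_INET);
	nfq_bind_pf(h, AF_INET);

	/* NFQNL_CFG_CMD_BIND for queue 0, handled by nfqnl_recv_config(). */
	qh = nfq_create_queue(h, 0, &cb, NULL);
	if (!qh)
		exit(1);

	/* NFQA_CFG_PARAMS: request full packet copies (copy_mode/copy_range). */
	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);

	fd = nfq_fd(h);
	while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
		nfq_handle_packet(h, buf, rv);

	nfq_destroy_queue(qh);
	nfq_close(h);
	return 0;
}

With a rule such as iptables -A INPUT -j NFQUEUE --queue-num 0 in place, matching packets enter nfqnl_enqueue_packet(), are copied to the bound socket by nfnetlink_unicast(), and sit in the instance's queue until the verdict message above releases (or drops) them.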