📄 cls_u32.c
/* Tear down this filter instance: drop the root hash table and, once the
 * last tcf_proto sharing this tc_u_common is gone, unlink the common block
 * from u32_list and free every remaining hash table. */
static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);

	BUG_TRAP(root_ht != NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;
		struct tc_u_common **tp_cp;

		/* Unlink this common block from the global u32_list. */
		for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
			if (*tp_cp == tp_c) {
				*tp_cp = tp_c->next;
				break;
			}
		}

		/* Empty every hash table first, then free them all. */
		for (ht = tp_c->hlist; ht; ht = ht->next)
			u32_clear_hnode(tp, ht);

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			BUG_TRAP(ht->refcnt == 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}

/* Delete one element: a key node is removed directly; a hash table may only
 * go away once its refcount drops to zero, and the root table never can. */
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode*)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (--ht->refcnt == 0)
		u32_destroy_hnode(tp, ht);

	return 0;
}

/* Pick a fresh node id in the bucket that 'handle' hashes to: one past the
 * largest id already in use, starting at 0x800 and capped at the 12-bit
 * maximum of 0xFFF. */
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned i = 0x7FF;

	for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle | (i > 0xFFF ? 0xFFF : i);
}

/* Apply the common attributes of a key node: validate the extended actions,
 * follow a TCA_U32_LINK to another hash table, bind the class id, and
 * optionally set the ingress device match. */
static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
			 struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct rtattr **tb,
			 struct rtattr *est)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK-1]) {
		u32 handle = *(u32*)RTA_DATA(tb[TCA_U32_LINK-1]);
		struct tc_u_hnode *ht_down = NULL;

		/* A link target must be a hash table, not a key node. */
		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_down = xchg(&n->ht_down, ht_down);
		tcf_tree_unlock(tp);

		if (ht_down)
			ht_down->refcnt--;
	}
	if (tb[TCA_U32_CLASSID-1]) {
		n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV-1]) {
		err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
		if (err < 0)
			goto errout;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
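/*
 * For reference while reading u32_change() below: the 32-bit handle
 * packs three fields, decoded by the TC_U32_* macros from
 * <linux/pkt_cls.h>:
 *
 *	bits 31..20	hash table id	TC_U32_HTID(h)	((h) & 0xFFF00000)
 *	bits 19..12	bucket		TC_U32_HASH(h)	(((h) >> 12) & 0xFF)
 *	bits 11..0	key node id	TC_U32_NODE(h)	((h) & 0xFFF)
 *
 * TC_U32_KEY(h) keeps the low 20 bits, so it is zero exactly for
 * hash-table handles.  A worked example:
 *
 *	u32 handle = 0x01020003;	// tc prints this as 1:20:3
 *	TC_U32_HTID(handle);		// 0x01000000: the table
 *	TC_U32_HASH(handle);		// 0x20: bucket in that table
 *	TC_U32_NODE(handle);		// 0x3: key node id
 *	TC_U32_KEY(handle);		// 0x20003: nonzero => key node
 */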
/* Create or update an element.  Depending on the attributes this either
 * reconfigures an existing key node, allocates a new hash table
 * (TCA_U32_DIVISOR), or allocates a new key node and links it into the
 * bucket its handle selects. */
static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct rtattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_U32_MAX];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_U32_MAX, opt) < 0)
		return -EINVAL;

	if ((n = (struct tc_u_knode*)*arg) != NULL) {
		/* Existing node: only key nodes may be reconfigured. */
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE-1]);
	}

	if (tb[TCA_U32_DIVISOR-1]) {
		unsigned divisor = *(unsigned*)RTA_DATA(tb[TCA_U32_DIVISOR-1]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 0;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	/* Find the hash table the new key node belongs to. */
	if (tb[TCA_U32_HASH-1]) {
		htid = *(unsigned*)RTA_DATA(tb[TCA_U32_HASH-1]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		/* A user-supplied handle must agree with the chosen table. */
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel))
		return -EINVAL;

	s = RTA_DATA(tb[TCA_U32_SEL-1]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	/* fshift: how far the hmask-selected bits must be shifted down. */
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK-1]) {
		struct tc_u32_mark *mark;

		if (RTA_PAYLOAD(tb[TCA_U32_MARK-1]) < sizeof(struct tc_u32_mark)) {
#ifdef CONFIG_CLS_U32_PERF
			kfree(n->pf);
#endif
			kfree(n);
			return -EINVAL;
		}
		mark = RTA_DATA(tb[TCA_U32_MARK-1]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE-1]);
	if (err == 0) {
		struct tc_u_knode **ins;

		/* Insert sorted by node id; the write barrier publishes the
		 * fully initialised node before it becomes reachable. */
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		wmb();
		*ins = n;

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}
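/*
 * u32_change() above expects TCA_U32_SEL to carry a struct tc_u32_sel
 * immediately followed by sel.nkeys struct tc_u32_key entries, both
 * declared in <linux/pkt_cls.h>.  A userspace-side sketch of one key
 * matching IPv4 dst 10.0.0.0/8 (offset 16 into the IP header); the
 * netlink packaging itself is omitted, and <arpa/inet.h> supplies
 * htonl():
 *
 *	struct {
 *		struct tc_u32_sel sel;
 *		struct tc_u32_key keys[1];
 *	} cfg = {
 *		.sel.nkeys    = 1,
 *		.keys[0].off  = 16,			// dst address
 *		.keys[0].mask = htonl(0xFF000000),	// /8 prefix
 *		.keys[0].val  = htonl(0x0A000000),	// 10.0.0.0
 *	};
 *
 * The attribute payload is then sizeof(cfg.sel) + sizeof(cfg.keys),
 * matching the size check and memcpy() in u32_change().
 */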
/* Walk every hash table and key node of this priority, invoking the
 * walker callback with skip/count bookkeeping. */
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

/* Serialise one element (hash table or key node) back to userspace as a
 * nested TCA_OPTIONS attribute. */
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode*)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (TC_U32_KEY(n->handle) == 0) {
		/* A hash table: only the divisor is reported. */
		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
		u32 divisor = ht->divisor+1;
		RTA_PUT(skb, TCA_U32_DIVISOR, 4, &divisor);
	} else {
		RTA_PUT(skb, TCA_U32_SEL,
			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			&n->sel);
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			RTA_PUT(skb, TCA_U32_HASH, 4, &htid);
		}
		if (n->res.classid)
			RTA_PUT(skb, TCA_U32_CLASSID, 4, &n->res.classid);
		if (n->ht_down)
			RTA_PUT(skb, TCA_U32_LINK, 4, &n->ht_down->handle);

#ifdef CONFIG_CLS_U32_MARK
		if (n->mark.val || n->mark.mask)
			RTA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
#endif

		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
			goto rtattr_failure;

#ifdef CONFIG_NET_CLS_IND
		if (strlen(n->indev))
			RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
		RTA_PUT(skb, TCA_U32_PCNT,
			sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			n->pf);
#endif
	}

	rta->rta_len = skb_tail_pointer(skb) - b;
	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
			goto rtattr_failure;
	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops = {
	.next		=	NULL,
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_u32(void)
{
	printk("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	printk("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	printk("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	printk("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");
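/*
 * How the hmask/fshift pair set up in u32_change() is used: at
 * classification time the selected word is folded into a bucket index
 * as roughly (ntohl(key & hmask) >> fshift), so fshift moves the lowest
 * set bit of the mask down to bit 0, and the result is masked with
 * ht->divisor to pick the bucket.  A quick sanity check:
 *
 *	__be32 hmask  = htonl(0x0000FF00);	// mask selects one byte
 *	int    fshift = ffs(ntohl(hmask)) - 1;	// ffs(0xFF00) = 9 -> 8
 *	__be32 word   = htonl(0x0A0B0C0D);
 *	ntohl(word & hmask) >> fshift;		// == 0x0C
 */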