📄 cls_u32.c
字号:
/*
 * NOTE(review): this chunk begins mid-file; the lone brace below closes a
 * function whose body starts before the visible region (presumably the
 * destroy path — confirm against the full file).
 */
}

/*
 * u32_delete - delete one filter element.
 *
 * @arg encodes either a knode (filter entry) or an hnode (hash table):
 * a nonzero TC_U32_KEY() in the handle marks a knode, which is removed
 * via u32_delete_key(); otherwise @arg is an hnode.  The root hnode can
 * never be deleted (-EINVAL).  A non-root hnode is destroyed only when
 * its reference count drops to zero.
 */
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

	if (ht == NULL)
		return 0;

	/* Low 12 bits set => this is really a knode, not a table. */
	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode*)ht);

	if (tp->root == ht)
		return -EINVAL;

	/* Table goes away only when the last reference is dropped. */
	if (--ht->refcnt == 0)
		u32_destroy_hnode(tp, ht);

	return 0;
}

/*
 * gen_new_kid - pick a node id for a new knode in table @ht.
 *
 * Scans the hash bucket selected by @handle for the highest node id in
 * use (starting from a floor of 0x7FF so automatic ids live in the
 * upper half of the 12-bit space) and returns @handle with that id + 1
 * OR-ed in, clamped to 0xFFF.
 *
 * NOTE(review): once the bucket already holds id 0xFFF the clamp makes
 * this return a duplicate handle — pre-existing upstream behavior,
 * flagged, not changed here.
 */
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned i = 0x7FF;

	for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle|(i>0xFFF ? 0xFFF : i);
}

/*
 * u32_set_parms - apply netlink-supplied parameters to knode @n.
 *
 * Handles TCA_U32_LINK (link to a lower hash table), TCA_U32_CLASSID
 * (bind to a class of qdisc @q), and — depending on kernel config —
 * actions, policers and ingress-device matching.
 *
 * Locking pattern used throughout: build/refcount the new object
 * outside the lock, publish it with xchg() under sch_tree_lock(q), and
 * release the displaced old object after unlocking.  The exact order is
 * what keeps concurrent classifiers safe — do not reorder.
 *
 * Returns 0 on success or a negative errno.
 */
static int u32_set_parms(struct Qdisc *q, unsigned long base, struct tc_u_hnode *ht, struct tc_u_knode *n, struct rtattr **tb, struct rtattr *est)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *act = NULL;
	int ret;
#endif
	if (tb[TCA_U32_LINK-1]) {
		u32 handle = *(u32*)RTA_DATA(tb[TCA_U32_LINK-1]);
		struct tc_u_hnode *ht_down = NULL;

		/* A link target must be a table handle, not a key. */
		if (TC_U32_KEY(handle))
			return -EINVAL;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);
			if (ht_down == NULL)
				return -EINVAL;
			/* Hold a ref before publishing the pointer. */
			ht_down->refcnt++;
		}

		sch_tree_lock(q);
		ht_down = xchg(&n->ht_down, ht_down);
		sch_tree_unlock(q);

		/* Drop the ref held by the previously linked table. */
		if (ht_down)
			ht_down->refcnt--;
	}
	if (tb[TCA_U32_CLASSID-1]) {
		unsigned long cl;

		n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
		sch_tree_lock(q);
		cl = __cls_set_class(&n->res.class, q->ops->cl_ops->bind_tcf(q, base, n->res.classid));
		sch_tree_unlock(q);

		/* Unbind whatever class was attached before. */
		if (cl)
			q->ops->cl_ops->unbind_tcf(q, cl);
	}
#ifdef CONFIG_NET_CLS_ACT
	/*backward compatibility */
	if (tb[TCA_U32_POLICE-1]) {
		act = kmalloc(sizeof(*act),GFP_KERNEL);
		if (NULL == act)
			return -ENOMEM;

		memset(act,0,sizeof(*act));
		/* Old-style TCA_U32_POLICE is mapped onto a "police" action. */
		ret = tcf_action_init_1(tb[TCA_U32_POLICE-1], est,act,"police", TCA_ACT_NOREPLACE, TCA_ACT_BIND);
		if (0 > ret){
			tcf_action_destroy(act, TCA_ACT_UNBIND);
			return ret;
		}

		act->type = TCA_OLD_COMPAT;
		sch_tree_lock(q);
		act = xchg(&n->action, act);
		sch_tree_unlock(q);

		/* Destroy the action we displaced (if any). */
		tcf_action_destroy(act, TCA_ACT_UNBIND);
	}

	if(tb[TCA_U32_ACT-1]) {
		act = kmalloc(sizeof(*act),GFP_KERNEL);
		if (NULL == act)
			return -ENOMEM;

		memset(act,0,sizeof(*act));
		ret = tcf_action_init(tb[TCA_U32_ACT-1], est,act,NULL,TCA_ACT_NOREPLACE, TCA_ACT_BIND);
		if (0 > ret) {
			tcf_action_destroy(act, TCA_ACT_UNBIND);
			return ret;
		}

		sch_tree_lock(q);
		act = xchg(&n->action, act);
		sch_tree_unlock(q);

		tcf_action_destroy(act, TCA_ACT_UNBIND);
	}
#else
#ifdef CONFIG_NET_CLS_POLICE
	if (tb[TCA_U32_POLICE-1]) {
		struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1], est);

		sch_tree_lock(q);
		police = xchg(&n->police, police);
		sch_tree_unlock(q);

		tcf_police_release(police, TCA_ACT_UNBIND);
	}
#endif
#endif
#ifdef CONFIG_NET_CLS_IND
	n->indev[0] = 0;
	if(tb[TCA_U32_INDEV-1]) {
		struct rtattr *input_dev = tb[TCA_U32_INDEV-1];

		/* Reject names that would not fit (incl. terminator). */
		if (RTA_PAYLOAD(input_dev) >= IFNAMSIZ) {
			printk("cls_u32: bad indev name %s\n",(char*)RTA_DATA(input_dev));
			/* should we clear state first? */
			return -EINVAL;
		}
		sprintf(n->indev, "%s", (char*)RTA_DATA(input_dev));
		printk("got IND %s\n",n->indev);
	}
#endif

	return 0;
}

/*
 * u32_change - create or update a filter from a netlink request.
 *
 * Four cases, in order:
 *  1. *arg non-NULL: existing knode — only its parameters are updated.
 *  2. TCA_U32_DIVISOR present: allocate a new hash table (hnode);
 *     divisor is stored as (requested - 1), max 256 buckets.
 *  3/4. Otherwise a new knode: resolve the owning table from
 *     TCA_U32_HASH (or fall back to the root table), validate/derive
 *     the 32-bit handle (high 20 bits = table id, low 12 = node id),
 *     copy the TCA_U32_SEL selector, then insert into the bucket in
 *     ascending node-id order.
 *
 * Returns 0 on success, negative errno otherwise; on success *arg
 * carries the new hnode/knode.
 */
static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, struct rtattr **tca, unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_U32_MAX];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse(tb, TCA_U32_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)) < 0)
		return -EINVAL;

	/* Case 1: change of an existing knode. */
	if ((n = (struct tc_u_knode*)*arg) != NULL) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;
		return u32_set_parms(tp->q, base, n->ht_up, n, tb, tca[TCA_RATE-1]);
	}

	/* Case 2: creation of a new hash table. */
	if (tb[TCA_U32_DIVISOR-1]) {
		unsigned divisor = *(unsigned*)RTA_DATA(tb[TCA_U32_DIVISOR-1]);

		/* Stored divisor is (requested - 1); at most 256 buckets. */
		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		/* One pointer slot per extra bucket beyond ht->ht[0]. */
		ht = kmalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		memset(ht, 0, sizeof(*ht) + divisor*sizeof(void*));
		ht->tp_c = tp_c;
		ht->refcnt = 0;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	/* Cases 3/4: new knode — find the table it goes into. */
	if (tb[TCA_U32_HASH-1]) {
		htid = *(unsigned*)RTA_DATA(tb[TCA_U32_HASH-1]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	/* Requested bucket must exist in this table. */
	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		/* Explicit handle must match the chosen table's id. */
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL-1] == 0 || RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel))
		return -EINVAL;

	s = RTA_DATA(tb[TCA_U32_SEL-1]);

	/* Knode carries nkeys match keys inline after the selector. */
	n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

	memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key));
#ifdef CONFIG_CLS_U32_PERF
	n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(__u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
	memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(__u64));
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	{
		/* fshift = position of lowest set bit of hmask (0 if none). */
		u8 i = 0;
		u32 mask = s->hmask;
		if (mask) {
			while (!(mask & 1)) {
				i++;
				mask>>=1;
			}
		}
		n->fshift = i;
	}
	err = u32_set_parms(tp->q, base, ht, n, tb, tca[TCA_RATE-1]);
	if (err == 0) {
		struct tc_u_knode **ins;

		/* Keep the bucket sorted by ascending node id. */
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		/* Publish the fully initialized node before linking it in
		 * so lockless readers never see a half-built knode. */
		wmb();
		*ins = n;

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	if (n && (NULL != n->pf))
		kfree(n->pf);
#endif
	kfree(n);
	return err;
}

/*
 * u32_walk - iterate over every hnode and knode, calling arg->fn on
 * each.  Honors arg->skip (entries to pass over) and arg->count
 * (running tally); a negative return from arg->fn sets arg->stop and
 * aborts the walk.  Buckets run 0..divisor inclusive because divisor
 * is stored as (bucket count - 1).
 */
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

/*
 * u32_dump - emit one filter element (@fh) as netlink attributes into
 * @skb.  An hnode (TC_U32_KEY == 0) dumps only its divisor; a knode
 * dumps selector, owning/linked table handles, classid, and — per
 * config — actions/policer, input device and performance counters.
 * RTA_PUT jumps to rtattr_failure on overflow, where the skb is
 * trimmed back to its starting length.
 *
 * Returns the skb length on success, -1 on attribute overflow.
 */
static int u32_dump(struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode*)fh;
	unsigned char *b = skb->tail;
	struct rtattr *rta;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (TC_U32_KEY(n->handle) == 0) {
		/* Hash table: report the user-visible divisor (+1). */
		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
		u32 divisor = ht->divisor+1;
		RTA_PUT(skb, TCA_U32_DIVISOR, 4, &divisor);
	} else {
		RTA_PUT(skb, TCA_U32_SEL, sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), &n->sel);
		if (n->ht_up) {
			/* High 20 bits of the handle = owning table id. */
			u32 htid = n->handle & 0xFFFFF000;
			RTA_PUT(skb, TCA_U32_HASH, 4, &htid);
		}
		if (n->res.classid)
			RTA_PUT(skb, TCA_U32_CLASSID, 4, &n->res.classid);
		if (n->ht_down)
			RTA_PUT(skb, TCA_U32_LINK, 4, &n->ht_down->handle);
#ifdef CONFIG_NET_CLS_ACT
		/* again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs */
		if (n->action) {
			struct rtattr * p_rta = (struct rtattr*)skb->tail;

			if (n->action->type != TCA_OLD_COMPAT) {
				RTA_PUT(skb, TCA_U32_ACT, 0, NULL);
				if (tcf_action_dump(skb,n->action, 0, 0) < 0) {
					goto rtattr_failure;
				}
			} else {
				RTA_PUT(skb, TCA_U32_POLICE, 0, NULL);
				if (tcf_action_dump_old(skb,n->action,0,0) < 0) {
					goto rtattr_failure;
				}
			}

			/* Backfill the nested attribute's length. */
			p_rta->rta_len = skb->tail - (u8*)p_rta;
		}
#else
#ifdef CONFIG_NET_CLS_POLICE
		if (n->police) {
			struct rtattr * p_rta = (struct rtattr*)skb->tail;

			RTA_PUT(skb, TCA_U32_POLICE, 0, NULL);
			if (tcf_police_dump(skb, n->police) < 0)
				goto rtattr_failure;

			p_rta->rta_len = skb->tail - (u8*)p_rta;
		}
#endif
#endif

#ifdef CONFIG_NET_CLS_IND
		if(strlen(n->indev)) {
			struct rtattr * p_rta = (struct rtattr*)skb->tail;
			RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
			p_rta->rta_len = skb->tail - (u8*)p_rta;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		RTA_PUT(skb, TCA_U32_PCNT, sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(__u64), n->pf);
#endif
	}

	rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_ACT
	/* Old-compat policer stats ride outside TCA_OPTIONS. */
	if (TC_U32_KEY(n->handle) != 0) {
		if (TC_U32_KEY(n->handle) && n->action && n->action->type == TCA_OLD_COMPAT) {
			if (tcf_action_copy_stats(skb,n->action))
				goto rtattr_failure;
		}
	}
#else
#ifdef CONFIG_NET_CLS_POLICE
	if (TC_U32_KEY(n->handle) && n->police) {
		if (qdisc_copy_stats(skb, &n->police->stats, n->police->stats_lock))
			goto rtattr_failure;
	}
#endif
#endif
	return skb->len;

rtattr_failure:
	/* Roll back everything this dump added to the skb. */
	skb_trim(skb, b - skb->data);
	return -1;
}

/* Classifier registration table wiring the u32 ops into tc. */
static struct tcf_proto_ops cls_u32_ops = {
	.next		= NULL,
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.put		= u32_put,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.dump		= u32_dump,
	.owner		= THIS_MODULE,
};

/* Module init: announce compile-time options, register the classifier. */
static int __init init_u32(void)
{
	printk("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	printk(" Perfomance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_POLICE
	printk(" OLD policer on \n");
#endif
#ifdef CONFIG_NET_CLS_IND
	printk(" input device check on \n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	printk(" Actions configured \n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

/* Module exit: unregister the classifier. */
static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -