📄 xfrm_policy.c
字号:
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <net/xfrm.h>
#include <net/ip.h>

DECLARE_MUTEX(xfrm_cfg_sem);

/* Protects xfrm_policy_list[]. */
static rwlock_t xfrm_policy_lock = RW_LOCK_UNLOCKED;

/* One chain per direction; the second XFRM_POLICY_MAX slots hold
 * per-socket policies (see __xfrm_policy_link() callers below). */
struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];

static rwlock_t xfrm_policy_afinfo_lock = RW_LOCK_UNLOCKED;
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

kmem_cache_t *xfrm_dst_cache;

/* Deferred destruction of dead policies, see xfrm_policy_kill(). */
static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list =
	LIST_HEAD_INIT(xfrm_policy_gc_list);
static spinlock_t xfrm_policy_gc_lock = SPIN_LOCK_UNLOCKED;

/*
 * Register a transform type in the per-family type map.
 * Returns -EAFNOSUPPORT if the family has no afinfo, -EEXIST if a
 * type is already registered for type->proto, 0 on success.
 */
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (likely(typemap->map[type->proto] == NULL))
		typemap->map[type->proto] = type;
	else
		err = -EEXIST;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/*
 * Remove a previously registered transform type.
 * Returns -ENOENT if the slot does not hold exactly this type.
 */
int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (unlikely(typemap->map[type->proto] != type))
		err = -ENOENT;
	else
		typemap->map[type->proto] = NULL;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/*
 * Look up the transform type for @proto in @family, taking a module
 * reference on it.  On a miss, try once to modprobe "xfrm-type-F-P"
 * and retry.  Returns NULL if still not found; the caller releases
 * the reference with xfrm_put_type().
 */
struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type_map *typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	read_lock(&typemap->lock);
	type = typemap->map[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	read_unlock(&typemap->lock);
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return type;
}

/*
 * Route lookup dispatched through the per-family afinfo hook.
 * Returns -EAFNOSUPPORT for an unknown family, -EINVAL if the
 * family registered no dst_lookup hook.
 */
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Drop the module reference taken by xfrm_get_type(). */
void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}

/* Convert seconds to jiffies, clamped below MAX_SCHEDULE_TIMEOUT. */
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

/*
 * Per-policy lifetime timer.  Soft expiry warns the key manager
 * (km_policy_expired(..., 0)) and keeps the policy; hard expiry
 * announces it (..., 1) and deletes the policy.  The timer rearms
 * itself for the nearest remaining deadline.
 */
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	/* Direction is encoded in the low 3 bits of the index,
	 * see xfrm_gen_index(). */
	dir = xp->index & 7;

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	km_policy_expired(xp, dir, 1);
	xfrm_policy_delete(xp, dir);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */
struct xfrm_policy *xfrm_policy_alloc(int gfp)
{
	struct xfrm_policy *policy;

	policy = kmalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		memset(policy, 0, sizeof(struct xfrm_policy));
		atomic_set(&policy->refcnt, 1);
		policy->lock = RW_LOCK_UNLOCKED;
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}

/* Destroy xfrm_policy: descendant resources must be released to this moment.
 */
void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	if (!policy->dead)
		BUG();

	if (policy->bundles)
		BUG();

	if (del_timer(&policy->timer))
		BUG();

	kfree(policy);
}

/*
 * Free a dead policy's bundles, cancel its timer and drop the final
 * reference.  Runs from the GC work queue only.
 */
static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	/* Remaining references can only live in the flow cache;
	 * flush it so they get dropped. */
	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

/* Work-queue handler: drain the GC list and kill each policy on it. */
static void xfrm_policy_gc_task(void *data)
{
	struct xfrm_policy *policy;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	spin_lock_bh(&xfrm_policy_gc_lock);
	list_splice_init(&xfrm_policy_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		policy = list_entry(entry, struct xfrm_policy, list);
		xfrm_policy_gc_kill(policy);
	}
}

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must be unlinked from lists to the moment.
 */
void xfrm_policy_kill(struct xfrm_policy *policy)
{
	write_lock_bh(&policy->lock);
	if (policy->dead)
		goto out;

	policy->dead = 1;

	spin_lock(&xfrm_policy_gc_lock);
	list_add(&policy->list, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);
	schedule_work(&xfrm_policy_gc_work);

out:
	write_unlock_bh(&policy->lock);
}

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute inpredictability of ordering of rules. This will not pass.
 */
static u32 xfrm_gen_index(int dir)
{
	u32 idx;
	struct xfrm_policy *p;
	static u32 idx_generator;

	for (;;) {
		/* Low 3 bits carry the direction; the generator steps
		 * by 8 so they never collide with it. */
		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		for (p = xfrm_policy_list[dir]; p; p = p->next) {
			if (p->index == idx)
				break;
		}
		if (!p)
			return idx;
	}
}

/*
 * Insert a policy into the chain for @dir, ordered by priority.
 * A policy with an identical selector is replaced (and killed)
 * unless @excl is set, in which case -EEXIST is returned.
 */
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol, **p;
	struct xfrm_policy *delpol = NULL;
	struct xfrm_policy **newpos = NULL;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if (!delpol && memcmp(&policy->selector, &pol->selector,
				      sizeof(pol->selector)) == 0) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			/* Unlink the duplicate; it is killed below,
			 * outside the lock. */
			*p = pol->next;
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority)
			continue;
		if (!newpos)
			newpos = p;
		if (delpol)
			break;
	}
	if (newpos)
		p = newpos;
	xfrm_pol_hold(policy);
	policy->next = *p;
	*p = policy;
	atomic_inc(&flow_cache_genid);
	/* Keep the replaced policy's index so userspace handles stay valid. */
	policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol) {
		xfrm_policy_kill(delpol);
	}
	return 0;
}

/*
 * Find (and optionally unlink) the policy in @dir whose selector
 * matches @sel exactly.  Returns the policy with a reference held,
 * or NULL.
 */
struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
				      int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if (memcmp(sel, &pol->selector, sizeof(*sel)) == 0) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}

/*
 * Find (and optionally unlink) a policy by its index.  The chain is
 * derived from the low 3 bits of @id, mirroring xfrm_gen_index().
 */
struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[id & 7]; (pol=*p)!=NULL; p = &pol->next) {
		if (pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}

/*
 * Remove every policy from the main (non-socket) chains.  The lock
 * is dropped around each kill since xfrm_policy_kill() may sleep-free
 * resources via the GC machinery.
 */
void xfrm_policy_flush(void)
{
	struct xfrm_policy *xp;
	int dir;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		while ((xp = xfrm_policy_list[dir]) != NULL) {
			xfrm_policy_list[dir] = xp->next;
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_policy_kill(xp);

			write_lock_bh(&xfrm_policy_lock);
		}
	}
	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
}

/*
 * Invoke @func for every policy (all chains, including socket ones).
 * The third argument counts down to 0 for the last entry.  Returns
 * -ENOENT if there are no policies, otherwise the first non-zero
 * value returned by @func, or 0.
 */
int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *xp;
	int dir;
	int count = 0;
	int error = 0;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
			count++;
	}

	if (count == 0) {
		error = -ENOENT;
		goto out;
	}

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
			error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
			if (error)
				goto out;
		}
	}

out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}

/* Find policy to apply to this flow. */
static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			       void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
		struct xfrm_selector *sel = &pol->selector;
		int match;

		if (pol->family != family)
			continue;

		match = xfrm_selector_match(sel, fl, family);
		if (match) {
			xfrm_pol_hold(pol);
			break;
		}
	}
	read_unlock_bh(&xfrm_policy_lock);
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
}

/*
 * Match a flow against the socket's per-direction policy, if any.
 * Returns the policy with a reference held, or NULL.
 */
struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
					  struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		if (match)
			xfrm_pol_hold(pol);
		else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

/* Push @pol onto the head of chain @dir; caller holds xfrm_policy_lock. */
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	pol->next = xfrm_policy_list[dir];
	xfrm_policy_list[dir] = pol;
	xfrm_pol_hold(pol);
}

/* Unlink @pol from chain @dir; caller holds xfrm_policy_lock.
 * Returns @pol if it was found, NULL otherwise. */
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct xfrm_policy **polp;

	for (polp = &xfrm_policy_list[dir]; *polp != NULL;
	     polp = &(*polp)->next) {
		if (*polp == pol) {
			*polp = pol->next;
			return pol;
		}
	}
	return NULL;
}

/* Unlink @pol from chain @dir and kill it if it was actually linked. */
void xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
}

/*
 * Install @pol as the socket policy for @dir, replacing (and killing)
 * any previous one.  Socket policies live in the upper half of
 * xfrm_policy_list (XFRM_POLICY_MAX + dir).
 */
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = (unsigned long)xtime.tv_sec;
		pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

/*
 * Duplicate a socket policy for a child socket (fork/accept path).
 * Returns the clone, linked into the socket chain, or NULL on OOM.
 */
static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

/* Clone both directions of a parent socket's policies onto @sk. */
int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

/* Resolve list of templates for the flow, given policy. */
/* NOTE(review): the remainder of this function is truncated in the
 * captured source; the fragment below is reproduced as-is. */
static int
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -