⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 xfrm_policy.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 第 1 页 / 共 2 页
字号:
		/* NOTE(review): this is the tail of the per-template resolution
		 * loop (the worker behind xfrm_tmpl_resolve(), judging from the
		 * later call sites); the function header and loop head lie
		 * before this view.
		 */
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
		/* Tunnel mode: endpoints come from the template itself rather
		 * than from the flow addresses resolved so far.
		 */
		if (tmpl->mode) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
		}
		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
		if (x && x->km.state == XFRM_STATE_VALID) {
			/* Usable SA: record it and chain the next lookup off
			 * this hop's addresses.
			 */
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			/* A state exists but is not VALID: hard error if it is
			 * in ERROR state, otherwise -EAGAIN (e.g. still larval).
			 */
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		/* Optional templates may be skipped; mandatory ones fail the
		 * whole resolution.
		 */
		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	/* Drop the references taken on every state resolved so far. */
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 *
 * Delegates to the per-family find_bundle hook.  Returns the cached
 * bundle, NULL when none matches, or ERR_PTR(-EINVAL) when the family
 * has no registered afinfo.  xfrm_policy_get_afinfo() returns with
 * afinfo->lock read-held; xfrm_policy_put_afinfo() drops it.
 */
static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 *
 * Delegates to the per-family bundle_create hook; *dst_p receives the
 * head of the new chain.  Returns 0 or a negative errno (-EINVAL when
 * the family has no registered afinfo).
 */
static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Map an XFRM_POLICY_* direction onto the corresponding FLOW_DIR_*
 * value.  When the two constant sets already coincide the first test
 * is compile-time true and the function collapses to the identity.
 */
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	};
}

static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
*/

/* Resolve (or build) the IPsec bundle for @fl and swap it in via
 * *dst_p, consuming the caller's raw route reference on success.
 *
 * @dst_p:  in: raw (non-xfrm) route; out: bundle head, or unchanged
 *          when the flow passes untransformed, or NULL on error.
 * @fl:     the flow being looked up.
 * @sk:     originating socket, may be NULL; its per-socket OUT policy
 *          (sk_policy[1]) takes precedence over the global lists.
 * @flags:  non-zero allows sleeping on km_waitq for larval states.
 *
 * Returns 0 on success (including "no policy applies"), -EPERM for a
 * BLOCK policy, -ERESTART when interrupted while waiting, or another
 * negative errno from resolution/bundle creation.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family = dst_orig->ops->family;

restart:
	/* Snapshot the flow-cache generation; if it moves while we sleep
	 * or build the bundle, the whole lookup is retried.
	 */
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	if (sk && sk->sk_policy[1])
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);

	if (!policy) {
		/* To accelerate a bit...  */
		if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, family,
					   policy_to_flow_dir(XFRM_POLICY_OUT),
					   xfrm_policy_lookup);
	}

	if (!policy)
		return 0;

	policy->curlft.use_time = (unsigned long)xtime.tv_sec;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		xfrm_pol_put(policy);
		return -EPERM;

	case XFRM_POLICY_ALLOW:
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			xfrm_pol_put(policy);
			return PTR_ERR(dst);
		}

		/* Cached bundle found: skip resolution/creation. */
		if (dst)
			break;

		nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && flags) {
				/* States are still larval: sleep once on
				 * km_waitq (key manager progress wakes us)
				 * and retry the resolution.
				 */
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				/* Still not ready, or the flow cache moved
				 * under us: restart from scratch.
				 */
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pol_put(policy);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			/* Bundle creation failed: drop the references
			 * taken by xfrm_tmpl_resolve().
			 */
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		write_lock_bh(&policy->lock);
		if (unlikely(policy->dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stable bundles either.
			 */
			write_unlock_bh(&policy->lock);

			xfrm_pol_put(policy);
			if (dst)
				dst_free(dst);
			goto restart;
		}
		/* Enlist the fresh bundle on the policy and take our own
		 * reference before dropping the lock.
		 */
		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	/* Hand the bundle to the caller; the raw route ref is released. */
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	*dst_p = NULL;
	return err;
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
*/

/* Decide whether state @x satisfies template @tmpl.
 *
 * Kernel-owned states only match optional templates with agreeing
 * addresses.  Otherwise proto, SPI and reqid must match (0 in the
 * template acts as a wildcard), mode must agree, the auth algorithm
 * must be in the template's allowed mask, and for tunnel mode the
 * addresses must match as well.
 */
static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->aalgos & (1<<x->props.aalgo)) &&
		!(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
}

/* Scan the received sec_path @sp from index @start for a state that
 * satisfies @tmpl.  Returns the index just past the match, @start
 * unchanged for a skippable optional transport-mode template, or -1
 * when a mandatory template is unmatched.  The scan stops at the
 * first tunnel-mode entry (barrier between transformation groups).
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (!tmpl->mode)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->x[idx].xvec, family))
			return ++idx;
		if (sp->x[idx].xvec->props.mode)
			break;
	}
	return start;
}

/* Fill @fl from @skb via the per-family decode_session hook.
 * Returns 0 or -EAFNOSUPPORT when no afinfo is registered for
 * @family.
 */
static int
_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	afinfo->decode_session(skb, fl);
	xfrm_policy_put_afinfo(afinfo);
	return 0;
}

/* Inbound policy check: verify that the transformations recorded in
 * skb->sp satisfy the applicable policy for direction @dir.
 * Returns 1 when the packet is acceptable, 0 when it must be dropped.
 */
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct flowi fl;

	if (_decode_session(skb, &fl, family) < 0)
		return 0;

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct sec_decap_state *xvec = &(skb->sp->x[i]);
			if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family))
				return 0;

			/* If there is a post_input processor, try running it */
			if (xvec->xvec->type->post_input &&
			    (xvec->xvec->type->post_input)(xvec->xvec,
							   &(xvec->decap),
							   skb) != 0)
				return 0;
		}
	}

	/* Per-socket policy wins over the global lists / flow cache. */
	pol = NULL;
	if (sk && sk->sk_policy[dir])
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);

	if (!pol)
		pol = flow_cache_lookup(&fl, family,
					policy_to_flow_dir(dir),
					xfrm_policy_lookup);

	/* No policy: plain traffic passes, transformed traffic does not. */
	if (!pol)
		return !skb->sp;

	pol->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = pol->xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(pol->xfrm_vec+i, sp, k, family);
			if (k < 0)
				goto reject;
		}

		/* Any leftover tunnel-mode state past the matched templates
		 * means the packet was transformed more than policy allows.
		 */
		for (; k < sp->len; k++) {
			if (sp->x[k].xvec->props.mode)
				goto reject;
		}

		xfrm_pol_put(pol);
		return 1;
	}

reject:
	xfrm_pol_put(pol);
	return 0;
}

/* Forwarding-path hook: route the packet through xfrm_lookup() so a
 * bundle replaces skb->dst when policy requires transformation.
 * Returns 1 on success, 0 on failure.
 */
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}

/* Optimize later using cookies and generation ids.
*/

/* dst_ops->check hook: keep the dst only while the whole bundle is
 * still usable.  @cookie is unused here.
 */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	if (!stale_bundle(dst))
		return dst;

	dst_release(dst);
	return NULL;
}

/* A bundle is stale when any link of its child chain is obsolete,
 * sits on a stopped device, or references a no-longer-valid state.
 */
static int stale_bundle(struct dst_entry *dst)
{
	struct dst_entry *child = dst;

	while (child) {
		if (child->obsolete > 0 ||
		    (child->dev && !netif_running(child->dev)) ||
		    (child->xfrm && child->xfrm->km.state != XFRM_STATE_VALID)) {
			return 1;
		}
		child = child->child;
	}

	return 0;
}

/* dst_ops->destroy hook: release the state reference held by this
 * dst, if any.
 */
static void xfrm_dst_destroy(struct dst_entry *dst)
{
	if (!dst->xfrm)
		return;
	xfrm_state_put(dst->xfrm);
	dst->xfrm = NULL;
}

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before reaches point of failure. */
	return;
}

/* dst_ops->negative_advice hook: drop an obsolete dst, otherwise keep it. */
static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

/* Walk every policy's bundle list and unlink each bundle for which
 * @func returns non-zero.  Lock nesting: xfrm_policy_lock is read-held
 * (BH off) around the walk, with each policy's lock write-held while
 * its list is edited.  Victims are collected on a private gc_list and
 * freed only after all locks are dropped.
 */
static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	int i;
	struct xfrm_policy *pol;
	struct dst_entry *dst, **dstp, *gc_list = NULL;

	read_lock_bh(&xfrm_policy_lock);
	for (i=0; i<2*XFRM_POLICY_MAX; i++) {
		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
			write_lock(&pol->lock);
			dstp = &pol->bundles;
			while ((dst=*dstp) != NULL) {
				if (func(dst)) {
					/* Unlink and queue for freeing. */
					*dstp = dst->next;
					dst->next = gc_list;
					gc_list = dst;
				} else {
					dstp = &dst->next;
				}
			}
			write_unlock(&pol->lock);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

/* Prune predicate: bundle has no remaining users. */
static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

/* Garbage collection: discard bundles nobody references any more. */
static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

/* Discard every bundle that has gone stale (e.g. after a device went
 * down).  Always returns 0.
 */
int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}

/* Well... that's _TASK_. We need to scan through transformation
 * list and figure out what mss tcp should generate in order to
 * final datagram fit to mtu. Mama mia... :-)
 *
 * Apparently, some easy way exists, but we used to choose the most
 * bizarre ones.
:-) So, raising Kalashnikov... tra-ta-ta.
 *
 * Consider this function as something like dark humour. :-)
 */

/* dst_ops->get_mss hook: find the largest payload @res such that,
 * after every transform on the bundle adds its overhead, the result
 * still fits in @mtu.  Iterates because per-transform overhead may
 * depend on the size fed to get_max_size().  Gives up and returns
 * the raw @mtu once the usable payload would drop below 88 bytes.
 */
static int xfrm_get_mss(struct dst_entry *dst, u32 mtu)
{
	int res = mtu - dst->header_len;

	for (;;) {
		struct dst_entry *d = dst;
		int m = res;

		do {
			struct xfrm_state *x = d->xfrm;
			if (x) {
				spin_lock_bh(&x->lock);
				if (x->km.state == XFRM_STATE_VALID &&
				    x->type && x->type->get_max_size)
					m = x->type->get_max_size(d->xfrm, m);
				else
					m += x->props.header_len;
				spin_unlock_bh(&x->lock);
			}
		} while ((d = d->child) != NULL);

		if (m <= mtu)
			break;
		res -= (m - mtu);
		if (res < 88)
			return mtu;
	}

	return res + dst->header_len;
}

/* Register per-address-family policy operations.  Fills in any dst_ops
 * hooks the caller left NULL with the generic xfrm implementations
 * above.  Returns 0, -EINVAL for a NULL argument, -EAFNOSUPPORT for an
 * out-of-range family, or -ENOBUFS when the family is already taken.
 */
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->destroy == NULL))
			dst_ops->destroy = xfrm_dst_destroy;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->get_mss == NULL))
			dst_ops->get_mss = xfrm_get_mss;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return err;
}

/* Undo xfrm_policy_register_afinfo(): clear the slot and every hook
 * it installed.  Returns 0, -EINVAL when @afinfo is NULL or does not
 * own its family's slot, or -EAFNOSUPPORT for an out-of-range family.
 */
int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->destroy = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			dst_ops->get_mss = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return err;
}

/* Look up the afinfo for @family.  On success the afinfo's own rwlock
 * is returned read-held (taken under the global lock so unregister
 * cannot race); the caller releases it via xfrm_policy_put_afinfo().
 * Returns NULL when the family is out of range or unregistered.
 */
struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (likely(afinfo != NULL))
		read_lock(&afinfo->lock);
	read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

/* Release the read lock taken by xfrm_policy_get_afinfo(). */
void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	if (unlikely(afinfo == NULL))
		return;
	read_unlock(&afinfo->lock);
}

/* Netdevice notifier: flush stale bundles when a device goes down. */
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

struct notifier_block xfrm_dev_notifier = {
	xfrm_dev_event,
	NULL,
	0
};

/* One-time setup of the policy layer: dst cache, policy GC work item,
 * netdevice notifier.  Panics if the cache cannot be allocated.
 */
void __init xfrm_policy_init(void)
{
	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL, NULL);
	if (!xfrm_dst_cache)
		panic("XFRM: failed to allocate xfrm_dst_cache\n");

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

/* Top-level IPsec/XFRM initialization: state, policy, input layers. */
void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -