
📄 xfrm_policy.c

📁 Linux kernel source code
💻 C
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

static int inline
xfrm_dst_alloc_copy(void **target, void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

static int inline
xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static int inline
xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}

static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int __xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		  struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols;
	int pol_dead;
	int xfrm_nr;
	int pi;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);

restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
		pols[pi] = NULL;
	npols = 0;
	pol_dead = 0;
	xfrm_nr = 0;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = PTR_ERR(policy);
		if (IS_ERR(policy))
			goto dropdst;
	}

	if (!policy) {
		/* To accelerate a bit...  */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !xfrm_policy_count[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
		err = PTR_ERR(policy);
		if (IS_ERR(policy))
			goto dropdst;
	}

	if (!policy)
		return 0;

	family = dst_orig->ops->family;
	policy->curlft.use_time = get_seconds();
	pols[0] = policy;
	npols ++;
	xfrm_nr += pols[0]->xfrm_nr;

	switch (policy->action) {
	default:
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
#ifndef CONFIG_XFRM_SUB_POLICY
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}
#endif

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

#ifdef CONFIG_XFRM_SUB_POLICY
		if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
			pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
							    fl, family,
							    XFRM_POLICY_OUT);
			if (pols[1]) {
				if (IS_ERR(pols[1])) {
					err = PTR_ERR(pols[1]);
					goto error;
				}
				if (pols[1]->action == XFRM_POLICY_BLOCK) {
					err = -EPERM;
					goto error;
				}
				npols ++;
				xfrm_nr += pols[1]->xfrm_nr;
			}
		}

		/*
		 * Because neither flowi nor bundle information knows about
		 * transformation template size. On more than one policy usage
		 * we can realize whether all of them is bypass or not after
		 * they are searched. See above not-transformed bypass
		 * is surrounded by non-sub policy configuration, too.
		 */
		if (xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

#endif
		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && sysctl_xfrm_larval_drop) {
				/* EREMOTE tells the caller to generate
				 * a one-shot blackhole route.
				 */
				xfrm_pol_put(policy);
				return -EREMOTE;
			}
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stable bundles either.
			 */
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);

			err = -EHOSTUNREACH;
			goto error;
		}

		if (npols > 1)
			err = xfrm_dst_update_parent(dst, &pols[1]->selector);
		else
			err = xfrm_dst_update_origin(dst, fl);
		if (unlikely(err)) {
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);
			goto error;
		}

		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	xfrm_pols_put(pols, npols);
dropdst:
	dst_release(dst_orig);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(__xfrm_lookup);

int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	int err = __xfrm_lookup(dst_p, fl, sk, flags);

	if (err == -EREMOTE) {
		dst_release(*dst_p);
		*dst_p = NULL;
		err = -EAGAIN;
	}

	return err;
}
EXPORT_SYMBOL(xfrm_lookup);

static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		((tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * 0 or more than 0 is returned when validation is succeeded (either bypass
 * because of optional transport mode, or next index of the mathced secpath
 * state with the template.
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}

int
xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_decode_session);

static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	struct flowi fl;
	u8 fl_dir = policy_to_flow_dir(dir);
	int xerr_idx = -1;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;
	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family))
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol))
			return 0;
	}

	if (!pol)
		pol = flow_cache_lookup(&fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol))
		return 0;

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols ++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1]))
				return 0;
			pols[1]->curlft.use_time = get_seconds();
			npols ++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW)
				goto reject;
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH)
				goto reject_error;
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx))
			goto reject;

		xfrm_pols_put(pols, npols);
		return 1;
	}

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use.  We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them.  It is just too much work.
	 * Instead we make the checks here on every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes it's ->obsolete field to be set to a
	 * positive non-zero integer.  If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = init_net.loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before reaches point of failure. */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
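
For orientation, below is a minimal, hypothetical sketch of how the two entry points exported in the listing above are typically driven: xfrm_lookup() on the output path (the caller supplies a flowi and an already-resolved plain route in *dst_p, and gets back either that same route or an IPsec bundle, with *dst_p cleared on error) and __xfrm_policy_check() on the input path (returns 1 to accept the packet, 0 to reject it). The wrapper functions example_output_lookup() and example_input_check() are illustrative assumptions, not code from this file.

/*
 * Illustrative caller sketches (NOT part of xfrm_policy.c).  They rely only
 * on the semantics visible above: xfrm_lookup() replaces *dst_p with a
 * bundle or leaves it untouched when no policy matches, and releases it on
 * failure; __xfrm_policy_check() returns 1 to accept and 0 to reject.
 */

/* Output path: *dst_p already holds a plain route for the flow in fl. */
static int example_output_lookup(struct dst_entry **dst_p, struct flowi *fl,
				 struct sock *sk)
{
	/* flags == 0: do not sleep waiting for larval (unresolved) SAs */
	int err = xfrm_lookup(dst_p, fl, sk, 0);

	if (err)
		/* On failure xfrm_lookup() has already released the original
		 * route and set *dst_p to NULL. */
		return err;

	/* *dst_p is now either the original route or the head of a bundle. */
	return 0;
}

/* Input path: decide whether a received, decapsulated packet may pass. */
static int example_input_check(struct sock *sk, struct sk_buff *skb)
{
	if (!__xfrm_policy_check(sk, XFRM_POLICY_IN, skb, AF_INET)) {
		kfree_skb(skb);		/* policy says drop */
		return -EPERM;		/* hypothetical error code choice */
	}
	return 0;			/* accepted */
}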
