
📄 ip_output.c

📁 Linux kernel source code
💻 C
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {
			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}

	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_sock *inet)
{
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}

/*
 *	Combined all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter, what transforms
	 * how transforms change size of the packet, it will come out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(((struct icmphdr *)
			skb_transport_header(skb))->type);

	/* Netfilter gets whole the not fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	ip_cork_release(inet);
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(inet_sk(sk));
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = (struct rtable*)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .oif = arg->bound_dev_if,
				    .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(ip_hdr(skb)->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = tcp_hdr(skb)->dest,
						 .dport = tcp_hdr(skb)->source } },
				    .proto = sk->sk_protocol };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reenterable, hence spinlock.
	   Note that it uses the fact, that this function is called
	   with locally disabled BH and that sk cannot be already spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);
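
The exported functions above form IPv4's "corking" path: ip_append_data() / ip_append_page() queue payload on sk->sk_write_queue, and ip_push_pending_frames() later glues the queued buffers into one datagram, fills in the IP header and hands it to the NF_IP_LOCAL_OUT hook and dst_output(), much as ip_send_reply() does in the listing. The sketch below is a hedged illustration of how a datagram protocol of this kernel generation might drive that API (loosely modeled on the UDP send path); my_dgram_send() is a hypothetical helper, and the pre-resolved route rt and control cookie ipc are assumed inputs, not code from ip_output.c.

/* Illustrative sketch only -- not part of ip_output.c.
 * Assumes the caller has already resolved a route (rt) and filled an
 * ipcm_cookie (ipc), as udp_sendmsg() does before appending data.
 */
#include <linux/socket.h>
#include <net/sock.h>
#include <net/ip.h>

static int my_dgram_send(struct sock *sk, struct msghdr *msg, size_t len,
			 struct ipcm_cookie *ipc, struct rtable *rt)
{
	int err;

	lock_sock(sk);

	/* Queue the user iovec on sk->sk_write_queue. transhdrlen is 0
	 * here because this sketch builds no separate transport header;
	 * a real protocol such as UDP would reserve sizeof(struct udphdr)
	 * and fill it in before pushing the frames. */
	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
			     ipc, rt, msg->msg_flags);
	if (err)
		/* On failure, drop everything queued so far. */
		ip_flush_pending_frames(sk);
	else
		/* Build the IP header and send the whole datagram through
		 * NF_IP_LOCAL_OUT / dst_output(). */
		err = ip_push_pending_frames(sk);

	release_sock(sk);
	return err;
}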
