
📄 ip_output.c

📁 Linux kernel source code (net/ipv4)
💻 C
		skb = sock_alloc_send_skb(sk, fraglen+hh_len+15, 0, flags&MSG_DONTWAIT, &err);
		if (skb == NULL)
			goto error;

		/*
		 *	Fill in the control structures
		 */

		skb->priority = sk->priority;
		skb->dst = dst_clone(&rt->u.dst);
		skb_reserve(skb, hh_len);

		/*
		 *	Find where to start putting bytes.
		 */

		data = skb_put(skb, fraglen);
		skb->nh.iph = (struct iphdr *)data;

		/*
		 *	Only write IP header onto non-raw packets
		 */

		{
			struct iphdr *iph = (struct iphdr *)data;

			iph->version = 4;
			iph->ihl = 5;
			if (opt) {
				iph->ihl += opt->optlen>>2;
				ip_options_build(skb, opt,
						 ipc->addr, rt, offset);
			}
			iph->tos = sk->ip_tos;
			iph->tot_len = htons(fraglen - fragheaderlen + iph->ihl*4);
			iph->id = id;
			iph->frag_off = htons(offset>>3);
			iph->frag_off |= mf|df;
			if (rt->rt_type == RTN_MULTICAST)
				iph->ttl = sk->ip_mc_ttl;
			else
				iph->ttl = sk->ip_ttl;
			iph->protocol = sk->protocol;
			iph->check = 0;
			iph->saddr = rt->rt_src;
			iph->daddr = rt->rt_dst;
			iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
			data += iph->ihl*4;

			/*
			 *	Any further fragments will have MF set.
			 */

			mf = htons(IP_MF);
		}

		/*
		 *	User data callback
		 */

		if (getfrag(frag, data, offset, fraglen-fragheaderlen)) {
			err = -EFAULT;
			kfree_skb(skb);
			goto error;
		}

		offset -= (maxfraglen-fragheaderlen);
		fraglen = maxfraglen;

		nfrags++;

#ifdef CONFIG_FIREWALL
		switch (call_out_firewall(PF_INET, rt->u.dst.dev, skb->nh.iph, NULL, &skb)) {
		case FW_QUEUE:
			kfree_skb(skb);
			continue;
		case FW_BLOCK:
		case FW_REJECT:
			kfree_skb(skb);
			err = -EPERM;
			goto error;
		}
#endif

		err = -ENETDOWN;
		if (rt->u.dst.output(skb))
			goto error;
	} while (offset >= 0);

	if (nfrags > 1)
		ip_statistics.IpFragCreates += nfrags;

	dev_unlock_list();
	return 0;

error:
	ip_statistics.IpOutDiscards++;
	if (nfrags > 1)
		ip_statistics.IpFragCreates += nfrags;
	dev_unlock_list();
	return err;
}
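
Each pass through the loop above builds one fragment's IP header by hand. The subtle field is frag_off: its low 13 bits carry the payload offset in 8-byte units and its high bits carry the DF/MF flags, which is why the code writes htons(offset>>3) and then ORs in mf|df. A minimal userspace sketch of that packing (EX_IP_MF mirrors the kernel's IP_MF value; the offset is a made-up example):

/* --- illustrative sketch, not part of ip_output.c --- */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define EX_IP_MF 0x2000			/* same value as the kernel's IP_MF flag */

int main(void)
{
	/* Hypothetical fragment starting 2960 payload bytes into the datagram. */
	unsigned int byte_offset = 2960;
	uint16_t frag_off;

	frag_off = htons(byte_offset >> 3);	/* offset in 8-byte units, as htons(offset>>3) above */
	frag_off |= htons(EX_IP_MF);		/* more fragments follow, as mf = htons(IP_MF) above */

	printf("frag_off in host order: 0x%04x\n", ntohs(frag_off));	/* prints 0x2172 */
	return 0;
}
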
/*
 *	Fast path for unfragmented packets.
 */
int ip_build_xmit(struct sock *sk,
		  int getfrag (const void *,
			       char *,
			       unsigned int,
			       unsigned int),
		  const void *frag,
		  unsigned length,
		  struct ipcm_cookie *ipc,
		  struct rtable *rt,
		  int flags)
{
	int err;
	struct sk_buff *skb;
	int df;
	struct iphdr *iph;

	/*
	 *	Try the simple case first. This leaves fragmented frames, and by
	 *	choice RAW frames within 20 bytes of maximum size (rare) to the long path
	 */

	if (!sk->ip_hdrincl) {
		length += sizeof(struct iphdr);

		/*
		 *	Check for slow path.
		 */
		if (length > rt->u.dst.pmtu || ipc->opt != NULL)
			return ip_build_xmit_slow(sk,getfrag,frag,length,ipc,rt,flags);
	} else {
		if (length > rt->u.dst.dev->mtu) {
			ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, rt->u.dst.dev->mtu);
			return -EMSGSIZE;
		}
	}

	/*
	 *	Do path mtu discovery if needed.
	 */
	df = 0;
	if (ip_dont_fragment(sk, &rt->u.dst))
		df = htons(IP_DF);

	/*
	 *	Fast path for unfragmented frames without options.
	 */

	{
		int hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;

		skb = sock_alloc_send_skb(sk, length+hh_len+15,
					  0, flags&MSG_DONTWAIT, &err);
		if (skb == NULL)
			goto error;
		skb_reserve(skb, hh_len);
	}

	skb->priority = sk->priority;
	skb->dst = dst_clone(&rt->u.dst);
	skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);

	dev_lock_list();

	if (!sk->ip_hdrincl) {
		iph->version = 4;
		iph->ihl = 5;
		iph->tos = sk->ip_tos;
		iph->tot_len = htons(length);
		iph->id = htons(ip_id_count++);
		iph->frag_off = df;
		iph->ttl = sk->ip_mc_ttl;
		if (rt->rt_type != RTN_MULTICAST)
			iph->ttl = sk->ip_ttl;
		iph->protocol = sk->protocol;
		iph->saddr = rt->rt_src;
		iph->daddr = rt->rt_dst;
		iph->check = 0;
		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
		err = getfrag(frag, ((char *)iph)+iph->ihl*4, 0, length-iph->ihl*4);
	} else
		err = getfrag(frag, (void *)iph, 0, length);

	dev_unlock_list();

	if (err)
		goto error_fault;

#ifdef CONFIG_FIREWALL
	switch (call_out_firewall(PF_INET, rt->u.dst.dev, iph, NULL, &skb)) {
	case FW_QUEUE:
		kfree_skb(skb);
		return 0;
	case FW_BLOCK:
	case FW_REJECT:
		kfree_skb(skb);
		err = -EPERM;
		goto error;
	}
#endif

	return rt->u.dst.output(skb);

error_fault:
	err = -EFAULT;
	kfree_skb(skb);
error:
	ip_statistics.IpOutDiscards++;
	return err;
}
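
Note how both paths finish the header the same way: iph->check is zeroed first, then ip_fast_csum runs over the header with the length given in 32-bit words (iph->ihl). As a rough guide to what that call computes, here is a hedged portable sketch of the standard RFC 1071 one's-complement header checksum (the function name and signature below are mine, not a kernel API):

/* --- illustrative sketch, not part of ip_output.c --- */
#include <stdint.h>

/* Portable take on what ip_fast_csum produces, assuming the check field
 * has already been zeroed, exactly as both senders above do. */
static uint16_t ip_header_checksum(const void *hdr, unsigned int ihl_words)
{
	const uint16_t *p = hdr;
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < ihl_words * 2; i++)	/* walk the header in 16-bit chunks */
		sum += p[i];
	while (sum >> 16)			/* fold carries back into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;			/* one's complement of the folded sum */
}
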
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to the IP header plus a block of the
 *	data of the original IP data part) that will yet fit in a single device
 *	frame, and queue such a frame for sending.
 *
 *	Yes this is inefficient, feel free to submit a quicker one.
 */
void ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
	struct iphdr *iph;
	unsigned char *raw;
	unsigned char *ptr;
	struct device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len;
	int offset;
	int not_last_frag;
	struct rtable *rt = (struct rtable*)skb->dst;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	raw = skb->nh.raw;
	iph = (struct iphdr*)raw;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	left = ntohs(iph->tot_len) - hlen;	/* Space per frame */
	mtu = rt->u.dst.pmtu - hlen;		/* Size of data space */
	ptr = raw + hlen;			/* Where to start from */

	/*
	 *	The protocol doesn't seem to say what to do in the case that the
	 *	frame + options doesn't fit the mtu. As it used to fall down dead
	 *	in this case, we were fortunate it didn't happen.
	 *
	 *	It is impossible, because mtu >= 68. --ANK (980801)
	 */

#ifdef CONFIG_NET_PARANOIA
	if (mtu < 8)
		goto fail;
#endif

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+dev->hard_header_len+15, GFP_ATOMIC)) == NULL) {
			NETDEBUG(printk(KERN_INFO "IP: frag: no memory for new fragment!\n"));
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		skb2->pkt_type = skb->pkt_type;
		skb2->priority = skb->priority;
		skb_reserve(skb2, (dev->hard_header_len+15)&~15);
		skb_put(skb2, len + hlen);
		skb2->nh.raw = skb2->data;
		skb2->h.raw = skb2->data + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		skb2->dst = dst_clone(skb->dst);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb2->nh.raw, raw, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */

		memcpy(skb2->h.raw, ptr, len);
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */

		iph = skb2->nh.iph;
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed), and do it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC: if we are fragmenting a fragment that's not the
		 *	last fragment then keep MF set on each piece
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */

		ip_statistics.IpFragCreates++;

		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		output(skb2);
	}

	kfree_skb(skb);
	ip_statistics.IpFragOKs++;
	return;

fail:
	kfree_skb(skb);
	ip_statistics.IpFragFails++;
}
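
The loop above yields the classic IP fragment layout: every fragment except the last carries a multiple of 8 payload bytes (len &= ~7), and MF stays set while data remains or when the input was itself a non-final fragment. A self-contained sketch of the resulting schedule, using made-up sizes:

/* --- illustrative sketch, not part of ip_output.c --- */
#include <stdio.h>

int main(void)
{
	/* Example: a 4020-byte datagram (20-byte header) over a 1500-byte path MTU. */
	unsigned int hlen = 20;
	unsigned int left = 4020 - hlen;	/* payload bytes still to send */
	unsigned int mtu = 1500 - hlen;		/* data space per fragment, as in ip_fragment */
	unsigned int offset = 0;

	while (left > 0) {
		unsigned int len = left > mtu ? mtu : left;
		if (len < left)			/* every fragment but the last is 8-byte aligned */
			len &= ~7u;
		/* prints: 0/1480/MF=1, 1480/1480/MF=1, 2960/1040/MF=0 */
		printf("offset=%u len=%u MF=%d\n", offset, len, len < left);
		left -= len;
		offset += len;
	}
	return 0;
}
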
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(const void *dptr, char *to, unsigned int offset,
			      unsigned int fraglen)
{
	struct ip_reply_arg *dp = (struct ip_reply_arg*)dptr;
	u16 *pktp = (u16 *)to;
	struct iovec *iov;
	int len;
	int hdrflag = 1;

	iov = &dp->iov[0];
	if (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
		hdrflag = 0;
	}
	len = iov->iov_len - offset;
	if (fraglen > len) { /* overlapping. */
		dp->csum = csum_partial_copy_nocheck(iov->iov_base+offset, to, len,
						     dp->csum);
		offset = 0;
		fraglen -= len;
		to += len;
		iov++;
	}

	dp->csum = csum_partial_copy_nocheck(iov->iov_base+offset, to, fraglen,
					     dp->csum);

	if (hdrflag && dp->csumoffset)
		*(pktp + dp->csumoffset) = csum_fold(dp->csum); /* fill in checksum */
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	u32 daddr;
	struct rtable *rt = (struct rtable*)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	sk->ip_tos = skb->nh.iph->tos;
	sk->priority = skb->priority;
	sk->protocol = skb->nh.iph->protocol;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = &replyopts.opt;

	if (ipc.opt->srr)
		daddr = replyopts.opt.faddr;
	if (ip_route_output(&rt, daddr, rt->rt_spec_dst, RT_TOS(skb->nh.iph->tos), 0))
		return;

	/* And let IP do all the hard work. */
	ip_build_xmit(sk, ip_reply_glue_bits, arg, len, &ipc, rt, MSG_DONTWAIT);

	ip_rt_put(rt);
}

/*
 *	IP protocol layer initialiser
 */
static struct packet_type ip_packet_type =
{
	__constant_htons(ETH_P_IP),
	NULL,	/* All devices */
	ip_rcv,
	NULL,
	NULL,
};

#ifdef CONFIG_PROC_FS
#ifdef CONFIG_IP_MULTICAST
static struct proc_dir_entry proc_net_igmp = {
	PROC_NET_IGMP, 4, "igmp",
	S_IFREG | S_IRUGO, 1, 0, 0,
	0, &proc_net_inode_operations,
	ip_mc_procinfo
};
#endif
#endif

/*
 *	IP registers the packet type and then calls the subprotocol initialisers
 */
__initfunc(void ip_init(void))
{
	dev_add_pack(&ip_packet_type);
	ip_rt_init();
#ifdef CONFIG_PROC_FS
#ifdef CONFIG_IP_MULTICAST
	proc_net_register(&proc_net_igmp);
#endif
#endif
}
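
ip_reply_glue_bits above keeps a 32-bit running sum in dp->csum as it copies from up to two iovecs, and folds it to a 16-bit value only once, storing the result at the 16-bit word offset dp->csumoffset inside the header. A hedged userspace sketch of that accumulate-then-fold pattern (partial_sum and fold_sum are stand-ins I wrote, not the kernel's csum_partial_copy_nocheck/csum_fold, which also differ in byte-order details):

/* --- illustrative sketch, not part of ip_output.c --- */
#include <stdio.h>
#include <stdint.h>

/* Accumulate a 32-bit partial sum over one buffer (stand-in for what
 * csum_partial_copy_nocheck accumulates into dp->csum while copying). */
static uint32_t partial_sum(const uint8_t *buf, unsigned int len, uint32_t sum)
{
	unsigned int i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)((buf[i] << 8) | buf[i + 1]);
	if (len & 1)				/* odd trailing byte is zero-padded */
		sum += (uint32_t)(buf[len - 1] << 8);
	return sum;
}

/* Stand-in for csum_fold: collapse the carries, then complement once. */
static uint16_t fold_sum(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t header[20] = {0};		/* first iovec: protocol header */
	uint8_t payload[12] = "hello reply";	/* second iovec: data */
	uint32_t sum = 0;

	sum = partial_sum(header, sizeof(header), sum);
	sum = partial_sum(payload, sizeof(payload), sum);
	printf("folded checksum: 0x%04x\n", fold_sum(sum));
	return 0;
}
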
