
📄 netpoll.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 Page 1 of 2
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_SKBS 32
#define MAX_UDP_CHUNK 1460

static spinlock_t skb_list_lock = SPIN_LOCK_UNLOCKED;
static int nr_skbs;
static struct sk_buff *skbs;

static spinlock_t rx_list_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(rx_list);

static atomic_t trapped;
spinlock_t netpoll_poll_lock = SPIN_LOCK_UNLOCKED;

#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);

static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			unsigned short ulen, u32 saddr, u32 daddr)
{
	if (uh->check == 0)
		return 0;

	if (skb->ip_summed == CHECKSUM_HW)
		return csum_tcpudp_magic(
			saddr, daddr, ulen, IPPROTO_UDP, skb->csum);

	skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	return csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
}

void netpoll_poll(struct netpoll *np)
{
	/*
	 * In cases where there is bi-directional communication, reading
	 * only one message at a time can lead to packets being dropped by
	 * the network adapter, forcing superfluous retries and possibly
	 * timeouts.  Thus, we set our budget to a more reasonable value.
	 */
	int budget = 16;
	unsigned long flags;

	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
		return;

	/* Process pending work on NIC */
	np->dev->poll_controller(np->dev);

	/* If scheduling is stopped, tickle NAPI bits */
	spin_lock_irqsave(&netpoll_poll_lock, flags);
	if (np->dev->poll &&
	    test_bit(__LINK_STATE_RX_SCHED, &np->dev->state)) {
		np->dev->netpoll_rx |= NETPOLL_RX_DROP;
		atomic_inc(&trapped);

		np->dev->poll(np->dev, &budget);

		atomic_dec(&trapped);
		np->dev->netpoll_rx &= ~NETPOLL_RX_DROP;
	}
	spin_unlock_irqrestore(&netpoll_poll_lock, flags);

	zap_completion_queue();
}
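/*
 * Annotation: while the driver's ->poll() runs above, the trapped
 * counter and the NETPOLL_RX_DROP bit in dev->netpoll_rx tell the
 * receive path (netpoll_rx(), later in this file on the next page of
 * this listing) to intercept or drop incoming packets rather than
 * hand them to the regular network stack, which may not be safe to
 * enter from netpoll contexts (console writes, dumps, the debugger).
 */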
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_list_lock, flags);
	while (nr_skbs < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		skb->next = skbs;
		skbs = skb;
		nr_skbs++;
	}
	spin_unlock_irqrestore(&skb_list_lock, flags);
}

/*
 * Free skbs queued for completion.  The completion queue is normally
 * drained by the NET_TX softirq, which may never run in the contexts
 * netpoll operates from, so it is flushed by hand here.
 */
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			__kfree_skb(skb);
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
{
	int once = 1, count = 0;
	unsigned long flags;
	struct sk_buff *skb = NULL;

	zap_completion_queue();
repeat:
	if (nr_skbs < MAX_SKBS)
		refill_skbs();

	skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb) {
		/* Fall back to the emergency pool; only unlink and count
		 * an skb if the pool is non-empty, otherwise we would
		 * dereference a NULL pointer here. */
		spin_lock_irqsave(&skb_list_lock, flags);
		skb = skbs;
		if (skb) {
			skbs = skb->next;
			skb->next = NULL;
			nr_skbs--;
		}
		spin_unlock_irqrestore(&skb_list_lock, flags);
	}

	if (!skb) {
		count++;
		if (once && (count == 1000000)) {
			printk("out of netpoll skbs!\n");
			once = 0;
		}
		netpoll_poll(np);
		goto repeat;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status;

repeat:
	if (!np || !np->dev || !netif_running(np->dev)) {
		__kfree_skb(skb);
		return;
	}

	spin_lock(&np->dev->xmit_lock);
	np->dev->xmit_lock_owner = smp_processor_id();

	/*
	 * network drivers do not expect to be called if the queue is
	 * stopped.
	 */
	if (netif_queue_stopped(np->dev)) {
		np->dev->xmit_lock_owner = -1;
		spin_unlock(&np->dev->xmit_lock);

		netpoll_poll(np);
		goto repeat;
	}

	status = np->dev->hard_start_xmit(skb, np->dev);
	np->dev->xmit_lock_owner = -1;
	spin_unlock(&np->dev->xmit_lock);

	/* transmit busy */
	if (status) {
		netpoll_poll(np);
		goto repeat;
	}
}

/*
 * The frame is built back to front: find_skb() reserves headroom for
 * all three headers, the payload lands at skb->data, and each
 * skb_push() then prepends one header (UDP, then IP, then Ethernet),
 * yielding eth | ip | udp | payload with no extra copies.
 */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	memcpy(skb->data, msg, len);
	skb->len += len;

	udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;

	iph = (struct iphdr *)skb_push(skb, sizeof(*iph));

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);

	eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->local_mac, 6);
	memcpy(eth->h_dest, np->remote_mac, 6);

	netpoll_send_skb(np, skb);
}
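/*
 * Annotation: a minimal usage sketch, modeled on how netconsole
 * (drivers/net/netconsole.c in this kernel tree) drives the interface
 * above.  It is illustrative only and belongs in a client module, not
 * in this file; the "example_" identifiers and field values are
 * placeholders.
 */
static struct netpoll example_np = {
	.name = "example",
	.dev_name = "eth0",	/* bound to a real device by netpoll_setup() */
	.local_port = 6665,
	.remote_port = 6666,
	.remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},	/* broadcast */
};

static void example_send(const char *msg, int len)
{
	/*
	 * netpoll_setup(&example_np) must have succeeded at init time;
	 * it resolves dev_name and fills in local_ip/local_mac from the
	 * device.  After that, messages can be sent from nearly any
	 * context, including the printk path.
	 */
	netpoll_send_udp(&example_np, msg, len);
}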
static void arp_reply(struct sk_buff *skb)
{
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	u32 sip, tip;
	struct sk_buff *send_skb;
	unsigned long flags;
	struct list_head *p;
	struct netpoll *np = NULL;

	spin_lock_irqsave(&rx_list_lock, flags);
	list_for_each(p, &rx_list) {
		np = list_entry(p, struct netpoll, rx_list);
		if (np->dev == skb->dev)
			break;
		np = NULL;
	}
	spin_unlock_irqrestore(&rx_list_lock, flags);

	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * skb->dev->addr_len) +
				 (2 * sizeof(u32)))))
		return;

	skb->h.raw = skb->nh.raw = skb->data;
	arp = skb->nh.arph;

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1) + skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4 + skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
		return;

	size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	send_skb->nh.raw = send_skb->data;
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */
	if (np->dev->hard_header &&
	    np->dev->hard_header(send_skb, skb->dev, ptype,
				 np->remote_mac, np->local_mac,
				 send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
