
dev.c

From the source bundle of 《嵌入式系统设计与实例开发实验教材二》 (Embedded Systems Design and Example Development, Lab Textbook II): Linux kernel porting and compilation lab
Language: C
Page 1 of 5
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog = 300;

/* These numbers are selected based on intuition and some
 * experimentation, if you have more scientific way of doing this
 * please go ahead and fix things.
 */

int no_cong_thresh = 10;
int no_cong = 20;
int lo_cong = 100;
int mod_cong = 290;

struct netif_rx_stats netdev_rx_stat[NR_CPUS];

#ifdef CONFIG_NET_HW_FLOWCONTROL
atomic_t netdev_dropping = ATOMIC_INIT(0);
static unsigned long netdev_fc_mask = 1;
unsigned long netdev_fc_xoff = 0;
spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED;

static struct
{
	void (*stimul)(struct net_device *);
	struct net_device *dev;
} netdev_fc_slots[BITS_PER_LONG];

int netdev_register_fc(struct net_device *dev, void (*stimul)(struct net_device *dev))
{
	int bit = 0;
	unsigned long flags;

	spin_lock_irqsave(&netdev_fc_lock, flags);
	if (netdev_fc_mask != ~0UL) {
		bit = ffz(netdev_fc_mask);
		netdev_fc_slots[bit].stimul = stimul;
		netdev_fc_slots[bit].dev = dev;
		set_bit(bit, &netdev_fc_mask);
		clear_bit(bit, &netdev_fc_xoff);
	}
	spin_unlock_irqrestore(&netdev_fc_lock, flags);
	return bit;
}

void netdev_unregister_fc(int bit)
{
	unsigned long flags;

	spin_lock_irqsave(&netdev_fc_lock, flags);
	if (bit > 0) {
		netdev_fc_slots[bit].stimul = NULL;
		netdev_fc_slots[bit].dev = NULL;
		clear_bit(bit, &netdev_fc_mask);
		clear_bit(bit, &netdev_fc_xoff);
	}
	spin_unlock_irqrestore(&netdev_fc_lock, flags);
}

static void netdev_wakeup(void)
{
	unsigned long xoff;

	spin_lock(&netdev_fc_lock);
	xoff = netdev_fc_xoff;
	netdev_fc_xoff = 0;
	while (xoff) {
		int i = ffz(~xoff);
		xoff &= ~(1<<i);
		netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
	}
	spin_unlock(&netdev_fc_lock);
}
#endif

static void get_sample_stats(int cpu)
{
#ifdef RAND_LIE
	unsigned long rd;
	int rq;
#endif
	int blog = softnet_data[cpu].input_pkt_queue.qlen;
	int avg_blog = softnet_data[cpu].avg_blog;

	avg_blog = (avg_blog >> 1) + (blog >> 1);

	if (avg_blog > mod_cong) {
		/* Above moderate congestion levels. */
		softnet_data[cpu].cng_level = NET_RX_CN_HIGH;
#ifdef RAND_LIE
		rd = net_random();
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			softnet_data[cpu].cng_level = NET_RX_DROP;
#endif
	} else if (avg_blog > lo_cong) {
		softnet_data[cpu].cng_level = NET_RX_CN_MOD;
#ifdef RAND_LIE
		rd = net_random();
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			softnet_data[cpu].cng_level = NET_RX_CN_HIGH;
#endif
	} else if (avg_blog > no_cong)
		softnet_data[cpu].cng_level = NET_RX_CN_LOW;
	else  /* no congestion */
		softnet_data[cpu].cng_level = NET_RX_SUCCESS;

	softnet_data[cpu].avg_blog = avg_blog;
}

#ifdef OFFLINE_SAMPLE
static void sample_queue(unsigned long dummy)
{
/* 10 ms or 1 ms -- i dont care -- JHS */
	int next_tick = 1;
	int cpu = smp_processor_id();

	get_sample_stats(cpu);
	next_tick += jiffies;
	mod_timer(&samp_timer, next_tick);
}
#endif

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_CN_LOW	(low congestion)
 *	NET_RX_CN_MOD	(moderate congestion)
 *	NET_RX_CN_HIGH	(high congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	int this_cpu = smp_processor_id();
	struct softnet_data *queue;
	unsigned long flags;

	if (skb->stamp.tv_sec == 0)
		do_gettimeofday(&skb->stamp);

	/* The code is rearranged so that the path is the most
	   short when CPU is congested, but is still operating.
	 */
	queue = &softnet_data[this_cpu];

	local_irq_save(flags);

	netdev_rx_stat[this_cpu].total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
			if (queue->throttle)
				goto drop;

enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			/* Runs from irqs or BH's, no need to wake BH */
			cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
			local_irq_restore(flags);
#ifndef OFFLINE_SAMPLE
			get_sample_stats(this_cpu);
#endif
			return softnet_data[this_cpu].cng_level;
		}

		if (queue->throttle) {
			queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
			if (atomic_dec_and_test(&netdev_dropping))
				netdev_wakeup();
#endif
		}
		goto enqueue;
	}

	if (queue->throttle == 0) {
		queue->throttle = 1;
		netdev_rx_stat[this_cpu].throttled++;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		atomic_inc(&netdev_dropping);
#endif
	}

drop:
	netdev_rx_stat[this_cpu].dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}

/* Deliver skb to an old protocol, which is not threaded well
   or which do not understand shared skbs.
 */
static int deliver_to_old_ones(struct packet_type *pt, struct sk_buff *skb, int last)
{
	static spinlock_t net_bh_lock = SPIN_LOCK_UNLOCKED;
	int ret = NET_RX_DROP;

	if (!last) {
		skb = skb_clone(skb, GFP_ATOMIC);
		if (skb == NULL)
			return ret;
	}
	if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC) != 0) {
		kfree_skb(skb);
		return ret;
	}

	/* The assumption (correct one) is that old protocols
	   did not depend on BHs different of NET_BH and TIMER_BH.
	 */

	/* Emulate NET_BH with special spinlock */
	spin_lock(&net_bh_lock);

	/* Disable timers and wait for all timers completion */
	tasklet_disable(bh_task_vec+TIMER_BH);

	ret = pt->func(skb, skb->dev, pt);

	tasklet_hi_enable(bh_task_vec+TIMER_BH);
	spin_unlock(&net_bh_lock);
	return ret;
}

/* Reparent skb to master device. This function is called
 * only from net_rx_action under BR_NETPROTO_LOCK. It is misuse
 * of BR_NETPROTO_LOCK, but it is OK for now.
 */
static __inline__ void skb_bond(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (dev->master) {
		dev_hold(dev->master);
		skb->dev = dev->master;
		dev_put(dev);
	}
}

static void net_tx_action(struct softirq_action *h)
{
	int cpu = smp_processor_id();

	if (softnet_data[cpu].completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = softnet_data[cpu].completion_queue;
		softnet_data[cpu].completion_queue = NULL;
		local_irq_enable();

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			BUG_TRAP(atomic_read(&skb->users) == 0);
			__kfree_skb(skb);
		}
	}

	if (softnet_data[cpu].output_queue) {
		struct net_device *head;

		local_irq_disable();
		head = softnet_data[cpu].output_queue;
		softnet_data[cpu].output_queue = NULL;
		local_irq_enable();

		while (head != NULL) {
			struct net_device *dev = head;
			head = head->next_sched;

			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);

			if (spin_trylock(&dev->queue_lock)) {
				qdisc_run(dev);
				spin_unlock(&dev->queue_lock);
			} else {
				netif_schedule(dev);
			}
		}
	}
}

/**
 *	net_call_rx_atomic
 *	@fn: function to call
 *
 *	Make a function call that is atomic with respect to the protocol
 *	layers.
 */
void net_call_rx_atomic(void (*fn)(void))
{
	br_write_lock_bh(BR_NETPROTO_LOCK);
	fn();
	br_write_unlock_bh(BR_NETPROTO_LOCK);
}

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
void (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
#endif

static __inline__ int handle_bridge(struct sk_buff *skb,
				     struct packet_type *pt_prev)
{
	int ret = NET_RX_DROP;

	if (pt_prev) {
		if (!pt_prev->data)
			ret = deliver_to_old_ones(pt_prev, skb, 0);
		else {
			atomic_inc(&skb->users);
			ret = pt_prev->func(skb, skb->dev, pt_prev);
		}
	}

	br_handle_frame_hook(skb);
	return ret;
}

#ifdef CONFIG_NET_DIVERT
static inline void handle_diverter(struct sk_buff *skb)
{
	/* if diversion is supported on device, then divert */
	if (skb->dev->divert && skb->dev->divert->divert)
		divert_frame(skb);
}
#endif   /* CONFIG_NET_DIVERT */

static void net_rx_action(struct softirq_action *h)
{
	int this_cpu = smp_processor_id();
	struct softnet_data *queue = &softnet_data[this_cpu];
	unsigned long start_time = jiffies;
	int bugdet = netdev_max_backlog;

	br_read_lock(BR_NETPROTO_LOCK);

	for (;;) {
		struct sk_buff *skb;
		struct net_device *rx_dev;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		local_irq_enable();

		if (skb == NULL)
			break;

		skb_bond(skb);

		rx_dev = skb->dev;

#ifdef CONFIG_NET_FASTROUTE
		if (skb->pkt_type == PACKET_FASTROUTE) {
			netdev_rx_stat[this_cpu].fastroute_deferred_out++;
			dev_queue_xmit(skb);
			dev_put(rx_dev);
			continue;
		}
#endif
		skb->h.raw = skb->nh.raw = skb->data;
		{
			struct packet_type *ptype, *pt_prev;
			unsigned short type = skb->protocol;

			pt_prev = NULL;
			for (ptype = ptype_all; ptype; ptype = ptype->next) {
				if (!ptype->dev || ptype->dev == skb->dev) {
					if (pt_prev) {
						if (!pt_prev->data) {
							deliver_to_old_ones(pt_prev, skb, 0);
						} else {
							atomic_inc(&skb->users);
							pt_prev->func(skb,
								      skb->dev,
								      pt_prev);
						}
					}
					pt_prev = ptype;
				}
			}

#ifdef CONFIG_NET_DIVERT
			if (skb->dev->divert && skb->dev->divert->divert)
				handle_diverter(skb);
#endif /* CONFIG_NET_DIVERT */

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
			if (skb->dev->br_port != NULL &&
			    br_handle_frame_hook != NULL) {
				handle_bridge(skb, pt_prev);
				dev_put(rx_dev);
				continue;
			}
#endif

			for (ptype = ptype_base[ntohs(type) & 15]; ptype; ptype = ptype->next) {
				if (ptype->type == type &&
				    (!ptype->dev || ptype->dev == skb->dev)) {
					if (pt_prev) {
						if (!pt_prev->data)
							deliver_to_old_ones(pt_prev, skb, 0);
						else {
							atomic_inc(&skb->users);
							pt_prev->func(skb,
								      skb->dev,
								      pt_prev);
						}
					}
					pt_prev = ptype;
				}
			}

			if (pt_prev) {
				if (!pt_prev->data)
					deliver_to_old_ones(pt_prev, skb, 1);
				else
					pt_prev->func(skb, skb->dev, pt_prev);
			} else
				kfree_skb(skb);
		}

		dev_put(rx_dev);

		if (bugdet-- < 0 || jiffies - start_time > 1)
			goto softnet_break;

#ifdef CONFIG_NET_HW_FLOWCONTROL
	if (queue->throttle && queue->input_pkt_queue.qlen < no_cong_thresh) {
		if (atomic_dec_and_test(&netdev_dropping)) {
			queue->throttle = 0;
			netdev_wakeup();
			goto softnet_break;
		}
	}
#endif

	}
	br_read_unlock(BR_NETPROTO_LOCK);

	local_irq_disable();
	if (queue->throttle) {
		queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		if (atomic_dec_and_test(&netdev_dropping))
			netdev_wakeup();
#endif
	}
	local_irq_enable();

	NET_PROFILE_LEAVE(softnet_process);
	return;

softnet_break:
	br_read_unlock(BR_NETPROTO_LOCK);

	local_irq_disable();
	netdev_rx_stat[this_cpu].time_squeeze++;
	/* This already runs in BH context, no need to wake up BH's */
	cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
	local_irq_enable();

	NET_PROFILE_LEAVE(softnet_process);
	return;
}

static gifconf_func_t * gifconf_list [NPROTO];

/**
 *	register_gifconf	-	register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}

/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	match.  --pb
 */

static int dev_ifname(struct ifreq *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	read_lock(&dev_base_lock);
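Notes on the listing above (not part of dev.c):

get_sample_stats() estimates congestion with a shift-based running average of the per-CPU backlog, avg_blog = avg_blog/2 + qlen/2, and maps it onto the no_cong (20), lo_cong (100) and mod_cong (290) thresholds, against the default netdev_max_backlog of 300. The small user-space program below is an illustration only: it replays that update rule on a made-up sequence of queue lengths to show how the reported level ramps up. The threshold values are taken from the listing; the sample qlen sequence is invented.

#include <stdio.h>

enum { NO_CONG = 20, LO_CONG = 100, MOD_CONG = 290 };

static const char *cng_level(int avg_blog)
{
	/* same ordering of comparisons as get_sample_stats() */
	if (avg_blog > MOD_CONG)
		return "NET_RX_CN_HIGH";
	if (avg_blog > LO_CONG)
		return "NET_RX_CN_MOD";
	if (avg_blog > NO_CONG)
		return "NET_RX_CN_LOW";
	return "NET_RX_SUCCESS";
}

int main(void)
{
	int avg_blog = 0;
	int samples[] = { 0, 40, 120, 250, 300, 300, 300, 300 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		/* same update as dev.c: (avg >> 1) + (blog >> 1) */
		avg_blog = (avg_blog >> 1) + (samples[i] >> 1);
		printf("qlen=%3d  avg_blog=%3d  -> %s\n",
		       samples[i], avg_blog, cng_level(avg_blog));
	}
	return 0;
}

Because the average halves the previous estimate each step, a sustained backlog near netdev_max_backlog only crosses mod_cong after several consecutive samples, which is what keeps occasional bursts from being reported as high congestion.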
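The kernel-doc for netif_rx() describes the driver-facing contract: the driver hands over a fully built sk_buff and receives one of the NET_RX_* congestion codes. As a rough usage sketch, assuming a 2.4-era Ethernet driver whose receive interrupt has already pulled a frame out of the hardware, the flow would look roughly like the following; the function name example_rx and its arguments are invented for illustration and are not part of dev.c.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void example_rx(struct net_device *dev, void *data, unsigned int len)
{
	struct sk_buff *skb;
	int cng;

	skb = dev_alloc_skb(len + 2);
	if (skb == NULL)
		return;				/* out of memory: netif_rx never sees the frame */

	skb_reserve(skb, 2);			/* align the IP header on a 16-byte boundary */
	memcpy(skb_put(skb, len), data, len);	/* copy the frame out of the NIC buffer */
	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);

	cng = netif_rx(skb);			/* queue per-CPU, raise NET_RX_SOFTIRQ */
	if (cng == NET_RX_CN_HIGH || cng == NET_RX_DROP) {
		/* a driver could slow its receive ring here; many simply ignore this */
	}
}

The return value is advisory congestion feedback computed by get_sample_stats(); the hardware flow-control hooks (netdev_register_fc()/netdev_unregister_fc() above, under CONFIG_NET_HW_FLOWCONTROL) are a separate, optional mechanism.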
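The delivery loops in net_rx_action() walk two lists of struct packet_type handlers: ptype_all (taps that see every frame) and the ptype_base hash indexed by ntohs(type) & 15 (handlers for one ethertype). Handlers whose data field is NULL are treated as "old" protocols and routed through deliver_to_old_ones(). The sketch below shows, under the assumption of the standard 2.4 dev_add_pack()/dev_remove_pack() interface declared in linux/netdevice.h (registration itself is not on this page of dev.c), how such a handler would be registered from a hypothetical module; example_rcv and example_packet_type are invented names.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt)
{
	/* inspect the frame, then drop our reference */
	kfree_skb(skb);
	return 0;
}

static struct packet_type example_packet_type = {
	__constant_htons(ETH_P_ALL),	/* ETH_P_ALL -> the ptype_all tap list */
	NULL,				/* NULL dev: match every interface */
	example_rcv,
	(void *)1,			/* non-NULL data: handler copes with shared skbs */
	NULL,
};

static int __init example_init(void)
{
	dev_add_pack(&example_packet_type);
	return 0;
}

static void __exit example_exit(void)
{
	dev_remove_pack(&example_packet_type);
}

module_init(example_init);
module_exit(example_exit);

Using a specific ethertype such as __constant_htons(ETH_P_IP) instead of ETH_P_ALL would place the handler in the ptype_base hash bucket that the second loop in net_rx_action() searches.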
