dev.c

static void netdev_wakeup(void)
{
	unsigned long xoff;

	spin_lock(&netdev_fc_lock);
	xoff = netdev_fc_xoff;
	netdev_fc_xoff = 0;
	while (xoff) {
		int i = ffz(~xoff);
		xoff &= ~(1<<i);
		netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
	}
	spin_unlock(&netdev_fc_lock);
}
#endif

static void get_sample_stats(int cpu)
{
#ifdef RAND_LIE
	unsigned long rd;
	int rq;
#endif
	int blog = softnet_data[cpu].input_pkt_queue.qlen;
	int avg_blog = softnet_data[cpu].avg_blog;

	avg_blog = (avg_blog >> 1) + (blog >> 1);

	if (avg_blog > mod_cong) {
		/* Above moderate congestion levels. */
		softnet_data[cpu].cng_level = NET_RX_CN_HIGH;
#ifdef RAND_LIE
		rd = net_random();
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			softnet_data[cpu].cng_level = NET_RX_DROP;
#endif
	} else if (avg_blog > lo_cong) {
		softnet_data[cpu].cng_level = NET_RX_CN_MOD;
#ifdef RAND_LIE
		rd = net_random();
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			softnet_data[cpu].cng_level = NET_RX_CN_HIGH;
#endif
	} else if (avg_blog > no_cong) 
		softnet_data[cpu].cng_level = NET_RX_CN_LOW;
	else  /* no congestion */
		softnet_data[cpu].cng_level = NET_RX_SUCCESS;

	softnet_data[cpu].avg_blog = avg_blog;
}

#ifdef OFFLINE_SAMPLE
static void sample_queue(unsigned long dummy)
{
/* 10 ms or 1 ms -- I don't care -- JHS */
	int next_tick = 1;
	int cpu = smp_processor_id();

	get_sample_stats(cpu);
	next_tick += jiffies;
	mod_timer(&samp_timer, next_tick);
}
#endif


/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the 
 *	protocol layers.
 *
 *	Return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_CN_LOW	(low congestion)
 *	NET_RX_CN_MOD	(moderate congestion)
 *	NET_RX_CN_HIGH	(high congestion)
 *	NET_RX_DROP	(packet was dropped)
 */

int netif_rx(struct sk_buff *skb)
{
	int this_cpu = smp_processor_id();
	struct softnet_data *queue;
	unsigned long flags;

	if (skb->stamp.tv_sec == 0)
		get_fast_time(&skb->stamp);

	/* The code is rearranged so that the path is shortest
	   when the CPU is congested but still operating.
	 */
	queue = &softnet_data[this_cpu];

	local_irq_save(flags);

	netdev_rx_stat[this_cpu].total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
			if (queue->throttle)
				goto drop;

enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue,skb);
			__cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
			local_irq_restore(flags);
#ifndef OFFLINE_SAMPLE
			get_sample_stats(this_cpu);
#endif
			return softnet_data[this_cpu].cng_level;
		}

		if (queue->throttle) {
			queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
			if (atomic_dec_and_test(&netdev_dropping))
				netdev_wakeup();
#endif
		}
		goto enqueue;
	}

	if (queue->throttle == 0) {
		queue->throttle = 1;
		netdev_rx_stat[this_cpu].throttled++;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		atomic_inc(&netdev_dropping);
#endif
	}

drop:
	netdev_rx_stat[this_cpu].dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
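
/*
 * Illustrative sketch (not part of dev.c): roughly how a NIC driver's
 * receive path would hand a freshly built skb to netif_rx() above and
 * interpret the returned congestion level.  The mydrv_rx_packet name and
 * the surrounding driver details are hypothetical; only netif_rx() and
 * its return codes come from the function above.
 */
#if 0	/* example only */
static void mydrv_rx_packet(struct net_device *dev, void *data, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (skb == NULL)
		return;				/* out of memory: drop */

	skb_reserve(skb, 2);			/* align the IP header */
	memcpy(skb_put(skb, len), data, len);
	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);

	if (netif_rx(skb) == NET_RX_DROP) {
		/* Backlog was full or throttled; netif_rx() has already
		 * freed the skb, the driver only gets the hint. */
	}
	/* The other return values (NET_RX_SUCCESS, NET_RX_CN_*) mean the
	 * skb was queued; they only report the current congestion level. */
}
#endif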

/* Deliver skb to an old protocol, which is not well threaded
   or which does not understand shared skbs.
 */
static int deliver_to_old_ones(struct packet_type *pt, struct sk_buff *skb, int last)
{
	static spinlock_t net_bh_lock = SPIN_LOCK_UNLOCKED;
	int ret = NET_RX_DROP;


	if (!last) {
		skb = skb_clone(skb, GFP_ATOMIC);
		if (skb == NULL)
			return ret;
	}

	/* The assumption (a correct one) is that old protocols
	   did not depend on BHs other than NET_BH and TIMER_BH.
	 */

	/* Emulate NET_BH with special spinlock */
	spin_lock(&net_bh_lock);

	/* Disable timers and wait for all timers completion */
	tasklet_disable(bh_task_vec+TIMER_BH);

	ret = pt->func(skb, skb->dev, pt);

	tasklet_enable(bh_task_vec+TIMER_BH);
	spin_unlock(&net_bh_lock);
	return ret;
}

/* Reparent skb to the master device. This function is called
 * only from net_rx_action under BR_NETPROTO_LOCK. This is a misuse
 * of BR_NETPROTO_LOCK, but it is OK for now.
 */
static __inline__ void skb_bond(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	
	if (dev->master) {
		dev_hold(dev->master);
		skb->dev = dev->master;
		dev_put(dev);
	}
}

static void net_tx_action(struct softirq_action *h)
{
	int cpu = smp_processor_id();

	if (softnet_data[cpu].completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = softnet_data[cpu].completion_queue;
		softnet_data[cpu].completion_queue = NULL;
		local_irq_enable();

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			BUG_TRAP(atomic_read(&skb->users) == 0);
			__kfree_skb(skb);
		}
	}

	if (softnet_data[cpu].output_queue) {
		struct net_device *head;

		local_irq_disable();
		head = softnet_data[cpu].output_queue;
		softnet_data[cpu].output_queue = NULL;
		local_irq_enable();

		while (head != NULL) {
			struct net_device *dev = head;
			head = head->next_sched;

			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);

			if (spin_trylock(&dev->queue_lock)) {
				qdisc_run(dev);
				spin_unlock(&dev->queue_lock);
			} else {
				netif_schedule(dev);
			}
		}
	}
}

/**
 *	net_call_rx_atomic
 *	@fn: function to call
 *
 *	Make a function call that is atomic with respect to the protocol
 *	layers.
 */
 
void net_call_rx_atomic(void (*fn)(void))
{
	br_write_lock_bh(BR_NETPROTO_LOCK);
	fn();
	br_write_unlock_bh(BR_NETPROTO_LOCK);
}
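
/*
 * Illustrative sketch (not part of dev.c): using net_call_rx_atomic() to
 * clear a private receive hook.  Because the callback runs with
 * BR_NETPROTO_LOCK held for writing, and net_rx_action() takes the read
 * side of that lock, no packet is being pushed through the protocol
 * layers while the hook changes.  The my_* names are hypothetical.
 */
#if 0	/* example only */
static void (*my_rx_hook)(struct sk_buff *skb);

static void my_clear_hook(void)
{
	my_rx_hook = NULL;
}

static void my_cleanup(void)
{
	net_call_rx_atomic(my_clear_hook);
	/* From here on, no softirq can still be using the old hook. */
}
#endif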

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
void (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
#endif

static int __inline__ handle_bridge(struct sk_buff *skb,
				     struct packet_type *pt_prev)
{
	int ret = NET_RX_DROP;

	if (pt_prev) {
		if (!pt_prev->data)
			ret = deliver_to_old_ones(pt_prev, skb, 0);
		else {
			atomic_inc(&skb->users);
			ret = pt_prev->func(skb, skb->dev, pt_prev);
		}
	}

	br_handle_frame_hook(skb);
	return ret;
}


#ifdef CONFIG_NET_DIVERT
static inline void handle_diverter(struct sk_buff *skb)
{
	/* if diversion is supported on device, then divert */
	if (skb->dev->divert && skb->dev->divert->divert)
		divert_frame(skb);
}
#endif   /* CONFIG_NET_DIVERT */


static void net_rx_action(struct softirq_action *h)
{
	int this_cpu = smp_processor_id();
	struct softnet_data *queue = &softnet_data[this_cpu];
	unsigned long start_time = jiffies;
	int budget = netdev_max_backlog;

	br_read_lock(BR_NETPROTO_LOCK);

	for (;;) {
		struct sk_buff *skb;
		struct net_device *rx_dev;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		local_irq_enable();

		if (skb == NULL)
			break;

		skb_bond(skb);

		rx_dev = skb->dev;

#ifdef CONFIG_NET_FASTROUTE
		if (skb->pkt_type == PACKET_FASTROUTE) {
			netdev_rx_stat[this_cpu].fastroute_deferred_out++;
			dev_queue_xmit(skb);
			dev_put(rx_dev);
			continue;
		}
#endif
		skb->h.raw = skb->nh.raw = skb->data;
		{
			struct packet_type *ptype, *pt_prev;
			unsigned short type = skb->protocol;

			pt_prev = NULL;
			for (ptype = ptype_all; ptype; ptype = ptype->next) {
				if (!ptype->dev || ptype->dev == skb->dev) {
					if (pt_prev) {
						if (!pt_prev->data) {
							deliver_to_old_ones(pt_prev, skb, 0);
						} else {
							atomic_inc(&skb->users);
							pt_prev->func(skb,
								      skb->dev,
								      pt_prev);
						}
					}
					pt_prev = ptype;
				}
			}

#ifdef CONFIG_NET_DIVERT
			if (skb->dev->divert && skb->dev->divert->divert)
				handle_diverter(skb);
#endif /* CONFIG_NET_DIVERT */

			
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
			if (skb->dev->br_port != NULL &&
			    br_handle_frame_hook != NULL) {
				handle_bridge(skb, pt_prev);
				dev_put(rx_dev);
				continue;
			}
#endif

			for (ptype = ptype_base[ntohs(type) & 15]; ptype; ptype = ptype->next) {
				if (ptype->type == type &&
				    (!ptype->dev || ptype->dev == skb->dev)) {
					if (pt_prev) {
						if (!pt_prev->data)
							deliver_to_old_ones(pt_prev, skb, 0);
						else {
							atomic_inc(&skb->users);
							pt_prev->func(skb,
								      skb->dev,
								      pt_prev);
						}
					}
					pt_prev = ptype;
				}
			}

			if (pt_prev) {
				if (!pt_prev->data)
					deliver_to_old_ones(pt_prev, skb, 1);
				else
					pt_prev->func(skb, skb->dev, pt_prev);
			} else
				kfree_skb(skb);
		}

		dev_put(rx_dev);

		if (budget-- < 0 || jiffies - start_time > 1)
			goto softnet_break;

#ifdef CONFIG_NET_HW_FLOWCONTROL
		if (queue->throttle &&
		    queue->input_pkt_queue.qlen < no_cong_thresh) {
			if (atomic_dec_and_test(&netdev_dropping)) {
				queue->throttle = 0;
				netdev_wakeup();
				goto softnet_break;
			}
		}
#endif

	}
	br_read_unlock(BR_NETPROTO_LOCK);

	local_irq_disable();
	if (queue->throttle) {
		queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		if (atomic_dec_and_test(&netdev_dropping))
			netdev_wakeup();
#endif
	}
	local_irq_enable();

	NET_PROFILE_LEAVE(softnet_process);
	return;

softnet_break:
	br_read_unlock(BR_NETPROTO_LOCK);

	local_irq_disable();
	netdev_rx_stat[this_cpu].time_squeeze++;
	__cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
	local_irq_enable();

	NET_PROFILE_LEAVE(softnet_process);
	return;
}

static gifconf_func_t * gifconf_list [NPROTO];

/**
 *	register_gifconf	-	register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
 
int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}
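
/*
 * Illustrative sketch (not part of dev.c): how an address family would
 * register its SIOCGIFCONF helper.  The myproto_* names and PF_MYPROTO
 * are hypothetical; the gifconf_func_t signature (device, user buffer,
 * remaining length) is assumed to match the 2.4 <linux/netdevice.h>
 * definition.
 */
#if 0	/* example only */
static int myproto_gifconf(struct net_device *dev, char *buf, int len)
{
	/* Write one struct ifreq per address this family owns on @dev into
	 * the user buffer and return the number of bytes used; when buf is
	 * NULL, just report how much space would be needed. */
	return 0;
}

static int __init myproto_init(void)
{
	return register_gifconf(PF_MYPROTO, myproto_gifconf);
}
#endif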


/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	match.  --pb
 */

static int dev_ifname(struct ifreq *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 *	Fetch the caller's info block. 
	 */
	
	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifr.ifr_ifindex);
	if (!dev) {
		read_unlock(&dev_base_lock);
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	read_unlock(&dev_base_lock);

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}
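
/*
 * Illustrative sketch (not part of dev.c): the user-space side of the
 * SIOCGIFNAME ioctl served by dev_ifname() above -- essentially what a
 * C library's if_indextoname() does.  Error handling is kept minimal.
 */
#if 0	/* example only, user space */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int index_to_name(int ifindex, char name[IFNAMSIZ])
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_ifindex = ifindex;
	if (ioctl(fd, SIOCGIFNAME, &ifr) < 0) {
		close(fd);
		return -1;
	}

	strncpy(name, ifr.ifr_name, IFNAMSIZ);
	close(fd);
	return 0;
}
#endif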

/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(char *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char *pos;
	int len;
	int total;
	int i;

	/*
	 *	Fetch the caller's info block. 
	 */
	
	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 *	Loop over the interfaces, and write an info block for each. 
	 */

	total = 0;
