
dev.c

Linux kernel source code, from the compressed archive of source code accompanying the book 《Linux内核》 (The Linux Kernel).

Language: C
Page 1 of 5
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. Not efficient for many devices, not called a lot. The caller
 *	must hold the dev_base or rtnl lock while allocating the name and
 *	adding the device in order to avoid duplicates. Returns the number
 *	of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	int i;
	char buf[32];

	/*
	 *	If you need over 100 please also fix the algorithm...
	 */
	for (i = 0; i < 100; i++) {
		sprintf(buf, name, i);
		if (__dev_get_by_name(buf) == NULL) {
			strcpy(dev->name, buf);
			return i;
		}
	}
	return -ENFILE;	/* Over 100 of the things .. bail out! */
}

/**
 *	dev_alloc - allocate a network device and name
 *	@name: name format string
 *	@err: error return pointer
 *
 *	Passed a format string, eg. "lt%d", it will allocate a network device
 *	and space for the name. %NULL is returned if no memory is available.
 *	If the allocation succeeds then the name is assigned and the
 *	device pointer returned. %NULL is returned if the name allocation
 *	failed. The cause of an error is returned as a negative errno code
 *	in the variable that @err points to.
 *
 *	The caller must hold the @dev_base or RTNL locks when doing this in
 *	order to avoid duplicate name allocations.
 */

struct net_device *dev_alloc(const char *name, int *err)
{
	struct net_device *dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
	if (dev == NULL) {
		*err = -ENOBUFS;
		return NULL;
	}
	memset(dev, 0, sizeof(struct net_device));
	*err = dev_alloc_name(dev, name);
	if (*err < 0) {
		kfree(dev);
		return NULL;
	}
	return dev;
}

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */

void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

#ifdef CONFIG_KMOD

/**
 *	dev_load - load a network module
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(const char *name)
{
	if (!dev_get(name) && capable(CAP_SYS_MODULE))
		request_module(name);
}

#else

extern inline void dev_load(const char *unused)
{
}

#endif

static int default_rebuild_header(struct sk_buff *skb)
{
	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
	       skb->dev ? skb->dev->name : "NULL!!!");
	kfree_skb(skb);
	return 1;
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */

int dev_open(struct net_device *dev)
{
	int ret = 0;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	if (try_inc_mod_count(dev->owner)) {
		if (dev->open) {
			ret = dev->open(dev);
			if (ret != 0 && dev->owner)
				__MOD_DEC_USE_COUNT(dev->owner);
		}
	} else {
		ret = -ENODEV;
	}

	/*
	 *	If it went open OK then:
	 */
	if (ret == 0) {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		set_bit(__LINK_STATE_START, &dev->state);

		/*
		 *	Initialize multicasting status
		 */
		dev_mc_upload(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return ret;
}
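For orientation, here is a minimal sketch of how a driver of this era might use dev_alloc() and the "%d" naming scheme, taking the RTNL lock as the kernel-doc above requires. Everything prefixed my_ is a hypothetical name, and register_netdevice() is assumed to be available as in contemporaneous 2.4 trees; this is an illustration, not kernel source.

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	static int my_open(struct net_device *dev) { return 0; }	/* stub */
	static int my_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		dev_kfree_skb(skb);					/* stub */
		return 0;
	}

	static struct net_device *my_dev;

	static int my_probe(void)
	{
		int err;

		rtnl_lock();		/* dev_alloc_name() requires the RTNL lock */
		my_dev = dev_alloc("mydev%d", &err);	/* picks mydev0, mydev1, ... */
		if (my_dev == NULL) {
			rtnl_unlock();
			return err;	/* negative errno left in err */
		}
		my_dev->open = my_open;
		my_dev->hard_start_xmit = my_xmit;
		err = register_netdevice(my_dev);	/* caller already holds RTNL */
		rtnl_unlock();
		return err;
	}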
#ifdef CONFIG_NET_FASTROUTE

static void dev_do_clear_fastroute(struct net_device *dev)
{
	if (dev->accept_fastpath) {
		int i;

		for (i = 0; i <= NETDEV_FASTROUTE_HMASK; i++) {
			struct dst_entry *dst;

			write_lock_irq(&dev->fastpath_lock);
			dst = dev->fastpath[i];
			dev->fastpath[i] = NULL;
			write_unlock_irq(&dev->fastpath_lock);

			dst_release(dst);
		}
	}
}

void dev_clear_fastroute(struct net_device *dev)
{
	if (dev) {
		dev_do_clear_fastroute(dev);
	} else {
		read_lock(&dev_base_lock);
		for (dev = dev_base; dev; dev = dev->next)
			dev_do_clear_fastroute(dev);
		read_unlock(&dev_base_lock);
	}
}
#endif

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */

int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for it while the device is still operating.
	 */
	notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);

	dev_deactivate(dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;
#ifdef CONFIG_NET_FASTROUTE
	dev_clear_fastroute(dev);
#endif

	/*
	 *	Tell people we are down
	 */
	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	/*
	 *	Drop the module refcount
	 */
	if (dev->owner)
		__MOD_DEC_USE_COUNT(dev->owner);

	return 0;
}

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netdev_chain, nb);
}

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netdev_chain, nb);
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	get_fast_time(&skb->stamp);

	br_read_lock(BR_NETPROTO_LOCK);
	for (ptype = ptype_all; ptype != NULL; ptype = ptype->next) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    ((struct sock *)ptype->data != skb->sk)) {
			struct sk_buff *skb2;

			if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
				break;

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb2->mac.raw = skb2->data;

			if (skb2->nh.raw < skb2->data || skb2->nh.raw >= skb2->tail) {
				if (net_ratelimit())
					printk(KERN_DEBUG "protocol %04x is buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb2->nh.raw = skb2->data;
				if (dev->hard_header)
					skb2->nh.raw += dev->hard_header_len;
			}

			skb2->h.raw = skb2->nh.raw;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype);
		}
	}
	br_read_unlock(BR_NETPROTO_LOCK);
}
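The netdev_chain used throughout these functions is the public extension point for watching interfaces come and go. A short sketch of subscribing to it follows; my_netdev_event and my_notifier are illustrative names, while the event codes are exactly the ones raised by dev_open() and dev_close() above.

	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/notifier.h>

	static int my_netdev_event(struct notifier_block *self,
				   unsigned long event, void *ptr)
	{
		struct net_device *dev = (struct net_device *)ptr;

		switch (event) {
		case NETDEV_UP:			/* sent at the end of dev_open() */
			printk(KERN_INFO "%s came up\n", dev->name);
			break;
		case NETDEV_GOING_DOWN:		/* sent first by dev_close() */
		case NETDEV_DOWN:		/* sent once the device is down */
			printk(KERN_INFO "%s went down\n", dev->name);
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block my_notifier = {
		notifier_call:	my_netdev_event,	/* 2.4-style initializer */
	};

	/* register_netdevice_notifier(&my_notifier) at init time;
	 * unregister_netdevice_notifier(&my_notifier) before unload. */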
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 */

int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct Qdisc *q;

	/* Grab device queue */
	spin_lock_bh(&dev->queue_lock);
	q = dev->qdisc;
	if (q->enqueue) {
		int ret = q->enqueue(skb, q);

		qdisc_run(dev);

		spin_unlock_bh(&dev->queue_lock);
		return ret == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : ret;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that xmit_lock protection is necessary here.
	   (f.e. loopback and IP tunnels are clean ignoring statistics counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id();

		if (dev->xmit_lock_owner != cpu) {
			spin_unlock(&dev->queue_lock);
			spin_lock(&dev->xmit_lock);
			dev->xmit_lock_owner = cpu;

			if (!netif_queue_stopped(dev)) {
				if (netdev_nit)
					dev_queue_xmit_nit(skb, dev);
				if (dev->hard_start_xmit(skb, dev) == 0) {
					dev->xmit_lock_owner = -1;
					spin_unlock_bh(&dev->xmit_lock);
					return 0;
				}
			}
			dev->xmit_lock_owner = -1;
			spin_unlock_bh(&dev->xmit_lock);
			if (net_ratelimit())
				printk(KERN_DEBUG "Virtual device %s asks to queue packet!\n",
				       dev->name);
			kfree_skb(skb);
			return -ENETDOWN;
		} else {
			/* Recursion is detected! It is possible, unfortunately */
			if (net_ratelimit())
				printk(KERN_DEBUG "Dead loop on virtual device %s, fix it urgently!\n",
				       dev->name);
		}
	}
	spin_unlock_bh(&dev->queue_lock);

	kfree_skb(skb);
	return -ENETDOWN;
}

/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog = 300;

/* These numbers are selected based on intuition and some
 * experimentation; if you have a more scientific way of doing this
 * please go ahead and fix things.
 */
int no_cong_thresh = 10;
int no_cong = 20;
int lo_cong = 100;
int mod_cong = 290;

struct netif_rx_stats netdev_rx_stat[NR_CPUS];
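As the kernel-doc for dev_queue_xmit() notes, the caller must fully build the frame first. A hedged sketch of the calling convention follows; my_protocol_send is a made-up name and the link-layer header is assumed to be in place already.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical protocol send path: the skb's device, priority and
	 * link-layer header must be set before dev_queue_xmit() is called.
	 * A zero return only means "queued", not "transmitted on the wire". */
	static int my_protocol_send(struct sk_buff *skb, struct net_device *dev)
	{
		skb->dev = dev;
		skb->priority = 0;		/* selects a qdisc band/class */
		return dev_queue_xmit(skb);	/* may still drop under congestion */
	}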
#ifdef CONFIG_NET_HW_FLOWCONTROL
atomic_t netdev_dropping = ATOMIC_INIT(0);
static unsigned long netdev_fc_mask = 1;
unsigned long netdev_fc_xoff = 0;
spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED;

static struct {
	void (*stimul)(struct net_device *);
	struct net_device *dev;
} netdev_fc_slots[32];

int netdev_register_fc(struct net_device *dev,
		       void (*stimul)(struct net_device *dev))
{
	int bit = 0;
	unsigned long flags;

	spin_lock_irqsave(&netdev_fc_lock, flags);
	if (netdev_fc_mask != ~0UL) {
		bit = ffz(netdev_fc_mask);
		netdev_fc_slots[bit].stimul = stimul;
		netdev_fc_slots[bit].dev = dev;
		set_bit(bit, &netdev_fc_mask);
		clear_bit(bit, &netdev_fc_xoff);
	}
	spin_unlock_irqrestore(&netdev_fc_lock, flags);
	return bit;
}

void netdev_unregister_fc(int bit)
{
	unsigned long flags;

	spin_lock_irqsave(&netdev_fc_lock, flags);
	if (bit > 0) {
		netdev_fc_slots[bit].stimul = NULL;
		netdev_fc_slots[bit].dev = NULL;
		clear_bit(bit, &netdev_fc_mask);
		clear_bit(bit, &netdev_fc_xoff);
	}
	spin_unlock_irqrestore(&netdev_fc_lock, flags);
}

static void netdev_wakeup(void)
{
	unsigned long xoff;

	spin_lock(&netdev_fc_lock);
	xoff = netdev_fc_xoff;
	netdev_fc_xoff = 0;
	while (xoff) {
		int i = ffz(~xoff);
		xoff &= ~(1 << i);
		netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
	}
	spin_unlock(&netdev_fc_lock);
}
#endif

static void get_sample_stats(int cpu)
{
#ifdef RAND_LIE
	unsigned long rd;
	int rq;
#endif
	int blog = softnet_data[cpu].input_pkt_queue.qlen;
	int avg_blog = softnet_data[cpu].avg_blog;

	avg_blog = (avg_blog >> 1) + (blog >> 1);

	if (avg_blog > mod_cong) {
		/* Above moderate congestion levels. */
		softnet_data[cpu].cng_level = NET_RX_CN_HIGH;
#ifdef RAND_LIE
		rd = net_random();
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			softnet_data[cpu].cng_level = NET_RX_DROP;
#endif
	} else if (avg_blog > lo_cong) {
		softnet_data[cpu].cng_level = NET_RX_CN_MOD;
#ifdef RAND_LIE
		rd = net_random();
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			softnet_data[cpu].cng_level = NET_RX_CN_HIGH;
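Page 1 of the listing breaks off inside get_sample_stats(). The averaging step already visible is an exponentially weighted moving average with weight 1/2, compared against the lo_cong/mod_cong thresholds declared above. The following standalone userspace illustration shows how that average tracks the instantaneous backlog with the default thresholds; it is an editor's sketch, not kernel code, and only the two congestion levels visible on this page are mapped.

	#include <stdio.h>

	int main(void)
	{
		int lo_cong = 100, mod_cong = 290;		/* defaults from above */
		int samples[] = { 0, 60, 180, 320, 320, 40 };	/* backlog lengths */
		int avg = 0;
		unsigned i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
			/* same EWMA as get_sample_stats(): new = old/2 + sample/2 */
			avg = (avg >> 1) + (samples[i] >> 1);
			printf("backlog=%3d avg=%3d -> %s\n", samples[i], avg,
			       avg > mod_cong ? "NET_RX_CN_HIGH" :
			       avg > lo_cong  ? "NET_RX_CN_MOD"  : "uncongested");
		}
		return 0;
	}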
