
📄 dev.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare to death, when device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	dev_deactivate(dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of it's
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 * Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}

static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked into the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
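/*
 * Usage sketch (not part of the original dev.c): a minimal notifier block
 * that logs NETDEV_UP/NETDEV_DOWN events, registered through the
 * register_netdevice_notifier() API above. The names example_netdev_event
 * and example_netdev_nb are hypothetical; such code would normally live in
 * a separate module that calls these exported functions.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* in this kernel, ptr is the net_device */

	switch (event) {
	case NETDEV_UP:
		printk(KERN_INFO "example: %s is up\n", dev->name);
		break;
	case NETDEV_DOWN:
		printk(KERN_INFO "example: %s is down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* In the hypothetical module's init/exit paths:
 *	register_netdevice_notifier(&example_netdev_nb);
 *	unregister_netdevice_notifier(&example_netdev_nb);
 */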
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

void __netif_schedule(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		unsigned long flags;
		struct softnet_data *sd;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		dev->next_sched = sd->output_queue;
		sd->output_queue = dev;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);

/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
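/*
 * Usage sketch (not part of the original dev.c): how a PCI network driver
 * might pair netif_device_detach()/netif_device_attach() around its
 * suspend/resume callbacks. The callback names example_suspend and
 * example_resume are hypothetical, and hardware power handling plus error
 * paths are omitted.
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stops the queue if the device was running */
	/* ... put the hardware to sleep ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... re-initialise the hardware ... */
	netif_device_attach(dev);	/* restarts the queue and watchdog if running */
	return 0;
}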
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	BUG_ON(skb_shinfo(skb)->frag_list);

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */
static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}
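/*
 * Usage sketch (not part of the original dev.c): a driver whose hardware
 * cannot compute checksums for a particular frame can fall back to software
 * with skb_checksum_help() before queueing it for DMA. example_can_csum()
 * stands in for a hypothetical per-device capability check, and
 * example_xmit_prepare() is a hypothetical helper, not a real API.
 */
static int example_xmit_prepare(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && !example_can_csum(dev, skb)) {
		/* Fill in the checksum in the packet; downgrades to CHECKSUM_NONE. */
		if (skb_checksum_help(skb))
			return -EINVAL;	/* caller should drop the frame */
	}
	return 0;
}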
/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (unlikely(IS_ERR(segs)))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return dev->hard_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = dev->hard_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely((netif_queue_stopped(dev) ||
			     netif_subqueue_stopped(dev, skb)) &&
			     skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);
