📄 netdevice.h
字号:
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev);

extern void		dev_init(void);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/* Drop a reference taken with dev_hold(). The caller must hold a
 * reference for the device to remain valid across the call. */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/* Take a reference on a device, preventing it from being freed. */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/* Test whether the lower layer (link) is up (i.e. NOCARRIER not set). */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

/* Mark the device dormant; fires a linkwatch event only on the
 * 0 -> 1 transition of the DORMANT bit. */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/* Clear the dormant state; fires a linkwatch event only on the
 * 1 -> 0 transition of the DORMANT bit. */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/* Test whether the device is marked dormant. */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/* RFC2863 operational state is UP (UNKNOWN accepted for backward compat). */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/* Hot-plugging. */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

/* Per-category tests of a driver private struct's msg_enable mask. */
#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

/* Turn a driver's "debug" module parameter into an msg_enable mask:
 * negative/out-of-range -> use the driver's default bits, 0 -> silent,
 * otherwise enable the low debug_value bits. */
static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}

/* Test if receive needs to be scheduled */
static inline int __netif_rx_schedule_prep(struct net_device *dev)
{
	return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev)
{
	return netif_running(dev) && __netif_rx_schedule_prep(dev);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
extern void __netif_rx_schedule(struct net_device *dev);

/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev)
{
	if (netif_rx_schedule_prep(dev))
		__netif_rx_schedule(dev);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
 * Do not inline this?
 */
static inline int netif_rx_reschedule(struct net_device *dev, int undo)
{
	if (netif_rx_schedule_prep(dev)) {
		unsigned long flags;

		/* give back the unconsumed quota before re-queuing */
		dev->quota += undo;

		local_irq_save(flags);
		list_add_tail(&dev->poll_list,
			      &__get_cpu_var(softnet_data).poll_list);
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		local_irq_restore(flags);
		return 1;
	}
	return 0;
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
	list_del(&dev->poll_list);
	/* barrier pairs with test_and_set_bit in __netif_rx_schedule_prep */
	smp_mb__before_clear_bit();
	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
	local_irq_restore(flags);
}

/* Block new polls by holding the RX_SCHED bit; spins (sleeping 1 tick
 * per attempt) until any in-progress poll gives it up. */
static inline void netif_poll_disable(struct net_device *dev)
{
	while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state))
		/* No hurry. */
		schedule_timeout_interruptible(1);
}

/* Re-allow polling after netif_poll_disable(). */
static inline void netif_poll_enable(struct net_device *dev)
{
	smp_mb__before_clear_bit();
	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev)
{
	BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
	list_del(&dev->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/* Acquire the device xmit lock and record the owning cpu
 * (owner is used elsewhere for deadlock/recursion detection). */
static inline void netif_tx_lock(struct net_device *dev)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

/* As netif_tx_lock(), but also disables bottom halves. */
static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

/* Try to acquire the xmit lock; on success record the owning cpu.
 * Returns nonzero if the lock was taken. */
static inline int netif_tx_trylock(struct net_device *dev)
{
	int ok = spin_trylock(&dev->_xmit_lock);
	if (likely(ok))
		dev->xmit_lock_owner = smp_processor_id();
	return ok;
}

/* Clear the owner (-1 = none) before dropping the xmit lock. */
static inline void netif_tx_unlock(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}

/* Stop the tx queue under the xmit lock so no transmit can race in. */
static inline void netif_tx_disable(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
}

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev(int sizeof_priv, const char *name,
				       void (*setup)(struct net_device *));
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);

/* Functions used for multicast support */
extern void		dev_mc_upload(struct net_device *dev);
extern int		dev_mc_delete(struct net_device *dev, void *addr,
				      int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr,
				   int alen, int newonly);
extern void		dev_mc_discard(struct net_device *dev);
extern void		dev_set_promiscuity(struct net_device *dev, int inc);
extern void		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);

/* Load a device via the kmod */
extern void		dev_load(const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev,
					  struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif

/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern void linkwatch_run_queue(void);

/* True if every GSO feature bit requested by gso_type is present in
 * the device feature mask (gso_type shifted into NETIF_F_GSO_* space). */
static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

/* True if the device features cover this skb's GSO type. */
static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

/* True if the skb must be software-segmented before handing to dev:
 * it is GSO and either the device can't do that GSO type or the
 * checksum is not left partial for hardware to finish. */
static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		/* let ARP through when the slave needs it for validation */
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		/* alb mode: only unicast passes on inactive slaves */
		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		/* 802.3ad: LACPDU (ETH_P_SLOW) must always pass */
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}

#endif /* __KERNEL__ */

#endif /* _LINUX_DEV_H */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -