/*
 * netdevice.h — excerpt from the Linux kernel source tree.
 * C header file, 1,506 lines total; this is page 1 of 3 of a web extraction.
 */
/*
 * NOTE(review): this chunk opens mid-function.  The lines below are the
 * tail of a subqueue-stop helper (presumably netif_stop_subqueue()); its
 * signature is on the previous page of this extraction — confirm there.
 */
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__LINK_STATE_XOFF,
		&dev->egress_subqueue[queue_index].state);
#endif
}

/**
 * __netif_subqueue_stopped - test status of subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit
 * queues.  Returns non-zero when the queue's XOFF bit is set; always 0
 * when CONFIG_NETDEVICES_MULTIQUEUE is not configured.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return test_bit(__LINK_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
#else
	return 0;
#endif
}

/* Convenience wrapper: test the subqueue this skb is mapped to. */
static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 * netif_wake_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit
 * queues.  Only reschedules the device when the queue was actually
 * stopped (i.e. test_and_clear_bit() cleared the XOFF bit).
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF,
			       &dev->egress_subqueue[queue_index].state))
		__netif_schedule(dev);
#endif
}

/**
 * netif_is_multiqueue - test if device has multiple transmit queues
 * @dev: network device
 *
 * Check if device has multiple transmit queues.
 * Always fails (returns 0) if CONFIG_NETDEVICES_MULTIQUEUE is not
 * configured.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
#else
	return 0;
#endif
}

/* Use this variant when it is known for sure that it
 * is executing from interrupt context.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * either from interrupt or non-interrupt context.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int netif_receive_skb(struct sk_buff *skb);
extern int dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned dev_get_flags(const struct net_device *);
extern int dev_change_flags(struct net_device *, unsigned);
extern int dev_change_name(struct net_device *, char *);
extern int dev_change_net_namespace(struct net_device *, struct net *,
				    const char *);
extern int dev_set_mtu(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *, struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);

extern int netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 * dev_put - release reference to device
 * @dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 * dev_hold - get reference to device
 * @dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */
extern void linkwatch_fire_event(struct net_device *dev);

/**
 * netif_carrier_ok - test if carrier present
 * @dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);
extern void netif_carrier_off(struct net_device *dev);

/**
 * netif_dormant_on - mark device as dormant.
 * @dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event. For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 * Only fires a linkwatch event on a 0 -> 1 transition of the bit.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant_off - set device as not dormant.
 * @dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant - test if device is dormant
 * @dev: network device
 *
 * Check if device is in the RFC2863 dormant state.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 * netif_oper_up - test if device is operational
 * @dev: network device
 *
 * Check if carrier is operational
 */
static inline int netif_oper_up(const struct net_device *dev) {
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 * netif_device_present - is device available or removed
 * @dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

/* Per-category tests against a driver's msg_enable bitmask. */
#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

/*
 * Map a driver "debug" module parameter to an msg_enable bitmask:
 * negative or out-of-range values select the caller's defaults,
 * 0 means silent, and N sets the lowest N bits.
 */
static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}

/* Test if receive needs to be scheduled but only if up */
static
inline int netif_rx_schedule_prep(struct net_device *dev,
				  struct napi_struct *napi)
{
	return napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_schedule(napi);
}

/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev,
				     struct napi_struct *napi)
{
	if (netif_rx_schedule_prep(dev, napi))
		__netif_rx_schedule(dev, napi);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
 * Returns 1 if the poll was rescheduled, 0 otherwise.
 */
static inline int netif_rx_reschedule(struct net_device *dev,
				      struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__netif_rx_schedule(dev, napi);
		return 1;
	}
	return 0;
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_complete(napi);
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev,
				     struct napi_struct *napi)
{
	unsigned long flags;

	local_irq_save(flags);
	__netif_rx_complete(dev, napi);
	local_irq_restore(flags);
}

/**
 * __netif_tx_lock - grab network device transmit lock
 * @dev: network device
 * @cpu: cpu number of lock owner
 *
 * Get network device transmit lock, recording @cpu as the lock owner.
 */
static inline void __netif_tx_lock(struct net_device *dev, int cpu)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = cpu;
}

static inline void netif_tx_lock(struct net_device *dev)
{
	__netif_tx_lock(dev, smp_processor_id());
}

/* As netif_tx_lock(), but also disables bottom halves. */
static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

/* Non-blocking attempt; returns non-zero when the lock was taken. */
static inline int netif_tx_trylock(struct net_device *dev)
{
	int ok = spin_trylock(&dev->_xmit_lock);
	if (likely(ok))
		dev->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	/* clear the owner before the lock is actually released */
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}

/* Devices with NETIF_F_LLTX do their own xmit locking; skip ours then. */
#define HARD_TX_LOCK(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(dev, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev) {				\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		netif_tx_unlock(dev);			\
	}						\
}

/* Stop the queue while holding the tx lock (BH-safe). */
static inline void netif_tx_disable(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
}

/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
					  void (*setup)(struct net_device *),
					  unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)

extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);

/* Functions used for secondary
unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int dev_mc_delete(struct net_device *dev, void *addr, int alen,
			 int all);
extern int dev_mc_add(struct net_device *dev, void *addr, int alen,
		      int newonly);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int __dev_addr_delete(struct dev_addr_list **list, int *count,
			     void *addr, int alen, int all);
extern int __dev_addr_add(struct dev_addr_list **list, int *count,
			  void *addr, int alen, int newonly);
extern void dev_set_promiscuity(struct net_device *dev, int inc);
extern void dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern void netdev_features_change(struct net_device *dev);

/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);

extern int netdev_max_backlog;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev,
			     struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);

#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif

/* rx skb timestamps */
extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);

static
inline int net_gso_ok(int features, int gso_type){ int feature = gso_type << NETIF_F_GSO_SHIFT; return (features & feature) == feature;}static inline int skb_gso_ok(struct sk_buff *skb, int features){ return net_gso_ok(features, skb_shinfo(skb)->gso_type);}static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb){ return skb_is_gso(skb) && (!skb_gso_ok(skb, dev->features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL));}/* On bonding slaves other than the currently active slave, suppress * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and * ARP on active-backup slaves with arp_validate enabled. */static inline int skb_bond_should_drop(struct sk_buff *skb){ struct net_device *dev = skb->dev; struct net_device *master = dev->master; if (master && (dev->priv_flags & IFF_SLAVE_INACTIVE)) { if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && skb->protocol == __constant_htons(ETH_P_ARP)) return 0; if (master->priv_flags & IFF_MASTER_ALB) { if (skb->pkt_type != PACKET_BROADCAST && skb->pkt_type != PACKET_MULTICAST) return 0; } if (master->priv_flags & IFF_MASTER_8023AD && skb->protocol == __constant_htons(ETH_P_SLOW)) return 0; return 1; } return 0;}#endif /* __KERNEL__ */#endif /* _LINUX_DEV_H */
/*
 * (Non-source residue from the web viewer, translated: keyboard shortcuts —
 * copy code Ctrl+C, search code Ctrl+F, full screen F11,
 * increase font Ctrl+=, decrease font Ctrl+-, show shortcuts ?.)
 */