netdevice.h
						   struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
	int			(*hard_header_parse)(struct sk_buff *skb, unsigned char *haddr);
	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
	int			(*accept_fastpath)(struct net_device *, struct dst_entry *);

	/* open/release and usage marking */
	struct module		*owner;

	/* bridge stuff */
	struct net_bridge_port	*br_port;

#ifdef CONFIG_NET_FASTROUTE
#define NETDEV_FASTROUTE_HMASK 0xF
	/* Semi-private data. Keep it at the end of device struct. */
	rwlock_t		fastpath_lock;
	struct dst_entry	*fastpath[NETDEV_FASTROUTE_HMASK+1];
#endif
#ifdef CONFIG_NET_DIVERT
	/* this will get initialized at each interface type init routine */
	struct divert_blk	*divert;
#endif /* CONFIG_NET_DIVERT */
};

struct packet_type
{
	unsigned short		type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here */
	int			(*func) (struct sk_buff *, struct net_device *,
					 struct packet_type *);
	void			*data;	/* Private to the packet type */
	struct packet_type	*next;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern struct net_device	loopback_dev;	/* The loopback */
extern struct net_device	*dev_base;	/* All devices */
extern rwlock_t			dev_base_lock;	/* Device list lock */

extern int		netdev_boot_setup_add(char *name, struct ifmap *map);
extern int		netdev_boot_setup_check(struct net_device *dev);
extern struct net_device	*dev_getbyhwaddr(unsigned short type, char *hwaddr);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern int		dev_get(const char *name);
extern struct net_device	*dev_get_by_name(const char *name);
extern struct net_device	*__dev_get_by_name(const char *name);
extern struct net_device	*dev_alloc(const char *name, int *err);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern int		unregister_netdevice(struct net_device *dev);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		dev_new_index(void);
extern struct net_device	*dev_get_by_index(int ifindex);
extern struct net_device	*__dev_get_by_index(int ifindex);
extern int		dev_restart(struct net_device *dev);

typedef int gifconf_func_t(struct net_device * dev, char * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, 0);
}
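/*
 * Illustrative sketch (not part of the original header): how a protocol
 * module might hook into the receive path with the struct packet_type and
 * dev_add_pack()/dev_remove_pack() declared above.  The handler name and
 * init/exit helpers are hypothetical; ETH_P_ALL comes from <linux/if_ether.h>
 * and kfree_skb() from <linux/skbuff.h>.
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt)
{
	/* look at skb->data here, then consume the buffer */
	kfree_skb(skb);
	return 0;
}

static struct packet_type example_pt;

static void example_proto_init(void)
{
	example_pt.type = htons(ETH_P_ALL);	/* match every incoming frame */
	example_pt.dev  = NULL;			/* NULL = any device */
	example_pt.func = example_rcv;
	dev_add_pack(&example_pt);		/* undone later with dev_remove_pack(&example_pt) */
}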
/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	int			throttle;
	int			cng_level;
	int			avg_blog;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct net_device	*output_queue;
	struct sk_buff		*completion_queue;

	struct net_device	blog_dev;	/* Sorry. 8) */
} ____cacheline_aligned;

extern struct softnet_data softnet_data[NR_CPUS];

#define HAVE_NETIF_QUEUE

static inline void __netif_schedule(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		unsigned long flags;
		int cpu = smp_processor_id();

		local_irq_save(flags);
		dev->next_sched = softnet_data[cpu].output_queue;
		softnet_data[cpu].output_queue = dev;
		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}

static inline void netif_schedule(struct net_device *dev)
{
	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

static inline void netif_start_queue(struct net_device *dev)
{
	clear_bit(__LINK_STATE_XOFF, &dev->state);
}

static inline void netif_wake_queue(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

static inline void netif_stop_queue(struct net_device *dev)
{
	set_bit(__LINK_STATE_XOFF, &dev->state);
}

static inline int netif_queue_stopped(struct net_device *dev)
{
	return test_bit(__LINK_STATE_XOFF, &dev->state);
}

static inline int netif_running(struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/* Use this variant when it is known for sure that it
 * is executing from interrupt context.
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		int cpu = smp_processor_id();
		unsigned long flags;

		local_irq_save(flags);
		skb->next = softnet_data[cpu].completion_queue;
		softnet_data[cpu].completion_queue = skb;
		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}

/* Use this variant in places where it could be invoked
 * either from interrupt or non-interrupt context.
 */
static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern int		dev_ioctl(unsigned int cmd, void *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
extern void		dev_init(void);

extern int		netdev_nit;

/* Post buffer to the network code from _non interrupt_ context.
 * see net/core/dev.c for netif_rx description.
 */
static inline int netif_rx_ni(struct sk_buff *skb)
{
	int err = netif_rx(skb);
	if (softirq_pending(smp_processor_id()))
		do_softirq();
	return err;
}

static inline void dev_init_buffers(struct net_device *dev)
{
	/* WILL BE REMOVED IN 2.5.0 */
}

extern int netdev_finish_unregister(struct net_device *dev);

static inline void dev_put(struct net_device *dev)
{
	if (atomic_dec_and_test(&dev->refcnt))
		netdev_finish_unregister(dev);
}

#define __dev_put(dev) atomic_dec(&(dev)->refcnt)
#define dev_hold(dev) atomic_inc(&(dev)->refcnt)

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is caller
 * who is responsible for serialization of these calls.
 */
static inline int netif_carrier_ok(struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

static inline void netif_carrier_on(struct net_device *dev)
{
	clear_bit(__LINK_STATE_NOCARRIER, &dev->state);
	if (netif_running(dev))
		__netdev_watchdog_up(dev);
}

static inline void netif_carrier_off(struct net_device *dev)
{
	set_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
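/*
 * Illustrative sketch (not part of the original header): the usual way a
 * driver pairs netif_stop_queue()/netif_wake_queue() with dev_kfree_skb_irq().
 * struct example_priv and the example_* ring helpers are hypothetical; only
 * the netif_*_queue helpers and dev_kfree_skb_irq() above are real.
 */
struct example_priv;	/* hypothetical per-device state, hung off dev->priv */
extern void example_queue_for_dma(struct example_priv *, struct sk_buff *);
extern int  example_tx_ring_full(struct example_priv *);
extern struct sk_buff *example_reap_completed(struct example_priv *);

static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = (struct example_priv *) dev->priv;

	example_queue_for_dma(priv, skb);	/* hand the buffer to the hardware */
	if (example_tx_ring_full(priv))
		netif_stop_queue(dev);		/* tell the core to stop feeding us */
	return 0;
}

static void example_tx_done_irq(struct net_device *dev)
{
	struct example_priv *priv = (struct example_priv *) dev->priv;
	struct sk_buff *skb;

	while ((skb = example_reap_completed(priv)) != NULL)
		dev_kfree_skb_irq(skb);		/* safe in hard-irq context */
	if (netif_queue_stopped(dev) && !example_tx_ring_full(priv))
		netif_wake_queue(dev);		/* restart the tx queue */
}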
/* Hot-plugging. */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

static inline void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}

static inline void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

/* Schedule rx intr now? */
static inline int netif_rx_schedule_prep(struct net_device *dev)
{
	return netif_running(dev) &&
	       !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	local_irq_save(flags);
	dev_hold(dev);
	list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
	if (dev->quota < 0)
		dev->quota += dev->weight;
	else
		dev->quota = dev->weight;
	__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}

/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev)
{
	if (netif_rx_schedule_prep(dev))
		__netif_rx_schedule(dev);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
 * Do not inline this?
 */
static inline int netif_rx_reschedule(struct net_device *dev, int undo)
{
	if (netif_rx_schedule_prep(dev)) {
		unsigned long flags;
		int cpu = smp_processor_id();

		dev->quota += undo;

		local_irq_save(flags);
		list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
		__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
		local_irq_restore(flags);
		return 1;
	}
	return 0;
}
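/*
 * Illustrative sketch (not part of the original header): the interrupt-handler
 * half of the polling receive path built on the helpers above.  The hardware
 * mask helper is hypothetical; netif_rx_schedule_prep() and
 * __netif_rx_schedule() are the real primitives declared above, and rx
 * interrupts stay masked until dev->poll() calls netif_rx_complete() (below).
 */
extern void example_disable_rx_irq(struct net_device *dev);	/* hypothetical */

static void example_rx_interrupt(struct net_device *dev)
{
	if (netif_rx_schedule_prep(dev)) {
		example_disable_rx_irq(dev);	/* stop further rx interrupts */
		__netif_rx_schedule(dev);	/* queue dev on this cpu's poll list */
	}
}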
/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!test_bit(__LINK_STATE_RX_SCHED, &dev->state))
		BUG();
	list_del(&dev->poll_list);
	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
	local_irq_restore(flags);
}

/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void		ether_setup(struct net_device *dev);
extern void		fddi_setup(struct net_device *dev);
extern void		tr_setup(struct net_device *dev);
extern void		fc_setup(struct net_device *dev);
extern void		fc_freedev(struct net_device *dev);

/* Support for loadable net-drivers */
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);

/* Functions used for multicast support */
extern void		dev_mc_upload(struct net_device *dev);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern void		dev_mc_discard(struct net_device *dev);
extern void		dev_set_promiscuity(struct net_device *dev, int inc);
extern void		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);

/* Load a device via the kmod */
extern void		dev_load(const char *name);

extern void		dev_mcast_init(void);

extern int		netdev_register_fc(struct net_device *dev, void (*stimul)(struct net_device *dev));
extern void		netdev_unregister_fc(int bit);
extern int		netdev_max_backlog;
extern int		weight_p;
extern unsigned long	netdev_fc_xoff;
extern atomic_t		netdev_dropping;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern struct sk_buff *	skb_checksum_help(struct sk_buff *skb);

#ifdef CONFIG_NET_FASTROUTE
extern int		netdev_fastroute;
extern int		netdev_fastroute_obstacles;
extern void		dev_clear_fastroute(struct net_device *dev);
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_DEV_H */
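/*
 * Illustrative sketch appended to this listing (not part of netdevice.h):
 * the dev->poll() half of the receive path, assuming the two-argument poll()
 * interface these helpers are designed for.  The example_* ring helpers are
 * hypothetical; netif_receive_skb(), dev->quota and netif_rx_complete() come
 * from the declarations above.
 */
extern struct sk_buff *example_next_rx_skb(struct net_device *dev);	/* hypothetical */
extern int  example_rx_ring_empty(struct net_device *dev);		/* hypothetical */
extern void example_enable_rx_irq(struct net_device *dev);		/* hypothetical */

static int example_poll(struct net_device *dev, int *budget)
{
	int done = 0;
	int limit = *budget;
	struct sk_buff *skb;

	if (limit > dev->quota)
		limit = dev->quota;

	while (done < limit && (skb = example_next_rx_skb(dev)) != NULL) {
		netif_receive_skb(skb);		/* feed the stack outside hard-irq context */
		done++;
	}
	*budget -= done;
	dev->quota -= done;

	if (example_rx_ring_empty(dev)) {
		netif_rx_complete(dev);		/* off the poll list... */
		example_enable_rx_irq(dev);	/* ...and let the hardware interrupt again */
		return 0;			/* done: quota not exhausted */
	}
	return 1;				/* more work: stay on the poll list */
}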