netdevice.h

来自「linux 内核源代码」· C头文件 代码 · 共 1,506 行 · 第 1/3 页

H
1,506
字号
/* NOTE(review): this chunk begins inside the NETIF_F_* feature-flag block and
 * the body of struct net_device — the struct's opening declaration is on a
 * preceding page of this capture and is not visible here. */

#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */
#define NETIF_F_LRO		32768	/* large receive offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

	/* Checksum-offload feature groupings. */
#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	struct net_device	*next_sched;	/* NOTE(review): next device on the
						   qdisc scheduling list — confirm
						   against __netif_schedule() */

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	/* Per-device statistics: either fetched via the driver's get_stats()
	 * hook or taken from the embedded stats structure. */
	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	/* ethtool ioctl handlers supplied by the driver. */
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
        unsigned short          priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short          dev_id;		/* for shared network cards */

	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts	*/
	int			uc_promisc;	/* NOTE(review): presumably a flag/count
						   for promisc forced by ucast overflow —
						   verify against dev_set_rx_mode() */
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	int			promiscuity;	/* NOTE(review): looks like a reference
						   count for promiscuous mode — confirm */
	int			allmulti;

	/* Protocol specific pointers */

	void 			*atalk_ptr;	/* AppleTalk link 	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void                    *dn_ptr;        /* DECnet specific data */
	void                    *ip6_ptr;       /* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							because most packets are unicast) */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

/*
 * Cache line mostly used on queue transmit path (qdisc)
 */
	/* device queue lock */
	spinlock_t		queue_lock ____cacheline_aligned_in_smp;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */

	/* Partially transmitted GSO packet. */
	struct sk_buff		*gso_skb;

	/* ingress path synchronizer */
	spinlock_t		ingress_lock;
	struct Qdisc		*qdisc_ingress;

/*
 * One part is mostly used on xmit path (device)
 */
	/* hard_start_xmit synchronizer */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	/* cpu id of processor entered to hard_start_xmit or -1,
	   if nobody entered there.
	 */
	int			xmit_lock_owner;
	void			*priv;	/* pointer to private data	*/
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

/*
 * refcnt is a very hot point, so align it on SMP
 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines.	*/
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_VALIDATE_ADDR
	int			(*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	/* VLAN acceleration hooks. */
	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void                    (*poll_controller)(struct net_device *dev);
#endif

	/* Network namespace this network device is inside */
	struct net		*nd_net;

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group  *sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* The TX queue control structures */
	unsigned int			egress_subqueue_count;
	struct net_device_subqueue	egress_subqueue[1];
};

/* Map an embedded struct device back to its containing net_device. */
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32
#define	NETDEV_ALIGN_CONST	\
(NETDEV_ALIGN - 1)

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return dev->priv;
}

/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
static inline void netif_napi_add(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->poll = poll;
	napi->weight = weight;
#ifdef CONFIG_NETPOLL
	napi->dev = dev;
	list_add(&napi->dev_list, &dev->napi_list);
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	/* Start with the context marked scheduled; it becomes usable once
	 * the owner calls napi_enable()/napi_complete() machinery. */
	set_bit(NAPI_STATE_SCHED, &napi->state);
}

/* Per-protocol packet handler registered via dev_add_pack(). */
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t				dev_base_lock;		/* Device list lock */

/* Iterate over the per-namespace device list (hold dev_base_lock or RTNL). */
#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)

#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

/* Return the device after @dev in its namespace's list, or NULL at the end. */
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev->nd_net;
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

/* Return the first device in @net's list, or NULL if the list is empty. */
static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

extern int 			netdev_boot_setup_check(struct net_device *dev);
extern unsigned long		netdev_boot_base(const char *prefix, int unit);
extern struct net_device    *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);
extern struct net_device	*dev_get_by_flags(struct net *net, unsigned short flags,
						  unsigned short mask);
extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int 		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif

/* Build the link-layer header via the device's header_ops; returns 0 when the
 * device has no create hook. */
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

/* Extract the source hardware address from @skb into @haddr via header_ops;
 * returns 0 when the device has no parse hook. */
static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
/* Unregistering is just registering a NULL handler for the family. */
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct net_device	*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data,softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct net_device *dev);

/* Schedule the qdisc for @dev unless its queue is flow-stopped (XOFF). */
static inline void netif_schedule(struct net_device *dev)
{
	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	clear_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
	/* Under a netpoll trap, just clear XOFF without scheduling the qdisc. */
	if (netpoll_trap()) {
		clear_bit(__LINK_STATE_XOFF, &dev->state);
		return;
	}
#endif
	/* Only reschedule if we actually transitioned from stopped to running. */
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	set_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?