⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 kcompat.h

📁 Click is a modular router toolkit. To use it you'll need to know how to compile and install the sof
💻 H
📖 第 1 页 / 共 2 页
字号:
#define pci_set_dma_mask _kc_pci_set_dma_maskextern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);#endif#ifndef pci_request_regions#define pci_request_regions _kc_pci_request_regionsextern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);#endif#ifndef pci_release_regions#define pci_release_regions _kc_pci_release_regionsextern void _kc_pci_release_regions(struct pci_dev *pdev);#endif/**************************************//* NETWORK DRIVER API */#ifndef alloc_etherdev#define alloc_etherdev _kc_alloc_etherdevextern struct net_device * _kc_alloc_etherdev(int sizeof_priv);#endif#ifndef is_valid_ether_addr#define is_valid_ether_addr _kc_is_valid_ether_addrextern int _kc_is_valid_ether_addr(u8 *addr);#endif/**************************************//* MISCELLANEOUS */#ifndef INIT_TQUEUE#define INIT_TQUEUE(_tq, _routine, _data)		\	do {						\		INIT_LIST_HEAD(&(_tq)->list);		\		(_tq)->sync = 0;			\		(_tq)->routine = _routine;		\		(_tq)->data = _data;			\	} while (0)#endif#endif /* 2.4.3 => 2.4.0 *//*****************************************************************************//* 2.4.6 => 2.4.3 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )#ifndef pci_set_power_state#define pci_set_power_state _kc_pci_set_power_stateextern int _kc_pci_set_power_state(struct pci_dev *dev, int state);#endif#ifndef pci_save_state#define pci_save_state _kc_pci_save_stateextern int _kc_pci_save_state(struct pci_dev *dev, u32 *buffer);#endif#ifndef pci_restore_state#define pci_restore_state _kc_pci_restore_stateextern int _kc_pci_restore_state(struct pci_dev *pdev, u32 *buffer);#endif#ifndef pci_enable_wake#define pci_enable_wake _kc_pci_enable_wakeextern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);#endif#ifndef pci_disable_device#define pci_disable_device _kc_pci_disable_deviceextern void _kc_pci_disable_device(struct pci_dev *pdev);#endif/* PCI PM entry point syntax changed, so don't support suspend/resume */#undef CONFIG_PM#endif /* 
2.4.6 => 2.4.3 *//*****************************************************************************//* 2.4.9 => 2.4.6 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,9) )#ifndef HAVE_PCI_SET_MWI#define pci_set_mwi(X) pci_write_config_word(X, \			       PCI_COMMAND, adapter->hw.pci_cmd_word | \			       PCI_COMMAND_INVALIDATE);#define pci_clear_mwi(X) pci_write_config_word(X, \			       PCI_COMMAND, adapter->hw.pci_cmd_word & \			       ~PCI_COMMAND_INVALIDATE);#endif#endif /* 2.4.9 => 2.4.6 *//*****************************************************************************//* 2.4.10 => 2.4.9 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )/**************************************//* MODULE API */#ifndef MODULE_LICENSE	#define MODULE_LICENSE(X)#endif/**************************************//* OTHER */#undef min#define min(x,y) ({ \	const typeof(x) _x = (x);	\	const typeof(y) _y = (y);	\	(void) (&_x == &_y);		\	_x < _y ? _x : _y; })#undef max#define max(x,y) ({ \	const typeof(x) _x = (x);	\	const typeof(y) _y = (y);	\	(void) (&_x == &_y);		\	_x > _y ? 
_x : _y; })#ifndef list_for_each_safe#define list_for_each_safe(pos, n, head) \	for (pos = (head)->next, n = pos->next; pos != (head); \		pos = n, n = pos->next)#endif#endif /* 2.4.10 -> 2.4.6 *//*****************************************************************************//* 2.4.13 => 2.4.10 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )/**************************************//* PCI DMA MAPPING */#ifndef virt_to_page	#define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))#endif#ifndef pci_map_page#define pci_map_page _kc_pci_map_pageextern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);#endif#ifndef pci_unmap_page#define pci_unmap_page _kc_pci_unmap_pageextern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);#endif/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */#undef DMA_32BIT_MASK#define DMA_32BIT_MASK	0xffffffff#undef DMA_64BIT_MASK#define DMA_64BIT_MASK	0xffffffff/**************************************//* OTHER */#ifndef cpu_relax#define cpu_relax()	rep_nop()#endif#endif /* 2.4.13 => 2.4.10 *//*****************************************************************************//* 2.4.17 => 2.4.12 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )#ifndef __devexit_p	#define __devexit_p(x) &(x)#endif#ifndef VLAN_HLEN#define VLAN_HLEN 4#endif#ifndef VLAN_ETH_HLEN#define VLAN_ETH_HLEN 18#endif#ifndef VLAN_ETH_FRAME_LEN#define VLAN_ETH_FRAME_LEN 1518#endif#endif /* 2.4.17 => 2.4.13 *//*****************************************************************************//* 2.4.20 => 2.4.19 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )/* we won't support NAPI on less than 2.4.20 */#ifdef NAPI#undef CONFIG_E100_NAPI#undef CONFIG_IXGB_NAPI#endif#endif /* 2.4.20 => 2.4.19 *//*****************************************************************************//* 2.4.22 => 2.4.17 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) 
)#define pci_name(x)	((x)->slot_name)#endif/*****************************************************************************//* 2.4.23 => 2.4.22 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )/*****************************************************************************/#ifdef NAPI#ifndef netif_poll_disable#define netif_poll_disable(x) _kc_netif_poll_disable(x)static inline void _kc_netif_poll_disable(struct net_device *netdev){	while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {		/* No hurry */		current->state = TASK_INTERRUPTIBLE;		schedule_timeout(1);	}}#endif#ifndef netif_poll_enable#define netif_poll_enable(x) _kc_netif_poll_enable(x)static inline void _kc_netif_poll_enable(struct net_device *netdev){	clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);}#endif#endif /* NAPI */#endif /* 2.4.23 => 2.4.22 *//*****************************************************************************//* 2.6.4 => 2.6.0 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )#define ETHTOOL_OPS_COMPAT#endif /* 2.6.4 => 2.6.0 *//*****************************************************************************//* 2.5.71 => 2.4.x */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )#define sk_protocol protocol#endif /* 2.5.70 => 2.4.x *//*****************************************************************************//* < 2.4.27 or 2.6.0 <= 2.6.5 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )#ifndef netif_msg_init#define netif_msg_init _kc_netif_msg_initstatic inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits){	/* use default */	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))		return default_msg_enable_bits;	if (debug_value == 0) /* no output */		return 0;	/* set low N bits */	return (1 << debug_value) -1;}#endif#endif /* < 
2.4.27 or 2.6.0 <= 2.6.5 *//*****************************************************************************/#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \     (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \      ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))#define netdev_priv(x) x->priv#endif/*****************************************************************************//* <= 2.5.0 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )#undef pci_register_driver#define pci_register_driver pci_module_init#endif /* <= 2.5.0 *//*****************************************************************************//* 2.5.28 => 2.4.23 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )static inline void _kc_synchronize_irq() { synchronize_irq(); }#undef synchronize_irq#define synchronize_irq(X) _kc_synchronize_irq()#include <linux/tqueue.h>#define work_struct tq_struct#define INIT_WORK INIT_TQUEUE#define schedule_work schedule_task#define flush_scheduled_work flush_scheduled_tasks#endif /* 2.5.28 => 2.4.17 *//*****************************************************************************//* 2.6.0 => 2.5.28 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )#define MODULE_INFO(version, _version)#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1#endif#define pci_set_consistent_dma_mask(dev,mask) 1#undef dev_put#define dev_put(dev) __dev_put(dev)#ifndef skb_fill_page_desc#define skb_fill_page_desc _kc_skb_fill_page_descextern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);#endif#ifndef pci_dma_mapping_error#define pci_dma_mapping_error _kc_pci_dma_mapping_errorstatic inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr){	return dma_addr == 0;}#endif#endif /* 2.6.0 => 2.5.28 *//*****************************************************************************//* 2.6.4 => 2.6.0 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )#define MODULE_VERSION(_version) MODULE_INFO(version, 
_version)#endif /* 2.6.4 => 2.6.0 *//*****************************************************************************//* 2.6.5 => 2.6.0 */#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )#define pci_dma_sync_single_for_cpu	pci_dma_sync_single#define pci_dma_sync_single_for_device	pci_dma_sync_single_for_cpu#endif /* 2.6.5 => 2.6.0 *//*****************************************************************************/#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )#undef if_mii#define if_mii _kc_if_miistatic inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq){	return (struct mii_ioctl_data *) &rq->ifr_ifru;}#endif /* < 2.6.7 *//*****************************************************************************/#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )#define msleep(x)	do { set_current_state(TASK_UNINTERRUPTIBLE); \				schedule_timeout((x * HZ)/1000 + 2); \			} while (0)#endif/*****************************************************************************/#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))#define __iomem#define MSEC_PER_SEC    1000Lstatic inline unsigned int _kc_jiffies_to_msecs(const unsigned long j){#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)	return (MSEC_PER_SEC / HZ) * j;#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);#else	return (j * MSEC_PER_SEC) / HZ;#endif}static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m){	if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))		return MAX_JIFFY_OFFSET;#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)	return m * (HZ / MSEC_PER_SEC);#else	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;#endif}#define msleep_interruptible _kc_msleep_interruptiblestatic unsigned inline long _kc_msleep_interruptible(unsigned int msecs){	unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;	while (timeout && !signal_pending(current)) {		
__set_current_state(TASK_INTERRUPTIBLE);		timeout = schedule_timeout(timeout);	}	return _kc_jiffies_to_msecs(timeout);}#endif /* < 2.6.9 *//*****************************************************************************/#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,6) && \      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )#ifdef pci_save_state#undef pci_save_state#endif#define pci_save_state(X) { \        int i; \        if (adapter->pci_state) { \                for (i = 0; i < 16; i++) { \                        pci_read_config_dword((X), \                                              i * 4, \                                              &adapter->pci_state[i]); \                } \        } \}#ifdef pci_restore_state#undef pci_restore_state#endif#define pci_restore_state(X) { \        int i; \        if (adapter->pci_state) { \                for (i = 0; i < 16; i++) { \                        pci_write_config_dword((X), \                                               i * 4, \                                               adapter->pci_state[i]); \                } \        } else { \                for (i = 0; i < 6; i++) { \                        pci_write_config_dword((X), \                                               PCI_BASE_ADDRESS_0 + (i * 4), \                                               (X)->resource[i].start); \                } \        } \}#endif /* 2.4.6 <= x < 2.6.10 *//*****************************************************************************/#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )#ifdef module_param_array_named#undef module_param_array_named#define module_param_array_named(name, array, type, nump, perm)          \	static struct kparam_array __param_arr_##name                    \	= { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \	    sizeof(array[0]), array };                                   \	module_param_call(name, param_array_set, param_array_get,        \			  &__param_arr_##name, perm)#endif /* 
module_param_array_named */#endif /* < 2.6.10 *//*****************************************************************************/#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )#define PCI_D0      0#define PCI_D1      1#define PCI_D2      2#define PCI_D3hot   3#define PCI_D3cold  4#define pci_choose_state(pdev,state) state#define PMSG_SUSPEND 3#undef NETIF_F_LLTX#ifndef ARCH_HAS_PREFETCH#define prefetch(X)#endif#ifndef NET_IP_ALIGN#define NET_IP_ALIGN 2#endif#endif /* < 2.6.11 *//*****************************************************************************/#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )#define E100_USE_REBOOT_NOTIFIER#else#define USE_DRIVER_SHUTDOWN_HANDLER#define E100_USE_SHUTDOWN_HANDLER#endif/*****************************************************************************/#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )#define pm_message_t u32#ifndef kzalloc#define kzalloc _kc_kzallocextern void *_kc_kzalloc(size_t size, int flags);#endif#endif/*****************************************************************************/#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )#undef E100_USE_PCI_ERS#undef CONFIG_E1000_PCI_ERS#undef CONFIG_IXGB_PCI_ERS#else#define E100_USE_PCI_ERS#define CONFIG_E1000_PCI_ERS#define CONFIG_IXGB_PCI_ERS#endif/*****************************************************************************/#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )#ifndef IRQF_PROBE_SHARED#ifdef SA_PROBEIRQ#define IRQF_PROBE_SHARED SA_PROBEIRQ#else#define IRQF_PROBE_SHARED 0#endif#endif#ifndef IRQF_SHARED#define IRQF_SHARED SA_SHIRQ#endif#ifndef netdev_alloc_skb#define netdev_alloc_skb _kc_netdev_alloc_skbextern struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,                                            unsigned int length);#endif#ifndef skb_is_gso#ifdef NETIF_F_TSO#define skb_is_gso _kc_skb_is_gsostatic inline int _kc_skb_is_gso(const struct sk_buff *skb){	return skb_shinfo(skb)->gso_size;}#endif#endif#endif /* < 2.6.18 
*//*****************************************************************************/#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);typedef irqreturn_t (*new_handler_t)(int, void*);static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)#else /* 2.4.x */typedef void (*irq_handler_t)(int, void*, struct pt_regs *);typedef void (*new_handler_t)(int, void*);static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)#endif{	irq_handler_t new_handler = (irq_handler_t) handler;	return request_irq(irq, new_handler, flags, devname, dev_id);}#undef request_irq#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))#endif /* < 2.6.19 *//*****************************************************************************/#endif /* _KCOMPAT_H_ */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -