📄 kcompat.h
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0) /* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
#endif
#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */

/*****************************************************************************/
#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
    (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
     ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
#define netdev_priv(x) x->priv
#endif

/*****************************************************************************/
/* <= 2.5.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
#undef pci_register_driver
#define pci_register_driver pci_module_init
#endif /* <= 2.5.0 */

/*****************************************************************************/
/* 2.5.28 => 2.4.23 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )

static inline void _kc_synchronize_irq(void)
{
	synchronize_irq();
}
#undef synchronize_irq
#define synchronize_irq(X) _kc_synchronize_irq()

#include <linux/tqueue.h>
#define work_struct tq_struct
#undef INIT_WORK
#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
#undef container_of
#define container_of list_entry

#define schedule_work schedule_task
#define flush_scheduled_work flush_scheduled_tasks

#endif /* 2.5.28 => 2.4.17 */

/*****************************************************************************/
/* 2.6.0 => 2.5.28 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
#define MODULE_INFO(version, _version)
#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
#endif
#define pci_set_consistent_dma_mask(dev,mask) 1

#undef dev_put
#define dev_put(dev) __dev_put(dev)

#ifndef skb_fill_page_desc
#define skb_fill_page_desc _kc_skb_fill_page_desc
extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
#endif

#ifndef pci_dma_mapping_error
#define pci_dma_mapping_error _kc_pci_dma_mapping_error
static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}
#endif

#undef ALIGN
#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))

#endif /* 2.6.0 => 2.5.28 */

/*****************************************************************************/
/* 2.6.4 => 2.6.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
#endif /* 2.6.4 => 2.6.0 */

/*****************************************************************************/
/* 2.6.5 => 2.6.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
#define pci_dma_sync_single_for_cpu	pci_dma_sync_single
#define pci_dma_sync_single_for_device	pci_dma_sync_single_for_cpu
#endif /* 2.6.5 => 2.6.0 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
#undef if_mii
#define if_mii _kc_if_mii
static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
{
	return (struct mii_ioctl_data *) &rq->ifr_ifru;
}
#endif /* < 2.6.7 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
#define msleep(x)	do { set_current_state(TASK_UNINTERRUPTIBLE); \
				schedule_timeout((x * HZ)/1000 + 2); \
			} while (0)
#endif

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
#define __iomem

#ifndef kcalloc
#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
extern void *_kc_kzalloc(size_t size, int flags);
#endif
#define MSEC_PER_SEC    1000L
static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
	return (j * MSEC_PER_SEC) / HZ;
#endif
}
static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
{
	if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	return m * (HZ / MSEC_PER_SEC);
#else
	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
#endif
}

#define msleep_interruptible _kc_msleep_interruptible
static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		timeout = schedule_timeout(timeout);
	}
	return _kc_jiffies_to_msecs(timeout);
}
#endif /* < 2.6.9 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,6) && \
      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
#ifdef pci_save_state
#undef pci_save_state
#endif
#define pci_save_state(X) { \
	int i; \
	if (adapter->pci_state) { \
		for (i = 0; i < 16; i++) { \
			pci_read_config_dword((X), \
					      i * 4, \
					      &adapter->pci_state[i]); \
		} \
	} \
}

#ifdef pci_restore_state
#undef pci_restore_state
#endif
#define pci_restore_state(X) { \
	int i; \
	if (adapter->pci_state) { \
		for (i = 0; i < 16; i++) { \
			pci_write_config_dword((X), \
					       i * 4, \
					       adapter->pci_state[i]); \
		} \
	} else { \
		for (i = 0; i < 6; i++) { \
			pci_write_config_dword((X), \
					       PCI_BASE_ADDRESS_0 + (i * 4), \
					       (X)->resource[i].start); \
		} \
	} \
}
#endif /* 2.4.6 <= x < 2.6.10 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
#ifdef module_param_array_named
#undef module_param_array_named
#define module_param_array_named(name, array, type, nump, perm)          \
	static struct kparam_array __param_arr_##name                    \
	= { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
	    sizeof(array[0]), array };                                    \
	module_param_call(name, param_array_set, param_array_get,        \
			  &__param_arr_##name, perm)
#endif /* module_param_array_named */
#endif /* < 2.6.10 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
#define PCI_D0      0
#define PCI_D1      1
#define PCI_D2      2
#define PCI_D3hot   3
#define PCI_D3cold  4
#define pci_choose_state(pdev,state) state
#define PMSG_SUSPEND 3

#undef NETIF_F_LLTX

#ifndef ARCH_HAS_PREFETCH
#define prefetch(X)
#endif

#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN 2
#endif

#endif /* < 2.6.11 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
#include <linux/reboot.h>
#define USE_REBOOT_NOTIFIER
#endif

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
#define pm_message_t u32
#ifndef kzalloc
#define kzalloc _kc_kzalloc
extern void *_kc_kzalloc(size_t size, int flags);
#endif
#endif

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
#undef CONFIG_E1000_PCI_ERS
#undef CONFIG_IXGB_PCI_ERS
#else
#define CONFIG_E1000_PCI_ERS
#define CONFIG_IXGB_PCI_ERS
#endif

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )

#ifndef IRQF_PROBE_SHARED
#ifdef SA_PROBEIRQ
#define IRQF_PROBE_SHARED SA_PROBEIRQ
#else
#define IRQF_PROBE_SHARED 0
#endif
#endif

#ifndef IRQF_SHARED
#define IRQF_SHARED SA_SHIRQ
#endif

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#ifndef netdev_alloc_skb
#define netdev_alloc_skb _kc_netdev_alloc_skb
extern struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev, unsigned int length);
#endif

#ifndef skb_is_gso
#ifdef NETIF_F_TSO
#define skb_is_gso _kc_skb_is_gso
static inline int _kc_skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
#endif
#endif

#endif /* < 2.6.18 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
typedef _Bool bool;

#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
#ifndef RHEL_VERSION
#define RHEL_VERSION 0
#endif
#if (!(( RHEL_VERSION == 4 ) && ( RHEL_UPDATE >= 5 )))
typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
#endif
typedef irqreturn_t (*new_handler_t)(int, void*);
static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
#else /* 2.4.x */
typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
typedef void (*new_handler_t)(int, void*);
static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
#endif
{
	irq_handler_t new_handler = (irq_handler_t) handler;
	return request_irq(irq, new_handler, flags, devname, dev_id);
}

#undef request_irq
#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))

/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
#define PCIE_CONFIG_SPACE_LEN 256
#define PCI_CONFIG_SPACE_LEN 64
#define PCIE_LINK_STATUS 0x12
#undef pci_save_state
#define pci_save_state(pdev) _kc_pci_save_state(adapter)
#define _kc_pci_save_state(adapter) 0; { \
	int size, i; \
	u16 pcie_link_status; \
	\
	u16 cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); \
	if (cap_offset) { \
		if (pci_read_config_word(pdev, cap_offset + PCIE_LINK_STATUS, &pcie_link_status)) \
			size = PCI_CONFIG_SPACE_LEN; \
		else \
			size = PCIE_CONFIG_SPACE_LEN; \
		WARN_ON(adapter->config_space != NULL); \
		adapter->config_space = kmalloc(size, GFP_KERNEL); \
		if (!adapter->config_space) { \
			printk(KERN_ERR "Out of memory in pci_save_msi_state\n"); \
			return -ENOMEM; \
		} \
		for (i = 0; i < (size / 4); i++) \
			pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); \
	} \
}

#undef pci_restore_state
#define pci_restore_state(pdev) _kc_pci_restore_state(adapter)
#define _kc_pci_restore_state(adapter) { \
	int size, i; \
	u16 pcie_link_status; \
	\
	u16 cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); \
	if (cap_offset) { \
		if (adapter->config_space != NULL) { \
			if (pci_read_config_word(pdev, cap_offset + PCIE_LINK_STATUS, &pcie_link_status)) \
				size = PCI_CONFIG_SPACE_LEN; \
			else \
				size = PCIE_CONFIG_SPACE_LEN; \
			\
			for (i = 0; i < (size / 4); i++) \
				pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); \
			kfree(adapter->config_space); \
			adapter->config_space = NULL; \
		} \
	} \
}
#endif /* < 2.6.19 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
#undef INIT_WORK
#define INIT_WORK(_work, _func) \
do { \
	INIT_LIST_HEAD(&(_work)->entry); \
	(_work)->pending = 0; \
	(_work)->func = (void (*)(void *))_func; \
	(_work)->data = _work; \
	init_timer(&(_work)->timer); \
} while (0)
#endif

#ifndef round_jiffies
#define round_jiffies(x) x
#endif

#define csum_offset csum

#endif /* < 2.6.20 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
#define vlan_group_set_device(vg, id, dev) if (vg) vg->vlan_devices[id] = dev;
#define pci_channel_offline(pdev) (pdev->error_state && \
				   pdev->error_state != pci_channel_io_normal)
#endif /* < 2.6.21 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
#define tcp_hdr(skb) (skb->h.th)
#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
#define skb_transport_offset(skb) (skb->h.raw - skb->data)
#define skb_transport_header(skb) (skb->h.raw)
#define ipv6_hdr(skb) (skb->nh.ipv6h)
#define ip_hdr(skb) (skb->nh.iph)
#define skb_network_offset(skb) (skb->nh.raw - skb->data)
#define skb_network_header(skb) (skb->nh.raw)
#define skb_tail_pointer(skb) skb->tail
#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
	memcpy(skb->data + offset, from, len)
#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
#define pci_register_driver pci_module_init
#ifndef alloc_etherdev_mq
#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
#endif
#endif /* < 2.6.22 */
#endif /* _KCOMPAT_H_ */
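Usage note: a driver that ships this header includes it after the kernel headers, so each fallback above is only picked up when the running kernel is old enough to lack the real API. The sketch below is illustrative only and is not part of the e1000/ixgb sources; the file name and helper names are hypothetical, and it assumes a 2.6-series build where the shims shown here apply.

/* example.c - hypothetical driver snippet, for illustration only */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include "kcompat.h"

/*
 * skb_is_gso() falls back to _kc_skb_is_gso() before 2.6.18, and
 * skb_transport_offset()/tcp_hdrlen() map onto the old skb->h union
 * before 2.6.22, so this helper compiles unchanged on old and new
 * kernels.
 */
static int example_tso_header_len(const struct sk_buff *skb)
{
	if (skb_is_gso(skb))
		return skb_transport_offset(skb) + tcp_hdrlen(skb);
	return 0;
}

/*
 * Handler written against the two-argument signature introduced in
 * 2.6.19; on older 2.6 kernels the request_irq() wrapper above casts it
 * to the three-argument type, and the extra pt_regs argument is simply
 * never looked at.  IRQF_SHARED maps to SA_SHIRQ before 2.6.18.
 */
static irqreturn_t example_intr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_request(struct net_device *netdev, int irq)
{
	return request_irq(irq, example_intr, IRQF_SHARED, netdev->name, netdev);
}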