kcompat.h
			pci_write_config_dword((X), \
					       i * 4, \
					       adapter->pci_state[i]); \
		} \
	} else { \
		for (i = 0; i < 6; i++) { \
			pci_write_config_dword((X), \
					       PCI_BASE_ADDRESS_0 + (i * 4), \
					       (X)->resource[i].start); \
		} \
	} \
}
#endif /* 2.4.6 <= x < 2.6.10 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
#ifdef module_param_array_named
#undef module_param_array_named
#define module_param_array_named(name, array, type, nump, perm) \
	static struct kparam_array __param_arr_##name \
	= { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
	    sizeof(array[0]), array }; \
	module_param_call(name, param_array_set, param_array_get, \
			  &__param_arr_##name, perm)
#endif /* module_param_array_named */
#endif /* < 2.6.10 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
#define PCI_D0      0
#define PCI_D1      1
#define PCI_D2      2
#define PCI_D3hot   3
#define PCI_D3cold  4
#define pci_choose_state(pdev,state) state
#define PMSG_SUSPEND 3
#undef NETIF_F_LLTX
#ifndef ARCH_HAS_PREFETCH
#define prefetch(X)
#endif
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN 2
#endif

#define KC_USEC_PER_SEC 1000000L
#define usecs_to_jiffies _kc_usecs_to_jiffies
static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
{
#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
	return (KC_USEC_PER_SEC / HZ) * j;
#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
	return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
#else
	return (j * KC_USEC_PER_SEC) / HZ;
#endif
}
static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
{
	if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;
#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
	return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
	return m * (HZ / KC_USEC_PER_SEC);
#else
	return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
#endif
}
#endif /* < 2.6.11 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
#include <linux/reboot.h>
#define USE_REBOOT_NOTIFIER

/* Generic MII registers. */
#define MII_CTRL1000          0x09    /* 1000BASE-T control */
#define MII_STAT1000          0x0a    /* 1000BASE-T status */
/* Advertisement control register. */
#define ADVERTISE_PAUSE_CAP   0x0400  /* Try for pause */
#define ADVERTISE_PAUSE_ASYM  0x0800  /* Try for asymmetric pause */
/* 1000BASE-T Control register */
#define ADVERTISE_1000FULL    0x0200  /* Advertise 1000BASE-T full duplex */
#endif

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
#define pm_message_t u32
#ifndef kzalloc
#define kzalloc _kc_kzalloc
extern void *_kc_kzalloc(size_t size, int flags);
#endif

/* Generic MII registers. */
#define MII_ESTATUS           0x0f    /* Extended Status */
/* Basic mode status register. */
#define BMSR_ESTATEN          0x0100  /* Extended Status in R15 */
/* Extended status register. */
#define ESTATUS_1000_TFULL    0x2000  /* Can do 1000BT Full */
#define ESTATUS_1000_THALF    0x1000  /* Can do 1000BT Half */
#endif

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
#undef HAVE_PCI_ERS
#else /* 2.6.16 and above */
#undef HAVE_PCI_ERS
#define HAVE_PCI_ERS
#endif

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
#ifndef IRQF_PROBE_SHARED
#ifdef SA_PROBEIRQ
#define IRQF_PROBE_SHARED SA_PROBEIRQ
#else
#define IRQF_PROBE_SHARED 0
#endif
#endif

#ifndef IRQF_SHARED
#define IRQF_SHARED SA_SHIRQ
#endif

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#ifndef netdev_alloc_skb
#define netdev_alloc_skb _kc_netdev_alloc_skb
extern struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
                                            unsigned int length);
#endif

#ifndef skb_is_gso
#ifdef NETIF_F_TSO
#define skb_is_gso _kc_skb_is_gso
static inline int _kc_skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
#else
#define skb_is_gso(a) 0
#endif
#endif
#endif /* < 2.6.18 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#endif
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
#ifndef RHEL_RELEASE_CODE
#define RHEL_RELEASE_CODE 0
#endif
#ifndef RHEL_RELEASE_VERSION
#define RHEL_RELEASE_VERSION(a,b) 0
#endif
#if (!(( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) ) && ( RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0) ) || ( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0) )))
typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
#endif
typedef irqreturn_t (*new_handler_t)(int, void*);
static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler,
                                          unsigned long flags, const char *devname,
                                          void *dev_id)
#else /* 2.4.x */
typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
typedef void (*new_handler_t)(int, void*);
static inline int _kc_request_irq(unsigned int irq, new_handler_t handler,
                                  unsigned long flags, const char *devname,
                                  void *dev_id)
#endif
{
	irq_handler_t new_handler = (irq_handler_t) handler;
	return request_irq(irq, new_handler, flags, devname, dev_id);
}

#undef request_irq
#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))

#define irq_handler_t new_handler_t

/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
#define PCIE_CONFIG_SPACE_LEN 256
#define PCI_CONFIG_SPACE_LEN 64
#define PCIE_LINK_STATUS 0x12
#ifdef DRIVER_E1000E
#define pci_config_space_ich8lan() { \
	if (adapter->flags & FLAG_IS_ICH) \
		size = PCIE_CONFIG_SPACE_LEN; \
}
#else
#define pci_config_space_ich8lan()
#endif
#undef pci_save_state
#define pci_save_state(pdev) _kc_pci_save_state(adapter)
#define _kc_pci_save_state(adapter) 0; { \
	int size = PCI_CONFIG_SPACE_LEN, i; \
	u16 pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); \
	u16 pcie_link_status; \
	\
	if (pcie_cap_offset) { \
		if (!pci_read_config_word(pdev, pcie_cap_offset + PCIE_LINK_STATUS, \
					  &pcie_link_status)) \
			size = PCIE_CONFIG_SPACE_LEN; \
	} \
	pci_config_space_ich8lan(); \
	WARN_ON(adapter->config_space != NULL); \
	adapter->config_space = kmalloc(size, GFP_KERNEL); \
	if (!adapter->config_space) { \
		printk(KERN_ERR "Out of memory in pci_save_state\n"); \
		return -ENOMEM; \
	} \
	for (i = 0; i < (size / 4); i++) \
		pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); \
}
#undef pci_restore_state
#define pci_restore_state(pdev) _kc_pci_restore_state(adapter)
#define _kc_pci_restore_state(adapter) { \
	int size = PCI_CONFIG_SPACE_LEN, i; \
	u16 pcie_cap_offset; \
	u16 pcie_link_status; \
	\
	if (adapter->config_space != NULL) { \
		pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); \
		if (pcie_cap_offset) { \
			if (!pci_read_config_word(pdev, pcie_cap_offset + PCIE_LINK_STATUS, \
						  &pcie_link_status)) \
				size = PCIE_CONFIG_SPACE_LEN; \
		} \
		pci_config_space_ich8lan(); \
		for (i = 0; i < (size / 4); i++) \
			pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); \
		kfree(adapter->config_space); \
		adapter->config_space = NULL; \
	} \
}
#endif /* < 2.6.19 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
#undef INIT_WORK
#define INIT_WORK(_work, _func) \
do { \
	INIT_LIST_HEAD(&(_work)->entry); \
	(_work)->pending = 0; \
	(_work)->func = (void (*)(void *))_func; \
	(_work)->data = _work; \
	init_timer(&(_work)->timer); \
} while (0)
#endif

#ifndef PCI_VDEVICE
#define PCI_VDEVICE(ven, dev) \
	PCI_VENDOR_ID_##ven, (dev), \
	PCI_ANY_ID, PCI_ANY_ID, 0, 0
#endif

#ifndef round_jiffies
#define round_jiffies(x) x
#endif

#define csum_offset csum
#endif /* < 2.6.20 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
#define vlan_group_set_device(vg, id, dev) if (vg) vg->vlan_devices[id] = dev;
#define pci_channel_offline(pdev) (pdev->error_state && \
	pdev->error_state != pci_channel_io_normal)
#endif /* < 2.6.21 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
#define tcp_hdr(skb) (skb->h.th)
#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
#define skb_transport_offset(skb) (skb->h.raw - skb->data)
#define skb_transport_header(skb) (skb->h.raw)
#define ipv6_hdr(skb) (skb->nh.ipv6h)
#define ip_hdr(skb) (skb->nh.iph)
#define skb_network_offset(skb) (skb->nh.raw - skb->data)
#define skb_network_header(skb) (skb->nh.raw)
#define skb_tail_pointer(skb) skb->tail
#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
	memcpy(skb->data + offset, from, len)
#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
#define pci_register_driver pci_module_init
#define skb_mac_header(skb) skb->mac.raw
#ifdef NETIF_F_MULTI_QUEUE
#ifndef alloc_etherdev_mq
#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
#endif
#endif /* NETIF_F_MULTI_QUEUE */
#ifndef ETH_FCS_LEN
#define ETH_FCS_LEN 4
#endif
#endif /* < 2.6.22 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
#undef ETHTOOL_GPERMADDR
#undef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev) do { } while (0)
#endif /* > 2.6.22 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
/* NAPI API changes in 2.6.24 break everything */
struct napi_struct {
	/* used to look up the real NAPI polling routine */
	int (*poll)(struct napi_struct *, int);
	int weight;
};
#ifdef NAPI
extern int __kc_adapter_clean(struct net_device *, int *);
#define netif_rx_complete(netdev, napi) netif_rx_complete(netdev)
#define netif_rx_schedule_prep(netdev, napi) netif_rx_schedule_prep(netdev)
#define netif_rx_schedule(netdev, napi) netif_rx_schedule(netdev)
#define __netif_rx_schedule(netdev, napi) __netif_rx_schedule(netdev)
#define napi_enable(napi) netif_poll_enable(adapter->netdev)
#define napi_disable(napi) netif_poll_disable(adapter->netdev)
#define netif_napi_add(_netdev, _napi, _poll, _weight) \
	do { \
		struct napi_struct *__napi = _napi; \
		_netdev->poll = &(__kc_adapter_clean); \
		_netdev->weight = (_weight); \
		__napi->poll = &(_poll); \
		__napi->weight = (_weight); \
		netif_poll_disable(_netdev); \
	} while (0)
#else /* NAPI */
#define netif_napi_add(_netdev, _napi, _poll, _weight) \
	do { \
		struct napi_struct *__napi = _napi; \
		_netdev->poll = &(_poll); \
		_netdev->weight = (_weight); \
		__napi->poll = &(_poll); \
		__napi->weight = (_weight); \
	} while (0)
#endif /* NAPI */
#undef dev_get_by_name
#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
#endif /* < 2.6.24 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
#include <linux/pm_qos_params.h>
#endif /* > 2.6.24 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
#define PM_QOS_CPU_DMA_LATENCY 1

#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
#include <linux/latency.h>
#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
#define pm_qos_add_requirement(pm_qos_class, name, value) \
	set_acceptable_latency(name, value)
#define pm_qos_remove_requirement(pm_qos_class, name) \
	remove_acceptable_latency(name)
#define pm_qos_update_requirement(pm_qos_class, name, value) \
	modify_acceptable_latency(name, value)
#else
#define PM_QOS_DEFAULT_VALUE -1
#define pm_qos_add_requirement(pm_qos_class, name, value)
#define pm_qos_remove_requirement(pm_qos_class, name)
#define pm_qos_update_requirement(pm_qos_class, name, value) { \
	if (value != PM_QOS_DEFAULT_VALUE) { \
		printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
		       pci_name(adapter->pdev)); \
	} \
}
#endif /* > 2.6.18 */
#endif /* < 2.6.25 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
#endif /* < 2.6.26 */

#ifndef NETIF_F_MULTI_QUEUE
#define NETIF_F_MULTI_QUEUE 0
#define netif_is_multiqueue(a) 0
#define netif_stop_subqueue(a, b)
#define netif_wake_subqueue(a, b)
#define netif_start_subqueue(a, b)
#endif /* NETIF_F_MULTI_QUEUE */

#endif /* _KCOMPAT_H_ */
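/*
 * Usage sketch (editor's illustration, not part of the original kcompat.h):
 * how a driver built against this header might hook up NAPI through the
 * netif_napi_add()/napi_enable() compatibility macros above.  The names
 * my_adapter, my_poll and my_napi_setup are hypothetical placeholders for a
 * driver that embeds a struct napi_struct and keeps a struct net_device
 * pointer named netdev; only the two macro calls come from this header
 * (or from the real kernel API on 2.6.24 and later).
 */
#ifdef KCOMPAT_USAGE_SKETCH	/* illustration only, never defined */
struct my_adapter {
	struct net_device *netdev;
	struct napi_struct napi;
};

/* Poll routine in the 2.6.24+ prototype; on older kernels with NAPI the shim
 * above routes the legacy netdev->poll path through __kc_adapter_clean(),
 * which finds this routine via the embedded struct napi_struct. */
static int my_poll(struct napi_struct *napi, int budget)
{
	/* ...clean the RX ring here, up to budget packets... */
	return 0;	/* all work done, for this illustration */
}

static void my_napi_setup(struct my_adapter *adapter)
{
	/* Expands to the do/while compat shim before 2.6.24; note that the
	 * pre-2.6.24 napi_enable() macro expects a local named "adapter". */
	netif_napi_add(adapter->netdev, &adapter->napi, my_poll, 64);
	napi_enable(&adapter->napi);
}
#endif /* KCOMPAT_USAGE_SKETCH */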