kcompat.h
}

/*****************************************************************************/
/* 2.4.13 => 2.4.3 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )

/**************************************/
/* PCI DMA MAPPING */

#if defined(CONFIG_HIGHMEM)

#ifndef PCI_DRAM_OFFSET
#define PCI_DRAM_OFFSET 0
#endif

u64
_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
                 size_t size, int direction)
{
        /* (page - mem_map) is the page frame number; shift it up to a
         * physical address, which is assumed to map one-to-one onto the
         * bus address space (plus PCI_DRAM_OFFSET) */
        return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
                PCI_DRAM_OFFSET);
}

#else /* CONFIG_HIGHMEM */

u64
_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
                 size_t size, int direction)
{
        return pci_map_single(dev, (void *) page_address(page) + offset,
                              size, direction);
}

#endif /* CONFIG_HIGHMEM */

void
_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
                   int direction)
{
        pci_unmap_single(dev, dma_addr, size, direction);
}

#endif /* 2.4.13 => 2.4.3 */

/*****************************************************************************/
/* 2.4.3 => 2.4.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )

/**************************************/
/* PCI DRIVER API */

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) )
int
_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
{
        return 0;
}
#else
int
_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
{
        if (!pci_dma_supported(dev, mask))
                return -EIO;
        dev->dma_mask = mask;
        return 0;
}
#endif

int
_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
{
        int i;

        for (i = 0; i < 6; i++) {
                if (pci_resource_len(dev, i) == 0)
                        continue;
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) )
                if ((dev->base_address[i] & PCI_BASE_ADDRESS_SPACE_IO))
                        request_region(pci_resource_start(dev, i),
                                       pci_resource_len(dev, i), res_name);
#else
                if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
                        if (!request_region(pci_resource_start(dev, i),
                                            pci_resource_len(dev, i),
                                            res_name)) {
                                pci_release_regions(dev);
                                return -EBUSY;
                        }
                } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
                        if (!request_mem_region(pci_resource_start(dev, i),
                                                pci_resource_len(dev, i),
                                                res_name)) {
                                pci_release_regions(dev);
                                return -EBUSY;
                        }
                }
#endif
        }
        return 0;
}

void
_kc_pci_release_regions(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < 6; i++) {
                if (pci_resource_len(dev, i) == 0)
                        continue;
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) )
                if ((dev->base_address[i] & PCI_BASE_ADDRESS_SPACE))
                        release_region(pci_resource_start(dev, i),
                                       pci_resource_len(dev, i));
#else
                if (pci_resource_flags(dev, i) & IORESOURCE_IO)
                        release_region(pci_resource_start(dev, i),
                                       pci_resource_len(dev, i));
                else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
                        release_mem_region(pci_resource_start(dev, i),
                                           pci_resource_len(dev, i));
#endif
        }
}

/**************************************/
/* NETWORK DRIVER API */

#define _KC_MAX_NET_DEV 32
static int my_net_count = 0;
static struct _kc_net_dev_ext my_net_devices[_KC_MAX_NET_DEV];

struct net_device *
_kc_alloc_etherdev(int sizeof_priv)
{
        struct net_device *dev;
        int alloc_size;

        if (my_net_count >= _KC_MAX_NET_DEV)
                return NULL;

        /* one allocation holds the net_device, the 32-byte-aligned
         * private area behind it, and (on 2.2.x) the name buffer */
        alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
        dev = kmalloc(alloc_size, GFP_KERNEL);
        if (!dev)
                return NULL;
        memset(dev, 0, alloc_size);

        if (sizeof_priv)
                dev->priv = (void *) (((unsigned long) (dev + 1) + 31) & ~31);
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) )
        dev->name = (char *) dev->priv + sizeof_priv;
#endif
        dev->name[0] = '\0';

        ether_setup(dev);

        my_net_devices[my_net_count].dev = dev;
        my_net_count++;

        return dev;
}

int
_kc_is_valid_ether_addr(u8 *addr)
{
        const char zaddr[6] = { 0, };

        /* reject multicast addresses (bit 0 of the first octet set)
         * and the all-zero address */
        return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
}

#endif /* 2.4.3 => 2.4.0 */
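/*
 * Illustrative sketch (not part of the original header): assuming the
 * usual kcompat-style remapping elsewhere in kcompat.h, e.g.
 * "#define alloc_etherdev _kc_alloc_etherdev" and
 * "#define pci_request_regions _kc_pci_request_regions", probe code
 * written against the 2.4 API compiles unchanged on older kernels.
 * example_probe and struct example_priv below are hypothetical names.
 */
#if 0   /* example only, never compiled */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct net_device *netdev;

        /* expands to _kc_pci_request_regions() before 2.4.3 */
        if (pci_request_regions(pdev, "example"))
                return -EBUSY;

        /* expands to _kc_alloc_etherdev(); the private area sits
         * 32-byte aligned directly behind struct net_device */
        netdev = alloc_etherdev(sizeof(struct example_priv));
        if (!netdev) {
                pci_release_regions(pdev);
                return -ENOMEM;
        }
        return 0;
}
#endif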
/*****************************************************************************/
/* 2.4.0 => 2.2.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) )

/**************************************/
/* PCI DRIVER API */

#define _KC_MAX_PCI_DEV 32
static int my_pci_count = 0;
static struct _kc_pci_dev_ext my_pci_devices[_KC_MAX_PCI_DEV];

int
_kc_pci_module_init(struct pci_driver *drv)
{
        struct pci_dev *dev;
        struct pci_device_id *pciid;
        uint16_t subvendor, subdevice;

        my_pci_count = 0;

        /* walk the global 2.2.x device list and match each device
         * against the driver's id_table, treating PCI_ANY_ID as a
         * wildcard, the way 2.4's pci_module_init() would */
        for (dev = pci_devices; dev; dev = dev->next) {

                if (my_pci_count >= _KC_MAX_PCI_DEV)
                        break;

                pciid = &drv->id_table[0];
                pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID,
                                     &subvendor);
                pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &subdevice);

                while (pciid->vendor != 0) {
                        if (((pciid->vendor == dev->vendor) ||
                             (pciid->vendor == PCI_ANY_ID)) &&
                            ((pciid->device == dev->device) ||
                             (pciid->device == PCI_ANY_ID)) &&
                            ((pciid->subvendor == subvendor) ||
                             (pciid->subvendor == PCI_ANY_ID)) &&
                            ((pciid->subdevice == subdevice) ||
                             (pciid->subdevice == PCI_ANY_ID))) {
                                my_pci_devices[my_pci_count].dev = dev;
                                my_pci_devices[my_pci_count].driver = drv;
                                my_pci_count++;
                                /* roll the entry back if probe fails */
                                if (drv->probe(dev, pciid)) {
                                        my_pci_count--;
                                        my_pci_devices[my_pci_count].dev = NULL;
                                }
                                break;
                        }
                        pciid++;
                }
        }
        return (my_pci_count > 0) ? 0 : -ENODEV;
}

void
_kc_pci_unregister_driver(struct pci_driver *drv)
{
        int i;

        for (i = 0; i < my_pci_count; i++) {
                if (my_pci_devices[i].dev) {
                        drv->remove(my_pci_devices[i].dev);
                        my_pci_devices[i].dev = NULL;
                }
        }
        my_pci_count = 0;
}

void
_kc_pci_set_drvdata(struct pci_dev *dev, void *data)
{
        int i;

        for (i = 0; i < my_pci_count; i++) {
                if (my_pci_devices[i].dev == dev) {
                        my_pci_devices[i].pci_drvdata = data;
                }
        }
}

void *
_kc_pci_get_drvdata(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < my_pci_count; i++) {
                if (my_pci_devices[i].dev == dev) {
                        return my_pci_devices[i].pci_drvdata;
                }
        }
        return NULL;
}

int
_kc_pci_enable_device(struct pci_dev *dev)
{
        return 0;
}

int
_kc_pci_resource_start(struct pci_dev *dev, int bar)
{
        /* bit 0 of a BAR distinguishes I/O space (1) from memory
         * space (0); mask off the flag bits accordingly */
        return ((dev->base_address[bar] & PCI_BASE_ADDRESS_SPACE) ?
                (dev->base_address[bar] & PCI_BASE_ADDRESS_IO_MASK) :
                (dev->base_address[bar] & PCI_BASE_ADDRESS_MEM_MASK));
}

unsigned long
_kc_pci_resource_len(struct pci_dev *dev, int bar)
{
        u32 old, len;
        int bar_reg = PCI_BASE_ADDRESS_0 + (bar << 2);

        /* standard BAR sizing: save the BAR, write all ones, read back
         * the size mask, then restore the original value */
        pci_read_config_dword(dev, bar_reg, &old);
        pci_write_config_dword(dev, bar_reg, ~0);
        pci_read_config_dword(dev, bar_reg, &len);
        pci_write_config_dword(dev, bar_reg, old);

        if ((len & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY)
                len = ~(len & PCI_BASE_ADDRESS_MEM_MASK);
        else
                len = ~(len & PCI_BASE_ADDRESS_IO_MASK) & 0xffff;

        return (len + 1);
}

struct pci_driver *
_kc_pci_dev_driver(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < my_pci_count; i++) {
                if (my_pci_devices[i].dev == dev) {
                        return my_pci_devices[i].driver;
                }
        }
        return NULL;
}
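/*
 * Illustrative sketch (not part of the original header): on 2.2.x,
 * _kc_pci_module_init() above stands in for pci_module_init(), so a
 * 2.4-style module init still works, assuming the usual
 * "#define pci_module_init _kc_pci_module_init" remapping elsewhere in
 * kcompat.h. example_driver is a hypothetical struct pci_driver.
 */
#if 0   /* example only, never compiled */
static int __init example_init(void)
{
        /* returns 0 if at least one listed device probed successfully,
         * -ENODEV otherwise */
        return pci_module_init(&example_driver);
}
#endif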
/**************************************/
/* PCI DMA MAPPING */

void *
_kc_pci_alloc_consistent(struct pci_dev *dev, size_t size, u64 *dma_handle)
{
        /* 2.2.x has no consistent-DMA allocator; fall back to kmalloc()
         * plus virt_to_bus(), which assumes cache-coherent,
         * identity-mapped DMA addressing */
        void *vaddr = kmalloc(size, GFP_KERNEL);

        if (vaddr)
                *dma_handle = virt_to_bus(vaddr);
        return vaddr;
}

void
_kc_pci_free_consistent(struct pci_dev *dev, size_t size, void *addr,
                        u64 dma_handle)
{
        kfree(addr);
}

u64
_kc_pci_map_single(struct pci_dev *dev, void *addr, size_t size,
                   int direction)
{
        return virt_to_bus(addr);
}

void
_kc_pci_unmap_single(struct pci_dev *dev, u64 dma_addr, size_t size,
                     int direction)
{
}

void
_kc_pci_dma_sync_single(struct pci_dev *dev, u64 dma_addr, size_t size,
                        int direction)
{
}

/**************************************/
/* NETWORK DRIVER API */

void
_kc_netif_device_attach(struct net_device *dev)
{
        if (netif_running(dev) && netif_queue_stopped(dev)) {
                netif_wake_queue(dev);
        }
}

void
_kc_netif_device_detach(struct net_device *dev)
{
        if (netif_running(dev)) {
                netif_stop_queue(dev);
        }
}

/* 2.2.x net_device carries no carrier flag, so carrier state is tracked
 * per device in the shadow table kept by _kc_alloc_etherdev() */
void
_kc_netif_carrier_on(struct net_device *dev)
{
        int i;

        for (i = 0; i < my_net_count; i++) {
                if (my_net_devices[i].dev == dev) {
                        set_bit(0, &my_net_devices[i].carrier);
                }
        }
}

void
_kc_netif_carrier_off(struct net_device *dev)
{
        int i;

        for (i = 0; i < my_net_count; i++) {
                if (my_net_devices[i].dev == dev) {
                        clear_bit(0, &my_net_devices[i].carrier);
                }
        }
}

int
_kc_netif_carrier_ok(struct net_device *dev)
{
        int i;

        for (i = 0; i < my_net_count; i++) {
                if (my_net_devices[i].dev == dev) {
                        return test_bit(0, &my_net_devices[i].carrier);
                }
        }
        return 0;
}

#endif /* 2.4.0 => 2.2.0 */

/*****************************************************************************/
/* 2.4.6 => 2.4.3 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )

int
_kc_pci_set_power_state(struct pci_dev *dev, int state)
{
        return 0;
}

int
_kc_pci_save_state(struct pci_dev *dev, u32 *buffer)
{
        return 0;
}

int
_kc_pci_restore_state(struct pci_dev *pdev, u32 *buffer)
{
        return 0;
}

int
_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
{
        return 0;
}

void
_kc_pci_disable_device(struct pci_dev *pdev)
{
}

#endif /* 2.4.6 => 2.4.3 */

#ifndef IRQ_HANDLED
#define irqreturn_t void
#define IRQ_HANDLED
#define IRQ_NONE
#endif

#ifndef SET_NETDEV_DEV
#define SET_NETDEV_DEV(net, pdev)
#endif

#ifndef likely
#define likely(x)       (x)
#define unlikely(x)     (x)
#endif

#ifndef HAVE_FREE_NETDEV
#define free_netdev(x)  kfree(x)
#endif

#ifdef HAVE_POLL_CONTROLLER
#define CONFIG_NET_POLL_CONTROLLER
#endif

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
#define pci_name(x)     ((x)->slot_name)
#endif

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
#ifdef CONFIG_E100_NAPI
#ifndef netif_poll_disable
#define netif_poll_disable(x) _kc_netif_poll_disable(x)
static inline void _kc_netif_poll_disable(struct net_device *dev)
{
        while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
                /* No hurry. */
                current->state = TASK_INTERRUPTIBLE;
                schedule_timeout(1);
        }
}
#endif
#ifndef netif_poll_enable
#define netif_poll_enable(x) _kc_netif_poll_enable(x)
static inline void _kc_netif_poll_enable(struct net_device *dev)
{
        clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}
#endif
#endif /* CONFIG_E100_NAPI */
#endif /* < 2.4.23 */
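/*
 * The block below backfills the 2.5.28 workqueue API with the older task
 * queues: work_struct becomes tq_struct, INIT_WORK becomes INIT_TQUEUE,
 * and schedule_work() becomes schedule_task(). An illustrative caller
 * (not from the original header; the example_* names are hypothetical),
 * using the three-argument INIT_WORK of this kernel era:
 */
#if 0   /* example only, never compiled */
static struct work_struct example_task; /* a tq_struct before 2.5.28 */

static void example_action(void *data)
{
        /* deferred work runs in process context under either API */
}

static void example_kick(void)
{
        INIT_WORK(&example_task, example_action, NULL);
        schedule_work(&example_task);   /* schedule_task() before 2.5.28 */
}
#endif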
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
#include <linux/tqueue.h>
#define work_struct tq_struct
#define INIT_WORK INIT_TQUEUE
#define schedule_work schedule_task
#define MODULE_INFO(version, _version)
#endif

#ifndef module_param
/* MODULE_PARM fallback; note this assumes an int parameter */
#define module_param(v,t,p) MODULE_PARM(v, "i");
#endif

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
#endif

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
#define pci_dma_sync_single_for_cpu     pci_dma_sync_single
#define pci_dma_sync_single_for_device  pci_dma_sync_single_for_cpu
#define pci_dma_mapping_error(X)        0
#endif

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
#undef if_mii
#define if_mii _kc_if_mii
static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
{
        return (struct mii_ioctl_data *) &rq->ifr_ifru;
}
#endif /* < 2.6.7 */

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
#define msleep(x)       do { set_current_state(TASK_UNINTERRUPTIBLE); \
                                schedule_timeout((x * HZ)/1000 + 2); \
                        } while (0)
#endif

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) )
#define msleep_interruptible(x) do { set_current_state(TASK_INTERRUPTIBLE); \
                                schedule_timeout((x * HZ)/1000); \
                        } while (0)
#define __iomem
#endif

#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,6) && \
      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
#ifdef pci_save_state
#undef pci_save_state
#endif
#define pci_save_state(X) { \
        int i; \
        if (nic->pm_state) { \
                for (i = 0; i < 16; i++) { \
                        pci_read_config_dword((X), \
                                              i * 4, \
                                              &nic->pm_state[i]); \
                } \
        } \
}
#ifdef pci_restore_state
#undef pci_restore_state
#endif
#define pci_restore_state(X) { \
        int i; \
        if (nic->pm_state) { \
                for (i = 0; i < 16; i++) { \
                        pci_write_config_dword((X), \
                                               i * 4, \
                                               nic->pm_state[i]); \
                } \
        } else { \
                for (i = 0; i < 6; i++) { \
                        pci_write_config_dword((X), \
                                               PCI_BASE_ADDRESS_0 + (i * 4), \
                                               (X)->resource[i].start); \
                } \
                pci_write_config_byte((X), PCI_INTERRUPT_LINE, (X)->irq); \
        } \
}
#endif /* 2.4.6 <= x < 2.6.10 */
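/*
 * Note on the pci_save_state()/pci_restore_state() replacements above:
 * both macros expand "nic->pm_state" textually, so they only compile
 * inside functions where a variable named "nic" with a pm_state array of
 * at least 16 u32 entries is in scope, as in the e100 driver itself;
 * they are not general-purpose shims. A hypothetical expansion site:
 */
#if 0   /* example only, never compiled */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct nic *nic = netdev_priv(pci_get_drvdata(pdev));

        pci_save_state(pdev);   /* reads 16 config dwords into
                                 * nic->pm_state when it is non-NULL */
        return 0;
}
#endif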
#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
     (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
      ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
#define netdev_priv(x) x->priv
#endif

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
#ifdef module_param_array_named
#undef module_param_array_named
#define module_param_array_named(name, array, type, nump, perm) \
        static struct kparam_array __param_arr_##name \
        = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
            sizeof(array[0]), array }; \
        module_param_call(name, param_array_set, param_array_get, \
                          &__param_arr_##name, perm)
#endif /* module_param_array_named */
#endif /* < 2.6.10 */

#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN 2
#endif

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
#define PCI_D0          0
#define PCI_D1          1
#define PCI_D2          2
#define PCI_D3hot       3
#define PCI_D3cold      4
#define pci_choose_state(pdev,state) state
#endif

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
#define pm_message_t u32
#endif

#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
#define E100_USE_SHUTDOWN_HANDLER
#else
#define E100_USE_REBOOT_NOTIFIER
#endif

#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17) )
#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,16) )
#define IRQF_PROBE_SHARED 0
#else
#define IRQF_PROBE_SHARED SA_PROBEIRQ
#endif /* <= 2.6.16 */
#define IRQF_SHARED SA_SHIRQ

#ifndef netdev_alloc_skb
#define netdev_alloc_skb _kc_netdev_alloc_skb
static inline struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
                                                   unsigned int length)
{
        /* 16 == NET_PAD_SKB */
        struct sk_buff *skb;

        skb = alloc_skb(length + 16, GFP_ATOMIC);
        if (likely(skb != NULL)) {
                skb_reserve(skb, 16);
                skb->dev = dev;
        }
        return skb;
}
#endif
#endif /* <= 2.6.17 */

#endif /* _KCOMPAT_H_ */
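/*
 * Illustrative closing note (not part of the original header): with the
 * fallbacks above in place, RX allocation code can use the newer API on
 * any supported kernel; on kernels up to 2.6.17, netdev_alloc_skb()
 * expands to _kc_netdev_alloc_skb(), which reserves 16 bytes of headroom
 * and sets skb->dev just like the kernel's own implementation.
 * example_rx_alloc is a hypothetical caller:
 */
#if 0   /* example only, never compiled */
static struct sk_buff *example_rx_alloc(struct net_device *dev,
                                        unsigned int len)
{
        /* NET_IP_ALIGN falls back to 2 above when the kernel lacks it */
        return netdev_alloc_skb(dev, len + NET_IP_ALIGN);
}
#endif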