s2io.c
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
	{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
	{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
	{[0 ...(MAX_RX_RINGS - 1)] = 0};

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);

/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
	.err_handler = &s2io_err_handler,
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
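/*
 * S2IO_PARM_INT() (defined earlier in this file) expands, roughly, to a
 * "static unsigned int <name> = <default>;" plus a module_param()
 * registration, so every value above can be overridden at module load
 * time, e.g. "modprobe s2io intr_type=0 napi=0". TXD_MEM_PAGE_CNT() is
 * plain ceiling division; a worked, runnable example is sketched at the
 * end of this listing.
 */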
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;

	struct mac_info *mac_control;
	struct config_param *config;
	unsigned long long mem_allocated = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n",
			  size);
		return -EINVAL;
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kzalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG, "pci_alloc_consistent ");
				DBG_PRINT(INFO_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. ",
					  dev->name);
				DBG_PRINT(INIT_DBG,
					  "Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(INFO_DBG,
						  "failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
		return -ENOMEM;
	mem_allocated += (size * sizeof(u64));

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was allocated till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info) *
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += (sizeof(struct rxd_info) *
					  rxd_count[nic->rxd_mode]);
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
						blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
						blk_cnt].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
					GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			mem_allocated += (sizeof(struct buffAdd *) * blk_cnt);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						(rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				mem_allocated += (sizeof(struct buffAdd) *
						  (rxd_count[nic->rxd_mode] + 1));
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					ba->ba_0_org = (void *) kmalloc
						(BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated +=
						(BUF0_LEN + ALIGN_SIZE);
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
						(BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated +=
						(BUF1_LEN + ALIGN_SIZE);
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
		(nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was allocated till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
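/*
 * Note on the 2BUFF alignment idiom in init_shared_mem(): assuming
 * ALIGN_SIZE is an (alignment - 1) mask (127 in this driver's headers),
 * "tmp += ALIGN_SIZE; tmp &= ~ALIGN_SIZE;" rounds a kmalloc()ed address
 * up to the next 128-byte boundary, while the unaligned ba_0_org and
 * ba_1_org pointers are kept so the allocations can be kfree()d later.
 */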
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function frees all memory allocated by
 * init_shared_mem() and returns it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	u32 ufo_size = 0;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;

	if (!nic)
		return;

	dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		ufo_size += config->tx_cfg[i].fifo_len;
		page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
					    lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
			    list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA addr. ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				PAGE_SIZE;
		}
		kfree(mac_control->fifos[i].list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			(nic->config.tx_cfg[i].fifo_len *
			 sizeof(struct list_info_hold));
	}

	size = SIZE_OF_BLOCK;
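/*
 * Standalone, userspace sketch (not part of the driver; all values below
 * are hypothetical) demonstrating the two arithmetic idioms the functions
 * above rely on: TXD_MEM_PAGE_CNT() ceiling division for per-fifo page
 * counts, and the add-then-mask round-up used to align ba_0/ba_1.
 */
#include <stdio.h>

#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
#define ALIGN_SIZE 127UL	/* assumption: (alignment - 1) mask for 128-byte alignment */

int main(void)
{
	unsigned long tmp = 0x1001UL;	/* deliberately misaligned "address" */

	/* 100 TxD lists at 16 lists per page -> ceil(100 / 16) = 7 pages */
	printf("pages needed: %lu\n", TXD_MEM_PAGE_CNT(100UL, 16UL));

	/* Round up to the next 128-byte boundary: 0x1001 -> 0x1080 */
	tmp += ALIGN_SIZE;
	tmp &= ~ALIGN_SIZE;
	printf("aligned address: 0x%lx\n", tmp);
	return 0;
}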