📄 rtl_e100.c
字号:
	list_del(&(rx_struct->list_elem));
	/* NOTE(review): this unmaps a *receive* buffer, so the DMA direction
	 * looks like it should be PCI_DMA_FROMDEVICE rather than
	 * PCI_DMA_TODEVICE -- confirm against the site that mapped it. */
	pci_unmap_single(private_data.pdev, rx_struct->dma_addr,
			 sizeof (rfd_t), PCI_DMA_TODEVICE);
	/* Step skb back over the RFD header before freeing; freeing the
	 * advanced pointer would crash the system. */
	rx_struct->skb -= private_data.rfd_size;
	rtl_free(rx_struct->skb);
	kfree(rx_struct);
}

/* Release the pool elements that never made it onto the active list. */
while (!list_empty(&(private_data.rx_struct_pool))) {
	rx_struct = list_entry(private_data.rx_struct_pool.next,
			       struct rx_list_elem, list_elem);
	list_del(&(rx_struct->list_elem));
	kfree(rx_struct);
}
}

/**
 * e100_alloc_rfd_pool - allocate the initial pool of RFD list elements
 *
 * Allocates RxDescriptors rx_list_elem descriptors onto the (file-global)
 * private_data.rx_struct_pool, then calls e100_alloc_skbs() to attach the
 * skbs which hold both the RFD and the data.
 *
 * Returns non-zero if at least one receive buffer ended up on the active
 * list, zero otherwise.
 */
static int
e100_alloc_rfd_pool()
{
	struct rx_list_elem *rx_struct;
	int i;

	INIT_LIST_HEAD(&(private_data.active_rx_list));
	INIT_LIST_HEAD(&(private_data.rx_struct_pool));
	private_data.skb_req = private_data.params.RxDescriptors;
	for (i = 0; i < private_data.skb_req; i++) {
		/* NOTE(review): kmalloc(GFP_ATOMIC) can fail and the result
		 * is linked in unchecked -- a NULL here would corrupt the
		 * pool list.  Worth adding a NULL check. */
		rx_struct = kmalloc(sizeof (struct rx_list_elem), GFP_ATOMIC);
		list_add(&(rx_struct->list_elem), &(private_data.rx_struct_pool));
	}
	e100_alloc_skbs();
	return !list_empty(&(private_data.active_rx_list));
}

/* Reset the adapter's buffer pools: forget the last TCB and free both the
 * RFD (receive) and TCB (transmit) pools. */
void
e100_clear_pools()
{
	private_data.last_tcb = NULL;
	e100_free_rfd_pool();
	e100_free_tcb_pool();
}

/**
 * e100intr - interrupt handler
 * @irq: the IRQ number
 * @regs: registers (unused)
 *
 * This routine is the ISR for the e100 board.  It services the RX & TX
 * queues & starts the RU if it has stopped due to no resources.
*/
unsigned int
e100intr(unsigned int irq, struct pt_regs *regs)
{
	u16 intr_status;

	cli();
	inside_the_interrupt_handler = 1;
	rt_e100_interrupted++;
	/* Diagnostic trace.  rt_e100_interrupted is a counter, so this must
	 * be a logical AND; the original bitwise '&' was false for e.g. a
	 * nesting count of 2 with the flag set to 1. */
	if (rt_e100_writting && rt_e100_interrupted)
		rtl_printf("I've been interrupted %d times\n", rt_e100_interrupted);

	intr_status = readw(&private_data.scb->scb_status);
	/* If not my interrupt, just return -- but undo the bookkeeping and
	 * re-enable interrupts first.  The original early return leaked the
	 * cli() (leaving interrupts disabled system-wide) and left both the
	 * nesting flag and the counter permanently set. */
	if (!(intr_status & SCB_STATUS_ACK_MASK) || (intr_status == 0xffff)) {
		rt_e100_interrupted--;
		inside_the_interrupt_handler = 0;
		sti();
		return 1;
	}

	/* disable and ack intr */
	e100_disable_clear_intr();

	/* do recv work if any */
	if (intr_status & (SCB_STATUS_ACK_FR | SCB_STATUS_ACK_RNR))
		private_data.drv_stats.rx_intr_pkts += e100_rx_srv();

	/* clean up after tx'ed packets */
	if (intr_status & (SCB_STATUS_ACK_CNA | SCB_STATUS_ACK_CX))
		e100_tx_srv();

	e100_set_intr_mask();

	/* We must be sure that we're out of the interrupt handler before
	 * cleanup_modules is executed.  If cleanup_modules is being executed
	 * we must not re-enable the irq, or the system could crash. */
	if (!trying_to_close)
		rtl_hard_enable_irq(private_data.pdev->irq);
	rt_e100_interrupted--;
	sti();
	return (inside_the_interrupt_handler = 0);
}

/**
 * e100_tx_skb_free - free TX skb resources
 * @tcb: TCB whose attached skb (if any) is released
 *
 * Unmaps the first TBD's DMA buffer, frees the skb and clears the TCB's
 * skb pointer so the buffer cannot be freed twice.
 */
static inline void
e100_tx_skb_free(tcb_t *tcb)
{
	if (tcb->tcb_skb) {
		tbd_t *tbd_arr = tcb->tbd_ptr;

		pci_unmap_single(private_data.pdev,
				 le32_to_cpu(tbd_arr->tbd_buf_addr),
				 le16_to_cpu(tbd_arr->tbd_buf_cnt),
				 PCI_DMA_TODEVICE);
		rtl_free(tcb->tcb_skb);
		tcb->tcb_skb = NULL;
	}
}

/**
 * e100_tx_srv - service TX queues
 *
 * This routine services the TX queues.  It reclaims the TCBs & TBDs &
 * other resources used during the transmit of a buffer.  It is called
 * from the ISR.  We don't need a tx_lock since we always access buffers
 * which were already prepared.
*/
void
e100_tx_srv()
{
	tcb_t *tcb;
	int i;

	/* non-tx initiated CNA intr can arrive before e100_setup_tcb_pool()
	 * was called -- in that case there is nothing to reclaim yet */
	if (private_data.last_tcb !=NULL)
		/* go over at most TxDescriptors buffers */
		for (i = 0; i < private_data.params.TxDescriptors; i++) {
			tcb = private_data.tcb_pool.data;
			tcb += private_data.tcb_pool.head;
			rmb();
			/* if the buffer at 'head' is not complete, break */
			if (!(tcb->tcb_hdr.cb_status &
			      __constant_cpu_to_le16(CB_STATUS_COMPLETE))) {
				break;
			}
			/* service next buffer, clear the out of resource condition */
			tcb->tcb_hdr.cb_status = 0;
			/* if we've caught up with 'tail', break */
			if (private_data.tcb_pool.head == private_data.tcb_pool.tail) {
				break;
			}
			private_data.tcb_pool.head =
				NEXT_TCB_TOUSE(private_data.tcb_pool.head);
			mb();
		}
}

/**
 * e100_rx_srv - service RX queue
 *
 * Walks up to RxDescriptors completed RFDs on the (file-global)
 * private_data receive ring, hands each packet to
 * filter_incoming_packet(), reclaims the RFD and restarts the RU if it
 * has stopped.  Returns the number of RFDs processed.
 */
u32
e100_rx_srv()
{
	rfd_t *rfdhead, *rfdprev;	/* new rfd, received rfd */
	int i;
	u16 rfd_statushead;
	unsigned char *skbhead, *skbprev;
	unsigned int data_sz;
	struct rx_list_elem *rx_structhead, *rx_structprev;
	u32 rfd_cnt = 0;

	for (i = 0; i < private_data.params.RxDescriptors; i++) {
		rx_structhead = private_data.rvhead;
		skbhead = rx_structhead->skb;
		/* locate RFD within skb */
		rfdhead = RFD_POINTER(skbhead, private_data);
		/* sync only the RFD header */
		pci_dma_sync_single(private_data.pdev, rx_structhead->dma_addr,
				    private_data.rfd_size, PCI_DMA_FROMDEVICE);
		/* get RFD's status */
		rfd_statushead = le16_to_cpu(rfdhead->rfd_header.cb_status);
		/* does not contain data yet - tidy up the ring edge and exit */
		if (!(rfd_statushead & RFD_STATUS_COMPLETE))
		{
			/* clear the EL bit on the previous RFD */
			rx_structprev = private_data.rvprev;
			skbprev = rx_structprev->skb;
			rfdprev = RFD_POINTER(skbprev, private_data);
			rfdprev->rfd_header.cb_status = 0;
			rfdprev->rfd_header.cb_cmd &=
				__constant_cpu_to_le16((u16) ~RFD_EL_BIT);
			rfdprev->rfd_act_cnt = 0;
			rfdprev->rfd_sz =
__constant_cpu_to_le16(RFD_DATA_SIZE);
			mb();
			private_data.rvprev = PREV_RV_USED(private_data.rvhead);
			/* set the EL bit on the last RFD */
			rx_structprev = private_data.rvprev;
			skbprev = rx_structprev->skb;
			rfdprev = RFD_POINTER(skbprev, private_data);
			rfdprev->rfd_header.cb_status = 0;
			rfdprev->rfd_header.cb_cmd = __constant_cpu_to_le16(RFD_EL_BIT);
			rfdprev->rfd_act_cnt = 0;
			mb();
			break;
		}

		/* actual count is in the low 14 bits; cap to the data area */
		data_sz = min_t(u16, (le16_to_cpu(rfdhead->rfd_act_cnt) & 0x3fff),
				(sizeof (rfd_t) - private_data.rfd_size));
		/* now sync all the data */
		pci_dma_sync_single(private_data.pdev, rx_structhead->dma_addr,
				    (data_sz + private_data.rfd_size),
				    PCI_DMA_FROMDEVICE);
		/* set packet size, excluding checksum (2 last bytes) if it is present */
		if ((private_data.flags & DF_CSUM_OFFLOAD) &&
		    (private_data.rev_id < D102_REV_ID))
			data_sz -= 2;
		/* similar to netif_rx() */
		filter_incoming_packet(skbhead, data_sz);
		mb();

		/* reclaim the dirty RFD for reuse by the hardware */
		rfdhead->rfd_header.cb_status = 0;
		rfdhead->rfd_header.cb_cmd &= __constant_cpu_to_le16((u16) ~RFD_EL_BIT);
		rfdhead->rfd_act_cnt = 0;
		rfdhead->rfd_sz = __constant_cpu_to_le16(RFD_DATA_SIZE);
		private_data.rvhead = NEXT_RV_TOUSE(private_data.rvhead);
		mb();
		rfd_cnt++;
	} /* end of rfd loop */

	/* restart the RU if it has stopped */
	if ((readw(&private_data.scb->scb_status) & SCB_RUS_MASK) != SCB_RUS_READY) {
		rtl_printf(KERN_ERR"***Now e100_start_ru()\n");
		e100_start_ru();
	}
	return rfd_cnt;
}

/**
 * e100_prepare_xmit_buff - prepare a buffer for transmission
 * @buffer: payload to copy into the next TCB's skb
 * @size: payload length in bytes
 *
 * Fills in the next TCB of the (file-global) transmit ring, copies the
 * payload, links the TBD and kicks the CU.  Returns the TCB used, or
 * NULL when the ring is full.
 */
static inline tcb_t *
e100_prepare_xmit_buff(const char *buffer, size_t size)
{
	unsigned char *buff;
	tcb_t *tcb = NULL, *prev_tcb;

	tcb = private_data.tcb_pool.data;
	tcb += TCB_TO_USE(private_data.tcb_pool);
	if (private_data.flags & USE_IPCB) {
		/* checksum offload is left disabled for this frame */
		tcb->tcbu.ipcb.ip_activation_high = IPCB_IP_ACTIVATION_DEFAULT;
		tcb->tcbu.ipcb.ip_schedule &= ~IPCB_TCP_PACKET;
		tcb->tcbu.ipcb.ip_schedule &= ~IPCB_TCPUDP_CHECKSUM_ENABLE;
	}
	tcb->tcb_hdr.cb_status = 0;
tcb->tcb_thrshld = private_data.tx_thld;
	/* suspend the CU after this frame until the next one is chained */
	tcb->tcb_hdr.cb_cmd |= __constant_cpu_to_le16(CB_S_BIT);
	wmb();

	/* Set I (Interrupt) bit on every (TX_FRAME_CNT)th packet */
	if (!(++private_data.tx_count % TX_FRAME_CNT))
		tcb->tcb_hdr.cb_cmd |= __constant_cpu_to_le16(CB_I_BIT);
	else
		/* Clear I bit on other packets */
		tcb->tcb_hdr.cb_cmd &= ~__constant_cpu_to_le16(CB_I_BIT);
	wmb();

	buff = tcb->tcb_skb;
	if (buff)
		memcpy(buff, buffer, size);
	(tcb->tbd_ptr)->tbd_buf_cnt = cpu_to_le16(size);
	tcb->tcb_tbd_num = 1;
	tcb->tcb_tbd_ptr = tcb->tcb_tbd_dflt_ptr;
	wmb();

	/* clear the S-BIT on the previous tcb so the CU runs on to ours */
	prev_tcb = private_data.tcb_pool.data;
	prev_tcb += PREV_TCB_USED(private_data.tcb_pool);
	prev_tcb->tcb_hdr.cb_cmd &= __constant_cpu_to_le16((u16) ~CB_S_BIT);

	/* NOTE(review): this ring-full check runs *after* the TCB and the
	 * previous TCB's S-bit were already modified -- on the full path the
	 * ring state has been touched even though we bail out.  Worth
	 * checking for space before writing. */
	if (private_data.tcb_pool.head == NEXT_TCB_TOUSE(private_data.tcb_pool.tail)) {
		/* no memory for TX */
		rtl_printf(KERN_ERR "\n*** no memory for TX***\n");
		return NULL;
	}
	private_data.tcb_pool.tail = NEXT_TCB_TOUSE(private_data.tcb_pool.tail);
	wmb();
	e100_start_cu( tcb);
	return tcb;
}

/* Changed for 82558 enhancement */
/**
 * e100_start_cu - start the adapter's CU
 * @tcb: TCB to be transmitted
 *
 * This routine issues a CU Start or CU Resume command to the 82558/9.
 * It was added because the prepare_ext_xmit_buff takes advantage of the
 * 82558/9's Dynamic TBD chaining feature and has to start the CU as soon
 * as the first TBD is ready.
 *
 * e100_start_cu must be called while holding the tx_lock !
*/u8e100_start_cu(tcb_t *tcb){ //unsigned long lock_flag; u8 ret = true; switch (private_data.next_cu_cmd) { case RESUME_NO_WAIT: /*last cu command was a CU_RESMUE if this is a 558 or newer we don't need to * wait for command word to clear, we reach here only if we are bachlor */ e100_exec_cmd( SCB_CUC_RESUME); private_data.next_cu_cmd = RESUME_WAIT; break; case RESUME_WAIT: if ((private_data.flags & IS_ICH) && (private_data.cur_line_speed == 10) && (private_data.cur_dplx_mode == HALF_DUPLEX)) { e100_wait_exec_simple( SCB_CUC_NOOP); udelay(1); } if ((e100_wait_exec_simple( SCB_CUC_RESUME)) && (private_data.flags & IS_BACHELOR) && (!(private_data.flags & IS_ICH))) { private_data.next_cu_cmd = RESUME_NO_WAIT; } break; case START_WAIT: // The last command was a non_tx CU command if (!e100_wait_cus_idle()) rtl_printf(KERN_DEBUG "e100: cu_start: timeout waiting for cu\n"); if (!e100_wait_exec_cmplx( (u32) (tcb->tcb_phys), SCB_CUC_START, CB_TRANSMIT)) { rtl_printf(KERN_DEBUG "e100: cu_start: timeout waiting for scb\n"); e100_exec_cmplx( (u32) (tcb->tcb_phys), SCB_CUC_START); ret = false; } //rtl_printf(KERN_ERR "***In e100_start_cu(), START_WAIT mode \n"); private_data.next_cu_cmd = RESUME_WAIT; break; } /* save the last tcb */ private_data.last_tcb = tcb; return ret;}/* ====================================================================== *//* hw *//* ====================================================================== *//** * e100_selftest - perform H/W self test * @private_data: atapter's private data struct * @st_timeout: address to return timeout value, if fails * @st_result: address to return selftest result, if fails * * This routine will issue PORT Self-test command to test the e100. * The self-test will fail if the adapter's master-enable bit is not * set in the PCI Command Register, or if the adapter is not seated * in a PCI master-enabled slot. we also disable interrupts when the * command is completed. 
 *
 * Returns:
 *	true: if adapter passes self_test
 *	false: otherwise
 */
unsigned char
e100_selftest(u32 *st_timeout, u32 *st_result)
{
	u32 selftest_cmd;

	/* initialize the nic state before running test */
	e100_sw_reset( PORT_SOFTWARE_RESET);
	/* Setup the address of the self_test area */
	selftest_cmd = private_data.selftest_phys;
	/* Setup SELF TEST Command Code in D3 - D0 */
	selftest_cmd |= PORT_SELFTEST;
	/* Initialize the self-test signature and results DWORDS */
	private_data.selftest->st_sign = 0;
	private_data.selftest->st_result = 0xffffffff;
	/* Do the port command */
	writel(selftest_cmd, &private_data.scb->scb_port);
	readw(&(private_data.scb->scb_status));	/* flushes last write, read-safe */
	/* Wait at least 10 milliseconds for the self-test to complete */
	mdelay(11);
	/* disable interrupts since they are enabled
	 * after device reset during selftest */
	e100_disable_clear_intr();
	/* If the first self-test DWORD is still zero we've timed out; if
	 * the second DWORD is not zero then we have an error. */
	if ((private_data.selftest->st_sign == 0) ||
	    (private_data.selftest->st_result != 0)) {
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -