📄 hwmtm.c
字号:
	queue = smc->os.hwm.tx_p ;

	NDD_TRACE("THfB",virt,len,frame_status) ;
	/* Bug fix: AF / May 31 1999 (#missing)
	 * snmpinfo problem reported by IBM is caused by invalid
	 * t-pointer (txd) if LAN_TX is not set but LOC_TX only.
	 * Set: t = queue->tx_curr_put  here !
	 */
	t = queue->tx_curr_put ;

	DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ;
	if (frame_status & LAN_TX) {
		/* '*t' is already defined */
		DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
		t->txd_virt = virt ;
		t->txd_txdscr = AIX_REVERSE(smc->os.hwm.tx_descr) ;
		t->txd_tbadr = AIX_REVERSE(phys) ;
		/* Build the buffer control word: frag flags in the top bits,
		 * length in the low bits, then hand ownership to the BMU.
		 */
		tbctrl = AIX_REVERSE((((u_long)frame_status &
			(FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
			BMU_OWN|BMU_CHECK |len) ;
		t->txd_tbctrl = tbctrl ;
#ifndef	AIX
		/* flush the descriptor to memory BEFORE kicking the BMU */
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		outpd(queue->tx_bmu_ctl,CSR_START) ;
#else	/* ifndef AIX */
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		if (frame_status & QUEUE_A0) {
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
		}
		else {
			outpd(ADDR(B0_XS_CSR),CSR_START) ;
		}
#endif
		queue->tx_free-- ;
		queue->tx_used++ ;
		queue->tx_curr_put = t->txd_next ;
		if (frame_status & LAST_FRAG) {
			smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		}
	}
	if (frame_status & LOC_TX) {
		/* Local loopback: copy the frame into an SMT MBuf and feed
		 * it back into the SMT receive path (no wire involved).
		 */
		DB_TX("LOC_TX: ",0,0,3) ;
		if (frame_status & FIRST_FRAG) {
			if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_TX("No SMbuf; transmit terminated",0,0,4) ;
			}
			else {
				/* -1: presumably reserves room for the FC
				 * byte ahead of the data — TODO confirm
				 * against smt_send_mbuf below.
				 */
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
#ifdef USE_OS_CPY
#ifdef PASS_1ST_TXD_2_TX_COMP
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif
#endif
			}
		}
		if (smc->os.hwm.tx_mb) {
#ifndef	USE_OS_CPY
			DB_TX("copy fragment into MBuf ",0,0,3) ;
			memcpy(smc->os.hwm.tx_data,virt,len) ;
			smc->os.hwm.tx_data += len ;
#endif
			if (frame_status & LAST_FRAG) {
#ifdef	USE_OS_CPY
#ifndef PASS_1ST_TXD_2_TX_COMP
				/*
				 * hwm_cpy_txd2mb(txd,data,len) copies 'len'
				 * bytes from the virtual pointer in 'rxd'
				 * to 'data'. The virtual pointer of the
				 * os-specific tx-buffer should be written
				 * in the LAST txd.
				 */
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif	/* nPASS_1ST_TXD_2_TX_COMP */
#endif	/* USE_OS_CPY */
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
				*(char *)smc->os.hwm.tx_mb->sm_data =
					*smc->os.hwm.tx_data ;
				smc->os.hwm.tx_data++ ;
				smc->os.hwm.tx_mb->sm_len =
					smc->os.hwm.tx_len - 1 ;
				DB_TX("pass LLC frame to SMT ",0,0,3) ;
				smt_received_pack(smc,smc->os.hwm.tx_mb,
					RD_FS_LOCAL) ;
			}
		}
	}
	NDD_TRACE("THfE",t,queue->tx_free,0) ;
}

/*
 * Queues a received (looped-back) frame for a later indication to the
 * LLC layer.  Appends 'mb' to the llc_rx FIFO and, when we are not
 * already inside the ISR, forces a timer IRQ so the queue gets drained.
 */
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb)
{
	DB_GEN("queue_llc_rx: mb = %x",(void *)mb,0,4) ;
	smc->os.hwm.queued_rx_frames++ ;
	mb->sm_next = (SMbuf *)NULL ;
	if (smc->os.hwm.llc_rx_pipe == 0) {
		smc->os.hwm.llc_rx_pipe = mb ;
	}
	else {
		smc->os.hwm.llc_rx_tail->sm_next = mb ;
	}
	smc->os.hwm.llc_rx_tail = mb ;

	/*
	 * force a timer IRQ to receive the data
	 */
	if (!smc->os.hwm.isr_flag) {
		smt_force_irq(smc) ;
	}
}

/*
 * Gets an SMbuf from the llc_rx queue.
 * Returns the head of the FIFO, or NULL when the queue is empty.
 */
static SMbuf *get_llc_rx(struct s_smc *smc)
{
	SMbuf	*mb ;

	if ((mb = smc->os.hwm.llc_rx_pipe)) {
		smc->os.hwm.queued_rx_frames-- ;
		smc->os.hwm.llc_rx_pipe = mb->sm_next ;
	}
	DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ;
	return(mb) ;
}

/*
 * Queues a transmit SMT MBuf for the time the MBuf is
 * queued in the TxD ring (it is freed again in mac_drv_clear_txd()
 * once the adapter has finished the descriptor).
 */
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb)
{
	DB_GEN("_rx: queue_txd_mb = %x",(void *)mb,0,4) ;
	smc->os.hwm.queued_txd_mb++ ;
	mb->sm_next = (SMbuf *)NULL ;
	if (smc->os.hwm.txd_tx_pipe == 0) {
		smc->os.hwm.txd_tx_pipe = mb ;
	}
	else {
		smc->os.hwm.txd_tx_tail->sm_next = mb ;
	}
	smc->os.hwm.txd_tx_tail = mb ;
}

/*
 * Gets an SMbuf from the txd_tx queue.
 * Returns the head of the FIFO, or NULL when the queue is empty.
 */
static SMbuf *get_txd_mb(struct s_smc *smc)
{
	SMbuf *mb ;

	if ((mb = smc->os.hwm.txd_tx_pipe)) {
		smc->os.hwm.queued_txd_mb-- ;
		smc->os.hwm.txd_tx_pipe = mb->sm_next ;
	}
	DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ;
	return(mb) ;
}

/*
 * SMT Send function: transmits an SMT frame held in 'mb' with frame
 * control 'fc'.  The frame is split at SMT_PAGESIZE boundaries into at
 * most 3 fragments, queued on the async send queue (LAN_TX) and/or
 * looped back to the local SMT receive path (LOC_TX), depending on the
 * frame class and the pass_NSA/pass_SMT filters.
 */
void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
{
	char far *data ;
	int	len ;
	int	n ;
	int	i ;
	int	frag_count ;
	int	frame_status ;
	SK_LOC_DECL(char far,*virt[3]) ;
	int	frag_len[3] ;
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t ;
	u_long	phys ;
	u_int	tbctrl ;

	NDD_TRACE("THSB",mb,fc,0) ;
	DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;

	mb->sm_off-- ;	/* set to fc */
	mb->sm_len++ ;	/* + fc */
	data = smtod(mb,char *) ;
	*data = fc ;
	if (fc == FC_SMT_LOC)
		*data = FC_SMT_INFO ;

	/*
	 * determine the frag count and the virt addresses of the frags
	 * (one fragment per SMT_PAGESIZE-aligned piece of the buffer)
	 */
	frag_count = 0 ;
	len = mb->sm_len ;
	while (len) {
		n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ;
		if (n >= len) {
			n = len ;
		}
		DB_TX("frag: virt/len = 0x%x/%d ",(void *)data,n,5) ;
		virt[frag_count] = data ;
		frag_len[frag_count] = n ;
		frag_count++ ;
		len -= n ;
		data += n ;
	}

	/*
	 * determine the frame status:
	 * beacons and FC_SMT_LOC go local only; NSA/INFO frames may
	 * additionally be looped back when the corresponding filter is set
	 */
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	if (fc == FC_BEACON || fc == FC_SMT_LOC) {
		frame_status = LOC_TX ;
	}
	else {
		frame_status = LAN_TX ;
		if ((smc->os.hwm.pass_NSA &&(fc == FC_SMT_NSA)) ||
		    (smc->os.hwm.pass_SMT &&(fc == FC_SMT_INFO)))
			frame_status |= LOC_TX ;
	}

	/* ring down or no room in the TxD ring: drop the LAN part;
	 * when nothing at all is left to do, free the MBuf and bail out
	 */
	if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
		frame_status &= ~LAN_TX;
		if (frame_status) {
			DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
		}
		else {
			DB_TX("Ring is down: terminate transmission",0,0,2) ;
			smt_free_mbuf(smc,mb) ;
			return ;
		}
	}
	DB_TX("frame_status = 0x%x ",frame_status,0,5) ;

	/* MBuf is consumed twice (LAN and local) — bump the use count */
	if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
		mb->sm_use_count = 2 ;
	}

	if (frame_status & LAN_TX) {
		t = queue->tx_curr_put ;
		frame_status |= FIRST_FRAG ;
		for (i = 0; i < frag_count; i++) {
			DB_TX("init TxD = 0x%x",(void *)t,0,5) ;
			if (i == frag_count-1) {
				frame_status |= LAST_FRAG ;
				t->txd_txdscr = AIX_REVERSE(TX_DESCRIPTOR |
					(((u_long)(mb->sm_len-1)&3) << 27)) ;
			}
			t->txd_virt = virt[i] ;
			phys = dma_master(smc, (void far *)virt[i],
				frag_len[i], DMA_RD|SMT_BUF) ;
			t->txd_tbadr = AIX_REVERSE(phys) ;
			tbctrl = AIX_REVERSE((((u_long) frame_status &
				(FIRST_FRAG|LAST_FRAG)) << 26) |
				BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
			t->txd_tbctrl = tbctrl ;
#ifndef	AIX
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(queue->tx_bmu_ctl,CSR_START) ;
#else
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
#endif
			frame_status &= ~FIRST_FRAG ;
			queue->tx_curr_put = t = t->txd_next ;
			queue->tx_free-- ;
			queue->tx_used++ ;
		}
		smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		/* keep the MBuf alive until the adapter is done with it */
		queue_txd_mb(smc,mb) ;
	}

	if (frame_status & LOC_TX) {
		DB_TX("pass Mbuf to LLC queue",0,0,5) ;
		queue_llc_rx(smc,mb) ;
	}

	/*
	 * We need to unqueue the free SMT_MBUFs here, because it may
	 * be that the SMT want's to send more than 1 frame for one down call
	 */
	mac_drv_clear_txd(smc) ;
	NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
}

/* BEGIN_MANUAL_ENTRY(mac_drv_clear_txd)
 *	void mac_drv_clear_txd(smc)
 *
 * function	DOWNCALL	(hardware module, hwmtm.c)
 *	mac_drv_clear_txd searches in both send queues for TxD's
 *	which were finished by the adapter. It calls dma_complete
 *	for each TxD. If the last fragment of an LLC frame is
 *	reached, it calls mac_drv_tx_complete to release the
 *	send buffer.
 *
 * return	nothing
 *
 * END_MANUAL_ENTRY
 */
static void mac_drv_clear_txd(struct s_smc *smc)
{
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t1 ;
	struct s_smt_fp_txd volatile *t2 = NULL ;
	SMbuf *mb ;
	u_long	tbctrl ;
	int i ;
	int frag_count ;
	int n ;

	NDD_TRACE("THcB",0,0,0) ;
	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t1 = queue->tx_curr_get ;
		DB_TX("clear_txd: QUEUE = %d (0=sync/1=async)",i,0,5) ;

		/* each pass of the outer loop handles one complete frame */
		for ( ; ; ) {
			frag_count = 0 ;

			/* scan forward until the EOF fragment; stop at the
			 * first descriptor the adapter still owns
			 */
			do {
				DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
				DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
				tbctrl = CR_READ(t1->txd_tbctrl) ;
				tbctrl = AIX_REVERSE(tbctrl) ;

				if (tbctrl & BMU_OWN || !queue->tx_used){
					DB_TX("End of TxDs queue %d",i,0,4) ;
					goto free_next_queue ;	/* next queue */
				}
				t1 = t1->txd_next ;
				frag_count++ ;
			} while (!(tbctrl & BMU_EOF)) ;

			/* second pass: unmap every fragment of the frame;
			 * t2 ends up pointing at the frame's LAST TxD
			 */
			t1 = queue->tx_curr_get ;
			for (n = frag_count; n; n--) {
				tbctrl = AIX_REVERSE(t1->txd_tbctrl) ;
				dma_complete(smc,
					(union s_fp_descr volatile *) t1,
					(int) (DMA_RD |
					((tbctrl & BMU_SMT_TX) >> 18))) ;
				t2 = t1 ;
				t1 = t1->txd_next ;
			}

			/* SMT frames release their MBuf; LLC frames are
			 * completed through the os-specific callback
			 */
			if (tbctrl & BMU_SMT_TX) {
				mb = get_txd_mb(smc) ;
				smt_free_mbuf(smc,mb) ;
			}
			else {
#ifndef PASS_1ST_TXD_2_TX_COMP
				DB_TX("mac_drv_tx_comp for TxD 0x%x",t2,0,4) ;
				mac_drv_tx_complete(smc,t2) ;
#else
				DB_TX("mac_drv_tx_comp for TxD 0x%x",
					queue->tx_curr_get,0,4) ;
				mac_drv_tx_complete(smc,queue->tx_curr_get) ;
#endif
			}
			queue->tx_curr_get = t1 ;
			queue->tx_free += frag_count ;
			queue->tx_used -= frag_count ;
		}
free_next_queue: ;
	}
	NDD_TRACE("THcE",0,0,0) ;
}

/*
 * BEGINN_MANUAL_ENTRY(mac_drv_clear_tx_queue)
 *
 * void mac_drv_clear_tx_queue(smc)
 * struct s_smc *smc ;
 *
 * function	DOWNCALL	(hardware module, hwmtm.c)
 *	mac_drv_clear_tx_queue is called from the SMT when
 *	the RMT state machine has entered the ISOLATE state.
 *	This function is also called by the os-specific module
 *	after it has called the function card_stop().
 *	In this case, the frames in the send queues are obsolete and
 *	should be removed.
 *
 * note	calling	sequence:
 *	CLI_FBI(), card_stop(),
 *	mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(),
 *
 * NOTE: The caller is responsible that the BMUs are idle
 *	when this function is called.
 *
 * END_MANUAL_ENTRY
 */
void mac_drv_clear_tx_queue(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	int tx_used ;
	int i ;

	/* must only run with the hardware stopped — see NOTE above */
	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0011,HWM_E0011_MSG) ;
		return ;
	}

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		DB_TX("clear_tx_queue: QUEUE = %d (0=sync/1=async)",i,0,5) ;

		/*
		 * switch the OWN bit of all pending frames to the host
		 * so mac_drv_clear_txd() below will reap them
		 */
		t = queue->tx_curr_get ;
		tx_used = queue->tx_used ;
		while (tx_used) {
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
			DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
			t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ;
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			t = t->txd_next ;
			tx_used-- ;
		}
	}

	/*
	 * release all TxD's for both send queues
	 */
	mac_drv_clear_txd(smc) ;

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t = queue->tx_curr_get ;

		/*
		 * write the phys pointer of the NEXT descriptor into the
		 * BMU's current address descriptor pointer and set
		 * tx_curr_get and tx_curr_put to this position
		 */
		if (i == QUEUE_S) {
			outpd(ADDR(B5_XS_DA),AIX_REVERSE(t->txd_ntdadr)) ;
		}
		else {
			outpd(ADDR(B5_XA_DA),AIX_REVERSE(t->txd_ntdadr)) ;
		}

		queue->tx_curr_put = queue->tx_curr_get->txd_next ;
		queue->tx_curr_get = queue->tx_curr_put ;
	}
}

/*
	-------------------------------------------------------------
	TEST FUNCTIONS:
	-------------------------------------------------------------
*/

#ifdef	DEBUG
/*
 * BEGIN_MANUAL_ENTRY(mac_drv_debug_lev)
 *	void mac_drv_debug_lev(smc,flag,lev)
 *
 * function	DOWNCALL	(drvsr.c)
 *	To get a special debug info the user can assign a debug level
 *	to any debug flag.
* * para flag debug flag, possible values are: * = 0: reset all debug flags (the defined level is * ignored) * = 1: debug.d_smtf * = 2: debug.d_smt * = 3: debug.d_ecm * = 4: debug.d_rmt * = 5: debug.d_cfm * = 6: debug.d_pcm * * = 10: debug.d_os.hwm_rx (hardware module receive path) * = 11: debug.d_os.hwm_tx(hardware module transmit path) * = 12: debug.d_os.hwm_gen(hardware module general flag) * * lev debug level * * END_MANUAL_ENTRY */void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev){ switch(flag) { case (int)NULL: DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ; DB_P.d_cfm = 0 ; DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ;#ifdef SBA DB_P.d_sba = 0 ;#endif#ifdef ESS DB_P.d_ess = 0 ;#endif break ; case DEBUG_SMTF: DB_P.d_smtf = lev ; break ; case DEBUG_SMT: DB_P.d_smt = lev ; break ; case DEBUG_ECM: DB_P.d_ecm = lev ; break ; case DEBUG_RMT: DB_P.d_rmt = lev ; break ; case DEBUG_CFM: DB_P.d_cfm = lev ; break ; case DEBUG_PCM: DB_P.d_pcm = lev ; break ; case DEBUG_SBA:#ifdef SBA DB_P.d_sba = lev ;#endif break ; case DEBUG_ESS:#ifdef ESS DB_P.d_ess = lev ;#endif break ; case DB_HWM_RX: DB_P.d_os.hwm_rx = lev ; break ; case DB_HWM_TX: DB_P.d_os.hwm_tx = lev ; break ; case DB_HWM_GEN: DB_P.d_os.hwm_gen = lev ; break ; default: break ; }}#endif
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -