/* ins_eth_ocm.c */
pk_free(pkt);
}
else{ //Transfer was successfully setup
info->next_tx_desc++;
if(info->next_tx_desc == ETH_OCM_TX_DESC_COUNT)
info->next_tx_desc = 0;
//See if all descriptors are in use
if(info->next_tx_desc == info->cur_tx_desc)
info->next_tx_desc_rdy = 0;
//Put the packet in the sending queue
putq(&info->sending, (qp)pkt);
}
}
}
#endif // ifndef ETH_OCM_SYNC_TX
#ifdef ETH_OCM_SYNC_TX
/**
 * Raw send function to initiate a transfer to the MAC
 *
 * @param net - NET structure associated with the Opencores MAC instance
 * @param data - pointer to the data payload
 * @param data_bytes - number of payload bytes to transmit
 *
 * @return SUCCESS on success, else a negative value
 */
int eth_ocm_raw_send(NET net, char * data, unsigned data_bytes){
int result;
int i;
unsigned len;
eth_ocm_dev *dev;
eth_ocm_info *info;
eth_ocm_regs *regs;
alt_u8 *buf;
#ifdef UCOS_II
int cpu_sr;
#endif
OS_ENTER_CRITICAL(); //disable interrupts
dev = (eth_ocm_dev *)net->n_local;
info = dev->info;
regs = dev->regs;
len = data_bytes;
result = 0;
//Check to see if someone is nesting send calls (BAD!)
if(info->sem){
dprintf("[eth_ocm_raw_send] ERROR: Nested raw send call\n");
OS_EXIT_CRITICAL();
return ENP_RESOURCE;
}
//Grab the semaphore
info->sem = 1;
// clear bit-31 (the Nios II cache-bypass bit) before handing the buffer to the MAC's DMA
buf = (alt_u8 *)alt_remap_cached( (volatile void *)data, 4);
//advance the pointer beyond the header bias
buf = (alt_u8 *)((unsigned int)buf + ETHHDR_BIAS);
//Some error checks first
if(len < ETH_OCM_MIN_MTU)
result = -1; //packet too small
if(len > ETH_OCM_MAX_MTU)
result = -EFBIG; //packet too big
if(regs->txdescs[0].ctrl & ETH_OCM_TXDESC_READY_MSK)
result = -EBUSY; //DMA not available
if(!result){
//Write pointer to descriptor
regs->txdescs[0].ptr = (unsigned int)buf;
//Write length and setup transfer
regs->txdescs[0].ctrl =
((len << ETH_OCM_TXDESC_LEN_OFST) |
ETH_OCM_TXDESC_READY_MSK |
ETH_OCM_TXDESC_WRAP_MSK |
ETH_OCM_TXDESC_PAD_MSK |
ETH_OCM_TXDESC_CRC_MSK);
//Wait for transfer to complete
i=0;
do{
result = regs->txdescs[0].ctrl;
i++;
}while((result & ETH_OCM_TXDESC_READY_MSK) && i<ETH_OCM_TRANSMIT_TIMEOUT);
//Make sure no timeout occurred
if(i<ETH_OCM_TRANSMIT_TIMEOUT){
if(result &
(ETH_OCM_TXDESC_UR_MSK |
ETH_OCM_TXDESC_RL_MSK |
ETH_OCM_TXDESC_LC_MSK |
ETH_OCM_TXDESC_CS_MSK)){
#if (ETH_OCM_DBG_LVL >= 2)
dprintf("[eth_ocm_raw_send] Transmit error 0x%x\n", result);
#endif // if ETH_OCM_DBG_LVL
result = -EIO; //Some error occurred
}
else{
#if (ETH_OCM_DBG_LVL >= 5)
if(result & ETH_OCM_TXDESC_RTRY_MSK)
dprintf("[eth_ocm_raw_send] Transmit retries: %d\n", (result & ETH_OCM_TXDESC_RTRY_MSK)>>ETH_OCM_TXDESC_RTRY_OFST);
#endif
result = 0;
}
}
else{ //Timeout
result = -ETIMEDOUT;
}
} //End of if(!result) Transmit branch
//Check final result
if(!result){ //Successful transfer
net->n_mib->ifOutOctets += data_bytes; //Increment TX data counter
// we don't know whether it was unicast or not, so we count both in <ifOutUcastPkts>
net->n_mib->ifOutUcastPkts++;
result = SUCCESS;
}
else{ //Failed transfer
#if (ETH_OCM_DBG_LVL >= 2)
dprintf("[eth_ocm_raw_send] Transmit failed, "
"ret=%u, len=%d\n",
result,
len);
#endif // if ETH_OCM_DBG_LVL
net->n_mib->ifOutDiscards++; //increment TX discard counter
result = SEND_DROPPED; // ENP_RESOURCE and SEND_DROPPED have the same value!
}
info->sem = 0; //release semaphore
OS_EXIT_CRITICAL(); //reenable interrupts
return result; //SUCCESS or SEND_DROPPED
}
//End of function eth_ocm_raw_send
#endif // ifdef ETH_OCM_SYNC_TX
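/*
 * Illustrative sketch (not part of the driver): how the synchronous TX path
 * above might be invoked. The helper name and the 'frame'/'frame_len'
 * parameters are assumptions for illustration only; the real caller is the
 * stack's packet-send path. 'frame' must point at a buffer laid out the way
 * this driver expects (Ethernet header starting at frame + ETHHDR_BIAS).
 */
#if 0
static void example_sync_send(NET net, char *frame, unsigned frame_len)
{
    int rc = eth_ocm_raw_send(net, frame, frame_len);
    if (rc != SUCCESS)
        dprintf("example_sync_send: transmit failed (%d)\n", rc);
}
#endif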
/**
* Receive ISR (interrupt service routine)
*
* @param context - context of the Opencores MAC instance
* @param id - IRQ number
*/
void eth_ocm_isr(void *context, alt_u32 id){
eth_ocm_dev *dev;
eth_ocm_regs *regs;
int result;
dev = (eth_ocm_dev *)context;
regs = dev->regs;
//Read the interrupt source
result = regs->int_source;
while(result){
//Clear interrupt flags immediately. Only clear the ones that
//have been set, in case another interrupt has occurred since
//we read the source register.
regs->int_source = result; //clear interrupts
//Check for receive flags
if(result & (ETH_OCM_INT_MASK_RXB_MSK | ETH_OCM_INT_MASK_RXE_MSK)){
//Call the receive function. This will set up a new transfer
eth_ocm_rx_isr(dev);
//Check to see if there is something in the stack's received queue
if ((rcvdq.q_len) > 0){
SignalPktDemux();
}
}
//Check for busy flag
if(result & ETH_OCM_INT_MASK_BUSY_MSK){
#if (ETH_OCM_DBG_LVL >= 3)
dprintf("Frame dropped: too busy to receive\n");
#endif
dev->info->netp->n_mib->ifInDiscards++;
}
#ifndef ETH_OCM_SYNC_TX
//Check for transmit flags
if(result & (ETH_OCM_INT_MASK_TXE_MSK | ETH_OCM_INT_MASK_TXB_MSK)){
eth_ocm_tx_isr(dev);
}
#endif //ifndef ETH_OCM_SYNC_TX
//See if any interrupts have been set
result = regs->int_source;
}
}
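/*
 * Illustrative sketch (not part of the driver): hooking eth_ocm_isr into the
 * Nios II HAL with the legacy interrupt API. It mirrors the
 * alt_irq_register() call that eth_ocm_close() uses below to unregister the
 * handler; the actual registration presumably happens in the driver's
 * init/prep code outside this listing. The helper name is hypothetical.
 */
#if 0
static int example_register_isr(eth_ocm_dev *dev)
{
    /* Pass the device structure as the ISR context so eth_ocm_isr() can
       recover its register block and info structure. */
    return alt_irq_register(dev->irq, dev, eth_ocm_isr);
}
#endif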
/**
* Set up the first receive transfer
*/
static int eth_ocm_read_init(eth_ocm_dev *dev){
eth_ocm_info *info;
eth_ocm_regs *regs;
alt_u8 *buf_ptr;
PACKET *pkts;
alt_u32 temp;
int i;
info = dev->info;
regs = dev->regs;
pkts = info->rx_pkts;
//allocate a packet for every descriptor
for(i=0;i<ETH_OCM_RX_DESC_COUNT;i++){
pkts[i] = pk_alloc(ETH_OCM_BUF_ALLOC_SIZE);
if (!pkts[i]){ // couldn't get a free buffer for rx
dprintf("[eth_ocm_read_init] Fatal error: Unable to allocte ETH_OCM_RX_DESC_COUNT buffers\n");
return ENP_NOBUFFER;
}
// ensure bit-31 of pkt_array[i]->nb_buff is clear before passing
buf_ptr = (alt_u8*)alt_remap_cached ((volatile void*) pkts[i]->nb_buff, 4);
//shift the actual write location over by ETHHDR_BIAS (see ipport.h)
buf_ptr = (alt_u8*)(((unsigned int)buf_ptr) + ETHHDR_BIAS);
if(!(regs->rxdescs[i].ctrl & ETH_OCM_RXDESC_EMPTY_MSK)){
//Write pointer
regs->rxdescs[i].ptr = (alt_u32)buf_ptr;
//Write the control register to start the transfer
temp = ETH_OCM_RXDESC_EMPTY_MSK | ETH_OCM_RXDESC_IRQ_MSK;
if(i == (ETH_OCM_RX_DESC_COUNT - 1))
temp |= ETH_OCM_RXDESC_WRAP_MSK;
regs->rxdescs[i].ctrl = temp;
}
else{
dprintf("[eth_ocm_read_init] Fatal error: RX descriptor unavailable.\n");
dprintf("[eth_ocm_read_init] Descriptor %u = 0x%08x\n", i, (int)regs->rxdescs[i].ctrl);
return ENP_RESOURCE;
}
}
return SUCCESS;
}
//End of function eth_ocm_read_init
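/*
 * Illustrative note (not part of the driver): the EMPTY bit acts as the
 * ownership flag for RX descriptors. Software may only set up a descriptor
 * whose EMPTY bit is clear; writing EMPTY (plus IRQ, and WRAP on the last
 * descriptor) hands it to the MAC, which clears EMPTY again once a frame has
 * been received into it. The helper below merely restates that convention;
 * its name is hypothetical.
 */
#if 0
static int example_rx_desc_owned_by_mac(eth_ocm_regs *regs, int i)
{
    return (regs->rxdescs[i].ctrl & ETH_OCM_RXDESC_EMPTY_MSK) != 0;
}
#endif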
/**
* Receive operation. Checks the status of the received frame.
* Attempt to obtain a new buffer from the InterNiche stack.
* Schedules another RX transfer
*
* @return SUCCESS on success
*/
static int eth_ocm_rx_isr(eth_ocm_dev *dev)
{
eth_ocm_info *info;
eth_ocm_regs *regs;
struct ethhdr *eth;
int pklen;
alt_u32 stat;
alt_u8 cur;
PACKET pkt;
PACKET *pkts;
alt_u8 *buf_ptr;
info = dev->info;
regs = dev->regs;
pkts = info->rx_pkts;
cur = info->cur_rx_desc;
stat = regs->rxdescs[cur].ctrl;
//We'll process as many descriptors as are ready
while(!(stat & ETH_OCM_RXDESC_EMPTY_MSK)){
pklen = stat & ETH_OCM_RXDESC_LEN_MSK;
pklen = pklen >> ETH_OCM_RXDESC_LEN_OFST;
//Increment received byte count
info->netp->n_mib->ifInOctets += (u_long)pklen;
pkts[cur]->nb_prot = pkts[cur]->nb_buff + ETHHDR_SIZE;
pkts[cur]->nb_plen = pklen - (14 + 4); //Packet length minus (14-byte Ethernet header + 4-byte CRC)
pkts[cur]->nb_tstamp = cticks;
pkts[cur]->net = info->netp;
// set packet type for demux routine
eth = (struct ethhdr *)(pkts[cur]->nb_buff + ETHHDR_BIAS);
pkts[cur]->type = eth->e_type;
if (!(stat & ETH_OCM_RXDESC_ERROR_MSK)){
pkt = pk_alloc(ETH_OCM_BUF_ALLOC_SIZE);
if (!pkt){ // couldn't get a free buffer for rx
#if (ETH_OCM_DBG_LVL >= 4)
dprintf("[eth_ocm_rx_isr] No free RX buffers (Swamping the NicheStack)\n");
#endif // if ETH_OCM_DBG_LVL
info->netp->n_mib->ifInDiscards++;
}
else{
putq(&rcvdq, pkts[cur]);
pkts[cur] = pkt;
}
}
else{
#if (ETH_OCM_DBG_LVL >= 3)
dprintf("[eth_ocm_rx_isr] Frame discarded due to errors: 0x%08x!\n", (unsigned)stat);
#endif // if ETH_OCM_DBG_LVL
info->netp->n_mib->ifInDiscards++;
}
// ensure bit-31 of pkt_array[]->nb_buff is clear before passing
// to DMA Driver
buf_ptr = (alt_u8*)alt_remap_cached ((volatile void*) pkts[cur]->nb_buff, 4);
//shift the actual write location over by ETHHDR_BIAS (see ipport.h)
buf_ptr = (alt_u8*)((unsigned int)buf_ptr + ETHHDR_BIAS);
//Write pointer
regs->rxdescs[cur].ptr = (unsigned int)buf_ptr;
//Write the control register to start the transfer
stat = ETH_OCM_RXDESC_EMPTY_MSK | ETH_OCM_RXDESC_IRQ_MSK;
if(cur == (ETH_OCM_RX_DESC_COUNT - 1))
stat |= ETH_OCM_RXDESC_WRAP_MSK;
regs->rxdescs[cur].ctrl = stat;
//increment current descriptor counter
cur++;
if(cur == ETH_OCM_RX_DESC_COUNT)
cur = 0;
//get the next descriptor's status
stat = regs->rxdescs[cur].ctrl;
}
info->cur_rx_desc = cur;
return SUCCESS;
}
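/*
 * Illustrative sketch (not part of the driver): the receive buffer layout
 * implied by eth_ocm_rx_isr() above. The MAC's DMA writes the frame at
 * nb_buff + ETHHDR_BIAS, while nb_prot (the start of the IP payload) is set
 * to nb_buff + ETHHDR_SIZE; both offsets come from ipport.h. The macro names
 * below are hypothetical restatements of that arithmetic, not existing
 * definitions.
 */
#if 0
/* Where the MAC writes the Ethernet header of a received frame. */
#define EXAMPLE_RX_ETH_HDR(pkt)  ((pkt)->nb_buff + ETHHDR_BIAS)
/* Where the IP payload begins, as set in pkts[cur]->nb_prot above. */
#define EXAMPLE_RX_PAYLOAD(pkt)  ((pkt)->nb_buff + ETHHDR_SIZE)
#endif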
void eth_ocm_stats(void *pio, int iface) {
NET ifp;
eth_ocm_dev *dev;
eth_ocm_regs *regs;
int i;
//get the ifp first
ifp = nets[iface];
dev = (eth_ocm_dev *)ifp->n_local;
regs = dev->regs;
#ifndef ETH_OCM_SYNC_TX
ns_printf(pio, "ToSend queue: max:%d, current:%d\n",
dev->info->tosend.q_max,
dev->info->tosend.q_len);
ns_printf(pio, "Sendng queue: max:%d, current:%d\n",
dev->info->sending.q_max,
dev->info->sending.q_len);
#endif //ifndef ETH_OCM_SYNC_TX
ns_printf(pio, "TX Descriptor status:\n");
for(i=0;i<ETH_OCM_TX_DESC_COUNT;i++){
ns_printf(pio," %3d: 0x%08x\n", i, regs->txdescs[i].ctrl);
}
ns_printf(pio, "RX Descriptor status:\n");
for(i=0;i<ETH_OCM_RX_DESC_COUNT;i++){
ns_printf(pio," %3d: 0x%08x\n", i, regs->rxdescs[i].ctrl);
}
}
/**
 * Closes the Opencores MAC interface
 *
 * @param iface index of the NET interface associated with the Opencores MAC.
 * @return SUCCESS, or the error code from alt_irq_register() if the
 *         interrupt handler could not be unregistered
 */
int eth_ocm_close(int iface)
{
int err;
NET ifp;
eth_ocm_dev *dev;
/* status = down */
ifp = nets[iface];
dev = (eth_ocm_dev *)ifp->n_local;
ifp->n_mib->ifAdminStatus = ETH_OCM_STATUS_DOWN;
/* unregister the handler (a null handler disables the interrupt in the HAL) */
err = alt_irq_register (dev->irq, 0, NULL);
if (err){
dprintf("[eth_ocm_close] Could not unregister interrupt, error = %d\n",err);
return err;
}
// Shut down the MAC
IOWR_ETH_OCM_MODER(dev->base, 0);
/* status = down */
ifp->n_mib->ifOperStatus = ETH_OCM_STATUS_DOWN;
//deallocate memory for the eth_ocm_info struct allocated in eth_ocm_prep
free(dev->info->rx_pkts);
free(dev->info);
return SUCCESS;
}
#endif // ifdef ALT_INICHE
//End of file ins_eth_ocm.c