/* ve100.c */
/*
 * Bring the controller to a known, configured state: reset, PHY init,
 * load CU/RU base addresses, push the Configure and Individual Address
 * command blocks, point the statistics dump area at our stats buffer,
 * and kick off a first dump-and-reset of the counters.
 * Returns 0 on success or the first failing step's negative error code.
 * Interrupts are left disabled on exit; the caller enables them.
 */
static int ve100_hw_init(struct nic* nic)
{
int ret;
ve100_hw_reset(nic);
printk("hardware init!\n");
/* the PHY must be up before any CU/RU commands are issued */
if((ret = ve100_phy_init(nic)))
return ret;
/* CUC load base (01000000b): set the Command Unit base address to 0 */
if((ret = ve100_exec_cmd(nic,cuc_load_base,0)))
return ret;
/* RUC load base (00000110b): set the Receive Unit base address to 0 */
if((ret = ve100_exec_cmd(nic,ruc_load_base,0)))
return ret;
/* queue the Configure command block */
if((ret = ve100_exec_cb(nic,NULL,ve100_configure)))
return ret;
/* queue the Individual Address Setup block (programs the MAC address) */
if((ret = ve100_exec_cb(nic,NULL,ve100_setup_iaaddr)))
return ret;
/* Load Dump Counters Address (01000000b): directs the device where to
 * write dump data for the Dump Statistical Counters / Dump and Reset
 * Statistical Counters commands. Must be executed at least once before
 * either dump command; the dump-area address goes in the general
 * pointer register (here: the DMA address of our stats block). */
if((ret = ve100_exec_cmd(nic,cuc_dump_addr,nic->dma_addr + offsetof(struct mem,stats))))
return ret;
/* Dump and Reset Statistical Counters (01110000b): dump the counters to
 * the area set above, then clear them so later dumps report deltas. */
if((ret = ve100_exec_cmd(nic,cuc_dump_reset,0)))
return ret;
ve100_disable_irq(nic);
return 0;
}
/*
 * Fold the device's statistical-counters dump area into net_device_stats
 * and the driver's private counters, then queue the next dump-and-reset
 * so the hardware counters keep accumulating as deltas.
 */
static void ve100_update_stats(struct nic* nic)
{
struct net_device_stats* ns = &nic->net_stats;
struct stats* s = &nic->mem->stats;
/* The completion marker sits right after the last counter this MAC
 * revision dumps, so its location varies with the revision. */
u32* complete = (nic->mac < mac_82558_D101_A4 ? &s->fc_xmt_pause :
(nic->mac < mac_82559_D101M) ? (u32*)&s->xmt_tco_frames : &s->complete);
/* only consume the dump once the device signals dump-and-reset done */
if(*complete == le32_to_cpu(cuc_dump_reset_complete))
{
/* clear the marker so a half-written next dump isn't mistaken as done */
*complete = 0;
nic->tx_frames = le32_to_cpu(s->tx_good_frames);
nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
ns->tx_window_errors += le32_to_cpu(s->tx_max_collisions);
ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
ns->collisions += nic->tx_collisions;
ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + le32_to_cpu(s->tx_lost_crs);
ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) + nic->rx_over_length_errors;
ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
le32_to_cpu(s->rx_alignment_errors) +
le32_to_cpu(s->rx_short_frame_errors) +
le32_to_cpu(s->rx_cdt_errors);
nic->tx_deferred += le32_to_cpu(s->tx_deferred);
nic->tx_single_collisions += le32_to_cpu(s->tx_single_collisions);
nic->tx_multiple_collisions += le32_to_cpu(s->tx_multiple_collisions);
/* 82558 D101 A4 and later also dump flow-control pause counters */
if(nic->mac >= mac_82558_D101_A4)
{
nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
nic->rx_fc_unsupported += le32_to_cpu(s->fc_rcv_unsupported);
/* 82559 D101M and later additionally dump 16-bit TCO frame counts */
if(nic->mac >= mac_82559_D101M)
{
nic->tx_tco_frames += le16_to_cpu(s->xmt_tco_frames);
nic->rx_tco_frames += le16_to_cpu(s->rcv_tco_frames);
}
}
}
/* start the next dump-and-reset cycle */
ve100_exec_cmd(nic,cuc_dump_reset,0);
}
/* net_device open hook: bring the adapter up, reporting any failure. */
static int ve100_open(struct net_device* ndev)
{
	struct nic *priv = netdev_priv(ndev);
	int err;

	/* no link yet: keep carrier down until the hardware reports one */
	netif_carrier_off(ndev);

	err = ve100_start(priv);
	if (err)
		printk("Can't open interface, exit!\n");
	return err;
}
/* net_device stop hook: shut the adapter down. Always succeeds. */
static int ve100_close(struct net_device* ndev)
{
	struct nic *priv = netdev_priv(ndev);

	ve100_end(priv);
	return 0;
}
/* net_device get_stats hook: hand back the counters we maintain. */
static struct net_device_stats *ve100_get_stats(struct net_device* ndev)
{
	struct nic *priv = netdev_priv(ndev);

	return &priv->net_stats;
}
/*
 * net_device set_mac_address hook: validate the requested address,
 * store it in dev_addr, and queue an Individual Address Setup command
 * so the hardware filter matches the new address.
 */
static int ve100_set_mac_address(struct net_device* ndev,void* data)
{
	struct nic *priv = netdev_priv(ndev);
	struct sockaddr *sa = data;

	/* reject zero/multicast addresses */
	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
	/* push the new individual address down to the controller */
	ve100_exec_cb(priv, NULL, ve100_setup_iaaddr);
	return 0;
}
/*
 * net_device change_mtu hook: accept only standard Ethernet payload
 * sizes (ETH_ZLEN..ETH_DATA_LEN); reject anything else with -EINVAL.
 */
static int ve100_change_mtu(struct net_device* ndev,int newptu)
{
	if (newptu > ETH_DATA_LEN || newptu < ETH_ZLEN)
		return -EINVAL;

	ndev->mtu = newptu;
	return 0;
}
/* net_device ioctl hook: delegate MII register access to the generic MII layer. */
static int ve100_do_ioctl(struct net_device* ndev,struct ifreq* ifr,int cmd)
{
	struct nic *priv = netdev_priv(ndev);

	return generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL);
}
/*
 * Allocate the DMA-coherent shared memory block (struct mem) used for
 * command/statistics traffic with the device. Returns 0 or -ENOMEM.
 */
static int ve100_alloc(struct nic* nic)
{
	void *shared = pci_alloc_consistent(nic->pdev, sizeof(struct mem), &nic->dma_addr);

	if (!shared)
		return -ENOMEM;
	nic->mem = shared;
	return 0;
}
/* Release the DMA-coherent shared memory block; safe to call twice. */
static void ve100_free(struct nic* nic)
{
	if (!nic->mem)
		return;

	pci_free_consistent(nic->pdev, sizeof(struct mem), nic->mem, nic->dma_addr);
	nic->mem = NULL; /* guard against double free */
}
/*
 * net_device tx_timeout hook. Runs in atomic context, so recovery is
 * deferred to the nic's tx_timeout_task work item in process context.
 */
static void ve100_tx_timeout(struct net_device* ndev)
{
	struct nic *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}
/*
 * Old-style NAPI poll: clean received frames up to the smaller of the
 * device quota and the global *budget, and reap completed transmits.
 * Returns 1 (stay in polled mode) while work may remain, or 0 once
 * done — in which case polling ends and chip interrupts are re-enabled.
 */
static int ve100_poll(struct net_device* ndev,int *budget)
{
unsigned int worktodo = min(ndev->quota,*budget);
unsigned int workdone;
int tx_cleaned;
struct nic* nic = netdev_priv(ndev);
workdone = 0;
ve100_rx_clean(nic,&workdone,worktodo);
tx_cleaned = ve100_tx_clean(nic);
/* no rx progress and no tx reaped (or device going down): finished */
if(!netif_running(ndev) || ((!tx_cleaned) && (workdone ==0)))
{
netif_rx_complete(ndev);
ve100_enable_irq(nic);
return 0;
}
/* charge the work done against both the device quota and the budget */
ndev->quota = ndev->quota - workdone;
*budget = *budget-workdone;
return 1;
}
/*
 * net_device hard_start_xmit hook: queue one skb as a transmit command
 * block. On resource exhaustion the tx queue is stopped and 1 is
 * returned so the stack requeues the skb; 0 means accepted.
 */
static int ve100_xmit_frame(struct sk_buff* skb, struct net_device* ndev)
{
	struct nic *priv = netdev_priv(ndev);
	int err;

	if (priv->flags & ich_10h_workaround) {
		/* ICH workaround: issue a NOP command (which does not affect
		 * the unit's state) and pause briefly before the real send. */
		if (ve100_exec_cmd(priv, cuc_nop, 0))
			printk("execute nop command failed!\n");
		udelay(1);
	}

	err = ve100_exec_cb(priv, skb, ve100_xmit_prepare);
	if (err == -ENOSPC || err == -ENOMEM) {
		printk("No space or out of tx resources !\n");
		netif_stop_queue(ndev);
		return 1;
	}

	ndev->trans_start = jiffies;
	return 0;
}
/*
 * Reap command blocks the hardware has marked complete: account the
 * transmitted skbs, unmap their DMA buffers, free them, and return the
 * blocks to the available pool. Returns nonzero if any skb was freed,
 * and restarts the tx queue if it had been stopped for lack of cbs.
 */
static int ve100_tx_clean(struct nic* nic)
{
int tx_cleaned = 0;
struct cb* cb;
spin_lock(&nic->cb_lock);
/* walk forward from the oldest in-flight cb while hw reports complete */
for(cb = nic->cb_to_clean; cb->status & cpu_to_le16(cb_complete);cb = nic->cb_to_clean = cb->next)
{
/* cbs without an skb (e.g. non-transmit commands) need no accounting */
if(likely(cb->skb != NULL))
{
nic->net_stats.tx_packets++;
nic->net_stats.tx_bytes += cb->skb->len;
pci_unmap_single(nic->pdev,le32_to_cpu(cb->u.tcb.tbd.buf_addr),
le16_to_cpu(cb->u.tcb.tbd.size),PCI_DMA_TODEVICE);
dev_kfree_skb_any(cb->skb);
cb->skb = NULL;
tx_cleaned = 1;
}
/* block is reusable again */
cb->status = 0;
nic->cbs_avail++;
}
spin_unlock(&nic->cb_lock);
/* resources were freed: let the stack queue packets again */
if(unlikely(tx_cleaned && netif_queue_stopped(nic->ndev)))
netif_wake_queue(nic->ndev);
return tx_cleaned;
}
/*
 * Tear down the command-block ring: unmap and free any skbs still
 * attached to unreclaimed cbs, release the DMA-coherent ring memory,
 * and reset the ring bookkeeping so the next command will (re)start
 * the CU with cuc_start rather than resume it.
 */
static void ve100_clean_cbs(struct nic* nic)
{
if(nic->cbs)
{
/* reclaim every cb still outstanding (avail < total means in use) */
while(nic->cbs_avail != nic->params.cbs.count)
{
struct cb* cb = nic->cb_to_clean;
if(cb->skb)
{
pci_unmap_single(nic->pdev,le32_to_cpu(cb->u.tcb.tbd.buf_addr),
le16_to_cpu(cb->u.tcb.tbd.size),PCI_DMA_TODEVICE);
dev_kfree_skb(cb->skb);
}
nic->cb_to_clean = nic->cb_to_clean->next;
nic->cbs_avail++;
}
pci_free_consistent(nic->pdev,sizeof(struct cb) * nic->params.cbs.count,nic->cbs,nic->cbs_dma_addr);
nic->cbs = NULL;
nic->cbs_avail = 0;
}
/* next command must (re)start the CU */
nic->cuc_cmd = cuc_start;
/* nic->cbs is NULL here (or was never allocated), so reset all cursors */
nic->cbs_to_use = nic->cbs_to_send = nic->cb_to_clean = nic->cbs;
}
/*
 * Allocate the DMA-coherent command-block ring and wire it into a
 * circular doubly linked list. Each cb's hardware `link` field points
 * at the bus address of the next cb (wrapping at the end). On success
 * all cursors point at the first cb and every block is available.
 * Returns 0 or -ENOMEM.
 */
static int ve100_alloc_cbs(struct nic* nic)
{
	unsigned int idx;
	unsigned int total = nic->params.cbs.count;
	struct cb *ring;

	/* CU Start (0001b): begins execution of the first command on the
	 * CBL; a pointer to the first CB must be placed in the SCB General
	 * Pointer before issuing it. Arm that as the next CU command. */
	nic->cuc_cmd = cuc_start;
	nic->cbs_to_use = nic->cbs_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	ring = pci_alloc_consistent(nic->pdev, sizeof(struct cb) * total, &nic->cbs_dma_addr);
	if (!ring)
		return -ENOMEM;
	nic->cbs = ring;

	for (idx = 0; idx < total; idx++) {
		struct cb *cur = &ring[idx];
		unsigned int succ = (idx + 1) % total; /* wraps to 0 at the end */

		cur->next = &ring[succ];
		cur->prev = (idx == 0) ? &ring[total - 1] : &ring[idx - 1];
		cur->dma_addr = nic->cbs_dma_addr + idx * sizeof(struct cb);
		/* hardware-visible link: bus address of the successor cb */
		cur->link = cpu_to_le32(nic->cbs_dma_addr + succ * sizeof(struct cb));
		cur->skb = NULL;
	}

	nic->cbs_to_use = nic->cbs_to_send = nic->cb_to_clean = ring;
	nic->cbs_avail = total;
	return 0;
}
/*
 * Issue RU Start at the given ring slot (or the head of the ring when
 * rx is NULL). Only acts when a ring exists, the RU is in the
 * SUSPENDED state, and the chosen slot has a buffer attached.
 */
static void ve100_start_receiver(struct nic* nic, struct rx* rx)
{
	struct rx *slot;

	if (!nic->rxs || nic->ru_running != RU_SUSPENDED)
		return;

	slot = rx ? rx : nic->rxs;

	/* start the RU only if the RFA actually has a buffer here */
	if (slot->skb) {
		ve100_exec_cmd(nic, ruc_start, slot->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}
/*
 * Allocate and DMA-map a receive buffer for one ring slot. The buffer
 * begins with a template RFD header for the device to fill in; the new
 * RFD is then appended to the RFA by pointing the previous RFD's link
 * at it and clearing the previous RFD's EL (end-of-list) bit.
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
static int ve100_rx_alloc_skb(struct nic* nic, struct rx* rx)
{
if(!(rx->skb = netdev_alloc_skb(nic->ndev,RFD_BUF_LEN + NET_IP_ALIGN)))
return -ENOMEM;
/* align the eventual IP header, then seed the buffer with a blank RFD */
skb_reserve(rx->skb,NET_IP_ALIGN);
skb_copy_to_linear_data(rx->skb,&nic->blank_rfd,sizeof(struct rfd));
/* bidirectional: the device both reads the RFD and writes frame data */
rx->dma_addr = pci_map_single(nic->pdev,rx->skb->data,RFD_BUF_LEN,PCI_DMA_BIDIRECTIONAL);
if(pci_dma_mapping_error(rx->dma_addr))
{
dev_kfree_skb_any(rx->skb);
rx->skb = NULL;
rx->dma_addr = 0;
return -ENOMEM;
}
/* Link the RFD to end of RFA by linking previous RFD to
 * this one, and clearing EL bit of previous. */
if(rx->prev->skb)
{
struct rfd* prev_rfd = (struct rfd*)rx->prev->skb->data;
put_unaligned(cpu_to_le32(rx->dma_addr),(u32*)&prev_rfd->link);
/* the link pointer must be visible before EL is cleared */
wmb();
prev_rfd->command &= ~cpu_to_le16(cb_el);
pci_dma_sync_single_for_device(nic->pdev,rx->prev->dma_addr,sizeof(struct rfd),PCI_DMA_TODEVICE);
}
return 0;
}
/*
 * Examine one receive ring slot. If the device has completed its RFD,
 * detach the skb, strip the RFD header, account the frame and hand it
 * to the stack (incrementing *workdone). Returns 0 when the slot was
 * consumed, -ENODATA when it isn't ready yet, and -EAGAIN when the
 * worktodo quota is already exhausted.
 */
static int ve100_rx_indicate(struct nic* nic,struct rx* rx,unsigned int* workdone,unsigned int worktodo)
{
struct sk_buff* skb = rx->skb;
struct rfd* rfd = (struct rfd*)skb->data;
u16 rfd_status,actual_size;
/* respect the polling quota before touching the hardware state */
if(unlikely(workdone && *workdone >= worktodo))
return -EAGAIN;
/* Need to sync before taking a peek at cb_complete bit */
pci_dma_sync_single_for_cpu(nic->pdev,rx->dma_addr,sizeof(struct rfd),PCI_DMA_FROMDEVICE);
rfd_status = le16_to_cpu(rfd->status);
/* device hasn't filled this RFD yet */
if(unlikely(!(rfd_status & cb_complete)))
return -ENODATA;
/* low 14 bits of actual_size hold the received byte count */
actual_size = le16_to_cpu(rfd->actual_size) & 0x3fff;
/* clamp a bogus hardware count to what the buffer can actually hold */
if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
actual_size = RFD_BUF_LEN - sizeof(struct rfd);
/* done with DMA on this buffer; the CPU owns it from here */
pci_unmap_single(nic->pdev,rx->dma_addr,RFD_BUF_LEN,PCI_DMA_FROMDEVICE);
/* If the EL (end-of-list) bit is set on this RFD, the unit goes from
 * active to idle after executing it (raising a CNA/CI interrupt), so
 * note the suspension to allow a fast restart without re-enabling
 * interrupts. */
if(le16_to_cpu(rfd->command) & cb_el)
nic->ru_running = RU_SUSPENDED;
/* Pull off the RFD and put the actual data (minus eth hdr) */
skb_reserve(skb,sizeof(struct rfd));
/* set tail/len to the received payload size */
skb_put(skb,actual_size);
skb->protocol = eth_type_trans(skb,nic->ndev);
if(unlikely(!(rfd_status & cb_ok)))
{
/* hardware flagged a receive error: drop the frame */
dev_kfree_skb_any(skb);
}
else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN)
{
/* oversized frame: count and drop */
nic->rx_over_length_errors++;
dev_kfree_skb_any(skb);
}
else
{
nic->net_stats.rx_packets++;
nic->net_stats.rx_bytes += actual_size;
nic->ndev->last_rx = jiffies;
/* hand the frame up the protocol stack */
netif_receive_skb(skb);
if(workdone)
(*workdone)++;
}
/* slot consumed either way; caller refills it with a fresh skb */
rx->skb = NULL;
return 0;
}
/*
 * Receive bottom half: indicate completed frames (bounded by worktodo),
 * refill empty ring slots with fresh skbs, and — if the RU had
 * suspended on an end-of-list RFD — acknowledge the RNR condition and
 * restart the receiver from where indication left off.
 */
static void ve100_rx_clean(struct nic* nic, unsigned int* workdone,unsigned int worktodo)
{
int restart_required = 0;
struct rx* rx;
struct rx* rx_to_start = NULL;
int ret;
/* avoid race between hardware and rx_to_clean */
if(RU_SUSPENDED == nic->ru_running)
restart_required = 1;
/* indicate newly arrived packets */
for(rx=nic->rx_to_clean; rx->skb; rx=nic->rx_to_clean=rx->next)
{
ret = ve100_rx_indicate(nic,rx,workdone,worktodo);
if(ret == -EAGAIN)
{
/* quota hit: defer the restart to the next poll pass */
restart_required = 0;
break;
}
else if(ret == -ENODATA)
break;
}
/* save our starting point as the place we'll restart the receiver */
if(restart_required)
rx_to_start = nic->rx_to_clean;
/* refill consumed slots with fresh skbs until allocation fails */
for(rx=nic->rx_to_use; !rx->skb; rx=nic->rx_to_use=rx->next)
{
if(unlikely(ve100_rx_alloc_skb(nic,rx)))
break;
}
if(restart_required)
{
/* Ack RNR: this status bit indicates the RU left the ready state
 * (RU Abort, no available resources, or it filled an RFD whose
 * suspend bit was set). */
writeb(stat_ack_rnr,&nic->csr->scb.stat_ack);
ve100_start_receiver(nic,rx_to_start);
if(workdone)
(*workdone)++;
}
}
/*
 * Tear down the receive ring: unmap and free every attached skb, free
 * the ring array itself, and reset the RU state and ring cursors so a
 * later allocation starts from scratch.
 */
static void ve100_rx_clean_list(struct nic* nic)
{
	unsigned int idx;
	unsigned int total = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if (nic->rxs) {
		for (idx = 0; idx < total; idx++) {
			struct rx *slot = &nic->rxs[idx];

			if (!slot->skb)
				continue;
			pci_unmap_single(nic->pdev, slot->dma_addr, RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(slot->skb);
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}
/* ve100 allocl receive list */
static int ve100_rx_alloc_list(struct nic* nic)
{
unsigned int i;
unsigned int count = nic->params.rfds.count;
struct rx* rx;
nic->rx_to_use = nic->rx_to_clean = NULL;
nic->ru_running = RU_UNINITIALIZED;
/* NOTE(extraction): the remainder of ve100_rx_alloc_list() and any code
 * following it were lost when this listing was scraped from a web page;
 * the page's keyboard-shortcut help text appeared here and was removed. */