/* r1000_n.c */
}
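/*
 * Rx dispatch wrapper.  With R1000_BOTTOM_HALVES the receive work runs in a
 * tasklet, so the function takes the net_device disguised as an unsigned long
 * (the tasklet data argument); otherwise it is called directly from the
 * interrupt handler.  Either way it serializes on priv->rx_lock and defers
 * the real work to r1000_rx_action().
 */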
#ifdef R1000_BOTTOM_HALVES
static void r1000_rx_interrupt(unsigned long ndev)
{
    struct net_device *netdev = (void *)ndev;
    struct r1000_private *priv = netdev->priv;
#else
static void r1000_rx_interrupt(struct net_device *netdev, struct r1000_private *priv, unsigned long ioaddr)
{
#endif /* R1000_BOTTOM_HALVES */
    unsigned long flags;

    spin_lock_irqsave(&priv->rx_lock, flags);
    r1000_rx_action(netdev);
    spin_unlock_irqrestore(&priv->rx_lock, flags);
}
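/*
 * Walk the Rx descriptor ring.  Each descriptor whose OWN bit is clear holds
 * a completed frame: on success a replacement skb is allocated, the filled
 * skb is handed to the stack, and the descriptor is re-armed with the OWN
 * bit (plus EOR on the ring's last entry).  The loop is bounded by
 * max_interrupt_work so one interrupt cannot monopolize the CPU.
 */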
static void FASTCALL(r1000_rx_action(struct net_device *netdev));
static void fastcall r1000_rx_action(struct net_device *netdev)
{
    struct r1000_private *priv = netdev->priv;
    struct pci_dev *pdev = priv->pci_dev;
    int cur_rx;
    int pkt_size = 0;
    int rxdesc_cnt = 0;
    int ret;
    struct sk_buff *n_skb = NULL;
    struct sk_buff *cur_skb;
    struct sk_buff *rx_skb;
    struct RxDesc *rxdesc;

    assert(netdev != NULL);
    assert(priv != NULL);

    cur_rx = priv->cur_rx;
    rxdesc = &priv->RxDescArray[cur_rx];
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
    pci_dma_sync_single(pdev, priv->rxdesc_array_dma_addr[cur_rx], sizeof(struct RxDesc), PCI_DMA_FROMDEVICE);
#endif
    while (((le32_to_cpu(rxdesc->status) & OWNbit) == 0) && (rxdesc_cnt < max_interrupt_work)) {
        rxdesc_cnt++;
        if (le32_to_cpu(rxdesc->status) & RxRES) {
            printk(KERN_INFO "%s: Rx ERROR!!!\n", netdev->name);
            priv->stats.rx_errors++;
            if (le32_to_cpu(rxdesc->status) & (RxRWT | RxRUNT))
                priv->stats.rx_length_errors++;
            if (le32_to_cpu(rxdesc->status) & RxCRC)
                priv->stats.rx_crc_errors++;
        } else {
            /* Frame length lives in the low 13 status bits; drop the 4-byte CRC. */
            pkt_size = (int)(le32_to_cpu(rxdesc->status) & 0x00001FFF) - 4;
            if (pkt_size > priv->rx_pkt_len) {
                printk(KERN_ERR "%s: Error -- Rx packet size(%d) > mtu(%d)+14\n",
                       netdev->name, pkt_size, netdev->mtu);
                pkt_size = priv->rx_pkt_len;
            }
            DBG_PRINT("%s: RX pkt_size = %d\n", __FUNCTION__, pkt_size);

            rx_skb = priv->Rx_skbuff[cur_rx];
            n_skb = R1000_ALLOC_RXSKB(MAX_RX_SKBDATA_SIZE);
            if (n_skb != NULL) {
                skb_reserve(n_skb, 8);    /* reserve headroom to align the IP header */
                /* Hand the filled skb up to the stack. */
                if (rx_skb != NULL) {
                    rx_skb->dev = netdev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
                    /* Sync the whole received frame for the CPU before using it. */
                    pci_dma_sync_single(pdev, priv->rx_skbuff_dma_addr[cur_rx],
                                        pkt_size, PCI_DMA_FROMDEVICE);
#endif
                    skb_put(rx_skb, pkt_size);
                    rx_skb->protocol = eth_type_trans(rx_skb, netdev);
                    ret = R1000_NETIF_RX(rx_skb);
                    // netdev->last_rx = jiffies;
                    priv->stats.rx_bytes += pkt_size;
                    priv->stats.rx_packets++;
                }
                priv->Rx_skbuff[cur_rx] = n_skb;
            } else {
                DBG_PRINT("%s: Allocate n_skb failed!\n", __FUNCTION__);
                /* No replacement buffer: keep the old skb and drop this frame. */
                priv->Rx_skbuff[cur_rx] = rx_skb;
                priv->stats.rx_dropped++;
            }

            /* Map whichever skb now backs this slot and publish its address
             * before handing the descriptor back to the NIC. */
            cur_skb = priv->Rx_skbuff[cur_rx];
            if (cur_skb != NULL) {
                priv->rx_skbuff_dma_addr[cur_rx] = pci_map_single(pdev, cur_skb->data,
                                        MAX_RX_SKBDATA_SIZE, PCI_DMA_FROMDEVICE);
                rxdesc->buf_addr = cpu_to_le32(priv->rx_skbuff_dma_addr[cur_rx]);
            } else {
                DBG_PRINT("%s: %s() cur_skb == NULL\n", netdev->name, __FUNCTION__);
            }

            /* Give ownership back last; the final ring entry also carries EOR. */
            if (cur_rx == (NUM_RX_DESC - 1))
                priv->RxDescArray[cur_rx].status = cpu_to_le32((OWNbit | EORbit) | (unsigned long)priv->hw_rx_pkt_len);
            else
                priv->RxDescArray[cur_rx].status = cpu_to_le32(OWNbit | (unsigned long)priv->hw_rx_pkt_len);
        } /* end of RxRES error / good-frame split */

        cur_rx = (cur_rx + 1) % NUM_RX_DESC;
        rxdesc = &priv->RxDescArray[cur_rx];
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
        pci_dma_sync_single(pdev, priv->rxdesc_array_dma_addr[cur_rx], sizeof(struct RxDesc), PCI_DMA_FROMDEVICE);
#endif
    } /* end of OWN-bit while loop */

    if (rxdesc_cnt >= max_interrupt_work)
        DBG_PRINT("%s: Too much work at Rx interrupt.\n", netdev->name);

    priv->cur_rx = cur_rx;
}
/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
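/*
 * The handler follows the usual RTL816x protocol: mask all sources in
 * IntrMask, read IntrStatus, dispatch each pending event, acknowledge it by
 * writing its bit back to IntrStatus (write-1-to-clear), and finally restore
 * IntrMask.
 */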
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
static void r1000_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
#else
static irqreturn_t r1000_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
#endif
{
    struct net_device *netdev = (struct net_device *)dev_instance;
    struct r1000_private *priv = netdev->priv;
    unsigned long ioaddr = priv->ioaddr;
    unsigned int status = 0;
    unsigned int phy_status = 0;

    /* Mask all interrupt sources while servicing this one. */
    RTL_W16(IntrMask, 0x0000);
    status = RTL_R16(IntrStatus);

    /* All-ones means the device is gone; PCI reads of a missing device return 0xFFFF. */
    if (status == 0xFFFF) {
        RTL_W16(IntrMask, r1000_intr_mask);
        return IRQ_HANDLED;
    }
    if ((status & r1000_intr_mask) == 0)
        return IRQ_NONE;
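    /*
     * The #if 0 block below is compiled out: it combined the Rx and Tx
     * dispatch into a single status test and is kept for reference.
     */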
#if 0
    if (status & (RxOK | TxOK | TxErr)) {
#ifdef R1000_BOTTOM_HALVES
        tasklet_schedule(&priv->r1000_rx_tasklet);
        tasklet_schedule(&priv->r1000_tx_tasklet);
#else
        r1000_rx_interrupt(netdev, priv, ioaddr);
        r1000_tx_interrupt(netdev, priv, ioaddr);
#endif /* R1000_BOTTOM_HALVES */
        RTL_W16(IntrStatus, RxOK | TxOK | TxErr);
    }
#endif
    if (status & RxOK) {
#ifdef R1000_BOTTOM_HALVES
        tasklet_schedule(&priv->r1000_rx_tasklet);
#else
        r1000_rx_interrupt(netdev, priv, ioaddr);
#endif /* R1000_BOTTOM_HALVES */
        RTL_W16(IntrStatus, RxOK);
    }
    if (status & (TxOK | TxErr)) {
#ifdef R1000_BOTTOM_HALVES
        tasklet_schedule(&priv->r1000_tx_tasklet);
#else
        r1000_tx_interrupt(netdev, priv, ioaddr);
#endif /* R1000_BOTTOM_HALVES */
        RTL_W16(IntrStatus, TxOK | TxErr);
    }
    if (status & RxErr)
        RTL_W16(IntrStatus, RxErr);
    if ((status & TxOK) && (status & TxDescUnavail)) {
        /* Tx completed while the ring was empty of descriptors: kick the transmitter. */
        RTL_W8(TxPoll, 0x40);
        RTL_W16(IntrStatus, TxOK | TxDescUnavail);
    }
    if (status & LinkChg) {
        if ((priv->mcfg == MCFG_METHOD_2) || (priv->mcfg == MCFG_METHOD_3)) {
            /* Read the PHY status before testing it. */
            phy_status = RTL_R8(PHYstatus);
            if (phy_status & _100Mbps) {
                if (phy_status & LinkStatus) {
                    R1000_WRITE_GMII_REG((unsigned long)ioaddr, 0x1f, 0x0001);
                    R1000_WRITE_GMII_REG((unsigned long)ioaddr, 0x10, 0xf01b);
                    R1000_WRITE_GMII_REG((unsigned long)ioaddr, 0x1f, 0x0000);
                } else {
                    R1000_WRITE_GMII_REG((unsigned long)ioaddr, 0x1f, 0x0001);
                    R1000_WRITE_GMII_REG((unsigned long)ioaddr, 0x10, 0xf41b);
                    R1000_WRITE_GMII_REG((unsigned long)ioaddr, 0x1f, 0x0000);
                }
            }
        }
        RTL_W16(IntrStatus, LinkChg);
    }

    /* Re-enable the interrupt sources we handle. */
    RTL_W16(IntrMask, r1000_intr_mask);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
    return IRQ_HANDLED;
#endif
}
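/*
 * Tear down the interface: stop the link timer and the transmit queue, kill
 * the tasklets, halt the chip and mask its interrupts, wait for any in-flight
 * handler (synchronize_irq), then release the IRQ, the descriptor rings, and
 * every still-mounted Rx skb.
 */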
static int r1000_close(struct net_device *netdev)
{
    struct r1000_private *priv = netdev->priv;
    unsigned long ioaddr = priv->ioaddr;
    int i;

    r1000_delete_timer(&(priv->r1000_timer));
    netif_stop_queue(netdev);
#ifdef R1000_BOTTOM_HALVES
    tasklet_kill(&priv->r1000_rx_tasklet);
    tasklet_kill(&priv->r1000_tx_tasklet);
#endif /* R1000_BOTTOM_HALVES */

    spin_lock_irq(&priv->lock);

    /* Stop the chip's Tx and Rx processes. */
    RTL_W8(ChipCmd, 0x00);

    /* Disable interrupts by clearing the interrupt mask. */
    RTL_W16(IntrMask, 0x0000);

    /* Update the error counts. */
    priv->stats.rx_missed_errors += RTL_R32(RxMissed);
    RTL_W32(RxMissed, 0);

    spin_unlock_irq(&priv->lock);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
    synchronize_irq();
#else
    synchronize_irq(netdev->irq);
#endif
    free_irq(netdev->irq, netdev);

    r1000_tx_clear(priv);

    //2004-05-11
    if (priv->txdesc_space != NULL) {
        pci_free_consistent(priv->pci_dev,
                            priv->sizeof_txdesc_space,
                            priv->txdesc_space,
                            priv->txdesc_phy_dma_addr);
        priv->txdesc_space = NULL;
    }
    if (priv->rxdesc_space != NULL) {
        pci_free_consistent(priv->pci_dev,
                            priv->sizeof_rxdesc_space,
                            priv->rxdesc_space,
                            priv->rxdesc_phy_dma_addr);
        priv->rxdesc_space = NULL;
    }
    priv->TxDescArray = NULL;
    priv->RxDescArray = NULL;

    for (i = 0; i < NUM_RX_DESC; i++) {
        if (priv->Rx_skbuff[i] != NULL) {
            R1000_FREE_RXSKB(priv->Rx_skbuff[i]);
            priv->Rx_skbuff[i] = NULL;    /* avoid a double free on the next close */
        }
    }

    DBG_PRINT("%s: %s() alloc_rxskb_cnt = %d\n", netdev->name, __FUNCTION__, alloc_rxskb_cnt);
    return 0;
}
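/*
 * Bit-serial CRC-32 over the standard Ethernet polynomial (0x04c11db7).
 * The driver uses the top six bits of the result to select one of the 64
 * multicast hash-filter bits, as in r1000_set_rx_mode() below:
 *
 *    int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;
 *    mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */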
static unsigned const ethernet_polynomial = 0x04c11db7U;

static inline u32 ether_crc(int length, unsigned char *data)
{
    int crc = -1;

    while (--length >= 0) {
        unsigned char current_octet = *data++;
        int bit;

        for (bit = 0; bit < 8; bit++, current_octet >>= 1)
            crc = (crc << 1) ^ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
    }
    return crc;
}
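/*
 * Program the receive filter.  Three cases: promiscuous (accept everything,
 * hash filter wide open), too many multicast addresses or IFF_ALLMULTI
 * (accept all multicast), or a 64-bit hash built from the CRC of each
 * multicast address.  Chip revisions MCFG_METHOD_11 through _15 always get
 * a wide-open MAR filter below.
 */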
static void r1000_set_rx_mode(struct net_device *netdev)
{
    struct r1000_private *priv = netdev->priv;
    unsigned long ioaddr = priv->ioaddr;
    unsigned long flags;
    u32 mc_filter[2];    /* Multicast hash filter */
    int i, rx_mode;
    u32 tmp = 0;

    if (netdev->flags & IFF_PROMISC) {
        /* Unconditionally log net taps. */
        printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", netdev->name);
        rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys | AcceptAllPhys;
        mc_filter[1] = mc_filter[0] = 0xffffffff;
    } else if ((netdev->mc_count > multicast_filter_limit) || (netdev->flags & IFF_ALLMULTI)) {
        /* Too many to filter perfectly -- accept all multicasts. */
        rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
        mc_filter[1] = mc_filter[0] = 0xffffffff;
    } else {
        struct dev_mc_list *mclist;

        rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
        mc_filter[1] = mc_filter[0] = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        for (i = 0, mclist = netdev->mc_list; mclist && i < netdev->mc_count;
             i++, mclist = mclist->next) {
            set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26, mc_filter);
        }
#else
        for (i = 0, mclist = netdev->mc_list; mclist && i < netdev->mc_count;
             i++, mclist = mclist->next) {
            int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
            mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
            rx_mode |= AcceptMulticast;
        }
#endif
    }

    // spin_lock_irqsave(&priv->lock,flags);
    spin_lock_irqsave(&priv->rx_lock, flags);
    tmp = r1000_rx_config | rx_mode | (RTL_R32(RxConfig) & rtl_chip_info[priv->chipset].RxConfigMask);
    RTL_W32(RxConfig, tmp);
    if ((priv->mcfg == MCFG_METHOD_11) || (priv->mcfg == MCFG_METHOD_12) ||
        (priv->mcfg == MCFG_METHOD_13) || (priv->mcfg == MCFG_METHOD_14) ||
        (priv->mcfg == MCFG_METHOD_15)) {
        RTL_W32(MAR0 + 0, 0xFFFFFFFF);
        RTL_W32(MAR0 + 4, 0xFFFFFFFF);
    } else {
        RTL_W32(MAR0 + 0, mc_filter[0]);
        RTL_W32(MAR0 + 4, mc_filter[1]);
    }
    // spin_unlock_irqrestore(&priv->lock,flags);
    spin_unlock_irqrestore(&priv->rx_lock, flags);
}
struct net_device_stats *r1000_get_stats(struct net_device *netdev)
{
    struct r1000_private *priv = netdev->priv;

    return &priv->stats;
}
static struct pci_driver r1000_pci_driver = {
    .name     = MODULENAME,
    .id_table = r1000_pci_tbl,
    .probe    = r1000_init_one,
    .remove   = __devexit_p(r1000_remove_one),
    .suspend  = NULL,
    .resume   = NULL,
};
static int __init r1000_init_module (void)
{
return pci_module_init (&r1000_pci_driver); // pci_register_driver (drv)
}
static void __exit r1000_cleanup_module (void)
{
pci_unregister_driver (&r1000_pci_driver);
}
#ifdef R1000_JUMBO_FRAME_SUPPORT
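/*
 * Jumbo-frame MTU change: recompute the Tx/Rx packet length limits, program
 * the new hardware receive limit (the RxMaxSize write is bracketed by a
 * Cfg9346 unlock/lock), then restart the interface so the rings are rebuilt
 * with buffers of the new size.
 */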
static int r1000_change_mtu(struct net_device *netdev, int new_mtu)
{
    struct r1000_private *priv = netdev->priv;
    unsigned long ioaddr = priv->ioaddr;

    if (new_mtu > MAX_JUMBO_FRAME_MTU) {
        printk(KERN_ERR "%s: Error -- new_mtu(%d) > MAX_JUMBO_FRAME_MTU(%d).\n",
               netdev->name, new_mtu, MAX_JUMBO_FRAME_MTU);
        return -EINVAL;
    }

    netdev->mtu = new_mtu;
    priv->curr_mtu_size = new_mtu;
    priv->tx_pkt_len = new_mtu + ETH_HDR_LEN;
    priv->rx_pkt_len = new_mtu + ETH_HDR_LEN;
    priv->hw_rx_pkt_len = priv->rx_pkt_len + 8;

    RTL_W8(Cfg9346, Cfg9346_Unlock);
    RTL_W16(RxMaxSize, (unsigned short)priv->hw_rx_pkt_len);
    RTL_W8(Cfg9346, Cfg9346_Lock);

    DBG_PRINT("--------------------------\n");
    DBG_PRINT("netdev->mtu = %d\n", netdev->mtu);
    DBG_PRINT("priv->curr_mtu_size = %d\n", priv->curr_mtu_size);
    DBG_PRINT("priv->rx_pkt_len = %d\n", priv->rx_pkt_len);
    DBG_PRINT("priv->tx_pkt_len = %d\n", priv->tx_pkt_len);
    DBG_PRINT("RTL_W16( RxMaxSize, %d )\n", priv->hw_rx_pkt_len);
    DBG_PRINT("--------------------------\n");

    /* Restart the interface so the new buffer sizes take effect. */
    r1000_close(netdev);
    r1000_open(netdev);

    return 0;
}
#endif /* R1000_JUMBO_FRAME_SUPPORT */
module_init(r1000_init_module);
module_exit(r1000_cleanup_module);