/* r1000_n.c — RealTek R1000 (RTL8169/8110) gigabit NIC driver (fragment). */
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0x802f );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0x4f02 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x0409 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0xf0f9 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x9800 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x9000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xdf01 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xdf20 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0xff95 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0xba00 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xa800 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xa000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xff41 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xdf20 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x0140 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0x00bb );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xb800 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xb000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xdf41 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xdc60 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x6340 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0x007d );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xd800 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xd000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xdf01 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xdf20 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x100a );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0xa0ff );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xf800 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xf000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1f, 0x0000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x0b, 0x0000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0x9200 );
#if 0
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1F, 0x0001 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x15, 0x1000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x18, 0x65C7 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x0000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0x00A1 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0x0008 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x1020 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0x1000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x0800 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x0000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x7000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xFF41 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xDE60 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x0140 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0x0077 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x7800 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x7000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xA000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xDF01 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xDF20 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0xFF95 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0xFA00 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xA800 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xA000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xB000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xFF41 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xDE20 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x0140 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0x00BB );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xB800 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xB000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xF000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xDF01 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xDF20 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0xFF95 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0xBF00 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xF800 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xF000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x0000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1F, 0x0000 );
R1000_WRITE_GMII_REG( (unsigned long)ioaddr, 0x0B, 0x0000 );
#endif
}
else{
DBG_PRINT("priv->mcfg=%d. Discard hw PHY config.\n",priv->mcfg);
}
}
/*
 * r1000_hw_start - bring the NIC out of reset and program it for operation.
 *
 * Soft-resets the chip, then programs receive/transmit configuration, the
 * Tx/Rx descriptor ring base addresses and the interrupt mask.  Two
 * register-programming sequences exist: one for the older chip revisions
 * and one for MCFG_METHOD_5/11/12/13/14/15, differing mainly in ordering
 * and a few chip-specific PCI config-space fix-ups.  Finally the transmit
 * queue is started.
 *
 * @netdev: network device being (re)initialised; priv must already hold
 *          valid ioaddr and DMA addresses for the descriptor rings.
 */
static void r1000_hw_start (struct net_device *netdev)
{
	struct r1000_private *priv = netdev->priv;
	struct pci_dev *pdev = priv->pci_dev;
	unsigned long ioaddr = priv->ioaddr;
	u32 i;
	u8 i8;
	u16 i16;

	if((priv->mcfg!=MCFG_METHOD_5)&&(priv->mcfg!=MCFG_METHOD_11)&&
	   (priv->mcfg!=MCFG_METHOD_12)&&(priv->mcfg!=MCFG_METHOD_13)&&
	   (priv->mcfg!=MCFG_METHOD_14)&&(priv->mcfg!=MCFG_METHOD_15)){
		/* ---- Older chip revisions ---- */

		/* Soft reset the chip. */
		RTL_W8 ( ChipCmd, CmdReset);
		/* Poll up to 1000 * 10us for the reset bit to self-clear. */
		for (i = 1000; i > 0; i--){
			if ((RTL_R8( ChipCmd ) & CmdReset) == 0) break;
			else udelay (10);
		}

		RTL_W8 ( Cfg9346, Cfg9346_Unlock);
		RTL_W8 ( ChipCmd, CmdTxEnb | CmdRxEnb);
		RTL_W8 ( ETThReg, ETTh);
		RTL_W16(CPlusCmd,RTL_R16(CPlusCmd)|CPCR_MulRW_Enable);
		pci_write_config_byte(pdev,Cache_Line_Size,0x08);

		// For gigabit rtl8169
		RTL_W16 ( RxMaxSize, (unsigned short)priv->hw_rx_pkt_len );

		// Set Rx Config register
		i = r1000_rx_config | ( RTL_R32( RxConfig ) & rtl_chip_info[priv->chipset].RxConfigMask);
		RTL_W32 ( RxConfig, i);

		/* Set DMA burst size and Interframe Gap Time */
		RTL_W32 ( TxConfig, (TX_DMA_BURST << TxDMAShift) | (InterFrameGap << TxInterFrameGapShift) );

		RTL_W16( CPlusCmd, RTL_R16(CPlusCmd) );
		if(priv->mcfg==MCFG_METHOD_2||priv->mcfg==MCFG_METHOD_3){
			RTL_W16( CPlusCmd, (RTL_R16(CPlusCmd)|(1<<14)|(1<<3)) );
			DBG_PRINT("Set MAC Reg C+CR Offset 0xE0: bit-3 and bit-14\n");
		}else{
			RTL_W16( CPlusCmd, (RTL_R16(CPlusCmd)|(1<<3)) );
			DBG_PRINT("Set MAC Reg C+CR Offset 0xE0: bit-3.\n");
		}
		/* NOTE(review): register 0xE2 cleared unconditionally — purpose
		 * undocumented in this fragment; confirm against the datasheet. */
		RTL_W16(0xE2,0x0000);

		priv->cur_rx = 0;

		/* Descriptor ring base addresses (high dword forced to 0 —
		 * 32-bit DMA only). */
		RTL_W32 ( TxDescStartAddr, priv->txdesc_phy_dma_addr);
		RTL_W32 ( TxDescStartAddr + 4, 0x00);
		RTL_W32 ( RxDescStartAddr, priv->rxdesc_phy_dma_addr);
		RTL_W32 ( RxDescStartAddr + 4, 0x00);

		RTL_W8 ( Cfg9346, Cfg9346_Lock );
		udelay (10);
		RTL_W32 ( RxMissed, 0 );
		r1000_set_rx_mode(netdev);
		RTL_W16 ( MultiIntr, RTL_R16(MultiIntr) & 0xF000);
		RTL_W16 ( IntrMask, r1000_intr_mask);
	}else{
		/* ---- MCFG_METHOD_5/11/12/13/14/15 ---- */

		/* Soft reset the chip. */
		RTL_W8 ( ChipCmd, CmdReset);
		/* Poll up to 1000 * 10us for the reset bit to self-clear. */
		for (i = 1000; i > 0; i--){
			if ((RTL_R8( ChipCmd ) & CmdReset) == 0) break;
			else udelay (10);
		}

		if(priv->mcfg==MCFG_METHOD_5){
			RTL_W16(CPlusCmd,RTL_R16(CPlusCmd)|CPCR_MulRW_Enable);
			pci_write_config_byte(pdev,Cache_Line_Size,0x08);
		}

		if( priv->mcfg == MCFG_METHOD_13 ){
			/* NOTE(review): 16-bit writes at config offsets 0x68/0x69
			 * overlap at the odd offset — verify intended widths. */
			pci_write_config_word(pdev,0x68,0x00);
			pci_write_config_word(pdev,0x69,0x08);
		}

		if( priv->mcfg == MCFG_METHOD_5 ){
			i8=RTL_R8(Config2);
			i8=i8&0x07;
			/* Bug fix: the original tested `i8 && 0x01` (logical AND),
			 * which is true for ANY of bits 0-2; only bit 0 should
			 * trigger this write. */
			if(i8 & 0x01)
				RTL_W32(Off7Ch,0x0007FFFF);
			i=0x0007FF00;
			RTL_W32(Off7Ch, i);

			/* Clear bit 4 (MWI enable) in the PCI command register. */
			pci_read_config_word(pdev,0x04,&i16);
			i16=i16&0xEF;
			pci_write_config_word(pdev,0x04,i16);
		}

		RTL_W8 ( Cfg9346, Cfg9346_Unlock);
		RTL_W8 ( ETThReg, ETTh);

		// For gigabit rtl8169
		RTL_W16 ( RxMaxSize, (unsigned short)priv->hw_rx_pkt_len );

		RTL_W16( CPlusCmd, RTL_R16(CPlusCmd) );
		if(priv->mcfg==MCFG_METHOD_2||priv->mcfg==MCFG_METHOD_3){
			RTL_W16( CPlusCmd, (RTL_R16(CPlusCmd)|(1<<14)|(1<<3)) );
			DBG_PRINT("Set MAC Reg C+CR Offset 0xE0: bit-3 and bit-14\n");
		}else{
			RTL_W16( CPlusCmd, (RTL_R16(CPlusCmd)|(1<<3)) );
			DBG_PRINT("Set MAC Reg C+CR Offset 0xE0: bit-3.\n");
		}
		RTL_W16(0xE2,0x0000);

		priv->cur_rx = 0;

		/* Descriptor ring base addresses (high dword forced to 0 —
		 * 32-bit DMA only). */
		RTL_W32 ( TxDescStartAddr, priv->txdesc_phy_dma_addr);
		RTL_W32 ( TxDescStartAddr + 4, 0x00);
		RTL_W32 ( RxDescStartAddr, priv->rxdesc_phy_dma_addr);
		RTL_W32 ( RxDescStartAddr + 4, 0x00);

		/* On these revisions Tx/Rx are enabled AFTER the rings are
		 * programmed, unlike the older-revision path above. */
		RTL_W8 ( ChipCmd, CmdTxEnb | CmdRxEnb);

		// Set Rx Config register
		i = r1000_rx_config | ( RTL_R32( RxConfig ) & rtl_chip_info[priv->chipset].RxConfigMask);
		RTL_W32 ( RxConfig, i);

		/* Set DMA burst size and Interframe Gap Time */
		RTL_W32 ( TxConfig, (TX_DMA_BURST << TxDMAShift) | (InterFrameGap << TxInterFrameGapShift) );

		RTL_W8 ( Cfg9346, Cfg9346_Lock );
		udelay (10);
		RTL_W32 ( RxMissed, 0 );
		r1000_set_rx_mode(netdev);
		RTL_W16 ( MultiIntr, RTL_R16(MultiIntr) & 0xF000);
		RTL_W16 ( IntrMask, r1000_intr_mask);
	}

	netif_start_queue(netdev);
}//end of r1000_hw_start (struct net_device *netdev)
/*
 * r1000_init_ring - reset ring indices and (re)build the Tx/Rx descriptor
 * arrays, DMA-mapping each descriptor and each preallocated Rx buffer.
 *
 * On an Rx allocation hole (Rx_skbuff[i] == NULL) the slot is skipped and
 * priv->drvinit_fail is set so the caller can abort initialisation.
 */
static void r1000_init_ring (struct net_device *netdev)
{
	struct r1000_private *priv = netdev->priv;
	struct pci_dev *pdev = priv->pci_dev;
	int i;
	struct sk_buff *skb;

	priv->cur_rx = 0;
	priv->cur_tx = 0;
	priv->dirty_tx = 0;
	memset(priv->TxDescArray, 0x0, NUM_TX_DESC*sizeof(struct TxDesc));
	memset(priv->RxDescArray, 0x0, NUM_RX_DESC*sizeof(struct RxDesc));

	for (i=0 ; i<NUM_TX_DESC ; i++){
		priv->Tx_skbuff[i]=NULL;
		priv->txdesc_array_dma_addr[i] = pci_map_single(pdev, &priv->TxDescArray[i], sizeof(struct TxDesc), PCI_DMA_TODEVICE);
	}

	for (i=0; i <NUM_RX_DESC; i++) {
		/* Hand every descriptor to the NIC (OWN); mark the last one
		 * End-Of-Ring so the hardware wraps. */
		if(i==(NUM_RX_DESC-1)){
			priv->RxDescArray[i].status = cpu_to_le32((OWNbit | EORbit) | (unsigned long)priv->hw_rx_pkt_len);
		}
		else{
			priv->RxDescArray[i].status = cpu_to_le32(OWNbit | (unsigned long)priv->hw_rx_pkt_len);
		}

		skb = priv->Rx_skbuff[i];
		if( skb != NULL ){
			/* Bug fix: the original called pci_map_single(skb->data)
			 * BEFORE this NULL check, dereferencing a NULL skb when
			 * buffer allocation had failed. Map only valid buffers. */
			priv->rx_skbuff_dma_addr[i] = pci_map_single(pdev, skb->data, MAX_RX_SKBDATA_SIZE, PCI_DMA_FROMDEVICE);
			priv->RxDescArray[i].buf_addr = cpu_to_le32(priv->rx_skbuff_dma_addr[i]);
			priv->RxDescArray[i].buf_Haddr = 0;
		}
		else{
			DBG_PRINT("%s: %s() Rx_skbuff == NULL\n", netdev->name, __FUNCTION__);
			priv->drvinit_fail = 1;
		}

		priv->rxdesc_array_dma_addr[i] = pci_map_single(pdev, &priv->RxDescArray[i], sizeof(struct RxDesc), PCI_DMA_TODEVICE);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
		pci_dma_sync_single(pdev, priv->rxdesc_array_dma_addr[i], sizeof(struct RxDesc), PCI_DMA_TODEVICE);
#endif
	}
}
/*
 * r1000_tx_clear - drop every in-flight Tx packet and reset the Tx index.
 *
 * Frees each queued sk_buff, clears its ring slot, and counts the packet
 * as dropped.  Callers must hold the appropriate Tx lock.
 */
static void r1000_tx_clear (struct r1000_private *priv)
{
	int idx;

	priv->cur_tx = 0;
	for (idx = 0; idx < NUM_TX_DESC; idx++) {
		struct sk_buff *pending = priv->Tx_skbuff[idx];

		if (pending == NULL)
			continue;

		dev_kfree_skb(pending);
		priv->Tx_skbuff[idx] = NULL;
		priv->stats.tx_dropped++;
	}
}
/*
 * r1000_tx_timeout - watchdog handler for a stalled transmitter.
 *
 * Disables the transmitter and all interrupts, discards every pending Tx
 * packet under the Tx lock, then fully re-initialises the hardware and
 * restarts the queue.
 */
static void r1000_tx_timeout (struct net_device *netdev)
{
	struct r1000_private *priv = netdev->priv;
	unsigned long ioaddr = priv->ioaddr;
	u8 cmd;

	/* Stop the transmitter if it is currently enabled. */
	cmd = RTL_R8(ChipCmd);
	if (cmd & CmdTxEnb)
		RTL_W8(ChipCmd, cmd & ~CmdTxEnb);

	/* Disable interrupts by clearing the interrupt mask. */
	RTL_W16(IntrMask, 0x0000);

	/* Discard pending packets; hold the Tx lock so a concurrent
	 * transmit path cannot touch the ring while we clear it. */
	spin_lock_irq(&priv->tx_lock);
	r1000_tx_clear(priv);
	spin_unlock_irq(&priv->tx_lock);

	/* Full hardware restart, then let the stack transmit again. */
	r1000_hw_start(netdev);
	netif_wake_queue(netdev);
}
/*
 * r1000_start_xmit - queue one packet on the Tx descriptor ring.
 *
 * Claims the next descriptor if the NIC no longer owns it, DMA-maps the
 * packet, fills in the descriptor (EOR on the last ring slot), and kicks
 * the transmitter via TxPoll.  Stops the queue when the ring is full.
 * Always returns 0.
 *
 * NOTE(review): if the descriptor is still owned by the NIC, the skb is
 * neither transmitted nor freed here and 0 is still returned — the packet
 * is silently lost and may leak; confirm against the caller's contract.
 */
static int r1000_start_xmit (struct sk_buff *skb, struct net_device *netdev)
{
struct r1000_private *priv = netdev->priv;
unsigned long ioaddr = priv->ioaddr;
struct pci_dev *pdev = priv->pci_dev;
int entry = priv->cur_tx % NUM_TX_DESC;
int buf_len = 60;
dma_addr_t txbuf_dma_addr;
// spin_lock_irq(&priv->lock);
spin_lock(&priv->tx_lock);
/* Only claim the descriptor once the hardware has released it (OWN clear). */
if( (le32_to_cpu(priv->TxDescArray[entry].status) & OWNbit)==0 ){
priv->Tx_skbuff[entry] = skb;
txbuf_dma_addr = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
priv->TxDescArray[entry].buf_addr = cpu_to_le32(txbuf_dma_addr);
DBG_PRINT("%s: TX pkt_size = %d\n", __FUNCTION__, skb->len);
/* Clamp oversized packets to the configured maximum Tx length. */
if( skb->len <= priv->tx_pkt_len ){
buf_len = skb->len;
}
else{
printk("%s: Error -- Tx packet size(%d) > mtu(%d)+14\n", netdev->name, skb->len, netdev->mtu);
buf_len = priv->tx_pkt_len;
}
/* Hand the descriptor to the NIC: OWN + first/last-segment; the last
 * ring slot also carries End-Of-Ring so the hardware wraps. */
if( entry != (NUM_TX_DESC-1) ){
priv->TxDescArray[entry].status = cpu_to_le32((OWNbit | FSbit | LSbit) | buf_len);
}
else{
priv->TxDescArray[entry].status = cpu_to_le32((OWNbit | EORbit | FSbit | LSbit) | buf_len);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
pci_dma_sync_single(pdev, priv->txdesc_array_dma_addr[entry], sizeof(struct TxDesc), PCI_DMA_TODEVICE);
#endif
RTL_W8(TxPoll,0x40); //set polling bit
netdev->trans_start = jiffies;
/* Account at least the minimum Ethernet frame size per packet. */
priv->stats.tx_bytes += ( (skb->len > ETH_ZLEN) ? skb->len : ETH_ZLEN);
priv->cur_tx++;
}//end of if( (priv->TxDescArray[entry].status & 0x80000000)==0 )
// spin_unlock_irq(&priv->lock);
/* Ring full: stop the queue until the Tx-complete path frees slots. */
if ( (priv->cur_tx - NUM_TX_DESC) == priv->dirty_tx ){
netif_stop_queue(netdev);
}
else{
if (netif_queue_stopped(netdev)){
netif_wake_queue(netdev);
}
}
spin_unlock(&priv->tx_lock);
return 0;
}
#ifdef R1000_BOTTOM_HALVES
/*
 * r1000_tx_interrupt - Tx-completion entry point.
 *
 * Two alternative signatures selected at compile time: when bottom halves
 * are enabled this runs as a tasklet taking the net_device cast to an
 * unsigned long; otherwise it is called directly from the ISR with the
 * device, private data and I/O base.  Both variants simply run
 * r1000_tx_action() under the Tx lock with interrupts saved.
 */
static void r1000_tx_interrupt(unsigned long ndev){
struct net_device *netdev = (void *)ndev;
struct r1000_private *priv = netdev->priv;
#else
static void r1000_tx_interrupt(struct net_device *netdev, struct r1000_private *priv, unsigned long ioaddr){
#endif //R1000_BOTTOM_HALVES
unsigned long flags;
spin_lock_irqsave(&priv->tx_lock,flags);
r1000_tx_action(netdev);
spin_unlock_irqrestore(&priv->tx_lock,flags);
}
static void FASTCALL (r1000_tx_action(struct net_device *netdev));
static void fastcall r1000_tx_action(struct net_device *netdev){
struct r1000_private *priv = netdev->priv;
unsigned long dirty_tx, tx_left=0;
int entry = priv->cur_tx % NUM_TX_DESC;
int txloop_cnt = 0;
assert (netdev != NULL);
assert (priv != NULL);
assert (ioaddr != NULL);
dirty_tx = priv->dirty_tx;
tx_left = priv->cur_tx - dirty_tx;
while( (tx_left > 0) && (txloop_cnt < max_interrupt_work) ){
if( (le32_to_cpu(priv->TxDescArray[entry].status) & OWNbit) == 0 ){
dev_kfree_skb_irq( priv->Tx_skbuff[dirty_tx % NUM_TX_DESC] );
priv->Tx_skbuff[dirty_tx % NUM_TX_DESC] = NULL;
priv->stats.tx_packets++;
dirty_tx++;
tx_left--;
entry++;
}
txloop_cnt ++;
}
if (priv->dirty_tx != dirty_tx) {
priv->dirty_tx = dirty_tx;
if (netif_queue_stopped(netdev))
netif_wake_queue(netdev);
}
/* (web-page navigation chrome removed — not part of the driver source) */