ethernet.c
    *R_NETWORK_MGM_CTRL = IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable);

    *R_IRQ_MASK0_CLR =
        IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
        IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
        IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);

    /* clear dma0 and 1 eop and descr irq masks */
    *R_IRQ_MASK2_CLR =
        IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
        IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
        IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
        IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);

    /* Reset and wait for the DMA channels */
    RESET_DMA(NETWORK_TX_DMA_NBR);
    RESET_DMA(NETWORK_RX_DMA_NBR);
    WAIT_DMA(NETWORK_TX_DMA_NBR);
    WAIT_DMA(NETWORK_RX_DMA_NBR);

    /* Initialise the etrax network controller */

    /* allocate the irq corresponding to the receiving DMA */
    if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rx_interrupt, 0,
                    cardname, (void *)dev)) {
        goto grace_exit0;
    }

    /* allocate the irq corresponding to the transmitting DMA */
    if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100tx_interrupt, 0,
                    cardname, (void *)dev)) {
        goto grace_exit1;
    }

    /* allocate the irq corresponding to the network errors etc */
    if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0,
                    cardname, (void *)dev)) {
        goto grace_exit2;
    }

    /*
     * Always allocate the DMA channels after the IRQ,
     * and clean up on failure.
     */
    if (request_dma(NETWORK_TX_DMA_NBR, cardname)) {
        goto grace_exit3;
    }
    if (request_dma(NETWORK_RX_DMA_NBR, cardname)) {
        goto grace_exit4;
    }

    /* give the HW an idea of what MAC address we want */
    *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
        (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
    *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
    *R_NETWORK_SA_2 = 0;

#if 0
    /* use promiscuous mode for testing */
    *R_NETWORK_GA_0 = 0xffffffff;
    *R_NETWORK_GA_1 = 0xffffffff;

    *R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
#else
    SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
    SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
    SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
    *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
#endif

    *R_NETWORK_GEN_CONFIG =
        IO_STATE(R_NETWORK_GEN_CONFIG, phy, mii_clk) |
        IO_STATE(R_NETWORK_GEN_CONFIG, enable, on);

    *R_NETWORK_TR_CTRL =
        IO_STATE(R_NETWORK_TR_CTRL, clr_error, clr) |
        IO_STATE(R_NETWORK_TR_CTRL, delay, none) |
        IO_STATE(R_NETWORK_TR_CTRL, cancel, dont) |
        IO_STATE(R_NETWORK_TR_CTRL, cd, enable) |
        IO_STATE(R_NETWORK_TR_CTRL, retry, enable) |
        IO_STATE(R_NETWORK_TR_CTRL, pad, enable) |
        IO_STATE(R_NETWORK_TR_CTRL, crc, enable);

    save_flags(flags);
    cli();

    /* enable the irq's for ethernet DMA */
    *R_IRQ_MASK2_SET =
        IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
        IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);

    *R_IRQ_MASK0_SET =
        IO_STATE(R_IRQ_MASK0_SET, overrun, set) |
        IO_STATE(R_IRQ_MASK0_SET, underrun, set) |
        IO_STATE(R_IRQ_MASK0_SET, excessive_col, set);

    tx_skb = 0;

    /* make sure the irqs are cleared */
    *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
    *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);

    /* make sure the rec and transmit error counters are cleared */
    (void)*R_REC_COUNTERS;  /* dummy read */
    (void)*R_TR_COUNTERS;   /* dummy read */

    /* start the receiving DMA channel so we can receive packets from now on */
    *R_DMA_CH1_FIRST = virt_to_phys(myNextRxDesc);
    *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, start);

    restore_flags(flags);

    /* We are now ready to accept transmit requests from
     * the queueing layer of the networking.
     */
    netif_start_queue(dev);

    return 0;

grace_exit4:
    free_dma(NETWORK_TX_DMA_NBR);
grace_exit3:
    free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
grace_exit2:
    free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
grace_exit1:
    free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
grace_exit0:
    return -EAGAIN;
}

static void
e100_check_speed(unsigned long dummy)
{
    unsigned long data;
    int old_speed = current_speed;

    data = e100_get_mdio_reg(MDIO_BASE_STATUS_REG);
    if (!(data & MDIO_LINK_UP_MASK)) {
        current_speed = 0;
    } else {
        data = e100_get_mdio_reg(MDIO_AUX_CTRL_STATUS_REG);
        current_speed = (data & MDIO_SPEED ? 100 : 10);
    }

    if (old_speed != current_speed)
        e100_set_network_leds(NO_NETWORK_ACTIVITY);

    /* Reinitialize the timer. */
    speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
    add_timer(&speed_timer);
}

static void
e100_negotiate(void)
{
    unsigned short cmd;
    unsigned short data = e100_get_mdio_reg(MDIO_ADVERTISMENT_REG);
    int bitCounter;

    /* Discard old speed and duplex settings */
    data &= ~(MDIO_ADVERT_100_HD | MDIO_ADVERT_100_FD |
              MDIO_ADVERT_10_FD | MDIO_ADVERT_10_HD);

    switch (current_speed_selection) {
    case 10:
        if (current_duplex == full)
            data |= MDIO_ADVERT_10_FD;
        else if (current_duplex == half)
            data |= MDIO_ADVERT_10_HD;
        else
            data |= MDIO_ADVERT_10_HD | MDIO_ADVERT_10_FD;
        break;

    case 100:
        if (current_duplex == full)
            data |= MDIO_ADVERT_100_FD;
        else if (current_duplex == half)
            data |= MDIO_ADVERT_100_HD;
        else
            data |= MDIO_ADVERT_100_HD | MDIO_ADVERT_100_FD;
        break;

    case 0: /* Auto */
        if (current_duplex == full)
            data |= MDIO_ADVERT_100_FD | MDIO_ADVERT_10_FD;
        else if (current_duplex == half)
            data |= MDIO_ADVERT_100_HD | MDIO_ADVERT_10_HD;
        else
            data |= MDIO_ADVERT_100_HD | MDIO_ADVERT_100_FD |
                MDIO_ADVERT_10_FD | MDIO_ADVERT_10_HD;
        break;

    default: /* assume autoneg speed and duplex */
        data |= MDIO_ADVERT_100_HD | MDIO_ADVERT_100_FD |
            MDIO_ADVERT_10_FD | MDIO_ADVERT_10_HD;
    }

    cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (MDIO_PHYS_ADDR << 7) |
          (MDIO_ADVERTISMENT_REG << 2);
    e100_send_mdio_cmd(cmd, 1);

    /* Data... */
    for (bitCounter = 15; bitCounter >= 0; bitCounter--) {
        e100_send_mdio_bit(GET_BIT(bitCounter, data));
    }

    /* Renegotiate with link partner */
    data = e100_get_mdio_reg(MDIO_BASE_CONTROL_REG);
    data |= MDIO_BC_NEGOTIATE;

    cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (MDIO_PHYS_ADDR << 7) |
          (MDIO_BASE_CONTROL_REG << 2);
    e100_send_mdio_cmd(cmd, 1);

    /* Data... */
    for (bitCounter = 15; bitCounter >= 0; bitCounter--) {
        e100_send_mdio_bit(GET_BIT(bitCounter, data));
    }
}

static void
e100_set_speed(unsigned long speed)
{
    current_speed_selection = speed;
    e100_negotiate();
}

static void
e100_check_duplex(unsigned long dummy)
{
    unsigned long data;

    data = e100_get_mdio_reg(MDIO_AUX_CTRL_STATUS_REG);

    if (data & MDIO_FULL_DUPLEX_IND) {
        if (!full_duplex) { /* Duplex changed to full? */
            full_duplex = 1;
            SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
            *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
        }
    } else { /* half */
        if (full_duplex) { /* Duplex changed to half? */
            full_duplex = 0;
            SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
            *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
        }
    }

    /* Reinitialize the timer.
     */
    duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
    add_timer(&duplex_timer);
}

static void
e100_set_duplex(enum duplex new_duplex)
{
    current_duplex = new_duplex;
    e100_negotiate();
}

static unsigned short
e100_get_mdio_reg(unsigned char reg_num)
{
    unsigned short cmd;   /* Data to be sent on MDIO port */
    unsigned short data;  /* Data read from MDIO */
    int bitCounter;

    /* Start of frame, OP Code, Physical Address, Register Address */
    cmd = (MDIO_START << 14) | (MDIO_READ << 12) | (MDIO_PHYS_ADDR << 7) |
          (reg_num << 2);

    e100_send_mdio_cmd(cmd, 0);

    data = 0;

    /* Data... */
    for (bitCounter = 15; bitCounter >= 0; bitCounter--) {
        data |= (e100_receive_mdio_bit() << bitCounter);
    }

    return data;
}

static void
e100_send_mdio_cmd(unsigned short cmd, int write_cmd)
{
    int bitCounter;
    unsigned char data = 0x2;  /* turnaround bits "10" used for a write access */

    /* Preamble */
    for (bitCounter = 31; bitCounter >= 0; bitCounter--)
        e100_send_mdio_bit(GET_BIT(bitCounter, MDIO_PREAMBLE));

    for (bitCounter = 15; bitCounter >= 2; bitCounter--)
        e100_send_mdio_bit(GET_BIT(bitCounter, cmd));

    /* Turnaround */
    for (bitCounter = 1; bitCounter >= 0; bitCounter--)
        if (write_cmd)
            e100_send_mdio_bit(GET_BIT(bitCounter, data));
        else
            e100_receive_mdio_bit();
}

static void
e100_send_mdio_bit(unsigned char bit)
{
    *R_NETWORK_MGM_CTRL =
        IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
        IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
    udelay(1);
    *R_NETWORK_MGM_CTRL =
        IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
        IO_MASK(R_NETWORK_MGM_CTRL, mdck) |
        IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
    udelay(1);
}

static unsigned char
e100_receive_mdio_bit()
{
    unsigned char bit;

    *R_NETWORK_MGM_CTRL = 0;
    bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT);
    udelay(1);
    *R_NETWORK_MGM_CTRL = IO_MASK(R_NETWORK_MGM_CTRL, mdck);
    udelay(1);
    return bit;
}

static void
e100_reset_tranceiver(void)
{
    unsigned short cmd;
    unsigned short data;
    int bitCounter;

    data = e100_get_mdio_reg(MDIO_BASE_CONTROL_REG);

    cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (MDIO_PHYS_ADDR << 7) |
          (MDIO_BASE_CONTROL_REG << 2);

    e100_send_mdio_cmd(cmd, 1);

    data |= 0x8000;

    for (bitCounter = 15; bitCounter >= 0; bitCounter--) {
        e100_send_mdio_bit(GET_BIT(bitCounter, data));
    }
}

/* Called by upper layers if they decide it took too long to complete
 * sending a packet - we need to reset and stuff.
 */
static void
e100_tx_timeout(struct net_device *dev)
{
    struct net_local *np = (struct net_local *)dev->priv;

    printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
           tx_done(dev) ? "IRQ problem" : "network cable problem");

    /* remember we got an error */
    np->stats.tx_errors++;

    /* reset the TX DMA in case it has hung on something */
    RESET_DMA(NETWORK_TX_DMA_NBR);
    WAIT_DMA(NETWORK_TX_DMA_NBR);

    /* Reset the transceiver. */
    e100_reset_tranceiver();

    /* and get rid of the packet that never got an interrupt */
    dev_kfree_skb(tx_skb);
    tx_skb = 0;

    /* tell the upper layers we're ok again */
    netif_wake_queue(dev);
}

/* This will only be invoked if the driver is _not_ in XOFF state.
 * What this means is that we need not check it, and that this
 * invariant will hold if we make sure that the netif_*_queue()
 * calls are done at the proper times.
 */
static int
e100_send_packet(struct sk_buff *skb, struct net_device *dev)
{
    struct net_local *np = (struct net_local *)dev->priv;
    int length = ETH_ZLEN < skb->len ?
        skb->len : ETH_ZLEN;
    unsigned char *buf = skb->data;

#ifdef ETHDEBUG
    printk("send packet len %d\n", length);
#endif
    spin_lock_irq(&np->lock);  /* protect from tx_interrupt */

    tx_skb = skb;  /* remember it so we can free it in the tx irq handler later */
    dev->trans_start = jiffies;

    e100_hardware_send_packet(buf, length);

    /* this simple TX driver has only one send-descriptor so we're full
     * directly. If this had a send-ring instead, we would only do this if
     * the ring got full.
     */
    netif_stop_queue(dev);

    spin_unlock_irq(&np->lock);

    return 0;
}

/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static void
e100rx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *)dev_id;
    unsigned long irqbits = *R_IRQ_MASK2_RD;

    if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {

        /* acknowledge the eop interrupt */
        *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);

        /* check if one or more complete packets were indeed received */
        while (*R_DMA_CH1_FIRST != virt_to_phys(myNextRxDesc)) {
            /* Take out the buffer and give it to the OS, then
             * allocate a new buffer to put a packet in.
             */
            e100_rx(dev);
            ((struct net_local *)dev->priv)->stats.rx_packets++;
            /* restart/continue on the channel, for safety */
            *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
            /* clear dma channel 1 eop/descr irq bits */