/*
 * ethernet.c -- ETRAX 100 ethernet driver (MDIO/PHY management,
 * TX/RX DMA datapath and interrupt handling).
 */
else
	full_duplex = 0;
}

/* Read duplex status from a TDK PHY's diagnostic register. */
static void
tdk_check_duplex(void)
{
	unsigned long data;
	data = e100_get_mdio_reg(MDIO_TDK_DIAGNOSTIC_REG);
	full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
}

/* Read duplex status from a Broadcom PHY's auxiliary control/status register. */
static void
broadcom_check_duplex(void)
{
	unsigned long data;
	data = e100_get_mdio_reg(MDIO_AUX_CTRL_STATUS_REG);
	full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
}

/* Request a duplex change; triggers renegotiation only when the setting
 * actually differs from the current one.
 */
static void
e100_set_duplex(enum duplex new_duplex)
{
	if (new_duplex != current_duplex) {
		current_duplex = new_duplex;
		e100_negotiate();
	}
}

/* Scan all 32 MDIO addresses for a responding PHY, then match its OUI
 * against the transceivers[] table to select the vendor-specific ops.
 * Returns 0 on success, -ENODEV if no PHY answers.
 * Note: if no OUI matches, ops ends on the table's terminating entry
 * (oui == 0), which presumably holds generic/default handlers --
 * confirm against the transceivers[] definition.
 */
static int
e100_probe_transceiver(void)
{
	unsigned int phyid_high;
	unsigned int phyid_low;
	unsigned int oui;
	struct transceiver_ops* ops = NULL;

	/* Probe MDIO physical address: a PHY that is absent reads as all-ones */
	for (mdio_phy_addr = 0; mdio_phy_addr <= 31; mdio_phy_addr++) {
		if (e100_get_mdio_reg(MDIO_BASE_STATUS_REG) != 0xffff)
			break;
	}
	if (mdio_phy_addr == 32)
		return -ENODEV;

	/* Get manufacturer: the 22-bit OUI spans ID-high[15:0] and ID-low[15:10] */
	phyid_high = e100_get_mdio_reg(MDIO_PHY_ID_HIGH_REG);
	phyid_low = e100_get_mdio_reg(MDIO_PHY_ID_LOW_REG);
	oui = (phyid_high << 6) | (phyid_low >> 10);

	for (ops = &transceivers[0]; ops->oui; ops++) {
		if (ops->oui == oui)
			break;
	}
	transceiver = ops;
	return 0;
}

/* Bit-bang a Clause-22 MDIO read of reg_num from the probed PHY address.
 * Returns the 16-bit register value, assembled MSB first.
 */
static unsigned short
e100_get_mdio_reg(unsigned char reg_num)
{
	unsigned short cmd;	/* Data to be sent on MDIO port */
	unsigned short data;	/* Data read from MDIO */
	int bitCounter;

	/* Start of frame, OP Code, Physical Address, Register Address */
	cmd = (MDIO_START << 14) | (MDIO_READ << 12) | (mdio_phy_addr << 7) |
	      (reg_num << 2);

	e100_send_mdio_cmd(cmd, 0);

	data = 0;

	/* Data... clock in 16 bits, MSB first */
	for (bitCounter = 15; bitCounter >= 0; bitCounter--) {
		data |= (e100_receive_mdio_bit() << bitCounter);
	}

	return data;
}

/* Bit-bang a Clause-22 MDIO write of a 16-bit value to reg on the PHY. */
static void
e100_set_mdio_reg(unsigned char reg, unsigned short data)
{
	int bitCounter;
	unsigned short cmd;

	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (mdio_phy_addr << 7) |
	      (reg << 2);

	e100_send_mdio_cmd(cmd, 1);

	/* Data... shift out 16 bits, MSB first */
	for (bitCounter = 15; bitCounter >= 0; bitCounter--) {
		e100_send_mdio_bit(GET_BIT(bitCounter, data));
	}
}

/* Send the MDIO frame header: 32-bit preamble, then the 14 command bits
 * (start/opcode/phy/reg), then the 2-bit turnaround.  For a write the
 * turnaround is driven as "10" (data == 0x2); for a read the bus is
 * released and the two turnaround bits are clocked in and discarded.
 */
static void
e100_send_mdio_cmd(unsigned short cmd, int write_cmd)
{
	int bitCounter;
	unsigned char data = 0x2;	/* "10" turnaround pattern for writes */

	/* Preamble */
	for (bitCounter = 31; bitCounter >= 0; bitCounter--)
		e100_send_mdio_bit(GET_BIT(bitCounter, MDIO_PREAMBLE));

	/* Command bits 15..2 (bits 1..0 are the turnaround, handled below) */
	for (bitCounter = 15; bitCounter >= 2; bitCounter--)
		e100_send_mdio_bit(GET_BIT(bitCounter, cmd));

	/* Turnaround */
	for (bitCounter = 1; bitCounter >= 0; bitCounter--)
		if (write_cmd)
			e100_send_mdio_bit(GET_BIT(bitCounter, data));
		else
			e100_receive_mdio_bit();
}

/* Drive one bit onto MDIO: present the data with MDC low, wait, then
 * raise MDC while holding the data -- the PHY samples on the rising edge.
 */
static void
e100_send_mdio_bit(unsigned char bit)
{
	*R_NETWORK_MGM_CTRL =
		IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
		IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
	udelay(1);
	*R_NETWORK_MGM_CTRL =
		IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
		IO_MASK(R_NETWORK_MGM_CTRL, mdck) |
		IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
	udelay(1);
}

/* Sample one bit from MDIO: release the bus (MDC low), read the pin,
 * then pulse MDC high to clock the PHY to the next bit.
 * NOTE(review): K&R-style empty parameter list; should be (void) to
 * give the function a proper prototype.
 */
static unsigned char
e100_receive_mdio_bit()
{
	unsigned char bit;
	*R_NETWORK_MGM_CTRL = 0;
	bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT);
	udelay(1);
	*R_NETWORK_MGM_CTRL = IO_MASK(R_NETWORK_MGM_CTRL, mdck);
	udelay(1);
	return bit;
}

/* Reset the PHY by writing its basic control register back with the
 * reset bit (0x8000) set.  The write is open-coded (command via
 * e100_send_mdio_cmd plus a manual data shift-out) rather than going
 * through e100_set_mdio_reg -- presumably equivalent; the data phase
 * matches e100_set_mdio_reg's loop.
 */
static void
e100_reset_transceiver(void)
{
	unsigned short cmd;
	unsigned short data;
	int bitCounter;

	data = e100_get_mdio_reg(MDIO_BASE_CONTROL_REG);

	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (mdio_phy_addr << 7) |
	      (MDIO_BASE_CONTROL_REG << 2);

	e100_send_mdio_cmd(cmd, 1);

	data |= 0x8000;	/* soft-reset bit in the basic control register */

	for (bitCounter = 15; bitCounter >= 0; bitCounter--) {
		e100_send_mdio_bit(GET_BIT(bitCounter, data));
	}
}

/* Called by upper layers if they decide it took too long to complete
 * sending a packet - we need to reset and stuff.
 */
static void
e100_tx_timeout(struct net_device *dev)
{
	struct net_local *np = (struct net_local *)dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);

	printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
	       tx_done(dev) ? "IRQ problem" : "network cable problem");

	/* remember we got an error */
	np->stats.tx_errors++;

	/* reset the TX DMA in case it has hung on something */
	RESET_DMA(NETWORK_TX_DMA_NBR);
	WAIT_DMA(NETWORK_TX_DMA_NBR);

	/* Reset the transceiver. */
	e100_reset_transceiver();

	/* and get rid of the packets that never got an interrupt */
	while (myFirstTxDesc != myNextTxDesc) {
		dev_kfree_skb(myFirstTxDesc->skb);
		myFirstTxDesc->skb = 0;
		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
	}

	/* Set up transmit DMA channel so it can be restarted later */
	*R_DMA_CH0_FIRST = 0;
	*R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);

	/* tell the upper layers we're ok again */
	netif_wake_queue(dev);

	spin_unlock_irqrestore(&np->lock, flags);
}

/* This will only be invoked if the driver is _not_ in XOFF state.
 * What this means is that we need not check it, and that this
 * invariant will hold if we make sure that the netif_*_queue()
 * calls are done at the proper times.
 */
static int
e100_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *np = (struct net_local *)dev->priv;
	unsigned char *buf = skb->data;
	unsigned long flags;

#ifdef ETHDEBUG
	/* NOTE(review): `length` is not declared in this function -- this
	 * printk will not compile with ETHDEBUG defined; it presumably
	 * should print skb->len.  Verify before enabling ETHDEBUG.
	 */
	printk("send packet len %d\n", length);
#endif
	spin_lock_irqsave(&np->lock, flags); /* protect from tx_interrupt and ourself */

	myNextTxDesc->skb = skb;

	dev->trans_start = jiffies;

	e100_hardware_send_packet(buf, skb->len);

	/* advance to the next descriptor in the TX ring */
	myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);

	/* Stop queue if full */
	if (myNextTxDesc == myFirstTxDesc) {
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static irqreturn_t
e100rxtx_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct net_local *np = (struct net_local *)dev->priv;
	unsigned long irqbits = *R_IRQ_MASK2_RD;

	/* Disable RX/TX IRQs to avoid reentrancy */
	*R_IRQ_MASK2_CLR =
		IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
		IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);

	/* Handle received packets (DMA channel 1 is RX) */
	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
		/* acknowledge the eop interrupt */
		*R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);

		/* check if one or more complete packets were indeed received */
		while (*R_DMA_CH1_FIRST != virt_to_phys(myNextRxDesc)) {
			/* Take out the buffer and give it to the OS, then
			 * allocate a new buffer to put a packet in.
			 */
			e100_rx(dev);
			((struct net_local *)dev->priv)->stats.rx_packets++;
			/* restart/continue on the channel, for safety */
			*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
			/* clear dma channel 1 eop/descr irq bits */
			*R_DMA_CH1_CLR_INTR =
				IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do) |
				IO_STATE(R_DMA_CH1_CLR_INTR, clr_descr, do);

			/* now, we might have gotten another packet
			   so we have to loop back and check if so */
		}
	}

	/* Report any packets that have been sent (DMA channel 0 is TX):
	 * walk the ring from the oldest in-flight descriptor up to the
	 * hardware's current position, freeing each completed skb.
	 */
	while (myFirstTxDesc != phys_to_virt(*R_DMA_CH0_FIRST) &&
	       myFirstTxDesc != myNextTxDesc) {
		np->stats.tx_bytes += myFirstTxDesc->skb->len;
		np->stats.tx_packets++;

		/* dma is ready with the transmission of the data in tx_skb, so now
		   we can release the skb memory */
		dev_kfree_skb_irq(myFirstTxDesc->skb);
		myFirstTxDesc->skb = 0;
		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
	}

	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
		/* acknowledge the eop interrupt and wake up queue */
		*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
		netif_wake_queue(dev);
	}

	/* Enable RX/TX IRQs again */
	*R_IRQ_MASK2_SET =
		IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
		IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);

	return IRQ_HANDLED;
}

/* Network-error interrupt: underrun, overrun and excessive collisions.
 * Errors are cleared by pulsing clr_error in R_NETWORK_TR_CTRL via the
 * shadow register (write with clr set, then restore the shadow to nop).
 */
static irqreturn_t
e100nw_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct net_local *np = (struct net_local *)dev->priv;
	unsigned long irqbits = *R_IRQ_MASK0_RD;

	/* check for underrun irq */
	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, underrun, active)) {
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
		np->stats.tx_errors++;
		/* NOTE(review): this branch counts a TX error but the debug
		 * message says "receiver underrun" -- the text looks wrong
		 * for a transmit underrun; verify intent.
		 */
		D(printk("ethernet receiver underrun!\n"));
	}

	/* check for overrun irq */
	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
		update_rx_stats(&np->stats); /* this will ack the irq */
		D(printk("ethernet receiver overrun!\n"));
	}
	/* check for excessive collision irq */
	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, excessive_col, active)) {
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
		/* NOTE(review): this extra raw write (clr_error bit only,
		 * bypassing the shadow) does not appear in the underrun
		 * branch above -- possibly redundant or a stray line;
		 * confirm against the hardware manual.
		 */
		*R_NETWORK_TR_CTRL = IO_STATE(R_NETWORK_TR_CTRL, clr_error, clr);
		np->stats.tx_errors++;
		D(printk("ethernet excessive collisions!\n"));
	}
	return IRQ_HANDLED;
}

/* We have a good packet(s), get it/them out of the buffers. */
static void
e100_rx(struct net_device *dev)
{
	struct sk_buff *skb;
	int length = 0;
	struct net_local *np = (struct net_local *)dev->priv;
	unsigned char *skb_data_ptr;
#ifdef ETHDEBUG
	int i;
#endif

	if (!led_active && time_after(jiffies, led_next_time)) {
		/* light the network leds depending on the current speed. */
		e100_set_network_leds(NETWORK_ACTIVITY);

		/* Set the earliest time we may clear the LED */
		led_next_time = jiffies + NET_FLASH_TIME;
		led_active = 1;
		mod_timer(&clear_led_timer, jiffies + HZ/10);
	}

	/* hw_len includes the 4-byte ethernet FCS, which we strip */
	length = myNextRxDesc->descr.hw_len - 4;
	((struct net_local *)dev->priv)->stats.rx_bytes += length;

#ifdef ETHDEBUG
	printk("Got a packet of length %d:\n", length);
	/* dump the first bytes in the packet */
	skb_data_ptr = (unsigned char *)phys_to_virt(myNextRxDesc->descr.buf);
	for (i = 0; i < 8; i++) {
		printk("%d: %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", i * 8,
		       skb_data_ptr[0],skb_data_ptr[1],skb_data_ptr[2],skb_data_ptr[3],
		       skb_data_ptr[4],skb_data_ptr[5],skb_data_ptr[6],skb_data_ptr[7]);
		skb_data_ptr += 8;
	}
#endif

	if (length < RX_COPYBREAK) {
		/* Small packet, copy data into a freshly allocated skb and
		 * leave the DMA buffer in place for reuse.
		 */
		skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
		if (!skb) {
			np->stats.rx_errors++;
			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
			return;
		}

		skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */
		skb_data_ptr = skb_push(skb, ETHER_HEAD_LEN); /* allocate room for the header */

#ifdef ETHDEBUG
		printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n",
		       skb->head, skb->data, skb->tail, skb->end);
		printk("copying packet to 0x%x.\n", skb_data_ptr);
#endif

		memcpy(skb_data_ptr, phys_to_virt(myNextRxDesc->descr.buf), length);
	}
	else {
		/* Large packet, send directly to upper layers and allocate new
		 * memory (aligned to cache line boundary to avoid bug).
		 * Before sending the skb to upper layers we must make sure that
		 * skb->data points to the aligned start of the packet.
		 */
		int align;
		struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
		if (!new_skb) {
			np->stats.rx_errors++;
			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
			return;
		}
		skb = myNextRxDesc->skb;
		align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
		skb_put(skb, length + align);
		skb_pull(skb, align); /* Remove alignment bytes */
		myNextRxDesc->skb = new_skb;
		myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data));
	}

	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet to the upper layers */
	netif_rx(skb);

	/* Prepare for next packet */
	myNextRxDesc->descr.status = 0;
	myPrevRxDesc = myNextRxDesc;
	myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);

	rx_queue_len++;

	/* Check if descriptors should be returned */
	if (rx_queue_len == RX_QUEUE_THRESHOLD) {
		flush_etrax_cache();