/*
 * e1000_ethtool.c — excerpt: ethtool self-test support (register,
 * EEPROM, interrupt and PHY-loopback tests) for the Intel e1000 driver.
 * (Web-viewer header chrome removed during extraction cleanup.)
 */
/*
 * NOTE(review): this excerpt was recovered from a web code viewer that had
 * collapsed the file onto a few very long lines.  It has been re-indented
 * without changing any code token.  The statements immediately below are
 * the TAIL of a register self-test function whose opening (including the
 * brace matched by the stray '}' after the E1000_VET test) lies before
 * this excerpt.
 */
	REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
	} /* closes a conditional opened before this excerpt */

	REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
	REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
	REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
	REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);

	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);

	/* ICH8/ICH9 parts get a reduced RCTL write mask — NOTE(review):
	 * presumably because they implement fewer RCTL bits; confirm against
	 * the hardware spec. */
	before = (((mac->type == e1000_ich8lan) ||
		   (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE);
	REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
	REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);

	if (mac->type >= e1000_82543) {
		REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
		if ((mac->type != e1000_ich8lan) &&
		    (mac->type != e1000_ich9lan))
			REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
		REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
		/* (i << 1) + 1 selects the odd (high) word of each
		 * receive-address register pair in the E1000_RA array */
		for (i = 0; i < mac->rar_entry_count; i++) {
			REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
					       0x8003FFFF, 0xFFFFFFFF);
		}
	} else {
		/* pre-82543 MACs: narrower RCTL/base-address/TXCW masks */
		REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x01FFFFFF);
		REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFF000, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_TXCW, 0x0000FFFF, 0x0000FFFF);
		REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFF000, 0xFFFFFFFF);
	}

	/* pattern-test every multicast table array register */
	for (i = 0; i < mac->mta_reg_count; i++)
		REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);

	*data = 0;	/* 0 == all register tests passed */
	return 0;
}

/*
 * e1000_eeprom_test - verify the NVM (EEPROM) checksum
 * @adapter: board private structure
 * @data: test result: 0 = pass, 1 = NVM read error, 2 = checksum mismatch
 *
 * Sums the first (NVM_CHECKSUM_REG + 1) words of the NVM; a correctly
 * programmed part sums to NVM_SUM.  Returns *data (non-zero = failure).
 */
static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
{
	u16 temp;
	u16 checksum = 0;
	u16 i;

	*data = 0;
	/* Read and add up the contents of the EEPROM */
	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
		if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
			*data = 1;
			break;
		}
		checksum += temp;
	}

	/* If Checksum is not Correct return error else test passed */
	if ((checksum != (u16) NVM_SUM) && !(*data))
		*data = 2;

	return *data;
}

/*
 * e1000_test_intr - interrupt handler used only by the interrupt self-test
 * @irq: interrupt number (unused)
 * @data: the net_device registered with request_irq()
 *
 * Accumulates the interrupt cause bits read from ICR into
 * adapter->test_icr so e1000_intr_test() can check which causes fired.
 */
static irqreturn_t e1000_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= E1000_READ_REG(&adapter->hw, E1000_ICR);

	return IRQ_HANDLED;
}

/*
 * e1000_intr_test - exercise the first 10 interrupt cause bits
 * @adapter: board private structure
 * @data: test result: 0 = pass; 1 = could not hook IRQ; 3/4/5 encode which
 *        masked/unmasked expectation failed (see comments below)
 *
 * Temporarily replaces the driver's IRQ handler with e1000_test_intr(),
 * then for each cause bit forces the interrupt via ICS with the bit
 * masked (IMC) and unmasked (IMS) and checks whether an interrupt was
 * posted only when expected.  Returns *data, or -1 if no IRQ line could
 * be acquired at all.
 */
static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i=0, shared_int = TRUE;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* NOTE: we don't test MSI interrupts here, yet */
	/* Hook up test interrupt handler just for this test; first try to
	 * get the line exclusively (IRQF_PROBE_SHARED), fall back to a
	 * shared registration if that fails */
	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED,
			 netdev->name, netdev))
		shared_int = FALSE;
	else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
			     netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	DPRINTK(HW, INFO, "testing %s interrupt\n",
		(shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xFFFFFFFF);
	msleep(10);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* NOTE(review): cause bit 8 is skipped on ICH8/ICH9 —
		 * presumably unimplemented on those parts; confirm */
		if (((adapter->hw.mac.type == e1000_ich8lan) ||
		     (adapter->hw.mac.type == e1000_ich9lan)) && i == 8)
			continue;

		/* Interrupt to test */
		mask = 1 << i;

		if (!shared_int) {
			/* Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			E1000_WRITE_REG(&adapter->hw, E1000_IMC, mask);
			E1000_WRITE_REG(&adapter->hw, E1000_ICS, mask);
			msleep(10);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/* Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted.  If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;
		E1000_WRITE_REG(&adapter->hw, E1000_IMS, mask);
		E1000_WRITE_REG(&adapter->hw, E1000_ICS, mask);
		msleep(10);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/* Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			E1000_WRITE_REG(&adapter->hw, E1000_IMC,
					~mask & 0x00007FFF);
			E1000_WRITE_REG(&adapter->hw, E1000_ICS,
					~mask & 0x00007FFF);
			msleep(10);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xFFFFFFFF);
	msleep(10);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}

/*
 * e1000_free_desc_rings - tear down the loopback-test descriptor rings
 * @adapter: board private structure
 *
 * Unmaps and frees every buffer attached to the test Tx/Rx rings, frees
 * the coherent descriptor memory and the buffer_info arrays.  Safe to
 * call on partially constructed rings (each step is NULL-guarded), which
 * is why e1000_setup_desc_rings() can use it as its error path.
 */
static void e1000_free_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_tx_ring *tx_ring = &adapter->test_tx_ring;
	struct e1000_rx_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	if (tx_ring->desc && tx_ring->buffer_info) {
		for (i = 0; i < tx_ring->count; i++) {
			if (tx_ring->buffer_info[i].dma)
				pci_unmap_single(pdev,
						 tx_ring->buffer_info[i].dma,
						 tx_ring->buffer_info[i].length,
						 PCI_DMA_TODEVICE);
			if (tx_ring->buffer_info[i].skb)
				dev_kfree_skb(tx_ring->buffer_info[i].skb);
		}
	}

	if (rx_ring->desc && rx_ring->buffer_info) {
		for (i = 0; i < rx_ring->count; i++) {
			/* Rx buffers were all mapped with this fixed size
			 * in e1000_setup_desc_rings() */
			if (rx_ring->buffer_info[i].dma)
				pci_unmap_single(pdev,
						 rx_ring->buffer_info[i].dma,
						 E1000_RXBUFFER_2048,
						 PCI_DMA_FROMDEVICE);
			if (rx_ring->buffer_info[i].skb)
				dev_kfree_skb(rx_ring->buffer_info[i].skb);
		}
	}

	if (tx_ring->desc) {
		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
				    tx_ring->dma);
		tx_ring->desc = NULL;
	}
	if (rx_ring->desc) {
		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
				    rx_ring->dma);
		rx_ring->desc = NULL;
	}

	kfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;
	kfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	return;
}

/*
 * e1000_setup_desc_rings - build minimal Tx/Rx rings for the loopback test
 * @adapter: board private structure
 *
 * Allocates descriptor rings and data buffers, programs the base/len/
 * head/tail registers for queue 0 and enables Tx and Rx with loopback-
 * friendly settings.  Returns 0 on success or a small positive code
 * (1..6) identifying which allocation failed; on failure everything
 * already built is released via e1000_free_desc_rings().
 */
static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_tx_ring *tx_ring = &adapter->test_tx_ring;
	struct e1000_rx_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	u32 rctl;
	int i, ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	if (!tx_ring->count)
		tx_ring->count = E1000_DEFAULT_TXD;

	if (!(tx_ring->buffer_info = kcalloc(tx_ring->count,
					     sizeof(struct e1000_buffer),
					     GFP_KERNEL))) {
		ret_val = 1;
		goto err_nomem;
	}

	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
						   &tx_ring->dma))) {
		ret_val = 2;
		goto err_nomem;
	}
	tx_ring->next_to_use = tx_ring->next_to_clean = 0;

	/* point queue 0 at the ring and enable transmit */
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
			((u64) tx_ring->dma >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
			tx_ring->count * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
			E1000_TCTL_MULR |
			E1000_TCTL_PSP | E1000_TCTL_EN |
			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
			E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);

	for (i = 0; i < tx_ring->count; i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct sk_buff *skb;
		unsigned int size = 1024;

		if (!(skb = alloc_skb(size, GFP_KERNEL))) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, size);
		tx_ring->buffer_info[i].skb = skb;
		tx_ring->buffer_info[i].length = skb->len;
		tx_ring->buffer_info[i].dma =
			pci_map_single(pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
		tx_desc->lower.data = cpu_to_le32(skb->len);
		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
						   E1000_TXD_CMD_IFCS);
		/* NOTE(review): unlike the writes above, these two OR in a
		 * host-order constant without cpu_to_le32() — looks like an
		 * endianness bug on big-endian hosts; confirm and fix
		 * upstream */
		if (adapter->hw.mac.type < e1000_82543)
			tx_desc->lower.data |= E1000_TXD_CMD_RPS;
		else
			tx_desc->lower.data |= E1000_TXD_CMD_RS;
		tx_desc->upper.data = 0;
	}

	/* Setup Rx descriptor ring and Rx buffers */

	if (!rx_ring->count)
		rx_ring->count = E1000_DEFAULT_RXD;

	if (!(rx_ring->buffer_info = kcalloc(rx_ring->count,
					     sizeof(struct e1000_rx_buffer),
					     GFP_KERNEL))) {
		ret_val = 4;
		goto err_nomem;
	}

	rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
	if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
						   &rx_ring->dma))) {
		ret_val = 5;
		goto err_nomem;
	}
	rx_ring->next_to_use = rx_ring->next_to_clean = 0;

	/* disable receive while reprogramming the queue registers */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
			((u64) rx_ring->dma & 0xFFFFFFFF));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
			((u64) rx_ring->dma >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0), rx_ring->size);
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), 0);
	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	for (i = 0; i < rx_ring->count; i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct sk_buff *skb;

		if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
				      GFP_KERNEL))) {
			ret_val = 6;
			goto err_nomem;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		rx_ring->buffer_info[i].skb = skb;
		rx_ring->buffer_info[i].dma =
			pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
				       PCI_DMA_FROMDEVICE);
		rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma);
		/* NOTE(review): no skb_put() was done, so skb->len is 0 here
		 * and this memset clears nothing; it also touches the buffer
		 * after it was DMA-mapped.  Likely dead/misplaced code —
		 * verify intent */
		memset(skb->data, 0x00, skb->len);
	}

	return 0;

err_nomem:
	e1000_free_desc_rings(adapter);
	return ret_val;
}

/*
 * e1000_phy_disable_receiver - disable the PHY receiver for loopback
 * @adapter: board private structure
 *
 * Write out to PHY registers 29 and 30 to disable the Receiver.
 * NOTE(review): registers 29/30 look like vendor debug/page registers;
 * the magic values are taken as-is from the original code.
 */
static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
{
	e1000_write_phy_reg(&adapter->hw, 29, 0x001F);
	e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
	e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
	e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
}

/*
 * e1000_phy_reset_clk_and_crs - restore TX_CLK and CRS settings after a
 * PHY reset
 * @adapter: board private structure
 */
static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
{
	u16 phy_reg;

	/* Because we reset the PHY above, we need to re-force TX_CLK in the
	 * Extended PHY Specific Control Register to 25MHz clock.  This
	 * value defaults back to a 2.5MHz clock when the PHY is reset.
	 */
	e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL,
			   &phy_reg);
	phy_reg |= M88E1000_EPSCR_TX_CLK_25;
	e1000_write_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL,
			    phy_reg);

	/* In addition, because of the s/w reset above, we need to enable
	 * CRS on TX.  This must be set for both full and half duplex
	 * operation.
	 */
	e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
	phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
	e1000_write_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
}

/*
 * e1000_nonintegrated_phy_loopback - put an external (M88) PHY into
 * loopback mode
 * @adapter: board private structure
 *
 * NOTE(review): this function is TRUNCATED — the excerpt ends partway
 * through its body; the remainder lies beyond this chunk.
 */
static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
{
	u32 ctrl_reg;
	u16 phy_reg;

	/* Setup the Device Control Register for PHY loopback test. */

	ctrl_reg = E1000_READ_REG(&adapter->hw, E1000_CTRL);
	ctrl_reg |= (E1000_CTRL_ILOS |		/* Invert Loss-Of-Signal */
		     E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX |	/* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
		     E1000_CTRL_FD);		/* Force Duplex to FULL */
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl_reg);

	/* Read the PHY Specific Control Register (0x10) */
	e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);

	/* Clear Auto-Crossover bits in PHY Specific Control Register
	 * (bits 6:5).
	 */
	phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
	e1000_write_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, phy_reg);

	/* Perform software reset on the PHY */
	e1000_phy_commit(&adapter->hw);

	/* Have to setup TX_CLK and TX_CRS after software reset */
	e1000_phy_reset_clk_and_crs(adapter);

	e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x8100);

	/* Wait for reset to complete. */
	udelay(500);

	/* Have to setup TX_CLK and TX_CRS after software reset */
	e1000_phy_reset_clk_and_crs(adapter);

	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	e1000_phy_disable_receiver(adapter);

	/* Set the loopback bit in the PHY control register. */
	e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_reg);
	/* NOTE(review): excerpt ends here — the rest of this function is
	 * not part of this chunk */
/* (Excerpt truncated here: the remainder of e1000_nonintegrated_phy_loopback
 * and the rest of the file are not included.  Web-viewer shortcut-key chrome
 * removed during extraction cleanup.) */