/* at_main.c */
printk(KERN_ERR "ATL1e: Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
at_reset_hw(&adapter->hw);
return PCI_ERS_RESULT_RECOVERED;
}
/**
* at_io_resume - called when traffic can start flowing again.
* @pdev: Pointer to PCI device
*
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the at_resume routine.
*/
static void at_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct at_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (at_up(adapter)) {
			printk(KERN_ERR "ATL1e: can't bring device back up after reset\n");
return;
}
}
netif_device_attach(netdev);
}
#endif /* CONFIG_AT_PCI_ERS */
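/*
 * A minimal sketch of how the PCI error-recovery callbacks above are
 * typically wired up. The struct pci_error_handlers fields below are the
 * standard kernel API; the at_io_error_detected/at_io_slot_reset names and
 * the actual registration live elsewhere in this driver, so this is
 * illustrative only:
 *
 *	static struct pci_error_handlers at_err_handler = {
 *		.error_detected	= at_io_error_detected,
 *		.slot_reset	= at_io_slot_reset,
 *		.resume		= at_io_resume,
 *	};
 *
 * and the driver's struct pci_driver then sets
 * .err_handler = &at_err_handler.
 */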
/**
* at_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
inline void
at_irq_enable(struct at_adapter *adapter)
{
#if 0
int c = atomic_read(&adapter->irq_sem);
AT_DBG("irq_sem=%d\n", c);
#endif
if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
AT_WRITE_FLUSH(&adapter->hw);
}
}
/**
* at_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
inline void
at_irq_disable(struct at_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
AT_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
}
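/*
 * Usage sketch (not driver code): irq_sem acts as a disable-depth counter.
 * It starts at 1 in at_sw_init(), so the IMR stays masked until the first
 * at_irq_enable() call. Disable/enable calls nest:
 *
 *	at_irq_disable(adapter);	// irq_sem 1 -> 2, IMR = 0
 *	at_irq_disable(adapter);	// irq_sem 2 -> 3
 *	at_irq_enable(adapter);		// irq_sem 3 -> 2, still masked
 *	at_irq_enable(adapter);		// irq_sem 2 -> 1, still masked
 *	at_irq_enable(adapter);		// irq_sem 1 -> 0, IMR_NORMAL_MASK
 *
 * Only the transition to zero re-arms the hardware interrupt mask.
 */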
/**
* at_sw_init - Initialize general software structures (struct at_adapter)
* @adapter: board private structure to initialize
*
* at_sw_init initializes the Adapter private data structure.
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
static int __devinit
at_sw_init(struct at_adapter *adapter)
{
struct at_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
u32 val;
#ifdef CONFIG_AT_NAPI
int i;
#endif
/* PCI config space info */
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
val = AT_READ_REG(hw, REG_PHY_STATUS);
	/* NIC type */
	if (hw->revision_id >= 0xF0) {
		hw->nic_type = athr_l2e_revB;
	} else {
		if (val & PHY_STATUS_100M)
			hw->nic_type = athr_l1e;
		else
			hw->nic_type = athr_l2e_revA;
	}
	hw->emi_ca = (val & PHY_STATUS_EMI_CA) ? TRUE : FALSE;
	adapter->wol = AT_WUFC_MAG;
	adapter->ict = 50000;			/* 100ms */
	adapter->link_speed = SPEED_0;		/* hardware init */
	adapter->link_duplex = FULL_DUPLEX;
	hw->phy_configured = FALSE;
	hw->preamble_len = 7;
	hw->smb_timer = 200000;
	hw->max_frame_size = adapter->netdev->mtu;
	hw->dmar_block = at_dma_req_1024;
	hw->dmaw_block = at_dma_req_1024;
	hw->rx_jumbo_th = (hw->max_frame_size +
			   ENET_HEADER_SIZE +
			   VLAN_SIZE +
			   ETHERNET_FCS_SIZE + 7) >> 3;
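	/*
	 * Worked example (assuming the conventional sizes
	 * ENET_HEADER_SIZE = 14, VLAN_SIZE = 4, ETHERNET_FCS_SIZE = 4):
	 * for the default MTU of 1500 this gives
	 * (1500 + 14 + 4 + 4 + 7) >> 3 = 1529 >> 3 = 191, i.e. the jumbo
	 * threshold is programmed in 8-byte units, with "+ 7" rounding up.
	 */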
hw->indirect_tab = 0;
hw->base_cpu = 0;
hw->rrs_type = at_rrs_disable;
adapter->num_rx_queues = 1;
#ifdef CONFIG_AT_MQ
	hw->indirect_tab = 0xE4E4E4E4;
	hw->rrs_type = at_rrs_ipv4 | at_rrs_ipv4_tcp |
		       at_rrs_ipv6 | at_rrs_ipv6_tcp;
	adapter->num_rx_queues = min(4, num_online_cpus());
	AT_DBG("Multiqueue Enabled: Rx Queue count = %u %s\n",
	       adapter->num_rx_queues,
	       ((adapter->num_rx_queues == 1)
		? ((num_online_cpus() > 1)
		   ? "(due to unsupported feature in current adapter)"
		   : "(due to unsupported system configuration)")
		: ""));
#endif /* CONFIG_AT_MQ */
if (at_alloc_queues(adapter)) {
AT_ERR("Unable to allocate memory for queues\n");
return -ENOMEM;
}
#ifdef CONFIG_AT_NAPI
for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->polling_netdev[i].priv = adapter;
adapter->polling_netdev[i].poll = &at_clean;
adapter->polling_netdev[i].weight = 64;
dev_hold(&adapter->polling_netdev[i]);
set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
}
spin_lock_init(&adapter->tx_queue_lock);
#endif
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tx_lock);
set_bit(__AT_DOWN, &adapter->flags);
return 0;
}
#ifdef CONFIG_AT_NAPI
/**
 * at_clean - NAPI Rx polling callback
 * @poll_dev: the (possibly per-queue) net_device being polled
 * @budget: in/out limit on how many packets may be processed
 **/
static int
at_clean(struct net_device *poll_dev, int *budget)
{
struct at_adapter *adapter;
int work_to_do = min(*budget, poll_dev->quota);
int i = 0, work_done = 0;
boolean_t tx_cleaned = FALSE;
	/* poll_dev may be one of the bare polling_netdev structures, whose
	 * ->priv is assigned by hand in at_sw_init(), so the netdev_priv()
	 * macro must NOT be used here. */
	adapter = poll_dev->priv;
/* Keep link state information with original netdev */
if (!netif_carrier_ok(adapter->netdev))
goto quit_polling;
while (poll_dev != &adapter->polling_netdev[i]) {
i++;
if (unlikely(i == adapter->num_rx_queues))
BUG();
}
	/* No Tx cleaning is done from the NAPI path in this driver, so
	 * treat Tx work as complete and let Rx progress alone decide
	 * when to exit polling. */
	tx_cleaned = TRUE;
at_clean_rx_irq(adapter, i, &work_done, work_to_do);
*budget -= work_done;
poll_dev->quota -= work_done;
/* If no Tx and not enough Rx work done, exit the polling mode */
if ((tx_cleaned && (work_done < work_to_do)) || !netif_running(poll_dev)) {
quit_polling:
netif_rx_complete(poll_dev);
if (test_bit(__AT_DOWN, &adapter->flags))
atomic_dec(&adapter->irq_sem);
else
at_irq_enable(adapter);
return 0;
}
return 1;
}
#endif
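/*
 * Note on the (pre-2.6.24) NAPI contract assumed above: the poll callback
 * may consume up to min(*budget, dev->quota) packets, must subtract the
 * work actually done from both counters, and returns 1 to stay on the
 * poll list, or 0 after calling netif_rx_complete() once all pending Rx
 * work fit within the allowance.
 */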
/**
* at_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
* We allocate one ring per queue at run-time since we don't know the
* number of queues at compile-time. The polling_netdev array is
* intended for Multiqueue, but should work fine with a single queue.
**/
static int __devinit
at_alloc_queues(struct at_adapter *adapter)
{
#ifdef CONFIG_AT_NAPI
	int size = sizeof(struct net_device) * adapter->num_rx_queues;

	adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
	if (!adapter->polling_netdev)
		return -ENOMEM;
	memset(adapter->polling_netdev, 0, size);
#endif
#ifdef CONFIG_AT_MQ
	adapter->rx_sched_call_data.func = at_rx_schedule;
	adapter->rx_sched_call_data.info = adapter->netdev;
	/* one pointer per possible CPU, zero-initialized; dereference only
	 * through the per_cpu_ptr()/get_cpu_ptr() macros */
	adapter->cpu_netdev = alloc_percpu(struct net_device *);
	if (!adapter->cpu_netdev)
		return -ENOMEM;
#endif
return AT_SUCCESS;
}
#ifdef CONFIG_AT_MQ
static void __devinit
at_setup_queue_mapping(struct at_adapter *adapter)
{
int i, cpu;
adapter->rx_sched_call_data.func = at_rx_schedule;
adapter->rx_sched_call_data.info = adapter->netdev;
cpus_clear(adapter->rx_sched_call_data.cpumask);
lock_cpu_hotplug();
i = 0;
for_each_online_cpu(cpu) {
/* This is incomplete because we'd like to assign separate
* physical cpus to these netdev polling structures and
* avoid saturating a subset of cpus.
*/
if (i < adapter->num_rx_queues) {
*per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
adapter->rx_cpu[i] = cpu;
cpu_set(cpu, adapter->cpumask);
} else
*per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
i++;
}
unlock_cpu_hotplug();
}
#endif /* CONFIG_AT_MQ */
#ifdef CONFIG_AT_MQ
void
at_rx_schedule(void *data)
{
	struct net_device *poll_dev, *netdev = data;
	struct at_adapter *adapter = netdev_priv(netdev);
	int this_cpu = get_cpu();	/* disables preemption */

	poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
if (poll_dev == NULL) {
put_cpu();
return;
}
if (likely(netif_rx_schedule_prep(poll_dev)))
__netif_rx_schedule(poll_dev);
else
at_irq_enable(adapter);
put_cpu();
}
#endif
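/*
 * How the multiqueue Rx scheduling above fits together (a sketch; the
 * interrupt-side caller is outside this excerpt): the ISR presumably runs
 * at_rx_schedule() on each queue's assigned CPU through rx_sched_call_data
 * (an smp_call_function-style hook). The handler then looks up that CPU's
 * polling netdev via per_cpu_ptr() and queues it for NAPI with
 * __netif_rx_schedule(). If no queue is mapped to this CPU it simply
 * returns; if polling was already scheduled it re-enables interrupts.
 */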
int
at_reset(struct at_adapter *adapter)
{
int ret;
if (AT_SUCCESS != (ret = at_reset_hw(&adapter->hw)))
return ret;
return at_init_hw(&adapter->hw);
}
/**
* at_open - Called when a network interface is made active
* @netdev: network interface device structure
*
* Returns 0 on success, negative value on failure
*
* The open entry point is called when a network interface is made
* active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
static int
at_open(struct net_device *netdev)
{
struct at_adapter *adapter = netdev_priv(netdev);
int err;
u32 val;
DEBUGFUNC("at_open !");
/* disallow open during test */
if (test_bit(__AT_TESTING, &adapter->flags))
return -EBUSY;
/* allocate rx/tx dma buffer & descriptors */
	if ((err = at_setup_ring_resources(adapter)))
		return err;
	if ((err = at_init_hw(&adapter->hw))) {
err = -EIO;
goto err_init_hw;
}
/* hardware has been reset, we need to reload some things */
at_set_multi(netdev);
init_ring_ptrs(adapter);
#ifdef NETIF_F_HW_VLAN_TX
at_restore_vlan(adapter);
#endif
if (at_configure(adapter)) {
err = -EIO;
goto err_config;
}
if ((err = at_request_irq(adapter)))
goto err_req_irq;
clear_bit(__AT_DOWN, &adapter->flags);
#ifdef CONFIG_AT_MQ
at_setup_queue_mapping(adapter);
#endif
#ifdef CONFIG_AT_NAPI
netif_poll_enable(netdev);
#endif
mod_timer(&adapter->watchdog_timer, jiffies + 4*HZ);
	/* kick a manual interrupt so any already-pending status (e.g. a
	 * link change) is serviced immediately */
	val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL);
	AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val | MASTER_CTRL_MANUAL_INT);
at_irq_enable(adapter);
return 0;
err_init_hw:
err_req_irq:
err_config:
at_free_ring_resources(adapter);
at_reset_hw(&adapter->hw);
return err;
}
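/*
 * Sketch of how at_open/at_close are hooked up (the registration happens
 * in the probe routine, outside this excerpt; kernels of this vintage
 * assign the methods directly rather than via net_device_ops):
 *
 *	netdev->open = at_open;
 *	netdev->stop = at_close;
 */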
/**
* at_close - Disables a network interface
* @netdev: network interface device structure
*
* Returns 0, this is not allowed to fail
*
* The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/