ibmveth.c
来自「linux 内核源代码」· C语言 代码 · 共 1,549 行 · 第 1/3 页
C
1,549 行
/*
 * Tail of ibmveth_open() -- the function head (signature, locals, and the
 * loop that accumulates rxq_entries over the rx buffer pools) lies before
 * this view.  This fragment allocates the buffer-list and filter-list
 * pages plus the receive queue, DMA-maps all three, and fills in the rx
 * queue descriptor handed to the hypervisor below.
 */
rxq_entries += adapter->rx_buff_pool[i].size;
/* One zeroed 4K page each for the hypervisor buffer list and filter list. */
adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
	ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
	/* ibmveth_cleanup() presumably frees whichever page did get
	 * allocated -- TODO confirm against its definition (not in view). */
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return -ENOMEM;
}
/* Receive queue: one ibmveth_rx_q_entry per buffer across all pools. */
adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);
if(!adapter->rx_queue.queue_addr) {
	ibmveth_error_printk("unable to allocate rx queue pages\n");
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return -ENOMEM;
}
/* Map the two 4K list pages and the rx queue for device (hypervisor) access. */
adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev, adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev, adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev, adapter->rx_queue.queue_addr, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
/* Old single-argument dma_mapping_error() (pre-2.6.27 API). */
if((dma_mapping_error(adapter->buffer_list_dma) ) || (dma_mapping_error(adapter->filter_list_dma)) || (dma_mapping_error(adapter->rx_queue.queue_dma))) {
	ibmveth_error_printk("unable to map filter or buffer list pages\n");
	/* NOTE(review): mappings that did succeed are presumably undone by
	 * ibmveth_cleanup() -- verify it unmaps before freeing. */
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return -ENOMEM;
}
/* Initialise rx queue bookkeeping; toggle starts at 1 to match the
 * hypervisor's initial valid-bit polarity. */
adapter->rx_queue.index = 0;
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
/* Pack the 6-byte MAC into the low bytes of a u64: copy into the top of
 * mac_address then shift out the two pad bytes.  NOTE(review): this
 * byte-placement assumes big-endian layout (true on POWER, where this
 * driver runs) -- not portable as written. */
memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
mac_address = mac_address >> 16;
/* Descriptor handed to H_REGISTER_LOGICAL_LAN: valid bit + length in the
 * flags/len word, DMA address of the rx queue in the other. */
rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
rxq_desc.fields.address = adapter->rx_queue.queue_dma;
ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
/* Keep the VIO interrupt masked until registration succeeds. */
h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
/*
 * Tail of ibmveth_open(), continued: register the logical LAN with the
 * hypervisor, populate the buffer pools, request the irq, prime the
 * replenish cycle and start the tx queue.
 */
lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
if(lpar_rc != H_SUCCESS) {
	ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
	ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n", adapter->buffer_list_dma, adapter->filter_list_dma, rxq_desc.desc, mac_address);
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return -ENONET;
}
/* Allocate skb pools only for the pools marked active (sysfs-tunable). */
for(i = 0; i<IbmVethNumBufferPools; i++) {
	if(!adapter->rx_buff_pool[i].active)
		continue;
	if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
		ibmveth_error_printk("unable to alloc pool\n");
		adapter->rx_buff_pool[i].active = 0;
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM ;
	}
}
ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
	ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
	/* Undo the hypervisor registration; H_FREE may report long-busy,
	 * so spin until it completes. */
	do {
		rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}
ibmveth_debug_printk("initial replenish cycle\n");
/* Call the interrupt handler directly once to kick off the first buffer
 * replenish / NAPI schedule. */
ibmveth_interrupt(netdev->irq, netdev);
netif_start_queue(netdev);
ibmveth_debug_printk("open complete\n");
return 0;}

/*
 * ibmveth_close() - ndo stop path: quiesce NAPI, detach the irq, free the
 * logical LAN at the hypervisor and release all rx resources.
 * Returns 0 unconditionally (h_free failure is logged, not propagated).
 */
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	long lpar_rc;
	ibmveth_debug_printk("close starting\n");
	napi_disable(&adapter->napi);
	/* pool_config is set while the pool sysfs code closes/reopens the
	 * device; in that case the queue is left running on purpose. */
	if (!adapter->pool_config)
		netif_stop_queue(netdev);
	free_irq(netdev->irq, netdev);
	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
	if(lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n", lpar_rc);
	}
	/* The firmware maintains a no-buffer counter in the last 8 bytes of
	 * the buffer-list page; snapshot it before the page is freed. */
	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
	ibmveth_cleanup(adapter);
	ibmveth_debug_printk("close complete\n");
	return 0;}

/* Return type of netdev_get_settings(); its name and body continue on the
 * next source line of this (line-mangled) chunk. */
static int
/*
 * ethtool get_settings: the virtual adapter has no real PHY, so report a
 * fixed 1000FD fibre link with autoneg "enabled".  (The `static int`
 * return type sits at the end of the previous mangled source line.)
 */
netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;}

/* ethtool get_drvinfo: driver name/version strings.
 * NOTE(review): strncpy with size-1 does not guarantee NUL termination by
 * itself; this presumably relies on `info` arriving zeroed from the
 * ethtool core -- confirm, or prefer a always-terminating copy. */
static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);}

/* ethtool get_link: the virtual link is always up. */
static u32 netdev_get_link(struct net_device *dev) {
	return 1;}

/* Record the new rx checksum-offload state; disabling rx csum also clears
 * NETIF_F_IP_CSUM (see the original comment below for why). */
static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;
	if (data)
		adapter->rx_csum = 1;
	else {
		/*
		 * Since the ibmveth firmware interface does not have the concept of
		 * separate tx/rx checksum offload enable, if rx checksum is disabled
		 * we also have to disable tx checksum offload. Once we disable rx
		 * checksum offload, we are no longer allowed to send tx buffers that
		 * are not properly checksummed.
		 */
		adapter->rx_csum = 0;
		dev->features &= ~NETIF_F_IP_CSUM;
	}}

/* Record the new tx checksum-offload state.  Enabling tx csum also sets
 * rx_csum = 1: firmware has a single combined csum-offload switch, so tx
 * offload implies rx offload. */
static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;
	if (data) {
		dev->features |= NETIF_F_IP_CSUM;
		adapter->rx_csum = 1;
	} else
		dev->features &= ~NETIF_F_IP_CSUM;}

/*
 * Flip the firmware's IPv4 TCP checksum-offload attribute via
 * H_ILLAN_ATTRIBUTES, then invoke `done` to update the driver-side flags.
 * If the device is running it is closed around the change and reopened.
 * Returns 0 on success, -EIO if the attribute change is rejected, or the
 * reopen error.
 */
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, void (*done) (struct net_device *, u32))
{
	struct ibmveth_adapter *adapter = dev->priv;
	u64 set_attr, clr_attr, ret_attr;
	long ret;
	int rc1 = 0, rc2 = 0;
	int restart = 0;
	if (netif_running(dev)) {
		restart = 1;
		/* pool_config keeps ibmveth_close() from stopping the tx queue
		 * during this internal close/reopen. */
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}
	set_attr = 0;
	clr_attr = 0;
	if (data)
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
	else
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
	/* First a query (clr=set=0) to see what the partition supports. */
	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
	/* Only usable on a non-trunk adapter that pads checksummed packets. */
	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr, set_attr, &ret_attr);
		if (ret != H_SUCCESS) {
			rc1 = -EIO;
			ibmveth_error_printk("unable to change checksum offload settings." " %d rc=%ld\n", data, ret);
			/* Best effort: restore the previous attribute state by
			 * swapping the set/clear masks. */
			ret = h_illan_attributes(adapter->vdev->unit_address, set_attr, clr_attr, &ret_attr);
		} else
			done(dev, data);
	} else {
		rc1 = -EIO;
		ibmveth_error_printk("unable to change checksum offload settings." " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr);
	}
	if (restart)
		rc2 = ibmveth_open(dev);
	/* Attribute-change failure takes precedence over a reopen failure.
	 * (Expression continues on the next mangled source line.) */
	return rc1 ?
rc1 : rc2;}

/* ethtool set_rx_csum: no-op if already in the requested state, otherwise
 * push the change to firmware with the rx-flags updater. */
static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;
	if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum))
		return 0;
	return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags);}

/* ethtool set_tx_csum: the firmware hcall is only needed when enabling tx
 * csum while rx csum is off (the combined switch must be turned on);
 * otherwise just the local feature flags change. */
static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;
	int rc = 0;
	if (data && (dev->features & NETIF_F_IP_CSUM))
		return 0;
	if (!data && !(dev->features & NETIF_F_IP_CSUM))
		return 0;
	if (data && !adapter->rx_csum)
		rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags);
	else
		ibmveth_set_tx_csum_flags(dev, data);
	return rc;}

/* ethtool get_rx_csum: report the driver-side rx csum state. */
static u32 ibmveth_get_rx_csum(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return adapter->rx_csum;}

/* ethtool get_strings: copy out the stat names (ETH_SS_STATS only). */
static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;
	if (stringset != ETH_SS_STATS)
		return;
	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);}

/* ethtool get_sset_count: number of stats for ETH_SS_STATS. */
static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}}

/* ethtool get_ethtool_stats: read each stat out of the adapter struct via
 * the IBMVETH_GET_STAT offset helper. */
static void ibmveth_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = dev->priv;
	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);}

/* ethtool operations table for this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= netdev_get_link,
	.set_tx_csum		= ibmveth_set_tx_csum,
	.get_rx_csum		= ibmveth_get_rx_csum,
	.set_rx_csum		= ibmveth_set_rx_csum,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,};

/* No private ioctls are supported. */
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;}

/* Offset of v within its 4K page; macro text is split across the chunk's
 * mangled line boundary and continues on the next source line. */
#define page_offset(v) ((unsigned
long)(v) & ((1 << 12) - 1))

/*
 * ibmveth_start_xmit() - hard_start_xmit: DMA-map the (linear) skb, hand
 * it to the hypervisor with H_SEND_LOGICAL_LAN, account the result, and
 * always consume the skb (returns 0; never requeues).
 */
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	unsigned long correlator;
	unsigned long flags;
	unsigned int retry_count;
	unsigned int tx_dropped = 0;
	unsigned int tx_bytes = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
	/* NOTE(review): skb->data is mapped *before* skb_checksum_help()
	 * below, which may modify/reallocate the data it covers; later
	 * mainline versions map after the checksum fixup -- worth fixing. */
	desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	/* Firmware only offloads TCP csum: software-checksum anything else. */
	if (skb->ip_summed == CHECKSUM_PARTIAL && ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
		ibmveth_error_printk("tx: failed to checksum packet\n");
		tx_dropped++;
		/* NOTE(review): this goto skips dma_unmap_single(), leaking the
		 * mapping made above -- confirm and fix upstream-style. */
		goto out;
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;
		desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}
	if (dma_mapping_error(desc.fields.address)) {
		ibmveth_error_printk("tx: unable to map xmit buffer\n");
		tx_map_failed++;
		tx_dropped++;
		goto out;
	}
	/* send the frame. Arbitrarily set retrycount to 1024 */
	correlator = 0;
	retry_count = 1024;
	do {
		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address, desc.desc, 0, 0, 0, 0, 0, correlator, &correlator);
	} while ((lpar_rc == H_BUSY) && (retry_count--));
	/* H_DROPPED counts as "sent" from the stack's point of view. */
	if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
		/* printk argument list continues on the next mangled line. */
		ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n", (desc.fields.flags_len & IBMVETH_BUF_VALID) ?
1 : 0, skb->len, desc.fields.address);
		tx_send_failed++;
		tx_dropped++;
	} else {
		tx_packets++;
		tx_bytes += skb->len;
		netdev->trans_start = jiffies;
	}
	dma_unmap_single(&adapter->vdev->dev, desc.fields.address, skb->len, DMA_TO_DEVICE);
out:
	/* Fold the per-call counters into the shared stats under the lock. */
	spin_lock_irqsave(&adapter->stats_lock, flags);
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	spin_unlock_irqrestore(&adapter->stats_lock, flags);
	/* skb is always consumed, even on drop. */
	dev_kfree_skb(skb);
	return 0;}

/*
 * ibmveth_poll() - NAPI poll: drain up to `budget` frames from the rx
 * queue, push them up the stack, replenish buffers, and re-enable the
 * VIO interrupt when the queue is exhausted.  Returns frames processed.
 */
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
restart_poll:
	do {
		struct sk_buff *skb;
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;
		/* Order the pending-buffer check before the entry reads. */
		rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			ibmveth_debug_printk("recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);
			skb = ibmveth_rxq_get_buffer(adapter);
			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			ibmveth_rxq_harvest_buffer(adapter);
			skb_reserve(skb, offset);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);
			netif_receive_skb(skb); /* send it up */
			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
			netdev->last_rx = jiffies;
		}
	} while (frames_processed < budget);
	ibmveth_replenish_task(adapter);
	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
		ibmveth_assert(lpar_rc == H_SUCCESS);
		netif_rx_complete(netdev, napi);
		/* Race closure: a frame may have arrived between the last poll
		 * pass and re-enabling the irq -- if so, re-disable and loop. */
		if (ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}
	return frames_processed;}

/*
 * ibmveth_interrupt() - VIO irq handler: mask the interrupt and hand rx
 * processing to NAPI.  Always returns IRQ_HANDLED (the VIO bus is not
 * shared, so there is no "not ours" case to report).
 */
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;
	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_SUCCESS);
		__netif_rx_schedule(netdev, &adapter->napi);
	}
	return IRQ_HANDLED;}

/*
 * ibmveth_set_multicast_list() - rx-mode hook.  Promiscuous mode (or more
 * multicast addresses than the firmware filter holds) disables filtering
 * entirely; otherwise the filter table is cleared and repopulated.
 * NOTE: the function is truncated at the end of this chunk -- its tail
 * (per-address h_multicast_ctrl calls and re-enable) lies beyond view.
 */
static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;
	if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, IbmVethMcastEnableRecv | IbmVethMcastDisableFiltering, 0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
		}
	} else {
		struct dev_mc_list *mclist = netdev->mc_list;
		int i;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, IbmVethMcastEnableRecv | IbmVethMcastDisableFiltering | IbmVethMcastClearFilterTable, 0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
		}
		/* add the addresses to the filter table */
		for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
			// add the multicast address to the filter table
			unsigned long mcast_addr = 0;
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?