📄 myri_sbus.c
    skb->mac.raw = (((unsigned char *)skb->data) + MYRI_PAD_LEN);
    skb_pull(skb, dev->hard_header_len);
    eth = skb->mac.ethernet;

#ifdef DEBUG_HEADER
    DHDR(("myri_type_trans: "));
    dump_ehdr(eth);
#endif
    if (*eth->h_dest & 1) {
        if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
            skb->pkt_type = PACKET_BROADCAST;
        else
            skb->pkt_type = PACKET_MULTICAST;
    } else if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
        if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
            skb->pkt_type = PACKET_OTHERHOST;
    }

    if (ntohs(eth->h_proto) >= 1536)
        return eth->h_proto;

    rawp = skb->data;

    /* This is a magic hack to spot IPX packets. Older Novell breaks
     * the protocol design and runs IPX over 802.3 without an 802.2 LLC
     * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
     * won't work for fault tolerant netware but does for the rest.
     */
    if (*(unsigned short *)rawp == 0xFFFF)
        return htons(ETH_P_802_3);

    /* Real 802.2 LLC */
    return htons(ETH_P_802_2);
}

static void myri_rx(struct myri_eth *mp, struct net_device *dev)
{
    struct recvq *rq = mp->rq;
    struct recvq *rqa = mp->rqack;
    int entry = sbus_readl(&rqa->head);
    int limit = sbus_readl(&rqa->tail);
    int drops;

    DRX(("entry[%d] limit[%d] ", entry, limit));
    if (entry == limit)
        return;
    drops = 0;
    DRX(("\n"));
    while (entry != limit) {
        struct myri_rxd *rxdack = &rqa->myri_rxd[entry];
        u32 csum = sbus_readl(&rxdack->csum);
        int len = sbus_readl(&rxdack->myri_scatters[0].len);
        int index = sbus_readl(&rxdack->ctx);
        struct myri_rxd *rxd = &rq->myri_rxd[rq->tail];
        struct sk_buff *skb = mp->rx_skbs[index];

        /* Ack it. */
        sbus_writel(NEXT_RX(entry), &rqa->head);

        /* Check for errors. */
        DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
        sbus_dma_sync_single(mp->myri_sdev,
                             sbus_readl(&rxd->myri_scatters[0].addr),
                             RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
        if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
            DRX(("ERROR["));
            mp->enet_stats.rx_errors++;
            if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
                DRX(("BAD_LENGTH] "));
                mp->enet_stats.rx_length_errors++;
            } else {
                DRX(("NO_PADDING] "));
                mp->enet_stats.rx_frame_errors++;
            }

            /* Return it to the LANAI. */
    drop_it:
            drops++;
            DRX(("DROP "));
            mp->enet_stats.rx_dropped++;
            sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
            sbus_writel(index, &rxd->ctx);
            sbus_writel(1, &rxd->num_sg);
            sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
            goto next;
        }

        DRX(("len[%d] ", len));
        if (len > RX_COPY_THRESHOLD) {
            struct sk_buff *new_skb;
            u32 dma_addr;

            DRX(("BIGBUFF "));
            new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
            if (new_skb == NULL) {
                DRX(("skb_alloc(FAILED) "));
                goto drop_it;
            }
            sbus_unmap_single(mp->myri_sdev,
                              sbus_readl(&rxd->myri_scatters[0].addr),
                              RX_ALLOC_SIZE,
                              SBUS_DMA_FROMDEVICE);
            mp->rx_skbs[index] = new_skb;
            new_skb->dev = dev;
            skb_put(new_skb, RX_ALLOC_SIZE);
            dma_addr = sbus_map_single(mp->myri_sdev,
                                       new_skb->data,
                                       RX_ALLOC_SIZE,
                                       SBUS_DMA_FROMDEVICE);
            sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
            sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
            sbus_writel(index, &rxd->ctx);
            sbus_writel(1, &rxd->num_sg);
            sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

            /* Trim the original skb for the netif. */
            DRX(("trim(%d) ", len));
            skb_trim(skb, len);
        } else {
            struct sk_buff *copy_skb = dev_alloc_skb(len);

            DRX(("SMALLBUFF "));
            if (copy_skb == NULL) {
                DRX(("dev_alloc_skb(FAILED) "));
                goto drop_it;
            }
            /* DMA sync already done above. */
            copy_skb->dev = dev;
            DRX(("resv_and_put "));
            skb_put(copy_skb, len);
            memcpy(copy_skb->data, skb->data, len);

            /* Reuse original ring buffer. */
            DRX(("reuse "));
            sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
            sbus_writel(index, &rxd->ctx);
            sbus_writel(1, &rxd->num_sg);
            sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

            skb = copy_skb;
        }

        /* Just like the happy meal we get checksums from this card. */
        skb->csum = csum;
        skb->ip_summed = CHECKSUM_UNNECESSARY;  /* XXX */
        skb->protocol = myri_type_trans(skb, dev);
        DRX(("prot[%04x] netif_rx ", skb->protocol));
        netif_rx(skb);

        mp->enet_stats.rx_packets++;
    next:
        DRX(("NEXT\n"));
        entry = NEXT_RX(entry);
    }
}
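
/* A short recap of the receive path above, kept as a comment for readers of
 * this listing; it only restates what the code itself does.  NEXT_RX() and
 * RX_COPY_THRESHOLD come from the driver's companion header (myri_sbus.h),
 * where NEXT_RX() is assumed to be a wrapping increment over the receive
 * ring.
 *
 * For each descriptor between rqa->head and rqa->tail, myri_rx():
 *   1. acks the entry by advancing rqa->head with NEXT_RX();
 *   2. syncs the DMA buffer and sanity-checks the frame length and the
 *      leading MYRI_PAD_LEN pad byte, bumping the error counters and
 *      recycling the buffer on failure;
 *   3. for frames larger than RX_COPY_THRESHOLD, passes the original ring
 *      skb up the stack and replaces it in the ring with a freshly
 *      allocated, freshly DMA-mapped RX_ALLOC_SIZE buffer;
 *   4. for small frames, copies the data into a right-sized skb from
 *      dev_alloc_skb() and reuses the original ring buffer in place;
 *   5. requeues the descriptor (len/ctx/num_sg, advance rq->tail), stamps
 *      the hardware checksum, and hands the skb to netif_rx().
 */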

static void myri_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct myri_eth *mp = (struct myri_eth *) dev->priv;
    unsigned long lregs = mp->lregs;
    struct myri_channel *chan = &mp->shmem->channel;
    u32 status;

    status = sbus_readl(lregs + LANAI_ISTAT);
    DIRQ(("myri_interrupt: status[%08x] ", status));

    if (status & ISTAT_HOST) {
        u32 softstate;

        DIRQ(("IRQ_DISAB "));
        myri_disable_irq(lregs, mp->cregs);
        softstate = sbus_readl(&chan->state);
        DIRQ(("state[%08x] ", softstate));
        if (softstate != STATE_READY) {
            DIRQ(("myri_not_so_happy "));
            myri_is_not_so_happy(mp);
        }
        DIRQ(("\nmyri_rx: "));
        myri_rx(mp, dev);
        DIRQ(("\nistat=ISTAT_HOST "));
        sbus_writel(ISTAT_HOST, lregs + LANAI_ISTAT);
        DIRQ(("IRQ_ENAB "));
        myri_enable_irq(lregs, mp->cregs);
    }
    DIRQ(("\n"));
}

static int myri_open(struct net_device *dev)
{
    struct myri_eth *mp = (struct myri_eth *) dev->priv;

    return myri_init(mp, in_interrupt());
}

static int myri_close(struct net_device *dev)
{
    struct myri_eth *mp = (struct myri_eth *) dev->priv;

    myri_clean_rings(mp);
    return 0;
}

static void myri_tx_timeout(struct net_device *dev)
{
    struct myri_eth *mp = (struct myri_eth *) dev->priv;

    printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);

    mp->enet_stats.tx_errors++;
    myri_init(mp, 0);
    netif_start_queue(dev);
}

static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct myri_eth *mp = (struct myri_eth *) dev->priv;
    struct sendq *sq = mp->sq;
    struct myri_txd *txd;
    unsigned long flags;
    unsigned int head, tail;
    int len, entry;
    u32 dma_addr;

    DTX(("myri_start_xmit: "));

    myri_tx(mp, dev);
    netif_stop_queue(dev);

    /* This is just to prevent multiple PIO reads for TX_BUFFS_AVAIL. */
    head = sbus_readl(&sq->head);
    tail = sbus_readl(&sq->tail);

    if (!TX_BUFFS_AVAIL(head, tail)) {
        DTX(("no buffs available, returning 1\n"));
        return 1;
    }

    save_and_cli(flags);

    DHDR(("xmit[skbdata(%p)]\n", skb->data));
#ifdef DEBUG_HEADER
    dump_ehdr_and_myripad(((unsigned char *) skb->data));
#endif

    /* XXX Maybe this can go as well. */
    len = skb->len;
    if (len & 3) {
        DTX(("len&3 "));
        len = (len + 4) & (~3);
    }

    entry = sbus_readl(&sq->tail);

    txd = &sq->myri_txd[entry];
    mp->tx_skbs[entry] = skb;

    /* Must do this before we sbus map it. */
    if (skb->data[MYRI_PAD_LEN] & 0x1) {
        sbus_writew(0xffff, &txd->addr[0]);
        sbus_writew(0xffff, &txd->addr[1]);
        sbus_writew(0xffff, &txd->addr[2]);
        sbus_writew(0xffff, &txd->addr[3]);
    } else {
        sbus_writew(0xffff, &txd->addr[0]);
        sbus_writew((skb->data[0] << 8) | skb->data[1], &txd->addr[1]);
        sbus_writew((skb->data[2] << 8) | skb->data[3], &txd->addr[2]);
        sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
    }

    dma_addr = sbus_map_single(mp->myri_sdev, skb->data, len, SBUS_DMA_TODEVICE);
    sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
    sbus_writel(len, &txd->myri_gathers[0].len);
    sbus_writel(1, &txd->num_sg);
    sbus_writel(KERNEL_CHANNEL, &txd->chan);
    sbus_writel(len, &txd->len);
    sbus_writel((u32)-1, &txd->csum_off);
    sbus_writel(0, &txd->csum_field);

    sbus_writel(NEXT_TX(entry), &sq->tail);
    DTX(("BangTheChip "));
    bang_the_chip(mp);

    DTX(("tbusy=0, returning 0\n"));
    netif_start_queue(dev);
    restore_flags(flags);
    return 0;
}
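
/* Transmit path recap, again only restating what the code above does.
 * TX_BUFFS_AVAIL() and NEXT_TX() are assumed to be the ring-accounting
 * macros from the companion header (myri_sbus.h).
 *
 * myri_start_xmit() first reaps completed transmits with myri_tx() and
 * stops the queue, then reads sq->head/sq->tail once (to avoid repeated
 * PIO) and bails out if no descriptor is free.  The frame length is
 * rounded up to a 4-byte multiple, and the destination address words in
 * txd->addr[] are filled either with all-ones (when the first byte after
 * the MyriNet pad has its multicast bit set) or with the destination MAC
 * taken from the packet itself.  The skb is then DMA-mapped, the gather
 * descriptor and channel/length fields are written, csum_off is set to
 * (u32)-1 (presumably meaning "no checksum insertion"), sq->tail is
 * advanced with NEXT_TX(), and bang_the_chip() kicks the LANAI before
 * the queue is restarted.
 */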

/* Create the MyriNet MAC header for an arbitrary protocol layer
 *
 * saddr=NULL means use device source address
 * daddr=NULL means leave destination address (eg unresolved arp)
 */
static int myri_header(struct sk_buff *skb, struct net_device *dev,
                       unsigned short type, void *daddr, void *saddr,
                       unsigned len)
{
    struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
    unsigned char *pad = (unsigned char *) skb_push(skb, MYRI_PAD_LEN);

#ifdef DEBUG_HEADER
    DHDR(("myri_header: pad[%02x,%02x] ", pad[0], pad[1]));
    dump_ehdr(eth);
#endif

    /* Set the MyriNET padding identifier. */
    pad[0] = MYRI_PAD_LEN;
    pad[1] = 0xab;

    /* Set the protocol type. For a packet of type ETH_P_802_3 we put the
     * length in here instead. It is up to the 802.2 layer to carry
     * protocol information.
     */
    if (type != ETH_P_802_3)
        eth->h_proto = htons(type);
    else
        eth->h_proto = htons(len);

    /* Set the source hardware address. */
    if (saddr)
        memcpy(eth->h_source, saddr, dev->addr_len);
    else
        memcpy(eth->h_source, dev->dev_addr, dev->addr_len);

    /* Anyway, the loopback-device should never use this function... */
    if (dev->flags & IFF_LOOPBACK) {
        int i;

        for (i = 0; i < dev->addr_len; i++)
            eth->h_dest[i] = 0;
        return (dev->hard_header_len);
    }

    if (daddr) {
        memcpy(eth->h_dest, daddr, dev->addr_len);
        return dev->hard_header_len;
    }
    return -dev->hard_header_len;
}

/* Rebuild the MyriNet MAC header. This is called after an ARP
 * (or in future other address resolution) has completed on this
 * sk_buff. We now let ARP fill in the other fields.
 */
static int myri_rebuild_header(struct sk_buff *skb)
{
    unsigned char *pad = (unsigned char *) skb->data;
    struct ethhdr *eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);
    struct net_device *dev = skb->dev;

#ifdef DEBUG_HEADER
    DHDR(("myri_rebuild_header: pad[%02x,%02x] ", pad[0], pad[1]));
    dump_ehdr(eth);
#endif

    /* Refill MyriNet padding identifiers, this is just being anal. */
    pad[0] = MYRI_PAD_LEN;
    pad[1] = 0xab;

    switch (eth->h_proto) {
#ifdef CONFIG_INET
    case __constant_htons(ETH_P_IP):
        return arp_find(eth->h_dest, skb);
#endif

    default:
        printk(KERN_DEBUG
               "%s: unable to resolve type %X addresses.\n",
               dev->name, (int)eth->h_proto);
        memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
        return 0;
        break;
    }

    return 0;
}

int myri_header_cache(struct neighbour *neigh, struct hh_cache *hh)
{
    unsigned short type = hh->hh_type;
    unsigned char *pad = (unsigned char *) hh->hh_data;
    struct ethhdr *eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);