sunhme.c
#endif

#ifdef RXDEBUG
#define RXD(x) printk x
#else
#define RXD(x)
#endif

/* Originally I used to handle the allocation failure by just giving back
 * that one ring buffer to the happy meal. Problem is that usually when that
 * condition is triggered, the happy meal expects you to do something
 * reasonable with all of the packets it has DMA'd in. So now I just drop
 * the entire ring when we cannot get a new skb and give them all back to
 * the happy meal, maybe things will be "happier" now.
 */
static inline void happy_meal_rx(struct happy_meal *hp, struct device *dev,
				 struct hmeal_gregs *gregs)
{
	struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
	struct happy_meal_rxd *this;
	int elem = hp->rx_new, drops = 0;

	RXD(("RX<"));
	this = &rxbase[elem];
	while(!(this->rx_flags & RXFLAG_OWN)) {
		struct sk_buff *skb;
		unsigned int flags = this->rx_flags;
		int len = flags >> 16;
		u16 csum = flags & RXFLAG_CSUM;

		RXD(("[%d ", elem));

		/* Check for errors. */
		if((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
			RXD(("ERR(%08x)]", flags));
			hp->net_stats.rx_errors++;
			if(len < ETH_ZLEN)
				hp->net_stats.rx_length_errors++;
			if(len & (RXFLAG_OVERFLOW >> 16)) {
				hp->net_stats.rx_over_errors++;
				hp->net_stats.rx_fifo_errors++;
			}

			/* Return it to the Happy meal. */
	drop_it:
			hp->net_stats.rx_dropped++;
			this->rx_addr = kva_to_hva(hp, hp->rx_skbs[elem]->data);
			this->rx_flags =
				(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16));
			goto next;
		}
		skb = hp->rx_skbs[elem];
#ifdef NEED_DMA_SYNCHRONIZATION
		mmu_sync_dma(kva_to_hva(hp, skb->data),
			     skb->len, hp->happy_sbus_dev->my_bus);
#endif
		if(len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			/* Now refill the entry, if we can. */
			new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE,
						       (GFP_DMA | GFP_ATOMIC));
			if(!new_skb) {
				drops++;
				goto drop_it;
			}
			hp->rx_skbs[elem] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET));
			rxbase[elem].rx_addr = kva_to_hva(hp, new_skb->data);
			skb_reserve(new_skb, RX_OFFSET);
			rxbase[elem].rx_flags =
				(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16));

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if(!copy_skb) {
				drops++;
				goto drop_it;
			}

			copy_skb->dev = dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			memcpy(copy_skb->data, skb->data, len);

			/* Reuse original ring buffer. */
			rxbase[elem].rx_addr = kva_to_hva(hp, skb->data);
			rxbase[elem].rx_flags =
				(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16));

			skb = copy_skb;
		}

		/* This card is _fucking_ hot... */
		if(!(csum ^ 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		RXD(("len=%d csum=%4x]", len, csum));
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		hp->net_stats.rx_packets++;
		hp->net_stats.rx_bytes += len;
	next:
		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	hp->rx_new = elem;
	if(drops)
		printk("%s: Memory squeeze, deferring packet.\n", hp->dev->name);
	RXD((">"));
}
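/* Editor's note, not from the original source: the rx_flags descriptor word
 * packs the received frame length into its upper 16 bits and a
 * hardware-computed checksum field into its lower 16 bits (RXFLAG_CSUM);
 * the loop above treats a csum value of 0xffff (presumably a folded
 * one's-complement sum) as "already verified".  A minimal sketch of that
 * decision, kept under "#if 0" because it is illustrative only and the
 * helper name is hypothetical:
 */
#if 0
static inline void hme_set_csum_state(struct sk_buff *skb, u16 csum)
{
	if(!(csum ^ 0xffff))			/* hardware says the sum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;	/* let the stack verify it */
}
#endif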
#ifdef CONFIG_PCI
static inline void pci_happy_meal_rx(struct happy_meal *hp, struct device *dev,
				     struct hmeal_gregs *gregs)
{
	struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
	struct happy_meal_rxd *this;
	unsigned int flags;
	int elem = hp->rx_new, drops = 0;

	RXD(("RX<"));
	this = &rxbase[elem];
#ifdef __sparc_v9__
	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=r" (flags)
			     : "r" (&this->rx_flags), "i" (ASI_PL));
#else
	flags = flip_dword(this->rx_flags);	/* FIXME */
#endif
	while(!(flags & RXFLAG_OWN)) {
		struct sk_buff *skb;
		int len;
		u16 csum;

		RXD(("[%d ", elem));
		len = flags >> 16;
		csum = flags & RXFLAG_CSUM;

		/* Check for errors. */
		if((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
			RXD(("ERR(%08x)]", flags));
			hp->net_stats.rx_errors++;
			if(len < ETH_ZLEN)
				hp->net_stats.rx_length_errors++;
			if(len & (RXFLAG_OVERFLOW >> 16)) {
				hp->net_stats.rx_over_errors++;
				hp->net_stats.rx_fifo_errors++;
			}

			/* Return it to the Happy meal. */
	drop_it:
			hp->net_stats.rx_dropped++;
			pcihme_write_rxd(this,
				(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
				(u32) virt_to_bus((volatile void *)hp->rx_skbs[elem]->data));
			goto next;
		}
		skb = hp->rx_skbs[elem];
		if(len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			/* Now refill the entry, if we can. */
			new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE,
						       (GFP_DMA | GFP_ATOMIC));
			if(!new_skb) {
				drops++;
				goto drop_it;
			}
			hp->rx_skbs[elem] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET));
			pcihme_write_rxd(&rxbase[elem],
				(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
				(u32) virt_to_bus((volatile void *)new_skb->data));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if(!copy_skb) {
				drops++;
				goto drop_it;
			}

			copy_skb->dev = dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			memcpy(copy_skb->data, skb->data, len);

			/* Reuse original ring buffer. */
			pcihme_write_rxd(&rxbase[elem],
				(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
				(u32) virt_to_bus((volatile void *)skb->data));

			skb = copy_skb;
		}

		/* This card is _fucking_ hot... */
		if(!(csum ^ 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		RXD(("len=%d csum=%4x]", len, csum));
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		hp->net_stats.rx_packets++;
		hp->net_stats.rx_bytes += len;
	next:
		elem = NEXT_RX(elem);
		this = &rxbase[elem];
#ifdef __sparc_v9__
		__asm__ __volatile__("lduwa [%1] %2, %0"
				     : "=r" (flags)
				     : "r" (&this->rx_flags), "i" (ASI_PL));
#else
		flags = flip_dword(this->rx_flags);	/* FIXME */
#endif
	}
	hp->rx_new = elem;
	if(drops)
		printk("%s: Memory squeeze, deferring packet.\n", hp->dev->name);
	RXD((">"));
}
#endif
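/* Editor's note, not from the original source: on the PCI Happy Meal the
 * descriptor words appear to be kept little-endian, so the RX loop above
 * fetches rx_flags with an ASI_PL (little-endian) load on sparc64 and falls
 * back to flip_dword() on 32-bit sparc.  A hypothetical wrapper capturing
 * the same idiom, kept under "#if 0" because it is illustrative only:
 */
#if 0
static inline unsigned int pcihme_read_rxflags(struct happy_meal_rxd *rxd)
{
	unsigned int flags;

#ifdef __sparc_v9__
	/* Little-endian load straight from the descriptor. */
	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=r" (flags)
			     : "r" (&rxd->rx_flags), "i" (ASI_PL));
#else
	/* Byte-swap the little-endian descriptor word by hand. */
	flags = flip_dword(rxd->rx_flags);
#endif
	return flags;
}
#endif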
#ifndef __sparc_v9__
static inline void sun4c_happy_meal_rx(struct happy_meal *hp, struct device *dev,
				       struct hmeal_gregs *gregs)
{
	struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
	struct happy_meal_rxd *this;
	struct hmeal_buffers *hbufs = hp->sun4c_buffers;
	__u32 hbufs_dvma = hp->s4c_buf_dvma;
	int elem = hp->rx_new, drops = 0;

	RXD(("RX<"));
	this = &rxbase[elem];
	while(!(this->rx_flags & RXFLAG_OWN)) {
		struct sk_buff *skb;
		unsigned int flags = this->rx_flags;
		unsigned char *thisbuf = &hbufs->rx_buf[elem][0];
		__u32 thisbuf_dvma = hbufs_dvma + hbuf_offset(rx_buf, elem);
		int len = flags >> 16;

		RXD(("[%d ", elem));

		/* Check for errors. */
		if((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
			RXD(("ERR(%08x)]", flags));
			hp->net_stats.rx_errors++;
			if(len < ETH_ZLEN)
				hp->net_stats.rx_length_errors++;
			if(len & (RXFLAG_OVERFLOW >> 16)) {
				hp->net_stats.rx_over_errors++;
				hp->net_stats.rx_fifo_errors++;
			}
			hp->net_stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if(skb == 0) {
				drops++;
				hp->net_stats.rx_dropped++;
			} else {
				RXD(("len=%d]", len));
				skb->dev = hp->dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);
				eth_copy_and_sum(skb, (thisbuf + 2), len, 0);
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				hp->net_stats.rx_packets++;
				hp->net_stats.rx_bytes += len;
			}
		}

		/* Return the buffer to the Happy Meal. */
		this->rx_addr = thisbuf_dvma;
		this->rx_flags =
			(RXFLAG_OWN | ((SUN4C_RX_BUFF_SIZE - RX_OFFSET) << 16));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	hp->rx_new = elem;
	if(drops)
		printk("%s: Memory squeeze, deferring packet.\n", hp->dev->name);
	RXD((">"));
}
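/* Editor's note, not from the original source: the sun4c variant above never
 * hands sk_buffs to the chip; every frame is copied out of a fixed per-slot
 * DVMA buffer and the descriptor is immediately re-armed with the same
 * address.  The sun4d variant below does recycle sk_buffs, but each buffer
 * has to be remapped through the I/O unit first; the per-slot DVMA address
 * is derived from the ring index, roughly (the descriptor pointer name here
 * is illustrative):
 *
 *	va = (__u32) hp->sun4d_buffers + elem * PAGE_SIZE;
 *	rxd->rx_addr = iounit_map_dma_page(va, skb->data,
 *					   hp->happy_sbus_dev->my_bus);
 */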
static inline void sun4d_happy_meal_rx(struct happy_meal *hp, struct device *dev,
				       struct hmeal_gregs *gregs)
{
	struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
	struct happy_meal_rxd *this;
	int elem = hp->rx_new, drops = 0;
	__u32 va;

	RXD(("RX<"));
	this = &rxbase[elem];
	while(!(this->rx_flags & RXFLAG_OWN)) {
		struct sk_buff *skb;
		unsigned int flags = this->rx_flags;
		int len = flags >> 16;
		u16 csum = flags & RXFLAG_CSUM;

		RXD(("[%d ", elem));

		/* Check for errors. */
		if((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
			RXD(("ERR(%08x)]", flags));
			hp->net_stats.rx_errors++;
			if(len < ETH_ZLEN)
				hp->net_stats.rx_length_errors++;
			if(len & (RXFLAG_OVERFLOW >> 16)) {
				hp->net_stats.rx_over_errors++;
				hp->net_stats.rx_fifo_errors++;
			}

			/* Return it to the Happy meal. */
	drop_it:
			hp->net_stats.rx_dropped++;
			va = (__u32) hp->sun4d_buffers + elem * PAGE_SIZE;
			this->rx_addr = iounit_map_dma_page(va, hp->rx_skbs[elem]->data,
							    hp->happy_sbus_dev->my_bus);
			this->rx_flags =
				(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16));
			goto next;
		}
		skb = hp->rx_skbs[elem];
		if(len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			/* Now refill the entry, if we can. */
			new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE,
						       (GFP_DMA | GFP_ATOMIC));
			if(!new_skb) {
				drops++;
				goto drop_it;
			}
			hp->rx_skbs[elem] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET));
			va = (__u32) hp->sun4d_buffers + elem * PAGE_SIZE;
			rxbase[elem].rx_addr = iounit_map_dma_page(va, new_skb->data,
								   hp->happy_sbus_dev->my_bus);
			skb_reserve(new_skb, RX_OFFSET);
			rxbase[elem].rx_flags =
				(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16));

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if(!copy_skb) {
				drops++;
				goto drop_it;
			}

			copy_skb->dev = dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			memcpy(copy_skb->data, skb->data, len);

			/* Reuse original ring buffer. */
			va = (__u32) hp->sun4d_buffers + elem * PAGE_SIZE;
			rxbase[elem].rx_addr = iounit_map_dma_page(va, skb->data,
								   hp->happy_sbus_dev->my_bus);
			rxbase[elem].rx_flags =
				(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16));

			skb = copy_skb;
		}

		/* This card is _fucking_ hot... */
		if(!(csum ^ 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		RXD(("len=%d csum=%4x]", len, csum));
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		hp->net_stats.rx_packets++;
		hp->net_stats.rx_bytes += len;
	next:
		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	hp->rx_new = elem;
	if(drops)
		printk("%s: Memory squeeze, deferring packet.\n", hp->dev->name);
	RXD((">"));
}
#endif

static void happy_meal_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct device *dev = (struct device *) dev_id;
	struct happy_meal *hp = (struct happy_meal *) dev->priv;
	struct hmeal_gregs *gregs = hp->gregs;
	struct hmeal_tcvregs *tregs = hp->tcvregs;
	unsigned int happy_status = hme_read32(hp, &gregs->stat);

	HMD(("happy_meal_interrupt: status=%08x ", happy_status));

	dev->interrupt = 1;

	if(happy_status & GREG_STAT_ERRORS) {
		HMD(("ERRORS "));
		if(happy_meal_is_not_so_happy(hp, gregs, /* un- */ happy_status)) {
			dev->interrupt = 0;
			return;
		}
	}

	if(happy_status & GREG_STAT_MIFIRQ) {
		HMD(("MIFIRQ "));
		happy_meal_mif_interrupt(hp, gregs, tregs);
	}

	if(happy_status & GREG_STAT_TXALL) {
		HMD(("TXALL "));
		happy_meal_tx(hp);
	}

	if(happy_status & GREG_STAT_RXTOHOST) {
		HMD(("RXTOHOST "));
		happy_meal_rx(hp, dev, gregs);
	}

	if(dev->tbusy && (TX_BUFFS_AVAIL(hp) >= 0)) {
		hp->dev->tbusy = 0;
		mark_bh(NET_BH);
	}

	dev->interrupt = 0;
	HMD(("done\n"));
}
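/* Editor's note, not from the original source: happy_meal_interrupt() above
 * reads the global status register once and dispatches in a fixed order:
 * error bits first (returning early if happy_meal_is_not_so_happy() returns
 * nonzero), then MIF events, then TX completion, then RX-to-host.  Finally,
 * if the device was marked busy and transmit descriptors are available
 * again, it clears tbusy and marks the NET_BH bottom half so deferred
 * transmits can resume.
 */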