pcilynx.c
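/*
 * What follows appears to be the tail of the write() file operation for the
 * aux/RAM/ROM port regions exposed when CONFIG_IEEE1394_PCILYNX_PORTS is set:
 * user data is copied straight into the mapped PCI aperture selected by
 * md->type.
 */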
        if (count == 0 || *offset > PCILYNX_MAX_MEMORY) {
                return -ENOSPC;
        }

        /* FIXME: dereferencing pointers to PCI mem doesn't work everywhere */
        switch (md->type) {
        case aux:
                if (copy_from_user(md->lynx->aux_port+(*offset), buffer, count))
                        return -EFAULT;
                break;
        case ram:
                if (copy_from_user(md->lynx->local_ram+(*offset), buffer, count))
                        return -EFAULT;
                break;
        case rom:
                /* the ROM may be writeable */
                if (copy_from_user(md->lynx->local_rom+(*offset), buffer, count))
                        return -EFAULT;
                break;
        }

        file->f_pos += count;
        return count;
}
#endif /* CONFIG_IEEE1394_PCILYNX_PORTS */


/********************************************************
 * Global stuff (interrupt handler, init/shutdown code) *
 ********************************************************/

static void lynx_irq_handler(int irq, void *dev_id,
                             struct pt_regs *regs_are_unused)
{
        struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
        struct hpsb_host *host = lynx->host;
        u32 intmask;
        u32 linkint;

        linkint = reg_read(lynx, LINK_INT_STATUS);
        intmask = reg_read(lynx, PCI_INT_STATUS);

        if (!(intmask & PCI_INT_INT_PEND))
                return;

        PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
               linkint);

        reg_write(lynx, LINK_INT_STATUS, linkint);
        reg_write(lynx, PCI_INT_STATUS, intmask);

#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
        if (intmask & PCI_INT_AUX_INT) {
                atomic_inc(&lynx->aux_intr_seen);
                wake_up_interruptible(&lynx->aux_intr_wait);
        }

        if (intmask & PCI_INT_DMA_HLT(CHANNEL_LOCALBUS)) {
                wake_up_interruptible(&lynx->mem_dma_intr_wait);
        }
#endif

        if (intmask & PCI_INT_1394) {
                if (linkint & LINK_INT_PHY_TIMEOUT) {
                        PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
                }
                if (linkint & LINK_INT_PHY_BUSRESET) {
                        PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
                        lynx->selfid_size = -1;
                        lynx->phy_reg0 = -1;
                        if (!host->in_bus_reset)
                                hpsb_bus_reset(host);
                }
                if (linkint & LINK_INT_PHY_REG_RCVD) {
                        u32 reg;

                        spin_lock(&lynx->phy_reg_lock);
                        reg = reg_read(lynx, LINK_PHY);
                        spin_unlock(&lynx->phy_reg_lock);

                        if (!host->in_bus_reset) {
                                PRINT(KERN_INFO, lynx->id,
                                      "phy reg received without reset");
                        } else if (reg & 0xf00) {
                                PRINT(KERN_INFO, lynx->id,
                                      "unsolicited phy reg %d received",
                                      (reg >> 8) & 0xf);
                        } else {
                                lynx->phy_reg0 = reg & 0xff;
                                handle_selfid(lynx, host);
                        }
                }
                if (linkint & LINK_INT_ISO_STUCK) {
                        PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
                }
                if (linkint & LINK_INT_ASYNC_STUCK) {
                        PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
                }
                if (linkint & LINK_INT_SENT_REJECT) {
                        PRINT(KERN_INFO, lynx->id, "sent reject");
                }
                if (linkint & LINK_INT_TX_INVALID_TC) {
                        PRINT(KERN_INFO, lynx->id, "invalid transaction code");
                }
                if (linkint & LINK_INT_GRF_OVERFLOW) {
                        /* flush FIFO if overflow happens during reset */
                        if (host->in_bus_reset)
                                reg_write(lynx, FIFO_CONTROL,
                                          FIFO_CONTROL_GRF_FLUSH);
                        PRINT(KERN_INFO, lynx->id, "GRF overflow");
                }
                if (linkint & LINK_INT_ITF_UNDERFLOW) {
                        PRINT(KERN_INFO, lynx->id, "ITF underflow");
                }
                if (linkint & LINK_INT_ATF_UNDERFLOW) {
                        PRINT(KERN_INFO, lynx->id, "ATF underflow");
                }
        }

        if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
                PRINTD(KERN_DEBUG, lynx->id, "iso receive");

                spin_lock(&lynx->iso_rcv.lock);

                lynx->iso_rcv.stat[lynx->iso_rcv.next] =
                        reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));

                lynx->iso_rcv.used++;
                lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;

                if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
                    || !lynx->iso_rcv.chan_count) {
                        PRINTD(KERN_DEBUG, lynx->id, "stopped");
                        reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
                }

                run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
                            CHANNEL_ISO_RCV);
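                /*
                 * Delivery of the queued iso packets is deferred to the iso
                 * receive bottom half (iso_rcv_bh() below), which runs from
                 * the tasklet scheduled once the lock is dropped.
                 */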
                spin_unlock(&lynx->iso_rcv.lock);
                tasklet_schedule(&lynx->iso_rcv.tq);
        }

        if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
                PRINTD(KERN_DEBUG, lynx->id, "async sent");
                spin_lock(&lynx->async.queue_lock);

                if (list_empty(&lynx->async.pcl_queue)) {
                        spin_unlock(&lynx->async.queue_lock);
                        PRINT(KERN_WARNING, lynx->id,
                              "async dma halted, but no queued packet (maybe it was cancelled)");
                } else {
                        struct ti_pcl pcl;
                        u32 ack;
                        struct hpsb_packet *packet;

                        get_pcl(lynx, lynx->async.pcl, &pcl);

                        packet = driver_packet(lynx->async.pcl_queue.next);
                        list_del(&packet->driver_list);

                        pci_unmap_single(lynx->dev, lynx->async.header_dma,
                                         packet->header_size, PCI_DMA_TODEVICE);
                        if (packet->data_size) {
                                pci_unmap_single(lynx->dev, lynx->async.data_dma,
                                                 packet->data_size, PCI_DMA_TODEVICE);
                        }

                        if (!list_empty(&lynx->async.queue)) {
                                send_next(lynx, hpsb_async);
                        }

                        spin_unlock(&lynx->async.queue_lock);

                        if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
                                if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
                                        ack = (pcl.pcl_status >> 15) & 0xf;
                                        PRINTD(KERN_INFO, lynx->id,
                                               "special ack %d", ack);
                                        ack = (ack == 1 ? ACKX_TIMEOUT
                                                        : ACKX_SEND_ERROR);
                                } else {
                                        ack = (pcl.pcl_status >> 15) & 0xf;
                                }
                        } else {
                                PRINT(KERN_INFO, lynx->id,
                                      "async packet was not completed");
                                ack = ACKX_SEND_ERROR;
                        }
                        hpsb_packet_sent(host, packet, ack);
                }
        }

        if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
                PRINTD(KERN_DEBUG, lynx->id, "iso sent");
                spin_lock(&lynx->iso_send.queue_lock);

                if (list_empty(&lynx->iso_send.pcl_queue)) {
                        spin_unlock(&lynx->iso_send.queue_lock);
                        PRINT(KERN_ERR, lynx->id,
                              "iso send dma halted, but no queued packet");
                } else {
                        struct ti_pcl pcl;
                        u32 ack;
                        struct hpsb_packet *packet;

                        get_pcl(lynx, lynx->iso_send.pcl, &pcl);

                        packet = driver_packet(lynx->iso_send.pcl_queue.next);
                        list_del(&packet->driver_list);

                        pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
                                         packet->header_size, PCI_DMA_TODEVICE);
                        if (packet->data_size) {
                                pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
                                                 packet->data_size, PCI_DMA_TODEVICE);
                        }

                        if (!list_empty(&lynx->iso_send.queue)) {
                                send_next(lynx, hpsb_iso);
                        }

                        spin_unlock(&lynx->iso_send.queue_lock);

                        if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
                                if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
                                        ack = (pcl.pcl_status >> 15) & 0xf;
                                        PRINTD(KERN_INFO, lynx->id,
                                               "special ack %d", ack);
                                        ack = (ack == 1 ?
                                               ACKX_TIMEOUT : ACKX_SEND_ERROR);
                                } else {
                                        ack = (pcl.pcl_status >> 15) & 0xf;
                                }
                        } else {
                                PRINT(KERN_INFO, lynx->id,
                                      "iso send packet was not completed");
                                ack = ACKX_SEND_ERROR;
                        }
                        hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
                }
        }

        if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
                /* general receive DMA completed */
                int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));

                PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
                       stat & 0x1fff);

                if (stat & DMA_CHAN_STAT_SELFID) {
                        lynx->selfid_size = stat & 0x1fff;
                        handle_selfid(lynx, host);
                } else {
                        quadlet_t *q_data = lynx->rcv_page;

                        if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
                            || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
                                cpu_to_be32s(q_data + 3);
                        }
                        hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
                }

                run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
        }
}


/* Bottom half for CHANNEL_ISO_RCV: hands completed iso buffers to the core. */
static void iso_rcv_bh(struct ti_lynx *lynx)
{
        unsigned int idx;
        quadlet_t *data;
        unsigned long flags;

        spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

        while (lynx->iso_rcv.used) {
                idx = lynx->iso_rcv.last;
                spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);

                data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
                        + (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;

                if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
                        PRINT(KERN_ERR, lynx->id,
                              "iso length mismatch 0x%08x/0x%08x", *data,
                              lynx->iso_rcv.stat[idx]);
                }

                if (lynx->iso_rcv.stat[idx]
                    & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
                        PRINT(KERN_INFO, lynx->id,
                              "iso receive error on %d to 0x%p", idx, data);
                } else {
                        hpsb_packet_received(lynx->host, data,
                                             lynx->iso_rcv.stat[idx] & 0x1fff, 0);
                }

                spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
                lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
                lynx->iso_rcv.used--;
        }

        if (lynx->iso_rcv.chan_count) {
                reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
                          DMA_WORD1_CMP_ENABLE_MASTER);
        }
        spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
}


static void remove_card(struct pci_dev *dev)
{
        struct ti_lynx *lynx;
        int i;

        lynx = pci_get_drvdata(dev);
        if (!lynx)
                return;
        pci_set_drvdata(dev, NULL);

        /* Tear down in reverse order of setup; the cases fall through on purpose. */
        switch (lynx->state) {
        case is_host:
                reg_write(lynx, PCI_INT_ENABLE, 0);
                hpsb_remove_host(lynx->host);
        case have_intr:
                reg_write(lynx, PCI_INT_ENABLE, 0);
                free_irq(lynx->dev->irq, lynx);

                /* Disable IRM Contender */
                if (lynx->phyic.reg_1394a)
                        set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));

                /* Let all other nodes know to ignore us */
                lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
        case have_iomappings:
                reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
                /* Fix buggy cards with autoboot pin not tied low: */
                reg_write(lynx, DMA0_CHAN_CTRL, 0);
                iounmap(lynx->registers);
                iounmap(lynx->local_rom);
                iounmap(lynx->local_ram);
                iounmap(lynx->aux_port);
        case have_1394_buffers:
                for (i = 0; i < ISORCV_PAGES; i++) {
                        if (lynx->iso_rcv.page[i]) {
                                pci_free_consistent(lynx->dev, PAGE_SIZE,
                                                    lynx->iso_rcv.page[i],
                                                    lynx->iso_rcv.page_dma[i]);
                        }
                }
                pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
                                    lynx->rcv_page_dma);
        case have_aux_buf:
#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
                pci_free_consistent(lynx->dev, 65536, lynx->mem_dma_buffer,
                                    lynx->mem_dma_buffer_dma);
#endif
        case have_pcl_mem:
#ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
                pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
                                    lynx->pcl_mem_dma);
#endif
        case clear:
                /* do nothing - already freed */
                ;
        }

        tasklet_kill(&lynx->iso_rcv.tq);

        hpsb_unref_host(lynx->host);
}


static int __devinit add_card(struct pci_dev *dev,
                              const struct pci_device_id *devid_is_unused)
{
#define FAIL(fmt, args...) \
        do { \
                PRINT_G(KERN_ERR, fmt , ## args); \
                remove_card(dev); \
                return error; \
        } while (0)

        char irq_buf[16];
        struct hpsb_host *host;
        struct ti_lynx *lynx; /* shortcut to currently handled device */
        struct ti_pcl pcl;