ohci1394.c
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}

static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int loop;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */
		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be written* in the block */
		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* nothing has happened to this block yet */
			break;
		}

		if (event != 0x11) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR, recv->ohci->id,
			      "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		if (rescount != 0) {
			/* the card is still writing to this block;
			   we can't touch it until it's done */
			break;
		}

		/* OK, the block is finished... */

		/* sync our view of the block */
		dma_region_sync(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);

		/* reset the DMA descriptor */
		im->status = recv->buf_stride;

		/* advance block_dma */
		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;

		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
			atomic_inc(&iso->overflows);
			DBGMSG(recv->ohci->id, "ISO reception overflow - "
			       "ran out of DMA blocks");
		}
	}

	/* parse any packets that have arrived */
	ohci_iso_recv_bufferfill_parse(iso, recv);
}
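/*
 * Note on the xferStatus/resCount decoding used by the receive tasklets
 * above and below (summarized from this file's comments and the OHCI
 * descriptor layout): the controller completes a descriptor by writing
 * its 32-bit status word - xferStatus in the upper 16 bits (the low 5
 * bits of which are the event code, 0x11 being a clean ack_complete)
 * and resCount, the number of buffer bytes still UNwritten, in the
 * lower 16 bits.  Worked example with made-up values: if buf_stride is
 * 4096 and the status word reads 0x00110F80, the event is 0x11 and
 * resCount is 0x0F80 = 3968, so 4096 - 3968 = 128 bytes arrived.
 */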
static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int count;
	int wake = 0;

	/* loop over the entire buffer */
	for (count = 0; count < recv->nblocks; count++) {
		u32 packet_len = 0;

		/* pointer to the DMA descriptor */
		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(il->status) >> 16;
		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* this packet hasn't come in yet; we are done for now */
			goto out;
		}

		if (event == 0x11) {
			/* packet received successfully! */

			/* rescount is the number of bytes *remaining* in the packet buffer,
			   after the packet was written */
			packet_len = recv->buf_stride - rescount;

		} else if (event == 0x02) {
			PRINT(KERN_ERR, recv->ohci->id,
			      "IR DMA error - packet too long for buffer\n");
		} else if (event) {
			PRINT(KERN_ERR, recv->ohci->id,
			      "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		/* sync our view of the buffer */
		dma_region_sync(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);

		/* record the per-packet info */
		{
			/* iso header is 8 bytes ahead of the data payload */
			unsigned char *hdr;

			unsigned int offset;
			unsigned short cycle;
			unsigned char channel, tag, sy;

			offset = iso->pkt_dma * recv->buf_stride;
			hdr = iso->data_buf.kvirt + offset;

			/* skip iso header */
			offset += 8;
			packet_len -= 8;

			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
			channel = hdr[5] & 0x3F;
			tag = hdr[5] >> 6;
			sy = hdr[4] & 0xF;

			hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
		}

		/* reset the DMA descriptor */
		il->status = recv->buf_stride;

		wake = 1;
		recv->block_dma = iso->pkt_dma;
	}

out:
	if (wake)
		hpsb_iso_wake(iso);
}
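/*
 * Layout of the 8-byte header stripped off above, as implied by the
 * decoding code (bytes as they appear in the little-endian buffer):
 *
 *   hdr[0..3]  timestamp quadlet; bits 12:0 = cycle count
 *   hdr[4]     tcode (high nibble) and sy (low nibble)
 *   hdr[5]     tag (bits 7:6) and channel number (bits 5:0)
 *   hdr[6..7]  dataLength of the packet
 */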
static void ohci_iso_recv_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->dma_mode == BUFFER_FILL_MODE)
		ohci_iso_recv_bufferfill_task(iso, recv);
	else
		ohci_iso_recv_packetperbuf_task(iso, recv);
}

/************************************
 * rawiso ISO transmission          *
 ************************************/

struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;
	struct ohci1394_iso_tasklet task;
	int task_active;

	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};

/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;
	u8 iso_hdr[8];
	u32 unused[2];
	struct dma_cmd output_last;
};

static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);

static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0)
		goto err;

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}

static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR, xmit->ohci->id,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}

static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}

static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8 event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR, xmit->ohci->id,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
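/*
 * Two field encodings worth spelling out before ohci_iso_xmit_queue
 * (a sketch based on this file's comments, not a spec quote):
 *
 *   - timeStamp: because output_last.control sets the 'update timeStamp'
 *     bit, the controller writes the cycle timer into the low 16 bits of
 *     the status word on completion; the '& 0x1FFF' above extracts the
 *     13-bit cycle count on which the packet left the bus.
 *
 *   - output_last.control: bit 28 = OUTPUT_LAST command, bit 27 = update
 *     timeStamp, bits 21:20 = interrupt control, bits 19:18 = branch
 *     control, low 16 bits = payload length.  For a hypothetical 488-byte
 *     payload this comes to (1<<28)|(1<<27)|(3<<20)|(3<<18)|488
 *     = 0x183C01E8.
 */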
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR, xmit->ohci->id,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	/* OHCI1394_IsochronousCycleTimer is at register offset 0xF0 (see the OHCI spec, p. 55) */
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}

static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		/* OHCI1394_IsochronousCycleTimer is at register offset 0xF0 (see the OHCI spec, p. 55) */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	wmb();

	return 0;
}
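/*
 * cycleMatch arithmetic in ohci_iso_xmit_start, traced with made-up
 * values: ContextControl bits 30:16 hold the 15-bit start time (two
 * cycleSeconds bits on top of the 13-bit cycle count) and bit 31 arms
 * the match.  For cycle = 1000 when the bumped seconds counter ends in
 * binary 10: start = (0x2 << 13) | 1000 = 0x43E8, and the register
 * write is 0x80000000 | (0x43E8 << 16) = 0xC3E80000.
 */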