📄 ohci1394.c
字号:
}

/***********************************
 * rawiso ISO transmission         *
 ***********************************/

/* Per-context driver state for one rawiso isochronous-transmit (IT) DMA
 * context on the OHCI controller. */
struct ohci_iso_xmit {
	struct ti_ohci *ohci;			/* owning host controller */
	struct dma_prog_region prog;		/* DMA program: one iso_xmit_cmd per packet slot */
	struct ohci1394_iso_tasklet task;	/* completion tasklet (ohci_iso_xmit_task) */
	int task_active;			/* nonzero once the tasklet is registered */

	/* per-context register offsets, computed in ohci_iso_xmit_init() */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};

/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;
	u8 iso_hdr[8];		/* iso packet header carried as immediate data */
	u32 unused[2];
	struct dma_cmd output_last;
};

static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);

/* Allocate and initialize the transmit context: hostdata, the DMA program
 * region (one iso_xmit_cmd per buffer packet), the completion tasklet, and
 * the per-context register offsets.  Returns 0 or -ENOMEM; on failure the
 * partial setup is torn down via ohci_iso_xmit_shutdown(). */
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	/* one DMA descriptor block per packet slot in the iso buffer */
	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0)
		goto err;

	/* from here on shutdown must also stop DMA and unregister the tasklet */
	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}

/* Mask this context's interrupt bit and halt its DMA program. */
static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR, xmit->ohci->id,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}

/* Tear down the transmit context: stop DMA / unregister the tasklet if it
 * was ever registered, then free the DMA program and the hostdata. */
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}

/* Tasklet run after an IT interrupt: walk the descriptor ring collecting
 * completed packets (non-zero xferStatus), report each to the hpsb_iso
 * core, and wake any writer if at least one packet went out. */
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		/* NOTE(review): iso->pkt_dma is re-read each iteration; it is
		 * presumably advanced inside hpsb_iso_packet_sent() — confirm
		 * in the hpsb_iso core, otherwise this would re-read one slot. */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8 event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		/* 0x11 is treated as the "completed OK" event code here;
		 * anything else is reported as an IT DMA error */
		if (event != 0x11)
			PRINT(KERN_ERR, xmit->ohci->id,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle (low 13 bits of the timestamp) */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}

/* Append one packet (described by info) to the end of the transmit DMA
 * chain: fill in the next free iso_xmit_cmd, link the previous descriptor
 * to it, then poke the WAKE bit in case the context went to sleep.
 * Returns 0, or -EINVAL if the payload crosses a page boundary. */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR, xmit->ohci->id,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
	/* 0x02000008: OUTPUT_MORE_IMMEDIATE carrying 8 bytes of immediate
	 * data (the iso_hdr below) — cf. the iso_xmit_cmd layout comment */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size (little-endian 16-bit) */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	/* make sure the descriptor writes above are visible before waking DMA */
	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}

/* Start the IT context running, optionally synchronized to an iso cycle
 * ('cycle' == -1 means start immediately).  Returns 0 on success or -1 if
 * the RUN bit did not come up. */
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		/* 0x80000000 enables the cycle match in ContextControl */
		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	mb();

	/* wait 100 usec to give the card time to go active */
	udelay(100);

	/* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, xmit->ohci->id, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}

/* Dispatch an isoctl command from the hpsb_iso core to the transmit or
 * receive implementation above.  Returns the callee's result, 0 for the
 * void operations, or -EINVAL for an unknown command. */
static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
{
	switch(cmd) {
	case XMIT_INIT:
		return ohci_iso_xmit_init(iso);
	case XMIT_START:
		return ohci_iso_xmit_start(iso, arg);
	case XMIT_STOP:
		ohci_iso_xmit_stop(iso);
		return 0;
	case XMIT_QUEUE:
		return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
	case XMIT_SHUTDOWN:
		ohci_iso_xmit_shutdown(iso);
		return 0;

	case RECV_INIT:
		return ohci_iso_recv_init(iso);
	case RECV_START: {
		/* arg points at three ints: start cycle, tag mask, sync */
		int *args = (int*) arg;
		return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
	}
	case RECV_STOP:
		ohci_iso_recv_stop(iso);
		return 0;
	case RECV_RELEASE:
		ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
		return 0;
	case RECV_FLUSH:
		/* run the receive tasklet body synchronously */
		ohci_iso_recv_task((unsigned long) iso);
		return 0;
	case RECV_SHUTDOWN:
		ohci_iso_recv_shutdown(iso);
		return 0;
	case RECV_LISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 1);
		return 0;
	case RECV_UNLISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 0);
		return 0;
	case RECV_SET_CHANNEL_MASK:
		ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
		return 0;

	default:
		PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return -EINVAL;
}

/***************************************
 * IEEE-1394 functionality section END *
 ***************************************/

/********************************************************
 * Global stuff (interrupt handler, init/shutdown code) *
 ********************************************************/

/* Abort an async transmit (AT) DMA context: stop the hardware, reset the
 * software ring state under the context lock, then (outside the lock)
 * complete the collected packets back to the subsystem as aborted. */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);

	ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	list_splice(&d->fifo_list, &packet_list);
	list_splice(&d->pending_list, &packet_list);
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;
	d->free_prgs = d->num_desc;

	spin_unlock_irqrestore(&d->lock, flags);

	/* Now process subsystem callbacks for the packets from the
	 * context.
*/
	while (!list_empty(&packet_list)) {
		struct hpsb_packet *p = driver_packet(packet_list.next);
		PRINT(KERN_INFO, d->ohci->id,
		      "AT dma reset ctx=%d, aborting transmission", d->ctx);
		list_del(&p->driver_list);
		/* complete the packet back to the subsystem with abort status */
		hpsb_packet_sent(d->ohci->host, p, ACKX_ABORTED);
	}
}

/* Schedule the tasklet of every registered iso context whose interrupt bit
 * is set in the given transmit (tx_event) or receive (rx_event) masks. */
static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
				       quadlet_t rx_event,
				       quadlet_t tx_event)
{
	struct list_head *lh;
	struct ohci1394_iso_tasklet *t;
	unsigned long mask;

	spin_lock(&ohci->iso_tasklet_list_lock);

	list_for_each(lh, &ohci->iso_tasklet_list) {
		t = list_entry(lh, struct ohci1394_iso_tasklet, link);
		mask = 1 << t->context;

		if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
			tasklet_schedule(&t->tasklet);
		else if (rx_event & mask)
			/* any non-transmit tasklet is matched against the
			 * receive event mask */
			tasklet_schedule(&t->tasklet);
	}

	spin_unlock(&ohci->iso_tasklet_list_lock);
}

/* Top-level interrupt handler for the OHCI controller. */
static void ohci_irq_handler(int irq, void *dev_id,
			     struct pt_regs *regs_are_unused)
{
	quadlet_t event, node_id;
	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
	struct hpsb_host *host = ohci->host;
	int phyid = -1, isroot = 0;
	unsigned long flags;

	/* Read and clear the interrupt event register.  Don't clear
	 * the busReset event, though. This is done when we get the
	 * selfIDComplete interrupt.
*/ spin_lock_irqsave(&ohci->event_lock, flags); event = reg_read(ohci, OHCI1394_IntEventClear); reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset); spin_unlock_irqrestore(&ohci->event_lock, flags); if (!event) return; DBGMSG(ohci->id, "IntEvent: %08x", event); if (event & OHCI1394_unrecoverableError) { int ctx; PRINT(KERN_ERR, ohci->id, "Unrecoverable error!"); if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800) PRINT(KERN_ERR, ohci->id, "Async Req Tx Context died: " "ctrl[%08x] cmdptr[%08x]", reg_read(ohci, OHCI1394_AsReqTrContextControlSet), reg_read(ohci, OHCI1394_AsReqTrCommandPtr)); if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800) PRINT(KERN_ERR, ohci->id, "Async Rsp Tx Context died: " "ctrl[%08x] cmdptr[%08x]", reg_read(ohci, OHCI1394_AsRspTrContextControlSet), reg_read(ohci, OHCI1394_AsRspTrCommandPtr)); if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800) PRINT(KERN_ERR, ohci->id, "Async Req Rcv Context d
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -