📄 ohci1394.c
字号:
*/
            /* We are root and other nodes exist: drive the bus cycles.
             * LinkControl bits 20-21: cycleMaster + cycleTimer enable. */
            DBGMSG(ohci->id, "Cycle master enabled");
            reg_write(ohci, OHCI1394_LinkControlSet, 0x00300000);
        }
    } else {
        /* disable cycleTimer, cycleMaster, cycleSource */
        reg_write(ohci, OHCI1394_LinkControlClear, 0x00700000);
    }
    break;

    case CANCEL_REQUESTS:
        /* Abort everything queued in both async-transmit contexts;
         * the packets are completed back to the core with ACKX_ABORTED
         * (see dma_trm_reset() below). */
        DBGMSG(ohci->id, "Cancel request received");
        dma_trm_reset(&ohci->at_req_context);
        dma_trm_reset(&ohci->at_resp_context);
        break;

    case MODIFY_USAGE:
        /* Pin/unpin this module while a client holds a reference
         * (pre-2.6 module refcounting). */
        if (arg) {
            MOD_INC_USE_COUNT;
        } else {
            MOD_DEC_USE_COUNT;
        }
        retval = 1;
        break;

    case ISO_LISTEN_CHANNEL:
    {
        u64 mask;

        /* NOTE(review): "IS0" in the messages below looks like a typo
         * for "ISO"; left untouched here since it is runtime output. */
        if (arg<0 || arg>63) {
            PRINT(KERN_ERR, ohci->id,
                  "%s: IS0 listen channel %d is out of range",
                  __FUNCTION__, arg);
            return -EFAULT;
        }

        mask = (u64)0x1<<arg;

        /* ISO_channel_usage is a 64-bit bitmap of claimed channels,
         * guarded by IR_channel_lock. */
        spin_lock_irqsave(&ohci->IR_channel_lock, flags);

        if (ohci->ISO_channel_usage & mask) {
            PRINT(KERN_ERR, ohci->id,
                  "%s: IS0 listen channel %d is already used",
                  __FUNCTION__, arg);
            spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
            return -EFAULT;
        }

        ohci->ISO_channel_usage |= mask;

        /* The hardware splits the 64-channel mask across a Hi (32-63)
         * and a Lo (0-31) register pair. */
        if (arg>31)
            reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
                      1<<(arg-32));
        else
            reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
                      1<<arg);

        spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
        DBGMSG(ohci->id, "Listening enabled on channel %d", arg);
        break;
    }
    case ISO_UNLISTEN_CHANNEL:
    {
        u64 mask;

        /* Exact mirror of ISO_LISTEN_CHANNEL: validate, verify the
         * channel is actually claimed, then clear the usage bit and
         * the matching hardware mask bit. */
        if (arg<0 || arg>63) {
            PRINT(KERN_ERR, ohci->id,
                  "%s: IS0 unlisten channel %d is out of range",
                  __FUNCTION__, arg);
            return -EFAULT;
        }

        mask = (u64)0x1<<arg;

        spin_lock_irqsave(&ohci->IR_channel_lock, flags);

        if (!(ohci->ISO_channel_usage & mask)) {
            PRINT(KERN_ERR, ohci->id,
                  "%s: IS0 unlisten channel %d is not used",
                  __FUNCTION__, arg);
            spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
            return -EFAULT;
        }

        ohci->ISO_channel_usage &= ~mask;

        if (arg>31)
            reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
                      1<<(arg-32));
        else
            reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
                      1<<arg);

        spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
        DBGMSG(ohci->id, "Listening disabled on channel %d", arg);
        break;
    }
    default:
        PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
                cmd);
        break;
    }
    return retval;
}

/***************************************
 * IEEE-1394 functionality section END *
 ***************************************/

/********************************************************
 * Global stuff (interrupt handler, init/shutdown code) *
 ********************************************************/

/*
 * Reset an async-transmit (AT) DMA context: stop the hardware context,
 * detach every packet still queued (both the in-flight fifo_list and
 * the not-yet-programmed pending_list) under the context lock, reset
 * the program bookkeeping, and only then complete the detached packets
 * back to the ieee1394 core with ACKX_ABORTED — the callbacks must run
 * outside the lock.
 */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
    unsigned long flags;
    LIST_HEAD(packet_list);

    ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);

    /* Lock the context, reset it and release it. Move the packets
     * that were pending in the context to packet_list and free
     * them after releasing the lock. */
    spin_lock_irqsave(&d->lock, flags);

    list_splice(&d->fifo_list, &packet_list);
    list_splice(&d->pending_list, &packet_list);
    INIT_LIST_HEAD(&d->fifo_list);
    INIT_LIST_HEAD(&d->pending_list);

    /* All descriptor programs are free again; sent index catches up
     * with the producer index. */
    d->branchAddrPtr = NULL;
    d->sent_ind = d->prg_ind;
    d->free_prgs = d->num_desc;

    spin_unlock_irqrestore(&d->lock, flags);

    /* Now process subsystem callbacks for the packets from the
     * context. */
    while (!list_empty(&packet_list)) {
        struct hpsb_packet *p = driver_packet(packet_list.next);
        PRINT(KERN_INFO, d->ohci->id,
              "AT dma reset ctx=%d, aborting transmission", d->ctx);
        list_del(&p->driver_list);
        hpsb_packet_sent(d->ohci->host, p, ACKX_ABORTED);
    }
}

/*
 * Walk the registered iso tasklet list and schedule every tasklet whose
 * context number matches a set bit in the corresponding event mask
 * (rx_event for receive contexts, tx_event for transmit contexts).
 * Runs in interrupt context; the list is protected by a plain spinlock.
 */
static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
                                       quadlet_t rx_event,
                                       quadlet_t tx_event)
{
    struct list_head *lh;
    struct ohci1394_iso_tasklet *t;
    unsigned long mask;

    spin_lock(&ohci->iso_tasklet_list_lock);

    list_for_each(lh, &ohci->iso_tasklet_list) {
        t = list_entry(lh, struct ohci1394_iso_tasklet, link);
        mask = 1 << t->context;

        if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
            tasklet_schedule(&t->tasklet);
        if (t->type == OHCI_ISO_RECEIVE && rx_event & mask)
            tasklet_schedule(&t->tasklet);
    }

    spin_unlock(&ohci->iso_tasklet_list_lock);
}

/*
 * Top-level interrupt handler: read-and-clear IntEvent, then dispatch
 * each recognized event bit in turn, masking it off `event` as it is
 * handled so any leftover bits can be reported at the end.
 */
static void ohci_irq_handler(int irq, void *dev_id,
                             struct pt_regs *regs_are_unused)
{
    quadlet_t event, node_id;
    struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
    struct hpsb_host *host = ohci->host;
    int phyid = -1, isroot = 0;
    unsigned long flags;

    /* Read and clear the interrupt event register. Don't clear
     * the busReset event, though, this is done when we get the
     * selfIDComplete interrupt. */
    spin_lock_irqsave(&ohci->event_lock, flags);
    event = reg_read(ohci, OHCI1394_IntEventClear);
    reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
    spin_unlock_irqrestore(&ohci->event_lock, flags);

    if (!event)
        return;

    DBGMSG(ohci->id, "IntEvent: %08x", event);

    /* Die right here and now */
    if (event & OHCI1394_unrecoverableError) {
        PRINT(KERN_ERR, ohci->id, "Unrecoverable error, shutting down card!");
        return;
    }

    if (event & OHCI1394_cycleInconsistent) {
        /* We subscribe to the cycleInconsistent event only to
         * clear the corresponding event bit... otherwise,
         * isochronous cycleMatch DMA won't work. */
        DBGMSG(ohci->id, "OHCI1394_cycleInconsistent");
        event &= ~OHCI1394_cycleInconsistent;
    }

    if (event & OHCI1394_busReset) {
        /* The busReset event bit can't be cleared during the
         * selfID phase, so we disable busReset interrupts, to
         * avoid burying the cpu in interrupt requests. */
        spin_lock_irqsave(&ohci->event_lock, flags);
        reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
        /* Apple UniNorth FireWire errata workaround: keep clearing the
         * busReset event (dropping the lock between attempts) until the
         * chip lets go of it. */
        if (ohci->dev->vendor == PCI_VENDOR_ID_APPLE &&
            ohci->dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
            udelay(10);
            while(reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
                reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
                spin_unlock_irqrestore(&ohci->event_lock, flags);
                udelay(10);
                spin_lock_irqsave(&ohci->event_lock, flags);
            }
        }
        spin_unlock_irqrestore(&ohci->event_lock, flags);
        if (!host->in_bus_reset) {
            DBGMSG(ohci->id, "irq_handler: Bus reset requested%s",
                   (attempt_root) ? " and attempting to become root"
                   : "");
            /* Subsystem call */
            hpsb_bus_reset(ohci->host);
        }
        event &= ~OHCI1394_busReset;
    }

    /* XXX: We need a way to also queue the OHCI1394_reqTxComplete,
     * but for right now we simply run it upon reception, to make sure
     * we get sent acks before response packets. This sucks mainly
     * because it halts the interrupt handler. */
    if (event & OHCI1394_reqTxComplete) {
        struct dma_trm_ctx *d = &ohci->at_req_context;
        DBGMSG(ohci->id, "Got reqTxComplete interrupt "
               "status=0x%08X", reg_read(ohci, d->ctrlSet));
        /* Bit 0x800 is the context's "dead" flag: stop it instead of
         * processing completions. */
        if (reg_read(ohci, d->ctrlSet) & 0x800)
            ohci1394_stop_context(ohci, d->ctrlClear,
                                  "reqTxComplete");
        else
            /* Run the tasklet body synchronously (see XXX above). */
            dma_trm_tasklet ((unsigned long)d);
        event &= ~OHCI1394_reqTxComplete;
    }

    if (event & OHCI1394_respTxComplete) {
        struct dma_trm_ctx *d = &ohci->at_resp_context;
        DBGMSG(ohci->id, "Got respTxComplete interrupt "
               "status=0x%08X", reg_read(ohci, d->ctrlSet));
        if (reg_read(ohci, d->ctrlSet) & 0x800)
            ohci1394_stop_context(ohci, d->ctrlClear,
                                  "respTxComplete");
        else
            tasklet_schedule(&d->task);
        event &= ~OHCI1394_respTxComplete;
    }

    if (event & OHCI1394_RQPkt) {
        struct dma_rcv_ctx *d = &ohci->ar_req_context;
        DBGMSG(ohci->id, "Got RQPkt interrupt status=0x%08X",
               reg_read(ohci, d->ctrlSet));
        if (reg_read(ohci, d->ctrlSet) & 0x800)
            ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
        else
            tasklet_schedule(&d->task);
        event &= ~OHCI1394_RQPkt;
    }

    if (event & OHCI1394_RSPkt) {
        struct dma_rcv_ctx *d = &ohci->ar_resp_context;
        DBGMSG(ohci->id, "Got RSPkt interrupt status=0x%08X",
               reg_read(ohci, d->ctrlSet));
        if (reg_read(ohci, d->ctrlSet) & 0x800)
            ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
        else
            tasklet_schedule(&d->task);
        event &= ~OHCI1394_RSPkt;
    }

    if (event & OHCI1394_isochRx) {
        quadlet_t rx_event;

        rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
        reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
        ohci_schedule_iso_tasklets(ohci, rx_event, 0);
        event &= ~OHCI1394_isochRx;
    }

    if (event & OHCI1394_isochTx) {
        quadlet_t tx_event;

        tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
        reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
        ohci_schedule_iso_tasklets(ohci, 0, tx_event);
        event &= ~OHCI1394_isochTx;
    }

    if (event & OHCI1394_selfIDComplete) {
        if (host->in_bus_reset) {
            node_id = reg_read(ohci, OHCI1394_NodeID);

            /* If our nodeid is not valid, give a msec delay
             * to let it settle in and try again. */
            if (!(node_id & 0x80000000)) { /* NodeID.iDValid bit */
                mdelay(1);
                node_id = reg_read(ohci, OHCI1394_NodeID);
            }

            if (node_id & 0x80000000) { /* NodeID valid */
                phyid = node_id & 0x0000003f;
                isroot = (node_id & 0x40000000) != 0;

                DBGMSG(ohci->id,
                       "SelfID interrupt received "
                       "(phyid %d, %s)", phyid,
                       (isroot ? "root" : "not root"));

                handle_selfid(ohci, host, phyid, isroot);
            } else {
                PRINT(KERN_ERR, ohci->id,
                      "SelfID interrupt received, but "
                      "NodeID is not valid: %08X", node_id);
            }

            /* Accept Physical requests from all nodes. */
            reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
            reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
        } else
            PRINT(KERN_ERR, ohci->id,
                  "SelfID received outside of bus reset sequence");

        /* Finally, we clear the busReset event and reenable
         * the busReset interrupt. */
        spin_lock_irqsave(&ohci->event_lock, flags);
        reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
        reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
        spin_unlock_irqrestore(&ohci->event_lock, flags);
        event &= ~OHCI1394_selfIDComplete;

        /* Turn on phys dma reception. We should
         * probably manage the filtering somehow,
         * instead of blindly turning it on. */

        /*
         * CAUTION!
         * Some chips (TI TSB43AB22) won't take a value in
         * the PhyReqFilter register until after the IntEvent
         * is cleared for bus reset, and even then a short
         * delay is required.
         */
        if (phys_dma) {
            mdelay(1);
            reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
            reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
            reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
        }

        DBGMSG(ohci->id, "PhyReqFilter=%08x%08x\n",
               reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
               reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
    }

    /* Make sure we handle everything, just in case we accidentally
     * enabled an interrupt that we didn't write a handler for. */
    if (event)
        PRINT(KERN_ERR, ohci->id, "Unhandled interrupt(s) 0x%08x",
              event);
}

/* Put the buffer back into the dma context */
static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
{
    struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
    DBGMSG(ohci->id, "Inserting dma buf ctx=%d idx=%d", d->ctx, idx);

    /* Re-arm this descriptor: restore the residual byte count and
     * clear the Z nibble in the branch address, then set Z=1 in the
     * previous descriptor so the chip will branch into this one.
     * (le32_to_cpu on a constant mask is a plain byte swap, same
     * result as cpu_to_le32 — presumably written this way on purpose;
     * the descriptors are little-endian in memory.) */
    d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
    d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
    idx = (idx + d->num_desc - 1 ) % d->num_desc;
    d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);

    /* wake up the dma context if necessary */
    if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
        PRINT(KERN_INFO, ohci->id,
              "Waking dma ctx=%d ... processing is probably too slow",
              d->ctx);
        reg_write(ohci, d->ctrlSet, 0x1000);
    }
}

/* Conditionally byte-swap a quadlet read from a DMA buffer. */
#define cond_le32_to_cpu(data, noswap) \
    (noswap ? data : le32_to_cpu(data))

/* Total packet size per transaction code, as consumed by
 * packet_length(): 0 means a block packet whose data_length field
 * must be read from the header, -1 marks tcodes not expected here. */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
                                   -1, 0, -1, 0, -1, -1, 16, -1};

/*
 * Determine the length of a packet in the buffer
 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
 */
static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx,
                                    quadlet_t *buf_ptr, int offset,
                                    unsigned char tcode, int noswap)
{
    int length = -1;

    if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
        length = TCODE_SIZE[tcode];
        if (length == 0) {
            /* Block packet: data_length lives in the upper 16 bits of
             * the fourth header quadlet.  If that quadlet has wrapped
             * past the end of this buffer, read it from the start of
             * the next buffer in the ring instead. */
            if (offset + 12 >= d->buf_size) {
                length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
                          [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
            } else {
                length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
            }
            /* 16 bytes of header + 4-byte data CRC around the payload. */
            length += 20;
        }
    } else if (d->type == DMA_CTX_ISO) {
        /* Assumption: buffer fill mode with header/trailer */
        length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
    }

    /* Round up to the next quadlet boundary. */
    if (length > 0 && length % 4)
        length += 4 - (length % 4);

    return length;
}

/* Tasklet that processes dma receive buffers */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -