/* ohci1394.c - Texas Instruments OHCI-1394 host controller driver (excerpt) */
	DBGMSG(ohci->id, "Receive DMA ctx=%d initialized", d->ctx);
}

/*
 * Initialize the dma transmit context: stop any running DMA program,
 * reset the descriptor ring bookkeeping and, for iso contexts, unmask
 * the context's isochronous-transmit interrupt.
 */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Reset ring indices: all descriptor programs are free again */
	d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG(ohci->id, "Transmit DMA ctx=%d initialized", d->ctx);
}

/*
 * Count the number of available iso contexts: write all-ones to the
 * context mask register, then read back which bits the hardware
 * actually implements and count them.
 */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
	int i,ctx=0;
	u32 tmp;

	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG(ohci->id,"Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	for (i=0; i<32; i++) {
		if (tmp & 1) ctx++;
		tmp >>= 1;
	}
	return ctx;
}

static void ohci_init_config_rom(struct ti_ohci *ohci);

/* Global initialization: bring the controller from reset to a fully
 * enabled link with all DMA contexts and interrupts set up. */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;

	spin_lock_init(&ohci->phy_reg_lock);
	spin_lock_init(&ohci->event_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |= 0xE0000000;  /* Enable IRMC, CMC and ISC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets.
	 */
	reg_write(ohci, OHCI1394_LinkControlSet, 0x00300000);
	set_phy_reg_mask(ohci, 4, 0xc0);

	/* Clear interrupt registers */
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id dma */
	reg_write(ohci, OHCI1394_LinkControlSet, 0x00000200);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Initialize the Config ROM */
	ohci_init_config_rom(ohci);

	/* Now get our max packet size: derived from the maxRec field of
	 * BusOptions (bits 15:12), giving 2^(maxRec+1) bytes */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/*
	 * Accept AT requests from all nodes. This probably
	 * will have to be controlled from the subsystem
	 * on a per node basis.
	 */
	reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
	sprintf (irq_buf, "%d", ohci->dev->irq);
#else
	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
	PRINT(KERN_INFO, ohci->id, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
	      "MMIO=[%lx-%lx] Max Packet=[%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size);
}

/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 *
 * Must be called with the context lock held (see dma_trm_flush).
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG(ohci->id, "Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status...
		 * let's try 1 sec timeout */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		/* timeStamp = (current cycleSeconds + 1) in bits 15:13,
		 * current cycleCount in bits 12:0 */
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG(ohci->id, "cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			/* raw packet: prepend a PHY-packet tcode and copy
			 * the two header quadlets verbatim */
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			/* OUTPUT_MORE+IMMEDIATE descriptor carries the header
			 * (8 bytes for stream data, 16 otherwise) */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 */
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR, ohci->id,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary",
				      __FUNCTION__, packet->data,
				      packet->data_size);
			}

			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			/* chain the previous program's branch address to this
			 * program; low nibble is the descriptor count (Z) */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			/* header-only packet: a single OUTPUT_LAST+IMMEDIATE
			 * descriptor; raw packets carry 4 extra bytes */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}
	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
			pci_map_single(ohci->dev, packet->data,
				       packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG(ohci->id, "Iso xmit context info: header[%08x %08x]\n"
		       " begin=%08x %08x %08x %08x\n"
		       " %08x %08x %08x %08x\n"
		       " end =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	/* advance the program-ring write index */
	d->prg_ind = (d->prg_ind+1)%d->num_desc;
}

/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static int dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *p;
	int idx,z;

	if (list_empty(&d->pending_list) || d->free_prgs == 0)
		return 0;

	/* remember the first program's ring slot and its descriptor count
	 * (Z = 3 with payload, 2 without) in case we must (re)start the
	 * context below with its bus address */
	p = driver_packet(d->pending_list.next);
	idx = d->prg_ind;
	z = (p->data_size) ? 3 : 2;

	/* insert the packets into the dma fifo */
	while (d->free_prgs > 0 && !list_empty(&d->pending_list)) {
		struct hpsb_packet *p = driver_packet(d->pending_list.next);
		list_del(&p->driver_list);
		insert_packet(ohci, d, p);
	}

	if (d->free_prgs == 0)
		DBGMSG(ohci->id, "Transmit DMA FIFO ctx=%d is full... waiting",
		       d->ctx);

	/* Is the context running ?
	 * (should be unless it is the first packet to be sent in
	 * this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		DBGMSG(ohci->id,"Starting transmit DMA ctx=%d",d->ctx);
		/* point CommandPtr at the first queued program (Z in the
		 * low nibble) and run the context */
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx]|z);
		run_context(ohci, d->ctrlSet, NULL);
	}
	else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
			DBGMSG(ohci->id,"Waking transmit DMA ctx=%d",d->ctx);
		}

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}
	return 1;
}

/* Transmission of an async or iso packet: pick the right AT/IT context,
 * queue the packet and flush. Returns 1 on success, 0 on failure. */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR, ohci->id,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return 0;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use. However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway...
		 */
		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR, ohci->id,
				      "legacy IT context cannot be initialized during interrupt");
				return 0;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR, ohci->id,
				      "error initializing legacy IT context");
				return 0;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		/* response tcodes have bit 1 set */
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 1;
}

/* Host-controller control operations dispatched from the ieee1394 core
 * (bus reset, phy access, etc.). */
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET: