/*
 * ohci1394.c — OHCI-1394 driver, isochronous receive (IR) DMA section (excerpt).
 */
ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task); recv->task_active = 0; } dma_prog_region_free(&recv->prog); kfree(recv); iso->hostdata = NULL;}/* set up a "gapped" ring buffer DMA program */static void ohci_iso_recv_program(struct hpsb_iso *iso){ struct ohci_iso_recv *recv = iso->hostdata; int blk; /* address of 'branch' field in previous DMA descriptor */ u32 *prev_branch = NULL; for (blk = 0; blk < recv->nblocks; blk++) { u32 control; /* the DMA descriptor */ struct dma_cmd *cmd = &recv->block[blk]; /* offset of the DMA descriptor relative to the DMA prog buffer */ unsigned long prog_offset = blk * sizeof(struct dma_cmd); /* offset of this packet's data within the DMA buffer */ unsigned long buf_offset = blk * recv->buf_stride; if (recv->dma_mode == BUFFER_FILL_MODE) { control = 2 << 28; /* INPUT_MORE */ } else { control = 3 << 28; /* INPUT_LAST */ } control |= 8 << 24; /* s = 1, update xferStatus and resCount */ /* interrupt on last block, and at intervals */ if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) { control |= 3 << 20; /* want interrupt */ } control |= 3 << 18; /* enable branch to address */ control |= recv->buf_stride; cmd->control = cpu_to_le32(control); cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset)); cmd->branchAddress = 0; /* filled in on next loop */ cmd->status = cpu_to_le32(recv->buf_stride); /* link the previous descriptor to this one */ if (prev_branch) { *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1); } prev_branch = &cmd->branchAddress; } /* the final descriptor's branch address and Z should be left at 0 */}/* listen or unlisten to a specific channel (multi-channel mode only) */static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen){ struct ohci_iso_recv *recv = iso->hostdata; int reg, i; if (channel < 32) { reg = listen ? 
OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear; i = channel; } else { reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear; i = channel - 32; } reg_write(recv->ohci, reg, (1 << i)); /* issue a dummy read to force all PCI writes to be posted immediately */ mb(); reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);}static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask){ struct ohci_iso_recv *recv = iso->hostdata; int i; for (i = 0; i < 64; i++) { if (mask & (1ULL << i)) { if (i < 32) reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i)); else reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32))); } else { if (i < 32) reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i)); else reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32))); } } /* issue a dummy read to force all PCI writes to be posted immediately */ mb(); reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);}static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync){ struct ohci_iso_recv *recv = iso->hostdata; struct ti_ohci *ohci = recv->ohci; u32 command, contextMatch; reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF); wmb(); /* always keep ISO headers */ command = (1 << 30); if (recv->dma_mode == BUFFER_FILL_MODE) command |= (1 << 31); reg_write(recv->ohci, recv->ContextControlSet, command); /* match on specified tags */ contextMatch = tag_mask << 28; if (iso->channel == -1) { /* enable multichannel reception */ reg_write(recv->ohci, recv->ContextControlSet, (1 << 28)); } else { /* listen on channel */ contextMatch |= iso->channel; } if (cycle != -1) { u32 seconds; /* enable cycleMatch */ reg_write(recv->ohci, recv->ContextControlSet, (1 << 29)); /* set starting cycle */ cycle &= 0x1FFF; /* 'cycle' is only mod 8000, but we also need two 'seconds' bits - just snarf them from the current time */ seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) 
>> 25; /* advance one second to give some extra time for DMA to start */ seconds += 1; cycle |= (seconds & 3) << 13; contextMatch |= cycle << 12; } if (sync != -1) { /* set sync flag on first DMA descriptor */ struct dma_cmd *cmd = &recv->block[recv->block_dma]; cmd->control |= cpu_to_le32(DMA_CTL_WAIT); /* match sync field */ contextMatch |= (sync&0xf)<<8; } reg_write(recv->ohci, recv->ContextMatch, contextMatch); /* address of first descriptor block */ command = dma_prog_region_offset_to_bus(&recv->prog, recv->block_dma * sizeof(struct dma_cmd)); command |= 1; /* Z=1 */ reg_write(recv->ohci, recv->CommandPtr, command); /* enable interrupts */ reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context); wmb(); /* run */ reg_write(recv->ohci, recv->ContextControlSet, 0x8000); /* issue a dummy read of the cycle timer register to force all PCI writes to be posted immediately */ mb(); reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer); /* check RUN */ if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) { PRINT(KERN_ERR, "Error starting IR DMA (ContextControl 0x%08x)\n", reg_read(recv->ohci, recv->ContextControlSet)); return -1; } return 0;}static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block){ /* re-use the DMA descriptor for the block */ /* by linking the previous descriptor to it */ int next_i = block; int prev_i = (next_i == 0) ? 
(recv->nblocks - 1) : (next_i - 1); struct dma_cmd *next = &recv->block[next_i]; struct dma_cmd *prev = &recv->block[prev_i]; /* ignore out-of-range requests */ if ((block < 0) || (block > recv->nblocks)) return; /* 'next' becomes the new end of the DMA chain, so disable branch and enable interrupt */ next->branchAddress = 0; next->control |= cpu_to_le32(3 << 20); next->status = cpu_to_le32(recv->buf_stride); /* link prev to next */ prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, sizeof(struct dma_cmd) * next_i) | 1); /* Z=1 */ /* disable interrupt on previous DMA descriptor, except at intervals */ if ((prev_i % recv->block_irq_interval) == 0) { prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */ } else { prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */ } wmb(); /* wake up DMA in case it fell asleep */ reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));}static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv, struct hpsb_iso_packet_info *info){ /* release the memory where the packet was */ recv->released_bytes += info->total_len; /* have we released enough memory for one block? 
*/ while (recv->released_bytes > recv->buf_stride) { ohci_iso_recv_release_block(recv, recv->block_reader); recv->block_reader = (recv->block_reader + 1) % recv->nblocks; recv->released_bytes -= recv->buf_stride; }}static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info){ struct ohci_iso_recv *recv = iso->hostdata; if (recv->dma_mode == BUFFER_FILL_MODE) { ohci_iso_recv_bufferfill_release(recv, info); } else { ohci_iso_recv_release_block(recv, info - iso->infos); }}/* parse all packets from blocks that have been fully received */static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv){ int wake = 0; int runaway = 0; struct ti_ohci *ohci = recv->ohci; while (1) { /* we expect the next parsable packet to begin at recv->dma_offset */ /* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */ unsigned int offset; unsigned short len, cycle, total_len; unsigned char channel, tag, sy; unsigned char *p = iso->data_buf.kvirt; unsigned int this_block = recv->dma_offset/recv->buf_stride; /* don't loop indefinitely */ if (runaway++ > 100000) { atomic_inc(&iso->overflows); PRINT(KERN_ERR, "IR DMA error - Runaway during buffer parsing!\n"); break; } /* stop parsing once we arrive at block_dma (i.e. 
don't get ahead of DMA) */ if (this_block == recv->block_dma) break; wake = 1; /* parse data length, tag, channel, and sy */ /* note: we keep our own local copies of 'len' and 'offset' so the user can't mess with them by poking in the mmap area */ len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8); if (len > 4096) { PRINT(KERN_ERR, "IR DMA error - bogus 'len' value %u\n", len); } channel = p[recv->dma_offset+1] & 0x3F; tag = p[recv->dma_offset+1] >> 6; sy = p[recv->dma_offset+0] & 0xF; /* advance to data payload */ recv->dma_offset += 4; /* check for wrap-around */ if (recv->dma_offset >= recv->buf_stride*recv->nblocks) { recv->dma_offset -= recv->buf_stride*recv->nblocks; } /* dma_offset now points to the first byte of the data payload */ offset = recv->dma_offset; /* advance to xferStatus/timeStamp */ recv->dma_offset += len; total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */ /* payload is padded to 4 bytes */ if (len % 4) { recv->dma_offset += 4 - (len%4); total_len += 4 - (len%4); } /* check for wrap-around */ if (recv->dma_offset >= recv->buf_stride*recv->nblocks) { /* uh oh, the packet data wraps from the last to the first DMA block - make the packet contiguous by copying its "tail" into the guard page */ int guard_off = recv->buf_stride*recv->nblocks; int tail_len = len - (guard_off - offset); if (tail_len > 0 && tail_len < recv->buf_stride) { memcpy(iso->data_buf.kvirt + guard_off, iso->data_buf.kvirt, tail_len); } recv->dma_offset -= recv->buf_stride*recv->nblocks; } /* parse timestamp */ cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8); cycle &= 0x1FFF; /* advance to next packet */ recv->dma_offset += 4; /* check for wrap-around */ if (recv->dma_offset >= recv->buf_stride*recv->nblocks) { recv->dma_offset -= recv->buf_stride*recv->nblocks; } hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy); } if (wake) hpsb_iso_wake(iso);}static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct 
ohci_iso_recv *recv){ int loop; struct ti_ohci *ohci = recv->ohci; /* loop over all blocks */ for (loop = 0; loop < recv->nblocks; loop++) { /* check block_dma to see if it's done */ struct dma_cmd *im = &recv->block[recv->block_dma]; /* check the DMA descriptor for new writes to xferStatus */ u16 xferstatus = le32_to_cpu(im->status) >> 16; /* rescount is the number of bytes *remaining to be written* in the block */ u16 rescount = le32_to_cpu(im->status) & 0xFFFF; unsigned char event = xferstatus & 0x1F; if (!event) { /* nothing has happened to this block yet */ break; } if (event != 0x11) { atomic_inc(&iso->overflows); PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event); } if (rescount != 0) { /* the card is still writing to this block; we can't touch it until it's done */ break; } /* OK, the block is finished... */ /* sync our view of the block */ dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride); /* reset the DMA descriptor */ im->status = recv->buf_stride; /* advance block_dma */ recv->block_dma = (recv->block_dma + 1) % recv->nblocks; if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) { atomic_inc(&iso->overflows); DBGMSG("ISO reception overflow - " "ran out of DMA blocks"); } } /* parse any packets that have arrived */ ohci_iso_recv_bufferfill_parse(iso, recv);}static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv){ int count; int wake = 0; struct ti_ohci *ohci = recv->ohci; /* loop over the entire buffer */ for (count = 0; count < recv->nblocks; count++) { u32 packet_len = 0; /* pointer to the DMA descriptor */ struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma; /* check the DMA descriptor for new writes to xferStatus */ u16 xferstatus = le32_to_cpu(il->status) >> 16; u16 rescount = le32_to_cpu(il->status) & 0xFFFF; unsigned char event = xferstatus & 0x1F; if (!event) { /* this packet hasn't come in yet; we are done for now */ goto 
out; } if (event == 0x11) { /* packet received successfully! */ /* rescount is the number of bytes *remaining* in the packet buffer, after the packet was written */
/*
 * (Residual page UI from the code-hosting site — not part of the driver source.)
 * Keyboard shortcuts: Copy code Ctrl+C; Search code Ctrl+F; Fullscreen F11;
 * Toggle theme Ctrl+Shift+D; Show shortcuts ?; Increase font Ctrl+=;
 * Decrease font Ctrl+-.
 */