📄 ohci1394.c
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}

/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	u32 command, contextMatch;

	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync & 0xf) << 8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog, recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, recv->ohci->id,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
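/*
 * Recycle a completed descriptor block: the released block becomes the new
 * tail of the descriptor ring (no branch, interrupt enabled), the previous
 * descriptor is re-linked to branch into it, and the DMA context is woken
 * in case it stopped at the old tail.
 */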
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3 << 20)); /* disable interrupt */
	}

	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}

static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv, struct hpsb_iso_packet_info *info)
{
	int len;

	/* release the memory where the packet was */
	len = info->len;

	/* add the wasted space for padding to 4 bytes */
	if (len % 4)
		len += 4 - (len % 4);

	/* add 8 bytes for the OHCI DMA data format overhead */
	len += 8;

	recv->released_bytes += len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}

static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}
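/*
 * In buffer-fill mode each received packet appears in the data buffer as a
 * 4-byte header quadlet (length, tag, channel, sy), the payload padded to a
 * multiple of 4 bytes, and a trailing 4-byte quadlet carrying the timestamp;
 * the parser below walks this layout (section 10.6.1.1 of the OHCI spec).
 */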
/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR, recv->ohci->id,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR, recv->ohci->id,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last to the first DMA block -
			   make the packet contiguous by copying its "tail" into the guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}

static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int loop;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */
		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be written* in the block */
		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* nothing has happened to this block yet */
			break;
		}

		if (event != 0x11) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR, recv->ohci->id,
			      "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		if (rescount != 0) {
			/* the card is still writing to this block;
			   we can't touch it until it's done */
			break;
		}

		/* OK, the block is finished... */

		/* sync our view of the block */
		dma_region_sync(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);

		/* reset the DMA descriptor */
		im->status = recv->buf_stride;

		/* advance block_dma */
		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;

		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
			atomic_inc(&iso->overflows);
			DBGMSG(recv->ohci->id, "ISO reception overflow - "
			       "ran out of DMA blocks");
		}
	}

	/* parse any packets that have arrived */
	ohci_iso_recv_bufferfill_parse(iso, recv);
}
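/*
 * Packet-per-buffer mode: one DMA descriptor per packet.  Walk the
 * descriptors, and for each one whose xferStatus reports a completed event,
 * compute the payload length from the residual count, hand the packet to
 * the core, and reset the descriptor.
 */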
static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int count;
	int wake = 0;

	/* loop over the entire buffer */
	for (count = 0; count < recv->nblocks; count++) {
		u32 packet_len = 0;

		/* pointer to the DMA descriptor */
		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(il->status) >> 16;
		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* this packet hasn't come in yet; we are done for now */
			goto out;
		}

		if (event == 0x11) {
			/* packet received successfully! */

			/* rescount is the number of bytes *remaining* in the packet buffer,
			   after the packet was written */
			packet_len = recv->buf_stride - rescount;

		} else if (event == 0x02) {
			PRINT(KERN_ERR, recv->ohci->id, "IR DMA error - packet too long for buffer\n");
		} else if (event) {
			PRINT(KERN_ERR, recv->ohci->id, "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		/* sync our view of the buffer */
		dma_region_sync(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);

		/* record the per-packet info */
		{
			/* iso header is 8 bytes ahead of the data payload */
			unsigned char *hdr;
			unsigned int offset;
			unsigned short cycle;
			unsigned char channel, tag, sy;

			offset = iso->pkt_dma * recv->buf_stride;
			hdr = iso->data_buf.kvirt + offset;

			/* skip iso header */
			offset += 8;
			packet_len -= 8;

			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
			channel = hdr[5] & 0x3F;
			tag = hdr[5] >> 6;
			sy = hdr[4] & 0xF;

			hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
		}

		/* reset the DMA descriptor */
		il->status = recv->buf_stride;

		wake = 1;
		recv->block_dma = iso->pkt_dma;
	}

out:
	if (wake)
		hpsb_iso_wake(iso);
}

static void ohci_iso_recv_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->dma_mode == BUFFER_FILL_MODE)
		ohci_iso_recv_bufferfill_task(iso, recv);
	else
		ohci_iso_recv_packetperbuf_task(iso, recv);
}
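For orientation, a minimal usage sketch (not part of ohci1394.c): it assumes an hpsb_iso handle that the ieee1394 core has already configured for reception, and that the core's hpsb_iso_recv_start() forwards its cycle/tag_mask/sync arguments to ohci_iso_recv_start() above.

/* hypothetical caller, for illustration only */
#include "iso.h"	/* assumed: hpsb_iso API header of the ieee1394 core */

static int start_ir(struct hpsb_iso *iso)
{
	/* cycle = -1: start as soon as possible (no cycleMatch)
	 * tag_mask = 0xf: accept all four tag values
	 * sync = -1: do not wait for a sync field */
	return hpsb_iso_recv_start(iso, -1, 0xf, -1);
}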