📄 ohci1394.c
字号:
recv->ohci->dev)) goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	/* channel == -1 requests multi-channel reception (mask-based);
	   otherwise a plain single-channel IR context */
	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
		goto err;

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart
	   (ContextControlSet 0x400, ContextControlClear 0x404,
	    CommandPtr 0x40C, ContextMatch 0x410 for context 0;
	    see the OHCI 1394 spec, IR context register map) */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask - no channels selected yet;
		   callers enable individual channels afterwards */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG(ohci->id, "ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}

/* Stop a running IR context: mask its interrupt source, then halt the
 * DMA engine via the context's ContextControlClear register. */
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}

/* Tear down the receive context: stop DMA and unregister the tasklet
 * (if it was ever activated), then free the DMA program region and the
 * hostdata itself.  Safe to call from the init error path. */
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}

/* set up a "gapped" ring buffer DMA program - one descriptor per block,
 * each branching to the next, with the last left unlinked */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride;
		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one (Z=1: one
		   descriptor at the branch target) */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}

/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	/* channels 0-31 live in the Lo mask register pair, 32-63 in Hi */
	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

/* Program the full 64-bit multi-channel mask: for each of the 64
 * channels, write the Set register if its bit is in 'mask', otherwise
 * the Clear register. */
static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

/* Program and start the IR DMA context.  cycle == -1 means start ASAP;
 * otherwise wait for that (mod-8000) cycle number.  sync == -1 disables
 * sync-field matching.  Returns 0 on success, -1 if the context did not
 * enter the RUN state. */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	u32 command, contextMatch;

	/* reset the context control register before configuring it */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];

		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force all
	   PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, recv->ohci->id,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}

/* Return one ring block to the hardware: re-use the DMA descriptor for
 * the block by linking the previous descriptor to it. */
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	int next_i = block;
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
							sizeof(struct dma_cmd) * next_i) | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}

	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}

/* Buffer-fill mode: account for the released packet's footprint in the
 * ring (payload + padding + OHCI per-packet overhead) and hand whole
 * blocks back to the hardware once enough bytes have been consumed. */
static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	int len;

	/* release the memory where the packet was */
	len = info->len;

	/* add the wasted space for padding to 4 bytes */
	if (len % 4)
		len += 4 - (len % 4);

	/* add 8 bytes for the OHCI DMA data format overhead */
	len += 8;

	recv->released_bytes += len;

	/* have we released enough memory for one block?
 */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}

/* Release one received packet back to the DMA ring, dispatching on the
 * receive mode.  In packet-per-buffer mode each packet corresponds to
 * exactly one block, indexed by the packet's position in iso->infos. */
static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}

/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR, recv->ohci->id,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR, recv->ohci->id,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload (past the 4-byte packet header) */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last to the first
			   DMA block - make the packet contiguous by copying its
			   "tail" into the guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt, tail_len);
			}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -