📄 ohci1394.c
字号:
phy_reg = get_phy_reg(ohci, 5); phy_reg |= 0x40; set_phy_reg(ohci, 5, phy_reg); /* set ISBR */ break; case LONG_RESET: phy_reg = get_phy_reg(ohci, 1); phy_reg |= 0x40; set_phy_reg(ohci, 1, phy_reg); /* set IBR */ break; case SHORT_RESET_NO_FORCE_ROOT: phy_reg = get_phy_reg(ohci, 1); if (phy_reg & 0x80) { phy_reg &= ~0x80; set_phy_reg(ohci, 1, phy_reg); /* clear RHB */ } phy_reg = get_phy_reg(ohci, 5); phy_reg |= 0x40; set_phy_reg(ohci, 5, phy_reg); /* set ISBR */ break; case LONG_RESET_NO_FORCE_ROOT: phy_reg = get_phy_reg(ohci, 1); phy_reg &= ~0x80; phy_reg |= 0x40; set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */ break; case SHORT_RESET_FORCE_ROOT: phy_reg = get_phy_reg(ohci, 1); if (!(phy_reg & 0x80)) { phy_reg |= 0x80; set_phy_reg(ohci, 1, phy_reg); /* set RHB */ } phy_reg = get_phy_reg(ohci, 5); phy_reg |= 0x40; set_phy_reg(ohci, 5, phy_reg); /* set ISBR */ break; case LONG_RESET_FORCE_ROOT: phy_reg = get_phy_reg(ohci, 1); phy_reg |= 0xc0; set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */ break; default: retval = -1; } break; case GET_CYCLE_COUNTER: retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer); break; case SET_CYCLE_COUNTER: reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg); break; case SET_BUS_ID: PRINT(KERN_ERR, ohci->id, "devctl command SET_BUS_ID err"); break; case ACT_CYCLE_MASTER: if (arg) { /* check if we are root and other nodes are present */ u32 nodeId = reg_read(ohci, OHCI1394_NodeID); if ((nodeId & (1<<30)) && (nodeId & 0x3f)) { /* * enable cycleTimer, cycleMaster */ DBGMSG(ohci->id, "Cycle master enabled"); reg_write(ohci, OHCI1394_LinkControlSet, 0x00300000); } } else { /* disable cycleTimer, cycleMaster, cycleSource */ reg_write(ohci, OHCI1394_LinkControlClear, 0x00700000); } break; case CANCEL_REQUESTS: DBGMSG(ohci->id, "Cancel request received"); dma_trm_reset(&ohci->at_req_context); dma_trm_reset(&ohci->at_resp_context); break; case MODIFY_USAGE: if (arg) { MOD_INC_USE_COUNT; } else { MOD_DEC_USE_COUNT; } retval = 1; break; case ISO_LISTEN_CHANNEL: { u64 mask; if (arg<0 || arg>63) { PRINT(KERN_ERR, ohci->id, "%s: IS0 listen channel %d is out of range", __FUNCTION__, arg); return -EFAULT; } /* activate the legacy IR context */ if (ohci->ir_legacy_context.ohci == NULL) { if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context, DMA_CTX_ISO, 0, IR_NUM_DESC, IR_BUF_SIZE, IR_SPLIT_BUF_SIZE, OHCI1394_IsoRcvContextBase) < 0) { PRINT(KERN_ERR, ohci->id, "%s: failed to allocate an IR context", __FUNCTION__); return -ENOMEM; } ohci->ir_legacy_channels = 0; initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1); DBGMSG(ohci->id, "ISO receive legacy context activated"); } mask = (u64)0x1<<arg; spin_lock_irqsave(&ohci->IR_channel_lock, flags); if (ohci->ISO_channel_usage & mask) { PRINT(KERN_ERR, ohci->id, "%s: IS0 listen channel %d is already used", __FUNCTION__, arg); spin_unlock_irqrestore(&ohci->IR_channel_lock, flags); return -EFAULT; } ohci->ISO_channel_usage |= mask; ohci->ir_legacy_channels |= mask; if (arg>31) reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, 1<<(arg-32)); else reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, 1<<arg); spin_unlock_irqrestore(&ohci->IR_channel_lock, flags); DBGMSG(ohci->id, "Listening enabled on channel %d", arg); break; } case ISO_UNLISTEN_CHANNEL: { u64 mask; if (arg<0 || arg>63) { PRINT(KERN_ERR, ohci->id, "%s: IS0 unlisten channel %d is out of range", __FUNCTION__, arg); return -EFAULT; } mask = (u64)0x1<<arg; spin_lock_irqsave(&ohci->IR_channel_lock, flags); if (!(ohci->ISO_channel_usage & mask)) { PRINT(KERN_ERR, ohci->id, "%s: IS0 unlisten channel %d is not used", __FUNCTION__, arg); spin_unlock_irqrestore(&ohci->IR_channel_lock, flags); return -EFAULT; } ohci->ISO_channel_usage &= ~mask; ohci->ir_legacy_channels &= ~mask; if (arg>31) reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 1<<(arg-32)); else reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 1<<arg); spin_unlock_irqrestore(&ohci->IR_channel_lock, flags); DBGMSG(ohci->id, "Listening disabled on channel %d", arg); if (ohci->ir_legacy_channels == 0) { free_dma_rcv_ctx(&ohci->ir_legacy_context); DBGMSG(ohci->id, "ISO receive legacy context deactivated"); } break; } default: PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet", cmd); break; } return retval;}/*********************************** * rawiso ISO reception * ***********************************//* We use either buffer-fill or packet-per-buffer DMA mode. The DMA buffer is split into "blocks" (regions described by one DMA descriptor). Each block must be one page or less in size, and must not cross a page boundary. There is one little wrinkle with buffer-fill mode: a packet that starts in the final block may wrap around into the first block. But the user API expects all packets to be contiguous. Our solution is to keep the very last page of the DMA buffer in reserve - if a packet spans the gap, we copy its tail into this page.*/struct ohci_iso_recv { struct ti_ohci *ohci; struct ohci1394_iso_tasklet task; int task_active; enum { BUFFER_FILL_MODE, PACKET_PER_BUFFER_MODE } dma_mode; /* memory and PCI mapping for the DMA descriptors */ struct dma_prog_region prog; struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */ /* how many DMA blocks fit in the buffer */ unsigned int nblocks; /* stride of DMA blocks */ unsigned int buf_stride; /* number of blocks to batch between interrupts */ int block_irq_interval; /* block that DMA will finish next */ int block_dma; /* (buffer-fill only) block that the reader will release next */ int block_reader; /* (buffer-fill only) bytes of buffer the reader has released, less than one block */ int released_bytes; /* (buffer-fill only) buffer offset at which the next packet will appear */ int dma_offset; /* OHCI DMA context control registers */ u32 ContextControlSet; u32 ContextControlClear; u32 CommandPtr; u32 ContextMatch;};static void ohci_iso_recv_task(unsigned long data);static void ohci_iso_recv_stop(struct hpsb_iso *iso);static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);static void ohci_iso_recv_program(struct hpsb_iso *iso);static int ohci_iso_recv_init(struct hpsb_iso *iso){ struct ti_ohci *ohci = iso->host->hostdata; struct ohci_iso_recv *recv; int ctx; int ret = -ENOMEM; recv = kmalloc(sizeof(*recv), SLAB_KERNEL); if (!recv) return -ENOMEM; iso->hostdata = recv; recv->ohci = ohci; recv->task_active = 0; dma_prog_region_init(&recv->prog); recv->block = NULL; /* use buffer-fill mode, unless irq_interval is 1 (note: multichannel requires buffer-fill) */ if (iso->irq_interval == 1 && iso->channel != -1) { recv->dma_mode = PACKET_PER_BUFFER_MODE; } else { recv->dma_mode = BUFFER_FILL_MODE; } /* set nblocks, buf_stride, block_irq_interval */ if (recv->dma_mode == BUFFER_FILL_MODE) { recv->buf_stride = PAGE_SIZE; /* one block per page of data in the DMA buffer, minus the final guard page */ recv->nblocks = iso->buf_size/PAGE_SIZE - 1; if (recv->nblocks < 3) { DBGMSG(ohci->id, "ohci_iso_recv_init: DMA buffer too small"); goto err; } /* iso->irq_interval is in packets - translate that to blocks */ /* (err, sort of... 1 is always the safest value) */ recv->block_irq_interval = iso->irq_interval / recv->nblocks; if (recv->block_irq_interval*4 > recv->nblocks) recv->block_irq_interval = recv->nblocks/4; if (recv->block_irq_interval < 1) recv->block_irq_interval = 1; } else { int max_packet_size; recv->nblocks = iso->buf_packets; recv->block_irq_interval = 1; /* choose a buffer stride */ /* must be a power of 2, and <= PAGE_SIZE */ max_packet_size = iso->buf_size / iso->buf_packets; for (recv->buf_stride = 8; recv->buf_stride < max_packet_size; recv->buf_stride *= 2); if (recv->buf_stride*iso->buf_packets > iso->buf_size || recv->buf_stride > PAGE_SIZE) { /* this shouldn't happen, but anyway... */ DBGMSG(ohci->id, "ohci_iso_recv_init: problem choosing a buffer stride"); goto err; } } recv->block_reader = 0; recv->released_bytes = 0; recv->block_dma = 0; recv->dma_offset = 0; /* size of DMA program = one descriptor per block */ if (dma_prog_region_alloc(&recv->prog, sizeof(struct dma_cmd) * recv->nblocks, recv->ohci->dev)) goto err; recv->block = (struct dma_cmd*) recv->prog.kvirt; ohci1394_init_iso_tasklet(&recv->task, iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE : OHCI_ISO_RECEIVE, ohci_iso_recv_task, (unsigned long) iso); if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) goto err; recv->task_active = 1; /* recv context registers are spaced 32 bytes apart */ ctx = recv->task.context; recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx; recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx; recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx; recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx; if (iso->channel == -1) { /* clear multi-channel selection mask */ reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF); reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF); } /* write the DMA program */ ohci_iso_recv_program(iso); DBGMSG(ohci->id, "ohci_iso_recv_init: %s mode, DMA buffer is %lu pages" " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d", recv->dma_mode == BUFFER_FILL_MODE ? "buffer-fill" : "packet-per-buffer", iso->buf_size/PAGE_SIZE, iso->buf_size, recv->nblocks, recv->buf_stride, recv->block_irq_interval); return 0;err: ohci_iso_recv_shutdown(iso); return ret;}static void ohci_iso_recv_stop(struct hpsb_iso *iso){ struct ohci_iso_recv *recv = iso->hostdata; /* disable interrupts */ reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context); /* halt DMA */ ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);}static void ohci_iso_recv_shutdown(struct hpsb_iso *iso){ struct ohci_iso_recv *recv = iso->hostdata; if (recv->task_active) { ohci_iso_recv_stop(iso); ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task); recv->task_active = 0; } dma_prog_region_free(&recv->prog); kfree(recv); iso->hostdata = NULL;}/* set up a "gapped" ring buffer DMA program */static void ohci_iso_recv_program(struct hpsb_iso *iso){ struct ohci_iso_recv *recv = iso->hostdata; int blk; /* address of 'branch' field in previous DMA descriptor */ u32 *prev_branch = NULL; for (blk = 0; blk < recv->nblocks; blk++) { u32 control; /* the DMA descriptor */ struct dma_cmd *cmd = &recv->block[blk]; /* offset of the DMA descriptor relative to the DMA prog buffer */ unsigned long prog_offset = blk * sizeof(struct dma_cmd); /* offset of this packet's data within the DMA buffer */ unsigned long buf_offset = blk * recv->buf_stride; if (recv->dma_mode == BUFFER_FILL_MODE) { control = 2 << 28; /* INPUT_MORE */ } else { control = 3 << 28; /* INPUT_LAST */ } control |= 8 << 24; /* s = 1, update xferStatus and resCount */ /* interrupt on last block, and at intervals */ if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) { control |= 3 << 20; /* want interrupt */ } control |= 3 << 18; /* enable branch to address */ control |= recv->buf_stride; cmd->control = cpu_to_le32(control); cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset)); cmd->branchAddress = 0; /* filled in on next loop */ cmd->status = cpu_to_le32(recv->buf_stride); /* link the previous descriptor to this one */ if (prev_branch) { *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1); } prev_branch = &cmd->branchAddress;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -