📄 ohci1394.c
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;

	while (bytes_left > 0) {
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode,
				       ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			sprintf(msg, "Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}

		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor. */
		if ((offset + length) > d->buf_size) {
			DBGMSG(ohci->id, "Split packet rcv'd");
			if (length > d->split_buf_size) {
				ohci1394_stop_context(ohci, d->ctrlClear,
						      "Split packet size exceeded");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call. */
				PRINT(KERN_INFO, ohci->id,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr, buf_ptr, d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset = 0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr, buf_ptr, d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG(ohci->id, "Single packet rcv'd");
			memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
			if (offset == d->buf_size) {
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset = 0;
			}
		}

		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it. */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				packet_swab(d->spb, tcode, (length - 4) >> 2);
			DBGMSG(ohci->id, "Packet received from node"
			       " %d ack=0x%02X spd=%d tcode=0x%X"
			       " length=%d ctx=%d tlabel=%d",
			       (d->spb[1]>>16)&0x3f,
			       (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
			       (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
			       tcode, length, d->ctx,
			       (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>10)&0x3f);

			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
			       == 0x11) ? 1 : 0;

			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT(KERN_DEBUG, ohci->id, "Got phy packet ctx=%d ... discarded",
			      d->ctx);
#endif

		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
		bytes_left = d->buf_size - rescount - offset;
	}

	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}

/* Bottom half that processes sent packets */
static void dma_trm_tasklet (unsigned long data)
{
	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	struct hpsb_packet *packet;
	unsigned long flags;
	u32 ack;
	size_t datasize;

	spin_lock_irqsave(&d->lock, flags);

	while (!list_empty(&d->fifo_list)) {
		packet = driver_packet(d->fifo_list.next);
		datasize = packet->data_size;
		if (datasize && packet->type != hpsb_raw)
			ack = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->end.status) >> 16;
		else
			ack = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->begin.status) >> 16;

		if (ack == 0)
			/* this packet hasn't been sent yet*/
			break;

		if (!(ack & 0x10)) {
			/* XXX: This is an OHCI evt_* code. We need to handle
			 * this specially! For right now, we just fake an
			 * ackx_send_error. */
			PRINT(KERN_DEBUG, ohci->id, "Received OHCI evt_* error 0x%x",
			      ack & 0xf);
			ack = (ack & 0xffe0) | ACK_BUSY_A;
		}

#ifdef OHCI1394_DEBUG
		if (datasize)
			DBGMSG(ohci->id,
			       "Packet sent to node %d tcode=0x%X tLabel="
			       "0x%02X ack=0x%X spd=%d dataLength=%d ctx=%d",
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
					>>16)&0x3f,
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>4)&0xf,
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>10)&0x3f,
			       ack&0x1f, (ack>>5)&0x3,
			       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])
					>>16,
			       d->ctx);
		else
			DBGMSG(ohci->id,
			       "Packet sent to node %d tcode=0x%X tLabel="
			       "0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
					>>16)&0x3f,
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>4)&0xf,
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>10)&0x3f,
			       ack&0x1f, (ack>>5)&0x3,
			       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
			       d->ctx);
#endif

		list_del(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ack & 0xf);

		if (datasize) {
			pci_unmap_single(ohci->dev,
					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
					 datasize, PCI_DMA_TODEVICE);
			OHCI_DMA_FREE("single Xmit data packet");
		}

		d->sent_ind = (d->sent_ind+1)%d->num_desc;
		d->free_prgs++;
	}

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);
}

static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
{
	int i;

	if (d->ohci == NULL)
		return;

	DBGMSG(d->ohci->id, "Freeing dma_rcv_ctx %d", d->ctx);

	ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);

	if (d->type == DMA_CTX_ISO)
		ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_tasklet);
	else
		tasklet_kill(&d->task);

	if (d->buf_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->buf_cpu[i] && d->buf_bus[i]) {
				pci_free_consistent(
					d->ohci->dev, d->buf_size,
					d->buf_cpu[i], d->buf_bus[i]);
				OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
			}
		kfree(d->buf_cpu);
		kfree(d->buf_bus);
	}
	if (d->prg_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->prg_cpu[i] && d->prg_bus[i]) {
				pci_free_consistent(
					d->ohci->dev, sizeof(struct dma_cmd),
					d->prg_cpu[i], d->prg_bus[i]);
				OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
			}
		kfree(d->prg_cpu);
		kfree(d->prg_bus);
	}
	if (d->spb)
		kfree(d->spb);

	/* Mark this context as freed. */
	d->ohci = NULL;
}

static int
alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
		  enum context_type type, int ctx, int num_desc,
		  int buf_size, int split_buf_size, int context_base)
{
	int i;

	d->ohci = ohci;
	d->type = type;
	d->ctx = ctx;

	d->num_desc = num_desc;
	d->buf_size = buf_size;
	d->split_buf_size = split_buf_size;

	d->ctrlSet = context_base + OHCI1394_ContextControlSet;
	d->ctrlClear = context_base + OHCI1394_ContextControlClear;
	d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;

	d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_KERNEL);
	d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);

	if (d->buf_cpu == NULL || d->buf_bus == NULL) {
		PRINT(KERN_ERR, ohci->id, "Failed to allocate dma buffer");
		free_dma_rcv_ctx(d);
		return -ENOMEM;
	}
	memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
	memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));

	d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
			     GFP_KERNEL);
	d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);

	if (d->prg_cpu == NULL || d->prg_bus == NULL) {
		PRINT(KERN_ERR, ohci->id, "Failed to allocate dma prg");
		free_dma_rcv_ctx(d);
		return -ENOMEM;
	}
	memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
	memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));

	d->spb = kmalloc(d->split_buf_size, GFP_KERNEL);

	if (d->spb == NULL) {
		PRINT(KERN_ERR, ohci->id, "Failed to allocate split buffer");
		free_dma_rcv_ctx(d);
		return -ENOMEM;
	}

	for (i=0; i<d->num_desc; i++) {
		d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
						     d->buf_size,
						     d->buf_bus+i);
		OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);

		if (d->buf_cpu[i] != NULL) {
			memset(d->buf_cpu[i], 0, d->buf_size);
		} else {
			PRINT(KERN_ERR, ohci->id,
			      "Failed to allocate dma buffer");
			free_dma_rcv_ctx(d);
			return -ENOMEM;
		}

		d->prg_cpu[i] = pci_alloc_consistent(ohci->dev,
						     sizeof(struct dma_cmd),
						     d->prg_bus+i);
		OHCI_DMA_ALLOC("consistent dma_rcv prg[%d]", i);

		if (d->prg_cpu[i] != NULL) {
			memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
		} else {
			PRINT(KERN_ERR, ohci->id,
			      "Failed to allocate dma prg");
			free_dma_rcv_ctx(d);
			return -ENOMEM;
		}
	}

	spin_lock_init(&d->lock);

	if (type == DMA_CTX_ISO) {
		ohci1394_init_iso_tasklet(&ohci->ir_tasklet, OHCI_ISO_RECEIVE,
					  dma_rcv_tasklet, (unsigned long) d);
		if (ohci1394_register_iso_tasklet(ohci,
						  &ohci->ir_tasklet) < 0) {
			PRINT(KERN_ERR, ohci->id, "No IR DMA context available");
			free_dma_rcv_ctx(d);
			return -EBUSY;
		}
	}
	else
		tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long) d);

	return 0;
}

static void free_dma_trm_ctx(struct dma_trm_ctx *d)
{
	int i;

	if (d->ohci == NULL)
		return;

	DBGMSG(d->ohci->id, "Freeing dma_trm_ctx %d", d->ctx);

	ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);

	if (d->type == DMA_CTX_ISO)
		ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->it_tasklet);
	else
		tasklet_kill(&d->task);

	if (d->prg_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->prg_cpu[i] && d->prg_bus[i]) {
				pci_free_consistent(
					d->ohci->dev, sizeof(struct at_dma_prg),
					d->prg_cpu[i], d->prg_bus[i]);
				OHCI_DMA_FREE("consistent dma_trm prg[%d]", i);
			}
		kfree(d->prg_cpu);
		kfree(d->prg_bus);
	}

	/* Mark this context as freed. */
	d->ohci = NULL;
}

static int
alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
		  enum context_type type, int ctx, int num_desc,
		  int context_base)
{
	int i;

	d->ohci = ohci;
	d->type = type;
	d->ctx = ctx;
	d->num_desc = num_desc;
	d->ctrlSet = context_base + OHCI1394_ContextControlSet;
	d->ctrlClear = context_base + OHCI1394_ContextControlClear;
	d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;

	d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
			     GFP_KERNEL);
	d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);

	if (d->prg_cpu == NULL || d->prg_bus == NULL) {
		PRINT(KERN_ERR, ohci->id, "Failed to allocate at dma prg");
		free_dma_trm_ctx(d);
		return -ENOMEM;
	}
	memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
	memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));

	for (i = 0; i < d->num_desc; i++) {
		d->prg_cpu[i] = pci_alloc_consistent(ohci->dev,
						     sizeof(struct at_dma_prg),
						     d->prg_bus+i);
		OHCI_DMA_ALLOC("consistent dma_trm prg[%d]", i);

		if (d->prg_cpu[i] != NULL) {
			memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
		} else {
			PRINT(KERN_ERR, ohci->id,
			      "Failed to allocate at dma prg");
			free_dma_trm_ctx(d);
			return -ENOMEM;