/*
 * isp116x-hcd.c -- ISP116x USB host controller driver (excerpt)
 */
		list_del_init(&ep->schedule);
		return;
	}

	/* periodic deschedule: unlink the endpoint from every periodic-tree
	   branch it occupies and give its bandwidth back to the per-frame
	   load accounting. */
	DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
	for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
		struct isp116x_ep *temp;
		struct isp116x_ep **prev = &isp116x->periodic[i];

		while (*prev && ((temp = *prev) != ep))
			prev = &temp->next;
		if (*prev)
			*prev = ep->next;
		isp116x->load[i] -= ep->load;
	}
	/* branch == PERIODIC_SIZE marks the endpoint as unscheduled */
	ep->branch = PERIODIC_SIZE;
	isp116x_to_hcd(isp116x)->self.bandwidth_allocated -=
	    ep->load / ep->period;

	/* switch irq type? No periodic endpoints left: go back to
	   ATL-done interrupts instead of per-SOF interrupts. */
	if (!--isp116x->periodic_count) {
		isp116x->irqenb &= ~HCuPINT_SOF;
		isp116x->irqenb |= HCuPINT_ATL;
	}
}

/*
   Scan transfer lists, schedule transfers, send data off to chip.
   Called with isp116x->lock held, irqs off.
 */
static void start_atl_transfers(struct isp116x *isp116x)
{
	struct isp116x_ep *last_ep = NULL, *ep;
	struct urb *urb;
	u16 load = 0;
	int len, index, speed, byte_time;

	/* previous batch is still being finished in the irq path */
	if (atomic_read(&isp116x->atl_finishing))
		return;

	if (!HC_IS_RUNNING(isp116x_to_hcd(isp116x)->state))
		return;

	/* FIFO not empty? The chip is still working on the last batch. */
	if (isp116x_read_reg16(isp116x, HCBUFSTAT) & HCBUFSTAT_ATL_FULL)
		return;

	isp116x->atl_active = NULL;
	isp116x->atl_buflen = isp116x->atl_bufshrt = 0;

	/* Schedule int transfers */
	if (isp116x->periodic_count) {
		isp116x->fmindex = index =
		    (isp116x->fmindex + 1) & (PERIODIC_SIZE - 1);
		if ((load = isp116x->load[index])) {
			/* Bring all int transfers for this frame
			   into the active queue */
			isp116x->atl_active = last_ep =
			    isp116x->periodic[index];
			while (last_ep->next)
				last_ep = (last_ep->active = last_ep->next);
			last_ep->active = NULL;
		}
	}

	/* Schedule control/bulk transfers */
	list_for_each_entry(ep, &isp116x->async, schedule) {
		urb = container_of(ep->hep->urb_list.next,
				   struct urb, urb_list);
		speed = urb->dev->speed;
		byte_time = speed == USB_SPEED_LOW
		    ? BYTE_TIME_LOWSPEED : BYTE_TIME_FULLSPEED;

		if (ep->nextpid == USB_PID_SETUP) {
			len = sizeof(struct usb_ctrlrequest);
		} else if (ep->nextpid == USB_PID_ACK) {
			len = 0;
		} else {
			/* Find current free length ... */
			len = (MAX_LOAD_LIMIT - load) / byte_time;

			/* ... then limit it to configured max size ... */
			len = min(len, speed == USB_SPEED_LOW ?
				  MAX_TRANSFER_SIZE_LOWSPEED :
				  MAX_TRANSFER_SIZE_FULLSPEED);

			/* ... and finally cut to the multiple of
			   MaxPacketSize, or to the real length if there's
			   enough room. */
			if (len <
			    (urb->transfer_buffer_length -
			     urb->actual_length)) {
				len -= len % ep->maxpacket;
				/* no room for even one full packet:
				   try the next endpoint */
				if (!len)
					continue;
			} else
				len = urb->transfer_buffer_length -
				    urb->actual_length;
			BUG_ON(len < 0);
		}

		load += len * byte_time;
		if (load > MAX_LOAD_LIMIT)
			break;

		ep->active = NULL;
		ep->length = len;
		if (last_ep)
			last_ep->active = ep;
		else
			isp116x->atl_active = ep;
		last_ep = ep;
	}

	/* Avoid starving of endpoints: rotate the async list head so the
	   next scan starts with a different endpoint. */
	if ((&isp116x->async)->next != (&isp116x->async)->prev)
		list_move(&isp116x->async, (&isp116x->async)->next);

	if (isp116x->atl_active) {
		preproc_atl_queue(isp116x);
		pack_fifo(isp116x);
	}
}

/*
   Finish the processed transfers: unload the FIFO and give back
   every completed URB in the active queue.
 */
static void finish_atl_transfers(struct isp116x *isp116x,
				 struct pt_regs *regs)
{
	struct isp116x_ep *ep;
	struct urb *urb;

	if (!isp116x->atl_active)
		return;
	/* Fifo not ready? */
	if (!(isp116x_read_reg16(isp116x, HCBUFSTAT) & HCBUFSTAT_ATL_DONE))
		return;

	/* blocks start_atl_transfers() from re-filling the FIFO
	   while we post-process */
	atomic_inc(&isp116x->atl_finishing);
	unpack_fifo(isp116x);
	postproc_atl_queue(isp116x);
	for (ep = isp116x->atl_active; ep; ep = ep->active) {
		urb = container_of(ep->hep->urb_list.next,
				   struct urb, urb_list);
		/* USB_PID_ACK check here avoids finishing of control
		   transfers, for which TD_DATAUNDERRUN occured, while
		   URB_SHORT_NOT_OK was set */
		if (urb && urb->status != -EINPROGRESS
		    && ep->nextpid != USB_PID_ACK)
			finish_request(isp116x, ep, urb, regs);
	}
	atomic_dec(&isp116x->atl_finishing);
}

/* Interrupt handler: ack chip interrupts, finish the completed ATL
   batch, handle OPR (root hub / error) events, then start the next
   batch. */
static irqreturn_t isp116x_irq(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct isp116x *isp116x = hcd_to_isp116x(hcd);
	u16 irqstat;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&isp116x->lock);
	/* mask chip interrupts while handling, re-enabled at exit */
	isp116x_write_reg16(isp116x, HCuPINTENB, 0);
	irqstat = isp116x_read_reg16(isp116x, HCuPINT);
	isp116x_write_reg16(isp116x, HCuPINT, irqstat);

	if (irqstat & (HCuPINT_ATL | HCuPINT_SOF)) {
		ret = IRQ_HANDLED;
		finish_atl_transfers(isp116x, regs);
	}

	if (irqstat & HCuPINT_OPR) {
		u32 intstat = isp116x_read_reg32(isp116x, HCINTSTAT);
		isp116x_write_reg32(isp116x, HCINTSTAT, intstat);
		if (intstat & HCINT_UE) {
			ERR("Unrecoverable error\n");
			/* What should we do here? Reset?  */
		}
		if (intstat & HCINT_RHSC)
			/* When root hub or any of its ports is going
			   to come out of suspend, it may take more
			   than 10ms for status bits to stabilize. */
			mod_timer(&hcd->rh_timer, jiffies
				  + msecs_to_jiffies(20) + 1);
		if (intstat & HCINT_RD) {
			DBG("---- remote wakeup\n");
			usb_hcd_resume_root_hub(hcd);
			ret = IRQ_HANDLED;
		}
		irqstat &= ~HCuPINT_OPR;
		ret = IRQ_HANDLED;
	}

	if (irqstat & (HCuPINT_ATL | HCuPINT_SOF)) {
		start_atl_transfers(isp116x);
	}

	isp116x_write_reg16(isp116x, HCuPINTENB, isp116x->irqenb);
	spin_unlock(&isp116x->lock);
	return ret;
}

/*-----------------------------------------------------------------*/

/* usb 1.1 says max 90% of a frame is available for periodic transfers.
* this driver doesn't promise that much since it's got to handle an * IRQ per packet; irq handling latencies also use up that time. *//* out of 1000 us */#define MAX_PERIODIC_LOAD 600static int balance(struct isp116x *isp116x, u16 period, u16 load){ int i, branch = -ENOSPC; /* search for the least loaded schedule branch of that period which has enough bandwidth left unreserved. */ for (i = 0; i < period; i++) { if (branch < 0 || isp116x->load[branch] > isp116x->load[i]) { int j; for (j = i; j < PERIODIC_SIZE; j += period) { if ((isp116x->load[j] + load) > MAX_PERIODIC_LOAD) break; } if (j < PERIODIC_SIZE) continue; branch = i; } } return branch;}/* NB! ALL the code above this point runs with isp116x->lock held, irqs off*//*-----------------------------------------------------------------*/static int isp116x_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep, struct urb *urb, gfp_t mem_flags){ struct isp116x *isp116x = hcd_to_isp116x(hcd); struct usb_device *udev = urb->dev; unsigned int pipe = urb->pipe; int is_out = !usb_pipein(pipe); int type = usb_pipetype(pipe); int epnum = usb_pipeendpoint(pipe); struct isp116x_ep *ep = NULL; unsigned long flags; int i; int ret = 0; urb_dbg(urb, "Enqueue"); if (type == PIPE_ISOCHRONOUS) { ERR("Isochronous transfers not supported\n"); urb_dbg(urb, "Refused to enqueue"); return -ENXIO; } /* avoid all allocations within spinlocks: request or endpoint */ if (!hep->hcpriv) { ep = kzalloc(sizeof *ep, mem_flags); if (!ep) return -ENOMEM; } spin_lock_irqsave(&isp116x->lock, flags); if (!HC_IS_RUNNING(hcd->state)) { ret = -ENODEV; goto fail; } if (hep->hcpriv) ep = hep->hcpriv; else { INIT_LIST_HEAD(&ep->schedule); ep->udev = usb_get_dev(udev); ep->epnum = epnum; ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out); usb_settoggle(udev, epnum, is_out, 0); if (type == PIPE_CONTROL) { ep->nextpid = USB_PID_SETUP; } else if (is_out) { ep->nextpid = USB_PID_OUT; } else { ep->nextpid = USB_PID_IN; } if (urb->interval) { /* With 
INT URBs submitted, the driver works with SOF interrupt enabled and ATL interrupt disabled. After the PTDs are written to fifo ram, the chip starts fifo processing and usb transfers after the next SOF and continues until the transfers are finished (succeeded or failed) or the frame ends. Therefore, the transfers occur only in every second frame, while fifo reading/writing and data processing occur in every other second frame. */ if (urb->interval < 2) urb->interval = 2; if (urb->interval > 2 * PERIODIC_SIZE) urb->interval = 2 * PERIODIC_SIZE; ep->period = urb->interval >> 1; ep->branch = PERIODIC_SIZE; ep->load = usb_calc_bus_time(udev->speed, !is_out, (type == PIPE_ISOCHRONOUS), usb_maxpacket(udev, pipe, is_out)) / 1000; } hep->hcpriv = ep; ep->hep = hep; } /* maybe put endpoint into schedule */ switch (type) { case PIPE_CONTROL: case PIPE_BULK: if (list_empty(&ep->schedule)) list_add_tail(&ep->schedule, &isp116x->async); break; case PIPE_INTERRUPT: urb->interval = ep->period; ep->length = min((int)ep->maxpacket, urb->transfer_buffer_length); /* urb submitted for already existing endpoint */ if (ep->branch < PERIODIC_SIZE) break; ret = ep->branch = balance(isp116x, ep->period, ep->load); if (ret < 0) goto fail; ret = 0; urb->start_frame = (isp116x->fmindex & (PERIODIC_SIZE - 1)) + ep->branch; /* sort each schedule branch by period (slow before fast) to share the faster parts of the tree without needing dummy/placeholder nodes */ DBG("schedule qh%d/%p branch %d\n", ep->period, ep, ep->branch); for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) { struct isp116x_ep **prev = &isp116x->periodic[i]; struct isp116x_ep *here = *prev; while (here && ep != here) { if (ep->period > here->period) break; prev = &here->next; here = *prev; } if (ep != here) { ep->next = here; *prev = ep; } isp116x->load[i] += ep->load; } hcd->self.bandwidth_allocated += ep->load / ep->period; /* switch over to SOFint */ if (!isp116x->periodic_count++) { isp116x->irqenb &= ~HCuPINT_ATL; 
isp116x->irqenb |= HCuPINT_SOF; isp116x_write_reg16(isp116x, HCuPINTENB, isp116x->irqenb); } } /* in case of unlink-during-submit */ spin_lock(&urb->lock); if (urb->status != -EINPROGRESS) { spin_unlock(&urb->lock); finish_request(isp116x, ep, urb, NULL); ret = 0; goto fail; } urb->hcpriv = hep; spin_unlock(&urb->lock); start_atl_transfers(isp116x); fail: spin_unlock_irqrestore(&isp116x->lock, flags); return ret;}/* Dequeue URBs.*/static int isp116x_urb_dequeue(struct usb_hcd *hcd, struct urb *urb){ struct isp116x *isp116x = hcd_to_isp116x(hcd); struct usb_host_endpoint *hep; struct isp116x_ep *ep, *ep_act; unsigned long flags; spin_lock_irqsave(&isp116x->lock, flags); hep = urb->hcpriv; /* URB already unlinked (or never linked)? */ if (!hep) { spin_unlock_irqrestore(&isp116x->lock, flags); return 0; } ep = hep->hcpriv; WARN_ON(hep != ep->hep); /* In front of queue? */ if (ep->hep->urb_list.next == &urb->urb_list) /* active? */ for (ep_act = isp116x->atl_active; ep_act; ep_act = ep_act->active) if (ep_act == ep) { VDBG("dequeue, urb %p active; wait for irq\n", urb); urb = NULL; break; } if (urb) finish_request(isp116x, ep, urb, NULL); spin_unlock_irqrestore(&isp116x->lock, flags); return 0;}static void isp116x_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep){ int i; struct isp116x_ep *ep = hep->hcpriv;; if (!ep) return; /* assume we'd just wait for the irq */ for (i = 0; i < 100 && !list_empty(&hep->urb_list); i++) msleep(3); if (!list_empty(&hep->urb_list)) WARN("ep %p not empty?\n", ep); usb_put_dev(ep->udev); kfree(ep); hep->hcpriv = NULL;}static int isp116x_get_frame(struct usb_hcd *hcd){ struct isp116x *isp116x = hcd_to_isp116x(hcd); u32 fmnum; unsigned long flags;