dm_usb_isp1362.c
	int len, index, speed, byte_time;

	if (atomic_read(&isp1362->atl_finishing))
		return;

	if (!HC_IS_RUNNING(isp1362_to_hcd(isp1362)->state))
		return;

	/* FIFO not empty? */
	if (isp1362_read_reg16(isp1362, HCBUFSTAT) & HCBUFSTAT_ATL_FULL)
		return;

	isp1362->atl_active = NULL;
	isp1362->atl_buflen = isp1362->atl_bufshrt = 0;

	/* Schedule int transfers */
	if (isp1362->periodic_count) {
		isp1362->fmindex = index =
			(isp1362->fmindex + 1) & (PERIODIC_SIZE - 1);
		if ((load = isp1362->load[index])) {
			/* Bring all int transfers for this frame
			   into the active queue */
			isp1362->atl_active = last_ep = isp1362->periodic[index];
			while (last_ep->next)
				last_ep = (last_ep->active = last_ep->next);
			last_ep->active = NULL;
		}
	}

	/* Schedule control/bulk transfers */
	list_for_each_entry(ep, &isp1362->async, schedule) {
		urb = container_of(ep->hep->urb_list.next,
				   struct urb, urb_list);
		speed = urb->dev->speed;
		byte_time = speed == USB_SPEED_LOW
			? BYTE_TIME_LOWSPEED : BYTE_TIME_FULLSPEED;

		if (ep->nextpid == USB_PID_SETUP) {
			len = sizeof(struct usb_ctrlrequest);
		} else if (ep->nextpid == USB_PID_ACK) {
			len = 0;
		} else {
			/* Find current free length ... */
			len = (MAX_LOAD_LIMIT - load) / byte_time;

			/* ... then limit it to configured max size ... */
			len = min(len, speed == USB_SPEED_LOW ?
				  MAX_TRANSFER_SIZE_LOWSPEED :
				  MAX_TRANSFER_SIZE_FULLSPEED);

			/* ... and finally cut to the multiple of MaxPacketSize,
			   or to the real length if there's enough room. */
			if (len < (urb->transfer_buffer_length -
				   urb->actual_length)) {
				len -= len % ep->maxpacket;
				if (!len)
					continue;
			} else
				len = urb->transfer_buffer_length -
					urb->actual_length;
			BUG_ON(len < 0);
		}

		load += len * byte_time;
		if (load > MAX_LOAD_LIMIT)
			break;

		ep->active = NULL;
		ep->length = len;
		if (last_ep)
			last_ep->active = ep;
		else
			isp1362->atl_active = ep;
		last_ep = ep;
	}

	/* Avoid starving of endpoints */
	if ((&isp1362->async)->next != (&isp1362->async)->prev)
		list_move(&isp1362->async, (&isp1362->async)->next);

	if (isp1362->atl_active) {
		preproc_atl_queue(isp1362);
		pack_fifo(isp1362);
	}
}
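
/*
 * Illustrative sketch only, not part of the original driver: the helper
 * below merely restates the length-budget arithmetic of the control/bulk
 * scheduling loop above for a single endpoint.  The name example_atl_len()
 * and its parameters are hypothetical; the real code works directly on the
 * urb and ep fields and on the driver's MAX_LOAD_LIMIT/MAX_TRANSFER_SIZE_*
 * constants.
 */
#if 0
static int example_atl_len(int load, int byte_time, int maxpacket,
			   int remaining, int max_size)
{
	/* free bytes left in this frame's load budget */
	int len = (MAX_LOAD_LIMIT - load) / byte_time;

	/* per-speed configured limit */
	len = min(len, max_size);

	if (len < remaining)
		len -= len % maxpacket;	/* only whole packets fit */
	else
		len = remaining;	/* the rest of the URB fits completely */
	return len;
}
#endif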

/* Finish the processed transfers */
static void finish_atl_transfers(struct isp1362 *isp1362, struct pt_regs *regs)
{
	struct isp1362_ep *ep;
	struct urb *urb;

	if (!isp1362->atl_active)
		return;

	/* Fifo not ready? */
	if (!(isp1362_read_reg16(isp1362, HCBUFSTAT) & HCBUFSTAT_ATL_DONE))
		return;

	atomic_inc(&isp1362->atl_finishing);
	unpack_fifo(isp1362);
	postproc_atl_queue(isp1362);
	for (ep = isp1362->atl_active; ep; ep = ep->active) {
		urb = container_of(ep->hep->urb_list.next,
				   struct urb, urb_list);
		/* USB_PID_ACK check here avoids finishing of control
		   transfers, for which TD_DATAUNDERRUN occurred,
		   while URB_SHORT_NOT_OK was set */
		if (urb && urb->status != -EINPROGRESS &&
		    ep->nextpid != USB_PID_ACK)
			finish_request(isp1362, ep, urb, regs);
	}
	atomic_dec(&isp1362->atl_finishing);
}

static irqreturn_t isp1362_irq(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct isp1362 *isp1362 = hcd_to_isp1362(hcd);
	u16 irqstat;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&isp1362->lock);

	isp1362_write_reg16(isp1362, HCuPINTENB, 0);
	irqstat = isp1362_read_reg16(isp1362, HCuPINT);
	isp1362_write_reg16(isp1362, HCuPINT, irqstat);

	if (irqstat & (HCuPINT_ATL | HCuPINT_SOF)) {
		ret = IRQ_HANDLED;
		finish_atl_transfers(isp1362, regs);
	}

	if (irqstat & HCuPINT_OPR) {
		u32 intstat = isp1362_read_reg32(isp1362, HCINTSTAT);

		isp1362_write_reg32(isp1362, HCINTSTAT, intstat);
		if (intstat & HCINT_UE) {
			ERR("Unrecoverable error\n");
			/* What should we do here? Reset? */
		}
		if (intstat & HCINT_RHSC)
			/* When root hub or any of its ports is going to
			   come out of suspend, it may take more than 10ms
			   for status bits to stabilize. */
			mod_timer(&hcd->rh_timer,
				  jiffies + msecs_to_jiffies(20) + 1);
		if (intstat & HCINT_RD) {
			DBG("---- remote wakeup\n");
			/* schedule_work(&isp1362->rh_resume); */
			ret = IRQ_HANDLED;
		}
		irqstat &= ~HCuPINT_OPR;
		ret = IRQ_HANDLED;
	}

	if (irqstat & (HCuPINT_ATL | HCuPINT_SOF))
		start_atl_transfers(isp1362);

	isp1362_write_reg16(isp1362, HCuPINTENB, isp1362->irqenb);
	spin_unlock(&isp1362->lock);
	return ret;
}

/*-----------------------------------------------------------------*/

/* usb 1.1 says max 90% of a frame is available for periodic transfers.
 * this driver doesn't promise that much since it's got to handle an
 * IRQ per packet; irq handling latencies also use up that time.
 */

/* out of 1000 us */
#define MAX_PERIODIC_LOAD	600

static int balance(struct isp1362 *isp1362, u16 period, u16 load)
{
	int i, branch = -ENOSPC;

	/* search for the least loaded schedule branch of that period
	   which has enough bandwidth left unreserved. */
	for (i = 0; i < period; i++) {
		if (branch < 0 || isp1362->load[branch] > isp1362->load[i]) {
			int j;

			for (j = i; j < PERIODIC_SIZE; j += period) {
				if ((isp1362->load[j] + load)
				    > MAX_PERIODIC_LOAD)
					break;
			}
			if (j < PERIODIC_SIZE)
				continue;
			branch = i;
		}
	}
	return branch;
}
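
/*
 * Illustrative sketch only, not part of the original driver: a worked
 * example of how balance() is used by the enqueue path below.  The numbers
 * are assumed: an interrupt endpoint polled every 8 frames has its interval
 * halved to ep->period = 4, so branches 0..3 are candidates, and every slot
 * i, i + 4, i + 8, ... of the chosen branch must still have ep->load
 * microseconds free under MAX_PERIODIC_LOAD.  The least loaded branch that
 * satisfies this in all of its slots wins; otherwise -ENOSPC is returned.
 */
#if 0
	/* as used in isp1362_urb_enqueue() below */
	ret = ep->branch = balance(isp1362, ep->period, ep->load);
	if (ret < 0)		/* -ENOSPC: some slot of every branch is full */
		goto fail;
	ret = 0;
#endif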

/* NB! ALL the code above this point runs with isp1362->lock
   held, irqs off
*/

/*-----------------------------------------------------------------*/

static int isp1362_urb_enqueue(struct usb_hcd *hcd,
			       struct usb_host_endpoint *hep,
			       struct urb *urb, unsigned mem_flags)
{
	struct isp1362 *isp1362 = hcd_to_isp1362(hcd);
	struct usb_device *udev = urb->dev;
	unsigned int pipe = urb->pipe;
	int is_out = !usb_pipein(pipe);
	int type = usb_pipetype(pipe);
	int epnum = usb_pipeendpoint(pipe);
	struct isp1362_ep *ep = NULL;
	unsigned long flags;
	int i;
	int ret = 0;

	urb_dbg(urb, "Enqueue");

	if (type == PIPE_ISOCHRONOUS) {
		ERR("Isochronous transfers not supported\n");
		urb_dbg(urb, "Refused to enqueue");
		return -ENXIO;
	}

	/* avoid all allocations within spinlocks: request or endpoint */
	if (!hep->hcpriv) {
		ep = kzalloc(sizeof *ep, mem_flags);
		if (!ep)
			return -ENOMEM;
	}

	spin_lock_irqsave(&isp1362->lock, flags);

	if (!HC_IS_RUNNING(hcd->state)) {
		ret = -ENODEV;
		goto fail;
	}

	if (hep->hcpriv)
		ep = hep->hcpriv;
	else {
		INIT_LIST_HEAD(&ep->schedule);
		ep->udev = usb_get_dev(udev);
		ep->epnum = epnum;
		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
		usb_settoggle(udev, epnum, is_out, 0);

		if (type == PIPE_CONTROL)
			ep->nextpid = USB_PID_SETUP;
		else if (is_out)
			ep->nextpid = USB_PID_OUT;
		else
			ep->nextpid = USB_PID_IN;

		if (urb->interval) {
			/* With INT URBs submitted, the driver works with
			   SOF interrupt enabled and ATL interrupt disabled.
			   After the PTDs are written to fifo ram, the chip
			   starts fifo processing and usb transfers after
			   the next SOF and continues until the transfers
			   are finished (succeeded or failed) or the frame
			   ends.  Therefore, the transfers occur only in
			   every second frame, while fifo reading/writing
			   and data processing occur in every other second
			   frame. */
			if (urb->interval < 2)
				urb->interval = 2;
			if (urb->interval > 2 * PERIODIC_SIZE)
				urb->interval = 2 * PERIODIC_SIZE;
			ep->period = urb->interval >> 1;
			ep->branch = PERIODIC_SIZE;
			ep->load = usb_calc_bus_time(udev->speed, !is_out,
					(type == PIPE_ISOCHRONOUS),
					usb_maxpacket(udev, pipe, is_out))
						/ 1000;
		}
		hep->hcpriv = ep;
		ep->hep = hep;
	}

	/* maybe put endpoint into schedule */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		if (list_empty(&ep->schedule))
			list_add_tail(&ep->schedule, &isp1362->async);
		break;
	case PIPE_INTERRUPT:
		urb->interval = ep->period;
		ep->length = min((int)ep->maxpacket,
				 urb->transfer_buffer_length);

		/* urb submitted for already existing endpoint */
		if (ep->branch < PERIODIC_SIZE)
			break;

		ret = ep->branch = balance(isp1362, ep->period, ep->load);
		if (ret < 0)
			goto fail;
		ret = 0;

		urb->start_frame = (isp1362->fmindex & (PERIODIC_SIZE - 1))
					+ ep->branch;

		/* sort each schedule branch by period (slow before fast)
		   to share the faster parts of the tree without needing
		   dummy/placeholder nodes */
		DBG("schedule qh%d/%p branch %d\n",
		    ep->period, ep, ep->branch);
		for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
			struct isp1362_ep **prev = &isp1362->periodic[i];
			struct isp1362_ep *here = *prev;

			while (here && ep != here) {
				if (ep->period > here->period)
					break;
				prev = &here->next;
				here = *prev;
			}
			if (ep != here) {
				ep->next = here;
				*prev = ep;
			}
			isp1362->load[i] += ep->load;
		}
		hcd->self.bandwidth_allocated += ep->load / ep->period;

		/* switch over to SOFint */
		if (!isp1362->periodic_count++) {
			isp1362->irqenb &= ~HCuPINT_ATL;
			isp1362->irqenb |= HCuPINT_SOF;
			isp1362_write_reg16(isp1362, HCuPINTENB,
					    isp1362->irqenb);
		}
	}

	/* in case of unlink-during-submit */
	spin_lock(&urb->lock);
	if (urb->status != -EINPROGRESS) {
		spin_unlock(&urb->lock);
		finish_request(isp1362, ep, urb, NULL);
		ret = 0;
		goto fail;
	}
	urb->hcpriv = hep;
	spin_unlock(&urb->lock);

	start_atl_transfers(isp1362);
 fail:
	spin_unlock_irqrestore(&isp1362->lock, flags);
	return ret;
}
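
/*
 * Illustrative worked example, not part of the original driver: how
 * urb->interval maps onto the half-rate periodic schedule in
 * isp1362_urb_enqueue() above (values on the left are examples).
 *
 *   urb->interval (frames)        after clamping       ep->period (slots)
 *   1                             2                    1
 *   8                             8                    4
 *   anything > 2 * PERIODIC_SIZE  2 * PERIODIC_SIZE    PERIODIC_SIZE
 *
 * Since transfers run only in every second frame (the other frames are
 * spent on fifo reading/writing), the effective polling interval in frames
 * is always 2 * ep->period.  ep->load is usb_calc_bus_time() divided by
 * 1000, i.e. an estimate in microseconds, which is what the 600 us
 * MAX_PERIODIC_LOAD budget per 1000 us frame is compared against.
 */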

/* Dequeue URBs. */
static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct isp1362 *isp1362 = hcd_to_isp1362(hcd);
	struct usb_host_endpoint *hep;
	struct isp1362_ep *ep, *ep_act;
	unsigned long flags;

	spin_lock_irqsave(&isp1362->lock, flags);
	hep = urb->hcpriv;
	/* URB already unlinked (or never linked)? */
	if (!hep) {
		spin_unlock_irqrestore(&isp1362->lock, flags);
		return 0;
	}
	ep = hep->hcpriv;
	WARN_ON(hep != ep->hep);

	/* In front of queue? */
	if (ep->hep->urb_list.next == &urb->urb_list)
		/* active? */
		for (ep_act = isp1362->atl_active; ep_act;
		     ep_act = ep_act->active)
			if (ep_act == ep) {
				VDBG("dequeue, urb %p active; wait for irq\n",
				     urb);
				urb = NULL;
				break;
			}

	if (urb)
		finish_request(isp1362, ep, urb, NULL);

	spin_unlock_irqrestore(&isp1362->lock, flags);
	return 0;
}

static void isp1362_endpoint_disable(struct usb_hcd *hcd,
				     struct usb_host_endpoint *hep)
{
	int i;
	struct isp1362_ep *ep = hep->hcpriv;

	if (!ep)
		return;

	/* assume we'd just wait for the irq */
	for (i = 0; i < 100 && !list_empty(&hep->urb_list); i++)
		msleep(3);
	if (!list_empty(&hep->urb_list))
		WARN("ep %p not empty?\n", ep);

	usb_put_dev(ep->udev);
	kfree(ep);
	hep->hcpriv = NULL;
}

static int isp1362_get_frame(struct usb_hcd *hcd)
{
	struct isp1362 *isp1362 = hcd_to_isp1362(hcd);
	u32 fmnum;
	unsigned long flags;

	spin_lock_irqsave(&isp1362->lock, flags);
	fmnum = isp1362_read_reg32(isp1362, HCFMNUM);
	spin_unlock_irqrestore(&isp1362->lock, flags);

	return (int)fmnum;
}

/*-----------------------------------------------------------------*/

/* Adapted from ohci-hub.c.  Currently we don't support autosuspend.
*/
static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct isp1362 *isp1362 = hcd_to_isp1362(hcd);
	int ports, i, changed = 0;
	unsigned long flags;

	if (!HC_IS_RUNNING(hcd->state))
		return -1;

	/* Report no status change now, if we are
	   scheduled to be called later */
	if (timer_pending(&hcd->rh_timer))
		return 0;

	ports = isp1362->rhdesca & RH_A_NDP;
	spin_lock_irqsave(&isp1362->lock, flags);
	isp1362->rhstatus = isp1362_read_reg32(isp1362, HCRHSTATUS);
	if (isp1362->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
		buf[0] = changed = 1;
	else
		buf[0] = 0;

	for (i = 0; i < ports; i++) {
		u32 status = isp1362->rhport[i] =
			isp1362_read_reg32(isp1362,
					   i ? HCRHPORT2 : HCRHPORT1);

		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
			      RH_PS_OCIC | RH_PS_PRSC)) {
			changed = 1;
			buf[0] |= 1 << (i + 1);
			continue;
		}