/* ehci-sched.c */
		qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
	} else
		dbg ("reused previous qh %p schedule", qh);

	/* stuff into the periodic schedule */
	qh->qh_state = QH_STATE_LINKED;
	dbg ("scheduled qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
		qh, qh->usecs, qh->c_usecs,
		qh->period, frame, uframe, qh->gap_uf);
	do {
		if (unlikely (ehci->pshadow [frame].ptr != 0)) {
// FIXME -- just link toward the end, before any qh with a shorter period,
// AND accommodate it already having been linked here (after some other qh)
// AS WELL AS updating the schedule checking logic
			BUG ();
		} else {
			ehci->pshadow [frame].qh = qh_get (qh);
			ehci->periodic [frame] = QH_NEXT (qh->qh_dma);
		}
		wmb ();
		frame += qh->period;
	} while (frame < ehci->periodic_size);

	/* update per-qh bandwidth for usbfs */
	hcd_to_bus (&ehci->hcd)->bandwidth_allocated +=
		(qh->usecs + qh->c_usecs) / qh->period;

	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		status = enable_periodic (ehci);
done:
	return status;
}

static int intr_submit (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	struct hcd_dev		*dev;
	int			is_input;
	int			status = 0;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = usb_pipeendpoint (urb->pipe);
	is_input = usb_pipein (urb->pipe);
	if (is_input)
		epnum |= 0x10;

	spin_lock_irqsave (&ehci->lock, flags);
	dev = (struct hcd_dev *)urb->dev->hcpriv;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
	if (qh == 0) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
	BUG_ON (qh == 0);

	/* ... update usbfs periodic stats */
	hcd_to_bus (&ehci->hcd)->bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}
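/*
 * Note on the two qh_append_tds() calls above: the first call passes an
 * empty qtd list, so it only finds or allocates the endpoint's QH; that
 * lets qh_schedule() report bandwidth/scheduling errors before any of
 * this urb's qtds are attached.  The second call then queues the real
 * qtds onto a QH that is known to exist, which is why it is expected
 * never to fail (BUG_ON).
 */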
/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc (int mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kmalloc(sizeof *stream, mem_flags);
	if (likely (stream != 0)) {
		memset (stream, 0, sizeof(*stream));
		INIT_LIST_HEAD(&stream->td_list);
		INIT_LIST_HEAD(&stream->free_list);
		stream->next_uframe = -1;
		stream->refcount = 1;
	}
	return stream;
}

static void
iso_stream_init (
	struct ehci_iso_stream	*stream,
	struct usb_device	*dev,
	int			pipe,
	unsigned		interval
)
{
	static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	u32			buf1;
	unsigned		epnum, maxp;
	int			is_input;
	long			bandwidth;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint (pipe);
	is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
	if (is_input) {
		maxp = dev->epmaxpacketin [epnum];
		buf1 = (1 << 11);
	} else {
		maxp = dev->epmaxpacketout [epnum];
		buf1 = 0;
	}

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = hb_mult(maxp);

		stream->highspeed = 1;

		maxp = max_packet(maxp);
		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_le32 (buf1);
		stream->buf2 = cpu_to_le32 (multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->usecs = HS_USECS_ISO (maxp);
		bandwidth = stream->usecs * 8;
		bandwidth /= 1 << (interval - 1);

	} else {
		u32		addr;

		addr = dev->ttport << 24;
		addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->usecs = HS_USECS_ISO (maxp);
		if (is_input) {
			u32	tmp;

			addr |= 1 << 31;
			stream->c_usecs = stream->usecs;
			stream->usecs = HS_USECS_ISO (1);
			stream->raw_mask = 1;

			/* pessimistic c-mask */
			tmp = usb_calc_bus_time (USB_SPEED_FULL, 1, 0, maxp)
					/ (125 * 1000);
			stream->raw_mask |= 3 << (tmp + 9);
		} else
			stream->raw_mask = smask_out [maxp / 188];
		bandwidth = stream->usecs + stream->c_usecs;
		bandwidth /= 1 << (interval + 2);

		/* stream->splits gets created from raw_mask later */
		stream->address = cpu_to_le32 (addr);
	}
	stream->bandwidth = bandwidth;

	stream->udev = dev;

	stream->bEndpointAddress = is_input | epnum;
	stream->interval = interval;
	stream->maxp = maxp;
}

static void
iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
{
	stream->refcount--;

	/* free whenever just a dev->ep reference remains.
	 * not like a QH -- no persistent state (toggle, halt)
	 */
	if (stream->refcount == 1) {
		int		is_in;
		struct hcd_dev	*dev = stream->udev->hcpriv;

		// BUG_ON (!list_empty(&stream->td_list));

		while (!list_empty (&stream->free_list)) {
			struct list_head	*entry;

			entry = stream->free_list.next;
			list_del (entry);

			/* knows about ITD vs SITD */
			if (stream->highspeed) {
				struct ehci_itd		*itd;

				itd = list_entry (entry, struct ehci_itd,
						itd_list);
				dma_pool_free (ehci->itd_pool, itd,
						itd->itd_dma);
			} else {
				struct ehci_sitd	*sitd;

				sitd = list_entry (entry, struct ehci_sitd,
						sitd_list);
				dma_pool_free (ehci->sitd_pool, sitd,
						sitd->sitd_dma);
			}
		}

		is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
		stream->bEndpointAddress &= 0x0f;
		dev->ep[is_in + stream->bEndpointAddress] = NULL;

		if (stream->rescheduled) {
			ehci_info (ehci, "ep%d%s-iso rescheduled "
				"%lu times in %lu seconds\n",
				stream->bEndpointAddress, is_in ? "in" : "out",
				stream->rescheduled,
				((jiffies - stream->start)/HZ));
		}

		kfree(stream);
	}
}

static inline struct ehci_iso_stream *
iso_stream_get (struct ehci_iso_stream *stream)
{
	if (likely (stream != 0))
		stream->refcount++;
	return stream;
}

static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned		epnum;
	struct hcd_dev		*dev;
	struct ehci_iso_stream	*stream;
	unsigned long		flags;

	epnum = usb_pipeendpoint (urb->pipe);
	if (usb_pipein(urb->pipe))
		epnum += 0x10;

	spin_lock_irqsave (&ehci->lock, flags);

	dev = (struct hcd_dev *)urb->dev->hcpriv;
	stream = dev->ep [epnum];

	if (unlikely (stream == 0)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely (stream != 0)) {
			/* dev->ep owns the initial refcount */
			dev->ep[epnum] = stream;
			iso_stream_init(stream, urb->dev, urb->pipe,
					urb->interval);
		}

	/* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
	} else if (unlikely (stream->hw_info1 != 0)) {
		ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum & 0x0f,
			(epnum & 0x10) ? "in" : "out");
		stream = NULL;
	}

	/* caller guarantees an eventual matching iso_stream_put */
	stream = iso_stream_get (stream);

	spin_unlock_irqrestore (&ehci->lock, flags);
	return stream;
}
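/*
 * Stream lifetime note: iso_stream_alloc() starts the refcount at 1, and
 * that initial reference is owned by dev->ep[epnum]; iso_stream_find()
 * takes a second reference for its caller.  iso_stream_put() therefore
 * tears the stream down once only the dev->ep reference remains
 * (refcount == 1).
 *
 * For full-speed OUT endpoints, each SSPLIT microframe carries at most
 * 188 bytes of isochronous payload, so smask_out[maxp / 188] sets one
 * start-split bit per 188 bytes -- e.g. maxp == 564 gives
 * smask_out[3] == 0x0f, four consecutive start-split microframes.
 */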
"in" : "out"); stream = NULL; } /* caller guarantees an eventual matching iso_stream_put */ stream = iso_stream_get (stream); spin_unlock_irqrestore (&ehci->lock, flags); return stream;}/*-------------------------------------------------------------------------*//* ehci_iso_sched ops can be shared, ITD-only, or SITD-only */static struct ehci_iso_sched *iso_sched_alloc (unsigned packets, int mem_flags){ struct ehci_iso_sched *iso_sched; int size = sizeof *iso_sched; size += packets * sizeof (struct ehci_iso_packet); iso_sched = kmalloc (size, mem_flags); if (likely (iso_sched != 0)) { memset(iso_sched, 0, size); INIT_LIST_HEAD (&iso_sched->td_list); } return iso_sched;}static inline voiditd_sched_init ( struct ehci_iso_sched *iso_sched, struct ehci_iso_stream *stream, struct urb *urb){ unsigned i; dma_addr_t dma = urb->transfer_dma; /* how many uframes are needed for these transfers */ iso_sched->span = urb->number_of_packets * stream->interval; /* figure out per-uframe itd fields that we'll need later * when we fit new itds into the schedule. */ for (i = 0; i < urb->number_of_packets; i++) { struct ehci_iso_packet *uframe = &iso_sched->packet [i]; unsigned length; dma_addr_t buf; u32 trans; length = urb->iso_frame_desc [i].length; buf = dma + urb->iso_frame_desc [i].offset; trans = EHCI_ISOC_ACTIVE; trans |= buf & 0x0fff; if (unlikely (((i + 1) == urb->number_of_packets)) && !(urb->transfer_flags & URB_NO_INTERRUPT)) trans |= EHCI_ITD_IOC; trans |= length << 16; uframe->transaction = cpu_to_le32 (trans); /* might need to cross a buffer page within a td */ uframe->bufp = (buf & ~(u64)0x0fff); buf += length; if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff)))) uframe->cross = 1; }}static voidiso_sched_free ( struct ehci_iso_stream *stream, struct ehci_iso_sched *iso_sched){ if (!iso_sched) return; // caller must hold ehci->lock! list_splice (&iso_sched->td_list, &stream->free_list); kfree (iso_sched);}static intitd_urb_transaction ( struct ehci_iso_stream *stream, struct ehci_hcd *ehci, struct urb *urb, int mem_flags){ struct ehci_itd *itd; dma_addr_t itd_dma; int i; unsigned num_itds; struct ehci_iso_sched *sched; unsigned long flags; sched = iso_sched_alloc (urb->number_of_packets, mem_flags); if (unlikely (sched == 0)) return -ENOMEM; itd_sched_init (sched, stream, urb); if (urb->interval < 8) num_itds = 1 + (sched->span + 7) / 8; else num_itds = urb->number_of_packets; /* allocate/init ITDs */ spin_lock_irqsave (&ehci->lock, flags); for (i = 0; i < num_itds; i++) { /* free_list.next might be cache-hot ... but maybe * the HC caches it too. avoid that issue for now. 
/*-------------------------------------------------------------------------*/

static inline int
itd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	u32			uframe,
	u8			usecs,
	u32			period
)
{
	uframe %= period;
	do {
		/* can't commit more than 80% periodic == 100 usec */
		if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
				> (100 - usecs))
			return 0;

		/* we know urb->interval is 2^N uframes */
		uframe += period;
	} while (uframe < mod);
	return 1;
}

static inline int
sitd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	struct ehci_iso_stream	*stream,
	u32			uframe,
	struct ehci_iso_sched	*sched,
	u32			period_uframes
)
{
	u32			mask, tmp;
	u32			frame, uf;

	mask = stream->raw_mask << (uframe & 7);

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* this multi-pass logic is simple, but performance may
	 * suffer when the schedule data isn't cached.
	 */

	/* check bandwidth */
	uframe %= period_uframes;
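/*
 * A minimal userspace sketch (not part of ehci-sched.c) of the 80%-rule
 * check that itd_slot_ok() performs above: a candidate slot is usable
 * only if every microframe it touches stays at or under 100 of the 125
 * usecs available to periodic transfers.  periodic_usecs_stub() is a
 * hypothetical stand-in for the driver's periodic_usecs(); real numbers
 * come from walking the periodic schedule.
 */
#include <stdio.h>

static unsigned periodic_usecs_stub(unsigned frame, unsigned uframe)
{
	(void)frame;
	(void)uframe;
	/* pretend every microframe already carries 40 usecs of traffic */
	return 40;
}

static int slot_ok(unsigned mod, unsigned uframe, unsigned usecs,
		unsigned period)
{
	uframe %= period;
	do {
		/* can't commit more than 80% of a 125-usec microframe */
		if (periodic_usecs_stub(uframe >> 3, uframe & 0x7)
				> (100 - usecs))
			return 0;
		uframe += period;	/* interval is a power of two */
	} while (uframe < mod);
	return 1;
}

int main(void)
{
	/* 1024-frame schedule = 8192 uframes; claim usecs every 8 uframes */
	printf("50 usecs fits: %d\n", slot_ok(8192, 2, 50, 8)); /* 40+50 <= 100 */
	printf("70 usecs fits: %d\n", slot_ok(8192, 2, 70, 8)); /* 40+70 > 100 */
	return 0;
}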