/* usb-uhci-q.c */
else if (high_bw && ((type == PIPE_BULK) || (type == PIPE_CONTROL)) &&
(hcpriv->use_loop) && time_after(jiffies, hcpriv->started + IDLE_TIMEOUT))
disable_desc_loop(uhci, urb);
}
uhci->timeout_check=jiffies;
}
/*###########################################################################*/
// INTERRUPT PROCESSING ROUTINES
/*###########################################################################*/
/*
* Map status to standard result codes
*
* <status> is (td->status & 0xFE0000) [a.k.a. uhci_status_bits(td->status)]
* <dir_out> is True for output TDs and False for input TDs.
*/
static int uhci_map_status (int status, int dir_out)
{
if (!status)
return 0;
if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
return -EPROTO;
if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */
if (dir_out)
return -ETIMEDOUT;
else
return -EILSEQ;
}
if (status & TD_CTRL_NAK) /* NAK */
return -ETIMEDOUT;
if (status & TD_CTRL_BABBLE) /* Babble */
return -EOVERFLOW;
if (status & TD_CTRL_DBUFERR) /* Buffer error */
return -ENOSR;
if (status & TD_CTRL_STALLED) /* Stalled */
return -EPIPE;
if (status & TD_CTRL_ACTIVE) /* Active */
return 0;
return -EPROTO;
}
/*-------------------------------------------------------------------*/
/*
 * Flip the data-toggle bit of every TD belonging to this URB and to all
 * URBs queued behind it, and record the inverted toggle in the endpoint
 * state, so a queued bulk pipe recovers a consistent toggle sequence.
 */
static void correct_data_toggles(struct urb *urb)
{
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe),
		       !uhci_get_toggle (urb));

	while (urb) {
		urb_priv_t *priv = urb->hcpriv;
		uhci_desc_t *qh = list_entry (priv->desc_list.next, uhci_desc_t, desc_list);
		struct list_head *entry;

		dbg("URB to correct %p\n", urb);

		/* walk every TD hanging vertically off this URB's QH */
		for (entry = qh->vertical.next; entry != &qh->vertical; entry = entry->next) {
			uhci_desc_t *td = list_entry (entry, uhci_desc_t, vertical);
			/* XOR flips just the toggle bit in the TD token */
			td->hw.td.info ^= cpu_to_le32(1 << TD_TOKEN_TOGGLE);
		}
		urb = priv->next_queued_urb;
	}
}
/*-------------------------------------------------------------------*/
/*
 * Process the TDs of a finished (or aborted) control/bulk transfer and
 * compute urb->actual_length and urb->status.
 *
 * For IN-control transfers, process_transfer gets a bit more complicated,
 * since there are devices that return less data (eg. strings) than they
 * have announced. This leads to a queue abort due to the short packet,
 * the status stage is not executed. If this happens, the status stage
 * is manually re-executed here.
 *
 * mode: CLEAN_TRANSFER_REGULAR:       regular completion (unlink QH)
 *       CLEAN_TRANSFER_DELETION_MARK: QHs already unlinked (for async unlink_urb)
 * NOTE(review): an older version of this comment named PROCESS_TRANSFER_*
 * modes, but the code below and the callers in process_urb() use the
 * CLEAN_TRANSFER_* constants; the names above follow the code.
 *
 * Returns 0 while TDs are still active or on success, otherwise the
 * mapped error of the failing TD. Called with urb_list_lock held.
 */
static int process_transfer (struct uhci_hcd *uhci, struct urb *urb, int mode)
{
	urb_priv_t *urb_priv = urb->hcpriv;
	struct list_head *qhl = urb_priv->desc_list.next;
	uhci_desc_t *qh = list_entry (qhl, uhci_desc_t, desc_list);
	struct list_head *p = qh->vertical.next;
	/* last TD of the whole transfer (the status stage for control) */
	uhci_desc_t *desc= list_entry (urb_priv->desc_list.prev, uhci_desc_t, desc_list);
	uhci_desc_t *last_desc = list_entry (desc->vertical.prev, uhci_desc_t, vertical);
	int data_toggle = uhci_get_toggle (urb);	// save initial data_toggle
	int maxlength;	// extracted and remapped info from TD
	int actual_length;
	int status = 0, ret = 0;

	//dbg("process_transfer: urb %p, urb_priv %p, qh %p last_desc %p\n",urb,urb_priv, qh, last_desc);

	/* if the status phase has been retriggered and the
	   queue is empty or the last status-TD is inactive, the retriggered
	   status stage is completed
	 */
	if (urb_priv->flags &&
	    ((qh->hw.qh.element == cpu_to_le32(UHCI_PTR_TERM)) || !is_td_active(desc)))
		goto transfer_finished;

	urb->actual_length=0;

	for (; p != &qh->vertical; p = p->next) {
		desc = list_entry (p, uhci_desc_t, vertical);

		if (is_td_active(desc)) {	// do not process active TDs
			if (mode == CLEAN_TRANSFER_DELETION_MARK)	// if called from async_unlink
				uhci_clean_transfer(uhci, urb, qh, CLEAN_TRANSFER_DELETION_MARK);
			return ret;
		}

		actual_length = uhci_actual_length(desc);	// extract transfer parameters from TD
		/* max packet length encoded in bits 21..31 of the TD token (len-1) */
		maxlength = (((le32_to_cpu(desc->hw.td.info) >> 21) & 0x7ff) + 1) & 0x7ff;
		status = uhci_map_status (uhci_status_bits (le32_to_cpu(desc->hw.td.status)), usb_pipeout (urb->pipe));

		if (status == -EPIPE) {		// see if EP is stalled
			// set up stalled condition
			usb_endpoint_halt (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe));
		}

		if (status && (status != -EPIPE) && (status != -EOVERFLOW)) {
			// if any error occurred stop processing of further TDs
			// only set ret if status returned an error
			ret = status;
			urb->error_count++;
			break;
		}
		// SETUP packets carry no payload to account for
		else if ((le32_to_cpu(desc->hw.td.info) & 0xff) != USB_PID_SETUP)
			urb->actual_length += actual_length;

		// got less data than requested
		if ( (actual_length < maxlength)) {
			if (urb->transfer_flags & USB_DISABLE_SPD) {
				status = -EREMOTEIO;	// treat as real error
				dbg("process_transfer: SPD!!");
				break;	// exit after this TD because SP was detected
			}

			// short read during control-IN: re-start status stage
			if ((usb_pipetype (urb->pipe) == PIPE_CONTROL)) {
				if (uhci_packetid(le32_to_cpu(last_desc->hw.td.info)) == USB_PID_OUT) {
					set_qh_element(qh, last_desc->dma_addr);	// re-trigger status stage
					dbg("short packet during control transfer, retrigger status stage @ %p",last_desc);
					urb_priv->flags = 1;	// mark as short control packet
					return 0;	// finished later, once the status TD completes
				}
			}

			// all other cases: short read is OK
			data_toggle = uhci_toggle (le32_to_cpu(desc->hw.td.info));
			break;
		}
		else if (status) {
			// -EPIPE / -EOVERFLOW on a full-length TD still ends the transfer
			ret = status;
			urb->error_count++;
			break;
		}

		data_toggle = uhci_toggle (le32_to_cpu(desc->hw.td.info));
		queue_dbg("process_transfer: len:%d status:%x mapped:%x toggle:%d",
			actual_length, le32_to_cpu(desc->hw.td.status),status, data_toggle);
	}

	/* toggle correction for short bulk transfers (nonqueued/queued) */
	if (usb_pipetype (urb->pipe) == PIPE_BULK ) {
		urb_priv_t *priv=(urb_priv_t*)urb->hcpriv;
		struct urb *next_queued_urb=priv->next_queued_urb;

		if (next_queued_urb) {
			/* a follow-up URB is queued on the same endpoint: if its first
			   TD carries the toggle we ended on, the queue is off by one
			   and every queued TD must be flipped */
			urb_priv_t *next_priv=(urb_priv_t*)next_queued_urb->hcpriv;
			uhci_desc_t *qh = list_entry (next_priv->desc_list.next, uhci_desc_t, desc_list);
			uhci_desc_t *first_td=list_entry (qh->vertical.next, uhci_desc_t, vertical);

			if (data_toggle == uhci_toggle (le32_to_cpu(first_td->hw.td.info))) {
				err("process_transfer: fixed toggle");
				correct_data_toggles(next_queued_urb);
			}
		}
		else
			usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe), !data_toggle);
	}

transfer_finished:
	uhci_clean_transfer(uhci, urb, qh, mode);
	urb->status = status;
	if (high_bw)
		disable_desc_loop(uhci,urb);

	dbg("process_transfer: (end) urb %p, wanted len %d, len %d status %x err %d",
		urb,urb->transfer_buffer_length,urb->actual_length, urb->status, urb->error_count);
	return ret;
}
/*-------------------------------------------------------------------*/
/*
 * Process the TD(s) of an interrupt URB: deliver completed data through
 * the completion handler, then either re-arm the TD for the next interval
 * (urb->interval != 0) or mark it one-shot and unlink the URB.
 *
 * mode: PROCESS_INT_REGULAR: normal periodic processing
 *       PROCESS_INT_REMOVE:  additionally park the TD in the
 *                            free_desc_td cool-down pool
 *
 * Called with uhci->urb_list_lock held; the lock is dropped around the
 * completion callback. Always returns -EINPROGRESS: interrupt URBs stay
 * pending until explicitly unlinked.
 */
static int process_interrupt (struct uhci_hcd *uhci, struct urb *urb, int mode)
{
	urb_priv_t *urb_priv = urb->hcpriv;
	struct list_head *p = urb_priv->desc_list.next;
	uhci_desc_t *desc = list_entry (urb_priv->desc_list.prev, uhci_desc_t, desc_list);
	int actual_length, status = 0, i, ret = -EINPROGRESS;

	for (i = 0; p != &urb_priv->desc_list; p = p->next, i++)	// Maybe we allow more than one TD later ;-)
	{
		desc = list_entry (p, uhci_desc_t, desc_list);

		if (is_td_active(desc) || !(desc->hw.td.status & cpu_to_le32(TD_CTRL_IOC))) {
			// do not process active TDs or one-shot TDs (->no recycling)
			break;
		}

		// extract transfer parameters from TD
		actual_length = uhci_actual_length(desc);
		status = uhci_map_status (uhci_status_bits (le32_to_cpu(desc->hw.td.status)), usb_pipeout (urb->pipe));

		// see if EP is stalled
		if (status == -EPIPE) {
			// set up stalled condition
			usb_endpoint_halt (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe));
		}

		// if any error occurred: ignore this td, and continue
		if (status != 0) {
			urb->error_count++;
			goto recycle;
		}
		else
			urb->actual_length = actual_length;

	recycle:
		((urb_priv_t*)urb->hcpriv)->flags=1;	// set to detect unlink during completion
		uhci_urb_dma_sync(uhci, urb, urb->hcpriv);

		if (urb->complete) {
			urb->status = status;
			spin_unlock(&uhci->urb_list_lock);
			urb->complete ((struct urb *) urb);
			spin_lock(&uhci->urb_list_lock);
		}

		// Re-arm only if the completion handler did not unlink the URB.
		// FIX: the original compared against ECONNRESET without the minus
		// sign; urb->status is 0 or a negative errno here, so that test
		// could never fail and URBs unlinked with -ECONNRESET were
		// incorrectly re-armed.
		if ((urb->status != -ECONNABORTED) && (urb->status != -ECONNRESET) &&
		    (urb->status != -ENOENT) && ((urb_priv_t*)urb->hcpriv)->flags) {
			urb->status = -EINPROGRESS;

			// Recycle INT-TD if interval!=0, else mark TD as one-shot
			if (urb->interval) {
				desc->hw.td.info &= cpu_to_le32(~(1 << TD_TOKEN_TOGGLE));
				if (status==0) {
					((urb_priv_t*)urb->hcpriv)->started=jiffies;
					desc->hw.td.info |= cpu_to_le32((uhci_get_toggle (urb) << TD_TOKEN_TOGGLE));
					uhci_do_toggle (urb);
				} else {
					// on error, retry with the un-advanced toggle
					desc->hw.td.info |= cpu_to_le32((!uhci_get_toggle (urb) << TD_TOKEN_TOGGLE));
				}
				desc->hw.td.status= cpu_to_le32(TD_CTRL_ACTIVE | TD_CTRL_IOC |
					(urb->transfer_flags & USB_DISABLE_SPD ? 0 : TD_CTRL_SPD) | (3 << 27));
				if (urb->dev->speed == USB_SPEED_LOW)
					desc->hw.td.status |=
						__constant_cpu_to_le32 (TD_CTRL_LS);
				mb();	// TD must be fully written before the HC may see it active
			}
			else {
				uhci_unlink_urb_async(uhci, urb, UNLINK_ASYNC_STORE_URB);
				uhci_do_toggle (urb);	// correct toggle after unlink
				clr_td_ioc(desc);	// inactivate TD
			}
		}
		if (mode == PROCESS_INT_REMOVE) {
			// park the TD in the cool-down pool for later reuse
			INIT_LIST_HEAD(&desc->horizontal);
			list_add_tail (&desc->horizontal, &uhci->free_desc_td);
			desc->last_used=UHCI_GET_CURRENT_FRAME(uhci);
		}
	}
	return ret;
}
/*-------------------------------------------------------------------*/
// Process the TDs of an isochronous URB, filling in the per-frame
// iso_frame_desc[] results and recycling the TDs into the cool-down pool.
// mode: PROCESS_ISO_REGULAR: processing only for done TDs, unlink TDs
// mode: PROCESS_ISO_FORCE: force processing, don't unlink TDs (already unlinked)
// Returns 0, -EXDEV if TDs were still active, or -EINVAL on a TD/packet
// count mismatch.
static int process_iso (struct uhci_hcd *uhci, struct urb *urb, int mode)
{
	urb_priv_t *urb_priv = urb->hcpriv;
	struct list_head *p = urb_priv->desc_list.next, *p_tmp;
	uhci_desc_t *desc = list_entry (urb_priv->desc_list.prev, uhci_desc_t, desc_list);
	int i, ret = 0;
	int now=UHCI_GET_CURRENT_FRAME(uhci);

	dbg("urb contains iso request");
	if (is_td_active(desc) && mode==PROCESS_ISO_REGULAR)
		return -EXDEV;	// last TD not finished

	urb->error_count = 0;
	urb->actual_length = 0;
	urb->status = 0;
	// FIX: the dbg below referred to a non-existent variable 's'
	// (left over from an older parameter name); use 'uhci'.
	dbg("process iso urb %p, %li, %i, %i, %i %08x",urb,jiffies,UHCI_GET_CURRENT_FRAME(uhci),
		urb->number_of_packets,mode,le32_to_cpu(desc->hw.td.status));

	for (i = 0; p != &urb_priv->desc_list; i++) {
		desc = list_entry (p, uhci_desc_t, desc_list);

		if (is_td_active(desc)) {
			// means we have completed the last TD, but not the TDs before
			desc->hw.td.status &= cpu_to_le32(~TD_CTRL_ACTIVE);
			dbg("TD still active (%x)- grrr. paranoia!", le32_to_cpu(desc->hw.td.status));
			ret = -EXDEV;
			if (i < urb->number_of_packets)	// FIX: guard OOB write on surplus TDs
				urb->iso_frame_desc[i].status = ret;
			unlink_td (uhci, desc, 1);
			goto err;
		}

		if (mode == PROCESS_ISO_REGULAR)
			unlink_td (uhci, desc, 1);

		if (urb->number_of_packets <= i) {
			dbg("urb->number_of_packets (%d)<=(%d)", urb->number_of_packets, i);
			ret = -EINVAL;
			goto err;
		}

		urb->iso_frame_desc[i].actual_length = uhci_actual_length(desc);
		urb->iso_frame_desc[i].status = uhci_map_status (uhci_status_bits (le32_to_cpu(desc->hw.td.status)), usb_pipeout (urb->pipe));
		urb->actual_length += urb->iso_frame_desc[i].actual_length;

	err:
		// FIX: the original read/wrote iso_frame_desc[i] here even when
		// the TD list had more entries than number_of_packets (the
		// -EINVAL path above), accessing the array out of bounds.
		if (i < urb->number_of_packets) {
			if (urb->iso_frame_desc[i].status != 0) {
				urb->error_count++;
				urb->status = urb->iso_frame_desc[i].status;
			}
			dbg("process_iso: %i: len:%d %08x status:%x",
				i, urb->iso_frame_desc[i].actual_length, le32_to_cpu(desc->hw.td.status),urb->iso_frame_desc[i].status);
		}

		p_tmp = p;
		p = p->next;
		list_del (p_tmp);

		// add to cool down pool
		INIT_LIST_HEAD(&desc->horizontal);
		list_add_tail (&desc->horizontal, &uhci->free_desc_td);
		desc->last_used=now;
	}
	dbg("process_iso: exit %i (%d), actual_len %i", i, ret,urb->actual_length);
	return ret;
}
/*-------------------------------------------------------------------*/
// Dispatch a finished URB to the pipe-type specific processor and, for
// non-interrupt pipes, give completed URBs back to the USB core.
// Called with uhci->urb_list_lock held; the lock is dropped around the
// giveback call.
static int process_urb (struct uhci_hcd *uhci, struct list_head *p)
{
	urb_priv_t *priv = list_entry (p, urb_priv_t, urb_list);
	struct urb *urb = priv->urb;
	int pipetype = usb_pipetype (urb->pipe);
	int ret = 0;

	switch (pipetype) {
	case PIPE_CONTROL:
		ret = process_transfer (uhci, urb, CLEAN_TRANSFER_REGULAR);
		break;
	case PIPE_BULK:
		// a concurrent submit is fiddling with the bulk queues;
		// leave this URB alone for now
		if (uhci->avoid_bulk.counter)
			return 0;
		ret = process_transfer (uhci, urb, CLEAN_TRANSFER_REGULAR);
		break;
	case PIPE_ISOCHRONOUS:
		ret = process_iso (uhci, urb, PROCESS_ISO_REGULAR);
		break;
	case PIPE_INTERRUPT:
		ret = process_interrupt (uhci, urb, PROCESS_INT_REGULAR);
		break;
	}

	// interrupt URBs are completed from process_interrupt() itself
	if (urb->status != -EINPROGRESS && pipetype != PIPE_INTERRUPT) {
		dequeue_urb (uhci, urb);
		uhci_free_priv(uhci, urb, urb->hcpriv);
		spin_unlock(&uhci->urb_list_lock);
		dbg("giveback urb %p, status %i, length %i\n",
			urb, urb->status, urb->transfer_buffer_length);
		usb_hcd_giveback_urb(&uhci->hcd, urb);
		spin_lock(&uhci->urb_list_lock);
	}
	return ret;
}
/*###########################################################################*/
// EMERGENCY ROOM
/*###########################################################################*/
/* used to reanimate a halted hostcontroller which signals no interrupts anymore.
   This is a shortcut for unloading and reloading the module, and should be only
   used as the last resort, but some VIA chips need it.
   Returns 0 on success, -ENOMEM if the schedule skeleton could not be rebuilt.
*/
static int hc_defibrillate(struct uhci_hcd *uhci)
{
	int ret;

	err("Watchdog timeout, host controller obviously clinically dead, defibrillating...\n"
		"Expect disconnections for all devices on this controller!");

	uhci->running=0;
	// FIX: casting the register base to 'int' truncates the address on
	// 64-bit platforms; use unsigned long for pointer-sized arithmetic.
	outw (USBCMD_HCRESET, (unsigned long)uhci->hcd.regs + USBCMD);
	uhci_stop(&uhci->hcd);

	// rebuild the frame-list skeleton from scratch
	ret=init_skel(uhci);
	if (ret)
		return -ENOMEM;

	set_td_ioc(uhci->td128ms);	// enable watchdog interrupt
	hc_irq_run(uhci);
	uhci->reanimations++;
	err("Host controller restart done...");
	return 0;
}