/* usb-uhci-mem.c -- UHCI HCD memory / schedule-skeleton management (fragment) */
n=0;
while (q != &uhci->free_desc_td && (force || n<100)) {
td = list_entry (q, uhci_desc_t, horizontal);
q=td->horizontal.prev;
if (((td->last_used!=now)&&(td->last_used+1!=now)) || force) {
list_del (&td->horizontal);
delete_desc(uhci,td);
}
n++;
}
}
/*-------------------------------------------------------------------*/
/*
 * Enable/disable the interrupt-on-complete (IOC) bit on the skeleton
 * 1ms and 32ms TDs, so the controller only raises periodic interrupts
 * while there is actually work pending: unlinked URBs (1ms tick) or
 * URBs with a timeout set (32ms tick).
 */
static void uhci_switch_timer_int(struct uhci_hcd *uhci)
{
	if (list_empty(&uhci->urb_unlinked))
		clr_td_ioc(uhci->td1ms);
	else
		set_td_ioc(uhci->td1ms);

	if (!uhci->timeout_urbs)
		clr_td_ioc(uhci->td32ms);
	else
		set_td_ioc(uhci->td32ms);

	wmb();	/* make the TD control-bit updates visible before returning */
}
/*-------------------------------------------------------------------*/
static void enable_desc_loop(struct uhci_hcd *uhci, struct urb *urb)
{
unsigned long flags;
if (urb->transfer_flags & USB_NO_FSBR)
return;
spin_lock_irqsave (&uhci->qh_lock, flags);
uhci->chain_end->hw.qh.head&=cpu_to_le32(~UHCI_PTR_TERM);
mb();
uhci->loop_usage++;
((urb_priv_t*)urb->hcpriv)->use_loop=1;
spin_unlock_irqrestore (&uhci->qh_lock, flags);
}
/*-------------------------------------------------------------------*/
static void disable_desc_loop(struct uhci_hcd *uhci, struct urb *urb)
{
unsigned long flags;
if (urb->transfer_flags & USB_NO_FSBR)
return;
spin_lock_irqsave (&uhci->qh_lock, flags);
if (((urb_priv_t*)urb->hcpriv)->use_loop) {
uhci->loop_usage--;
if (!uhci->loop_usage) {
uhci->chain_end->hw.qh.head|=cpu_to_le32(UHCI_PTR_TERM);
mb();
}
((urb_priv_t*)urb->hcpriv)->use_loop=0;
}
spin_unlock_irqrestore (&uhci->qh_lock, flags);
}
/*-------------------------------------------------------------------*/
/*
 * Add an URB to the active list. Caller must already hold
 * uhci->urb_list_lock (see queue_urb for the locking wrapper).
 * Bulk/control URBs additionally enable the reclamation loop when
 * high-bandwidth mode is on.
 */
static void queue_urb_unlocked (struct uhci_hcd *uhci, struct urb *urb)
{
	urb_priv_t *priv = (urb_priv_t *) urb->hcpriv;
	int pipetype = usb_pipetype (urb->pipe);

	if (high_bw && (PIPE_BULK == pipetype || PIPE_CONTROL == pipetype))
		enable_desc_loop(uhci, urb);

	urb->status = -EINPROGRESS;
	priv->started = jiffies;	/* timestamp for timeout accounting */

	list_add (&priv->urb_list, &uhci->urb_list);
	if (urb->timeout)
		uhci->timeout_urbs++;
	uhci_switch_timer_int(uhci);
}
/*-------------------------------------------------------------------*/
/* Locking wrapper around queue_urb_unlocked: takes urb_list_lock. */
static void queue_urb (struct uhci_hcd *uhci, struct urb *urb)
{
	unsigned long irqflags = 0;

	spin_lock_irqsave (&uhci->urb_list_lock, irqflags);
	queue_urb_unlocked(uhci, urb);
	spin_unlock_irqrestore (&uhci->urb_list_lock, irqflags);
}
/*-------------------------------------------------------------------*/
/*
 * Remove an URB from the active list and undo the bookkeeping done in
 * queue_urb_unlocked (reclamation loop usage, timeout counter).
 * NOTE(review): no lock is taken here — presumably the caller holds
 * urb_list_lock; confirm at call sites.
 */
static void dequeue_urb (struct uhci_hcd *uhci, struct urb *urb)
{
	urb_priv_t *priv = (urb_priv_t *) urb->hcpriv;
	int pipetype;

	dbg("dequeue URB %p",urb);

	pipetype = usb_pipetype (urb->pipe);
	if (high_bw && (PIPE_BULK == pipetype || PIPE_CONTROL == pipetype))
		disable_desc_loop(uhci, urb);

	list_del (&priv->urb_list);
	if (urb->timeout && uhci->timeout_urbs)
		uhci->timeout_urbs--;
}
/*###########################################################################*/
// INIT/FREE FRAME LAYOUT IN MEMORY
/*###########################################################################*/
// Removes ALL qhs in chain (paranoia!)
// Removes ALL qhs in chain (paranoia!)
/*
 * Tear down the schedule skeleton built by init_skel. Safe to call on
 * a partially-built skeleton (it is init_skel's error-path cleanup):
 * every free is guarded by a NULL/initialized check, and all skeleton
 * pointers are reset to NULL at the end.
 *
 * Fix: the else-branch of "if (uhci->control_chain)" used to re-test
 * uhci->control_chain before freeing it — unreachable dead code, since
 * control_chain is provably NULL in that branch. The redundant check
 * has been removed; behavior is unchanged.
 */
static void cleanup_skel (struct uhci_hcd *uhci)
{
	unsigned int n;
	uhci_desc_t *td;

	dbg("cleanup_skel");

	/* force-free all deferred descriptors, regardless of age */
	clean_descs(uhci,1);
	dbg("clean_descs done");

	if (uhci->td32ms) {
		unlink_td(uhci,uhci->td32ms,1);
		delete_desc(uhci, uhci->td32ms);
	}

	if (uhci->td128ms) {
		unlink_td(uhci,uhci->td128ms,1);
		delete_desc(uhci, uhci->td128ms);
	}

	/* free the 8 interrupt-tree skeleton TDs and anything chained on them */
	for (n = 0; n < 8; n++) {
		td = uhci->int_chain[n];
		clean_td_chain (uhci, td);
	}

	/* free the per-frame iso skeleton TDs, then the pointer array itself */
	if (uhci->iso_td) {
		for (n = 0; n < 1024; n++) {
			td = uhci->iso_td[n];
			clean_td_chain (uhci, td);
		}
		kfree (uhci->iso_td);
	}

	if (uhci->framelist)
		pci_free_consistent(uhci->uhci_pci, PAGE_SIZE,
				    uhci->framelist, uhci->framelist_dma);

	if (uhci->control_chain) {
		// completed init_skel? Walk the horizontal QH ring and free every QH.
		struct list_head *p;
		uhci_desc_t *qh, *qh1;

		qh = uhci->control_chain;
		while ((p = qh->horizontal.next) != &qh->horizontal) {
			qh1 = list_entry (p, uhci_desc_t, horizontal);
			delete_qh (uhci, qh1);
		}
		delete_qh (uhci, qh);
	}
	else {
		/* init_skel failed part-way: free whichever skeleton QHs
		 * were allocated. control_chain is known NULL here, so it
		 * needs no check (dead check removed). */
		if (uhci->ls_control_chain)
			delete_desc (uhci, uhci->ls_control_chain);
		if (uhci->bulk_chain)
			delete_desc (uhci, uhci->bulk_chain);
		if (uhci->chain_end)
			delete_desc (uhci, uhci->chain_end);
	}

	if (uhci->desc_pool) {
		pci_pool_destroy(uhci->desc_pool);
		uhci->desc_pool = NULL;
	}

	/* reset all skeleton pointers so a re-run of init_skel starts clean */
	uhci->ls_control_chain = NULL;
	uhci->control_chain = NULL;
	uhci->bulk_chain = NULL;
	uhci->chain_end = NULL;
	for (n = 0; n < 8; n++)
		uhci->int_chain[n] = NULL;

	dbg("cleanup_skel finished");
}
/*-------------------------------------------------------------------*/
// allocates framelist and qh-skeletons
// only HW-links provide continous linking, SW-links stay in their domain (ISO/INT)
// allocates framelist and qh-skeletons
// only HW-links provide continous linking, SW-links stay in their domain (ISO/INT)
/*
 * Build the static UHCI schedule skeleton:
 *
 *   framelist[1024] -> iso TD (per frame) -> interrupt tree (int_chain[0..7])
 *     -> ls_control_chain QH -> control_chain QH -> bulk_chain QH -> chain_end QH
 *
 * Returns 0 on success, -ENOMEM on any allocation failure; on failure
 * cleanup_skel() frees everything allocated so far.
 */
static int init_skel (struct uhci_hcd *uhci)
{
	int n, ret;	/* NOTE(review): ret is assigned once below but its value is never used */
	uhci_desc_t *qh, *td;

	init_dbg("init_skel");

	/* frame list: 1024 32-bit frame pointers = one 4K page, DMA-coherent */
	uhci->framelist = pci_alloc_consistent(uhci->uhci_pci, PAGE_SIZE,
					       &uhci->framelist_dma);
	if (!uhci->framelist)
		return -ENOMEM;
	memset (uhci->framelist, 0, 4096);

	init_dbg("creating descriptor pci_pool");
	/* pool for all TDs/QHs; 16-byte alignment as required for UHCI descriptors */
	uhci->desc_pool = pci_pool_create("uhci_desc", uhci->uhci_pci,
					  sizeof(uhci_desc_t), 16, 0,
					  GFP_DMA | GFP_ATOMIC);
	if (!uhci->desc_pool)
		goto init_skel_cleanup;

	init_dbg("allocating iso desc pointer list");
	/* software-side array of the 1024 per-frame skeleton TDs */
	uhci->iso_td = (uhci_desc_t **) kmalloc (1024 * sizeof (uhci_desc_t*), GFP_KERNEL);
	if (!uhci->iso_td)
		goto init_skel_cleanup;

	/* start from a clean slate so cleanup_skel can tell what was built */
	uhci->ls_control_chain = NULL;
	uhci->control_chain = NULL;
	uhci->bulk_chain = NULL;
	uhci->chain_end = NULL;
	for (n = 0; n < 8; n++)
		uhci->int_chain[n] = NULL;

	init_dbg("allocating iso descs");
	for (n = 0; n < 1024; n++) {
		// allocate skeleton iso/irq-tds
		if (alloc_td (uhci, &td, 0))
			goto init_skel_cleanup;

		uhci->iso_td[n] = td;
		/* each frame pointer starts out aimed at its own iso skeleton TD */
		uhci->framelist[n] = cpu_to_le32((__u32) td->dma_addr);
	}

	init_dbg("allocating qh: chain_end");
	if (alloc_qh (uhci, &qh))
		goto init_skel_cleanup;
	uhci->chain_end = qh;

	/* 1ms timer TD: hangs off chain_end, IOC bit toggled on demand
	 * by uhci_switch_timer_int (0 * TD_CTRL_IOC == IOC off initially) */
	if (alloc_td (uhci, &td, 0))
		goto init_skel_cleanup;

	fill_td (td, 0 * TD_CTRL_IOC, 0, 0); // generate 1ms interrupt (enabled on demand)
	insert_td (uhci, qh, td, 0);
	qh->hw.qh.element &= cpu_to_le32(~UHCI_PTR_TERM); // remove TERM bit
	uhci->td1ms=td;

	dbg("allocating qh: bulk_chain");
	if (alloc_qh (uhci, &qh))
		goto init_skel_cleanup;
	insert_qh (uhci, uhci->chain_end, qh, 0);
	uhci->bulk_chain = qh;

	dbg("allocating qh: control_chain");
	if ((ret = alloc_qh (uhci, &qh)))	/* NOTE(review): ret captured but unused; failure path returns -ENOMEM regardless */
		goto init_skel_cleanup;
	insert_qh (uhci, uhci->bulk_chain, qh, 0);
	uhci->control_chain = qh;

	// disabled reclamation loop
	/* pre-aim chain_end back at control_chain, but leave TERM set so the
	 * loop is inert until enable_desc_loop clears TERM */
	if (high_bw)
		set_qh_head(uhci->chain_end, uhci->control_chain->dma_addr | UHCI_PTR_QH | UHCI_PTR_TERM);

	init_dbg("allocating qh: ls_control_chain");
	if (alloc_qh (uhci, &qh))
		goto init_skel_cleanup;
	insert_qh (uhci, uhci->control_chain, qh, 0);
	uhci->ls_control_chain = qh;

	init_dbg("allocating skeleton INT-TDs");
	/* interrupt tree: int_chain[0] is the 1ms node feeding the QH chain;
	 * int_chain[1..7] (2ms..128ms) all funnel into int_chain[0] */
	for (n = 0; n < 8; n++) {
		uhci_desc_t *td;

		if (alloc_td (uhci, &td, 0))
			goto init_skel_cleanup;

		uhci->int_chain[n] = td;
		if (n == 0) {
			set_td_link(uhci->int_chain[0], uhci->ls_control_chain->dma_addr | UHCI_PTR_QH);
		}
		else {
			set_td_link(uhci->int_chain[n], uhci->int_chain[0]->dma_addr);
		}
	}

	init_dbg("Linking skeleton INT-TDs");
	/* Link each frame's iso TD into the interrupt tree. Frame n links to
	 * int_chain[o] where interval 2^o is the largest power of two such
	 * that (n mod 2^o) == 2^(o-1) - 1; frames with n%128 == 127 link to
	 * the 1ms node directly. This spreads the 2..128ms polling slots
	 * evenly across the 1024 frames. */
	for (n = 0; n < 1024; n++) {
		// link all iso-tds to the interrupt chains
		int m, o;
		dbg("framelist[%i]=%x",n,le32_to_cpu(uhci->framelist[n]));
		if ((n&127)==127)
			/* NOTE(review): raw link write here bypasses set_td_link,
			 * unlike the sibling branch — confirm intentional */
			((uhci_desc_t*) uhci->iso_td[n])->hw.td.link = cpu_to_le32(uhci->int_chain[0]->dma_addr);
		else
			for (o = 1, m = 2; m <= 128; o++, m += m)
				if ((n & (m - 1)) == ((m - 1) / 2))
					set_td_link(((uhci_desc_t*) uhci->iso_td[n]), uhci->int_chain[o]->dma_addr);
	}

	/* 32ms timer TD on the 32ms interrupt node (IOC off until needed) */
	if (alloc_td (uhci, &td, 0))
		goto init_skel_cleanup;

	fill_td (td, 0 * TD_CTRL_IOC, 0, 0); // generate 32ms interrupt (activated later)
	uhci->td32ms=td;
	insert_td_horizontal (uhci, uhci->int_chain[5], td);

	/* 128ms timer TD on the 128ms interrupt node (IOC off until needed) */
	if (alloc_td (uhci, &td, 0))
		goto init_skel_cleanup;

	fill_td (td, 0 * TD_CTRL_IOC, 0, 0); // generate 128ms interrupt (activated later)
	uhci->td128ms=td;
	insert_td_horizontal (uhci, uhci->int_chain[7], td);

	mb();	/* ensure the whole skeleton is visible before the HC runs it */
	init_dbg("init_skel exit");
	return 0;

      init_skel_cleanup:
	cleanup_skel (uhci);	/* frees everything allocated so far */
	return -ENOMEM;
}
/*###########################################################################*/
// UHCI PRIVATE DATA
/*###########################################################################*/
urb_priv_t *uhci_alloc_priv(int mem_flags)
{
urb_priv_t *p;
#ifdef DEBUG_SLAB
p = kmem_cache_alloc(urb_priv_kmem, SLAB_FLAG);
#else
p = kmalloc (sizeof (urb_priv_t), mem_flags);
#endif
if (p) {
memset(p, 0, sizeof(urb_priv_t));
INIT_LIST_HEAD (&p->urb_list);
}
return p;
}
/*-------------------------------------------------------------------*/
/*
 * Release the per-URB private data: undo its DMA mappings, free the
 * block (slab cache under DEBUG_SLAB, kfree otherwise), and clear the
 * URB's hcpriv pointer so it cannot be used after the free.
 */
void uhci_free_priv(struct uhci_hcd *uhci, struct urb *urb, urb_priv_t* p)
{
	uhci_urb_dma_unmap(uhci, urb, p);
	urb->hcpriv = NULL;

#ifdef DEBUG_SLAB
	err("free_priv %p",p);
	kmem_cache_free(urb_priv_kmem, p);
#else
	kfree (p);
#endif
}