trident.c
static void start_adc(struct trident_state *state)
{
    struct dmabuf *dmabuf = &state->dmabuf;
    unsigned int chan_num = dmabuf->channel->num;
    struct trident_card *card = state->card;
    unsigned long flags;

    spin_lock_irqsave(&card->lock, flags);
    if ((dmabuf->mapped || dmabuf->count < (signed)dmabuf->dmasize) && dmabuf->ready) {
        dmabuf->enable |= ADC_RUNNING;
        trident_enable_voice_irq(card, chan_num);
        trident_start_voice(card, chan_num);
    }
    spin_unlock_irqrestore(&card->lock, flags);
}

/* stop playback (lock held) */
extern __inline__ void __stop_dac(struct trident_state *state)
{
    struct dmabuf *dmabuf = &state->dmabuf;
    unsigned int chan_num = dmabuf->channel->num;
    struct trident_card *card = state->card;

    dmabuf->enable &= ~DAC_RUNNING;
    trident_stop_voice(card, chan_num);
    if (state->chans_num == 6) {
        trident_stop_voice(card, state->other_states[0]->dmabuf.channel->num);
        trident_stop_voice(card, state->other_states[1]->dmabuf.channel->num);
        trident_stop_voice(card, state->other_states[2]->dmabuf.channel->num);
        trident_stop_voice(card, state->other_states[3]->dmabuf.channel->num);
    }
    trident_disable_voice_irq(card, chan_num);
}

static void stop_dac(struct trident_state *state)
{
    struct trident_card *card = state->card;
    unsigned long flags;

    spin_lock_irqsave(&card->lock, flags);
    __stop_dac(state);
    spin_unlock_irqrestore(&card->lock, flags);
}

static void start_dac(struct trident_state *state)
{
    struct dmabuf *dmabuf = &state->dmabuf;
    unsigned int chan_num = dmabuf->channel->num;
    struct trident_card *card = state->card;
    unsigned long flags;

    spin_lock_irqsave(&card->lock, flags);
    if ((dmabuf->mapped || dmabuf->count > 0) && dmabuf->ready) {
        dmabuf->enable |= DAC_RUNNING;
        trident_enable_voice_irq(card, chan_num);
        trident_start_voice(card, chan_num);
        if (state->chans_num == 6) {
            trident_start_voice(card, state->other_states[0]->dmabuf.channel->num);
            trident_start_voice(card, state->other_states[1]->dmabuf.channel->num);
            trident_start_voice(card, state->other_states[2]->dmabuf.channel->num);
            trident_start_voice(card, state->other_states[3]->dmabuf.channel->num);
        }
    }
    spin_unlock_irqrestore(&card->lock, flags);
}

#define DMABUF_DEFAULTORDER (15-PAGE_SHIFT)
#define DMABUF_MINORDER 1
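/*
 * Rough sizing note (illustrative, assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * DMABUF_DEFAULTORDER then evaluates to 3, so alloc_dmabuf() below first tries a
 * PAGE_SIZE << 3 = 32 KiB chunk and falls back one order at a time down to
 * DMABUF_MINORDER (order 1, 8 KiB) before giving up with -ENOMEM.
 */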
/* allocate DMA buffer; playback and recording buffers should be allocated separately */
static int alloc_dmabuf(struct trident_state *state)
{
    struct dmabuf *dmabuf = &state->dmabuf;
    void *rawbuf;
    int order;
    struct page *page, *pend;

    /* alloc as big a chunk as we can, FIXME: is this necessary ?? */
    for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
        if ((rawbuf = pci_alloc_consistent(state->card->pci_dev,
                                           PAGE_SIZE << order, &dmabuf->dma_handle)))
            break;
    if (!rawbuf)
        return -ENOMEM;

#ifdef DEBUG
    printk("trident: allocated %ld (order = %d) bytes at %p\n",
           PAGE_SIZE << order, order, rawbuf);
#endif

    dmabuf->ready = dmabuf->mapped = 0;
    dmabuf->rawbuf = rawbuf;
    dmabuf->buforder = order;

    /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
    pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
    for (page = virt_to_page(rawbuf); page <= pend; page++)
        mem_map_reserve(page);

    return 0;
}

/* free DMA buffer */
static void dealloc_dmabuf(struct trident_state *state)
{
    struct dmabuf *dmabuf = &state->dmabuf;
    struct page *page, *pend;

    if (dmabuf->rawbuf) {
        /* undo marking the pages as reserved */
        pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
        for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
            mem_map_unreserve(page);
        pci_free_consistent(state->card->pci_dev, PAGE_SIZE << dmabuf->buforder,
                            dmabuf->rawbuf, dmabuf->dma_handle);
    }
    dmabuf->rawbuf = NULL;
    dmabuf->mapped = dmabuf->ready = 0;
}

static int prog_dmabuf(struct trident_state *state, unsigned rec)
{
    struct dmabuf *dmabuf = &state->dmabuf;
    unsigned bytepersec;
    struct trident_state *s = state;
    unsigned bufsize, dma_nums;
    unsigned long flags;
    int ret, i, order;
    struct page *page, *pend;

    lock_set_fmt(state);
    if (state->chans_num == 6)
        dma_nums = 5;
    else
        dma_nums = 1;

    for (i = 0; i < dma_nums; i++) {
        if (i > 0) {
            s = state->other_states[i - 1];
            dmabuf = &s->dmabuf;
            dmabuf->fmt = state->dmabuf.fmt;
            dmabuf->rate = state->dmabuf.rate;
        }

        spin_lock_irqsave(&s->card->lock, flags);
        dmabuf->hwptr = dmabuf->swptr = dmabuf->total_bytes = 0;
        dmabuf->count = dmabuf->error = 0;
        spin_unlock_irqrestore(&s->card->lock, flags);

        /* allocate DMA buffer if not allocated yet */
        if (!dmabuf->rawbuf) {
            if (i == 0) {
                if ((ret = alloc_dmabuf(state))) {
                    unlock_set_fmt(state);
                    return ret;
                }
            } else {
                if ((order = state->dmabuf.buforder - 1) >= DMABUF_MINORDER)
                    dmabuf->rawbuf = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, order);
                if (!dmabuf->rawbuf) {
                    free_pages((unsigned long)state->dmabuf.rawbuf,
                               state->dmabuf.buforder);
                    state->dmabuf.rawbuf = NULL;
                    i -= 2;
                    for (; i >= 0; i--)
                        free_pages((unsigned long)state->other_states[i]->dmabuf.rawbuf,
                                   state->other_states[i]->dmabuf.buforder);
                    unlock_set_fmt(state);
                    return -ENOMEM;
                }
                dmabuf->ready = dmabuf->mapped = 0;
                dmabuf->buforder = order;
                pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << order) - 1);
                for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
                    mem_map_reserve(page);
            }
        }

        /* FIXME: figure out all this OSS fragment stuff */
        bytepersec = dmabuf->rate << sample_shift[dmabuf->fmt];
        bufsize = PAGE_SIZE << dmabuf->buforder;
        if (dmabuf->ossfragshift) {
            if ((1000 << dmabuf->ossfragshift) < bytepersec)
                dmabuf->fragshift = ld2(bytepersec / 1000);
            else
                dmabuf->fragshift = dmabuf->ossfragshift;
        } else {
            /* let's hand out reasonably big buffers by default */
            dmabuf->fragshift = (dmabuf->buforder + PAGE_SHIFT - 2);
        }
        dmabuf->numfrag = bufsize >> dmabuf->fragshift;
        while (dmabuf->numfrag < 4 && dmabuf->fragshift > 3) {
            dmabuf->fragshift--;
            dmabuf->numfrag = bufsize >> dmabuf->fragshift;
        }
        dmabuf->fragsize = 1 << dmabuf->fragshift;
        if (dmabuf->ossmaxfrags >= 4 && dmabuf->ossmaxfrags < dmabuf->numfrag)
            dmabuf->numfrag = dmabuf->ossmaxfrags;
        dmabuf->fragsamples = dmabuf->fragsize >> sample_shift[dmabuf->fmt];
        dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift;
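        /*
         * Worked example (illustrative numbers only): for 44.1 kHz 16-bit stereo
         * (sample_shift == 2) bytepersec is 176400; with the default 32 KiB buffer
         * (buforder 3 on 4 KiB pages) and no user-set fragment size, fragshift is
         * 3 + 12 - 2 = 13, giving 4 fragments of 8 KiB and dmasize == 32768.
         */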
        memset(dmabuf->rawbuf, (dmabuf->fmt & TRIDENT_FMT_16BIT) ? 0 : 0x80,
               dmabuf->dmasize);

        spin_lock_irqsave(&s->card->lock, flags);
        if (rec) {
            trident_rec_setup(s);
        } else {
            trident_play_setup(s);
        }
        spin_unlock_irqrestore(&s->card->lock, flags);

        /* set the ready flag for the dma buffer */
        dmabuf->ready = 1;

#ifdef DEBUG
        printk("trident: prog_dmabuf(%d), sample rate = %d, format = %d, numfrag = %d, "
               "fragsize = %d dmasize = %d\n",
               dmabuf->channel->num, dmabuf->rate, dmabuf->fmt, dmabuf->numfrag,
               dmabuf->fragsize, dmabuf->dmasize);
#endif
    }
    unlock_set_fmt(state);
    return 0;
}

/* we are doing quantum mechanics here, the buffer can only be empty, half filled
   or completely filled, i.e.
   |------------|------------|  or  |xxxxxxxxxxxx|------------|  or  |xxxxxxxxxxxx|xxxxxxxxxxxx|
   but we almost always get this
   |xxxxxx------|------------|  or  |xxxxxxxxxxxx|xxxxx-------|
   so we have to clear the tail space to "silence"
   |xxxxxx000000|------------|  or  |xxxxxxxxxxxx|xxxxxx000000| */
static void trident_clear_tail(struct trident_state *state)
{
    struct dmabuf *dmabuf = &state->dmabuf;
    unsigned swptr;
    unsigned char silence = (dmabuf->fmt & TRIDENT_FMT_16BIT) ? 0 : 0x80;
    unsigned int len;
    unsigned long flags;

    spin_lock_irqsave(&state->card->lock, flags);
    swptr = dmabuf->swptr;
    spin_unlock_irqrestore(&state->card->lock, flags);

    if (swptr == 0 || swptr == dmabuf->dmasize / 2 || swptr == dmabuf->dmasize)
        return;

    if (swptr < dmabuf->dmasize / 2)
        len = dmabuf->dmasize / 2 - swptr;
    else
        len = dmabuf->dmasize - swptr;

    memset(dmabuf->rawbuf + swptr, silence, len);

    spin_lock_irqsave(&state->card->lock, flags);
    dmabuf->swptr += len;
    dmabuf->count += len;
    spin_unlock_irqrestore(&state->card->lock, flags);

    /* restart the dma machine in case it is halted */
    start_dac(state);
}

static int drain_dac(struct trident_state *state, int nonblock)
{
    DECLARE_WAITQUEUE(wait, current);
    struct dmabuf *dmabuf = &state->dmabuf;
    unsigned long flags;
    unsigned long tmo;
    int count;

    if (dmabuf->mapped || !dmabuf->ready)
        return 0;

    add_wait_queue(&dmabuf->wait, &wait);
    for (;;) {
        /* It seems that we have to set the current state to TASK_INTERRUPTIBLE
           every time to make the process really go to sleep */
        current->state = TASK_INTERRUPTIBLE;

        spin_lock_irqsave(&state->card->lock, flags);
        count = dmabuf->count;
        spin_unlock_irqrestore(&state->card->lock, flags);

        if (count <= 0)
            break;

        if (signal_pending(current))
            break;

        if (nonblock) {
            remove_wait_queue(&dmabuf->wait, &wait);
            current->state = TASK_RUNNING;
            return -EBUSY;
        }

        /* No matter how much data is left in the buffer, we have to wait until
           CSO == ESO/2 or CSO == ESO when the address engine interrupts */
        tmo = (dmabuf->dmasize * HZ) / dmabuf->rate;
        tmo >>= sample_shift[dmabuf->fmt];
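        /*
         * Illustrative only: with dmasize 32768, 44.1 kHz 16-bit stereo
         * (sample_shift == 2) and HZ == 100, tmo is (32768 * 100 / 44100) >> 2,
         * roughly 18 jiffies, i.e. about the time it takes to play out the
         * whole buffer.
         */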
        if (!schedule_timeout(tmo ? tmo : 1) && tmo) {
            printk(KERN_ERR "trident: drain_dac, dma timeout?\n");
            break;
        }
    }
    remove_wait_queue(&dmabuf->wait, &wait);
    current->state = TASK_RUNNING;
    if (signal_pending(current))
        return -ERESTARTSYS;

    return 0;
}

/* update buffer management pointers, especially dmabuf->count and dmabuf->hwptr */
static void trident_update_ptr(struct trident_state *state)
{
    struct dmabuf *dmabuf = &state->dmabuf;
    unsigned hwptr, swptr;
    int clear_cnt = 0;
    int diff;
    unsigned char silence;
    unsigned half_dmasize;

    /* update hardware pointer */
    hwptr = trident_get_dma_addr(state);
    diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize;
    dmabuf->hwptr = hwptr;
    dmabuf->total_bytes += diff;

    /* error handling and process wake up for ADC */
    if (dmabuf->enable == ADC_RUNNING) {
        if (dmabuf->mapped) {
            dmabuf->count -= diff;
            if (dmabuf->count >= (signed)dmabuf->fragsize)
                wake_up(&dmabuf->wait);
        } else {
            dmabuf->count += diff;

            if (dmabuf->count < 0 || dmabuf->count > dmabuf->dmasize) {
                /* buffer underrun or buffer overrun, we have no way to recover
                   it here, just stop the machine and let the process force
                   hwptr and swptr back into sync */
                __stop_adc(state);
                dmabuf->error++;
            }
            if (dmabuf->count < (signed)dmabuf->dmasize / 2)
                wake_up(&dmabuf->wait);
        }
    }

    /* error handling and process wake up for DAC */
    if (dmabuf->enable == DAC_RUNNING) {
        if (dmabuf->mapped) {
            dmabuf->count += diff;
            if (dmabuf->count >= (signed)dmabuf->fragsize)
                wake_up(&dmabuf->wait);
        } else {
            dmabuf->count -= diff;

            if (dmabuf->count < 0 || dmabuf->count > dmabuf->dmasize) {
                /* buffer underrun or buffer overrun, we have no way to recover
                   it here, just stop the machine and let the process force
                   hwptr and swptr back into sync */
                __stop_dac(state);
                dmabuf->error++;
            } else if (!dmabuf->endcleared) {
                swptr = dmabuf->swptr;
                silence = (dmabuf->fmt & TRIDENT_FMT_16BIT ? 0 : 0x80);
                if (dmabuf->update_flag & ALI_ADDRESS_INT_UPDATE) {
                    /* We must clear the tail of the half dmabuf if needed.
                       Following the 1/2-buffer scheme of the Address Engine
                       Interrupt, check whether the data in this half of the
                       buffer is still valid. */
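                    /*
                     * Example with made-up numbers: if dmasize is 32768 then
                     * half_dmasize below is 16384; with hwptr at 20000, diff
                     * becomes 3616, and if count + diff is still below 16384
                     * the region between swptr and the end of this half holds
                     * stale samples and is overwritten with silence.
                     */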
                    half_dmasize = dmabuf->dmasize / 2;
                    if ((diff = hwptr - half_dmasize) < 0)
                        diff = hwptr;
                    if ((dmabuf->count + diff) < half_dmasize) {
                        /* there is invalid data at the end of the half buffer */
                        if ((clear_cnt = half_dmasize - swptr) < 0)
                            clear_cnt += half_dmasize;
                        /* clear the invalid data */
                        memset(dmabuf->rawbuf + swptr, silence, clear_cnt);
                        if (state->chans_num == 6) {
                            clear_cnt = clear_cnt / 2;
                            swptr = swptr / 2;
                            memset(state->other_states[0]->dmabuf.rawbuf + swptr, silence, clear_cnt);
                            memset(state->other_states[1]->dmabuf.rawbuf + swptr, silence, clear_cnt);
                            memset(state->other_states[2]->dmabuf.rawbuf + swptr, silence, clear_cnt);
                            memset(state->other_states[3]->dmabuf.rawbuf + swptr, silence, clear_cnt);
                        }
                        dmabuf->endcleared = 1;
                    }
                } else if (dmabuf->count < (signed)dmabuf->fragsize) {
                    clear_cnt = dmabuf->fragsize;
                    if ((swptr + clear_cnt) > dmabuf->dmasize)
                        clear_cnt = dmabuf->dmasize - swptr;
                    memset(dmabuf->rawbuf + swptr, silence, clear_cnt);
                    if (state->chans_num == 6) {
                        clear_cnt = clear_cnt / 2;
                        swptr = swptr / 2;
                        memset(state->other_states[0]->dmabuf.rawbuf + swptr, silence, clear_cnt);
                        memset(state->other_states[1]->dmabuf.rawbuf + swptr, silence, clear_cnt);
                        memset(state->other_states[2]->dmabuf.rawbuf + swptr, silence, clear_cnt);
                        memset(state->other_states[3]->dmabuf.rawbuf + swptr, silence, clear_cnt);
                    }
                    dmabuf->endcleared = 1;
                }
            }
            /* trident_update_ptr is called by the interrupt handler or by a
               process via ioctl/poll; we only wake up the waiting process
               when we have more than 1/2 buffer free (always true for the
               interrupt handler) */
            if (dmabuf->count < (signed)dmabuf->dmasize / 2)
                wake_up(&dmabuf->wait);
        }
    }
    dmabuf->update_flag &= ~ALI_ADDRESS_INT_UPDATE;
}

static void trident_address_interrupt(struct trident_card *card)
{
    int i;
    struct trident_state *state;

    /* Update the pointers for all channels we are running. */
    /* FIXME: should read interrupt status only once */
    for (i = 0; i < NR_HW_CH; i++) {
        if (trident_check_channel_interrupt(card, 63 - i)) {
            trident_ack_channel_interrupt(card, 63 - i);
            if ((state = card->states[i]) != NULL) {
                trident_update_ptr(state);
            } else {
                printk("trident: spurious channel irq %d.\n", 63 - i);
                trident_stop_voice(card, 63 - i);
                trident_disable_voice_irq(card, 63 - i);
            }