📄 cs46xx.c
	struct dmabuf *dmabuf = &state->dmabuf;
	struct page *map, *mapend;

	if (dmabuf->rawbuf) {
		// Undo prog_dmabuf()'s marking the pages as reserved
		mapend = virt_to_page(dmabuf->rawbuf +
				(PAGE_SIZE << dmabuf->buforder) - 1);
		for (map = virt_to_page(dmabuf->rawbuf); map <= mapend; map++)
			cs4x_mem_map_unreserve(map);
		free_dmabuf(state->card, dmabuf);
	}
	if (dmabuf->tmpbuff) {
		// Undo prog_dmabuf()'s marking the pages as reserved
		mapend = virt_to_page(dmabuf->tmpbuff +
				(PAGE_SIZE << dmabuf->buforder_tmpbuff) - 1);
		for (map = virt_to_page(dmabuf->tmpbuff); map <= mapend; map++)
			cs4x_mem_map_unreserve(map);
		free_dmabuf2(state->card, dmabuf);
	}
	dmabuf->rawbuf = NULL;
	dmabuf->tmpbuff = NULL;
	dmabuf->mapped = dmabuf->ready = 0;
	dmabuf->SGok = 0;
}

static int __prog_dmabuf(struct cs_state *state)
{
	struct dmabuf *dmabuf = &state->dmabuf;
	unsigned long flags;
	unsigned long allocated_pages, allocated_bytes;
	unsigned long tmp1, tmp2, fmt = 0;
	unsigned long *ptmp = (unsigned long *) dmabuf->pbuf;
	unsigned long SGarray[9], nSGpages = 0;
	int ret;

	CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf()+ \n"));
	/*
	 * check for CAPTURE and use only non-sg for initial release
	 */
	if (dmabuf->type == CS_TYPE_ADC) {
		CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf() ADC\n"));
		/*
		 * add in non-sg support for capture.
		 */
		spin_lock_irqsave(&state->card->lock, flags);
		/* add code to reset the rawbuf memory. TRW */
		resync_dma_ptrs(state);
		dmabuf->total_bytes = dmabuf->blocks = 0;
		dmabuf->count = dmabuf->error = dmabuf->underrun = 0;
		dmabuf->SGok = 0;
		spin_unlock_irqrestore(&state->card->lock, flags);

		/* allocate DMA buffer if not allocated yet */
		if (!dmabuf->rawbuf || !dmabuf->tmpbuff)
			if ((ret = alloc_dmabuf(state)))
				return ret;
		/*
		 * static image only supports 16Bit signed, stereo - hard code fmt
		 */
		fmt = CS_FMT_16BIT | CS_FMT_STEREO;

		dmabuf->numfrag = 2;
		dmabuf->fragsize = 2048;
		dmabuf->fragsamples = 2048 >> sample_shift[fmt];
		dmabuf->dmasize = 4096;
		dmabuf->fragshift = 11;

		memset(dmabuf->rawbuf, (fmt & CS_FMT_16BIT) ? 0 : 0x80,
				dmabuf->dmasize);
		memset(dmabuf->tmpbuff, (fmt & CS_FMT_16BIT) ? 0 : 0x80,
				PAGE_SIZE << dmabuf->buforder_tmpbuff);

		/*
		 * Now set up the ring
		 */
		spin_lock_irqsave(&state->card->lock, flags);
		cs_rec_setup(state);
		spin_unlock_irqrestore(&state->card->lock, flags);

		/* set the ready flag for the dma buffer */
		dmabuf->ready = 1;

		CS_DBGOUT(CS_PARMS, 4, printk(
			"cs46xx: prog_dmabuf(): CAPTURE rate=%d fmt=0x%x numfrag=%d "
			"fragsize=%d dmasize=%d\n",
			dmabuf->rate, dmabuf->fmt, dmabuf->numfrag,
			dmabuf->fragsize, dmabuf->dmasize));

		CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf()- 0 \n"));
		return 0;
	} else if (dmabuf->type == CS_TYPE_DAC) {
		/*
		 * Must be DAC
		 */
		CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf() DAC\n"));
		spin_lock_irqsave(&state->card->lock, flags);
		resync_dma_ptrs(state);
		dmabuf->total_bytes = dmabuf->blocks = 0;
		dmabuf->count = dmabuf->error = dmabuf->underrun = 0;
		dmabuf->SGok = 0;
		spin_unlock_irqrestore(&state->card->lock, flags);

		/* allocate DMA buffer if not allocated yet */
		if (!dmabuf->rawbuf)
			if ((ret = alloc_dmabuf(state)))
				return ret;

		allocated_pages = 1 << dmabuf->buforder;
		allocated_bytes = allocated_pages * PAGE_SIZE;
		if (allocated_pages < 2) {
			CS_DBGOUT(CS_FUNCTION, 4, printk(
				"cs46xx: prog_dmabuf() Error: allocated_pages too small (%d)\n",
				(unsigned)allocated_pages));
			return -ENOMEM;
		}

		/* Use all the pages allocated, fragsize 4k. */
		/* Use 'pbuf' for S/G page map table. */
		dmabuf->SGok = 1;			/* Use S/G. */

		nSGpages = allocated_bytes / 4096;	/* S/G pages always 4k. */

		/* Set up S/G variables. */
		*ptmp = virt_to_bus(dmabuf->rawbuf);
		*(ptmp + 1) = 0x00000008;
		for (tmp1 = 1; tmp1 < nSGpages; tmp1++) {
			*(ptmp + 2 * tmp1) = virt_to_bus((dmabuf->rawbuf) + 4096 * tmp1);
			if (tmp1 == nSGpages - 1)
				tmp2 = 0xbfff0000;
			else
				tmp2 = 0x80000000 + 8 * (tmp1 + 1);
			*(ptmp + 2 * tmp1 + 1) = tmp2;
		}
		SGarray[0] = 0x82c0200d;
		SGarray[1] = 0xffff0000;
		SGarray[2] = *ptmp;
		SGarray[3] = 0x00010600;
		SGarray[4] = *(ptmp + 2);
		SGarray[5] = 0x80000010;
		SGarray[6] = *ptmp;
		SGarray[7] = *(ptmp + 2);
		SGarray[8] = (virt_to_bus(dmabuf->pbuf) & 0xffff000) | 0x10;

		if (dmabuf->SGok) {
			dmabuf->numfrag = nSGpages;
			dmabuf->fragsize = 4096;
			dmabuf->fragsamples = 4096 >> sample_shift[dmabuf->fmt];
			dmabuf->fragshift = 12;
			dmabuf->dmasize = dmabuf->numfrag * 4096;
		} else {
			SGarray[0] = 0xf2c0000f;
			SGarray[1] = 0x00000200;
			SGarray[2] = 0;
			SGarray[3] = 0x00010600;
			SGarray[4] = SGarray[5] = SGarray[6] = SGarray[7] = SGarray[8] = 0;
			dmabuf->numfrag = 2;
			dmabuf->fragsize = 2048;
			dmabuf->fragsamples = 2048 >> sample_shift[dmabuf->fmt];
			dmabuf->dmasize = 4096;
			dmabuf->fragshift = 11;
		}
		for (tmp1 = 0; tmp1 < sizeof(SGarray) / 4; tmp1++)
			cs461x_poke(state->card, BA1_PDTC + tmp1 * 4, SGarray[tmp1]);

		memset(dmabuf->rawbuf, (dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
				dmabuf->dmasize);

		/*
		 * Now set up the ring
		 */
		spin_lock_irqsave(&state->card->lock, flags);
		cs_play_setup(state);
		spin_unlock_irqrestore(&state->card->lock, flags);

		/* set the ready flag for the dma buffer */
		dmabuf->ready = 1;

		CS_DBGOUT(CS_PARMS, 4, printk(
			"cs46xx: prog_dmabuf(): PLAYBACK rate=%d fmt=0x%x numfrag=%d "
			"fragsize=%d dmasize=%d\n",
			dmabuf->rate, dmabuf->fmt, dmabuf->numfrag,
			dmabuf->fragsize, dmabuf->dmasize));

		CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf()- \n"));
		return 0;
	} else {
		CS_DBGOUT(CS_FUNCTION, 4, printk(
			"cs46xx: prog_dmabuf()- Invalid Type %d\n", dmabuf->type));
	}
	return 1;
}
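/*
 * Note (not from the original source): layout of the scatter/gather page
 * table that the DAC branch of __prog_dmabuf() above builds in dmabuf->pbuf.
 * The control-word meanings are inferred from the code itself, not from
 * CS46xx documentation:
 *
 *	ptmp[2*n]	bus address of 4 KB page n of dmabuf->rawbuf
 *	ptmp[2*n+1]	control word: 0x00000008 for entry 0,
 *			0x80000000 + 8*(n+1) for intermediate entries
 *			(apparently the byte offset of the next entry), and
 *			0xbfff0000 for the last entry, which presumably tells
 *			the DMA engine to wrap back to the first page.
 *
 * SGarray[0..8] is then written one 32-bit word at a time to the playback
 * DMA requestor starting at BA1_PDTC via cs461x_poke().
 */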
static int prog_dmabuf(struct cs_state *state)
{
	int ret;

	down(&state->sem);
	ret = __prog_dmabuf(state);
	up(&state->sem);

	return ret;
}

static void cs_clear_tail(struct cs_state *state)
{
}

static int drain_dac(struct cs_state *state, int nonblock)
{
	DECLARE_WAITQUEUE(wait, current);
	struct dmabuf *dmabuf = &state->dmabuf;
	struct cs_card *card = state->card;
	unsigned long flags;
	unsigned long tmo;
	int count;

	CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: drain_dac()+ \n"));
	if (dmabuf->mapped || !dmabuf->ready) {
		CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: drain_dac()- 0, not ready\n"));
		return 0;
	}
	add_wait_queue(&dmabuf->wait, &wait);
	for (;;) {
		/* It seems that we have to set the current state to TASK_INTERRUPTIBLE
		   every time to make the process really go to sleep */
		current->state = TASK_INTERRUPTIBLE;

		spin_lock_irqsave(&state->card->lock, flags);
		count = dmabuf->count;
		spin_unlock_irqrestore(&state->card->lock, flags);

		if (count <= 0)
			break;

		if (signal_pending(current))
			break;

		if (nonblock) {
			remove_wait_queue(&dmabuf->wait, &wait);
			current->state = TASK_RUNNING;
			return -EBUSY;
		}

		tmo = (dmabuf->dmasize * HZ) / dmabuf->rate;
		tmo >>= sample_shift[dmabuf->fmt];
		tmo += (2048 * HZ) / dmabuf->rate;

		if (!schedule_timeout(tmo ? tmo : 1) && tmo) {
			printk(KERN_ERR "cs46xx: drain_dac, dma timeout? %d\n", count);
			break;
		}
	}
	remove_wait_queue(&dmabuf->wait, &wait);
	current->state = TASK_RUNNING;
	if (signal_pending(current)) {
		CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: drain_dac()- -ERESTARTSYS\n"));
		/*
		 * set to silence and let that clear the fifos.
		 */
		cs461x_clear_serial_FIFOs(card, CS_TYPE_DAC);
		return -ERESTARTSYS;
	}

	CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: drain_dac()- 0\n"));
	return 0;
}
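/*
 * Note (not from the original source): cs_update_ptr() below advances the
 * software view of the ring buffer by the number of bytes the hardware has
 * produced (ADC) or consumed (DAC) since the last update.  The delta is
 * taken modulo dmasize so that a hardware pointer that has wrapped past the
 * end of the buffer is still handled correctly.  A standalone sketch of the
 * same arithmetic, assuming dmasize is the ring size in bytes:
 *
 *	static unsigned ring_delta(unsigned prev, unsigned cur, unsigned dmasize)
 *	{
 *		return (dmasize + cur - prev) % dmasize;
 *	}
 *
 * e.g. prev = 3500, cur = 500, dmasize = 4096 gives a delta of 1096 bytes.
 */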
/*
 * update buffer management pointers, especially, dmabuf->count and dmabuf->hwptr
 */
static void cs_update_ptr(struct cs_card *card, int wake)
{
	struct cs_state *state;
	struct dmabuf *dmabuf;
	unsigned hwptr;
	int diff;

	/* error handling and process wake up for ADC */
	state = card->states[0];
	if (state) {
		dmabuf = &state->dmabuf;
		if (dmabuf->enable & ADC_RUNNING) {
			/* update hardware pointer */
			hwptr = cs_get_dma_addr(state);

			diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize;
			CS_DBGOUT(CS_PARMS, 9, printk(
				"cs46xx: cs_update_ptr()+ ADC hwptr=%d diff=%d\n",
				hwptr, diff));
			dmabuf->hwptr = hwptr;
			dmabuf->total_bytes += diff;
			dmabuf->count += diff;
			if (dmabuf->count > dmabuf->dmasize)
				dmabuf->count = dmabuf->dmasize;

			if (dmabuf->mapped) {
				if (wake && dmabuf->count >= (signed)dmabuf->fragsize)
					wake_up(&dmabuf->wait);
			} else {
				if (wake && dmabuf->count > 0)
					wake_up(&dmabuf->wait);
			}
		}
	}

	/*
	 * Now the DAC
	 */
	state = card->states[1];
	if (state) {
		dmabuf = &state->dmabuf;
		/* error handling and process wake up for DAC */
		if (dmabuf->enable & DAC_RUNNING) {
			/* update hardware pointer */
			hwptr = cs_get_dma_addr(state);

			diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize;
			CS_DBGOUT(CS_PARMS, 9, printk(
				"cs46xx: cs_update_ptr()+ DAC hwptr=%d diff=%d\n",
				hwptr, diff));
			dmabuf->hwptr = hwptr;
			dmabuf->total_bytes += diff;
			if (dmabuf->mapped) {
				dmabuf->count += diff;
				if (wake && dmabuf->count >= (signed)dmabuf->fragsize)
					wake_up(&dmabuf->wait);
				/*
				 * other drivers use fragsize, but don't see any sense
				 * in that, since dmasize is the buffer asked for
				 * via mmap.
				 */
				if (dmabuf->count > dmabuf->dmasize)
					dmabuf->count &= dmabuf->dmasize - 1;
			} else {
				dmabuf->count -= diff;
				/*
				 * backfill with silence and clear out the last
				 * "diff" number of bytes.
				 */
				if (hwptr >= diff) {
					memset(dmabuf->rawbuf + hwptr - diff,
						(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
						diff);
				} else {
					memset(dmabuf->rawbuf,
						(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
						(unsigned)hwptr);
					memset((void *)((unsigned)dmabuf->rawbuf +
							dmabuf->dmasize + hwptr - diff),
						(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
						diff - hwptr);
				}

				if (dmabuf->count < 0 || dmabuf->count > dmabuf->dmasize) {
					CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO
						"cs46xx: ERROR DAC count<0 or count > dmasize (%d)\n",
						dmabuf->count));
					/*
					 * buffer underrun or buffer overrun, reset the
					 * count of bytes written back to 0.
					 */
					if (dmabuf->count < 0)
						dmabuf->underrun = 1;
					dmabuf->count = 0;
					dmabuf->error++;
				}
				if (wake && dmabuf->count < (signed)dmabuf->dmasize / 2)
					wake_up(&dmabuf->wait);
			}
		}
	}
}

/* hold spinlock for the following! */
static void cs_handle_midi(struct cs_card *card)
{
	unsigned char ch;
	int wake;
	unsigned temp1;

	wake = 0;
	while (!(cs461x_peekBA0(card, BA0_MIDSR) & MIDSR_RBE)) {
		ch = cs461x_peekBA0(card, BA0_MIDRP);
		if (card->midi.icnt < CS_MIDIINBUF) {
			card->midi.ibuf[card->midi.iwr] = ch;
			card->midi.iwr = (card->midi.iwr + 1) % CS_MIDIINBUF;
			card->midi.icnt++;
		}
		wake = 1;