/* ibmcsiti.c -- IBM CSI (TI codec) OSS /dev/dsp audio driver (excerpt) */
s->dma_adc.ossfragshift = 4;
if (s->dma_adc.ossfragshift > 15)
s->dma_adc.ossfragshift = 15;
if (s->dma_adc.ossmaxfrags < 4)
s->dma_adc.ossmaxfrags = 4;
}
if (file->f_mode & FMODE_WRITE) {
s->dma_dac.ossfragshift = val & 0xffff;
s->dma_dac.ossmaxfrags = (val >> 16) & 0xffff;
if (s->dma_dac.ossfragshift < 4)
s->dma_dac.ossfragshift = 4;
if (s->dma_dac.ossfragshift > 15)
s->dma_dac.ossfragshift = 15;
if (s->dma_dac.ossmaxfrags < 4)
s->dma_dac.ossmaxfrags = 4;
}
return 0;
case SNDCTL_DSP_SUBDIVIDE:
printk(KERN_DEBUG "SNDCTL_DSP_SUBDIVIDE\n");
if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision) ||
(file->f_mode & FMODE_WRITE && s->dma_dac.subdivision))
return -EINVAL;
if (get_user(val, (int *)arg))
return -EFAULT;
if (val != 1 && val != 2 && val != 4)
return -EINVAL;
if (file->f_mode & FMODE_READ)
s->dma_adc.subdivision = val;
if (file->f_mode & FMODE_WRITE)
s->dma_dac.subdivision = val;
return 0;
case SOUND_PCM_READ_RATE:
printk(KERN_DEBUG "SNDCTL_PCM_READ_RATE\n");
return put_user(IBMCSI_DEFAULT_SAMPLING_RATE, (int *)arg);
case SOUND_PCM_READ_CHANNELS:
printk(KERN_DEBUG "SNDCTL_PCM_READ_CHANNELS\n");
return put_user(2, (int *)arg);
case SOUND_PCM_READ_BITS:
printk(KERN_DEBUG "SNDCTL_PCM_READ_BITS\n");
return put_user(16,(int *)arg);
case SOUND_PCM_WRITE_FILTER:
case SNDCTL_DSP_SETSYNCRO:
case SOUND_PCM_READ_FILTER:
printk(KERN_DEBUG "SNDCTL_xxx\n");
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
/*
 * ibmcsiti_dsp_release - /dev/dsp close() entry point.
 *
 * Drains any pending playback, then (under open_sem) stops the DMA
 * direction(s) this file descriptor had open, kills the housekeeping
 * timers, frees the DMA buffers, and wakes anyone blocked in open()
 * waiting for the device to become free.
 *
 * Always returns 0; release cannot usefully fail.
 */
static int ibmcsiti_dsp_release(struct inode *inode, struct file *file)
{
	struct ibmcsiti_state *s = (struct ibmcsiti_state *)file->private_data;

	VALIDATE_STATE(s);
	lock_kernel(); /* FIXME: do we need this? (2.4-era BKL around release) */

	/* Let queued playback data finish (or bail immediately if the fd
	 * was opened O_NONBLOCK) BEFORE taking open_sem, since draining
	 * may sleep for a while. */
	if (file->f_mode & FMODE_WRITE)
		drain_dac(s, file->f_flags & O_NONBLOCK);

	down(&s->open_sem);
	if (file->f_mode & FMODE_WRITE) {
		stop_dac(s);
		/* Timer must be dead before the buffer it services goes away. */
		del_timer_sync(&s->dac_timer);
		/* Make sure no in-flight interrupt handler still touches
		 * the DAC DMA buffer (old no-argument 2.4 API). */
		synchronize_irq();
		dealloc_dmabuf(s, &s->dma_dac);
	}
	if (file->f_mode & FMODE_READ) {
		stop_adc(s);
		del_timer_sync(&s->adc_timer);
		dealloc_dmabuf(s, &s->dma_adc);
	}
	/* Drop only the mode bits this fd held; the other direction may
	 * still be open elsewhere. */
	s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);
	/* Wake openers blocked waiting for this direction to free up. */
	wake_up(&s->open_wait);
	up(&s->open_sem);
	unlock_kernel();
	return 0;
}
/*****************************************************************************/
/* /dev/dsp suppport routines */
/*****************************************************************************/
/*****************************************************************************/
/* Start DAC (Digital to Analog Conversion - Audio Playback) */
/*****************************************************************************/
/* Design Notes:
*
* This routine is designed so it can be called safely as many times
* as desired whenever it is necessary to ensure that the DAC process
* is running, conditions permitting.
*
* In scatter/gather mode, we grab a descriptor (if available) and
* enqueue it at the bottom of active descriptor chain. The
* descriptors are configured to form a FIFO queue, not a loop /
* ping-pong, to avoid replaying the same chunk of data. (*)
*
* * Once a chunk of data has been processed, we need to remove it
* from the queue/loop. We have to do such housekeeping on timer
* interrupt; we cannot use DMA EOT interrupt, because the PPC 405 DMA
* core traditionally halts scatter / gather DMA whenever it raises an
* interrupt, until the interrupt is cleared. Since timer interrupts
* are the lowest priority under Linux, they can be masked out under
* stress conditions. Therefore, if we use a looping list / ping-pong
* descriptors, we may not be able to remove an already processed
* chunk of data in time, causing it to be played back again and again
* as the DMAC loops through the list.
*
* When an underrun has occurred, the DMAC will be reading dummy
* neutral data off the last (anchor) descriptor, which loops on
* itself. In this case, we disable the CSI and halt the DMAC
* temporarily, then enqueue the new descriptor and restart the
* scatter/gather process afterwards.
*
* Whether we have reached the anchor descriptor, is determined by
* reading the DMA source address and comparing it with the anchor's
 * dummy data address. Since the descriptor address field in the DMAC
* points to the NEXT descriptor to be used, we cannot use it to
* determine whether we are at the anchor (underrun) or just before
* anchor (normal case after the first pass).
*
* To avoid possible race conditions at the descriptor boundary
* (i.e. DMAC going off to the next descriptor after we have read the
* DMAC values), if the current descriptor has only a couple samples
* to go, we wait in a loop until things stabilize.
*/
/*
 * start_dac - (re)start scatter/gather playback DMA.
 *
 * Safe to call repeatedly (see design notes above): posts up to one new
 * S/G descriptor covering pending data in the DAC ring buffer, splices
 * it in front of the looping anchor descriptor, and kicks the DMAC/CSI
 * if the transmit S/G engine is not already running.
 *
 * Caller context: takes s->lock with IRQs disabled for the duration.
 */
static void start_dac(struct ibmcsiti_state *s)
{
	unsigned long flags;
	struct dma_sgdt *dt;
	unsigned int count;

	DBG(printk("start_dac_sg state %8.8x count %d ready %d sgcnt %d hwptr %d\n",
		s->state, s->dma_dac.count, s->dma_dac.ready,
		s->dma_dac.sg_count, s->dma_dac.hwptr));
	/* Spinlock held from here to end */
	spin_lock_irqsave(&s->lock, flags);
	if (s->dma_dac.sg_count) { /* There is data that needs to be posted */
		if (s->dma_dac.hwptr >= s->dma_dac.dmasize) {/* Sanity check */
			s->dma_dac.hwptr = 0;
		}
		count = s->dma_dac.sg_count;
		/* Clamp to the contiguous span up to the end of the ring;
		 * wrap-around data goes out in a later call. */
		if (s->dma_dac.hwptr + count > s->dma_dac.dmasize) {
			count = s->dma_dac.dmasize - s->dma_dac.hwptr;
		}
		/* 4 bytes per sample, DMA count limited to 64K samples */
		if (count > 65536*4)
			count = 65536*4;
		if (count > 4) { /* Got enough data to do DMA */
			/* Get descriptor from the free list */
			dt = get_sgdt(&(s->dac_free_sgdt_q));
			if (dt) { /* Descriptor available */
				DBG(printk("new dt %8.8x ",dt));
				/* FIXME: Break data into
				 * fragments. Easiest way for now! */
				/* Fill in the descriptor: no-interrupt
				 * transmit, sample count in low 16 bits,
				 * source = ring buffer at hwptr,
				 * dest = CSI transmit buffer register,
				 * next = looping anchor descriptor. */
				dt->ccw = (unsigned int) (IBMCSI_TXDMA_GO_NOI);
				dt->ctrl = 0x80000000 | ((count/4)&0xffff);
				dt->srcP = virt_to_phys(s->dma_dac.rawbuf
					+ s->dma_dac.hwptr);
				dt->destP = CSI0_TBUF;
				dt->nextP = virt_to_phys(s->dac_sgdt_lastV);
				/* Descriptor must be visible to the DMAC
				 * before we touch the hardware. */
				asm volatile ("sync");
				DBG(printk("TX SG %8.8x, last %8.8x\n",
					mfdcr(TX_SG),s->dac_sgdt_lastV));
				DBG(printk("TX SA %8.8x, count %d \n",
					mfdcr(IBMCSI_TXDMA_SA),
					mfdcr(IBMCSI_TXDMA_CT)));
				DBG(printk("last->phys %8.8lx\n",
					s->dac_sgdt_lastV->srcP));
				if (mfdcr(DCRN_ASGC) & TX_SG_ENABLE) {
					/* S/G engine already running: find out
					 * whether it is playing real data or
					 * looping on the anchor (underrun). */
					unsigned int current_sa = mfdcr(IBMCSI_TXDMA_SA);
					if (current_sa == s->dac_sgdt_lastV->srcP) {
						/* Underrun: DMAC is reading the
						 * anchor's dummy data. Halt it
						 * so it restarts from our new
						 * descriptor. */
						s->dac_active_sgdt_q = dt;
						stop_dac(s);
						udelay(100);
					} else if (mfdcr(IBMCSI_TXDMA_CT) <= 2) {
						int timeout = 0;
						/* No time to safely
						 * reprogram this
						 * pass. Wait until
						 * this pass
						 * completes. */
						while (current_sa == mfdcr(IBMCSI_TXDMA_SA)) {
							timeout++;
							if (timeout > 1000000) {
								printk("ibmcsi: DMA timeout!!\n");
								break;
							}
						};
						/* Re-check: if it advanced onto
						 * the anchor we must halt/restart
						 * as in the underrun case. */
						if (mfdcr(IBMCSI_TXDMA_SA) == s->dac_sgdt_lastV->srcP) {
							stop_dac(s);
							udelay(100);
						}
					}
				} else {
					/* This gets programmed only if not active */
					s->dac_active_sgdt_q = dt;
				}
				/* Insert new dt between last anchor
				 * and previous data (doubly linked on the
				 * virtual side, singly on the physical). */
				dt->nextV = s->dac_sgdt_lastV;
				dt->prevV = s->dac_sgdt_lastV->prevV;
				if (dt->prevV) {
					dt->prevV->nextV = dt;
					dt->prevV->nextP = virt_to_phys(dt);
				}
				s->dac_sgdt_lastV->prevV = dt;
				if (mfdcr(DCRN_ASGC) & TX_SG_ENABLE) {
					if (mfdcr(TX_SG) == virt_to_phys(s->dac_sgdt_lastV) ) {
						/* Next descriptor is
						 * the bottom
						 * anchor */
						mtdcr(TX_SG, virt_to_phys(dt));
						/* Replace the next
						 * descriptor address
						 * with myself. Not
						 * sure if this
						 * works */
					}
				}
				asm volatile ("sync");
				/* Update count and pointer (ring wrap) */
				s->dma_dac.sg_count -= count;
				s->dma_dac.hwptr += count;
				if (s->dma_dac.hwptr >= s->dma_dac.dmasize)
					s->dma_dac.hwptr = 0;
			} else { /* End if descriptor available */
				printk("0dt\n");
			}
			/* If DMA is not already running, kick it. */
			if (! (mfdcr(DCRN_ASGC) & TX_SG_ENABLE)) {
				unsigned int current_csi_er
					= IBMCSI_READ(CSI0_ER);
				s->state |= IBMCSI_DAC_RUNNING;
				ibmcsi_stop_csi_sync(); /* FIXME: add timeout */
				/* Disable Tx while we reprogram. */
				IBMCSI_WRITE(CSI0_ER,
					IBMCSI_READ(CSI0_ER)
					& ~CSI_ER_TXEN);
				/* Clear Terminal Count etc. */
				mtdcr(IBMCSI_DMA_SR,
					DCRN_DMA_SR_ALL(IBMCSI_TXDMA));
				/* Clear CSI underrun */
				IBMCSI_WRITE(CSI0_SR, CSI_SR_TOD);
				/* Write address of the first
				 * scatter/gather descriptor table */
				mtdcr(TX_SG,
					virt_to_phys(s->dac_active_sgdt_q));
				/* Enable scatter/gather */
				mtdcr(DCRN_ASGC, mfdcr(DCRN_ASGC)
					| TX_SG_ENABLE | TX_SG_MASK);
				/* Set up CSI config */
				IBMCSI_WRITE(CSI0_CFG, IBMCSI_TI_CFG);
				asm volatile ("sync");
				/* Start CSI, enable slot 0 and Tx. */
				IBMCSI_WRITE(CSI0_ER,
					current_csi_er | CSI_ER_ESLOT(0)
					| CSI_ER_ESLOT(1)
					| CSI_ER_TXEN | CSI_ER_ECSI);
				udelay(100);
				DBG(printk("CSI SA %8.8x, CT %8.8x",
					mfdcr(IBMCSI_TXDMA_SA),
					mfdcr(IBMCSI_TXDMA_CT)));
				DBG(printk("%2.2x %8.8x\n",
					IBMCSI_READ(CSI0_SR),
					mfdcr(DCRN_DMASR)));
			}
		} /* End count not zero */
	}
	spin_unlock_irqrestore(&s->lock, flags);
}
/************************************************************************/
/* Start ADC (Analog to Digital Conversion - Audio Capture) */
/************************************************************************/
/* Design Notes:
*
* Like its companion start_dac(), this routine is designed to be
* called as many times as desired whenever it is necessary to ensure
* that the ADC process is running, conditions permitting.
*
* Unlike DAC, ADC uses a ring descriptor scheme because we can simply
* let the DMAC overwrite stale data when an overrun occurs. All the
* descriptors in the ADC chain point to the same buffer (the entire
* DMA buffer). In theory it might be enough to use just one
* descriptor that loops on itself, but in this implementation we will
* use redundant multiple descriptors, so that we can detect an
* overrun condition trivially, by comparing the current descriptor
* address with the previous one's. (This is done in the timer int
* handler.) So long as timer interrupts are not masked out for the
* duration of the entire descriptor loop (in which case we are most
 * likely in much deeper trouble than an overrun anyway), we should
* be able to detect an overrun reliably in this manner.
*
* If we want to be truly paranoid, and/or have faith in kernel
* timekeeping functions and incoming audio data rate, we could also
* use timebase and fewer descriptors to do the same; you would still
 * have to worry about things like timebase accuracy (e.g. tb_to_us
* doesn't work with slow timebase clock).
*/
/*
 * start_adc - start scatter/gather capture DMA if it is not running.
 *
 * Unlike start_dac(), the ADC uses a self-looping ring of descriptors
 * (see design notes above), so all this has to do is program the ring
 * head into the DMAC and enable the CSI receive path. Idempotent: if
 * RX_SG_ENABLE is already set in the hardware, this is a no-op.
 *
 * Caller context: takes s->lock with IRQs disabled for the duration.
 */
static void start_adc(struct ibmcsiti_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	if (! (mfdcr(DCRN_ASGC) & RX_SG_ENABLE)) { /* In hardware we trust */
		unsigned int current_csi_er = IBMCSI_READ(CSI0_ER);

		s->state |= IBMCSI_ADC_RUNNING;
		ibmcsi_stop_csi_sync(); /* FIXME: add timeout */
		/* Disable Rx while we reprogram. */
		IBMCSI_WRITE(CSI0_ER, IBMCSI_READ(CSI0_ER) & ~CSI_ER_RXEN );
		/* Clear Terminal Count etc. for Rx channel */
		mtdcr(IBMCSI_DMA_SR, DCRN_DMA_SR_ALL(IBMCSI_RXDMA));
		/* Clear the CSI status flag for Rx (overrun) */
		IBMCSI_WRITE(CSI0_SR, CSI_SR_ROD);
		/* Write the first scatter/gather descriptor address */
		mtdcr(RX_SG, virt_to_phys(s->adc_active_sgdt_q));
		/* Seed the overrun-detection bookkeeping used by the
		 * timer interrupt handler (see design notes). */
		s->adc_hw_prev_sgdt = s->adc_active_sgdt_q;
		s->adc_sw_prev_sgdt = s->adc_active_sgdt_q;
		s->dma_adc.hwptr = 0; /* We will never halt ADC so we can init here */
		s->dma_adc.swptr = 0; /* Otherwise we will have to
				       * figure out how to restart. */
		/* Enable scatter/gather DMA */
		mtdcr(DCRN_ASGC, mfdcr(DCRN_ASGC) | RX_SG_ENABLE | RX_SG_MASK);
		/* Set up CSI config */
		IBMCSI_WRITE(CSI0_CFG, IBMCSI_TI_CFG);
		/* Ensure all programming is visible before enabling. */
		asm volatile("sync");
		/* Start CSI, enable slot 0 and Rx */
		IBMCSI_WRITE(CSI0_ER, current_csi_er
			| CSI_ER_ESLOT(0) | CSI_ER_ESLOT(1)
			| CSI_ER_RXEN | CSI_ER_ECSI );
		udelay(100);
	}
	spin_unlock_irqrestore(&s->lock, flags);
}
static inline void stop_dac(struct ibmcsiti_state *s)
{
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
/* Stop CSI synchronously - let transfer complete */
/* NOTE(review): the remainder of stop_dac() is missing from this
 * excerpt (the original text here was web-viewer UI chrome, removed). */