📄 cx23885-core.c
字号:
/* --------------------------------------------------------------------- */
/* Tail of an SRAM-channel register dump helper.  The function's opening
 * (signature and the code that produced `ch`, `i` and `risc`) lies
 * before the start of this chunk; only the closing half is visible. */

	/* decode one RISC instruction, then print its argument dwords */
	n = cx23885_risc_decode(risc);
	for (j = 1; j < n; j++) {
		risc = cx_read(ch->ctrl_start + 4 * (i + j));
		printk("%s: iq %x: 0x%08x [ arg #%d ]\n",
			dev->name, i+j, risc, j);
	}
	}

	/* dump the channel's FIFO/CTRL SRAM windows and the DMA
	 * pointer/count registers */
	printk("%s: fifo: 0x%08x -> 0x%x\n",
		dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	printk("%s: ctrl: 0x%08x -> 0x%x\n",
		dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	printk("%s: ptr1_reg: 0x%08x\n",
		dev->name, cx_read(ch->ptr1_reg));
	printk("%s: ptr2_reg: 0x%08x\n",
		dev->name, cx_read(ch->ptr2_reg));
	printk("%s: cnt1_reg: 0x%08x\n",
		dev->name, cx_read(ch->cnt1_reg));
	printk("%s: cnt2_reg: 0x%08x\n",
		dev->name, cx_read(ch->cnt2_reg));
}

/*
 * Disassemble and print a RISC program held in host memory (risc->cpu),
 * one instruction (plus its argument dwords) per iteration, stopping
 * after the terminating RISC_JUMP instruction.
 */
void cx23885_risc_disasm(struct cx23885_tsport *port,
			 struct btcx_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	printk("%s: risc disasm: %p [dma=0x%08lx]\n",
		dev->name, risc->cpu, (unsigned long)risc->dma);
	/* risc->size is in bytes, the program is walked as dwords;
	 * `n` is the dword count consumed by each decoded instruction
	 * (presumably including the opcode dword itself) */
	for (i = 0; i < (risc->size >> 2); i += n) {
		printk("%s: %04d: ", dev->name, i);
		n = cx23885_risc_decode(risc->cpu[i]);
		for (j = 1; j < n; j++)
			printk("%s: %04d: 0x%08x [ arg #%d ]\n",
				dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == RISC_JUMP)
			break;
	}
}

/*
 * Quiesce the chip: stop the RISC DMA controller, all IR/video/audio
 * activity and the UART, then mask every interrupt source.
 */
void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B/C activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx_write(PCI_INT_MSK, 0);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);
}

/*
 * Full device reset: shut everything down, clear all latched interrupt
 * status bits, then reprogram the on-chip SRAM channel layout and the
 * GPIOs.
 */
void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __FUNCTION__);

	cx23885_shutdown(dev);

	/* acknowledge any pending interrupt status (presumably
	 * write-1-to-clear registers — TODO confirm vs. datasheet) */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);

	/* preserve only the top bit of CLK_DELAY, then let the chip settle */
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	mdelay(100);

	/* Lay out the internal SRAM channels: 188*4-byte lines (four
	 * MPEG-TS-sized 188 byte packets, presumably) for the TS-capable
	 * channels, 128 bytes for the rest. */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH01 ],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH02 ],
		128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH03 ],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH04 ],
		128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH05 ],
		128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH06 ],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH07 ],
		128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH08 ],
		128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH09 ],
		128, 0);

	cx23885_gpio_setup(dev);
}

/* Apply bridge-specific hardware workarounds.  Always returns 0. */
static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __FUNCTION__);

	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
	 * occur on the cx23887 bridge.
	 */
	if(dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	return 0;
}

/*
 * Claim the device's MMIO region (PCI BAR 0).
 * Returns 0 on success, -EBUSY if the region is already claimed.
 */
static int get_resources(struct cx23885_dev *dev)
{
	if (request_mem_region(pci_resource_start(dev->pci,0),
			       pci_resource_len(dev->pci,0),
			       dev->name))
		return 0;

	printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
		dev->name, (unsigned long long)pci_resource_start(dev->pci,0));

	return -EBUSY;
}

/* Forward declarations: both are defined later in this file (past the
 * end of this chunk, in cx23885_timeout's case). */
static void cx23885_timeout(unsigned long data);
int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
			 u32 reg, u32 mask, u32 value);

/*
 * Initialise one MPEG transport-stream port (portno 1 = VID_B,
 * portno 2 = VID_C): common DMA/IRQ settings, buffer queue and timeout
 * timer, the per-port register map, and the RISC "stopper" mini-program
 * used to halt DMA cleanly.  Returns 0; BUG()s on any other portno.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
			       struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __FUNCTION__, portno);

	/* Transport bus init dma queue - Common settings */
	port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	INIT_LIST_HEAD(&port->mpegq.queued);
	port->mpegq.timeout.function = cx23885_timeout;
	port->mpegq.timeout.data = (unsigned long)port;
	init_timer(&port->mpegq.timeout);

	switch(portno) {
	case 1:
		port->reg_gpcnt = VID_B_GPCNT;
		port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
		port->reg_dma_ctl = VID_B_DMA_CTL;
		port->reg_lngth = VID_B_LNGTH;
		port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_B_GEN_CTL;
		port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
		port->reg_sop_status = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_B_VLD_MISC;
		port->reg_ts_clk_en = VID_B_TS_CLK_EN;
		port->reg_src_sel = VID_B_SRC_SEL;
		port->reg_ts_int_msk = VID_B_INT_MSK;
		port->reg_ts_int_stat = VID_B_INT_STAT;
		port->sram_chno = SRAM_CH03; /* VID_B */
		port->pci_irqmask = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt = VID_C_GPCNT;
		port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
		port->reg_dma_ctl = VID_C_DMA_CTL;
		port->reg_lngth = VID_C_LNGTH;
		port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_C_GEN_CTL;
		port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
		port->reg_sop_status = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_C_VLD_MISC;
		port->reg_ts_clk_en = VID_C_TS_CLK_EN;
		port->reg_src_sel = 0; /* VID_C has no source-select register */
		port->reg_ts_int_msk = VID_C_INT_MSK;
		port->reg_ts_int_stat = VID_C_INT_STAT;
		port->sram_chno = SRAM_CH06; /* VID_C */
		port->pci_irqmask = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	/* stopper program: writes dma_ctl_val masked to 0 into the port's
	 * DMA control register, then raises IRQ2 and parks in a jump */
	cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
			     port->reg_dma_ctl, port->dma_ctl_val, 0x00);

	return 0;
}

/*
 * One-time per-device bring-up: register the device in the global list,
 * pick the bridge type and SRAM map from the PCI device id, identify
 * the board, program the three I2C bus register maps, init any DVB TS
 * ports, claim and map BAR 0, reset the hardware, and register the
 * i2c / card / IR / DVB sub-drivers.  Returns 0 or a negative errno.
 */
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	mutex_init(&dev->lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	mutex_lock(&devlist);
	list_add_tail(&dev->devlist, &cx23885_devlist);
	mutex_unlock(&devlist);

	/* Configure the internal memory */
	if(dev->pci->device == 0x8880) {
		dev->bridge = CX23885_BRIDGE_887;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if(dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();
	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__FUNCTION__, dev->bridge);

	/* board config: insmod "card=" option first, then subsystem-id
	 * autodetection, else fall back to the unknown board */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	dev->pci_bus = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	dev->pci_irqmask = 0x001f00;

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	/* init the TS ports only for boards that route them to DVB */
	if(cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if(cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		printk(KERN_ERR "CORE %s No more PCIe resources for "
		       "subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	/* NOTE(review): ioremap() result is not checked for NULL before
	 * the hardware accesses below — confirm this is acceptable or
	 * add an error path. */
	dev->lmmio = ioremap(pci_resource_start(dev->pci,0),
			     pci_resource_len(dev->pci,0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
	       dev->name, dev->pci->subsystem_vendor,
	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
	       dev->board, card[dev->nr] == dev->board ?
	       "insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_call_i2c_clients (&dev->i2c_bus[0], TUNER_SET_STANDBY, NULL);
	cx23885_card_setup(dev);
	cx23885_ir_init(dev);

	if(cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
			       __FUNCTION__);
		}
	}

	if(cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_C\n",
			       __FUNCTION__);
		}
	}

	return 0;
}

/*
 * Tear down a device.  The MMIO region is released unconditionally; the
 * remaining teardown (DVB, i2c, iounmap) only runs once the last
 * reference is dropped.
 * NOTE(review): the mem region is released even while other references
 * remain — confirm this ordering is intentional.
 */
void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci,0),
			   pci_resource_len(dev->pci,0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if(cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if(cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	/* unregister in reverse order of registration */
	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}

/*
 * Emit RISC write instructions covering `lines` scan lines of `bpl`
 * bytes each, starting `offset` bytes into the scatter-gather list.
 * Each emitted write is 3 dwords (opcode, address, upper-32 address
 * bits = 0); lines that straddle an SG chunk boundary are split into
 * multiple writes (SOL on the first, EOL on the last).  Returns the
 * updated instruction pointer.
 */
static u32* cx23885_risc_field(u32 *rp, struct scatterlist *sglist,
			       unsigned int offset, u32 sync_line,
			       unsigned int bpl, unsigned int padding,
			       unsigned int lines)
{
	struct scatterlist *sg;
	unsigned int line, todo;

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* skip whole SG chunks consumed by the remaining offset */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg++;
		}
		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++)=cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
			*(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++)=cpu_to_le32(0); /* bits 63-32 */
			offset+=bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			*(rp++)=cpu_to_le32(RISC_WRITE|RISC_SOL|
					    (sg_dma_len(sg)-offset));
			*(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++)=cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg++;
			/* middle chunks: full-chunk writes, no SOL/EOL */
			while (todo > sg_dma_len(sg)) {
				*(rp++)=cpu_to_le32(RISC_WRITE|
						    sg_dma_len(sg));
				*(rp++)=cpu_to_le32(sg_dma_address(sg));
				*(rp++)=cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg++;
			}
			/* final chunk of the split line carries EOL */
			*(rp++)=cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++)=cpu_to_le32(sg_dma_address(sg));
			*(rp++)=cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}

/*
 * Build a RISC DMA program for an interlaced video buffer: optional top
 * and bottom fields (UNSET offset skips a field), resynced at lines 0
 * and 0x200 respectively.  Allocates the RISC memory and records where
 * the caller must append the jump instruction (risc->jmp).
 * Returns 0 or a negative errno from the allocation.
 */
int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
			struct scatterlist *sglist,
			unsigned int top_offset, unsigned int bottom_offset,
			unsigned int bpl, unsigned int padding,
			unsigned int lines)
{
	u32 instructions, fields;
	u32 *rp;
	int rc;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines);
	instructions += 2;
	if ((rc = btcx_riscmem_alloc(pci,risc,instructions*12)) < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
	return 0;
}

/*
 * Build a RISC DMA program for a plain (non-interlaced) data buffer:
 * no sync, no padding.  As above, risc->jmp marks where the caller
 * appends the jump.  Returns 0 or a negative errno.
 */
int cx23885_risc_databuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
			    struct scatterlist *sglist, unsigned int bpl,
			    unsigned int lines)
{
	u32 instructions;
	u32 *rp;
	int rc;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Here
	   there is no padding and no sync.  First DMA region may be
	   smaller than PAGE_SIZE */
	/* Jump and write need an extra dword */
	instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 1;

	if ((rc = btcx_riscmem_alloc(pci,risc,instructions*12)) < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
	return 0;
}

/*
 * Build the "stopper" RISC program: one WRITECR that applies
 * `value`/`mask` to register `reg` and raises IRQ2, followed by a jump
 * to itself so the RISC engine parks harmlessly.
 * Returns 0 or a negative errno from the allocation.
 */
int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
			 u32 reg, u32 mask, u32 value)
{
	u32 *rp;
	int rc;

	if ((rc = btcx_riscmem_alloc(pci, risc, 4*16)) < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	*(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
	*(rp++) = cpu_to_le32(reg);
	*(rp++) = cpu_to_le32(value);
	*(rp++) = cpu_to_le32(mask);
	*(rp++) = cpu_to_le32(RISC_JUMP);
	*(rp++) = cpu_to_le32(risc->dma); /* jump to self: park the engine */
	*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	return 0;
}

/*
 * Release a video buffer: wait for any in-flight I/O, unmap and free
 * its DMA scatter list and its RISC program, then mark it re-initable.
 * Must not be called from interrupt context (may sleep in waiton).
 */
void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);

	BUG_ON(in_interrupt());
	videobuf_waiton(&buf->vb, 0, 0);
	videobuf_dma_unmap(q, dma);
	videobuf_dma_free(dma);
	btcx_riscmem_free((struct pci_dev *)q->dev, &buf->risc);
	buf->vb.state = STATE_NEEDS_INIT;
}

/* NOTE: cx23885_start_dma() continues past the end of this chunk; only
 * the start of its parameter list is visible here. */
static int cx23885_start_dma(struct cx23885_tsport *port,
			     struct cx23885_dmaqueue *q,
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -