📄 pcilynx.c
        pcl.buffer[0].pointer = d->header_dma;
        pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
        pcl.buffer[1].pointer = d->data_dma;

        switch (packet->type) {
        case hpsb_async:
                pcl.buffer[0].control |= PCL_CMD_XMT;
                break;
        case hpsb_iso:
                pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
                break;
        case hpsb_raw:
                pcl.buffer[0].control |= PCL_CMD_UNFXMT;
                break;
        }

        if (!packet->data_be) {
                pcl.buffer[1].control |= PCL_BIGENDIAN;
        }

        put_pcl(lynx, d->pcl, &pcl);
        run_pcl(lynx, d->pcl_start, d->channel);
}

#if 0
static int lynx_detect(struct hpsb_host_template *tmpl)
{
        struct hpsb_host *host;
        int i;

        init_driver();

        for (i = 0; i < num_of_cards; i++) {
                host = hpsb_get_host(tmpl, 0);
                if (host == NULL) {
                        /* simply don't init more after out of mem */
                        return i;
                }

                host->hostdata = &cards[i];
                cards[i].host = host;
        }

        return num_of_cards;
}
#endif

static int lynx_initialize(struct hpsb_host *host)
{
        struct ti_lynx *lynx = host->hostdata;
        struct ti_pcl pcl;
        int i;
        u32 *pcli;

        lynx->selfid_size = -1;
        lynx->phy_reg0 = -1;
        lynx->async.queue = NULL;

        pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
        put_pcl(lynx, lynx->rcv_pcl_start, &pcl);

        pcl.next = PCL_NEXT_INVALID;
        pcl.async_error_next = PCL_NEXT_INVALID;
#ifdef __BIG_ENDIAN
        pcl.buffer[0].control = PCL_CMD_RCV | 16;
        pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
#else
        pcl.buffer[0].control = PCL_CMD_RCV | PCL_BIGENDIAN | 16;
        pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
#endif
        pcl.buffer[0].pointer = lynx->rcv_page_dma;
        pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
        put_pcl(lynx, lynx->rcv_pcl, &pcl);

        pcl.next = pcl_bus(lynx, lynx->async.pcl);
        pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
        put_pcl(lynx, lynx->async.pcl_start, &pcl);

        pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
        pcl.async_error_next = PCL_NEXT_INVALID;
        put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);

        pcl.next = PCL_NEXT_INVALID;
        pcl.async_error_next = PCL_NEXT_INVALID;
        pcl.buffer[0].control = PCL_CMD_RCV | 4;
#ifndef __BIG_ENDIAN
        pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
        pcl.buffer[1].control = PCL_LAST_BUFF | 2044;

        for (i = 0; i < NUM_ISORCV_PCL; i++) {
                int page = i / ISORCV_PER_PAGE;
                int sec = i % ISORCV_PER_PAGE;

                pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
                                        + sec * MAX_ISORCV_SIZE;
                pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
                put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
        }

        pcli = (u32 *)&pcl;
        for (i = 0; i < NUM_ISORCV_PCL; i++) {
                pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
        }
        put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);

        /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
        reg_write(lynx, FIFO_SIZES, 0x003030a0);
        /* 20 byte threshold before triggering PCI transfer */
        reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
        /* threshold on both send FIFOs before transmitting:
           FIFO size - cache line size - 1 */
        i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
        i = 0x30 - i - 1;
        reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
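        /* Worked example (illustrative values, not read from real hardware):
         * if the cache line field of PCI_LATENCY_CACHELINE reads back as
         * 0x08, then with the 0x30-entry send FIFOs configured above the
         * threshold becomes 0x30 - 0x08 - 1 = 0x27, and the value written
         * to FIFO_XMIT_THRESHOLD is (0x27 << 8) | 0x27 = 0x2727, i.e. the
         * same threshold for both transmit FIFOs. */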
        reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);

        reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
                  | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
                  | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
                  | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
                  | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
                  | LINK_INT_ATF_UNDERFLOW);

        reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
        reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
        reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
        reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
                  DMA_WORD1_CMP_MATCH_LOCAL_NODE
                  | DMA_WORD1_CMP_MATCH_BROADCAST
                  | DMA_WORD1_CMP_MATCH_EXACT
                  | DMA_WORD1_CMP_MATCH_BUS_BCAST
                  | DMA_WORD1_CMP_ENABLE_SELF_ID
                  | DMA_WORD1_CMP_ENABLE_MASTER);

        run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);

        reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
        reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
        reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
        reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);

        run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);

        reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
                  | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
                  | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
                  | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);

        if (!lynx->phyic.reg_1394a) {
                /* attempt to enable contender bit -FIXME- would this work
                 * elsewhere? */
                reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
                reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
        } else {
                /* set the contender bit in the extended PHY register
                 * set.  (Should check that bits 0,1,2 (=0xE0) are set
                 * in register 2?)
                 */
                i = get_phy_reg(lynx, 4);
                if (i != -1)
                        set_phy_reg(lynx, 4, i | 0x40);
        }

        return 1;
}

static void lynx_release(struct hpsb_host *host)
{
        struct ti_lynx *lynx;

        if (host != NULL) {
                lynx = host->hostdata;
                remove_card(lynx->dev);
        } else {
#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
                unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
#endif
        }
}

static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
        struct ti_lynx *lynx = host->hostdata;
        struct lynx_send_data *d;
        unsigned long flags;

        if (packet->data_size >= 4096) {
                PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
                      packet->data_size);
                return 0;
        }

        switch (packet->type) {
        case hpsb_async:
        case hpsb_raw:
                d = &lynx->async;
                break;
        case hpsb_iso:
                d = &lynx->iso_send;
                break;
        default:
                PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
                      packet->type);
                return 0;
        }

        packet->xnext = NULL;
        if (packet->tcode == TCODE_WRITEQ
            || packet->tcode == TCODE_READQ_RESPONSE) {
                cpu_to_be32s(&packet->header[3]);
        }

        spin_lock_irqsave(&d->queue_lock, flags);

        if (d->queue == NULL) {
                d->queue = packet;
                d->queue_last = packet;
                send_next(lynx, packet->type);
        } else {
                d->queue_last->xnext = packet;
                d->queue_last = packet;
        }

        spin_unlock_irqrestore(&d->queue_lock, flags);

        return 1;
}
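/* Note on lynx_transmit() above: quadlet write requests and quadlet read
 * responses carry their payload quadlet in header[3], which is byte swapped
 * to bus (big endian) order in place before the packet is queued.  Packets
 * are chained through packet->xnext under queue_lock; send_next() is only
 * kicked when the queue was empty, later packets are appended to the tail. */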
" and attempting to become root" : "")); lynx->selfid_size = -1; lynx->phy_reg0 = -1; set_phy_reg(lynx, 1, arg); break; case GET_CYCLE_COUNTER: retval = reg_read(lynx, CYCLE_TIMER); break; case SET_CYCLE_COUNTER: reg_write(lynx, CYCLE_TIMER, arg); break; case SET_BUS_ID: reg_write(lynx, LINK_ID, (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000)); break; case ACT_CYCLE_MASTER: if (arg) { reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); } else { reg_clear_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); } break; case CANCEL_REQUESTS: spin_lock_irqsave(&lynx->async.queue_lock, flags); reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0); packet = lynx->async.queue; lynx->async.queue = NULL; spin_unlock_irqrestore(&lynx->async.queue_lock, flags); while (packet != NULL) { lastpacket = packet; packet = packet->xnext; hpsb_packet_sent(host, lastpacket, ACKX_ABORTED); } break; case MODIFY_USAGE: if (arg) { MOD_INC_USE_COUNT; } else { MOD_DEC_USE_COUNT; } break; case ISO_LISTEN_CHANNEL: spin_lock_irqsave(&lynx->iso_rcv.lock, flags); if (lynx->iso_rcv.chan_count++ == 0) { reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), DMA_WORD1_CMP_ENABLE_MASTER); } spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags); break; case ISO_UNLISTEN_CHANNEL: spin_lock_irqsave(&lynx->iso_rcv.lock, flags); if (--lynx->iso_rcv.chan_count == 0) { reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0); } spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags); break; default: PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd); retval = -1; } return retval;}/*************************************** * IEEE-1394 functionality section END * ***************************************/#ifdef CONFIG_IEEE1394_PCILYNX_PORTS/* VFS functions for local bus / aux device access. Access to those * is implemented as a character device instead of block devices * because buffers are not wanted for this. Therefore llseek (from * VFS) can be used for these char devices with obvious effects. */static int mem_open(struct inode*, struct file*);static int mem_release(struct inode*, struct file*);static unsigned int aux_poll(struct file*, struct poll_table_struct*);static loff_t mem_llseek(struct file*, loff_t, int);static ssize_t mem_read (struct file*, char*, size_t, loff_t*);static ssize_t mem_write(struct file*, const char*, size_t, loff_t*);static struct file_operations aux_ops = { OWNER_THIS_MODULE read: mem_read, write: mem_write, poll: aux_poll, llseek: mem_llseek, open: mem_open, release: mem_release,};static void aux_setup_pcls(struct ti_lynx *lynx){ struct ti_pcl pcl; pcl.next = PCL_NEXT_INVALID; pcl.user_data = pcl_bus(lynx, lynx->dmem_pcl); put_pcl(lynx, lynx->dmem_pcl, &pcl);}static int mem_open(struct inode *inode, struct file *file){ int cid = MINOR(inode->i_rdev); enum { t_rom, t_aux, t_ram } type; struct memdata *md; V22_COMPAT_MOD_INC_USE_COUNT; if (cid < PCILYNX_MINOR_AUX_START) { /* just for completeness */ V22_COMPAT_MOD_DEC_USE_COUNT; return -ENXIO; } else if (cid < PCILYNX_MINOR_ROM_START) { cid -= PCILYNX_MINOR_AUX_START; if (cid >= num_of_cards || !cards[cid].aux_port) { V22_COMPAT_MOD_DEC_USE_COUNT; return -ENXIO; } type = t_aux; } else if (cid < PCILYNX_MINOR_RAM_START) { cid -= PCILYNX_MINOR_ROM_START; if (cid >= num_of_cards || !cards[cid].local_rom) { V22_COMPAT_MOD_DEC_USE_COUNT; return -ENXIO; } type = t_rom; } else { /* WARNING: Know what you are doing when opening RAM. * It is currently used inside the driver! 
static int mem_open(struct inode *inode, struct file *file)
{
        int cid = MINOR(inode->i_rdev);
        enum { t_rom, t_aux, t_ram } type;
        struct memdata *md;

        V22_COMPAT_MOD_INC_USE_COUNT;

        if (cid < PCILYNX_MINOR_AUX_START) {
                /* just for completeness */
                V22_COMPAT_MOD_DEC_USE_COUNT;
                return -ENXIO;
        } else if (cid < PCILYNX_MINOR_ROM_START) {
                cid -= PCILYNX_MINOR_AUX_START;
                if (cid >= num_of_cards || !cards[cid].aux_port) {
                        V22_COMPAT_MOD_DEC_USE_COUNT;
                        return -ENXIO;
                }
                type = t_aux;
        } else if (cid < PCILYNX_MINOR_RAM_START) {
                cid -= PCILYNX_MINOR_ROM_START;
                if (cid >= num_of_cards || !cards[cid].local_rom) {
                        V22_COMPAT_MOD_DEC_USE_COUNT;
                        return -ENXIO;
                }
                type = t_rom;
        } else {
                /* WARNING: Know what you are doing when opening RAM.
                 * It is currently used inside the driver! */
                cid -= PCILYNX_MINOR_RAM_START;
                if (cid >= num_of_cards || !cards[cid].local_ram) {
                        V22_COMPAT_MOD_DEC_USE_COUNT;
                        return -ENXIO;
                }
                type = t_ram;
        }

        md = (struct memdata *)kmalloc(sizeof(struct memdata), SLAB_KERNEL);
        if (md == NULL) {
                V22_COMPAT_MOD_DEC_USE_COUNT;