cpqarray.c
	c->req.hdr.blk = ida[(h->ctlr << CTLR_SHIFT) + MINOR(creq->rq_dev)].start_sect
			+ creq->sector;
	c->rq = creq;
DBGPX(
	if (bh == NULL)
		panic("bh == NULL?");

	printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
);
	seg = 0;
	lastdataend = NULL;
	while (bh) {
		if (bh->b_data == lastdataend) {
			/* Contiguous with the previous buffer: widen the
			 * current scatter-gather element instead of
			 * starting a new one. */
			tmp_sg[seg - 1].size += bh->b_size;
			lastdataend += bh->b_size;
		} else {
			if (seg == SG_MAX)
				BUG();
			tmp_sg[seg].size = bh->b_size;
			tmp_sg[seg].start_addr = bh->b_data;
			lastdataend = bh->b_data + bh->b_size;
			seg++;
		}
		bh = bh->b_reqnext;
	}
	/* Now do all the DMA mappings */
	for (i = 0; i < seg; i++) {
		c->req.sg[i].size = tmp_sg[i].size;
		c->req.sg[i].addr = (__u32) pci_map_single(h->pci_dev,
			tmp_sg[i].start_addr, tmp_sg[i].size,
			(creq->cmd == READ) ?
				PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
	}
DBGPX(	printk("Submitting %d sectors in %d segments\n", sect, seg); );
	c->req.hdr.sg_cnt = seg;
	c->req.hdr.blk_cnt = creq->nr_sectors;
	c->req.hdr.cmd = (creq->cmd == READ) ? IDA_READ : IDA_WRITE;
	c->type = CMD_RWREQ;

	spin_lock_irq(&io_request_lock);

	/* Put the request on the tail of the request queue */
	addQ(&h->reqQ, c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;

	goto queue_next;

startio:
	start_io(h);
}

/*
 * start_io submits everything on a controller's request queue
 * and moves it to the completion queue.
 *
 * Interrupts had better be off if you're in here.
 */
static void start_io(ctlr_info_t *h)
{
	cmdlist_t *c;

	while ((c = h->reqQ) != NULL) {
		/* Can't do anything if we're busy */
		if (h->access.fifo_full(h) == 0)
			return;

		/* Get the first entry from the request Q */
		removeQ(&h->reqQ, c);
		h->Qdepth--;

		/* Tell the controller to do our bidding */
		h->access.submit_command(h, c);

		/* Get onto the completion Q */
		addQ(&h->cmpQ, c);
	}
}

static inline void complete_buffers(struct buffer_head *bh, int ok)
{
	struct buffer_head *xbh;

	while (bh) {
		xbh = bh->b_reqnext;
		bh->b_reqnext = NULL;

		blk_finished_io(bh->b_size >> 9);
		bh->b_end_io(bh, ok);

		bh = xbh;
	}
}

/*
 * Mark all buffers that cmd was responsible for
 */
static inline void complete_command(cmdlist_t *cmd, int timeout)
{
	int ok = 1;
	int i;

	if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
	    (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
		printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
				cmd->ctlr, cmd->hdr.unit);
		hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
	}
	if (cmd->req.hdr.rcode & RCODE_FATAL) {
		printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
				cmd->ctlr, cmd->hdr.unit);
		ok = 0;
	}
	if (cmd->req.hdr.rcode & RCODE_INVREQ) {
		printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
				cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
				cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
				cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
		ok = 0;
	}
	if (timeout)
		ok = 0;
	/* Unmap the DMA mapping for all the scatter-gather elements */
	for (i = 0; i < cmd->req.hdr.sg_cnt; i++) {
		pci_unmap_single(hba[cmd->ctlr]->pci_dev,
			cmd->req.sg[i].addr, cmd->req.sg[i].size,
			(cmd->req.hdr.cmd == IDA_READ) ?
				PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
	}

	complete_buffers(cmd->rq->bh, ok);

	DBGPX(printk("Done with %p\n", cmd->rq););
	end_that_request_last(cmd->rq);
}
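/*
 * Illustrative sketch (not part of the driver): the coalescing loop in
 * do_ida_request() above merges buffer_heads whose data areas are
 * contiguous into a single scatter-gather element before DMA mapping.
 * The standalone version below shows just that merging rule, using
 * hypothetical ex_* types in place of the kernel's buffer_head and the
 * driver's SG table.
 */
#define EX_SG_MAX 32	/* stand-in for the driver's SG_MAX */

struct ex_buf { char *data; unsigned int size; struct ex_buf *next; };
struct ex_sg  { char *addr; unsigned int size; };

/* Build an SG list from a buffer chain; returns the number of entries,
 * or -1 where the driver would BUG() on overflow. */
static int ex_build_sg(struct ex_buf *b, struct ex_sg *sg)
{
	int seg = 0;
	char *lastdataend = NULL;

	for (; b != NULL; b = b->next) {
		if (b->data == lastdataend) {
			/* Contiguous with the previous buffer: widen the
			 * current element instead of adding a new one. */
			sg[seg - 1].size += b->size;
			lastdataend += b->size;
		} else {
			if (seg == EX_SG_MAX)
				return -1;
			sg[seg].addr = b->data;
			sg[seg].size = b->size;
			lastdataend = b->data + b->size;
			seg++;
		}
	}
	return seg;
}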
/*
 * The controller will interrupt us upon completion of commands.
 * Find the command on the completion queue, remove it, tell the OS and
 * try to queue up more IO.
 */
static void do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	ctlr_info_t *h = dev_id;
	cmdlist_t *c;
	unsigned long istat;
	unsigned long flags;
	__u32 a, a1;

	istat = h->access.intr_pending(h);
	/* Is this interrupt for us? */
	if (istat == 0)
		return;

	/*
	 * If there are completed commands in the completion queue,
	 * we had better do something about it.
	 */
	spin_lock_irqsave(&io_request_lock, flags);
	if (istat & FIFO_NOT_EMPTY) {
		while ((a = h->access.command_completed(h))) {
			a1 = a;
			a &= ~3;	/* low two bits carry status, not address */
			if ((c = h->cmpQ) == NULL) {
				printk(KERN_WARNING
					"cpqarray: Completion of %08lx ignored\n",
					(unsigned long)a1);
				continue;
			}
			while (c->busaddr != a) {
				c = c->next;
				if (c == h->cmpQ)
					break;
			}
			/*
			 * If we've found the command, take it off the
			 * completion Q and free it.
			 */
			if (c->busaddr == a) {
				removeQ(&h->cmpQ, c);
				/*
				 * Check for an invalid command: the
				 * controller returned a command error,
				 * but rcode == 0.
				 */
				if ((a1 & 0x03) && (c->req.hdr.rcode == 0))
					c->req.hdr.rcode = RCODE_INVREQ;
				if (c->type == CMD_RWREQ) {
					complete_command(c, 0);
					cmd_free(h, c, 1);
				} else if (c->type == CMD_IOCTL_PEND) {
					c->type = CMD_IOCTL_DONE;
				}
				continue;
			}
		}
	}

	/*
	 * See if we can queue up some more IO
	 */
	do_ida_request(BLK_DEFAULT_QUEUE(MAJOR_NR + h->ctlr));
	spin_unlock_irqrestore(&io_request_lock, flags);
}

/*
 * This timer was for timing out requests that haven't happened after
 * IDA_TIMEOUT.  That wasn't such a good idea.  This timer is used to
 * reset a flags structure so we don't flood the user with
 * "Non-Fatal error" messages.
 */
static void ida_timer(unsigned long tdata)
{
	ctlr_info_t *h = (ctlr_info_t *)tdata;

	h->timer.expires = jiffies + IDA_TIMER;
	add_timer(&h->timer);
	h->misc_tflags = 0;
}
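/*
 * Illustrative sketch (not part of the driver): each completion value
 * do_ida_intr() reads back is the command's bus address with status in
 * the low two bits, which is why the handler masks "a &= ~3" before
 * walking the circular completion queue.  A hypothetical standalone
 * version of that lookup:
 */
struct ex_cmd { unsigned int busaddr; struct ex_cmd *next; };

static struct ex_cmd *ex_find_completed(struct ex_cmd *cmpQ, unsigned int tag)
{
	unsigned int a = tag & ~3u;	/* strip the two status bits */
	struct ex_cmd *c = cmpQ;

	if (c == NULL)
		return NULL;	/* completion for a command we never queued */
	while (c->busaddr != a) {
		c = c->next;
		if (c == cmpQ)	/* wrapped all the way around: not found */
			return NULL;
	}
	return c;
}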
		if (copy_to_user((void *)arg, &pciinfo,
				sizeof(ida_pci_info_struct)))
			return -EFAULT;
		return 0;
	}
	case BLKFLSBUF:
	case BLKBSZSET:
	case BLKBSZGET:
	case BLKROSET:
	case BLKROGET:
	case BLKRASET:
	case BLKRAGET:
	case BLKELVGET:
	case BLKELVSET:
	case BLKPG:
		return blk_ioctl(inode->i_rdev, cmd, arg);
	default:
		return -EINVAL;
	}
}
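/*
 * Illustrative sketch (not part of the driver): rough shape of a userspace
 * IDAPASSTHRU call into ida_ioctl() above, which hands the command block to
 * ida_ctlr_ioctl() below.  The real ida_ioctl_t layout and command constants
 * come from the driver's ioctl header; the fields used here are only the
 * ones the driver itself dereferences, and the device path is hypothetical.
 */
#if 0	/* userspace example; kept out of the kernel build */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int ex_sense_perf(const char *dev)
{
	ida_ioctl_t io = { 0 };		/* from the driver's ioctl header */
	unsigned char buf[512];
	int fd = open(dev, O_RDONLY);	/* e.g. a /dev/ida/c0d0-style node */

	if (fd < 0)
		return -1;
	io.cmd = SENSE_CONTROLLER_PERFORMANCE;
	io.sg[0].addr = (unsigned long)buf;	/* copied back on completion */
	io.sg[0].size = sizeof(buf);
	if (ioctl(fd, IDAPASSTHRU, &io) < 0 || io.rcode != 0)
		fprintf(stderr, "passthru failed, rcode=%x\n",
			(unsigned)io.rcode);
	close(fd);
	return 0;
}
#endif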
/*
 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
 * The command block (io) has already been copied to kernel space for us;
 * however, any elements in the sglist need to be copied to kernel space
 * or copied back to userspace.
 *
 * Only root may perform a controller passthru command; however, I'm not doing
 * any serious sanity checking on the arguments.  Doing an IDA_WRITE_MEDIA and
 * putting a 64M buffer in the sglist is probably a *bad* idea.
 */
static int ida_ctlr_ioctl(int ctlr, int dsk, ida_ioctl_t *io)
{
	ctlr_info_t *h = hba[ctlr];
	cmdlist_t *c;
	void *p = NULL;
	unsigned long flags;
	int error;

	if ((c = cmd_alloc(h, 0)) == NULL)
		return -ENOMEM;
	c->ctlr = ctlr;
	c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
	c->hdr.size = sizeof(rblk_t) >> 2;
	c->size += sizeof(rblk_t);

	c->req.hdr.cmd = io->cmd;
	c->req.hdr.blk = io->blk;
	c->req.hdr.blk_cnt = io->blk_cnt;
	c->type = CMD_IOCTL_PEND;

	/* Pre-submit processing */
	switch (io->cmd) {
	case PASSTHRU_A:
		p = kmalloc(io->sg[0].size, GFP_KERNEL);
		if (!p) {
			error = -ENOMEM;
			cmd_free(h, c, 0);
			return error;
		}
		copy_from_user(p, (void *)io->sg[0].addr, io->sg[0].size);
		c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
				sizeof(ida_ioctl_t), PCI_DMA_BIDIRECTIONAL);
		c->req.sg[0].size = io->sg[0].size;
		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
				c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
		c->req.hdr.sg_cnt = 1;
		break;
	case IDA_READ:
	case READ_FLASH_ROM:
	case SENSE_CONTROLLER_PERFORMANCE:
		p = kmalloc(io->sg[0].size, GFP_KERNEL);
		if (!p) {
			error = -ENOMEM;
			cmd_free(h, c, 0);
			return error;
		}
		c->req.sg[0].size = io->sg[0].size;
		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
				c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
		c->req.hdr.sg_cnt = 1;
		break;
	case IDA_WRITE:
	case IDA_WRITE_MEDIA:
	case DIAG_PASS_THRU:
	case COLLECT_BUFFER:
	case WRITE_FLASH_ROM:
		p = kmalloc(io->sg[0].size, GFP_KERNEL);
		if (!p) {
			error = -ENOMEM;
			cmd_free(h, c, 0);
			return error;
		}
		copy_from_user(p, (void *)io->sg[0].addr, io->sg[0].size);
		c->req.sg[0].size = io->sg[0].size;
		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
				c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
		c->req.hdr.sg_cnt = 1;
		break;
	default:
		c->req.sg[0].size = sizeof(io->c);
		c->req.sg[0].addr = pci_map_single(h->pci_dev, &io->c,
				c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
		c->req.hdr.sg_cnt = 1;
	}

	/* Put the request on the tail of the request queue */
	spin_lock_irqsave(&io_request_lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(&io_request_lock, flags);

	/* Wait for completion */
	while (c->type != CMD_IOCTL_DONE)
		schedule();

	/* Unmap the DMA */
	pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
			PCI_DMA_BIDIRECTIONAL);

	/* Post-submit processing */
	switch (io->cmd) {
	case PASSTHRU_A:
		pci_unmap_single(h->pci_dev, c->req.hdr.blk,
				sizeof(ida_ioctl_t), PCI_DMA_BIDIRECTIONAL);
		/* fall through and copy back */
	case IDA_READ:
	case DIAG_PASS_THRU:
	case SENSE_CONTROLLER_PERFORMANCE:
	case READ_FLASH_ROM:
		copy_to_user((void *)io->sg[0].addr, p, io->sg[0].size);
		/* fall through and free p */
	case IDA_WRITE:
	case IDA_WRITE_MEDIA:
	case COLLECT_BUFFER:
	case WRITE_FLASH_ROM:
		kfree(p);
		break;
	default:
		;	/* Nothing to do */
	}

	io->rcode = c->req.hdr.rcode;
	cmd_free(h, c, 0);
	return 0;
}

/*
 * Commands are pre-allocated in a large block.  Here we use a simple bitmap
 * scheme to suballocate them to the driver.  Operations that are not time
 * critical (and can afford to sleep) can pass get_from_pool == 0 to have a
 * fresh command allocated with pci_alloc_consistent() instead.
 */
static cmdlist_t *cmd_alloc(ctlr_info_t *h, int get_from_pool)
{
	cmdlist_t *c;
	int i;
	dma_addr_t cmd_dhandle;

	if (!get_from_pool) {
		c = (cmdlist_t *)pci_alloc_consistent(h->pci_dev,
				sizeof(cmdlist_t), &cmd_dhandle);
		if (c == NULL)
			return NULL;
	} else {
		do {
			i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
			if (i == NR_CMDS)
				return NULL;
		} while (test_and_set_bit(i % 32, h->cmd_pool_bits + (i / 32)) != 0);
		c = h->cmd_pool + i;
		cmd_dhandle = h->cmd_pool_dhandle + i * sizeof(cmdlist_t);
		h->nr_allocs++;
	}

	memset(c, 0, sizeof(cmdlist_t));
	c->busaddr = cmd_dhandle;
	return c;
}

static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
{
	int i;

	if (!got_from_pool) {
		pci_free_consistent(h->pci_dev, sizeof(cmdlist_t),
				c, c->busaddr);
	} else {
		/* Return the slot to the pool by clearing its bitmap bit. */
		i = c - h->cmd_pool;
		clear_bit(i % 32, h->cmd_pool_bits + (i / 32));
		h->nr_frees++;
	}
}
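/*
 * Illustrative sketch (not part of the driver): cmd_alloc()/cmd_free()
 * above suballocate command slots from a preallocated pool with a bitmap,
 * retrying test_and_set_bit() in case another CPU claims the same bit
 * between the search and the set.  A hypothetical single-threaded version
 * of the same scheme, without the atomics:
 */
#define EX_NR_CMDS 128

static unsigned long ex_pool_bits[EX_NR_CMDS / (8 * sizeof(unsigned long))];

static int ex_slot_alloc(void)
{
	const int bpw = 8 * sizeof(unsigned long);	/* bits per word */
	int i;

	for (i = 0; i < EX_NR_CMDS; i++) {
		unsigned long mask = 1UL << (i % bpw);

		if (!(ex_pool_bits[i / bpw] & mask)) {
			ex_pool_bits[i / bpw] |= mask;	/* claim the slot */
			return i;
		}
	}
	return -1;	/* pool exhausted */
}

static void ex_slot_free(int i)
{
	const int bpw = 8 * sizeof(unsigned long);

	ex_pool_bits[i / bpw] &= ~(1UL << (i % bpw));
}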