⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 sd.c

📁 GNU Mach 微内核源代码, 基于美国卡内基美隆大学的 Mach 研究项目
💻 C
📖 第 1 页 / 共 4 页
字号:
	/* If we had an ILLEGAL REQUEST returned, then we may have
	 * performed an unsupported command.  The only thing this should be
	 * would be a ten byte read where only a six byte read was supported.
	 * Also, on a system where READ CAPACITY failed, we have read past
	 * the end of the disk.
	 */
	if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
	    if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
		/* Fall back to six-byte CDBs for this disk and retry. */
		rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
		requeue_sd_request(SCpnt);
		result = 0;
	    } else {
		/* ???? */
	    }
	}
	/* Unrecoverable media defect: log the CDB and full sense data,
	 * fail only the affected sectors, then requeue the remainder. */
	if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) {
	    printk("scsi%d: MEDIUM ERROR on channel %d, id %d, lun %d, CDB: ",
		   SCpnt->host->host_no, (int) SCpnt->channel, 
		   (int) SCpnt->target, (int) SCpnt->lun);
	    print_command(SCpnt->cmnd);
	    print_sense("sd", SCpnt);
	    SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
	    requeue_sd_request(SCpnt);
	    return;
	 }
    }  /* driver byte != 0 */

    /* Any other non-zero result: report it, dump sense data if the
     * low-level driver captured any, fail the current chunk and move on. */
    if (result) {
	printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
	       rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
	       rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
	   rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
	     rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);

	if (driver_byte(result) & DRIVER_SENSE)
	    print_sense("sd", SCpnt);
	SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
	requeue_sd_request(SCpnt);
	return;
    }
}

/*
 * requeue_sd_request() is the request handler function for the sd driver.
 * Its function in life is to take block device requests, and translate
 * them to SCSI commands.  do_sd_request() below is the block-layer entry
 * point; it picks a request off the queue, allocates a Scsi_Cmnd for it,
 * and hands it to requeue_sd_request() for translation.
 */
static void do_sd_request (void)
{
    Scsi_Cmnd * SCpnt = NULL;
    Scsi_Device * SDev;
    struct request * req = NULL;
    unsigned long flags;
    int flag = 0;		/* ensures allocate_device is called at most once */

    save_flags(flags);
    while (1==1){
	cli();			/* queue inspection must run with interrupts off */
	if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
	    restore_flags(flags);
	    return;
	}

	INIT_SCSI_REQUEST;
        SDev = rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device;

        /*
         * I am not sure where the best place to do this is.  We need
         * to hook in a place where we are likely to come if in user
         * space.
         */
        if( SDev->was_reset )
        {
	    /*
	     * We need to relock the door, but we might
	     * be in an interrupt handler.  Only do this
	     * from user space, since we do not want to
	     * sleep from an interrupt.
	     */
	    if( SDev->removable && !intr_count )
	    {
                scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
		/* scsi_ioctl may allow CURRENT to change, so start over. */
		SDev->was_reset = 0;
		continue;
	    }
	    SDev->was_reset = 0;
        }

	/* We have to be careful here. allocate_device will get a free pointer,
	 * but there is no guarantee that it is queueable.  In normal usage, 
	 * we want to call this, because other types of devices may have the 
	 * host all tied up, and we want to make sure that we have at least 
	 * one request pending for this type of device. We can also come 
	 * through here while servicing an interrupt, because of the need to 
	 * start another command. If we call allocate_device more than once, 
	 * then the system can wedge if the command is not queueable. The 
	 * request_queueable function is safe because it checks to make sure 
	 * that the host is able to take another command before it returns
	 * a pointer.  
	 */
	if (flag++ == 0)
	    SCpnt = allocate_device(&CURRENT,
			   rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device, 0); 
	else SCpnt = NULL;

	/*
	 * The following restore_flags leads to latency problems.  FIXME.
	 * Using a "sti()" gets rid of the latency problems but causes
	 * race conditions and crashes.
	 */
	restore_flags(flags);

	/* This is a performance enhancement. We dig down into the request 
	 * list and try to find a queueable request (i.e. device not busy, 
	 * and host able to accept another command. If we find one, then we 
	 * queue it. This can make a big difference on systems with more than 
	 * one disk drive.  We want to have the interrupts off when monkeying 
	 * with the request list, because otherwise the kernel might try to 
	 * slip in a request in between somewhere. 
	 */
	if (!SCpnt && sd_template.nr_dev > 1){
	    struct request *req1;
	    req1 = NULL;
	    cli();
	    req = CURRENT;
	    while(req){
		SCpnt = request_queueable(req,
                                    rscsi_disks[DEVICE_NR(req->rq_dev)].device);
		if(SCpnt) break;
		req1 = req;	/* req1 trails req for the unlink below */
		req = req->next;
	    }
	    /* Unlink the request we claimed from the pending list. */
	    if (SCpnt && req->rq_status == RQ_INACTIVE) {
		if (req == CURRENT) 
		    CURRENT = CURRENT->next;
		else
		    req1->next = req->next;
	    }
	    restore_flags(flags);
	}

	if (!SCpnt) return; /* Could not find anything to do */

	/* Queue command */
	requeue_sd_request(SCpnt);
    }  /* While */
}    

/*
 * Translate the block request attached to SCpnt into a SCSI READ/WRITE
 * command, building a scatter-gather list and/or ISA DMA bounce buffers
 * as needed.  Jumps back to `repeat' after failing a chunk of a request.
 */
static void requeue_sd_request (Scsi_Cmnd * SCpnt)
{
    int dev, devm, block, this_count;
    unsigned char cmd[10];
    int bounce_size, contiguous;
    int max_sg;
    struct buffer_head * bh, *bhp;
    char * buff, *bounce_buffer;

 repeat:

    if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
	do_sd_request();
	return;
    }

    devm =  MINOR(SCpnt->request.rq_dev);	/* minor: disk + partition */
    dev = DEVICE_NR(SCpnt->request.rq_dev);	/* index into rscsi_disks[] */
    block = SCpnt->request.sector;
    this_count = 0;
#ifdef DEBUG
    printk("Doing sd request, dev = %d, block = %d\n", devm, block);
#endif

    /* Reject requests for nonexistent devices or past the partition end. */
    if (devm >= (sd_template.dev_max << 4) || 
	!rscsi_disks[dev].device ||
	block + SCpnt->request.nr_sectors > sd[devm].nr_sects)
    {
	SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
	goto repeat;
    }

    /* Translate the partition-relative sector to an absolute one. */
    block += sd[devm].start_sect;
    if (rscsi_disks[dev].device->changed)
    {
	/*
	 * quietly refuse to do anything to a changed disc until the changed 
	 * bit has been reset
	 */
	/* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
	SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
	goto repeat;
    }

#ifdef DEBUG
    printk("sd%c : real dev = /dev/sd%c, block = %d\n", 
	   'a' + devm, dev, block);
#endif

    /*
     * If we have a 1K hardware sectorsize, prevent access to single
     * 512 byte sectors.  In theory we could handle this - in fact
     * the scsi cdrom driver must be able to handle this because
     * we typically use 1K blocksizes, and cdroms typically have
     * 2K hardware sectorsizes.  Of course, things are simpler
     * with the cdrom, since it is read-only.  For performance
     * reasons, the filesystems should be able to handle this
     * and not force the scsi disk driver to use bounce buffers
     * for this.
     */
    if (rscsi_disks[dev].sector_size == 1024)
	if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
	    printk("sd.c:Bad block number requested");
	    SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
	    goto repeat;
	}

    /* Pick the six-byte CDB opcode for the transfer direction; writes to
     * a read-only device are failed here. */
    switch (SCpnt->request.cmd)
    {
    case WRITE :
	if (!rscsi_disks[dev].device->writeable)
	{
	    SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
	    goto repeat;
	}
	cmd[0] = WRITE_6;
	break;
    case READ :
	cmd[0] = READ_6;
	break;
    default :
	panic ("Unknown sd command %d\n", SCpnt->request.cmd);
    }

    SCpnt->this_count = 0;

    /* If the host adapter can deal with very large scatter-gather
     * requests, it is a waste of time to cluster 
     */
    contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
    bounce_buffer = NULL;
    bounce_size = (SCpnt->request.nr_sectors << 9);

    /* First see if we need a bounce buffer for this request. If we do, make 
     * sure that we can allocate a buffer. Do not waste space by allocating 
     * a bounce buffer if we are straddling the 16Mb line 
     */
    if (contiguous && SCpnt->request.bh &&
	((long) SCpnt->request.bh->b_data) 
	+ (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD 
	&& SCpnt->host->unchecked_isa_dma) {
	if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
	    bounce_buffer = (char *) scsi_malloc(bounce_size);
	if(!bounce_buffer) contiguous = 0;
    }

    /* The request only stays "contiguous" if every buffer in the chain is
     * physically adjacent to its successor. */
    if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
	for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, 
	    bhp = bhp->b_reqnext) {
	    if(!CONTIGUOUS_BUFFERS(bh,bhp)) { 
		if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
		contiguous = 0;
		break;
	    } 
	}

    if (!SCpnt->request.bh || contiguous) {

	/* case of page request (i.e. raw device), or unlinked buffer */
	this_count = SCpnt->request.nr_sectors;
	buff = SCpnt->request.buffer;
	SCpnt->use_sg = 0;

    } else if (SCpnt->host->sg_tablesize == 0 ||
	       (need_isa_buffer && dma_free_sectors <= 10)) {

	/* Case of host adapter that cannot scatter-gather.  We also
	 * come here if we are running low on DMA buffer memory.  We set
	 * a threshold higher than that we would need for this request so
	 * we leave room for other requests.  Even though we would not need
	 * it all, we need to be conservative, because if we run low enough
	 * we have no choice but to panic. 
	 */
	if (SCpnt->host->sg_tablesize != 0 &&
	    need_isa_buffer && 
	    dma_free_sectors <= 10)
	    printk("Warning: SCSI DMA buffer space running low.  Using non scatter-gather I/O.\n");

	this_count = SCpnt->request.current_nr_sectors;
	buff = SCpnt->request.buffer;
	SCpnt->use_sg = 0;

    } else {

	/* Scatter-gather capable host adapter */
	struct scatterlist * sgpnt;
	int count, this_count_max;
	int counted;

	bh = SCpnt->request.bh;
	this_count = 0;
	/* A ten-byte CDB carries a 16-bit sector count; a six-byte CDB
	 * only an 8-bit one. */
	this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
	count = 0;
	bhp = NULL;

	/* First pass: walk the buffer chain counting how many
	 * scatter-gather segments are needed, merging buffers that are
	 * physically contiguous (when the device allows clustering). */
	while(bh) {
	    if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
	    if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
	       !CLUSTERABLE_DEVICE(SCpnt) ||
	       (SCpnt->host->unchecked_isa_dma &&
		((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
		if (count < SCpnt->host->sg_tablesize) count++;
		else break;
	    }
	    this_count += (bh->b_size >> 9);
	    bhp = bh;
	    bh = bh->b_reqnext;
	}
#if 0
	if(SCpnt->host->unchecked_isa_dma &&
	   ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
#endif
	SCpnt->use_sg = count;  /* Number of chains */
	/* scsi_malloc can only allocate in chunks of 512 bytes */
	count  = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511;
	SCpnt->sglist_len = count;
	max_sg = count / sizeof(struct scatterlist);
	if(SCpnt->host->sg_tablesize < max_sg) 
	    max_sg = SCpnt->host->sg_tablesize;
	sgpnt = (struct scatterlist * ) scsi_malloc(count);
	if (!sgpnt) {
	    printk("Warning - running *really* short on DMA buffers\n");
	    SCpnt->use_sg = 0;    /* No memory left - bail out */
	    this_count = SCpnt->request.current_nr_sectors;
	    buff = SCpnt->request.buffer;
	} else {
	    memset(sgpnt, 0, count);  /* Zero so it is easy to fill, but only
				       * if memory is available 
				       */
	    buff = (char *) sgpnt;
	    counted = 0;
	    /* Second pass: actually fill in the scatterlist entries. */
	    for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
		count < SCpnt->use_sg && bh; 
		count++, bh = bhp) {

		bhp = bh->b_reqnext;

		if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
		sgpnt[count].length += bh->b_size;
		counted += bh->b_size >> 9;

		/* Segment lies above the ISA DMA limit on an adapter that
		 * cannot reach it: stash the real address in alt_address
		 * and try to get a DMA-safe bounce buffer instead. */
		if (((long) sgpnt[count].address) + sgpnt[count].length - 1 > 
		    ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
		    !sgpnt[count].alt_address) {
		    sgpnt[count].alt_address = sgpnt[count].address;
		    /* We try to avoid exhausting the DMA pool, since it is 
		     * easier to control usage here. In other places we might 
		     * have a more pressing need, and we would be screwed if 
		     * we ran out */
		    if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
			sgpnt[count].address = NULL;
		    } else {
			sgpnt[count].address = 
			    (char *) scsi_malloc(sgpnt[count].length);
		    }
		    /* If we start running low on DMA buffers, we abort the 
		     * scatter-gather operation, and free all of the memory 
		     * we have allocated.  We want to ensure that all scsi 
		     * operations are able to do at least a non-scatter/gather
		     * operation */
		    if(sgpnt[count].address == NULL){ /* Out of dma memory */
#if 0
			printk("Warning: Running low on SCSI DMA buffers");
			/* Try switching back to a non s-g operation. */
			while(--count >= 0){
			    if(sgpnt[count].alt_address) 
				scsi_free(sgpnt[count].address, 
					  sgpnt[count].length);
			}
			this_count = SCpnt->request.current_nr_sectors;
			buff = SCpnt->request.buffer;
			SCpnt->use_sg = 0;
			scsi_free(sgpnt, SCpnt->sglist_len);
#endif
			/* Truncate the transfer at the segments built so far;
			 * the remainder of the request stays queued. */
			SCpnt->use_sg = count;
			this_count = counted -= bh->b_size >> 9;
			break;
		    }

		}

		/* Only cluster buffers if we know that we can supply DMA 
		 * buffers large enough to satisfy the request. Do not cluster
		 * a new request if this would mean that we suddenly need to 
		 * start using DMA bounce buffers */
		if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) 
		   && CLUSTERABLE_DEVICE(SCpnt)) {
		    char * tmp;

		    /* Clustering would push a non-bounced segment over the
		     * DMA limit: leave this segment as-is. */
		    if (((long) sgpnt[count].address) + sgpnt[count].length +
			bhp->b_size - 1 > ISA_DMA_THRESHOLD && 
			(SCpnt->host->unchecked_isa_dma) &&
			!sgpnt[count].alt_address) continue;

		    /* No bounce buffer in use: merge the next buffer into
		     * this slot by reusing it on the next iteration. */
		    if(!sgpnt[count].alt_address) {count--; continue; }
		    if(dma_free_sectors > 10)
			tmp = (char *) scsi_malloc(sgpnt[count].length 
						   + bhp->b_size);
		    else {
			tmp = NULL;
			max_sg = SCpnt->use_sg;
		    }
		    /* Got a larger bounce buffer: swap it in and merge. */
		    if(tmp){
			scsi_free(sgpnt[count].address, sgpnt[count].length);
			sgpnt[count].address = tmp;
			count--;
			continue;
		    }

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -