dasd.c

From the Linux 2.6.17.4 kernel source · C code · 2,193 lines total · page 1 of 4

C
2,193
Font size
		/*
		 * NOTE(review): this is the tail of __dasd_process_ccw_queue();
		 * the loop head and function prologue lie before this chunk.
		 * The scan stops at the first request that is neither final
		 * (DONE/FAILED) nor in error state.
		 */
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_ERROR)
			break;
		/* Process requests with DASD_CQR_ERROR. */
		if (cqr->status == DASD_CQR_ERROR) {
			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
				/* Request was halted - fail it immediately. */
				cqr->status = DASD_CQR_FAILED;
				cqr->stopclk = get_clock();
			} else {
				if (cqr->irb.esw.esw0.erw.cons) {
					/*
					 * Sense data is available: let the
					 * discipline choose the error
					 * recovery (ERP) action.
					 */
					erp_fn = device->discipline->
						erp_action(cqr);
					erp_fn(cqr);
				} else
					dasd_default_erp_action(cqr);
			}
			goto restart;
		}
		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(device) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
			/*
			 * Restart the request: it is re-queued and the device
			 * is quiesced until error reporting is done.
			 */
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries = 255;
			device->stopped |= DASD_STOPPED_QUIESCE;
			goto restart;
		}
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			goto restart;
		}
		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->list, final_queue);
	}
}

/*
 * Callback attached to block device requests by __dasd_process_blk_queue():
 * frees the channel program via the discipline's free_cp() and completes
 * the struct request with the status free_cp() returned.
 */
static void
dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
{
	struct request *req;
	struct dasd_device *device;
	int status;

	req = (struct request *) data;
	device = cqr->device;
	dasd_profile_end(device, cqr, req);
	status = cqr->device->discipline->free_cp(cqr,req);
	spin_lock_irq(&device->request_queue_lock);
	dasd_end_request(req, status);
	spin_unlock_irq(&device->request_queue_lock);
}

/*
 * Fetch requests from the block device queue and move them onto the ccw
 * queue, up to DASD_CHANQ_MAX_SIZE queued requests.
 * NOTE(review): appears to rely on the caller (dasd_tasklet) holding both
 * request_queue_lock and the ccw device lock - confirm against callers.
 */
static inline void
__dasd_process_blk_queue(struct dasd_device * device)
{
	request_queue_t *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	int nr_queued;

	queue = device->request_queue;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;
	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (device->state != DASD_STATE_READY &&
	    device->state != DASD_STATE_ONLINE)
		return;
	nr_queued = 0;
	/* Now we try to fetch requests from the request queue */
	list_for_each_entry(cqr, &device->ccw_queue, list)
		if (cqr->status == DASD_CQR_QUEUED)
			nr_queued++;
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue) &&
		nr_queued < DASD_CHANQ_MAX_SIZE) {
		req = elv_next_request(queue);
		/* Reject write requests on read-only devices. */
		if (device->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, device,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		/*
		 * NOTE(review): DASD_STOPPED_DC_EIO presumably means the
		 * device must fail all I/O; requests are ended here with
		 * uptodate == 0 (error) - confirm flag semantics.
		 */
		if (device->stopped & DASD_STOPPED_DC_EIO) {
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr = device->discipline->build_cp(device, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			DBF_DEV_EVENT(DBF_ERR, device,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr->callback = dasd_end_request_cb;
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_QUEUED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->list, &device->ccw_queue);
		dasd_profile_start(device, cqr, req);
		nr_queued++;
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
*/static inline void__dasd_check_expire(struct dasd_device * device){	struct dasd_ccw_req *cqr;	if (list_empty(&device->ccw_queue))		return;	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);	if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {		if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {			if (device->discipline->term_IO(cqr) != 0)				/* Hmpf, try again in 1/10 sec */				dasd_set_timer(device, 10);		}	}}/* * Take a look at the first request on the ccw queue and check * if it needs to be started. */static inline void__dasd_start_head(struct dasd_device * device){	struct dasd_ccw_req *cqr;	int rc;	if (list_empty(&device->ccw_queue))		return;	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);	if (cqr->status != DASD_CQR_QUEUED)		return;	/* Non-temporary stop condition will trigger fail fast */	if (device->stopped & ~DASD_STOPPED_PENDING &&	    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&	    (!dasd_eer_enabled(device))) {		cqr->status = DASD_CQR_FAILED;		dasd_schedule_bh(device);		return;	}	/* Don't try to start requests if device is stopped */	if (device->stopped)		return;	rc = device->discipline->start_IO(cqr);	if (rc == 0)		dasd_set_timer(device, cqr->expires);	else if (rc == -EACCES) {		dasd_schedule_bh(device);	} else		/* Hmpf, try again in 1/2 sec */		dasd_set_timer(device, 50);}/* * Remove requests from the ccw queue.  */static voiddasd_flush_ccw_queue(struct dasd_device * device, int all){	struct list_head flush_queue;	struct list_head *l, *n;	struct dasd_ccw_req *cqr;	INIT_LIST_HEAD(&flush_queue);	spin_lock_irq(get_ccwdev_lock(device->cdev));	list_for_each_safe(l, n, &device->ccw_queue) {		cqr = list_entry(l, struct dasd_ccw_req, list);		/* Flush all request or only block device requests? 
*/		if (all == 0 && cqr->callback == dasd_end_request_cb)			continue;		if (cqr->status == DASD_CQR_IN_IO)			device->discipline->term_IO(cqr);		if (cqr->status != DASD_CQR_DONE ||		    cqr->status != DASD_CQR_FAILED) {			cqr->status = DASD_CQR_FAILED;			cqr->stopclk = get_clock();		}		/* Process finished ERP request. */		if (cqr->refers) {			__dasd_process_erp(device, cqr);			continue;		}		/* Rechain request on device request queue */		cqr->endclk = get_clock();		list_move_tail(&cqr->list, &flush_queue);	}	spin_unlock_irq(get_ccwdev_lock(device->cdev));	/* Now call the callback function of flushed requests */	list_for_each_safe(l, n, &flush_queue) {		cqr = list_entry(l, struct dasd_ccw_req, list);		if (cqr->callback != NULL)			(cqr->callback)(cqr, cqr->callback_data);	}}/* * Acquire the device lock and process queues for the device. */static voiddasd_tasklet(struct dasd_device * device){	struct list_head final_queue;	struct list_head *l, *n;	struct dasd_ccw_req *cqr;	atomic_set (&device->tasklet_scheduled, 0);	INIT_LIST_HEAD(&final_queue);	spin_lock_irq(get_ccwdev_lock(device->cdev));	/* Check expire time of first request on the ccw queue. */	__dasd_check_expire(device);	/* Finish off requests on ccw queue */	__dasd_process_ccw_queue(device, &final_queue);	spin_unlock_irq(get_ccwdev_lock(device->cdev));	/* Now call the callback function of requests with final status */	list_for_each_safe(l, n, &final_queue) {		cqr = list_entry(l, struct dasd_ccw_req, list);		list_del_init(&cqr->list);		if (cqr->callback != NULL)			(cqr->callback)(cqr, cqr->callback_data);	}	spin_lock_irq(&device->request_queue_lock);	spin_lock(get_ccwdev_lock(device->cdev));	/* Get new request from the block device request queue */	__dasd_process_blk_queue(device);	/* Now check if the head of the ccw queue needs to be started. 
*/	__dasd_start_head(device);	spin_unlock(get_ccwdev_lock(device->cdev));	spin_unlock_irq(&device->request_queue_lock);	dasd_put_device(device);}/* * Schedules a call to dasd_tasklet over the device tasklet. */voiddasd_schedule_bh(struct dasd_device * device){	/* Protect against rescheduling. */	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)		return;	dasd_get_device(device);	tasklet_hi_schedule(&device->tasklet);}/* * Queue a request to the head of the ccw_queue. Start the I/O if * possible. */voiddasd_add_request_head(struct dasd_ccw_req *req){	struct dasd_device *device;	unsigned long flags;	device = req->device;	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);	req->status = DASD_CQR_QUEUED;	req->device = device;	list_add(&req->list, &device->ccw_queue);	/* let the bh start the request to keep them in order */	dasd_schedule_bh(device);	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);}/* * Queue a request to the tail of the ccw_queue. Start the I/O if * possible. */voiddasd_add_request_tail(struct dasd_ccw_req *req){	struct dasd_device *device;	unsigned long flags;	device = req->device;	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);	req->status = DASD_CQR_QUEUED;	req->device = device;	list_add_tail(&req->list, &device->ccw_queue);	/* let the bh start the request to keep them in order */	dasd_schedule_bh(device);	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);}/* * Wakeup callback. */static voiddasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data){	wake_up((wait_queue_head_t *) data);}static inline int_wait_for_wakeup(struct dasd_ccw_req *cqr){	struct dasd_device *device;	int rc;	device = cqr->device;	spin_lock_irq(get_ccwdev_lock(device->cdev));	rc = ((cqr->status == DASD_CQR_DONE ||	       cqr->status == DASD_CQR_FAILED) &&	      list_empty(&cqr->list));	spin_unlock_irq(get_ccwdev_lock(device->cdev));	return rc;}/* * Attempts to start a special ccw queue and waits for its completion. 
 */
int
dasd_sleep_on(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	/* Block (uninterruptibly) until the request reaches a final state. */
	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}

/*
 * Attempts to start a special ccw queue and wait interruptible
 * for its completion.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc, finished;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	finished = 0;
	while (!finished) {
		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
		if (rc != -ERESTARTSYS) {
			/* Request is final (done or failed) */
			rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
			break;
		}
		/* A signal interrupted the wait - try to stop the request. */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
                        /* terminate running cqr */
			if (device->discipline->term_IO) {
				cqr->retries = -1;
				device->discipline->term_IO(cqr);
				/*
				 * wait (non-interruptible) for final status
				 * because signal is still pending
				 */
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				wait_event(wait_q, _wait_for_wakeup(cqr));
				spin_lock_irq(get_ccwdev_lock(device->cdev));
				rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
				finished = 1;
			}
			break;
		case DASD_CQR_QUEUED:
			/* request was never started - dequeue and fail it */
			list_del_init(&cqr->list);
			rc = -EIO;
			finished = 1;
			break;
		default:
			/* cqr with 'non-interruptable' status - just wait */
			break;
		}
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	return rc;
}

/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int
_dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	rc = device->discipline->term_IO(cqr);
	if (rc == 0) {
		/* termination successful - requeue it with fresh timestamps */
		cqr->status = DASD_CQR_QUEUED;
		cqr->startclk = cqr->stopclk = 0;
		cqr->starttime = 0;
	}
	return rc;
}

/*
 * Terminate the request currently running on the device (if any), queue
 * 'cqr' at the HEAD of the ccw queue and wait for its completion.
 * Returns 0 on success, -EIO if the request failed, or the error from
 * terminating the running request.
 */
int
dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	/* Add to the head so it runs before the terminated request. */
	list_add(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}

/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->device;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to failed */
		cqr->status = DASD_CQR_FAILED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		if (device->discipline->term_IO(cqr) != 0)
			/* what to do if unable to terminate ??????
			   e.g. not _IN_IO */
			cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_clock();
		rc = 1;
		break;
	case DASD_CQR_DONE:
	case DASD_CQR_FAILED:
		/* already finished - do nothing */
		break;
	default:
		DEV_MESSAGE(KERN_ALERT, device,
			    "invalid status %02x in request",
			    cqr->status);
		BUG();
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* Let the bottom half clean up the (now final) request. */
	dasd_schedule_bh(device);
	return rc;
}

/*
 * SECTION: Block device operations (request queue, partitions, open, release).
 */

/*

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?