📄 dasd.c
字号:
 * posts the buffer_cache about a finalized request */
static inline void
dasd_end_request(struct request *req, int uptodate)
{
	/*
	 * Complete all sectors of the request in one go; a partial
	 * completion here would indicate a driver bug.
	 */
	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
		BUG();
	/* Feed disk activity into the kernel entropy pool. */
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req);
}

/*
 * Process finished error recovery ccw.
 * Called for a cqr that has reached a final status; the discipline's
 * erp_postaction selects the cleanup function to run for it.
 */
static inline void
__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
	/* Let the discipline pick the post-action and run it. */
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}

/*
 * Process ccw request queue.
 * Walks the device ccw queue from the head, triggers error recovery for
 * requests in error, and moves requests with final status to final_queue
 * so their callbacks can be invoked without holding the device lock.
 * NOTE(review): caller is expected to hold the ccw device lock — confirm
 * against callers (dasd_tasklet takes it).
 */
static inline void
__dasd_process_ccw_queue(struct dasd_device * device,
			 struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		/* Stop list processing at the first non-final request. */
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_ERROR)
			break;
		/* Process requests with DASD_CQR_ERROR */
		if (cqr->status == DASD_CQR_ERROR) {
			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
				/* I/O was halted - no recovery, fail it. */
				cqr->status = DASD_CQR_FAILED;
				cqr->stopclk = get_clock();
			} else {
				/*
				 * Concurrent sense data available: let the
				 * discipline choose the ERP action, else
				 * fall back to the default retry logic.
				 */
				if (cqr->irb.esw.esw0.erw.cons) {
					erp_fn = device->discipline->erp_action(cqr);
					erp_fn(cqr);
				} else
					dasd_default_erp_action(cqr);
			}
			/* ERP may have modified the queue - rescan it. */
			goto restart;
		}
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			goto restart;
		}
		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->list, final_queue);
	}
}

/*
 * Completion callback for block device requests: account the request,
 * complete it towards the block layer and free the ccw request.
 */
static void
dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
{
	struct request *req;

	req = (struct request *) data;
	dasd_profile_end(cqr->device, cqr, req);
	/* dasd_end_request must run under the block request queue lock. */
	spin_lock_irq(&cqr->device->request_queue_lock);
	dasd_end_request(req, (cqr->status == DASD_CQR_DONE));
	spin_unlock_irq(&cqr->device->request_queue_lock);
	dasd_sfree_request(cqr, cqr->device);
}

/*
 * Fetch requests from the block device queue.
 * Builds channel programs for pending block requests and chains them to
 * the device ccw queue, up to DASD_CHANQ_MAX_SIZE queued requests.
 */
static inline void
__dasd_process_blk_queue(struct dasd_device * device)
{
	request_queue_t *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	int nr_queued;

	queue = device->request_queue;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (device->state != DASD_STATE_READY &&
	    device->state != DASD_STATE_ONLINE)
		return;
	nr_queued = 0;
	/* Now we try to fetch requests from the request queue */
	list_for_each_entry(cqr, &device->ccw_queue, list)
		if (cqr->status == DASD_CQR_QUEUED)
			nr_queued++;
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue) &&
	       nr_queued < DASD_CHANQ_MAX_SIZE) {
		req = elv_next_request(queue);
		/* Writes to a read-only device fail immediately. */
		if (test_bit(DASD_FLAG_RO, &device->flags) &&
		    rq_data_dir(req) == WRITE) {
			DBF_EVENT(DBF_ERR,
				  "(%s) Rejecting write request %p",
				  device->cdev->dev.bus_id,
				  req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		/* Device stopped with EIO policy: fail requests outright. */
		if (device->stopped & DASD_STOPPED_DC_EIO) {
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr = device->discipline->build_cp(device, req);
		if (IS_ERR(cqr)) {
			/* -ENOMEM is transient: retry on next tasklet run. */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			DBF_EVENT(DBF_ERR,
				  "(%s) CCW creation failed on request %p",
				  device->cdev->dev.bus_id,
				  req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr->callback = dasd_end_request_cb;
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_QUEUED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->list, &device->ccw_queue);
		dasd_profile_start(device, cqr, req);
		nr_queued++;
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static inline void
__dasd_check_expire(struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	/* Only a started request with a non-zero expiry can time out. */
	if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
		if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
			if (device->discipline->term_IO(cqr) != 0)
				/* Hmpf, try again in 1/10 sec */
				dasd_set_timer(device, 10);
		}
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
*/static inline void__dasd_start_head(struct dasd_device * device){ struct dasd_ccw_req *cqr; int rc; if (list_empty(&device->ccw_queue)) return; cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); if ((cqr->status == DASD_CQR_QUEUED) && (!device->stopped)) { /* try to start the first I/O that can be started */ rc = device->discipline->start_IO(cqr); if (rc == 0) dasd_set_timer(device, cqr->expires); else /* Hmpf, try again in 1/2 sec */ dasd_set_timer(device, 50); }}/* * Remove requests from the ccw queue. */static voiddasd_flush_ccw_queue(struct dasd_device * device, int all){ struct list_head flush_queue; struct list_head *l, *n; struct dasd_ccw_req *cqr; INIT_LIST_HEAD(&flush_queue); spin_lock_irq(get_ccwdev_lock(device->cdev)); list_for_each_safe(l, n, &device->ccw_queue) { cqr = list_entry(l, struct dasd_ccw_req, list); /* Flush all request or only block device requests? */ if (all == 0 && cqr->callback == dasd_end_request_cb) continue; if (cqr->status == DASD_CQR_IN_IO) device->discipline->term_IO(cqr); if (cqr->status != DASD_CQR_DONE || cqr->status != DASD_CQR_FAILED) { cqr->status = DASD_CQR_FAILED; cqr->stopclk = get_clock(); } /* Process finished ERP request. */ if (cqr->refers) { __dasd_process_erp(device, cqr); continue; } /* Rechain request on device request queue */ cqr->endclk = get_clock(); list_move_tail(&cqr->list, &flush_queue); } spin_unlock_irq(get_ccwdev_lock(device->cdev)); /* Now call the callback function of flushed requests */ list_for_each_safe(l, n, &flush_queue) { cqr = list_entry(l, struct dasd_ccw_req, list); if (cqr->callback != NULL) (cqr->callback)(cqr, cqr->callback_data); }}/* * Acquire the device lock and process queues for the device. 
 */
static void
dasd_tasklet(struct dasd_device * device)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Re-arm: allow dasd_schedule_bh to schedule us again. */
	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_check_expire(device);
	/* Finish off requests on ccw queue */
	__dasd_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		list_del(&cqr->list);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	/* Lock order: request queue lock first, then ccw device lock. */
	spin_lock_irq(&device->request_queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
	spin_unlock_irq(&device->request_queue_lock);
	/* Drop the reference taken by dasd_schedule_bh. */
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void
dasd_schedule_bh(struct dasd_device * device)
{
	/* Protect against rescheduling. */
	if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled))
		return;
	/* Hold a device reference until dasd_tasklet drops it. */
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

/*
 * Queue a request to the head of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_head(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	/* list_add (not _tail): this request runs before queued ones. */
	list_add(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_tail(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	list_add_tail(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup callback.
 * Installed as cqr->callback by the sleep_on variants; data points to
 * the sleeper's on-stack wait queue head.
 */
static void
dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

/*
 * Wait condition: true once the request reached a final status.
 * The status is sampled under the ccw device lock.
 */
static inline int
_wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = cqr->status == DASD_CQR_DONE || cqr->status == DASD_CQR_FAILED;
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Attempts to start a special ccw queue and waits for its completion.
 * Returns 0 on success, -EIO if the request ended as DASD_CQR_FAILED.
 */
int
dasd_sleep_on(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}

/*
 * Attempts to start a special ccw queue and wait interruptible
 * for its completion.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc, finished;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	finished = 0;
	while (!finished) {
		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
		if (rc != -ERESTARTSYS) {
			/* Request status is either done or failed. */
			rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
			break;
		}
		/*
		 * Interrupted by a signal: try to terminate the running
		 * request and take it off the queue.  If termination fails
		 * (or the request is not in I/O) keep waiting - the
		 * completion path will wake us.  On success -ERESTARTSYS
		 * is returned to the caller.
		 */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		if (cqr->status == DASD_CQR_IN_IO &&
		    device->discipline->term_IO(cqr) == 0) {
			list_del(&cqr->list);
			finished = 1;
		}
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	return rc;
}

/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
/*
 * Terminate the request at the head of the ccw queue (if any) and put it
 * back to DASD_CQR_QUEUED so it will be restarted later.
 * Returns 0 on success (or empty queue), otherwise the term_IO error.
 */
static inline int
_dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	rc = device->discipline->term_IO(cqr);
	if (rc == 0) {
		/* termination successful */
		cqr->status = DASD_CQR_QUEUED;
		/* Reset accounting so the restart is timed from scratch. */
		cqr->startclk = cqr->stopclk = 0;
		cqr->starttime = 0;
	}
	return rc;
}

/*
 * Terminate the currently running request, queue cqr at the HEAD of the
 * ccw queue and wait for its completion.
 * Returns 0 on success, -EIO on failure, or the term_IO error if the
 * running request could not be terminated.
 */
int
dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	/* Head of the queue: this request runs before the terminated one. */
	list_add(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}

/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->device;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to failed */
		cqr->status = DASD_CQR_FAILED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		/* NOTE(review): function continues past the visible source;
		 * the remainder is kept as-is below. */
		if (device->discipline->term_IO(cqr) != 0)
			/* what to do if unable to terminate ??????
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -