device_fsm.c
}

static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = (struct irb *) __LC_IRB;
        /* Accumulate status. We don't do basic sense. */
        ccw_device_accumulate_irb(cdev, irb);
        /* Try to start delayed device verification. */
        ccw_device_online_verify(cdev, 0);
        /* Note: Don't call handler for cio initiated clear! */
}

static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        ccw_device_set_timeout(cdev, 0);
        /* OK, i/o is dead now. Call interrupt handler. */
        cdev->private->state = DEV_STATE_ONLINE;
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-ETIMEDOUT));
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                return;
        }
        if (ret == -ENODEV) {
                struct subchannel *sch;

                sch = to_subchannel(cdev->dev.parent);
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, (void *)cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return;
        }
        //FIXME: Can we get here?
        cdev->private->state = DEV_STATE_ONLINE;
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-ETIMEDOUT));
}

static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;
        struct subchannel *sch;

        irb = (struct irb *) __LC_IRB;
        /*
         * Accumulate status and find out if a basic sense is needed.
         * This is fine since we have already adapted the lpm.
         */
        ccw_device_accumulate_irb(cdev, irb);
        if (cdev->private->flags.dosense) {
                if (ccw_device_do_sense(cdev, irb) == 0) {
                        cdev->private->state = DEV_STATE_W4SENSE;
                }
                return;
        }
        /* Iff device is idle, reset timeout. */
        sch = to_subchannel(cdev->dev.parent);
        if (!stsch(sch->irq, &sch->schib))
                if (sch->schib.scsw.actl == 0)
                        ccw_device_set_timeout(cdev, 0);
        /* Call the handler. */
        ccw_device_call_handler(cdev);
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        ccw_device_set_timeout(cdev, 0);
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                cdev->private->state = DEV_STATE_TIMEOUT_KILL;
                return;
        }
        if (ret == -ENODEV) {
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, (void *)cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return;
        }
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-ETIMEDOUT));
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        /* When the I/O has terminated, we have to start verification. */
        if (cdev->private->options.pgroup)
                cdev->private->flags.doverify = 1;
}

static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        switch (dev_event) {
        case DEV_EVENT_INTERRUPT:
                irb = (struct irb *) __LC_IRB;
                /* Check for unsolicited interrupt. */
                if ((irb->scsw.stctl ==
                     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
                    (!irb->scsw.cc))
                        /* FIXME: we should restart stlck here, but this
                         * is extremely unlikely ... */
                        goto out_wakeup;

                ccw_device_accumulate_irb(cdev, irb);
                /* We don't care about basic sense etc. */
                break;
        default: /* timeout */
                break;
        }
out_wakeup:
        wake_up(&cdev->private->wait_q);
}

static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
                /* Couldn't enable the subchannel for i/o. Sick device. */
                return;

        /* After 60s the device recognition is considered to have failed. */
        ccw_device_set_timeout(cdev, 60*HZ);

        cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
        ccw_device_sense_id_start(cdev);
}

void
device_trigger_reprobe(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        if (cdev->private->state != DEV_STATE_DISCONNECTED)
                return;

        /* Update some values. */
        if (stsch(sch->irq, &sch->schib))
                return;

        /*
         * The pim, pam, pom values may not be accurate, but they are the best
         * we have before performing device selection :/
         */
        sch->lpm = sch->schib.pmcw.pim &
                sch->schib.pmcw.pam &
                sch->schib.pmcw.pom &
                sch->opm;
        /* Re-set some bits in the pmcw that were lost. */
        sch->schib.pmcw.isc = 3;
        sch->schib.pmcw.csense = 1;
        sch->schib.pmcw.ena = 0;
        if ((sch->lpm & (sch->lpm - 1)) != 0)
                sch->schib.pmcw.mp = 1;
        sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
        /* We should also update ssd info, but this has to wait. */
        ccw_device_start_id(cdev, 0);
}

static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        /*
         * An interrupt in state offline means a previous disable was not
         * successful. Try again.
         */
        cio_disable_subchannel(sch);
}

static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
        retry_set_schib(cdev);
        cdev->private->state = DEV_STATE_ONLINE;
        dev_fsm_event(cdev, dev_event);
}

static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
        ccw_device_set_timeout(cdev, 0);
        if (dev_event == DEV_EVENT_NOTOPER)
                cdev->private->state = DEV_STATE_NOT_OPER;
        else
                cdev->private->state = DEV_STATE_OFFLINE;
        wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        switch (ret) {
        case 0:
                cdev->private->state = DEV_STATE_OFFLINE;
                wake_up(&cdev->private->wait_q);
                break;
        case -ENODEV:
                cdev->private->state = DEV_STATE_NOT_OPER;
                wake_up(&cdev->private->wait_q);
                break;
        default:
                ccw_device_set_timeout(cdev, HZ/10);
        }
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * Bug operation action.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
        printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
               cdev->private->state, dev_event);
        BUG();
}

/*
 * device statemachine
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
        [DEV_STATE_NOT_OPER] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_nop,
                [DEV_EVENT_INTERRUPT] = ccw_device_bug,
                [DEV_EVENT_TIMEOUT]   = ccw_device_nop,
                [DEV_EVENT_VERIFY]    = ccw_device_nop,
        },
        [DEV_STATE_SENSE_PGID] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq,
                [DEV_EVENT_TIMEOUT]   = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]    = ccw_device_nop,
        },
        [DEV_STATE_SENSE_ID] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]   = ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]    = ccw_device_nop,
        },
        [DEV_STATE_OFFLINE] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_offline_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
                [DEV_EVENT_TIMEOUT]   = ccw_device_nop,
                [DEV_EVENT_VERIFY]    = ccw_device_nop,
        },
        [DEV_STATE_VERIFY] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
                [DEV_EVENT_TIMEOUT]   = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]    = ccw_device_nop,
        },
        [DEV_STATE_ONLINE] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_irq,
                [DEV_EVENT_TIMEOUT]   = ccw_device_online_timeout,
                [DEV_EVENT_VERIFY]    = ccw_device_online_verify,
        },
        [DEV_STATE_W4SENSE] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
                [DEV_EVENT_TIMEOUT]   = ccw_device_nop,
                [DEV_EVENT_VERIFY]    = ccw_device_online_verify,
        },
        [DEV_STATE_DISBAND_PGID] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq,
                [DEV_EVENT_TIMEOUT]   = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]    = ccw_device_nop,
        },
        [DEV_STATE_BOXED] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_offline_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done,
                [DEV_EVENT_TIMEOUT]   = ccw_device_stlck_done,
                [DEV_EVENT_VERIFY]    = ccw_device_nop,
        },
        /* states to wait for i/o completion before doing something */
        [DEV_STATE_CLEAR_VERIFY] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
                [DEV_EVENT_TIMEOUT]   = ccw_device_nop,
                [DEV_EVENT_VERIFY]    = ccw_device_nop,
        },
        [DEV_STATE_TIMEOUT_KILL] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
                [DEV_EVENT_TIMEOUT]   = ccw_device_killing_timeout,
                [DEV_EVENT_VERIFY]    = ccw_device_nop, //FIXME
        },
        [DEV_STATE_WAIT4IO] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq,
                [DEV_EVENT_TIMEOUT]   = ccw_device_wait4io_timeout,
                [DEV_EVENT_VERIFY]    = ccw_device_wait4io_verify,
        },
        [DEV_STATE_QUIESCE] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_quiesce_done,
                [DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
                [DEV_EVENT_TIMEOUT]   = ccw_device_quiesce_timeout,
                [DEV_EVENT_VERIFY]    = ccw_device_nop,
        },
        /* special states for devices gone not operational */
        [DEV_STATE_DISCONNECTED] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_nop,
                [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
                [DEV_EVENT_TIMEOUT]   = ccw_device_bug,
                [DEV_EVENT_VERIFY]    = ccw_device_nop,
        },
        [DEV_STATE_DISCONNECTED_SENSE_ID] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]   = ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]    = ccw_device_nop,
        },
        [DEV_STATE_CMFCHANGE] = {
                [DEV_EVENT_NOTOPER]   = ccw_device_change_cmfstate,
                [DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate,
                [DEV_EVENT_TIMEOUT]   = ccw_device_change_cmfstate,
                [DEV_EVENT_VERIFY]    = ccw_device_change_cmfstate,
        },
};

/*
 * io_subchannel_irq is called for "real" interrupts or for status
 * pending conditions on msch.
 */
void
io_subchannel_irq (struct device *pdev)
{
        struct ccw_device *cdev;

        cdev = to_subchannel(pdev)->dev.driver_data;

        CIO_TRACE_EVENT (3, "IRQ");
        CIO_TRACE_EVENT (3, pdev->bus_id);
        if (cdev)
                dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
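
The excerpt defines the action routines and dev_jumptable but not the dispatcher that consumes them; in kernels of this generation that helper is a small static inline in drivers/s390/cio/device.h. The sketch below illustrates the dispatch pattern implied by the table and by the dev_fsm_event() calls above, where the device's current state and the incoming event index directly into dev_jumptable; treat it as an illustrative sketch rather than a verbatim copy of that header.

/*
 * Illustrative sketch (not verbatim kernel source): dispatch an FSM event
 * by indexing dev_jumptable with the current device state and the event,
 * then invoking the selected action routine.
 */
static inline void
dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
{
        dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
}

This also explains ccw_device_bug(): a NULL slot in the table would crash at exactly this dispatch, so the action prints the offending state/event pair before calling BUG().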