chsc.c
			if (chpid < 0)
				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
					      __FUNCTION__);
			else
				s390_set_chpid_offline(chpid);
			break;
		case 2: /* i/o resource accessibility */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports some I/O "
				      "devices may have become accessible\n");
			pr_debug("Data received after sei: \n");
			pr_debug("Validity flags: %x\n", sei_area->vf);

			/* allocate a new channel path structure, if needed */
			status = get_chp_status(sei_area->rsid);
			if (status < 0)
				new_channel_path(sei_area->rsid);
			else if (!status)
				return 0;
			if ((sei_area->vf & 0x80) == 0) {
				pr_debug("chpid: %x\n", sei_area->rsid);
				ret = s390_process_res_acc(sei_area->rsid, 0, 0);
			} else if ((sei_area->vf & 0xc0) == 0x80) {
				pr_debug("chpid: %x link addr: %x\n",
					 sei_area->rsid, sei_area->fla);
				ret = s390_process_res_acc(sei_area->rsid,
							   sei_area->fla, 0xff00);
			} else if ((sei_area->vf & 0xc0) == 0xc0) {
				pr_debug("chpid: %x full link addr: %x\n",
					 sei_area->rsid, sei_area->fla);
				ret = s390_process_res_acc(sei_area->rsid,
							   sei_area->fla, 0xffff);
			}
			pr_debug("\n");
			break;
		default: /* other stuff */
			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
				      sei_area->cc);
			break;
		}
	} while (sei_area->flags & 0x80);
	return ret;
}

static int
chp_add(int chpid)
{
	struct subchannel *sch;
	int irq, ret, rc;
	char dbf_txt[15];

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	rc = 0;
	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		int i;

		sch = get_subchannel_by_schid(irq);
		if (!sch) {
			struct schib schib;

			if (stsch(irq, &schib)) {
				/* We're through */
				if (need_rescan)
					rc = -EAGAIN;
				break;
			}
			if (need_rescan) {
				rc = -EAGAIN;
				continue;
			}
			/* Put it on the slow path. */
			ret = css_enqueue_subchannel_slow(irq);
			if (ret) {
				css_clear_subchannel_slow_list();
				need_rescan = 1;
			}
			rc = -EAGAIN;
			continue;
		}

		spin_lock(&sch->lock);
		for (i = 0; i < 8; i++)
			if (sch->schib.pmcw.chpid[i] == chpid) {
				if (stsch(sch->irq, &sch->schib) != 0) {
					/* Endgame. */
					spin_unlock(&sch->lock);
					return rc;
				}
				break;
			}
		if (i == 8) {
			spin_unlock(&sch->lock);
			return rc;
		}
		sch->lpm = ((sch->schib.pmcw.pim &
			     sch->schib.pmcw.pam &
			     sch->schib.pmcw.pom)
			    | 0x80 >> i) & sch->opm;

		if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);

		spin_unlock(&sch->lock);
		put_device(&sch->dev);
	}
	return rc;
}

/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
	if (on == 0) {
		/* Path has gone. We use the link incident routine. */
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has come. Allocate a new channel path structure,
	 * if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);

	/* Avoid the extra overhead in process_rec_acc. */
	return chp_add(chpid);
}
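/*
 * Note on path masks: pim, pam and pom in the pmcw, like sch->lpm and
 * sch->opm, are 8-bit masks in which bit (0x80 >> n) represents the
 * channel path stored in pmcw.chpid[n].  chp_add() above therefore ORs
 * (0x80 >> i) into the recomputed lpm for the matching slot i, and the
 * vary helpers below set or clear the same per-path bit.
 */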
static inline int
__check_for_io_and_kill(struct subchannel *sch, int index)
{
	int cc;

	if (!device_is_online(sch))
		/* cio could be doing I/O. */
		return 0;
	cc = stsch(sch->irq, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
		device_set_waiting(sch);
		return 1;
	}
	return 0;
}

static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(&sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		} else {
			sch->opm &= ~(0x80 >> chp);
			sch->lpm &= ~(0x80 >> chp);
			/*
			 * Give running I/O a grace period in which it
			 * can successfully terminate, even using the
			 * just varied off path. Then kill it.
			 */
			if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
				if (css_enqueue_subchannel_slow(sch->irq)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			} else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
}

static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int
s390_vary_chpid(__u8 chpid, int on)
{
	char dbf_text[15];
	int status, irq, ret;
	struct subchannel *sch;

	sprintf(dbf_text, on ? "varyon%x" : "varyoff%x", chpid);
	CIO_TRACE_EVENT(2, dbf_text);

	status = get_chp_status(chpid);
	if (status < 0) {
		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
		return -EINVAL;
	}

	if (!on && !status) {
		printk(KERN_ERR "chpid %x is already offline\n", chpid);
		return -EINVAL;
	}

	set_chp_logically_online(chpid, on);

	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (!on)
		goto out;
	/* Scan for new devices on varied on path. */
	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		struct schib schib;

		if (need_rescan)
			break;
		sch = get_subchannel_by_schid(irq);
		if (sch) {
			put_device(&sch->dev);
			continue;
		}
		if (stsch(irq, &schib))
			/* We're through */
			break;
		/* Put it on the slow path. */
		ret = css_enqueue_subchannel_slow(irq);
		if (ret) {
			css_clear_subchannel_slow_list();
			need_rescan = 1;
		}
	}
out:
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	return 0;
}
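/*
 * Usage sketch for the sysfs attributes defined below (illustrative;
 * the exact path depends on the running system, e.g. a channel path
 * registered as "chp0.4a" typically shows up under /sys/devices/css0/):
 *
 *	echo off > /sys/devices/css0/chp0.4a/status
 *	cat /sys/devices/css0/chp0.4a/status
 *
 * Writing "on"/"off" to the "status" attribute ends up in
 * s390_vary_chpid(); reading it reports "online" or "offline".
 */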
/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));
}

static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct channel_path *cp = container_of(dev, struct channel_path, dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->id, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->id, 0);
	else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static struct attribute *chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}

static int
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0002,
	};

	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
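/*
 * Both CHSC calls in this file follow the pattern shown in
 * chsc_determine_channel_path_description() above: get a zeroed page
 * with GFP_DMA for the command area, fill the chsc_header with the
 * request length and command code (0x0002 here, 0x0010 for the
 * characteristics call at the end of the file), issue chsc(), map a
 * non-zero condition code to an errno first, and only then interpret
 * response.code (0x0001 means success).
 */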
/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
	struct channel_path *chp;
	int ret;

	chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;
	memset(chp, 0, sizeof(struct channel_path));

	/* fill in status, etc. */
	chp->id = chpid;
	chp->state = 1;
	chp->dev = (struct device) {
		.parent  = &css_bus_device,
		.release = chp_release,
	};
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	if (ret)
		goto out_free;

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		printk(KERN_WARNING "%s: could not register %02x\n",
		       __func__, chpid);
		goto out_free;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out_free;
	} else
		chps[chpid] = chp;
	return ret;
out_free:
	kfree(chp);
	return ret;
}

void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = chps[sch->schib.pmcw.chpid[chp_no]];
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	return desc;
}

static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING "Can't allocate page for processing of "
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0010,
	};

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
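/*
 * Illustrative caller of chsc_get_chp_desc() (a sketch, not code from
 * this file): the function hands back a kmalloc'ed copy of the cached
 * descriptor, so the caller owns it and must kfree() it.
 *
 *	struct channel_path_desc *d = chsc_get_chp_desc(sch, chp_no);
 *
 *	if (d) {
 *		... inspect d->desc, the channel path type ...
 *		kfree(d);
 *	}
 */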