📄 cio.c
	if (cio_is_console(schid)) {
		sch->lock = cio_get_console_lock();
	} else {
		err = cio_create_sch_lock(sch);
		if (err)
			goto out;
	}
	mutex_init(&sch->reg_mutex);
	/* Set a name for the subchannel */
	snprintf(sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
		 schid.sch_no);

	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch_err(schid, &sch->schib);
	if (ccode) {
		err = (ccode == 3) ? -ENXIO : ccode;
		goto out;
	}
	/* Copy subchannel type from path management control word. */
	sch->st = sch->schib.pmcw.st;

	/*
	 * ... just being curious we check for non I/O subchannels
	 */
	if (sch->st != 0) {
		CIO_DEBUG(KERN_INFO, 0,
			  "cio: Subchannel 0.%x.%04x reports "
			  "non-I/O subchannel type %04X\n",
			  sch->schid.ssid, sch->schid.sch_no, sch->st);
		/* We stop here for non-io subchannels. */
		err = sch->st;
		goto out;
	}

	/* Initialization for io subchannels. */
	if (!sch->schib.pmcw.dnv) {
		/* io subchannel but device number is invalid. */
		err = -ENODEV;
		goto out;
	}
	/* Devno is valid. */
	if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
		/*
		 * This device must not be known to Linux. So we simply
		 * say that there is no device and return ENODEV.
		 */
		CIO_MSG_EVENT(4, "Blacklisted device detected "
			      "at devno %04X, subchannel set %x\n",
			      sch->schib.pmcw.dev, sch->schid.ssid);
		err = -ENODEV;
		goto out;
	}
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;

	CIO_DEBUG(KERN_INFO, 0,
		  "cio: Detected device %04x on subchannel 0.%x.%04X"
		  " - PIM = %02X, PAM = %02X, POM = %02X\n",
		  sch->schib.pmcw.dev, sch->schid.ssid, sch->schid.sch_no,
		  sch->schib.pmcw.pim, sch->schib.pmcw.pam,
		  sch->schib.pmcw.pom);

	/*
	 * We now have to initially ...
	 * ... set "interruption subclass"
	 * ... enable "concurrent sense"
	 * ... enable "multipath mode" if more than one
	 *     CHPID is available. This is done regardless
	 *     whether multiple paths are available for us.
	 */
	sch->schib.pmcw.isc = 3;	/* could be smth. else */
	sch->schib.pmcw.csense = 1;	/* concurrent sense */
	sch->schib.pmcw.ena = 0;
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;	/* multipath mode */
	/* clean up possible residual cmf stuff */
	sch->schib.pmcw.mme = 0;
	sch->schib.pmcw.mbfc = 0;
	sch->schib.pmcw.mbi = 0;
	sch->schib.mba = 0;
	return 0;
out:
	if (!cio_is_console(schid))
		kfree(sch->lock);
	sch->lock = NULL;
	return err;
}
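/*
 * A minimal illustrative sketch, not part of cio.c: the multipath test
 * above, (lpm & (lpm - 1)) != 0, is the usual "more than one bit set"
 * trick. Subtracting 1 clears the lowest set bit, so the AND is non-zero
 * only if a second path bit remains. The helper name below is
 * hypothetical.
 */
static inline int lpm_has_multiple_paths(unsigned char lpm)
{
	/* lpm = 0x80: 0x80 & 0x7f == 0x00 -> single path
	 * lpm = 0xc0: 0xc0 & 0xbf == 0x80 -> multiple paths */
	return (lpm & (lpm - 1)) != 0;
}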
/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void
do_IRQ (struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	asm volatile ("mc 0,0");
	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
		/**
		 * Make sure that the i/o interrupt did not "overtake"
		 * the last HZ timer interrupt.
		 */
		account_ticks(S390_lowcore.int_clock);
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	irb = (struct irb *) __LC_IRB;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		/*
		 * Non I/O-subchannel thin interrupts are processed differently
		 */
		if (tpi_info->adapter_IO == 1 &&
		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
			do_adapter_IO();
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (sch)
			spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch (tpi_info->schid, irb) == 0 && sch) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(&sch->dev);
		}
		if (sch)
			spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (!MACHINE_IS_VM && tpi (NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_CCW_CONSOLE
static struct subchannel console_subchannel;
static int console_subchannel_in_use;

/*
 * busy wait for the next interrupt on the console
 */
void
wait_cons_dev (void)
{
	unsigned long cr6      __attribute__ ((aligned (8)));
	unsigned long save_cr6 __attribute__ ((aligned (8)));

	/*
	 * before entering the spinlock we may already have
	 * processed the interrupt on a different CPU...
	 */
	if (!console_subchannel_in_use)
		return;

	/* disable all but isc 7 (console device) */
	__ctl_store (save_cr6, 6, 6);
	cr6 = 0x01000000;
	__ctl_load (cr6, 6, 6);

	do {
		spin_unlock(console_subchannel.lock);
		if (!cio_tpi())
			cpu_relax();
		spin_lock(console_subchannel.lock);
	} while (console_subchannel.schib.scsw.actl != 0);
	/*
	 * restore previous isc value
	 */
	__ctl_load (save_cr6, 6, 6);
}

static int
cio_test_for_console(struct subchannel_id schid, void *data)
{
	if (stsch_err(schid, &console_subchannel.schib) != 0)
		return -ENXIO;
	if (console_subchannel.schib.pmcw.dnv &&
	    console_subchannel.schib.pmcw.dev == console_devno) {
		console_irq = schid.sch_no;
		return 1; /* found */
	}
	return 0;
}

static int
cio_get_console_sch_no(void)
{
	struct subchannel_id schid;

	init_subchannel_id(&schid);
	if (console_irq != -1) {
		/* VM provided us with the irq number of the console. */
		schid.sch_no = console_irq;
		if (stsch(schid, &console_subchannel.schib) != 0 ||
		    !console_subchannel.schib.pmcw.dnv)
			return -1;
		console_devno = console_subchannel.schib.pmcw.dev;
	} else if (console_devno != -1) {
		/* At least the console device number is known. */
		for_each_subchannel(cio_test_for_console, NULL);
		if (console_irq == -1)
			return -1;
	} else {
		/* unlike in 2.4, we cannot autoprobe here, since
		 * the channel subsystem is not fully initialized.
		 * With some luck, the HWC console can take over */
		printk(KERN_WARNING "cio: No ccw console found!\n");
		return -1;
	}
	return console_irq;
}

struct subchannel *
cio_probe_console(void)
{
	int sch_no, ret;
	struct subchannel_id schid;

	if (xchg(&console_subchannel_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch_no = cio_get_console_sch_no();
	if (sch_no == -1) {
		console_subchannel_in_use = 0;
		return ERR_PTR(-ENODEV);
	}
	memset(&console_subchannel, 0, sizeof(struct subchannel));
	init_subchannel_id(&schid);
	schid.sch_no = sch_no;
	ret = cio_validate_subchannel(&console_subchannel, schid);
	if (ret) {
		console_subchannel_in_use = 0;
		return ERR_PTR(-ENODEV);
	}

	/*
	 * enable console I/O-interrupt subclass 7
	 */
	ctl_set_bit(6, 24);
	console_subchannel.schib.pmcw.isc = 7;
	console_subchannel.schib.pmcw.intparm =
		(__u32)(unsigned long)&console_subchannel;
	ret = cio_modify(&console_subchannel);
	if (ret) {
		console_subchannel_in_use = 0;
		return ERR_PTR(ret);
	}
	return &console_subchannel;
}

void
cio_release_console(void)
{
	console_subchannel.schib.pmcw.intparm = 0;
	cio_modify(&console_subchannel);
	ctl_clear_bit(6, 24);
	console_subchannel_in_use = 0;
}

/* Bah... hack to catch console special sausages. */
int
cio_is_console(struct subchannel_id schid)
{
	if (!console_subchannel_in_use)
		return 0;
	return schid_equal(&schid, &console_subchannel.schid);
}

struct subchannel *
cio_get_console_subchannel(void)
{
	if (!console_subchannel_in_use)
		return NULL;
	return &console_subchannel;
}

#endif
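/*
 * A minimal illustrative sketch, not part of cio.c: callbacks handed to
 * for_each_subchannel() follow the convention seen in
 * cio_test_for_console() above -- return 0 to keep scanning, while any
 * non-zero return ends the walk over the current subchannel set
 * (positive for "found", negative errno when stsch_err() indicates the
 * set is exhausted). The callback below is hypothetical and merely
 * counts enabled subchannels.
 */
static int count_enabled_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	int *count = data;

	if (stsch_err(schid, &schib) != 0)
		return -ENXIO;	/* no more subchannels in this set */
	if (schib.pmcw.ena)
		(*count)++;	/* subchannel is enabled for I/O */
	return 0;		/* keep iterating */
}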
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
	int retry, cc;

	cc = 0;
	for (retry = 0; retry < 3; retry++) {
		schib->pmcw.ena = 0;
		cc = msch(schid, schib);
		if (cc)
			return (cc == 3) ? -ENODEV : -EBUSY;
		stsch(schid, schib);
		if (!schib->pmcw.ena)
			return 0;
	}
	return -EBUSY; /* uhm... */
}

/* we can't use the normal udelay here, since it enables external interrupts */
static void udelay_reset(unsigned long usecs)
{
	uint64_t start_cc, end_cc;

	asm volatile ("STCK %0" : "=m" (start_cc));
	do {
		cpu_relax();
		asm volatile ("STCK %0" : "=m" (end_cc));
	} while (((end_cc - start_cc) / 4096) < usecs);
}

static int
__clear_subchannel_easy(struct subchannel_id schid)
{
	int retry;

	if (csch(schid))
		return -ENODEV;
	for (retry = 0; retry < 20; retry++) {
		struct tpi_info ti;

		if (tpi(&ti)) {
			tsch(ti.schid, (struct irb *)__LC_IRB);
			if (schid_equal(&ti.schid, &schid))
				return 0;
		}
		udelay_reset(100);
	}
	return -EBUSY;
}

static int pgm_check_occured;

static void cio_reset_pgm_check_handler(void)
{
	pgm_check_occured = 1;
}

static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
{
	int rc;

	pgm_check_occured = 0;
	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
	rc = stsch(schid, addr);
	s390_base_pgm_handler_fn = NULL;

	/* The program check handler could have changed pgm_check_occured. */
	barrier();

	if (pgm_check_occured)
		return -EIO;
	else
		return rc;
}
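/*
 * A minimal illustrative sketch, not part of cio.c: udelay_reset() above
 * and the RCHP timeout below both convert between microseconds and s390
 * TOD-clock units. Bit 51 of the TOD clock ticks once per microsecond,
 * so one microsecond corresponds to 2^12 = 4096 TOD units. These
 * hypothetical helpers spell out the two conversions:
 */
static inline uint64_t tod_to_usecs(uint64_t tod_delta)
{
	return tod_delta >> 12;	/* same scaling as (end_cc - start_cc)/4096 */
}

static inline uint64_t usecs_to_tod(uint64_t usecs)
{
	return usecs << 12;	/* same scaling as RCHP_TIMEOUT << 12 below */
}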
static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if (!schib.pmcw.ena)
		return 0;
	switch (__disable_subchannel_easy(schid, &schib)) {
	case 0:
	case -ENODEV:
		break;
	default: /* -EBUSY */
		if (__clear_subchannel_easy(schid))
			break; /* give up... */
		stsch(schid, &schib);
		__disable_subchannel_easy(schid, &schib);
	}
	return 0;
}

static atomic_t chpid_reset_count;

static void s390_reset_chpids_mcck_handler(void)
{
	struct crw crw;
	struct mci *mci;

	/* Check for pending channel report word. */
	mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
	if (!mci->cp)
		return;
	/* Process channel report words. */
	while (stcrw(&crw) == 0) {
		/* Check for responses to RCHP. */
		if (crw.slct && crw.rsc == CRW_RSC_CPATH)
			atomic_dec(&chpid_reset_count);
	}
}

#define RCHP_TIMEOUT (30 * USEC_PER_SEC)

static void css_reset(void)
{
	int i, ret;
	unsigned long long timeout;
	struct chp_id chpid;

	/* Reset subchannels. */
	for_each_subchannel(__shutdown_subchannel_easy, NULL);
	/* Reset channel paths. */
	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
	/* Enable channel report machine checks. */
	__ctl_set_bit(14, 28);
	/* Temporarily reenable machine checks. */
	local_mcck_enable();
	chp_id_init(&chpid);
	for (i = 0; i <= __MAX_CHPID; i++) {
		chpid.id = i;
		ret = rchp(chpid);
		if ((ret == 0) || (ret == 2))
			/*
			 * rchp either succeeded, or another rchp is already
			 * in progress. In either case, we'll get a crw.
			 */
			atomic_inc(&chpid_reset_count);
	}
	/* Wait for machine check for all channel paths. */
	timeout = get_clock() + (RCHP_TIMEOUT << 12);
	while (atomic_read(&chpid_reset_count) != 0) {
		if (get_clock() > timeout)
			break;
		cpu_relax();
	}
	/* Disable machine checks again. */
	local_mcck_disable();
	/* Disable channel report machine checks. */
	__ctl_clear_bit(14, 28);
	s390_base_mcck_handler_fn = NULL;
}

static struct reset_call css_reset_call = {
	.fn = css_reset,
};

static int __init init_css_reset_call(void)
{
	atomic_set(&chpid_reset_count, 0);
	register_reset_call(&css_reset_call);
	return 0;
}
arch_initcall(init_css_reset_call);

struct sch_match_id {
	struct subchannel_id schid;
	struct ccw_dev_id devid;
	int rc;
};

static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct sch_match_id *match_id = data;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if (schib.pmcw.dnv &&
	    (schib.pmcw.dev == match_id->devid.devno) &&
	    (schid.ssid == match_id->devid.ssid)) {
		match_id->schid = schid;
		match_id->rc = 0;
		return 1;
	}
	return 0;
}

static int reipl_find_schid(struct ccw_dev_id *devid,
			    struct subchannel_id *schid)
{
	struct sch_match_id match_id;

	match_id.devid = *devid;
	match_id.rc = -ENODEV;
	for_each_subchannel(__reipl_subchannel_match, &match_id);
	if (match_id.rc == 0)
		*schid = match_id.schid;
	return match_id.rc;
}

extern void do_reipl_asm(__u32 schid);

/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl_ccw_dev(struct ccw_dev_id *devid)
{
	struct subchannel_id schid;

	s390_reset_system();
	if (reipl_find_schid(devid, &schid) != 0)
		panic("IPL Device not found\n");
	do_reipl_asm(*((__u32 *)&schid));
}

int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
{
	struct subchannel_id schid;
	struct schib schib;

	schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
	if (!schid.one)
		return -ENODEV;
	if (stsch(schid, &schib))
		return -ENODEV;
	if (!schib.pmcw.dnv)
		return -ENODEV;
	iplinfo->devno = schib.pmcw.dev;
	iplinfo->is_qdio = schib.pmcw.qf;
	return 0;
}
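/*
 * A minimal illustrative sketch, not part of cio.c: a hypothetical
 * early-boot consumer of cio_get_iplinfo() above. cio_get_iplinfo() is
 * __init, so any caller must run during initialization as well.
 */
static int __init print_ipl_device(void)
{
	struct cio_iplinfo iplinfo;

	if (cio_get_iplinfo(&iplinfo))
		return -ENODEV;	/* not IPLed from a ccw device */
	printk(KERN_INFO "cio: IPL device %04x%s\n",
	       iplinfo.devno, iplinfo.is_qdio ? " (qdio)" : "");
	return 0;
}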