s390io.c
                         ioinfo[irq]->devstat.devno );

                s390_displayhex( buffer,
                                 ioinfo[irq]->irq_desc.dev_id->ii.sense.data,
                                 ioinfo[irq]->irq_desc.dev_id->rescnt);
            } /* endif */
        }
#endif
        if (cio_debug_initialized) {
            stsch(irq, &(ioinfo[irq]->schib) );

            sprintf( buffer, "s390_start_IO(%04X) - irb for "
                     "device %04X, after status pending\n",
                     irq,
                     ioinfo[irq]->devstat.devno );

            s390_displayhex2( buffer,
                              &(ioinfo[irq]->devstat.ii.irb) ,
                              sizeof(irb_t), 2);

            sprintf( buffer, "s390_start_IO(%04X) - schib for "
                     "device %04X, after status pending\n",
                     irq,
                     ioinfo[irq]->devstat.devno );

            s390_displayhex2( buffer,
                              &(ioinfo[irq]->schib) ,
                              sizeof(schib_t), 2);

            if (ioinfo[irq]->devstat.flag & DEVSTAT_FLAG_SENSE_AVAIL) {
                sprintf( buffer, "s390_start_IO(%04X) - sense "
                         "data for "
                         "device %04X, after status pending\n",
                         irq,
                         ioinfo[irq]->devstat.devno );

                s390_displayhex2( buffer,
                                  ioinfo[irq]->irq_desc.dev_id->ii.sense.data,
                                  ioinfo[irq]->irq_desc.dev_id->rescnt, 2);
            } /* endif */
        }
    }
    else
    {
        ret = -EIO;

        ioinfo[irq]->devstat.flag &= ~DEVSTAT_NOT_OPER;
        ioinfo[irq]->ui.flags.oper = 1;
    } /* endif */

    break;

case 2 :            /* busy */

    ret = -EBUSY;
    break;

default:            /* device/path not operational */

    if ( flag & DOIO_VALID_LPM )
    {
        ioinfo[irq]->opm &= ~lpm;
    }
    else
    {
        ioinfo[irq]->opm = 0;
    } /* endif */

    if ( ioinfo[irq]->opm == 0 )
    {
        ioinfo[irq]->ui.flags.oper  = 0;
        ioinfo[irq]->devstat.flag  |= DEVSTAT_NOT_OPER;
    } /* endif */

    ret = -ENODEV;

    memcpy( ioinfo[irq]->irq_desc.dev_id,
            &(ioinfo[irq]->devstat),
            sizeof( devstat_t) );

#ifdef CONFIG_DEBUG_IO
    stsch(irq, &(ioinfo[irq]->schib) );

    sprintf( buffer, "s390_start_IO(%04X) - schib for "
             "device %04X, after 'not oper' status\n",
             irq,
             ioinfo[irq]->devstat.devno );

    s390_displayhex( buffer,
                     &(ioinfo[irq]->schib),
                     sizeof(schib_t));
#endif
    if (cio_debug_initialized) {
        stsch(irq, &(ioinfo[irq]->schib) );

        sprintf( buffer, "s390_start_IO(%04X) - schib for "
                 "device %04X, after 'not oper' status\n",
                 irq,
                 ioinfo[irq]->devstat.devno );

        s390_displayhex2( buffer,
                          &(ioinfo[irq]->schib),
                          sizeof(schib_t), 2);
    }
    break;

} /* endswitch */

if ( flag & DOIO_WAIT_FOR_INTERRUPT)
{
    disable_cpu_sync_isc( irq );
} /* endif */

if ( flag & DOIO_DONT_CALL_INTHDLR )
{
    ioinfo[irq]->ui.flags.repnone = 0;
} /* endif */

return( ret);
}

int do_IO( int            irq,          /* IRQ */
           ccw1_t        *cpa,          /* channel program address */
           unsigned long  user_intparm, /* interruption parameter */
           __u8           lpm,          /* logical path mask */
           unsigned long  flag)         /* flags : see above */
{
    int  ret = 0;
    char dbf_txt[15];

    SANITY_CHECK(irq);

    /* handler registered ? or free_irq() in process already ? */
    if ( !ioinfo[irq]->ui.flags.ready || ioinfo[irq]->ui.flags.unready )
    {
        return( -ENODEV );
    } /* endif */

    if (cio_debug_initialized) {
        sprintf(dbf_txt, "doIO%x", irq);
        debug_text_event(cio_debug_trace_id, 4, dbf_txt);
    }

    /*
     * Note: We ignore the device operational status - if not operational,
     *       the SSCH will lead to an -ENODEV condition ...
     */
    if ( !ioinfo[irq]->ui.flags.busy )   /* last I/O completed ? */
    {
        ret = s390_start_IO( irq, cpa, user_intparm, lpm, flag);
    }
    else if ( ioinfo[irq]->ui.flags.fast )
    {
        /*
         * If primary status was received and ending status is missing,
         * the device driver won't be notified on the ending status
         * if early (fast) interrupt notification was requested.
         * Therefore we have to queue the next incoming request. If
         * halt_IO() is issued while there is a request queued, a HSCH
         * needs to be issued and the queued request must be deleted
         * but its intparm must be returned (see halt_IO() processing)
         */
        if ( ioinfo[irq]->ui.flags.w4final && !ioinfo[irq]->ui.flags.doio_q )
        {
            ioinfo[irq]->qflag    = flag;
            ioinfo[irq]->qcpa     = cpa;
            ioinfo[irq]->qintparm = user_intparm;
            ioinfo[irq]->qlpm     = lpm;
        }
        else
        {
            ret = -EBUSY;
        } /* endif */
    }
    else
    {
        ret = -EBUSY;
    } /* endif */

    return( ret );
}

/*
 * resume suspended I/O operation
 */
int resume_IO( int irq)
{
    int  ret = 0;
    char dbf_txt[15];

    SANITY_CHECK(irq);

    if (cio_debug_initialized) {
        sprintf(dbf_txt, "resIO%x", irq);
        debug_text_event(cio_debug_trace_id, 4, dbf_txt);
    }

    /*
     * We allow for 'resume' requests only for active I/O operations
     */
    if ( ioinfo[irq]->ui.flags.busy )
    {
        int ccode;

        ccode = rsch( irq);

        if (cio_debug_initialized) {
            sprintf(dbf_txt, "ccode:%d", ccode);
            debug_text_event(cio_debug_trace_id, 4, dbf_txt);
        }

        switch (ccode) {
        case 0 :
            break;

        case 1 :
            s390_process_IRQ( irq );
            ret = -EBUSY;
            break;

        case 2 :
            ret = -EINVAL;
            break;

        case 3 :
            /*
             * useless to wait for request completion
             *  as device is no longer operational !
             */
            ioinfo[irq]->ui.flags.oper = 0;
            ioinfo[irq]->ui.flags.busy = 0;
            ret = -ENODEV;
            break;
        } /* endswitch */
    }
    else
    {
        ret = -ENOTCONN;
    } /* endif */

    return( ret);
}

/*
 * Note: The "intparm" parameter is not used by the halt_IO() function
 *       itself, as no ORB is built for the HSCH instruction. However,
 *       it allows the device interrupt handler to associate the upcoming
 *       interrupt with the halt_IO() request.
 */
int halt_IO( int           irq,
             unsigned long user_intparm,
             unsigned long flag)  /* possible DOIO_WAIT_FOR_INTERRUPT */
{
    int  ret;
    int  ccode;
    char dbf_txt[15];

    SANITY_CHECK(irq);

    /*
     * we only allow for halt_IO if the device has an I/O handler associated
     */
    if ( !ioinfo[irq]->ui.flags.ready )
    {
        ret = -ENODEV;
    }
    /*
     * we ignore the halt_io() request if ending_status was received but
     *  a SENSE operation is waiting for completion.
     */
    else if ( ioinfo[irq]->ui.flags.w4sense )
    {
        ret = 0;
    }
    else
    {
        if (cio_debug_initialized) {
            sprintf(dbf_txt, "haltIO%x", irq);
            debug_text_event(cio_debug_trace_id, 2, dbf_txt);
        }

        /*
         * If sync processing was requested we lock the sync ISC,
         *  modify the device to present interrupts for this ISC only
         *  and switch the CPU to handle this ISC + the console ISC
         *  exclusively.
         */
        if ( flag & DOIO_WAIT_FOR_INTERRUPT )
        {
            ret = enable_cpu_sync_isc( irq);

            if ( ret )
            {
                return( ret);
            } /* endif */
        } /* endif */

        /*
         * Issue "Halt subchannel" and process condition code
         */
        ccode = hsch( irq );

        if (cio_debug_initialized) {
            sprintf(dbf_txt, "ccode:%d", ccode);
            debug_text_event(cio_debug_trace_id, 2, dbf_txt);
        }

        switch ( ccode ) {
        case 0:

            ioinfo[irq]->ui.flags.haltio = 1;

            if ( !ioinfo[irq]->ui.flags.doio )
            {
                ioinfo[irq]->ui.flags.busy = 1;
                ioinfo[irq]->u_intparm     = user_intparm;
                ioinfo[irq]->devstat.cstat = 0;
                ioinfo[irq]->devstat.dstat = 0;
                ioinfo[irq]->devstat.lpum  = 0;
                ioinfo[irq]->devstat.flag  = DEVSTAT_HALT_FUNCTION;
                ioinfo[irq]->devstat.scnt  = 0;
            }
            else
            {
                ioinfo[irq]->devstat.flag |= DEVSTAT_HALT_FUNCTION;
            } /* endif */

            /*
             * If synchronous I/O processing is requested, we have
             *  to wait for the corresponding interrupt to occur by
             *  polling the interrupt condition. However, as multiple
             *  interrupts may be outstanding, we must not just wait
             *  for the first interrupt, but must poll until ours
             *  pops up.
             */
            if ( flag & DOIO_WAIT_FOR_INTERRUPT )
            {
                int              io_sub;
                __u32            io_parm;
                psw_t            io_new_psw;
                int              ccode;
                int              ready = 0;
                struct _lowcore *lc    = NULL;

                /*
                 * We shouldn't perform a TPI loop, waiting for
                 *  an interrupt to occur, but should load a
                 *  WAIT PSW instead. Otherwise we may keep the
                 *  channel subsystem busy, not able to present
                 *  the interrupt. When our sync. interrupt
                 *  arrived we reset the I/O old PSW to its
                 *  original value.
                 */
                memcpy( &io_new_psw, &lc->io_new_psw, sizeof(psw_t));

                ccode = iac();

                switch (ccode) {
                case 0:   // primary-space
                    io_sync_wait.mask = _IO_PSW_MASK
                                        | _PSW_PRIM_SPACE_MODE
                                        | _PSW_IO_WAIT;
                    break;
                case 1:   // secondary-space
                    io_sync_wait.mask = _IO_PSW_MASK
                                        | _PSW_SEC_SPACE_MODE
                                        | _PSW_IO_WAIT;
                    break;
                case 2:   // access-register
                    io_sync_wait.mask = _IO_PSW_MASK
                                        | _PSW_ACC_REG_MODE
                                        | _PSW_IO_WAIT;
                    break;
                case 3:   // home-space
                    io_sync_wait.mask = _IO_PSW_MASK
                                        | _PSW_HOME_SPACE_MODE
                                        | _PSW_IO_WAIT;
                    break;
                default:
                    panic( "halt_IO() : unexpected "
                           "address-space-control %d\n",
                           ccode);
                    break;
                } /* endswitch */

                io_sync_wait.addr = FIX_PSW(&&hio_wakeup);

                /*
                 * Martin didn't like modifying the new PSW, now we take
                 *  a fast exit in do_IRQ() instead
                 */
                *(__u32 *)__LC_SYNC_IO_WORD = 1;

                do
                {
                    __load_psw( io_sync_wait );
hio_wakeup:
                    io_parm = *(__u32 *)__LC_IO_INT_PARM;
                    io_sub  = (__u32)*(__u16 *)__LC_SUBCHANNEL_NR;
                    ready   = s390_process_IRQ( io_sub );

                } while ( !((io_sub == irq) && (ready == 1)) );

                *(__u32 *)__LC_SYNC_IO_WORD = 0;
            } /* endif */

            ret = 0;
            break;

        case 1 :            /* status pending */

            ioinfo[irq]->devstat.flag |= DEVSTAT_STATUS_PENDING;

            /*
             * initialize the device driver specific devstat irb area
             */
            memset( &ioinfo[irq]->irq_desc.dev_id->ii.irb,
                    '\0', sizeof( irb_t) );

            /*
             * Let the common interrupt handler process the pending
             *  status. However, we must avoid calling the user
             *  action handler, as it won't be prepared to handle
             *  a pending status during do_IO() processing inline.
             *  This also implies that s390_process_IRQ must
             *  terminate synchronously - especially if device
             *  sensing is required.
             */
            ioinfo[irq]->ui.flags.s_pend = 1;
            ioinfo[irq]->ui.flags.busy   = 1;
            ioinfo[irq]->ui.flags.doio   = 1;

            s390_process_IRQ( irq );

            ioinfo[irq]->ui.flags.s_pend  = 0;
            ioinfo[irq]->ui.flags.busy    = 0;
            ioinfo[irq]->ui.flags.doio    = 0;
            ioinfo[irq]->ui.flags.repall  = 0;
            ioinfo[irq]->ui.flags.w4final = 0;

            ioinfo[irq]->devstat.flag |= DEVSTAT_FINAL_STATUS;

            /*
             * In multipath mode a condition code 3 implies the last
             *  path has gone, except we have previously restricted
             *  the I/O to a particular path. A condition code 1
             *  (0 won't occur) results in return code EIO as well
             *  as 3 with another path than the one used
             *  (i.e. path available mask is non-zero).
             */
            if ( ioinfo[irq]->devstat.ii.irb.scsw.cc == 3 )
            {
                ret = -ENODEV;
                ioinfo[irq]->devstat.flag |= DEVSTAT_NOT_OPER;
                ioinfo[irq]->ui.flags.oper = 0;
            }
            else
            {
                ret = -EIO;
                ioinfo[irq]->devstat.flag &= ~DEVSTAT_NOT_OPER;
                ioinfo[irq]->ui.flags.oper = 1;
            } /* endif */

            break;

        case 2 :            /* busy */

            ret = -EBUSY;
            break;

        default:            /* device not operational */

            ret = -ENODEV;
            break;
        } /* endswitch */

        if ( flag & DOIO_WAIT_FOR_INTERRUPT )
        {
            disable_cpu_sync_isc( irq );
        } /* endif */
    } /* endif */

    return( ret );
}

/*
 * Note: The "intparm" parameter is not used by the clear_IO() function
 *       itself, as no ORB is built for the CSCH instruction. However,
 *       it allows the device interrupt handler to associate the upcoming
 *       interrupt with the clear_IO() request.
 */
int clear_IO( int           irq,
              unsigned long user_intparm,
              unsigned long flag)  /* possible DOIO_WAIT_FOR_INTERRUPT */
{
    int  ret = 0;
    int  ccode;
    char dbf_txt[15];

    SANITY_CHECK(irq);

    if ( ioinfo[irq] == INVALID_STORAGE_AREA )
    {
        return( -ENODEV);
    }

    /*
     * we only allow for clear_IO if the device has an I/O handler associated
     */
    if ( !ioinfo[irq]->ui.flags.ready )
    {
        ret = -ENODEV;
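
/*
 * Illustrative usage sketch - NOT part of s390io.c. It outlines how a device
 * driver might drive the do_IO()/halt_IO() entry points above, assuming an
 * interrupt handler has already been registered for "irq" through the common
 * I/O layer. The command code, logical path mask and buffer below are made-up
 * example values, not anything defined by this file.
 */
static int example_read_then_cancel( int irq )
{
    static char buf[4096] __attribute__ ((aligned(8)));
    ccw1_t      ccw;
    int         rc;

    ccw.cmd_code = 0x02;                       /* hypothetical READ command */
    ccw.flags    = CCW_FLAG_SLI;               /* tolerate incorrect length */
    ccw.count    = sizeof(buf);
    ccw.cda      = (__u32) virt_to_phys( buf );

    /* start the channel program, restricted to the first four paths */
    rc = do_IO( irq, &ccw, (unsigned long) &ccw, 0xf0, DOIO_VALID_LPM );

    if ( rc )
        return rc;    /* -EBUSY, -ENODEV or a start error from s390_start_IO() */

    /* later: give up on the request and wait until the halt is presented */
    return halt_IO( irq, (unsigned long) &ccw, DOIO_WAIT_FOR_INTERRUPT );
}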