📄 s390io.c
字号:
/* NOTE(review): tail of the preceding function — its start lies above this
 * chunk and is not visible here. Releases the sync-ISC lock taken for a
 * synchronous request (see the matching enable in halt_IO() below). */
     && ( sync_isc_locked ) )
	{
		disable_cpu_sync_isc( irq );

		spin_unlock_irqrestore( &sync_isc, psw_flags);

		sync_isc_locked              = 0; // local setting
		ioinfo[irq]->ui.flags.syncio = 0; // global setting

	} /* endif */

	return( ret);
}

/*
 * do_IO - start an I/O request on subchannel "irq"
 *
 * Validates the subchannel number and the state of its ioinfo[] entry,
 * then either starts the channel program immediately (device idle) or,
 * for "fast" (early interrupt notification) devices waiting for final
 * status, queues exactly one follow-on request. Any other busy state
 * yields -EBUSY.
 *
 * Returns 0 / the s390_start_IO() result on success paths, -ENODEV for
 * an invalid or unregistered subchannel, -EBUSY if the device is busy
 * and the request cannot be queued.
 */
int do_IO( int            irq,          /* IRQ */
           ccw1_t        *cpa,          /* channel program address */
           unsigned long  user_intparm, /* interruption parameter */
           __u8           lpm,          /* logical path mask */
           unsigned long  flag)         /* flags : see above */
{
	int ret = 0;

	/* subchannel number out of the range we manage ? */
	if ( irq > highest_subchannel || irq < 0 )
	{
		return( -ENODEV );
	} /* endif */

	/* ioinfo slot was invalidated (device gone) ? */
	if ( ioinfo[irq] == INVALID_STORAGE_AREA )
	{
		return( -ENODEV);
	}

	/* handler registered ? or free_irq() in process already ? */
	if ( !ioinfo[irq]->ui.flags.ready || ioinfo[irq]->ui.flags.unready )
	{
		return( -ENODEV );
	} /* endif */

	/*
	 * Note: We ignore the device operational status - if not operational,
	 *       the SSCH will lead to an -ENODEV condition ...
	 */
	if ( !ioinfo[irq]->ui.flags.busy )         /* last I/O completed ? */
	{
		ret = s390_start_IO( irq, cpa, user_intparm, lpm, flag);
	}
	else if ( ioinfo[irq]->ui.flags.fast )
	{
		/*
		 * If primary status was received and ending status is missing,
		 *  the device driver won't be notified on the ending status
		 *  if early (fast) interrupt notification was requested.
		 *  Therefore we have to queue the next incoming request. If
		 *  halt_IO() is issued while there is a request queued, a HSCH
		 *  needs to be issued and the queued request must be deleted
		 *  but its intparm must be returned (see halt_IO() processing)
		 */
		if (     ioinfo[irq]->ui.flags.w4final
		     && !ioinfo[irq]->ui.flags.doio_q )
		{
			/* single-slot queue: remember the follow-on request */
			ioinfo[irq]->qflag    = flag;
			ioinfo[irq]->qcpa     = cpa;
			ioinfo[irq]->qintparm = user_intparm;
			ioinfo[irq]->qlpm     = lpm;
		}
		else
		{
			ret = -EBUSY;
		} /* endif */
	}
	else
	{
		ret = -EBUSY;
	} /* endif */

	return( ret );
}

/*
 * resume_IO - resume a suspended I/O operation on subchannel "irq"
 *
 * Issues RESUME SUBCHANNEL (rsch) and maps the condition code to an
 * errno-style return. Only allowed while an I/O operation is active
 * (busy flag set); otherwise returns -ENOTCONN.
 */
int resume_IO( int irq)
{
	int ret = 0;

	if ( irq > highest_subchannel || irq < 0 )
	{
		return( -ENODEV );
	} /* endif */

	if ( ioinfo[irq] == INVALID_STORAGE_AREA )
	{
		return( -ENODEV);
	}

	/*
	 * We allow for 'resume' requests only for active I/O operations
	 */
	if ( ioinfo[irq]->ui.flags.busy )
	{
		int ccode;

		ccode = rsch( irq);

		switch (ccode) {
		case 0 :  /* resume function initiated */
			break;

		case 1 :  /* status pending - process it first */
			s390_process_IRQ( irq );
			ret = -EBUSY;
			break;

		case 2 :  /* subchannel not suspended / not applicable */
			ret = -EINVAL;
			break;

		case 3 :
			/*
			 * useless to wait for request completion
			 *  as device is no longer operational !
			 */
			ioinfo[irq]->ui.flags.oper = 0;
			ioinfo[irq]->ui.flags.busy = 0;
			ret                        = -ENODEV;
			break;
		} /* endswitch */
	}
	else
	{
		ret = -ENOTCONN;
	} /* endif */

	return( ret);
}

/*
 * Note: The "intparm" parameter is not used by the halt_IO() function
 *       itself, as no ORB is built for the HSCH instruction. However,
 *       it allows the device interrupt handler to associate the upcoming
 *       interrupt with the halt_IO() request.
*/int halt_IO( int irq, unsigned long user_intparm, unsigned long flag) /* possible DOIO_WAIT_FOR_INTERRUPT */{ int ret; int ccode; unsigned long psw_flags; int sync_isc_locked = 0; if ( irq > highest_subchannel || irq < 0 ) { ret = -ENODEV; } if ( ioinfo[irq] == INVALID_STORAGE_AREA ) { return( -ENODEV); } /* * we only allow for halt_IO if the device has an I/O handler associated */ else if ( !ioinfo[irq]->ui.flags.ready ) { ret = -ENODEV; } /* * we ignore the halt_io() request if ending_status was received but * a SENSE operation is waiting for completion. */ else if ( ioinfo[irq]->ui.flags.w4sense ) { ret = 0; } /* * We don't allow for halt_io with a sync do_IO() requests pending. */ else if ( ioinfo[irq]->ui.flags.syncio ) { ret = -EBUSY; } else { /* * If sync processing was requested we lock the sync ISC, * modify the device to present interrupts for this ISC only * and switch the CPU to handle this ISC + the console ISC * exclusively. */ if ( flag & DOIO_WAIT_FOR_INTERRUPT ) { // // check whether we run recursively (sense processing) // if ( !ioinfo[irq]->ui.flags.syncio ) { spin_lock_irqsave( &sync_isc, psw_flags); ret = enable_cpu_sync_isc( irq); if ( ret ) { spin_unlock_irqrestore( &sync_isc, psw_flags); return( ret); } else { sync_isc_locked = 1; // local ioinfo[irq]->ui.flags.syncio = 1; // global } /* endif */ } /* endif */ } /* endif */ /* * Issue "Halt subchannel" and process condition code */ ccode = hsch( irq ); switch ( ccode ) { case 0: ioinfo[irq]->ui.flags.haltio = 1; if ( !ioinfo[irq]->ui.flags.doio ) { ioinfo[irq]->ui.flags.busy = 1; ioinfo[irq]->u_intparm = user_intparm; ioinfo[irq]->devstat.cstat = 0; ioinfo[irq]->devstat.dstat = 0; ioinfo[irq]->devstat.lpum = 0; ioinfo[irq]->devstat.flag = DEVSTAT_HALT_FUNCTION; ioinfo[irq]->devstat.scnt = 0; } else { ioinfo[irq]->devstat.flag |= DEVSTAT_HALT_FUNCTION; } /* endif */ /* * If synchronous I/O processing is requested, we have * to wait for the corresponding interrupt to occur by * polling the 
interrupt condition. However, as multiple * interrupts may be outstanding, we must not just wait * for the first interrupt, but must poll until ours * pops up. */ if ( flag & DOIO_WAIT_FOR_INTERRUPT ) { int io_sub; __u32 io_parm; psw_t io_new_psw; int ccode; int ready = 0; struct _lowcore *lc = NULL; /* * We shouldn't perform a TPI loop, waiting for * an interrupt to occur, but should load a * WAIT PSW instead. Otherwise we may keep the * channel subsystem busy, not able to present * the interrupt. When our sync. interrupt * arrived we reset the I/O old PSW to its * original value. */ memcpy( &io_new_psw, &lc->io_new_psw, sizeof(psw_t)); ccode = iac(); switch (ccode) { case 0: // primary-space io_sync_wait.mask = _IO_PSW_MASK | _PSW_PRIM_SPACE_MODE | _PSW_IO_WAIT; break; case 1: // secondary-space io_sync_wait.mask = _IO_PSW_MASK | _PSW_SEC_SPACE_MODE | _PSW_IO_WAIT; break; case 2: // access-register io_sync_wait.mask = _IO_PSW_MASK | _PSW_ACC_REG_MODE | _PSW_IO_WAIT; break; case 3: // home-space io_sync_wait.mask = _IO_PSW_MASK | _PSW_HOME_SPACE_MODE | _PSW_IO_WAIT; break; default: panic( "halt_IO() : unexpected " "address-space-control %d\n", ccode); break; } /* endswitch */ io_sync_wait.addr = FIX_PSW(&&hio_wakeup); /* * Martin didn't like modifying the new PSW, now we take * a fast exit in do_IRQ() instead */ *(__u32 *)__LC_SYNC_IO_WORD = 1; do { asm volatile ( "lpsw %0" : : "m" (io_sync_wait) );hio_wakeup: io_parm = *(__u32 *)__LC_IO_INT_PARM; io_sub = (__u32)*(__u16 *)__LC_SUBCHANNEL_NR; ready = s390_process_IRQ( io_sub ); } while ( !((io_sub == irq) && (ready == 1)) ); *(__u32 *)__LC_SYNC_IO_WORD = 0; } /* endif */ ret = 0; break; case 1 : /* status pending */ ioinfo[irq]->devstat.flag |= DEVSTAT_STATUS_PENDING; /* * initialize the device driver specific devstat irb area */ memset( &((devstat_t *) ioinfo[irq]->irq_desc.action->dev_id)->ii.irb, '\0', sizeof( irb_t) ); /* * Let the common interrupt handler process the pending * status. 
However, we must avoid calling the user * action handler, as it won't be prepared to handle * a pending status during do_IO() processing inline. * This also implies that s390_process_IRQ must * terminate synchronously - especially if device * sensing is required. */ ioinfo[irq]->ui.flags.s_pend = 1; ioinfo[irq]->ui.flags.busy = 1; ioinfo[irq]->ui.flags.doio = 1; s390_process_IRQ( irq ); ioinfo[irq]->ui.flags.s_pend = 0; ioinfo[irq]->ui.flags.busy = 0; ioinfo[irq]->ui.flags.doio = 0; ioinfo[irq]->ui.flags.repall = 0; ioinfo[irq]->ui.flags.w4final = 0; ioinfo[irq]->devstat.flag |= DEVSTAT_FINAL_STATUS; /* * In multipath mode a condition code 3 implies the last * path has gone, except we have previously restricted * the I/O to a particular path. A condition code 1 * (0 won't occur) results in return code EIO as well * as 3 with another path than the one used (i.e. path available mask is non-zero). */ if ( ioinfo[irq]->devstat.ii.irb.scsw.cc == 3 ) { ret = -ENODEV; ioinfo[irq]->devstat.flag |= DEVSTAT_NOT_OPER; ioinfo[irq]->ui.flags.oper = 0; } else { ret = -EIO; ioinfo[irq]->devstat.flag &= ~DEVSTAT_NOT_OPER; ioinfo[irq]->ui.flags.oper = 1; } /* endif */ break; case 2 : /* busy */ ret = -EBUSY; break; default: /* device not operational */ ret = -ENODEV; break; } /* endswitch */ if ( ( flag & DOIO_WAIT_FOR_INTERRUPT ) && ( sync_isc_locked ) ) { sync_isc_locked = 0; // local setting ioinfo[irq]->ui.flags.syncio = 0; // global setting disable_cpu_sync_isc( irq ); spin_unlock_irqrestore( &sync_isc, psw_flags); } /* endif */ } /* endif */ return( ret );}/* * Note: The "intparm" parameter is not used by the clear_IO() function * itself, as no ORB is built for the CSCH instruction. However, * it allows the device interrupt handler to associate the upcoming * interrupt with the clear_IO() request. 
*/int clear_IO( int irq, unsigned long user_intparm, unsigned long flag) /* possible DOIO_WAIT_FOR_INTERRUPT */{ int ret; int ccode; unsigned long psw_flags; int sync_isc_locked = 0; if ( irq > highest_subchannel || irq < 0 ) { ret = -ENODEV; } if ( ioinfo[irq] == INVALID_STORAGE_AREA ) { return( -ENODEV); } /* * we only allow for halt_IO if the device has an I/O handler associated */ else if ( !ioinfo[irq]->ui.flags.ready ) { ret = -ENODEV; } /* * we ignore the halt_io() request if ending_status was received but * a SENSE operation is waiting for completion. */ else if ( ioinfo[irq]->ui.flags.w4sense ) { ret = 0; } /* * We don't allow for halt_io with a sync do_IO() requests pending. * Concurrent I/O is possible in SMP environments only, but the * sync. I/O request can be gated to one CPU at a time only. */ else if ( ioinfo[irq]->ui.flags.syncio ) { ret = -EBUSY; } else { /* * If sync processing was requested we lock the sync ISC, * modify the device to present interrupts for this ISC only * and switch the CPU to handle this ISC + the console ISC * exclusively. */ if ( flag & DOIO_WAIT_FOR_INTERRUPT ) { // // check whether we run recursively (sense processing) // if ( !ioinfo[irq]->ui.flags.syncio ) { spin_lock_irqsave( &sync_isc, psw_flags); ret = enable_cpu_sync_isc( irq); if ( ret ) { spin_unlock_irqrestore( &sync_isc, psw_flags); return( ret); } else { sync_isc_locked = 1; // local ioinfo[irq]->ui.flags.syncio = 1; // global } /* endif */ } /* endif */ } /* endif */ /* * Issue "Halt subchannel" and process condition code */ ccode = csch( irq ); switch ( ccode ) { case 0: ioinfo[irq]->ui.flags.haltio = 1; if ( !ioinfo[irq]->ui.flags.doio ) { ioinfo[irq]->ui.flags.busy = 1; ioinfo[irq]->u_intparm = user_intparm; ioinfo[irq]->devstat.cstat = 0;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -