mdisk.c
/* NOTE: this listing starts mid-function, inside what is later called as
 * mdisk_read_label(dev, i). */
	devchar.rdc_len = sizeof(mdisk_dev_char_t);
	if (dia210(&devchar) == 0) {
		if (devchar.vdev_class == DEV_CLASS_FBA) {
			block = 2;
		} else {
			block = 3;
		}
		bio = dev->bio;
		for (b = 512; b < 4097; b = b * 2) {
			rc = mdisk_init_io(dev, b, 0, 64);
			if (rc > 4) {
				continue;
			}
			memset(&bio[0], 0, sizeof(mdisk_bio_t));
			bio[0].type = MDISK_READ_REQ;
			bio[0].block_number = block;
			bio[0].buffer = virt_to_phys(&label);
			dev->nr_bhs = 1;
			if (mdisk_rw_io_clustered(dev, &bio[0], 1,
						  (unsigned long) dev,
						  MDISK_SYNC) == 0) {
				if (label[0] != 0xc3d4e2f1) { /* "CMS1" in EBCDIC */
					printk(KERN_WARNING "mnd: %4lX "
					       "is not CMS format\n",
					       mdisk_setup_data.vdev[i]);
					rc = mdisk_term_io(dev);
					return 1;
				}
				if (label[13] == 0) {
					printk(KERN_WARNING "mnd: %4lX "
					       "is not reserved\n",
					       mdisk_setup_data.vdev[i]);
					rc = mdisk_term_io(dev);
					return 2;
				}
				mdisk_setup_data.size[i] =
					(label[7] - 1 - label[13]) *
					(label[3] >> 9) >> 1;
				mdisk_setup_data.blksize[i] = label[3];
				mdisk_setup_data.offset[i] = label[13] + 1;
				rc = mdisk_term_io(dev);
				return rc;
			}
			rc = mdisk_term_io(dev);
		}
		printk(KERN_WARNING "mnd: Cannot read label of %4lX "
		       "- is it formatted?\n",
		       mdisk_setup_data.vdev[i]);
		return 3;
	}
	return 4;
}

/*
 * This handles a clustered request in the success case:
 * all buffers except the last are detached and marked up to date
 * to the kernel; CURRENT->bh is then left pointing at the last
 * processed, not yet completed buffer.
 */
static __inline__ void
mdisk_end_request(int nr_bhs)
{
	int i;
	struct buffer_head *bh;
	struct request *req;

	if (nr_bhs > 1) {
		req = CURRENT;
		bh = req->bh;
		for (i = 0; i < nr_bhs - 1; i++) {
			req->bh = bh->b_reqnext;
			bh->b_reqnext = NULL;
			bh->b_end_io(bh, 1);
			bh = req->bh;
		}
		/*
		 * set CURRENT to the last processed, not yet marked buffer
		 */
		req->buffer = bh->b_data;
		req->current_nr_sectors = bh->b_size >> 9;
		CURRENT = req;
	}
	end_request(1);
}
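/*
 * Worked example (illustrative, not part of the original source):
 * a 4kB request against a filesystem using 1kB buffers arrives as a
 * chain of four buffer_heads, so mdisk_request() below counts
 * dev->nr_bhs = 4.  mdisk_end_request(4) then completes the first
 * three buffers directly via b_end_io(bh, 1), leaves CURRENT->bh
 * pointing at the fourth, and lets the final end_request(1) retire
 * that last buffer through the normal path.
 */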
/*
 * Block-driver specific functions
 */
void mdisk_request(request_queue_t *queue)
{
	mdisk_Dev *dev;
	mdisk_bio_t *bio;
	struct buffer_head *bh;
	unsigned int sector, nr, offset;
	int rc, rw, i;

	i = 0;
	while (CURRENT) {
		INIT_REQUEST;

		/* Check if the minor number is in range */
		if (DEVICE_NR(CURRENT_DEV) >= MDISK_DEVS) {
			static int count = 0;
			if (count++ < 5) /* print the message at most five times */
				printk(KERN_WARNING
				       "mnd: request for minor %d out of range\n",
				       DEVICE_NR(CURRENT_DEV));
			end_request(0);
			continue;
		}
		/*
		 * pointer to the device structure, from the static array
		 */
		dev = mdisk_devices + DEVICE_NR(CURRENT_DEV);
		/*
		 * check if the operation is past the end of the device
		 */
		if (CURRENT->nr_sectors + CURRENT->sector > dev->size) {
			static int count = 0;
			if (count++ < 5)
				printk(KERN_WARNING
				       "mnd%c: request past end of device\n",
				       'a' + DEVICE_NR(CURRENT_DEV));
			end_request(0);
			continue;
		}
		/*
		 * do command (read or write)
		 */
		switch (CURRENT->cmd) {
		case READ:
			rw = MDISK_READ_REQ;
			break;
		case WRITE:
			rw = MDISK_WRITE_REQ;
			break;
		default:
			/* can't happen */
			end_request(0);
			continue;
		}
		/*
		 * put the clustered requests into the mdisk_bio array;
		 * nr_sectors is checked against max_sectors in make_request;
		 * nr_sectors and sector are always in 512-byte units,
		 * but b_size depends on the filesystem block size
		 */
		sector = CURRENT->sector >> dev->blkshift;
		bh = CURRENT->bh;
		bio = dev->bio;
		dev->nr_bhs = 0;
		/*
		 * sector is translated to a block in minidisk context
		 */
		offset = 0;
		for (nr = 0, i = 0;
		     nr < CURRENT->nr_sectors && bh;
		     nr += dev->blkmult, sector++, i++) {
			memset(&bio[i], 0, sizeof(mdisk_bio_t));
			bio[i].type = rw;
			bio[i].block_number = sector;
			bio[i].buffer = virt_to_phys(bh->b_data + offset);
			offset += dev->blksize;
			if (bh->b_size <= offset) {
				offset = 0;
				bh = bh->b_reqnext;
				dev->nr_bhs++;
			}
		}
		if ((rc = mdisk_rw_io_clustered(dev, &bio[0], i,
						(unsigned long) dev,
#ifdef CONFIG_MDISK_SYNC
						MDISK_SYNC
#else
						MDISK_ASYNC
#endif
						)) > 8) {
			printk(KERN_WARNING
			       "mnd%c: %s request failed rc %d"
			       " sector %ld nr_sectors %ld\n",
			       'a' + DEVICE_NR(CURRENT_DEV),
			       rw == MDISK_READ_REQ ? "read" : "write",
			       rc, CURRENT->sector, CURRENT->nr_sectors);
			end_request(0);
			continue;
		}
		i = 0;
		/*
		 * synchronous:  loop to the end of the request
		 *               (INIT_REQUEST contains a return)
		 * asynchronous: end_request is done in the bottom half
		 */
#ifdef CONFIG_MDISK_SYNC
		mdisk_end_request(dev->nr_bhs);
#else
		if (rc == 0)
			mdisk_end_request(dev->nr_bhs);
		else
			return;
#endif
	}
}

/*
 * mdisk interrupt handler, called when a read/write request has
 * finished; queues and marks a bottom half.
 */
void
do_mdisk_interrupt(void)
{
	u16 code;
	mdisk_Dev *dev;

	code = S390_lowcore.cpu_addr;
	if ((code >> 8) != 0x03) {
		printk(KERN_WARNING
		       "mnd: wrong sub-interruption code %d\n", code >> 8);
		return;
	}
	/*
	 * pointer to the device structure, passed as the external
	 * interruption parameter
	 */
	dev = (mdisk_Dev *) S390_lowcore.ext_params;
	dev->status = code & 0x00ff;
	queue_task(&dev->tqueue, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}

/*
 * the bottom half checks the status of the request;
 * on success it calls end_request, and calls mdisk_request
 * again if there is more to transfer
 */
static void
do_mdisk_bh(void *data)
{
	mdisk_Dev *dev = (mdisk_Dev *) data;
	unsigned long flags;

	spin_lock_irqsave(&io_request_lock, flags);
	/*
	 * check the status of the asynchronous read/write
	 */
	if (dev->status != 0x00) {
		printk(KERN_WARNING "mnd: status of async rw %d\n",
		       dev->status);
		end_request(0);
	} else {
		/*
		 * end request for clustered requests
		 */
		if (CURRENT)
			mdisk_end_request(dev->nr_bhs);
	}
	/*
	 * if there is more to do, call mdisk_request
	 */
	if (CURRENT)
		mdisk_request(NULL);
	spin_unlock_irqrestore(&io_request_lock, flags);
}
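/*
 * Illustrative flow of one asynchronous request (CONFIG_MDISK_SYNC
 * unset), reconstructed from the three functions above:
 *
 *   mdisk_request()        builds dev->bio[] from the buffer_head chain
 *         |                and submits it via mdisk_rw_io_clustered()
 *         |                with MDISK_ASYNC; if that returns 0 the I/O
 *         |                completed immediately and is ended right away
 *         v
 *   do_mdisk_interrupt()   external interruption with sub-code 0x03;
 *         |                saves the status byte and queues dev->tqueue
 *         |                on tq_immediate
 *         v
 *   do_mdisk_bh()          under io_request_lock: on status 0 completes
 *                          the clustered buffers via mdisk_end_request()
 *                          and restarts mdisk_request() if work remains
 */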
void /* Added function HSM 12/03/99 */
mdisk_handler(int cpu, void *ds, struct pt_regs *regs)
{
	printk(KERN_ERR "mnd: received I/O interrupt... shouldn't happen\n");
}

int __init mdisk_init(void)
{
	int rc, i;
	mdisk_Dev *dev;
	request_queue_t *q;

	/*
	 * register block device
	 */
	if (register_blkdev(MAJOR_NR, "mnd", &mdisk_fops) < 0) {
		printk(KERN_WARNING
		       "mnd: unable to get major %d for mini disk\n",
		       MAJOR_NR);
		return MAJOR_NR;
	}
	q = BLK_DEFAULT_QUEUE(MAJOR_NR);
	blk_init_queue(q, mdisk_request);
	blk_queue_headactive(q, 0);
	/*
	 * setup sizes for available devices
	 */
	read_ahead[MAJOR_NR] = MDISK_RAHEAD;	 /* 8 sectors (4kB) read-ahead   */
	blk_size[MAJOR_NR] = mdisk_sizes;	 /* size of reserved mdisk in kB */
	blksize_size[MAJOR_NR] = mdisk_blksizes; /* blksize of device            */
	hardsect_size[MAJOR_NR] = mdisk_hardsects;
	max_sectors[MAJOR_NR] = mdisk_maxsectors;

	for (i = 0; i < MDISK_DEVS; i++) {
		if (mdisk_setup_data.vdev[i] == 0) {
			continue;
		}
		/* Added block HSM 12/03/99 */
		if (request_irq(get_irq_by_devno(mdisk_setup_data.vdev[i]),
				mdisk_handler, 0, "mnd",
				&(mdisk_devices[i].dev_status))) {
			printk(KERN_WARNING
			       "mnd: Cannot acquire I/O irq of"
			       " %4lX for paranoia reasons, skipping\n",
			       mdisk_setup_data.vdev[i]);
			continue;
		}
		/*
		 * open VM minidisk low level device
		 */
		dev = &mdisk_devices[i];
		dev->bio = mdisk_bio[i];
		dev->iob = &mdisk_iob[i];
		dev->vdev = mdisk_setup_data.vdev[i];
		if (mdisk_setup_data.size[i] == 0)
			rc = mdisk_read_label(dev, i);

		dev->size = mdisk_setup_data.size[i] * 2; /* kB -> 512-byte sectors */
		dev->blksize = mdisk_setup_data.blksize[i];
		dev->tqueue.routine = do_mdisk_bh;
		dev->tqueue.data = dev;
		dev->blkmult = dev->blksize / 512;
		dev->blkshift =
			dev->blkmult == 1 ? 0 :
			dev->blkmult == 2 ? 1 :
			dev->blkmult == 4 ? 2 :
			dev->blkmult == 8 ? 3 : -1;
		mdisk_sizes[i] = mdisk_setup_data.size[i];
		mdisk_blksizes[i] = mdisk_setup_data.blksize[i];
		mdisk_hardsects[i] = mdisk_setup_data.blksize[i];
		/*
		 * max sectors for one clustered request
		 */
		mdisk_maxsectors[i] = MDISK_MAXSECTORS * dev->blkmult;

		rc = mdisk_init_io(dev,
				   mdisk_setup_data.blksize[i],
				   mdisk_setup_data.offset[i], /* offset in vdev */
				   dev->size >> dev->blkshift  /* size in blocks */
				   );
		if (rc > 4) {
			printk(KERN_WARNING "mnd%c: init failed (rc: %d)\n",
			       'a' + i, rc);
			mdisk_sizes[i] = 0;
			continue;
		}
		/*
		 * set vdev in the device structure for further rw access;
		 * vdev and size are given by linload
		 */
		printk(KERN_INFO
		       "mnd%c: register device at major %X with %d blocks %d blksize\n",
		       'a' + i, MAJOR_NR,
		       dev->size >> dev->blkshift, dev->blkmult * 512);
	}
	/*
	 * enable service-signal external interruptions:
	 * Control Register 0 bit 22 := 1 (bit 9 counted from the right);
	 * besides this, PSW bit 7 must be set to 1 somewhere for external
	 * interruptions
	 */
	ctl_set_bit(0, 9);
	return 0;
}
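/*
 * Geometry example (illustrative): a minidisk formatted with 4kB CMS
 * blocks gives blkmult = 4096 / 512 = 8 and blkshift = 3, so a 512-byte
 * sector number from the block layer maps to minidisk block
 * (sector >> 3).  mdisk_setup_data.size[] is kept in kB (the unit
 * blk_size[] expects), hence dev->size = size * 2 is the size in
 * 512-byte sectors and dev->size >> dev->blkshift is the size in
 * minidisk blocks passed to mdisk_init_io().
 */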