ll_rw_blk.c
/*
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bh->b_end_io
 * function described (one day) elsewhere.
 *
 * The caller of generic_make_request must make sure that b_page,
 * b_addr, b_size are set to describe the memory buffer, that b_rdev
 * and b_rsector are set to describe the device address, and the
 * b_end_io and optionally b_private are set to describe how
 * completion notification should be signaled.  BH_Mapped should also
 * be set (to confirm that b_dev and b_blocknr are valid).
 *
 * generic_make_request and the drivers it calls may use b_reqnext,
 * and may change b_rdev and b_rsector.  So the values of these fields
 * should NOT be depended on after the call to generic_make_request.
 * Because of this, the caller should record the device address
 * information in b_dev and b_blocknr.
 *
 * Apart from those fields mentioned above, no other fields, and in
 * particular, no other flags, are changed by generic_make_request or
 * any lower level drivers.
 */
void generic_make_request (int rw, struct buffer_head * bh)
{
	int major = MAJOR(bh->b_rdev);
	request_queue_t *q;

	if (!bh->b_end_io)
		BUG();

	if (blk_size[major]) {
		unsigned long maxsector = (blk_size[major][MINOR(bh->b_rdev)] << 1) + 1;
		unsigned int sector, count;

		count = bh->b_size >> 9;
		sector = bh->b_rsector;

		if (maxsector < count || maxsector - count < sector) {
			bh->b_state &= (1 << BH_Lock) | (1 << BH_Mapped);
			if (blk_size[major][MINOR(bh->b_rdev)]) {

				/* This may well happen - the kernel calls
				   bread() without checking the size of the
				   device, e.g., when mounting a device. */
				printk(KERN_INFO
				       "attempt to access beyond end of device\n");
				printk(KERN_INFO "%s: rw=%d, want=%d, limit=%d\n",
				       kdevname(bh->b_rdev), rw,
				       (sector + count) >> 1,
				       blk_size[major][MINOR(bh->b_rdev)]);
			}
			bh->b_end_io(bh, 0);
			return;
		}
	}

	/*
	 * Resolve the mapping until finished. (drivers are
	 * still free to implement/resolve their own stacking
	 * by explicitly returning 0)
	 */
	/* NOTE: we don't repeat the blk_size check for each new device.
	 * Stacking drivers are expected to know what they are doing.
	 */
	do {
		q = blk_get_queue(bh->b_rdev);
		if (!q) {
			printk(KERN_ERR
			       "generic_make_request: Trying to access "
			       "nonexistent block-device %s (%ld)\n",
			       kdevname(bh->b_rdev), bh->b_rsector);
			buffer_IO_error(bh);
			break;
		}
	} while (q->make_request_fn(q, rw, bh));
}

/**
 * submit_bh: submit a buffer_head to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bh: The &struct buffer_head which describes the I/O
 *
 * submit_bh() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work.
 *
 * The extra functionality provided by submit_bh is to determine
 * b_rsector from b_blocknr and b_size, and to set b_rdev from b_dev.
 * This is appropriate for IO requests that come from the buffer
 * cache and page cache, which (currently) always use aligned blocks.
 */
void submit_bh(int rw, struct buffer_head * bh)
{
	if (!test_bit(BH_Lock, &bh->b_state))
		BUG();

	set_bit(BH_Req, &bh->b_state);

	/*
	 * First step, 'identity mapping' - RAID or LVM might
	 * further remap this.
	 */
	bh->b_rdev = bh->b_dev;
	bh->b_rsector = bh->b_blocknr * (bh->b_size >> 9);

	generic_make_request(rw, bh);

	switch (rw) {
		case WRITE:
			kstat.pgpgout++;
			break;
		default:
			kstat.pgpgin++;
			break;
	}
}
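/*
 * Example (added here for illustration, not part of the original
 * source): a minimal sketch of how a client with its own completion
 * routine might drive submit_bh() directly, per the contract described
 * above.  The names example_end_io() and example_read_block() are
 * hypothetical; the buffer_head is assumed to have come from the usual
 * buffer-cache paths (e.g. getblk()), with b_dev, b_blocknr, b_size
 * and b_data valid and BH_Mapped set.
 */
static void example_end_io(struct buffer_head *bh, int uptodate)
{
	/* Runs asynchronously when the I/O completes. */
	mark_buffer_uptodate(bh, uptodate);
	unlock_buffer(bh);		/* wakes anyone in wait_on_buffer() */
}

static void example_read_block(struct buffer_head *bh)
{
	lock_buffer(bh);		/* submit_bh() requires BH_Lock to be held */
	bh->b_end_io = example_end_io;	/* completion is asynchronous */
	submit_bh(READ, bh);
	wait_on_buffer(bh);		/* sleep until example_end_io() unlocks bh */
}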
/*
 * Default IO end handler, used by "ll_rw_block()".
 */
static void end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	mark_buffer_uptodate(bh, uptodate);
	unlock_buffer(bh);
}

/**
 * ll_rw_block: low-level access to block devices
 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads,
 * and requests an I/O operation on them, either a %READ or a %WRITE.
 * The third %READA option is described in the documentation for
 * generic_make_request() which ll_rw_block() calls.
 *
 * This function provides extra functionality that is not in
 * generic_make_request() that is relevant to buffers in the buffer
 * cache or page cache.  In particular it drops any buffer that it
 * cannot get a lock on (with the BH_Lock state bit), any buffer that
 * appears to be clean when doing a write request, and any buffer that
 * appears to be up-to-date when doing a read request.  Further it marks
 * as clean buffers that are processed for writing (the buffer cache
 * won't assume that they are actually clean until the buffer gets
 * unlocked).
 *
 * ll_rw_block sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * any waiters.  A client that needs a more interesting completion
 * routine should call submit_bh() (or generic_make_request())
 * directly.
 *
 * Caveat:
 * All of the buffers must be for the same device, and must also be
 * of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
{
	unsigned int major;
	int correct_size;
	int i;

	major = MAJOR(bhs[0]->b_dev);

	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bhs[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		struct buffer_head *bh;
		bh = bhs[i];
		if (bh->b_size != correct_size) {
			printk(KERN_NOTICE "ll_rw_block: device %s: "
			       "only %d-char blocks implemented (%u)\n",
			       kdevname(bhs[0]->b_dev),
			       correct_size, bh->b_size);
			goto sorry;
		}
	}

	if ((rw & WRITE) && is_read_only(bhs[0]->b_dev)) {
		printk(KERN_NOTICE "Can't write to read-only device %s\n",
		       kdevname(bhs[0]->b_dev));
		goto sorry;
	}

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh;
		bh = bhs[i];

		/* Only one thread can actually submit the I/O. */
		if (test_and_set_bit(BH_Lock, &bh->b_state))
			continue;

		/* We have the buffer lock */
		bh->b_end_io = end_buffer_io_sync;

		switch(rw) {
		case WRITE:
			if (!atomic_set_buffer_clean(bh))
				/* Hmmph! Nothing to write */
				goto end_io;
			__mark_buffer_clean(bh);
			break;

		case READA:
		case READ:
			if (buffer_uptodate(bh))
				/* Hmmph! Already have it */
				goto end_io;
			break;
		default:
			BUG();
	end_io:
			bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
			continue;
		}

		submit_bh(rw, bh);
	}
	return;

sorry:
	/* Make sure we don't get infinite dirty retries.. */
	for (i = 0; i < nr; i++)
		mark_buffer_clean(bhs[i]);
}
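/*
 * Example (added here for illustration, not part of the original
 * source): the classic synchronous-read pattern built on top of
 * ll_rw_block(), essentially what bread() does.  example_read_sync()
 * is a hypothetical name; bh is assumed to come from getblk() with a
 * b_size matching the device's current block size.
 */
static int example_read_sync(struct buffer_head *bh)
{
	if (buffer_uptodate(bh))
		return 0;		/* ll_rw_block() would skip it anyway */
	ll_rw_block(READ, 1, &bh);	/* locks bh, installs end_buffer_io_sync */
	wait_on_buffer(bh);		/* sleep until the completion unlocks bh */
	return buffer_uptodate(bh) ? 0 : -EIO;
}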
#ifdef CONFIG_STRAM_SWAP
extern int stram_device_init (void);
#endif

/*
 * First step of what used to be end_request
 *
 * Returns 0 when the request is complete (the driver should then
 * continue with end_that_request_last), and 1 when more buffer_heads
 * remain to be transferred for this request.
 */
int end_that_request_first (struct request *req, int uptodate, char *name)
{
	struct buffer_head * bh;
	int nsect;

	req->errors = 0;
	if (!uptodate)
		printk("end_request: I/O error, dev %s (%s), sector %lu\n",
			kdevname(req->rq_dev), name, req->sector);

	if ((bh = req->bh) != NULL) {
		nsect = bh->b_size >> 9;
		req->bh = bh->b_reqnext;
		bh->b_reqnext = NULL;
		bh->b_end_io(bh, uptodate);
		if ((bh = req->bh) != NULL) {
			req->hard_sector += nsect;
			req->hard_nr_sectors -= nsect;
			req->sector = req->hard_sector;
			req->nr_sectors = req->hard_nr_sectors;
			req->current_nr_sectors = bh->b_size >> 9;
			if (req->nr_sectors < req->current_nr_sectors) {
				req->nr_sectors = req->current_nr_sectors;
				printk("end_request: buffer-list destroyed\n");
			}
			req->buffer = bh->b_data;
			return 1;
		}
	}
	return 0;
}

void end_that_request_last(struct request *req)
{
	if (req->e) {
		printk("end_that_request_last called with non-dequeued req\n");
		BUG();
	}
	if (req->sem != NULL)
		up(req->sem);

	blkdev_release_request(req);
}
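/*
 * Example (added here for illustration, not part of the original
 * source): a sketch of the completion path a block driver typically
 * builds from the two helpers above, usually run from its interrupt
 * handler with io_request_lock held.  example_end_request() is a
 * hypothetical name; req is assumed to be the request the hardware
 * just finished with.
 */
static void example_end_request(struct request *req, int uptodate)
{
	/* Complete one buffer_head; 1 means more segments remain. */
	if (end_that_request_first(req, uptodate, "example"))
		return;			/* keep transferring this request */
	blkdev_dequeue_request(req);	/* must be off the queue ... */
	end_that_request_last(req);	/* ... before final completion */
}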
int __init blk_dev_init(void)
{
	struct blk_dev_struct *dev;

	request_cachep = kmem_cache_create("blkdev_requests",
					   sizeof(struct request),
					   0, SLAB_HWCACHE_ALIGN, NULL, NULL);

	if (!request_cachep)
		panic("Can't create request pool slab cache\n");

	for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;)
		dev->queue = NULL;

	memset(ro_bits, 0, sizeof(ro_bits));
	memset(max_readahead, 0, sizeof(max_readahead));
	memset(max_sectors, 0, sizeof(max_sectors));

#ifdef CONFIG_AMIGA_Z2RAM
	z2_init();
#endif
#ifdef CONFIG_STRAM_SWAP
	stram_device_init();
#endif
#ifdef CONFIG_BLK_DEV_RAM
	rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
	loop_init();
#endif
#ifdef CONFIG_ISP16_CDI
	isp16_init();
#endif
#if defined(CONFIG_IDE) && defined(CONFIG_BLK_DEV_IDE)
	ide_init();		/* this MUST precede hd_init */
#endif
#if defined(CONFIG_IDE) && defined(CONFIG_BLK_DEV_HD)
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_PS2
	ps2esdi_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_MFM
	mfm_init();
#endif
#ifdef CONFIG_PARIDE
	{ extern void paride_init(void); paride_init(); };
#endif
#ifdef CONFIG_MAC_FLOPPY
	swim3_init();
#endif
#ifdef CONFIG_BLK_DEV_SWIM_IOP
	swimiop_init();
#endif
#ifdef CONFIG_AMIGA_FLOPPY
	amiga_floppy_init();
#endif
#ifdef CONFIG_ATARI_FLOPPY
	atari_floppy_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
#if defined(__i386__)	/* Do we even need this? */
	outb_p(0xc, 0x3f2);
#endif
#endif
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif
#ifdef CONFIG_ATARI_ACSI
	acsi_init();
#endif
#ifdef CONFIG_MCD
	mcd_init();
#endif
#ifdef CONFIG_MCDX
	mcdx_init();
#endif
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif
#ifdef CONFIG_CDU535
	sony535_init();
#endif
#ifdef CONFIG_GSCD
	gscd_init();
#endif
#ifdef CONFIG_CM206
	cm206_init();
#endif
#ifdef CONFIG_OPTCD
	optcd_init();
#endif
#ifdef CONFIG_SJCD
	sjcd_init();
#endif
#ifdef CONFIG_APBLOCK
	ap_init();
#endif
#ifdef CONFIG_DDV
	ddv_init();
#endif
#ifdef CONFIG_BLK_DEV_NBD
	nbd_init();
#endif
#ifdef CONFIG_MDISK
	mdisk_init();
#endif
#ifdef CONFIG_DASD
	dasd_init();
#endif
#ifdef CONFIG_SUN_JSFLASH
	jsfd_init();
#endif
#ifdef CONFIG_BLK_DEV_LVM
	lvm_init();
#endif
	return 0;
};

EXPORT_SYMBOL(io_request_lock);
EXPORT_SYMBOL(end_that_request_first);
EXPORT_SYMBOL(end_that_request_last);
EXPORT_SYMBOL(blk_init_queue);
EXPORT_SYMBOL(blk_get_queue);
EXPORT_SYMBOL(blk_cleanup_queue);
EXPORT_SYMBOL(blk_queue_headactive);
EXPORT_SYMBOL(blk_queue_pluggable);
EXPORT_SYMBOL(blk_queue_make_request);
EXPORT_SYMBOL(generic_make_request);
EXPORT_SYMBOL(blkdev_release_request);
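/*
 * Example (added here for illustration, not part of the original
 * source): how a driver module typically consumes the queue API
 * exported above.  example_request_fn() and EXAMPLE_MAJOR are
 * hypothetical; a real driver would also register its major with
 * register_blkdev() and fill in blk_size/blksize_size.
 */
#define EXAMPLE_MAJOR	240	/* hypothetical, experimental-range major */

static void example_request_fn(request_queue_t *q)
{
	/* Called with io_request_lock held; a real driver would take
	 * the head request here, start the hardware, and finish it via
	 * end_that_request_first()/end_that_request_last(). */
}

static int __init example_driver_init(void)
{
	/* Attach a request function to this major's default queue. */
	blk_init_queue(BLK_DEFAULT_QUEUE(EXAMPLE_MAJOR), example_request_fn);
	blk_queue_headactive(BLK_DEFAULT_QUEUE(EXAMPLE_MAJOR), 0);
	return 0;
}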