ll_rw_blk.c
	BUG();

	/* Test device size, when known. */
	if (blk_size[major])
		minorsize = blk_size[major][MINOR(bh->b_rdev)];
	if (minorsize) {
		unsigned long maxsector = (minorsize << 1) + 1;
		unsigned long sector = bh->b_rsector;
		unsigned int count = bh->b_size >> 9;

		if (maxsector < count || maxsector - count < sector) {
			/* Yecch */
			bh->b_state &= (1 << BH_Lock) | (1 << BH_Mapped);

			/* This may well happen - the kernel calls bread()
			   without checking the size of the device, e.g.,
			   when mounting a device. */
			printk(KERN_INFO
			       "attempt to access beyond end of device\n");
			printk(KERN_INFO "%s: rw=%d, want=%ld, limit=%d\n",
			       kdevname(bh->b_rdev), rw,
			       (sector + count)>>1, minorsize);

			/* Yecch again */
			bh->b_end_io(bh, 0);
			return;
		}
	}

	/*
	 * Resolve the mapping until finished. (drivers are
	 * still free to implement/resolve their own stacking
	 * by explicitly returning 0)
	 */
	/* NOTE: we don't repeat the blk_size check for each new device.
	 * Stacking drivers are expected to know what they are doing.
	 */
	do {
		q = blk_get_queue(bh->b_rdev);
		if (!q) {
			printk(KERN_ERR
			       "generic_make_request: Trying to access "
			       "nonexistent block-device %s (%ld)\n",
			       kdevname(bh->b_rdev), bh->b_rsector);
			buffer_IO_error(bh);
			break;
		}
	} while (q->make_request_fn(q, rw, bh));
}

/**
 * submit_bh: submit a buffer_head to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bh: The &struct buffer_head which describes the I/O
 *
 * submit_bh() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work.
 *
 * The extra functionality provided by submit_bh is to determine
 * b_rsector from b_blocknr and b_size, and to set b_rdev from b_dev.
 * This is appropriate for I/O requests that come from the buffer
 * cache and page cache, which (currently) always use aligned blocks.
 */
void submit_bh(int rw, struct buffer_head * bh)
{
	int count = bh->b_size >> 9;

	if (!test_bit(BH_Lock, &bh->b_state))
		BUG();

	set_bit(BH_Req, &bh->b_state);

	/*
	 * First step, 'identity mapping' - RAID or LVM might
	 * further remap this.
	 */
	bh->b_rdev = bh->b_dev;
	bh->b_rsector = bh->b_blocknr * count;

	generic_make_request(rw, bh);

	switch (rw) {
		case WRITE:
			kstat.pgpgout += count;
			break;
		default:
			kstat.pgpgin += count;
			break;
	}
}
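/*
 * A minimal usage sketch for submit_bh(): how a caller that wants its own
 * completion handler (rather than ll_rw_block()'s default) might hand a
 * buffer to the block layer directly.  The buffer must already be locked;
 * "my_end_io" and "my_read_block" are hypothetical names, not kernel APIs.
 */
#if 0	/* illustrative sketch only */
static void my_end_io(struct buffer_head *bh, int uptodate)
{
	mark_buffer_uptodate(bh, uptodate);	/* record the I/O result */
	unlock_buffer(bh);			/* wakes wait_on_buffer() sleepers */
}

static void my_read_block(struct buffer_head *bh)
{
	lock_buffer(bh);		/* submit_bh() BUGs on an unlocked buffer */
	bh->b_end_io = my_end_io;	/* called once the request completes */
	submit_bh(READ, bh);
	wait_on_buffer(bh);		/* sleep until my_end_io() unlocks it */
}
#endif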
/**
 * ll_rw_block: low-level access to block devices
 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads,
 * and requests an I/O operation on them, either a %READ or a %WRITE.
 * The third %READA option is described in the documentation for
 * generic_make_request(), which ll_rw_block() calls.
 *
 * This function provides extra functionality, not present in
 * generic_make_request(), that is relevant to buffers in the buffer
 * cache or page cache.  In particular it drops any buffer that it
 * cannot get a lock on (with the BH_Lock state bit), any buffer that
 * appears to be clean when doing a write request, and any buffer that
 * appears to be up-to-date when doing a read request.  Further it marks
 * as clean buffers that are processed for writing (the buffer cache
 * won't assume that they are actually clean until the buffer gets
 * unlocked).
 *
 * ll_rw_block sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * any waiters.  A client that needs a more interesting completion
 * routine should call submit_bh() (or generic_make_request())
 * directly.
 *
 * Caveat:
 *   All of the buffers must be for the same device, and must also be
 *   of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
{
	unsigned int major;
	int correct_size;
	int i;

	if (!nr)
		return;

	major = MAJOR(bhs[0]->b_dev);

	/* Determine correct block size for this device. */
	correct_size = get_hardsect_size(bhs[0]->b_dev);

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];
		if (bh->b_size % correct_size) {
			printk(KERN_NOTICE "ll_rw_block: device %s: "
			       "only %d-char blocks implemented (%u)\n",
			       kdevname(bhs[0]->b_dev),
			       correct_size, bh->b_size);
			goto sorry;
		}
	}

	if ((rw & WRITE) && is_read_only(bhs[0]->b_dev)) {
		printk(KERN_NOTICE "Can't write to read-only device %s\n",
		       kdevname(bhs[0]->b_dev));
		goto sorry;
	}

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		/* Only one thread can actually submit the I/O. */
		if (test_and_set_bit(BH_Lock, &bh->b_state))
			continue;

		/* We have the buffer lock */
		atomic_inc(&bh->b_count);
		bh->b_end_io = end_buffer_io_sync;

		switch(rw) {
		case WRITE:
			if (!atomic_set_buffer_clean(bh))
				/* Hmmph! Nothing to write */
				goto end_io;
			__mark_buffer_clean(bh);
			break;

		case READA:
		case READ:
			if (buffer_uptodate(bh))
				/* Hmmph! Already have it */
				goto end_io;
			break;
		default:
			BUG();
	end_io:
			bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
			continue;
		}

		submit_bh(rw, bh);
	}
	return;

sorry:
	/* Make sure we don't get infinite dirty retries.. */
	for (i = 0; i < nr; i++)
		mark_buffer_clean(bhs[i]);
}

#ifdef CONFIG_STRAM_SWAP
extern int stram_device_init (void);
#endif
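/*
 * A sketch of the classic synchronous-read pattern built on ll_rw_block(),
 * essentially what bread() in fs/buffer.c does.  end_buffer_io_sync(),
 * installed by ll_rw_block() above, unlocks the buffer on completion, so
 * wait_on_buffer() suffices to wait for the I/O.  "read_one_block" is a
 * hypothetical name for illustration.
 */
#if 0	/* illustrative sketch only */
static struct buffer_head *read_one_block(kdev_t dev, int block, int size)
{
	struct buffer_head *bh = getblk(dev, block, size);

	if (buffer_uptodate(bh))
		return bh;		/* already cached, no I/O needed */

	ll_rw_block(READ, 1, &bh);	/* locks bh and submits it */
	wait_on_buffer(bh);		/* sleeps until b_end_io runs */
	if (buffer_uptodate(bh))
		return bh;

	brelse(bh);			/* I/O error: drop our reference */
	return NULL;
}
#endif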
/**
 * end_that_request_first - end I/O on one buffer.
 * @req: the request being processed
 * @uptodate: 0 for I/O error
 * @name: the name printed for an I/O error
 *
 * Description:
 *     Ends I/O on the first buffer attached to @req, and sets it up
 *     for the next buffer_head (if any) in the cluster.
 *
 * Return:
 *     0 - we are done with this request, call end_that_request_last()
 *     1 - still buffers pending for this request
 *
 * Caveat:
 *     Drivers implementing their own end_request handling must call
 *     blk_finished_io() appropriately.
 */
int end_that_request_first (struct request *req, int uptodate, char *name)
{
	struct buffer_head * bh;
	int nsect;

	req->errors = 0;
	if (!uptodate)
		printk("end_request: I/O error, dev %s (%s), sector %lu\n",
		       kdevname(req->rq_dev), name, req->sector);

	if ((bh = req->bh) != NULL) {
		nsect = bh->b_size >> 9;
		blk_finished_io(nsect);
		req->bh = bh->b_reqnext;
		bh->b_reqnext = NULL;
		bh->b_end_io(bh, uptodate);
		if ((bh = req->bh) != NULL) {
			req->hard_sector += nsect;
			req->hard_nr_sectors -= nsect;
			req->sector = req->hard_sector;
			req->nr_sectors = req->hard_nr_sectors;
			req->current_nr_sectors = bh->b_size >> 9;
			if (req->nr_sectors < req->current_nr_sectors) {
				req->nr_sectors = req->current_nr_sectors;
				printk("end_request: buffer-list destroyed\n");
			}
			req->buffer = bh->b_data;
			return 1;
		}
	}
	return 0;
}

void end_that_request_last(struct request *req)
{
	if (req->waiting != NULL)
		complete(req->waiting);

	blkdev_release_request(req);
}

#define MB(kb)	((kb) << 10)

int __init blk_dev_init(void)
{
	struct blk_dev_struct *dev;
	int total_ram;

	request_cachep = kmem_cache_create("blkdev_requests",
					   sizeof(struct request),
					   0, SLAB_HWCACHE_ALIGN, NULL, NULL);

	if (!request_cachep)
		panic("Can't create request pool slab cache\n");

	for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;)
		dev->queue = NULL;

	memset(ro_bits, 0, sizeof(ro_bits));
	memset(max_readahead, 0, sizeof(max_readahead));
	memset(max_sectors, 0, sizeof(max_sectors));

	total_ram = nr_free_pages() << (PAGE_SHIFT - 10);

	/*
	 * Free request slots per queue.
	 * (Half for reads, half for writes)
	 */
	queue_nr_requests = 64;
	if (total_ram > MB(32))
		queue_nr_requests = 128;

	/*
	 * Batch frees according to queue length
	 */
	batch_requests = queue_nr_requests/4;
	printk("block: %d slots per queue, batch=%d\n",
	       queue_nr_requests, batch_requests);

#ifdef CONFIG_AMIGA_Z2RAM
	z2_init();
#endif
#ifdef CONFIG_STRAM_SWAP
	stram_device_init();
#endif
#ifdef CONFIG_BLK_DEV_RAM
	rd_init();
#endif
#ifdef CONFIG_ISP16_CDI
	isp16_init();
#endif
#if defined(CONFIG_IDE) && defined(CONFIG_BLK_DEV_IDE)
	ide_init();		/* this MUST precede hd_init */
#endif
#if defined(CONFIG_IDE) && defined(CONFIG_BLK_DEV_HD)
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_PS2
	ps2esdi_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_MFM
	mfm_init();
#endif
#ifdef CONFIG_PARIDE
	{ extern void paride_init(void); paride_init(); }
#endif
#ifdef CONFIG_MAC_FLOPPY
	swim3_init();
#endif
#ifdef CONFIG_BLK_DEV_SWIM_IOP
	swimiop_init();
#endif
#ifdef CONFIG_AMIGA_FLOPPY
	amiga_floppy_init();
#endif
#ifdef CONFIG_ATARI_FLOPPY
	atari_floppy_init();
#endif
#ifdef CONFIG_BLK_DEV_FD1772
	fd1772_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#endif
#ifdef FLOPPY_BOOT_DISABLE
	outb_p(0xc, 0x3f2);
#endif
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif
#ifdef CONFIG_ATARI_ACSI
	acsi_init();
#endif
#ifdef CONFIG_MCD
	mcd_init();
#endif
#ifdef CONFIG_MCDX
	mcdx_init();
#endif
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif
#ifdef CONFIG_CDU535
	sony535_init();
#endif
#ifdef CONFIG_GSCD
	gscd_init();
#endif
#ifdef CONFIG_CM206
	cm206_init();
#endif
#ifdef CONFIG_OPTCD
	optcd_init();
#endif
#ifdef CONFIG_SJCD
	sjcd_init();
#endif
#ifdef CONFIG_APBLOCK
	ap_init();
#endif
#ifdef CONFIG_DDV
	ddv_init();
#endif
#ifdef CONFIG_MDISK
	mdisk_init();
#endif
#ifdef CONFIG_DASD
	dasd_init();
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_BLOCK)
	tapeblock_init();
#endif
#ifdef CONFIG_BLK_DEV_XPRAM
	xpram_init();
#endif
#ifdef CONFIG_SUN_JSFLASH
	jsfd_init();
#endif
	return 0;
}
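/*
 * A sketch of how a block driver typically pairs end_that_request_first()
 * with end_that_request_last(), mirroring the end_request() helper in
 * drivers/block/blk.h.  "my_end_request" is a hypothetical name, and
 * DEVICE_NAME stands in for the macro each driver defines for itself.
 */
#if 0	/* illustrative sketch only */
static void my_end_request(struct request *req, int uptodate)
{
	if (end_that_request_first(req, uptodate, DEVICE_NAME))
		return;			/* more buffers still queued on req */

	blkdev_dequeue_request(req);	/* take it off the request queue */
	end_that_request_last(req);	/* wake waiters, free the request */
}
#endif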
EXPORT_SYMBOL(io_request_lock);
EXPORT_SYMBOL(end_that_request_first);
EXPORT_SYMBOL(end_that_request_last);
EXPORT_SYMBOL(blk_init_queue);
EXPORT_SYMBOL(blk_get_queue);
EXPORT_SYMBOL(blk_cleanup_queue);
EXPORT_SYMBOL(blk_queue_headactive);
EXPORT_SYMBOL(blk_queue_make_request);
EXPORT_SYMBOL(generic_make_request);
EXPORT_SYMBOL(blkdev_release_request);
EXPORT_SYMBOL(generic_unplug_device);
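/*
 * A sketch of the stacking-driver hook that generic_make_request() is
 * written around.  A remapping driver (in the style of RAID or LVM)
 * registers its own make_request_fn via blk_queue_make_request(); returning
 * non-zero makes generic_make_request() loop, re-resolve the queue for the
 * new b_rdev, and resubmit the buffer.  "my_make_request", "my_target_dev",
 * and "my_sector_offset" are hypothetical names for illustration.
 */
#if 0	/* illustrative sketch only */
static int my_make_request(request_queue_t *q, int rw, struct buffer_head *bh)
{
	bh->b_rdev = my_target_dev;		/* redirect to the real device  */
	bh->b_rsector += my_sector_offset;	/* shift into the mapped region */
	return 1;	/* non-zero: generic_make_request() resubmits the bh */
}

/* at initialisation: blk_queue_make_request(my_queue, my_make_request); */
#endif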