
📄 dm.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 Page 1 of 2
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FS_LOCKED 2

struct mapped_device {
	struct rw_semaphore lock;
	rwlock_t map_lock;
	atomic_t holders;

	unsigned long flags;

	request_queue_t *queue;
	struct gendisk *disk;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
};

#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;

static int __init local_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_io_cache = kmem_cache_create("dm_io",
				      sizeof(struct dm_io), 0, 0, NULL, NULL);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
				       0, 0, NULL, NULL);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}

static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);

	if (unregister_blkdev(_major, _name) < 0)
		DMERR("devfs_unregister_blkdev failed");

	_major = 0;

	DMINFO("cleaned up");
}

/*
 * We have a lot of init/exit functions, so it seems easier to
 * store them in an array.  The disposable macro 'xx'
 * expands a prefix into a pair of function names.
 */
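/*
 * For example, xx(local) below expands to { local_init, local_exit },
 * so each entry pairs a subsystem's init routine with its exit routine
 * in registration order.
 */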
static struct {
	int (*init) (void);
	void (*exit) (void);
} _inits[] = {
#define xx(n) {n ## _init, n ## _exit},
	xx(local)
	xx(dm_target)
	xx(dm_linear)
	xx(dm_stripe)
	xx(dm_interface)
#undef xx
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i].init();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_inits[i].exit();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_inits);

	while (i--)
		_inits[i].exit();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_get(md);
	return 0;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_put(md);
	return 0;
}

static inline struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->lock);
	return 0;		/* deferred successfully */
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static inline void dec_pending(struct dm_io *io, int error)
{
	if (error)
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (atomic_dec_and_test(&io->md->pending))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		bio_endio(io->bio, io->bio->bi_size, io->error);
		free_io(io->md, io);
	}
}

static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (bio->bi_size)
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0)
			error = r;

		else if (r > 0)
			/* the target wants another shot at the io */
			return 1;
	}

	free_tio(io->md, tio);
	dec_pending(io, error);
	bio_put(bio);
	return r;
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
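	/*
	 * ti->split_io, when set, is the granularity at which the target
	 * wants ios split; clamp len so the clone does not cross the next
	 * split_io-aligned boundary within the target.
	 */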
	if (ti->split_io) {
		sector_t boundary;
		boundary = dm_round_up(offset + 1, ti->split_io) - offset;

		if (len > boundary)
			len = boundary;
	}

	return len;
}

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	r = ti->type->map(ti, clone, &tio->info);
	if (r > 0)
		/* the bio has been remapped so dispatch it */
		generic_make_request(clone);

	else if (r < 0) {
		/* error the io and bail out */
		struct dm_io *io = tio->io;
		free_tio(tio->io->md, tio);
		dec_pending(io, -EIO);
		bio_put(clone);
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

/*
 * Creates a little bio that is just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc(GFP_NOIO, 1);
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len)
{
	struct bio *clone;

	clone = bio_clone(bio, GFP_NOIO);
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}

static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Create two copy bios to deal with io that has
		 * been split across a target.
		 */
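		/*
		 * The first split_bvec() covers the 'max' sectors that fit
		 * in the current target; the second covers the rest of this
		 * bvec and is mapped to the following target.
		 */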
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;

		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset, max);
		__map_bio(ti, clone, tio);

		ci->sector += max;
		ci->sector_count -= max;
		ti = dm_table_find_target(ci->map, ci->sector);

		len = to_sector(bv->bv_len) - max;
		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset + to_bytes(max), len);
		tio = alloc_tio(ci->md);
		tio->io = ci->io;
		tio->ti = ti;
		memset(&tio->info, 0, sizeof(tio->info));
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx++;
	}
}

/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	atomic_inc(&md->pending);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	struct mapped_device *md = q->queuedata;

	down_read(&md->lock);

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
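
The splitting code above relies on a simple reference-counting idiom: __split_bio() starts io_count at 1, __map_bio() takes an extra reference for every clone it dispatches, and the final dec_pending(ci.io, 0) drops the initial reference, so bio_endio() on the original bio runs only once every clone has completed. A minimal userspace sketch of the same idiom, with hypothetical names and C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct dm_io: one per original request. */
struct io {
	atomic_int count;   /* outstanding clones + the submitter's own reference */
	int error;          /* last error seen, if any */
};

/* Mirror of dec_pending(): record an error and complete on the last put. */
static void io_put(struct io *io, int error)
{
	if (error)
		io->error = error;
	if (atomic_fetch_sub(&io->count, 1) == 1) {
		printf("original completed, error=%d\n", io->error);
		free(io);
	}
}

/* Mirror of __map_bio(): take a reference for each clone dispatched. */
static void dispatch_clone(struct io *io, int clone_error)
{
	atomic_fetch_add(&io->count, 1);
	/* ... the clone would be submitted here; its completion calls io_put() ... */
	io_put(io, clone_error);
}

int main(void)
{
	struct io *io = malloc(sizeof(*io));

	atomic_init(&io->count, 1);          /* like __split_bio(): start at 1 */
	io->error = 0;

	dispatch_clone(io, 0);               /* first split completes cleanly  */
	dispatch_clone(io, -5);              /* second split fails (-EIO)      */

	io_put(io, 0);                       /* drop the extra reference count */
	return 0;
}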
