
dm.c (Linux kernel source code, C)
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;
	bio_put(bio);
	free_tio(md, tio);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev,
				    clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}

static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones.
 */
static int __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map))
		return -EIO;

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);

	return 0;
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int r = -EIO;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			r = queue_io(md, bio);

		if (r <= 0)
			goto out_req;

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	r = __split_bio(md, bio);
	up_read(&md->io_lock);

out_req:
	if (r < 0)
		bio_io_error(bio);

	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(struct mapped_device *md, int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r) {
		goto out;
	}

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad0;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(md, &minor);
	else
		r = specific_minor(md, minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

 bad4:
	bioset_free(md->bs);
 bad_no_bioset:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_cleanup_queue(md->queue);
 bad1_free_minor:
	free_minor(minor);
 bad1:
	module_put(THIS_MODULE);
 bad0:
	kfree(md);
	return NULL;
}
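
For reference, here is a minimal userspace sketch (not part of dm.c) of the boundary arithmetic that max_io_len() applies when a target sets split_io. The round-up-and-mask trick only works when split_io is a power of two, which is how device-mapper chunk sizes are used; the function name and the concrete values below are made-up illustrations.

#include <stdio.h>

typedef unsigned long long sector_t;

/*
 * Sectors left before the next split_io boundary, using the same
 * arithmetic as max_io_len(): round offset up to the next multiple
 * of split_io (a power of two), then subtract the original offset.
 */
static sector_t sectors_to_boundary(sector_t offset, sector_t split_io)
{
	return ((offset + split_io) & ~(split_io - 1)) - offset;
}

int main(void)
{
	sector_t split_io = 64;	/* hypothetical 64-sector chunk size */
	sector_t offset = 10;	/* I/O starts 10 sectors into the target */

	/* Prints 54: only 54 sectors fit before the next 64-sector boundary. */
	printf("%llu\n", sectors_to_boundary(offset, split_io));
	return 0;
}

In max_io_len() this value caps len so that a single clone never crosses a chunk boundary; __clone_and_map() then issues further clones for whatever remains.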
