
dm-snap.c — Linux kernel source code (C)
static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count))
		origin_bios = bio_list_get(&primary_pe->origin_bios);

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	/*
	 * Free the primary pe if nothing references it.
	 */
	if (primary_pe && !atomic_read(&primary_pe->ref_count))
		free_pending_exception(primary_pe);

	return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_exception(&s->complete, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned int write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	kcopyd_copy(s->kcopyd_client,
		    &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already ?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception();
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->snap = s;
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

 out:
	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
		(bio->bi_sector & s->chunk_mask);
}

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else
		/*
		 * FIXME: this read path scares me because we
		 * always use the origin when we have a pending
		 * exception.  However I can't think of a
		 * situation where this is wrong - ejt.
		 */
		bio->bi_bdev = s->origin->bdev;

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					(unsigned long long)numerator,
					(unsigned long long)denominator);
			} else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */
	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 5, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 5, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	pending_pool = mempool_create_slab_pool(128, pending_cache);
	if (!pending_pool) {
		DMERR("Couldn't create pending pool.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad6;
	}

	return 0;

      bad6:
	mempool_destroy(pending_pool);
      bad5:
	kmem_cache_destroy(pending_cache);
      bad4:
	kmem_cache_destroy(exception_cache);
      bad3:
	exit_origin_hash();
      bad2:
	dm_unregister_target(&origin_target);
      bad1:
	dm_unregister_target(&snapshot_target);

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	int r;

	destroy_workqueue(ksnapd);

	r = dm_unregister_target(&snapshot_target);
	if (r)
		DMERR("snapshot unregister failed %d", r);

	r = dm_unregister_target(&origin_target);
	if (r)
		DMERR("origin unregister failed %d", r);

	exit_origin_hash();
	mempool_destroy(pending_pool);
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
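
To make the remapping done by remap_exception() and sector_to_chunk()/chunk_to_sector() easier to follow, here is a minimal user-space sketch. It is not part of dm-snap.c; the struct geom type, its field values, and main() are hypothetical, and it only assumes the usual power-of-two chunk geometry (chunk_mask == chunk_size - 1, chunk_shift == log2(chunk_size)) that the snapshot code relies on.

/*
 * Hypothetical stand-alone sketch of the chunk arithmetic used by
 * remap_exception(): compute which chunk a sector falls in, then remap
 * the sector onto the COW device while keeping its offset within the chunk.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;
typedef uint64_t chunk_t;

struct geom {
	sector_t chunk_size;	/* sectors per chunk, a power of two */
	sector_t chunk_mask;	/* chunk_size - 1 */
	unsigned chunk_shift;	/* log2(chunk_size) */
};

static chunk_t sector_to_chunk(const struct geom *g, sector_t sector)
{
	return sector >> g->chunk_shift;
}

static sector_t chunk_to_sector(const struct geom *g, chunk_t chunk)
{
	return chunk << g->chunk_shift;
}

int main(void)
{
	struct geom g = { .chunk_size = 16, .chunk_mask = 15, .chunk_shift = 4 };
	sector_t bio_sector = 1234;	/* where the write lands on the origin */
	chunk_t new_chunk = 7;		/* chunk allocated on the COW device */

	chunk_t old_chunk = sector_to_chunk(&g, bio_sector);
	/* Same expression as remap_exception(): base of new chunk + in-chunk offset. */
	sector_t remapped = chunk_to_sector(&g, new_chunk) + (bio_sector & g.chunk_mask);

	printf("origin sector %llu -> chunk %llu, remapped to COW sector %llu\n",
	       (unsigned long long)bio_sector,
	       (unsigned long long)old_chunk,
	       (unsigned long long)remapped);
	return 0;
}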

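The other pattern worth isolating is the reference-count hand-off in get_pending_exception()/put_pending_exception(): every sibling pending exception holds a reference on a shared "primary", and whichever sibling drops the last reference is the one that collects the queued origin bios and may free the primary. The sketch below is a simplified, single-threaded user-space illustration with hypothetical types (struct primary, get_primary, put_primary) standing in for the kernel's atomic ref_count and bio lists; it is not kernel code.

/*
 * Hypothetical sketch of the hand-off done by put_pending_exception():
 * the last put returns the queued origin work to its caller.
 */
#include <stdio.h>
#include <stdlib.h>

struct primary {
	int ref_count;
	int queued_origin_bios;	/* stand-in for the origin_bios bio list */
};

/* Mirrors get_pending_exception(): take a reference on the primary. */
static void get_primary(struct primary *p)
{
	p->ref_count++;
}

/*
 * Mirrors the core of put_pending_exception(): drop one reference and,
 * if it was the last, hand back the origin work that can now be released.
 */
static int put_primary(struct primary *p)
{
	if (--p->ref_count == 0) {
		int bios = p->queued_origin_bios;
		p->queued_origin_bios = 0;
		return bios;	/* caller is responsible for flushing these */
	}
	return 0;
}

int main(void)
{
	struct primary *p = calloc(1, sizeof(*p));
	p->queued_origin_bios = 3;

	/* Three siblings covering the same origin chunk each take a reference. */
	get_primary(p);
	get_primary(p);
	get_primary(p);

	/* The first two completions release nothing... */
	printf("released %d\n", put_primary(p));
	printf("released %d\n", put_primary(p));
	/* ...the last one gets the origin bios back and may free the primary. */
	printf("released %d\n", put_primary(p));

	free(p);
	return 0;
}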