
📄 dm.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 3
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
	unsigned long start_time;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5

struct mapped_device {
	struct rw_semaphore io_lock;
	struct semaphore suspend_lock;
	spinlock_t pushback_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;
	struct bio_list pushback;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;

static int __init local_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	r = dm_uevent_init();
	if (r) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		dm_uevent_exit();
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}

static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_interface_init,
};

void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = inode->i_bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);

	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

	/* We don't really need this lock, but we do need 'inode'. */
	unlock_kernel();

	md = inode->i_bdev->bd_disk->private_data;

	map = dm_get_table(md);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, inode, file, cmd, arg);

out:
	dm_table_put(map);

	lock_kernel();
	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;

	io->start_time = jiffies;

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
}

static int end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);

	disk_stat_add(dm_disk(md), ticks[rw], duration);

	return !pending;
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0;		/* deferred successfully */
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
		}

		if (end_io_acct(io))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		if (io->error != DM_ENDIO_REQUEUE) {
			blk_add_trace_bio(io->md->queue, io->bio,
					  BLK_TA_COMPLETE);

			bio_endio(io->bio, io->error);
		}

		free_io(io->md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	dec_pending(tio->io, error);

	/*
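The excerpt above breaks off mid-comment; the remainder of clone_endio() and the bio-splitting code continue on pages 2 and 3. Two patterns on this first page are worth calling out. First, the comment above dm_get_table() asks every reader of md->map to take a table reference and release it with dm_table_put() when done, exactly as dm_blk_ioctl() does. A minimal sketch of that pattern, assuming only the interfaces shown above (the helper name example_device_size is hypothetical, not part of dm.c):

/*
 * Illustrative sketch only: reference-counted access to the current table.
 * dm_get_table() takes a reference under md->map_lock; dm_table_put()
 * must drop it once the caller is finished with the table.
 */
static sector_t example_device_size(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);	/* takes a reference */
	sector_t size = 0;

	if (map) {
		size = dm_table_get_size(map);		/* safe while the reference is held */
		dm_table_put(map);			/* drop the reference */
	}

	return size;
}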

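Second, dec_pending() completes the original bio only after every clone made from it has finished: each clone accounts for one reference on dm_io::io_count, and whoever drops the count to zero ends the original bio and returns the dm_io to its mempool. A simplified sketch of that completion step, with the push-back handling and I/O accounting from dec_pending() omitted (example_clone_done is hypothetical; dm.c itself uses clone_endio() and dec_pending()):

/*
 * Illustrative sketch only: the io_count completion pattern.
 * The original bio is ended by whichever clone drops the last reference.
 */
static void example_clone_done(struct dm_io *io, int error)
{
	if (error)
		io->error = error;			/* remember the failure */

	if (atomic_dec_and_test(&io->io_count)) {	/* last outstanding clone? */
		bio_endio(io->bio, io->error);		/* complete the original bio */
		free_io(io->md, io);			/* return the dm_io to its mempool */
	}
}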