
dm-table.c

Linux kernel source code
C
Page 1 of 2
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;
	return 0;
}

void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct io_restrictions *rs = &ti->limits;

	/*
	 * Combine the device limits low.
	 *
	 * FIXME: if we move an io_restriction struct
	 *        into q this would just be a call to
	 *        combine_restrictions_low()
	 */
	rs->max_sectors =
		min_not_zero(rs->max_sectors, q->max_sectors);

	/* FIXME: Device-Mapper on top of RAID-0 breaks because DM
	 *        currently doesn't honor MD's merge_bvec_fn routine.
	 *        In this case, we'll force DM to use PAGE_SIZE or
	 *        smaller I/O, just to be safe. A better fix is in the
	 *        works, but add this for the time being so it will at
	 *        least operate correctly.
	 */
	if (q->merge_bvec_fn)
		rs->max_sectors =
			min_not_zero(rs->max_sectors,
				     (unsigned int) (PAGE_SIZE >> 9));

	rs->max_phys_segments =
		min_not_zero(rs->max_phys_segments,
			     q->max_phys_segments);

	rs->max_hw_segments =
		min_not_zero(rs->max_hw_segments, q->max_hw_segments);

	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

	rs->max_segment_size =
		min_not_zero(rs->max_segment_size, q->max_segment_size);

	rs->max_hw_sectors =
		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);

	rs->seg_boundary_mask =
		min_not_zero(rs->seg_boundary_mask,
			     q->seg_boundary_mask);

	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);

	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, int mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);

	if (!r)
		dm_set_device_limits(ti, (*result)->bdev);

	return r;
}

/*
 * Decrement a devices use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}
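Note how dm_set_device_limits() above stacks restrictions: each io_restrictions field is clamped toward the most conservative value reported by an underlying queue, with zero meaning "not yet constrained". Below is a minimal userspace sketch of that combining rule; struct limits and combine() are illustrative stand-ins invented for the demo, not kernel names, and the macro mirrors the semantics of the kernel's min_not_zero().

#include <stdio.h>

/* Userspace rendering of the kernel's min_not_zero(): take the smaller
 * of two values, but treat 0 as "no limit set". (Evaluates arguments
 * more than once; fine for a sketch.) */
#define min_not_zero(x, y) \
	((x) == 0 ? (y) : ((y) == 0 ? (x) : ((x) < (y) ? (x) : (y))))

struct limits {			/* illustrative stand-in for io_restrictions */
	unsigned max_sectors;
	unsigned max_segment_size;
};

static void combine(struct limits *rs, const struct limits *q)
{
	rs->max_sectors = min_not_zero(rs->max_sectors, q->max_sectors);
	rs->max_segment_size =
		min_not_zero(rs->max_segment_size, q->max_segment_size);
}

int main(void)
{
	struct limits target = { 0, 65536 };	/* sectors unconstrained so far */
	struct limits devA = { 1024, 0 };	/* constrains sectors only */
	struct limits devB = { 256, 131072 };

	combine(&target, &devA);
	combine(&target, &devB);

	/* prints "256 65536": each field independently converges on the
	 * strictest non-zero limit seen across the stacked devices */
	printf("%u %u\n", target.max_sectors, target.max_segment_size);
	return 0;
}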
/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}

static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_hw_sectors)
		rs->max_hw_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = -1;
	if (!rs->bounce_pfn)
		rs->bounce_pfn = -1;
}

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2, total = 0; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}
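dm_split_args() above tokenizes a target's parameter string destructively and in place, so a backslash can protect a space inside a token and the tokens need no allocation of their own. The same logic ported to userspace for illustration; split_args(), the fixed-size argv, and the sample input are assumptions made for this demo, not part of dm-table.c.

#include <ctype.h>
#include <stdio.h>

/* Userspace sketch of the dm_split_args() tokenizer: split a mutable
 * string on whitespace, honouring backslash escapes, writing tokens
 * back into the input buffer.  A fixed-size argv stands in for the
 * kernel's growable realloc_argv() array. */
static int split_args(char *input, char **argv, int max_args)
{
	char *start, *end = input, *out;
	int argc = 0;

	while (argc < max_args) {
		start = end;
		while (*start && isspace((unsigned char)*start))
			start++;		/* skip leading whitespace */
		if (!*start)
			break;			/* hit the end */

		end = out = start;
		while (*end) {
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);	/* unescape */
				end += 2;
				continue;
			}
			if (isspace((unsigned char)*end))
				break;		/* end of token */
			*out++ = *end++;
		}
		if (*end)
			end++;			/* step past the separator */
		*out = '\0';			/* terminate token in place */
		argv[argc++] = start;
	}
	return argc;
}

int main(void)
{
	char line[] = "linear /dev/with\\ space 0";
	char *argv[8];
	int i, argc = split_args(line, argv, 8);

	/* prints: "linear", "/dev/with space", "0" */
	for (i = 0; i < argc; i++)
		printf("argv[%d] = \"%s\"\n", i, argv[i]);
	return 0;
}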
/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}

static DEFINE_MUTEX(_event_lock);

void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
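dm_table_find_target() above descends one index node per level; at the leaf level, t->highs[] holds each target's last sector, so the first key with node[k] >= sector identifies the target covering that sector. A depth-one userspace sketch of that leaf test follows; the target boundaries are made up for the demo.

#include <stdio.h>

/* Depth-1 version of the leaf loop in dm_table_find_target(): highs[]
 * stores the last sector of each target, so the first entry that is
 * >= the queried sector names the covering target. */
int main(void)
{
	/* three targets: sectors 0-99, 100-299, 300-1023 */
	unsigned long long highs[] = { 99, 299, 1023 };
	unsigned long long sectors[] = { 0, 99, 100, 1023 };
	unsigned i, k, n = sizeof(highs) / sizeof(highs[0]);

	for (i = 0; i < sizeof(sectors) / sizeof(sectors[0]); i++) {
		for (k = 0; k < n; k++)
			if (highs[k] >= sectors[i])
				break;	/* same test as the leaf loop */
		/* prints targets 0, 0, 1, 2 for the four queries */
		printf("sector %llu -> target %u\n", sectors[i], k);
	}
	return 0;
}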
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub devices
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->max_hw_sectors = t->limits.max_hw_sectors;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	q->bounce_pfn = t->limits.bounce_pfn;

	if (t->limits.no_cluster)
		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
	else
		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

int dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	return suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	return suspend_targets(t, 1);
}

int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct list_head *d, *devices;
	int r = 0;

	devices = dm_table_get_devices(t);
	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		struct request_queue *q = bdev_get_queue(dd->bdev);

		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}

void dm_table_unplug_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		struct request_queue *q = bdev_get_queue(dd->bdev);

		blk_unplug(q);
	}
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	dm_get(t->md);

	return t->md;
}

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);
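The device iteration in dm_table_any_congested() and dm_table_unplug_all() above is the kernel's standard circular-list walk, where list_entry() recovers the enclosing struct dm_dev from its embedded list_head by subtracting the member offset. A self-contained userspace sketch of the same pattern; struct dm_dev_demo and the hand-built links are illustrative only.

#include <stdio.h>
#include <stddef.h>

/* Minimal userspace rendering of the circular list walk: list_entry()
 * is just container_of(), i.e. pointer arithmetic with offsetof(). */
struct list_head {
	struct list_head *next, *prev;
};

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dm_dev_demo {		/* illustrative stand-in for struct dm_dev */
	int id;
	struct list_head list;
};

int main(void)
{
	struct list_head devices = { &devices, &devices };
	struct dm_dev_demo a = { 1, { NULL, NULL } };
	struct dm_dev_demo b = { 2, { NULL, NULL } };
	struct list_head *d;

	/* hand-link: devices -> a -> b -> devices (circular) */
	devices.next = &a.list;	a.list.prev = &devices;
	a.list.next = &b.list;	b.list.prev = &a.list;
	b.list.next = &devices;	devices.prev = &b.list;

	/* same traversal shape as the two functions above */
	for (d = devices.next; d != &devices; d = d->next)
		printf("dev %d\n", list_entry(d, struct dm_dev_demo, list)->id);

	return 0;
}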
