📄 dm-table.c
Font size:
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>

/*
 * The table is looked up through a btree whose node size matches a
 * cacheline, so each lookup step touches at most one cache line.
 */
#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

struct dm_table {
	atomic_t holders;	/* reference count; dropped via dm_table_put() */

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;	/* highest sector handled by targets[i] */
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	int mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets, some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned long n, unsigned long base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 *
 * Arguments and the whole expansion are parenthesized so the macro
 * composes safely inside larger expressions (the previous form
 * expanded to an unparenthesized ?: chain).
 */
#define min_not_zero(l, r) \
	((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/*
 * Combine two io_restrictions, always taking the lower value.
 * hardsect_size is the exception: the larger (coarser) sector size
 * must win so every underlying device can be addressed.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
}

/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t, unsigned int l,
				 unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.  Walks down the rightmost child
 * chain; returns the all-ones sentinel for out-of-range nodes.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

/*
 * Zeroed vmalloc of nmemb * elem_size bytes.  Returns NULL on
 * overflow, a zero element size, or allocation failure.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.  A zero elem_size is
	 * also rejected rather than dividing by zero below.
	 */
	if (!elem_size || nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}

/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * The highs array sits first, the targets array immediately
	 * after it in the same vmalloc'd region.
	 */
	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	/* Carry over any entries from the previous (smaller) arrays. */
	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	/* Unused high slots get the all-ones "no target" sentinel. */
	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

/*
 * Create an empty table with room for num_targets entries (rounded
 * up to a whole btree node).  The caller holds the initial reference.
 */
int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
{
	struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	memset(t, 0, sizeof(*t));
	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 1);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	*result = t;
	return 0;
}

/*
 * Free every dm_dev left on the list.  Only called from
 * table_destroy() for devices that were never removed; the
 * underlying block devices are not closed here.
 */
static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	for (tmp = devices->next; tmp != devices; tmp = next) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		next = tmp->next;
		kfree(dd);
	}
}

/*
 * Tear down a table once its last reference is gone: btree index,
 * targets (running their destructors), arrays and device list.
 */
void table_destroy(struct dm_table *t)
{
	unsigned int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}

/* Take an extra reference on the table. */
void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

/* Drop a reference; the table is destroyed when the count hits zero. */
void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	if (atomic_dec_and_test(&t->holders))
		table_destroy(t);
}

/*
 * Checks to see if we need to extend highs or targets.
 * Doubles the allocation when the arrays are full.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
		return r;

	inode = nd.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto out;
	}

	/* Only block devices are acceptable map destinations. */
	if (!S_ISBLK(inode->i_mode)) {
		r = -ENOTBLK;
		goto out;
	}

	*dev = inode->i_rdev;

 out:
	path_release(&nd);
	return r;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev *dd;

	list_for_each_entry (dd, l, list)
		if (dd->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 * On success d->bdev holds the claimed block device.
 */
static int open_dev(struct dm_dev *d, dev_t dev)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	/* The caller must hand us an unopened dm_dev. */
	if (d->bdev)
		BUG();

	bdev = open_by_devnum(dev, d->mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim(bdev, _claim_ptr);
	if (r)
		blkdev_put(bdev);
	else
		d->bdev = bdev;
	return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d)
{
	if (!d->bdev)
		return;

	bd_release(d->bdev);
	blkdev_put(d->bdev);
	d->bdev = NULL;
}

/*
 * If possible (ie. blk_size[major] is set), this checks an area
 * of a destination device is valid.
 */
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
	sector_t dev_size;
	dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
	return ((start < dev_size) && (len <= (dev_size - start)));
}

/*
 * This upgrades the mode on an already open dm_dev. Being
 * careful to leave things as they were if we fail to reopen the
 * device.
 */
static int upgrade_mode(struct dm_dev *dd, int new_mode)
{
	int r;
	struct dm_dev dd_copy;
	dev_t dev = dd->bdev->bd_dev;

	/* Keep a copy so the old open can be restored on failure. */
	dd_copy = *dd;

	dd->mode |= new_mode;
	dd->bdev = NULL;
	r = open_dev(dd, dev);
	if (!r)
		close_dev(&dd_copy);	/* new open succeeded; drop the old one */
	else
		*dd = dd_copy;		/* reopen failed; put things back */

	return r;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 *
 * NOTE(review): this function is truncated at the end of this chunk;
 * the remainder continues past the visible source.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev *dd;
	unsigned int major, minor;

	if (!t)
		BUG();

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		/* Reject numbers that don't survive the MKDEV round-trip. */
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		/* First use of this device by the table: open and track it. */
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		if ((r = open_dev(dd, dev))) {
			kfree(dd);
			return r;
		}

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		/* Already open, but with narrower permissions: reopen. */
		r = upgrade_mode(dd, mode);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
⌨️ Keyboard shortcuts
Copy code
Ctrl + C
Search code
Ctrl + F
Fullscreen mode
F11
Toggle theme
Ctrl + Shift + D
Show shortcuts
?
Increase font size
Ctrl + =
Decrease font size
Ctrl + -