📄 dm-raid1.c
Font size:
* Target functions *---------------------------------------------------------------*/static struct mirror_set *alloc_context(unsigned int nr_mirrors, uint32_t region_size, struct dm_target *ti, struct dirty_log *dl){ size_t len; struct mirror_set *ms = NULL; if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors)) return NULL; len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); ms = kzalloc(len, GFP_KERNEL); if (!ms) { ti->error = "Cannot allocate mirror context"; return NULL; } spin_lock_init(&ms->lock); ms->ti = ti; ms->nr_mirrors = nr_mirrors; ms->nr_regions = dm_sector_div_up(ti->len, region_size); ms->in_sync = 0; ms->default_mirror = &ms->mirror[DEFAULT_MIRROR]; ms->io_client = dm_io_client_create(DM_IO_PAGES); if (IS_ERR(ms->io_client)) { ti->error = "Error creating dm_io client"; kfree(ms); return NULL; } if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { ti->error = "Error creating dirty region hash"; dm_io_client_destroy(ms->io_client); kfree(ms); return NULL; } return ms;}static void free_context(struct mirror_set *ms, struct dm_target *ti, unsigned int m){ while (m--) dm_put_device(ti, ms->mirror[m].dev); dm_io_client_destroy(ms->io_client); rh_exit(&ms->rh); kfree(ms);}static inline int _check_region_size(struct dm_target *ti, uint32_t size){ return !(size % (PAGE_SIZE >> 9) || !is_power_of_2(size) || size > ti->len);}static int get_mirror(struct mirror_set *ms, struct dm_target *ti, unsigned int mirror, char **argv){ unsigned long long offset; if (sscanf(argv[1], "%llu", &offset) != 1) { ti->error = "Invalid offset"; return -EINVAL; } if (dm_get_device(ti, argv[0], offset, ti->len, dm_table_get_mode(ti->table), &ms->mirror[mirror].dev)) { ti->error = "Device lookup failure"; return -ENXIO; } ms->mirror[mirror].ms = ms; ms->mirror[mirror].offset = offset; return 0;}/* * Create dirty log: log_type #log_params <log_params> */static struct dirty_log *create_dirty_log(struct dm_target *ti, unsigned int argc, char **argv, unsigned 
int *args_used){ unsigned int param_count; struct dirty_log *dl; if (argc < 2) { ti->error = "Insufficient mirror log arguments"; return NULL; } if (sscanf(argv[1], "%u", ¶m_count) != 1) { ti->error = "Invalid mirror log argument count"; return NULL; } *args_used = 2 + param_count; if (argc < *args_used) { ti->error = "Insufficient mirror log arguments"; return NULL; } dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2); if (!dl) { ti->error = "Error creating mirror dirty log"; return NULL; } if (!_check_region_size(ti, dl->type->get_region_size(dl))) { ti->error = "Invalid region size"; dm_destroy_dirty_log(dl); return NULL; } return dl;}static int parse_features(struct mirror_set *ms, unsigned argc, char **argv, unsigned *args_used){ unsigned num_features; struct dm_target *ti = ms->ti; *args_used = 0; if (!argc) return 0; if (sscanf(argv[0], "%u", &num_features) != 1) { ti->error = "Invalid number of features"; return -EINVAL; } argc--; argv++; (*args_used)++; if (num_features > argc) { ti->error = "Not enough arguments to support feature count"; return -EINVAL; } if (!strcmp("handle_errors", argv[0])) ms->features |= DM_RAID1_HANDLE_ERRORS; else { ti->error = "Unrecognised feature requested"; return -EINVAL; } (*args_used)++; return 0;}/* * Construct a mirror mapping: * * log_type #log_params <log_params> * #mirrors [mirror_path offset]{2,} * [#features <features>] * * log_type is "core" or "disk" * #log_params is between 1 and 3 * * If present, features must be "handle_errors". 
*/static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv){ int r; unsigned int nr_mirrors, m, args_used; struct mirror_set *ms; struct dirty_log *dl; dl = create_dirty_log(ti, argc, argv, &args_used); if (!dl) return -EINVAL; argv += args_used; argc -= args_used; if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 || nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) { ti->error = "Invalid number of mirrors"; dm_destroy_dirty_log(dl); return -EINVAL; } argv++, argc--; if (argc < nr_mirrors * 2) { ti->error = "Too few mirror arguments"; dm_destroy_dirty_log(dl); return -EINVAL; } ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl); if (!ms) { dm_destroy_dirty_log(dl); return -ENOMEM; } /* Get the mirror parameter sets */ for (m = 0; m < nr_mirrors; m++) { r = get_mirror(ms, ti, m, argv); if (r) { free_context(ms, ti, m); return r; } argv += 2; argc -= 2; } ti->private = ms; ti->split_io = ms->rh.region_size; ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); if (!ms->kmirrord_wq) { DMERR("couldn't start kmirrord"); r = -ENOMEM; goto err_free_context; } INIT_WORK(&ms->kmirrord_work, do_mirror); r = parse_features(ms, argc, argv, &args_used); if (r) goto err_destroy_wq; argv += args_used; argc -= args_used; /* * Any read-balancing addition depends on the * DM_RAID1_HANDLE_ERRORS flag being present. * This is because the decision to balance depends * on the sync state of a region. If the above * flag is not present, we ignore errors; and * the sync state may be inaccurate. 
*/ if (argc) { ti->error = "Too many mirror arguments"; r = -EINVAL; goto err_destroy_wq; } r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); if (r) goto err_destroy_wq; wake(ms); return 0;err_destroy_wq: destroy_workqueue(ms->kmirrord_wq);err_free_context: free_context(ms, ti, ms->nr_mirrors); return r;}static void mirror_dtr(struct dm_target *ti){ struct mirror_set *ms = (struct mirror_set *) ti->private; flush_workqueue(ms->kmirrord_wq); kcopyd_client_destroy(ms->kcopyd_client); destroy_workqueue(ms->kmirrord_wq); free_context(ms, ti, ms->nr_mirrors);}static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw){ int should_wake = 0; struct bio_list *bl; bl = (rw == WRITE) ? &ms->writes : &ms->reads; spin_lock(&ms->lock); should_wake = !(bl->head); bio_list_add(bl, bio); spin_unlock(&ms->lock); if (should_wake) wake(ms);}/* * Mirror mapping function */static int mirror_map(struct dm_target *ti, struct bio *bio, union map_info *map_context){ int r, rw = bio_rw(bio); struct mirror *m; struct mirror_set *ms = ti->private; map_context->ll = bio_to_region(&ms->rh, bio); if (rw == WRITE) { queue_bio(ms, bio, rw); return DM_MAPIO_SUBMITTED; } r = ms->rh.log->type->in_sync(ms->rh.log, bio_to_region(&ms->rh, bio), 0); if (r < 0 && r != -EWOULDBLOCK) return r; if (r == -EWOULDBLOCK) /* FIXME: ugly */ r = DM_MAPIO_SUBMITTED; /* * We don't want to fast track a recovery just for a read * ahead. So we just let it silently fail. * FIXME: get rid of this. 
*/ if (!r && rw == READA) return -EIO; if (!r) { /* Pass this io over to the daemon */ queue_bio(ms, bio, rw); return DM_MAPIO_SUBMITTED; } m = choose_mirror(ms, bio->bi_sector); if (!m) return -EIO; map_bio(ms, m, bio); return DM_MAPIO_REMAPPED;}static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error, union map_info *map_context){ int rw = bio_rw(bio); struct mirror_set *ms = (struct mirror_set *) ti->private; region_t region = map_context->ll; /* * We need to dec pending if this was a write. */ if (rw == WRITE) rh_dec(&ms->rh, region); return 0;}static void mirror_postsuspend(struct dm_target *ti){ struct mirror_set *ms = (struct mirror_set *) ti->private; struct dirty_log *log = ms->rh.log; rh_stop_recovery(&ms->rh); /* Wait for all I/O we generated to complete */ wait_event(_kmirrord_recovery_stopped, !atomic_read(&ms->rh.recovery_in_flight)); if (log->type->postsuspend && log->type->postsuspend(log)) /* FIXME: need better error handling */ DMWARN("log suspend failed");}static void mirror_resume(struct dm_target *ti){ struct mirror_set *ms = (struct mirror_set *) ti->private; struct dirty_log *log = ms->rh.log; if (log->type->resume && log->type->resume(log)) /* FIXME: need better error handling */ DMWARN("log resume failed"); rh_start_recovery(&ms->rh);}static int mirror_status(struct dm_target *ti, status_type_t type, char *result, unsigned int maxlen){ unsigned int m, sz = 0; struct mirror_set *ms = (struct mirror_set *) ti->private; switch (type) { case STATUSTYPE_INFO: DMEMIT("%d ", ms->nr_mirrors); for (m = 0; m < ms->nr_mirrors; m++) DMEMIT("%s ", ms->mirror[m].dev->name); DMEMIT("%llu/%llu 0 ", (unsigned long long)ms->rh.log->type-> get_sync_count(ms->rh.log), (unsigned long long)ms->nr_regions); sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz); break; case STATUSTYPE_TABLE: sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen); DMEMIT("%d", ms->nr_mirrors); for (m = 0; m < ms->nr_mirrors; m++) DMEMIT(" 
%s %llu", ms->mirror[m].dev->name, (unsigned long long)ms->mirror[m].offset); if (ms->features & DM_RAID1_HANDLE_ERRORS) DMEMIT(" 1 handle_errors"); } return 0;}static struct target_type mirror_target = { .name = "mirror", .version = {1, 0, 3}, .module = THIS_MODULE, .ctr = mirror_ctr, .dtr = mirror_dtr, .map = mirror_map, .end_io = mirror_end_io, .postsuspend = mirror_postsuspend, .resume = mirror_resume, .status = mirror_status,};static int __init dm_mirror_init(void){ int r; r = dm_dirty_log_init(); if (r) return r; r = dm_register_target(&mirror_target); if (r < 0) { DMERR("Failed to register mirror target"); dm_dirty_log_exit(); } return r;}static void __exit dm_mirror_exit(void){ int r; r = dm_unregister_target(&mirror_target); if (r < 0) DMERR("unregister failed %d", r); dm_dirty_log_exit();}/* Module hooks */module_init(dm_mirror_init);module_exit(dm_mirror_exit);MODULE_DESCRIPTION(DM_NAME " mirror target");MODULE_AUTHOR("Joe Thornber");MODULE_LICENSE("GPL");
⌨️ Keyboard shortcuts
Copy code
Ctrl + C
Search code
Ctrl + F
Full-screen mode
F11
Toggle theme
Ctrl + Shift + D
Show shortcuts
?
Increase font size
Ctrl + =
Decrease font size
Ctrl + -