📄 lvm.c
字号:
/*
 * NOTE(review): this excerpt begins in the middle of lvm_do_lv_create();
 * the function's opening (allocation of lv_ptr, argument validation) is
 * above this chunk.  Here lv_ptr is the freshly allocated lv_t being
 * installed into slot l of vg_ptr->lv[]; pep/lvbe are user-space pointers
 * and activate/lv_status_save/ret/size/le/p/e are locals declared above.
 */

/* Start the new LV inactive with every snapshot-related field cleared. */
lv_ptr->lv_status &= ~LV_ACTIVE;
lv_ptr->lv_snapshot_org = NULL;
lv_ptr->lv_snapshot_prev = NULL;
lv_ptr->lv_snapshot_next = NULL;
lv_ptr->lv_block_exception = NULL;
lv_ptr->lv_iobuf = NULL;
lv_ptr->lv_COW_table_iobuf = NULL;
lv_ptr->lv_snapshot_hash_table = NULL;
lv_ptr->lv_snapshot_hash_table_size = 0;
lv_ptr->lv_snapshot_hash_mask = 0;
init_rwsem(&lv_ptr->lv_lock);

lv_ptr->lv_snapshot_use_rate = 0;

/* Publish the LV in the VG's table before filling in the PE map. */
vg_ptr->lv[l] = lv_ptr;

/* get the PE structures from user space if this is not a snapshot
   logical volume */
if (!(lv_ptr->lv_access & LV_SNAPSHOT)) {
    size = lv_ptr->lv_allocated_le * sizeof(pe_t);
    if ((lv_ptr->lv_current_pe = vmalloc(size)) == NULL) {
        printk(KERN_CRIT
               "%s -- LV_CREATE: vmalloc error LV_CURRENT_PE of %d Byte "
               "at line %d\n",
               lvm_name, size, __LINE__);
        P_KFREE("%s -- kfree %d\n", lvm_name, __LINE__);
        kfree(lv_ptr);
        vg_ptr->lv[l] = NULL;
        return -ENOMEM;
    }
    /* NOTE(review): sizeof(size) below prints the size of the variable,
       not the number of bytes copied; `size` was almost certainly
       intended -- confirm against upstream before changing. */
    if (copy_from_user(lv_ptr->lv_current_pe, pep, size)) {
        P_IOCTL("ERROR: copying PE ptr %p (%d bytes)\n",
                pep, sizeof(size));
        vfree(lv_ptr->lv_current_pe);
        kfree(lv_ptr);
        vg_ptr->lv[l] = NULL;
        return -EFAULT;
    }
    /* correct the PE count in PVs: each LE of this LV consumes one PE
       on the PV whose device it maps to */
    for (le = 0; le < lv_ptr->lv_allocated_le; le++) {
        vg_ptr->pe_allocated++;
        for (p = 0; p < vg_ptr->pv_cur; p++) {
            if (vg_ptr->pv[p]->pv_dev ==
                lv_ptr->lv_current_pe[le].dev)
                vg_ptr->pv[p]->pe_allocated++;
        }
    }
} else {
    /* Get snapshot exception data and block list */
    if (lvbe != NULL) {
        lv_ptr->lv_snapshot_org =
            vg_ptr->lv[LV_BLK(lv_ptr->lv_snapshot_minor)];
        if (lv_ptr->lv_snapshot_org != NULL) {
            size = lv_ptr->lv_remap_end * sizeof(lv_block_exception_t);

            if (!size) {
                printk(KERN_WARNING
                       "%s -- zero length exception table requested\n",
                       lvm_name);
                /* NOTE(review): vg_ptr->lv[l] is not reset to NULL on
                   this error path, leaving a dangling pointer to the
                   freed lv_t -- verify against upstream. */
                kfree(lv_ptr);
                return -EINVAL;
            }

            if ((lv_ptr->lv_block_exception = vmalloc(size)) == NULL) {
                printk(KERN_CRIT
                       "%s -- lvm_do_lv_create: vmalloc error LV_BLOCK_EXCEPTION "
                       "of %d byte at line %d\n",
                       lvm_name, size, __LINE__);
                P_KFREE("%s -- kfree %d\n", lvm_name, __LINE__);
                kfree(lv_ptr);
                vg_ptr->lv[l] = NULL;
                return -ENOMEM;
            }
            if (copy_from_user(lv_ptr->lv_block_exception, lvbe, size)) {
                vfree(lv_ptr->lv_block_exception);
                kfree(lv_ptr);
                vg_ptr->lv[l] = NULL;
                return -EFAULT;
            }

            /* A dropped snapshot is still created, but never activated. */
            if (lv_ptr->lv_block_exception[0].rsector_org ==
                LVM_SNAPSHOT_DROPPED_SECTOR) {
                printk(KERN_WARNING
                       "%s -- lvm_do_lv_create: snapshot has been dropped and will not be activated\n",
                       lvm_name);
                activate = 0;
            }

            /* point to the original logical volume */
            lv_ptr = lv_ptr->lv_snapshot_org;

            lv_ptr->lv_snapshot_minor = 0;
            lv_ptr->lv_snapshot_org = lv_ptr; /* origin points at itself */
            /* our new one now back points to the previous last in the
               chain which can be the original logical volume */
            lv_ptr = vg_ptr->lv[l];
            /* now lv_ptr points to our new last snapshot logical volume;
               it shares the origin's PE map and geometry */
            lv_ptr->lv_current_pe = lv_ptr->lv_snapshot_org->lv_current_pe;
            lv_ptr->lv_allocated_snapshot_le = lv_ptr->lv_allocated_le;
            lv_ptr->lv_allocated_le =
                lv_ptr->lv_snapshot_org->lv_allocated_le;
            lv_ptr->lv_current_le = lv_ptr->lv_snapshot_org->lv_current_le;
            lv_ptr->lv_size = lv_ptr->lv_snapshot_org->lv_size;
            lv_ptr->lv_stripes = lv_ptr->lv_snapshot_org->lv_stripes;
            lv_ptr->lv_stripesize = lv_ptr->lv_snapshot_org->lv_stripesize;

            /* Update the VG PE(s) used by snapshot reserve space. */
            vg_ptr->pe_allocated += lv_ptr->lv_allocated_snapshot_le;

            if ((ret = lvm_snapshot_alloc(lv_ptr)) != 0) {
                vfree(lv_ptr->lv_block_exception);
                kfree(lv_ptr);
                vg_ptr->lv[l] = NULL;
                return ret;
            }
            /* Re-insert the copied exceptions into the remap hash. */
            for (e = 0; e < lv_ptr->lv_remap_ptr; e++)
                lvm_hash_link(lv_ptr->lv_block_exception + e,
                              lv_ptr->lv_block_exception[e].rdev_org,
                              lv_ptr->lv_block_exception[e].rsector_org,
                              lv_ptr);
            /* need to fill the COW exception table data
               into the page for disk i/o */
            /* NOTE(review): on failure this frees lv_ptr but appears to
               leak lv_block_exception and the resources acquired by
               lvm_snapshot_alloc() above -- verify against upstream. */
            if (lvm_snapshot_fill_COW_page(vg_ptr, lv_ptr)) {
                kfree(lv_ptr);
                vg_ptr->lv[l] = NULL;
                return -EINVAL;
            }
            init_waitqueue_head(&lv_ptr->lv_snapshot_wait);
        } else {
            /* No such origin LV for the requested snapshot minor. */
            kfree(lv_ptr);
            vg_ptr->lv[l] = NULL;
            return -EFAULT;
        }
    } else {
        /* Snapshot requested without an exception/block list. */
        kfree(vg_ptr->lv[l]);
        vg_ptr->lv[l] = NULL;
        return -EINVAL;
    }
} /* if ( vg[VG_CHR(minor)]->lv[l]->lv_access & LV_SNAPSHOT) */

lv_ptr = vg_ptr->lv[l];

/* Wire the LV into the gendisk / size / VG-LV mapping tables. */
lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].start_sect = 0;
lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].nr_sects = lv_ptr->lv_size;
lvm_size[MINOR(lv_ptr->lv_dev)] = lv_ptr->lv_size >> 1; /* sectors -> KiB */
vg_lv_map[MINOR(lv_ptr->lv_dev)].vg_number = vg_ptr->vg_number;
vg_lv_map[MINOR(lv_ptr->lv_dev)].lv_number = lv_ptr->lv_number;
LVM_CORRECT_READ_AHEAD(lv_ptr->lv_read_ahead);
vg_ptr->lv_cur++;
lv_ptr->lv_status = lv_status_save;
__update_hardsectsize(lv_ptr);

/* optionally add our new snapshot LV */
if (lv_ptr->lv_access & LV_SNAPSHOT) {
    lv_t *org = lv_ptr->lv_snapshot_org, *last;

    /* sync the original logical volume */
    fsync_dev(org->lv_dev);
#ifdef LVM_VFS_ENHANCEMENT
    /* VFS function call to sync and lock the filesystem */
    fsync_dev_lockfs(org->lv_dev);
#endif
    down_write(&org->lv_lock);
    org->lv_access |= LV_SNAPSHOT_ORG;
    lv_ptr->lv_access &= ~LV_SNAPSHOT_ORG; /* this can only hide a userspace bug */

    /* Link in the list of snapshot volumes */
    for (last = org; last->lv_snapshot_next; last = last->lv_snapshot_next);
    lv_ptr->lv_snapshot_prev = last;
    last->lv_snapshot_next = lv_ptr;
    up_write(&org->lv_lock);
}

/* activate the logical volume */
if (activate)
    lv_ptr->lv_status |= LV_ACTIVE;
else
    lv_ptr->lv_status &= ~LV_ACTIVE;

/* Honour the requested access mode on the block device. */
if (lv_ptr->lv_access & LV_WRITE)
    set_device_ro(lv_ptr->lv_dev, 0);
else
    set_device_ro(lv_ptr->lv_dev, 1);

#ifdef LVM_VFS_ENHANCEMENT
/* VFS function call to unlock the filesystem */
if (lv_ptr->lv_access & LV_SNAPSHOT)
    unlockfs(lv_ptr->lv_snapshot_org->lv_dev);
#endif

lv_ptr->vg = vg_ptr;

lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de =
    lvm_fs_create_lv(vg_ptr, lv_ptr);
return 0;
} /* lvm_do_lv_create() */


/*
 * character device support function logical volume remove
 *
 * Removes LV slot l (or, when l == -1, the LV named lv_name) from the
 * VG owning character minor `minor`.  Returns 0 on success, -ENXIO if
 * the name is unknown, -EBUSY while the LV is still open, -EPERM when
 * asked to delete a snapshot origin that still has snapshots attached.
 */
static int lvm_do_lv_remove(int minor, char *lv_name, int l)
{
    uint le, p;
    vg_t *vg_ptr = vg[VG_CHR(minor)];
    lv_t *lv_ptr;

    /* Resolve the LV by name when no slot index was supplied. */
    if (l == -1) {
        for (l = 0; l < vg_ptr->lv_max; l++) {
            if (vg_ptr->lv[l] != NULL &&
                strcmp(vg_ptr->lv[l]->lv_name, lv_name) == 0) {
                break;
            }
        }
    }
    if (l == vg_ptr->lv_max) return -ENXIO;

    lv_ptr = vg_ptr->lv[l];
#ifdef LVM_TOTAL_RESET
    if (lv_ptr->lv_open > 0 && lvm_reset_spindown == 0)
#else
    if (lv_ptr->lv_open > 0)
#endif
        return -EBUSY;

    /* check for deletion of snapshot source while
       snapshot volume still exists */
    if ((lv_ptr->lv_access & LV_SNAPSHOT_ORG) &&
        lv_ptr->lv_snapshot_next != NULL)
        return -EPERM;

    lvm_fs_remove_lv(vg_ptr, lv_ptr);

    if (lv_ptr->lv_access & LV_SNAPSHOT) {
        /*
         * Atomically make the snapshot invisible
         * to the original lv before playing with it.
         */
        lv_t * org = lv_ptr->lv_snapshot_org;
        down_write(&org->lv_lock);

        /* remove this snapshot logical volume from the chain */
        lv_ptr->lv_snapshot_prev->lv_snapshot_next = lv_ptr->lv_snapshot_next;
        if (lv_ptr->lv_snapshot_next != NULL) {
            lv_ptr->lv_snapshot_next->lv_snapshot_prev =
                lv_ptr->lv_snapshot_prev;
        }

        /* no more snapshots? */
        if (!org->lv_snapshot_next) {
            org->lv_access &= ~LV_SNAPSHOT_ORG;
        }
        up_write(&org->lv_lock);

        lvm_snapshot_release(lv_ptr);

        /* Update the VG PE(s) used by snapshot reserve space. */
        vg_ptr->pe_allocated -= lv_ptr->lv_allocated_snapshot_le;
    }

    lv_ptr->lv_status |= LV_SPINDOWN;

    /* sync the buffers */
    fsync_dev(lv_ptr->lv_dev);

    lv_ptr->lv_status &= ~LV_ACTIVE;

    /* invalidate the buffers */
    invalidate_buffers(lv_ptr->lv_dev);

    /* reset generic hd */
    lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].start_sect = -1;
    lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].nr_sects = 0;
    lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de = 0;
    lvm_size[MINOR(lv_ptr->lv_dev)] = 0;

    /* reset VG/LV mapping */
    vg_lv_map[MINOR(lv_ptr->lv_dev)].vg_number = ABS_MAX_VG;
    vg_lv_map[MINOR(lv_ptr->lv_dev)].lv_number = -1;

    /* correct the PE count in PVs if this is not a snapshot
       logical volume */
    if (!(lv_ptr->lv_access & LV_SNAPSHOT)) {
        /* only if this is no snapshot logical volume because
           we share the lv_current_pe[] structs with the
           original logical volume */
        for (le = 0; le < lv_ptr->lv_allocated_le; le++) {
            vg_ptr->pe_allocated--;
            for (p = 0; p < vg_ptr->pv_cur; p++) {
                if (vg_ptr->pv[p]->pv_dev ==
                    lv_ptr->lv_current_pe[le].dev)
                    vg_ptr->pv[p]->pe_allocated--;
            }
        }
        vfree(lv_ptr->lv_current_pe);
    }

    P_KFREE("%s -- kfree %d\n", lvm_name, __LINE__);
    kfree(lv_ptr);
    vg_ptr->lv[l] = NULL;
    vg_ptr->lv_cur--;
    return 0;
} /* lvm_do_lv_remove() */


/*
 * logical volume extend / reduce
 */

/*
 * Replace a snapshot LV's exception table for an extend/reduce: a new
 * table is vmalloc'ed, exception data is copied from user space only
 * when the remap area grows, and a fresh remap hash table is built.
 */
static int __extend_reduce_snapshot(vg_t *vg_ptr, lv_t *old_lv, lv_t *new_lv)
{
    ulong size;
    lv_block_exception_t *lvbe;

    if (!new_lv->lv_block_exception)
        return -ENXIO;

    size = new_lv->lv_remap_end * sizeof(lv_block_exception_t);
    if ((lvbe = vmalloc(size)) == NULL) {
        printk(KERN_CRIT
               "%s -- lvm_do_lv_extend_reduce: vmalloc "
               "error LV_BLOCK_EXCEPTION of %lu Byte at line %d\n",
               lvm_name, size, __LINE__);
        return -ENOMEM;
    }

    /* Only an extend (growing remap area) carries user-space data. */
    if ((new_lv->lv_remap_end > old_lv->lv_remap_end) &&
        (copy_from_user(lvbe, new_lv->lv_block_exception, size))) {
        vfree(lvbe);
        return -EFAULT;
    }
    new_lv->lv_block_exception = lvbe;

    if (lvm_snapshot_alloc_hash_table(new_lv)) {
        vfree(new_lv->lv_block_exception);
        return -ENOMEM;
    }
    return 0;
}

/*
 * Copy the new PE map from user space into new_lv, move the per-PV and
 * per-VG PE allocation counters from old_lv's map to new_lv's, and carry
 * the accumulated per-PE read/write statistics over to the new map.
 */
static int __extend_reduce(vg_t *vg_ptr, lv_t *old_lv, lv_t *new_lv)
{
    ulong size, l, p, end;
    pe_t *pe;

    /* allocate space for new pe structures */
    size = new_lv->lv_current_le * sizeof(pe_t);
    if ((pe = vmalloc(size)) == NULL) {
        printk(KERN_CRIT
               "%s -- lvm_do_lv_extend_reduce: "
               "vmalloc error LV_CURRENT_PE of %lu Byte at line %d\n",
               lvm_name, size, __LINE__);
        return -ENOMEM;
    }

    /* get the PE structures from user space */
    if (copy_from_user(pe, new_lv->lv_current_pe, size)) {
        if (old_lv->lv_access & LV_SNAPSHOT)
            vfree(new_lv->lv_snapshot_hash_table);
        vfree(pe);
        return -EFAULT;
    }

    new_lv->lv_current_pe = pe;

    /* reduce allocation counters on PV(s) */
    for (l = 0; l < old_lv->lv_allocated_le; l++) {
        vg_ptr->pe_allocated--;
        for (p = 0; p < vg_ptr->pv_cur; p++) {
            if (vg_ptr->pv[p]->pv_dev ==
                old_lv->lv_current_pe[l].dev) {
                vg_ptr->pv[p]->pe_allocated--;
                break;
            }
        }
    }

    /* extend the PE count in PVs */
    for (l = 0; l < new_lv->lv_allocated_le; l++) {
        vg_ptr->pe_allocated++;
        for (p = 0; p < vg_ptr->pv_cur; p++) {
            if (vg_ptr->pv[p]->pv_dev ==
                new_lv->lv_current_pe[l].dev) {
                vg_ptr->pv[p]->pe_allocated++;
                break;
            }
        }
    }

    /* save available i/o statistic data */
    if (old_lv->lv_stripes < 2) { /* linear logical volume */
        end = min(old_lv->lv_current_le, new_lv->lv_current_le);
        for (l = 0; l < end; l++) {
            new_lv->lv_current_pe[l].reads +=
                old_lv->lv_current_pe[l].reads;
            new_lv->lv_current_pe[l].writes +=
                old_lv->lv_current_pe[l].writes;
        }
    } else { /* striped logical volume */
        /* NOTE(review): this `end` shadows the outer ulong `end`. */
        uint i, j, source, dest, end, old_stripe_size, new_stripe_size;

        old_stripe_size = old_lv->lv_allocated_le / old_lv->lv_stripes;
        new_stripe_size = new_lv->lv_allocated_le / new_lv->lv_stripes;
        end = min(old_stripe_size, new_stripe_size);
        /* Walk stripe by stripe, merging the overlapping leading PEs. */
        for (i = source = dest = 0; i < new_lv->lv_stripes; i++) {
            for (j = 0; j < end; j++) {
                new_lv->lv_current_pe[dest + j].reads +=
                    old_lv->lv_current_pe[source + j].reads;
                new_lv->lv_current_pe[dest + j].writes +=
                    old_lv->lv_current_pe[source + j].writes;
            }
            source += old_stripe_size;
            dest += new_stripe_size;
        }
    }

    return 0;
}

/*
 * character device support function logical volume extend / reduce.
 * NOTE(review): this excerpt is truncated mid-statement at the end of
 * the chunk; the remainder of the function is not visible here.
 */
static int lvm_do_lv_extend_reduce(int minor, char *lv_name, lv_t *new_lv)
{
    int r;
    ulong l, e, size;
    vg_t *vg_ptr = vg[VG_CHR(minor)];
    lv_t *old_lv;
    pe_t *pe;

    if ((pe = new_lv->lv_current_pe) == NULL)
        return -EINVAL;

    /* Find the LV by name within this VG. */
    for (l = 0; l < vg_ptr->lv_max; l++)
        if (vg_ptr->lv[l] && !strcmp(vg_ptr->lv[l]->lv_name, lv_name))
            break;
    if (l == vg_ptr->lv_max)
        return -ENXIO;

    old_lv = vg_ptr->lv[l];

    if (old_lv->lv_access & LV_SNAPSHOT) {
        /* only perform this operation on active snapshots */
        if (old_lv->lv_status & LV_ACTIVE)
            r = __extend_reduce_snapshot(vg_ptr, old_lv, new_lv);
        else
            r = -EPERM;
    } else
        r = __extend_reduce(vg_ptr, old_lv, new_lv);

    if (r)
        return r;

    /* copy relevant fields */
    down_write(&old_lv->lv_lock);

    if (new_lv->lv_access & LV_SNAPSHOT) {
        size = (new_lv->lv_remap_end > old_lv->lv_remap_end) ?
            old_lv->lv_remap_ptr : new_lv->lv_remap_end;
        size *= s /* NOTE(review): source truncated here in this copy */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -