lvm-snap.c
               org_pe_start, pe_off, org_virt_sector);
#endif

        iobuf = lv_snap->lv_iobuf;

        blksize_org = lvm_get_blksize(org_phys_dev);
        blksize_snap = lvm_get_blksize(snap_phys_dev);
        max_blksize = max(blksize_org, blksize_snap);
        min_blksize = min(blksize_org, blksize_snap);
        max_sectors = KIO_MAX_SECTORS * (min_blksize >> 9);

        if (chunk_size % (max_blksize >> 9))
                goto fail_blksize;

        /* copy the chunk from the origin to the snapshot device,
           at most max_sectors per kiovec round trip */
        while (chunk_size) {
                nr_sectors = min(chunk_size, max_sectors);
                chunk_size -= nr_sectors;

                iobuf->length = nr_sectors << 9;

                if (!lvm_snapshot_prepare_blocks(iobuf->blocks, org_start,
                                                 nr_sectors, blksize_org))
                        goto fail_prepare;

                if (brw_kiovec(READ, 1, &iobuf, org_phys_dev, iobuf->blocks,
                               blksize_org) != (nr_sectors << 9))
                        goto fail_raw_read;

                if (!lvm_snapshot_prepare_blocks(iobuf->blocks, snap_start,
                                                 nr_sectors, blksize_snap))
                        goto fail_prepare;

                if (brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev, iobuf->blocks,
                               blksize_snap) != (nr_sectors << 9))
                        goto fail_raw_write;
        }

#ifdef DEBUG_SNAPSHOT
        /* invalidate the logical snapshot buffer cache */
        invalidate_snap_cache(virt_start, lv_snap->lv_chunk_size,
                              lv_snap->lv_dev);
#endif

        /* the original chunk is now stored on the snapshot volume,
           so update the exception table */
        lv_snap->lv_block_exception[idx].rdev_org = org_phys_dev;
        lv_snap->lv_block_exception[idx].rsector_org = org_start;

        lvm_hash_link(lv_snap->lv_block_exception + idx,
                      org_phys_dev, org_start, lv_snap);
        lv_snap->lv_remap_ptr = idx + 1;
        if (lv_snap->lv_snapshot_use_rate > 0) {
                if (lv_snap->lv_remap_ptr * 100 / lv_snap->lv_remap_end >=
                    lv_snap->lv_snapshot_use_rate)
                        wake_up_interruptible(&lv_snap->lv_snapshot_wait);
        }
        return 0;

        /* slow path */
out:
        lvm_drop_snapshot(vg, lv_snap, reason);
        return 1;

fail_out_of_space:
        reason = "out of space";
        goto out;
fail_raw_read:
        reason = "read error";
        goto out;
fail_raw_write:
        reason = "write error";
        goto out;
fail_blksize:
        reason = "blocksize error";
        goto out;
fail_prepare:
        reason = "couldn't prepare kiovec blocks "
                 "(start probably isn't block aligned)";
        goto out;
}

int lvm_snapshot_alloc_iobuf_pages(struct kiobuf *iobuf, int sectors)
{
        int bytes, nr_pages, err, i;

        bytes = sectors * SECTOR_SIZE;
        nr_pages = (bytes + ~PAGE_MASK) >> PAGE_SHIFT;
        err = expand_kiobuf(iobuf, nr_pages);
        if (err)
                goto out;

        err = -ENOMEM;
        iobuf->locked = 1;
        iobuf->nr_pages = 0;
        for (i = 0; i < nr_pages; i++) {
                struct page *page;

                page = alloc_page(GFP_KERNEL);
                if (!page)
                        goto out;

                iobuf->maplist[i] = page;
                LockPage(page);
                iobuf->nr_pages++;
        }
        iobuf->offset = 0;

        err = 0;

out:
        return err;
}

static int calc_max_buckets(void)
{
        unsigned long mem;

        /* allow the hash table to use up to 2% of physical memory */
        mem = num_physpages << PAGE_SHIFT;
        mem /= 100;
        mem *= 2;
        mem /= sizeof(struct list_head);

        return mem;
}

int lvm_snapshot_alloc_hash_table(lv_t *lv)
{
        int err;
        unsigned long buckets, max_buckets, size;
        struct list_head *hash;

        buckets = lv->lv_remap_end;
        max_buckets = calc_max_buckets();
        buckets = min(buckets, max_buckets);

        /* round the bucket count down to a power of two so that
           (buckets - 1) can be used as the hash mask */
        while (buckets & (buckets - 1))
                buckets &= (buckets - 1);

        size = buckets * sizeof(struct list_head);

        err = -ENOMEM;
        hash = vmalloc(size);
        lv->lv_snapshot_hash_table = hash;

        if (!hash)
                goto out;
        lv->lv_snapshot_hash_table_size = size;

        lv->lv_snapshot_hash_mask = buckets - 1;
        while (buckets--)
                INIT_LIST_HEAD(hash + buckets);
        err = 0;
out:
        return err;
}
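/*
 * Annotation (not part of the original source): a minimal userspace
 * sketch of the rounding loop above, assuming only standard C. Clearing
 * the lowest set bit until a single bit remains rounds a value down to
 * the largest power of two <= n, which is what makes (buckets - 1) a
 * valid hash mask.
 */
#if 0   /* illustrative sketch only -- never compiled into the driver */
static unsigned long round_down_pow2(unsigned long n)
{
        while (n & (n - 1))     /* clear the lowest set bit */
                n &= (n - 1);
        return n;               /* e.g. 1000 -> 512, 4096 -> 4096 */
}
#endif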
int lvm_snapshot_alloc(lv_t *lv_snap)
{
        int ret, max_sectors;

        /* allocate kiovec to do chunk io */
        ret = alloc_kiovec(1, &lv_snap->lv_iobuf);
        if (ret)
                goto out;

        max_sectors = KIO_MAX_SECTORS << (PAGE_SHIFT - 9);

        ret = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_iobuf, max_sectors);
        if (ret)
                goto out_free_kiovec;

        /* allocate kiovec to do exception table io */
        ret = alloc_kiovec(1, &lv_snap->lv_COW_table_iobuf);
        if (ret)
                goto out_free_kiovec;

        ret = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_COW_table_iobuf,
                                             PAGE_SIZE / SECTOR_SIZE);
        if (ret)
                goto out_free_both_kiovecs;

        ret = lvm_snapshot_alloc_hash_table(lv_snap);
        if (ret)
                goto out_free_both_kiovecs;

out:
        return ret;

out_free_both_kiovecs:
        unmap_kiobuf(lv_snap->lv_COW_table_iobuf);
        free_kiovec(1, &lv_snap->lv_COW_table_iobuf);
        lv_snap->lv_COW_table_iobuf = NULL;

out_free_kiovec:
        unmap_kiobuf(lv_snap->lv_iobuf);
        free_kiovec(1, &lv_snap->lv_iobuf);
        lv_snap->lv_iobuf = NULL;
        if (lv_snap->lv_snapshot_hash_table != NULL)
                vfree(lv_snap->lv_snapshot_hash_table);
        lv_snap->lv_snapshot_hash_table = NULL;
        goto out;
}

void lvm_snapshot_release(lv_t *lv)
{
        if (lv->lv_block_exception) {
                vfree(lv->lv_block_exception);
                lv->lv_block_exception = NULL;
        }
        if (lv->lv_snapshot_hash_table) {
                vfree(lv->lv_snapshot_hash_table);
                lv->lv_snapshot_hash_table = NULL;
                lv->lv_snapshot_hash_table_size = 0;
        }
        if (lv->lv_iobuf) {
                kiobuf_wait_for_io(lv->lv_iobuf);
                unmap_kiobuf(lv->lv_iobuf);
                free_kiovec(1, &lv->lv_iobuf);
                lv->lv_iobuf = NULL;
        }
        if (lv->lv_COW_table_iobuf) {
                kiobuf_wait_for_io(lv->lv_COW_table_iobuf);
                unmap_kiobuf(lv->lv_COW_table_iobuf);
                free_kiovec(1, &lv->lv_COW_table_iobuf);
                lv->lv_COW_table_iobuf = NULL;
        }
}
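/*
 * Annotation (not part of the original source): a standalone sketch of
 * the COW table index arithmetic used in _write_COW_table_block() below.
 * The 32-byte entry size (four le64 fields), the 1024-byte block size,
 * the 1600 entries per PE, and idx = 70 are all assumed values chosen
 * purely for illustration.
 */
#if 0   /* illustrative sketch only -- never compiled into the driver */
#include <stdio.h>

int main(void)
{
        unsigned long entry_size = 32;          /* assumed sizeof(lv_COW_table_disk_t) */
        unsigned long blksize_snap = 1024;      /* assumed snapshot block size */
        unsigned long entries_per_pe = 1600;    /* assumed COW_entries_per_pe */
        unsigned long idx = 70;                 /* exception being written */

        unsigned long entries_per_block = blksize_snap / entry_size;   /* 32 */
        unsigned long idx_in_pe = idx % entries_per_pe;                 /* 70 */
        unsigned long slot = idx_in_pe % entries_per_block;             /* 6  */

        /* entry 70 lands in slot 6 of the third table block of its PE */
        printf("block %lu, slot %lu\n", idx_in_pe / entries_per_block, slot);
        return 0;
}
#endif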
static int _write_COW_table_block(vg_t *vg, lv_t *lv_snap,
                                  int idx, const char **reason)
{
        int blksize_snap;
        int end_of_table;
        int idx_COW_table;
        uint pvn;
        ulong snap_pe_start, COW_table_sector_offset,
              COW_entries_per_pe, COW_chunks_per_pe,
              COW_entries_per_block;
        ulong blocks[1];
        kdev_t snap_phys_dev;
        lv_block_exception_t *be;
        struct kiobuf *COW_table_iobuf = lv_snap->lv_COW_table_iobuf;
        lv_COW_table_disk_t *lv_COW_table = (lv_COW_table_disk_t *)
                page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);

        COW_chunks_per_pe = LVM_GET_COW_TABLE_CHUNKS_PER_PE(vg, lv_snap);
        COW_entries_per_pe = LVM_GET_COW_TABLE_ENTRIES_PER_PE(vg, lv_snap);

        /* get the physical address of the destination chunk */
        snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new;
        snap_pe_start = lv_snap->lv_block_exception
                [idx - (idx % COW_entries_per_pe)].rsector_new -
                lv_snap->lv_chunk_size;

        blksize_snap = lvm_get_blksize(snap_phys_dev);

        COW_entries_per_block = blksize_snap / sizeof(lv_COW_table_disk_t);
        idx_COW_table = idx % COW_entries_per_pe % COW_entries_per_block;

        if (idx_COW_table == 0)
                memset(lv_COW_table, 0, blksize_snap);

        /* sector offset into the on-disk COW table */
        COW_table_sector_offset = (idx % COW_entries_per_pe) /
                (SECTOR_SIZE / sizeof(lv_COW_table_disk_t));

        /* COW table block to write next */
        blocks[0] = (snap_pe_start + COW_table_sector_offset) >>
                (blksize_snap >> 10);

        /* store the new COW table entry */
        be = lv_snap->lv_block_exception + idx;
        if (_pv_get_number(vg, be->rdev_org, &pvn))
                goto fail_pv_get_number;

        lv_COW_table[idx_COW_table].pv_org_number = cpu_to_le64(pvn);
        lv_COW_table[idx_COW_table].pv_org_rsector = cpu_to_le64(be->rsector_org);
        if (_pv_get_number(vg, snap_phys_dev, &pvn))
                goto fail_pv_get_number;

        lv_COW_table[idx_COW_table].pv_snap_number = cpu_to_le64(pvn);
        lv_COW_table[idx_COW_table].pv_snap_rsector = cpu_to_le64(be->rsector_new);

        COW_table_iobuf->length = blksize_snap;

        if (brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
                       blocks, blksize_snap) != blksize_snap)
                goto fail_raw_write;

        /* initialize the next COW exception table block with zeroes */
        end_of_table = idx % COW_entries_per_pe == COW_entries_per_pe - 1;
        if (idx_COW_table % COW_entries_per_block == COW_entries_per_block - 1 ||
            end_of_table) {
                /* don't go beyond the end */
                if (idx + 1 >= lv_snap->lv_remap_end)
                        goto out;

                memset(lv_COW_table, 0, blksize_snap);

                if (end_of_table) {
                        idx++;
                        snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new;
                        snap_pe_start = lv_snap->lv_block_exception
                                [idx - (idx % COW_entries_per_pe)].rsector_new -
                                lv_snap->lv_chunk_size;
                        blksize_snap = lvm_get_blksize(snap_phys_dev);
                        blocks[0] = snap_pe_start >> (blksize_snap >> 10);
                } else
                        blocks[0]++;

                if (brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
                               blocks, blksize_snap) != blksize_snap)
                        goto fail_raw_write;
        }

out:
        return 0;

fail_raw_write:
        *reason = "write error";
        return 1;

fail_pv_get_number:
        *reason = "_pv_get_number failed";
        return 1;
}

/*
 * FIXME_1.2
 * This function is a bit of a hack; we need to ensure that the
 * snapshot is never made active again, because it will surely be
 * corrupt.  At the moment we do not have access to the LVM metadata
 * from within the kernel.  So we set the first exception to point to
 * sector 1 (which will always be within the metadata, and as such
 * invalid).  User land tools will check for this when they are asked
 * to activate the snapshot and prevent this from happening.
 */
static void _disable_snapshot(vg_t *vg, lv_t *lv)
{
        const char *err;

        lv->lv_block_exception[0].rsector_org = LVM_SNAPSHOT_DROPPED_SECTOR;
        /* _write_COW_table_block() returns 1 on failure, never a negative
           value, so test for nonzero rather than "< 0" */
        if (_write_COW_table_block(vg, lv, 0, &err)) {
                printk(KERN_ERR "%s -- couldn't disable snapshot: %s\n",
                       lvm_name, err);
        }
}

MODULE_LICENSE("GPL");
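/*
 * Annotation (not part of the original source): a sketch of how a
 * userland tool might honour the convention described in the FIXME
 * above. The helper name is hypothetical; it assumes the lv_t layout
 * visible in this file and that LVM_SNAPSHOT_DROPPED_SECTOR is the
 * sector-1 marker the comment describes.
 */
#if 0   /* illustrative sketch only -- never compiled into the driver */
static int snapshot_was_dropped(const lv_t *lv)
{
        /* exception 0 remapped to the dropped-sector marker means the
           snapshot was invalidated and must not be activated again */
        return lv->lv_block_exception &&
               lv->lv_block_exception[0].rsector_org ==
               LVM_SNAPSHOT_DROPPED_SECTOR;
}
#endif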