📄 block-qcow2.c
    tap_aio_free(&s->async);
#endif
    qcow_free_snapshots(bs);
    refcount_close(bs);
    qemu_free(s->l1_table);
    qemu_free(s->l2_cache);
    qemu_free(s->cluster_cache);
    qemu_free(s->cluster_data);
    close(fd);
    return -1;
}

static int qcow_set_key(struct disk_driver *bs, const char *key)
{
    BDRVQcowState *s = bs->private;
    uint8_t keybuf[16];
    int len, i;

    memset(keybuf, 0, 16);
    len = strlen(key);
    if (len > 16)
        len = 16;
    /* XXX: we could compress the chars to 7 bits to increase
       entropy */
    for(i = 0; i < len; i++) {
        keybuf[i] = key[i];
    }
    s->crypt_method = s->crypt_method_header;

    if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
        return -1;
    if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
        return -1;
#if 0
    /* test */
    {
        uint8_t in[16];
        uint8_t out[16];
        uint8_t tmp[16];
        for(i = 0; i < 16; i++)
            in[i] = i;
        AES_encrypt(in, tmp, &s->aes_encrypt_key);
        AES_decrypt(tmp, out, &s->aes_decrypt_key);
        for(i = 0; i < 16; i++)
            printf(" %02x", tmp[i]);
        printf("\n");
        for(i = 0; i < 16; i++)
            printf(" %02x", out[i]);
        printf("\n");
    }
#endif
    return 0;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
static void encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                            uint8_t *out_buf, const uint8_t *in_buf,
                            int nb_sectors, int enc,
                            const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        /* per-sector IV: little-endian sector number, upper 64 bits zero */
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key, ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

static int copy_sectors(struct disk_driver *bs, uint64_t start_sect,
                        uint64_t cluster_offset, int n_start, int n_end)
{
    BDRVQcowState *s = bs->private;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0)
        return 0;
    ret = qcow_read(bs, start_sect + n_start, s->cluster_data, n);
    if (ret < 0)
        return ret;
    if (s->crypt_method) {
        encrypt_sectors(s, start_sect + n_start,
                        s->cluster_data,
                        s->cluster_data, n, 1,
                        &s->aes_encrypt_key);
    }
    ret = bdrv_pwrite(s->fd, cluster_offset + 512 * n_start,
                      s->cluster_data, n * 512);
    if (ret < 0)
        return ret;
    return 0;
}

static void l2_cache_reset(struct disk_driver *bs)
{
    BDRVQcowState *s = bs->private;

    memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t));
}

static inline int l2_cache_new_entry(struct disk_driver *bs)
{
    BDRVQcowState *s = bs->private;
    uint32_t min_count;
    int min_index, i;

    /* find a new entry in the least used one */
    min_index = 0;
    min_count = 0xffffffff;
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (s->l2_cache_counts[i] < min_count) {
            min_count = s->l2_cache_counts[i];
            min_index = i;
        }
    }
    return min_index;
}

static int64_t align_offset(int64_t offset, int n)
{
    /* round up to the next multiple of n (n must be a power of two) */
    offset = (offset + n - 1) & ~(n - 1);
    return offset;
}

static int grow_l1_table(struct disk_driver *bs, int min_size)
{
    BDRVQcowState *s = bs->private;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    uint64_t new_l1_table_offset;
    uint64_t data64;
    uint32_t data32;

    new_l1_size = s->l1_size;
    if (min_size <= new_l1_size)
        return 0;
    /* grow by at least 50% each round so repeated growth stays cheap */
    while (min_size > new_l1_size) {
        new_l1_size = (new_l1_size * 3 + 1) / 2;
    }
#ifdef DEBUG_ALLOC2
    DPRINTF("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_mallocz(new_l1_size2);
    if (!new_l1_table)
        return -ENOMEM;
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
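    /* The update order below matters: write the new table into freshly
       allocated clusters, then repoint the header at it, and only then
       free the old table's clusters, so an interruption leaves either
       the old or the new table reachable. */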
    /* write new table (align to cluster) */
    new_l1_table_offset = alloc_clusters(bs, new_l1_size2);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    if (lseek(s->fd, new_l1_table_offset, SEEK_SET) == -1)
        goto fail;
    ret = write(s->fd, new_l1_table, new_l1_size2);
    if (ret != new_l1_size2)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    data64 = cpu_to_be64(new_l1_table_offset);
    if (lseek(s->fd, offsetof(QCowHeader, l1_table_offset), SEEK_SET) == -1)
        goto fail;
    if (write(s->fd, &data64, sizeof(data64)) != sizeof(data64))
        goto fail;
    data32 = cpu_to_be32(new_l1_size);
    if (bdrv_pwrite(s->fd, offsetof(QCowHeader, l1_size),
                    &data32, sizeof(data32)) != sizeof(data32))
        goto fail;
    qemu_free(s->l1_table);
    free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    /* the live L1 table must survive a failed grow; free only the
       not-yet-installed copy */
    qemu_free(new_l1_table);
    return -EIO;
}

/* 'allocate' is:
 *
 * 0 not to allocate.
 *
 * 1 to allocate a normal cluster (for sector indexes 'n_start' to
 * 'n_end')
 *
 * 2 to allocate a compressed cluster of size
 * 'compressed_size'. 'compressed_size' must be > 0 and <
 * cluster_size
 *
 * return 0 if not allocated.
 */
static uint64_t get_cluster_offset(struct disk_driver *bs,
                                   uint64_t offset, int allocate,
                                   int compressed_size,
                                   int n_start, int n_end)
{
    BDRVQcowState *s = bs->private;
    int min_index, i, j, l1_index, l2_index, ret;
    uint64_t l2_offset, *l2_table, cluster_offset, tmp, old_l2_offset;

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        /* outside l1 table is allowed: we grow the table if needed */
        if (!allocate)
            return 0;
        if (grow_l1_table(bs, l1_index + 1) < 0) {
            DPRINTF("Could not grow L1 table");
            return 0;
        }
    }
    l2_offset = s->l1_table[l1_index];
    if (!l2_offset) {
        if (!allocate)
            return 0;
    l2_allocate:
        old_l2_offset = l2_offset;
        /* allocate a new l2 entry */
        l2_offset = alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
        /* update the L1 entry */
        s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
        tmp = cpu_to_be64(l2_offset | QCOW_OFLAG_COPIED);
        if (bdrv_pwrite(s->fd, s->l1_table_offset + l1_index * sizeof(tmp),
                        &tmp, sizeof(tmp)) != sizeof(tmp))
            return 0;
        min_index = l2_cache_new_entry(bs);
        l2_table = s->l2_cache + (min_index << s->l2_bits);
        if (old_l2_offset == 0) {
            memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
        } else {
            if (bdrv_pread(s->fd, old_l2_offset, l2_table,
                           s->l2_size * sizeof(uint64_t)) !=
                s->l2_size * sizeof(uint64_t))
                return 0;
        }
        if (bdrv_pwrite(s->fd, l2_offset, l2_table,
                        s->l2_size * sizeof(uint64_t)) !=
            s->l2_size * sizeof(uint64_t))
            return 0;
    } else {
        if (!(l2_offset & QCOW_OFLAG_COPIED)) {
            if (allocate) {
                free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
                goto l2_allocate;
            }
        } else {
            l2_offset &= ~QCOW_OFLAG_COPIED;
        }
        for(i = 0; i < L2_CACHE_SIZE; i++) {
            if (l2_offset == s->l2_cache_offsets[i]) {
                /* increment the hit count */
                if (++s->l2_cache_counts[i] == 0xffffffff) {
                    for(j = 0; j < L2_CACHE_SIZE; j++) {
                        s->l2_cache_counts[j] >>= 1;
                    }
                }
                l2_table = s->l2_cache + (i << s->l2_bits);
                goto found;
            }
        }
        /* not found: load a new entry in the least used one */
        min_index = l2_cache_new_entry(bs);
        l2_table = s->l2_cache + (min_index << s->l2_bits);
        if (bdrv_pread(s->fd, l2_offset, l2_table,
                       s->l2_size * sizeof(uint64_t)) !=
            s->l2_size * sizeof(uint64_t)) {
            DPRINTF("Could not read L2 table");
            return 0;
        }
    }
    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;
 found:
    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
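    /* Each L2 entry is a big-endian cluster descriptor: the cluster's
       byte offset in the image file plus flag bits. QCOW_OFLAG_COPIED
       marks a refcount-1 cluster that may be rewritten in place;
       QCOW_OFLAG_COMPRESSED marks a compressed cluster whose sector
       count is packed into the high bits (decoded below with
       s->csize_shift and s->csize_mask). */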
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (!cluster_offset) {
        if (!allocate) {
            return cluster_offset;
        }
    } else if (!(cluster_offset & QCOW_OFLAG_COPIED)) {
        if (!allocate)
            return cluster_offset;
        /* free the cluster */
        if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            int nb_csectors;
            nb_csectors = ((cluster_offset >> s->csize_shift) &
                           s->csize_mask) + 1;
            free_clusters(bs,
                          (cluster_offset & s->cluster_offset_mask) & ~511,
                          nb_csectors * 512);
        } else {
            free_clusters(bs, cluster_offset, s->cluster_size);
        }
    } else {
        cluster_offset &= ~QCOW_OFLAG_COPIED;
        return cluster_offset;
    }

    if (allocate == 1) {
        /* allocate a new cluster */
        cluster_offset = alloc_clusters(bs, s->cluster_size);

        /* we must initialize the cluster content which won't be
           written */
        if ((n_end - n_start) < s->cluster_sectors) {
            uint64_t start_sect;

            start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
            ret = copy_sectors(bs, start_sect, cluster_offset, 0, n_start);
            if (ret < 0)
                return 0;
            ret = copy_sectors(bs, start_sect, cluster_offset,
                               n_end, s->cluster_sectors);
            if (ret < 0)
                return 0;
        }
        tmp = cpu_to_be64(cluster_offset | QCOW_OFLAG_COPIED);
    } else {
        int nb_csectors;
        cluster_offset = alloc_bytes(bs, compressed_size);
        nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
            (cluster_offset >> 9);
        cluster_offset |= QCOW_OFLAG_COMPRESSED |
            ((uint64_t)nb_csectors << s->csize_shift);
        /* compressed clusters never have the copied flag */
        tmp = cpu_to_be64(cluster_offset);
    }
    /* update L2 table */
    l2_table[l2_index] = tmp;
    if (bdrv_pwrite(s->fd, l2_offset + l2_index * sizeof(tmp),
                    &tmp, sizeof(tmp)) != sizeof(tmp))
        return 0;
    return cluster_offset;
}

static int qcow_is_allocated(struct disk_driver *bs, int64_t sector_num,
                             int nb_sectors, int *pnum)
{
    BDRVQcowState *s = bs->private;
    int index_in_cluster, n;
    uint64_t cluster_offset;

    cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
    index_in_cluster = sector_num & (s->cluster_sectors - 1);
    n = s->cluster_sectors - index_in_cluster;
    if (n > nb_sectors)
        n = nb_sectors;
    *pnum = n;
    return (cluster_offset != 0);
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    /* negative windowBits: raw deflate stream, 2^12 byte window */
    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)
{
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) &
                       s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        ret = bdrv_read(s->fd, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return -1;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -1;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
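/* Worked example of the compressed-cluster descriptor decoded above,
   assuming the field widths set up in qcow_open() (not shown in this
   excerpt): csize_shift = 62 - (cluster_bits - 8) and csize_mask =
   (1 << (cluster_bits - 8)) - 1. With cluster_bits = 16 that gives
   csize_shift = 54, so bits 0-53 hold the byte offset of the
   compressed data, bits 54-61 hold nb_csectors - 1, and bit 62 is
   QCOW_OFLAG_COMPRESSED. The descriptor 0x4080000000002300 then
   decodes as nb_csectors = 3 with data at byte offset 0x2300:
   decompress_cluster() reads sectors 17..19 and inflates
   csize = 3 * 512 - 0x100 = 1280 bytes starting at sector_offset
   0x100 within that buffer. */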
/* handle reading after the end of the backing file */
static int backing_read1(struct disk_driver *bs, int64_t sector_num,
                         uint8_t *buf, int nb_sectors)
{
    int n1;
    BDRVQcowState *s = bs->private;

    if ((sector_num + nb_sectors) <= s->total_sectors)
        return nb_sectors;
    if (sector_num >= s->total_sectors)
        n1 = 0;
    else
        n1 = s->total_sectors - sector_num;
    memset(buf + n1 * 512, 0, 512 * (nb_sectors - n1));
    return n1;
}

/**
 * Reads a number of sectors from the image (synchronous)
 */
static int qcow_read(struct disk_driver *bs, uint64_t sector_num,
                     uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->private;
    int ret, index_in_cluster, n, n1;
    uint64_t cluster_offset;

    while (nb_sectors > 0) {
        cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n = s->cluster_sectors - index_in_cluster;
        if (n > nb_sectors)
            n = nb_sectors;

        if (!cluster_offset) {
            if (bs->next) {
                /* Read from backing file */
                struct disk_driver *parent = bs->next;

                ret = qcow_sync_read(parent, sector_num, nb_sectors,
                                     (char*) buf, NULL, 0, NULL);
#if 0
                /* read from the base image */
                n1 = backing_read1(s->backing_hd, sector_num, buf, n);
                if (n1 > 0) {
                    ret = bdrv_read(((BDRVQcowState*) s->backing_hd)->fd,
                                    sector_num, buf, n1);
                    if (ret < 0) {
                        DPRINTF("read from backing file failed: ret = %d; errno = %d\n", ret, errno);