/*
 * block-qcow2.c — QCOW2 image format driver (blktap).
 */
/*
 * Tail of qcow_read() — the beginning of this function lies before this
 * chunk, so the fragment below is kept byte-identical and only annotated.
 * It finishes the per-cluster dispatch: zero-fill, compressed, or plain read.
 */
            return -1;
        }
    }
#endif
        } else {
            /* Cluster not allocated: the data reads back as zeroes. */
            memset(buf, 0, 512 * n);
        }
    } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* Compressed cluster: decompress into s->cluster_cache, then copy
         * the requested sectors out of the cache. */
        if (decompress_cluster(s, cluster_offset) < 0) {
            DPRINTF("read/decompression failed: errno = %d\n", errno);
            return -1;
        }
        memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
    } else {
        /* Plain allocated cluster: read the sectors straight from the file. */
        ret = bdrv_pread(s->fd, cluster_offset + index_in_cluster * 512,
                         buf, n * 512);
        if (ret != n * 512) {
            DPRINTF("read failed: ret = %d != n * 512 = %d; errno = %d\n",
                    ret, n * 512, errno);
            DPRINTF(" cluster_offset = %"PRIx64", index = %d; sector_num = %"PRId64"",
                    cluster_offset, index_in_cluster, sector_num);
            return -1;
        }
        if (s->crypt_method) {
            /* Decrypt in place; the 0 argument selects decryption. */
            encrypt_sectors(s, sector_num, buf, buf, n, 0,
                            &s->aes_decrypt_key);
        }
    }
    /* Advance to the next cluster-aligned piece of the request. */
    nb_sectors -= n;
    sector_num += n;
    buf += n * 512;
    }
    return 0;
}

/**
 * Writes a number of sectors to the image (synchronous).
 *
 * The request is split at cluster boundaries; clusters are allocated on
 * demand through get_cluster_offset().  Returns 0 on success, -1 on error.
 */
static int qcow_write(struct disk_driver *bs, uint64_t sector_num,
                      const uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->private;
    int ret, index_in_cluster, n;
    uint64_t cluster_offset;

    while (nb_sectors > 0) {
        /* n = sectors of this request that fall inside one cluster. */
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n = s->cluster_sectors - index_in_cluster;
        if (n > nb_sectors)
            n = nb_sectors;
        /* allocate flag = 1: create the cluster if it does not exist yet. */
        cluster_offset = get_cluster_offset(bs, sector_num << 9, 1, 0,
                                            index_in_cluster,
                                            index_in_cluster + n);
        if (!cluster_offset) {
            DPRINTF("qcow_write: cluster_offset == 0\n");
            DPRINTF(" index = %d; sector_num = %"PRId64"\n",
                    index_in_cluster, sector_num);
            return -1;
        }
        if (s->crypt_method) {
            /* Encrypt into the scratch buffer so *buf stays untouched. */
            encrypt_sectors(s, sector_num, s->cluster_data, buf, n, 1,
                            &s->aes_encrypt_key);
            ret = bdrv_pwrite(s->fd,
                              cluster_offset + index_in_cluster * 512,
                              s->cluster_data, n * 512);
        } else {
            ret = bdrv_pwrite(s->fd,
                              cluster_offset + index_in_cluster * 512,
                              buf, n * 512);
        }
        if (ret != n * 512) {
            DPRINTF("write failed: ret = %d != n * 512 = %d; errno = %d\n",
                    ret, n * 512, errno);
            DPRINTF(" cluster_offset = %"PRIx64", index = %d; sector_num = %"PRId64"\n",
                    cluster_offset, index_in_cluster, sector_num);
            return -1;
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    s->cluster_cache_offset = -1; /* disable compressed cache */
    return 0;
}

#ifdef USE_AIO
/*
 * QCOW2 specific AIO functions
 */

/*
 * Queues an asynchronous read.  Unallocated clusters are reported to the
 * callback as BLK_NOT_ALLOCATED, compressed clusters are served
 * synchronously through the decompression cache, and plain clusters are
 * submitted via tap_aio_read().  Returns the accumulated callback result,
 * or the callback's result for -EBUSY when locks/iocbs are unavailable.
 */
static int qcow_queue_read(struct disk_driver *bs, uint64_t sector,
                           int nb_sectors, char *buf, td_callback_t cb,
                           int id, void *private)
{
    BDRVQcowState *s = bs->private;
    int i, index_in_cluster, n, ret;
    int rsp = 0;
    uint64_t cluster_offset;

    /* Check we can get a lock on every requested sector up front. */
    for (i = 0; i < nb_sectors; i++)
        if (!tap_aio_can_lock(&s->async, sector + i))
            return cb(bs, -EBUSY, sector, nb_sectors, id, private);

    while (nb_sectors > 0) {
        /* allocate flag = 0: look the cluster up, never create it. */
        cluster_offset = get_cluster_offset(bs, sector << 9, 0, 0, 0, 0);

        index_in_cluster = sector & (s->cluster_sectors - 1);
        n = s->cluster_sectors - index_in_cluster;
        if (n > nb_sectors)
            n = nb_sectors;

        if (s->async.iocb_free_count == 0 ||
            !tap_aio_lock(&s->async, sector))
            return cb(bs, -EBUSY, sector, nb_sectors, id, private);

        if (!cluster_offset) {
            /* The requested sector is not allocated */
            tap_aio_unlock(&s->async, sector);
            ret = cb(bs, BLK_NOT_ALLOCATED, sector, n, id, private);
            if (ret == -EBUSY) {
                /* mark remainder of request
                 * as busy and try again later */
                return cb(bs, -EBUSY, sector + n, nb_sectors - n,
                          id, private);
            } else {
                rsp += ret;
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            /* sync read for compressed clusters */
            tap_aio_unlock(&s->async, sector);
            if (decompress_cluster(s, cluster_offset) < 0) {
                rsp += cb(bs, -EIO, sector, nb_sectors, id, private);
                goto done;
            }
            memcpy(buf, s->cluster_cache + index_in_cluster * 512,
                   512 * n);
            rsp += cb(bs, 0, sector, n, id, private);
        } else {
            /* async read */
            tap_aio_read(&s->async, s->fd, n * 512,
                         (cluster_offset + index_in_cluster * 512),
                         buf, cb, id, sector, private);
        }

        /* Prepare for next sector to read */
        nb_sectors -= n;
        sector += n;
        buf += n * 512;
    }

done:
    return rsp;
}

/*
 * Queues an asynchronous write.  Clusters are allocated on demand via
 * get_cluster_offset() and the data is submitted through tap_aio_write().
 * NOTE(review): unlike qcow_queue_read() this returns 0 on the success
 * path instead of an accumulated callback result — confirm callers expect
 * that asymmetry.
 */
static int qcow_queue_write(struct disk_driver *bs, uint64_t sector,
                            int nb_sectors, char *buf, td_callback_t cb,
                            int id, void *private)
{
    BDRVQcowState *s = bs->private;
    int i, n, index_in_cluster;
    uint64_t cluster_offset;
    const uint8_t *src_buf; /* NOTE(review): declared but unused here */

    /* Check we can get a lock on every requested sector up front. */
    for (i = 0; i < nb_sectors; i++)
        if (!tap_aio_can_lock(&s->async, sector + i))
            return cb(bs, -EBUSY, sector, nb_sectors, id, private);

    while (nb_sectors > 0) {
        index_in_cluster = sector & (s->cluster_sectors - 1);
        n = s->cluster_sectors - index_in_cluster;
        if (n > nb_sectors)
            n = nb_sectors;

        if (s->async.iocb_free_count == 0 ||
            !tap_aio_lock(&s->async, sector))
            return cb(bs, -EBUSY, sector, nb_sectors, id, private);

        cluster_offset = get_cluster_offset(bs, sector << 9, 1, 0,
                                            index_in_cluster,
                                            index_in_cluster+n);
        if (!cluster_offset) {
            DPRINTF("Ooops, no write cluster offset!\n");
            tap_aio_unlock(&s->async, sector);
            return cb(bs, -EIO, sector, nb_sectors, id, private);
        }

        // TODO Encryption

        tap_aio_write(&s->async, s->fd, n * 512,
                      (cluster_offset + index_in_cluster*512),
                      buf, cb, id, sector, private);

        /* Prepare for next sector to write */
        nb_sectors -= n;
        sector += n;
        buf += n * 512;
    }

    s->cluster_cache_offset = -1; /* disable compressed cache */
    return 0;
}
#endif /* USE_AIO */

/*
 * Releases all driver state: the AIO context (or, without AIO, the poll
 * pipe), the cached L1/L2 tables and cluster buffers, the refcount caches,
 * and finally the image file descriptor.  Returns close()'s result.
 */
static int qcow_close(struct disk_driver *bs)
{
    BDRVQcowState *s = bs->private;

#ifdef USE_AIO
    io_destroy(s->async.aio_ctx.aio_ctx);
    tap_aio_free(&s->async);
#else
    close(s->poll_pipe[0]);
    close(s->poll_pipe[1]);
#endif

    qemu_free(s->l1_table);
    qemu_free(s->l2_cache);
    qemu_free(s->cluster_cache);
    qemu_free(s->cluster_data);
    refcount_close(bs);
    return close(s->fd);
}

/* XXX: use std qcow open function ?
*/

/* In-progress bookkeeping used while creating a new qcow2 image. */
typedef struct QCowCreateState {
    int cluster_size;
    int cluster_bits;
    uint16_t *refcount_block;
    uint64_t *refcount_table;
    int64_t l1_table_offset;
    int64_t refcount_table_offset;
    int64_t refcount_block_offset;
} QCowCreateState;

/*
 * Bumps (by one) the in-memory refcount of every cluster touched by the
 * byte range [offset, offset + size).  Counts are stored big-endian in
 * s->refcount_block.
 */
static void create_refcount_update(QCowCreateState *s, int64_t offset,
                                   int64_t size)
{
    int refcount;
    int64_t start, last, cluster_offset;
    uint16_t *p;

    /* Round the range outward to cluster boundaries. */
    start = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        p = &s->refcount_block[cluster_offset >> s->cluster_bits];
        refcount = be16_to_cpu(*p);
        refcount++;
        *p = cpu_to_be16(refcount);
    }
}

/* Flushes the image file, then submits all queued AIO requests. */
static int qcow_submit(struct disk_driver *bs)
{
    struct BDRVQcowState *s = (struct BDRVQcowState*) bs->private;

    fsync(s->fd);
    return tap_aio_submit(&s->async);
}

/*********************************************************/
/* snapshot support */

/*
 * Frees the in-memory snapshot list (each name and id string plus the
 * array itself) and resets the snapshot count.  Also used as the error
 * cleanup path of qcow_read_snapshots().
 */
static void qcow_free_snapshots(struct disk_driver *bs)
{
    BDRVQcowState *s = bs->private;
    int i;

    for(i = 0; i < s->nb_snapshots; i++) {
        qemu_free(s->snapshots[i].name);
        qemu_free(s->snapshots[i].id_str);
    }
    qemu_free(s->snapshots);
    s->snapshots = NULL;
    s->nb_snapshots = 0;
}

/*
 * Loads the snapshot table from the image into s->snapshots.  On disk,
 * each QCowSnapshotHeader is followed by extra data, the id string and
 * the name; multi-byte fields are big-endian.  Returns 0 on success,
 * -1 on failure (partially read state is freed).
 */
static int qcow_read_snapshots(struct disk_driver *bs)
{
    BDRVQcowState *s = bs->private;
    QCowSnapshotHeader h;
    QCowSnapshot *sn;
    int i, id_str_size, name_size;
    int64_t offset;
    uint32_t extra_data_size;

    offset = s->snapshots_offset;
    s->snapshots = qemu_mallocz(s->nb_snapshots * sizeof(QCowSnapshot));
    if (!s->snapshots)
        goto fail;
    for(i = 0; i < s->nb_snapshots; i++) {
        /* Each snapshot header is 8-byte aligned on disk. */
        offset = align_offset(offset, 8);
        if (bdrv_pread(s->fd, offset, &h, sizeof(h)) != sizeof(h))
            goto fail;
        offset += sizeof(h);
        sn = s->snapshots + i;
        sn->l1_table_offset = be64_to_cpu(h.l1_table_offset);
        sn->l1_size = be32_to_cpu(h.l1_size);
        sn->vm_state_size = be32_to_cpu(h.vm_state_size);
        sn->date_sec = be32_to_cpu(h.date_sec);
        sn->date_nsec = be32_to_cpu(h.date_nsec);
        sn->vm_clock_nsec = be64_to_cpu(h.vm_clock_nsec);
        extra_data_size = be32_to_cpu(h.extra_data_size);
        id_str_size = be16_to_cpu(h.id_str_size);
        name_size = be16_to_cpu(h.name_size);

        /* Skip the variable-sized extra data we do not interpret. */
        offset += extra_data_size;

        /* id string (copied with a forced NUL terminator) */
        sn->id_str = qemu_malloc(id_str_size + 1);
        if (!sn->id_str)
            goto fail;
        if (bdrv_pread(s->fd, offset, sn->id_str, id_str_size) !=
            id_str_size)
            goto fail;
        offset += id_str_size;
        sn->id_str[id_str_size] = '\0';

        /* snapshot name (copied with a forced NUL terminator) */
        sn->name = qemu_malloc(name_size + 1);
        if (!sn->name)
            goto fail;
        if (bdrv_pread(s->fd, offset, sn->name, name_size) != name_size)
            goto fail;
        offset += name_size;
        sn->name[name_size] = '\0';
    }
    s->snapshots_size = offset - s->snapshots_offset;
    return 0;
fail:
    qcow_free_snapshots(bs);
    return -1;
}

/*********************************************************/
/* refcount handling */

/*
 * Allocates the refcount caches and reads the refcount table from disk,
 * byte-swapping every entry to host order.  Returns 0 on success.
 * NOTE(review): -ENOMEM is returned for every failure, including a short
 * read of the table — confirm callers do not depend on distinguishing.
 */
static int refcount_init(struct disk_driver *bs)
{
    BDRVQcowState *s = bs->private;
    int ret, refcount_table_size2, i;

    s->refcount_block_cache = qemu_malloc(s->cluster_size);
    if (!s->refcount_block_cache)
        goto fail;
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = qemu_malloc(refcount_table_size2);
    if (!s->refcount_table)
        goto fail;
    if (s->refcount_table_size > 0) {
        ret = bdrv_pread(s->fd, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret != refcount_table_size2)
            goto fail;
        for(i = 0; i < s->refcount_table_size; i++)
            be64_to_cpus(&s->refcount_table[i]);
    }
    return 0;
fail:
    return -ENOMEM;
}

/* Frees the caches allocated by refcount_init(). */
static void refcount_close(struct disk_driver *bs)
{
    BDRVQcowState *s = bs->private;

    qemu_free(s->refcount_block_cache);
    qemu_free(s->refcount_table);
}

/*
 * Reads one refcount block (one cluster) into the single-entry cache and
 * records which block is cached.  Returns 0 on success, -EIO on a short
 * read.
 */
static int load_refcount_block(struct disk_driver *bs,
                               int64_t refcount_block_offset)
{
    BDRVQcowState *s = bs->private;
    int ret;

    ret = bdrv_pread(s->fd, refcount_block_offset,
                     s->refcount_block_cache, s->cluster_size);
    if (ret != s->cluster_size)
        return -EIO;
    s->refcount_block_cache_offset = refcount_block_offset;
    return 0;
}

/*
 * Returns the refcount of the given cluster (0 = free / outside the
 * table).  If loading the refcount block fails, 1 is returned so the
 * cluster is conservatively treated as allocated.
 */
static int get_refcount(struct disk_driver *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->private;
    int refcount_table_index, block_index;
    int64_t refcount_block_offset;

    refcount_table_index =
        cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size)
        return 0;
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset)
        return 0;
    if (refcount_block_offset != s->refcount_block_cache_offset) {
        /* better than nothing: return allocated if read error */
        if (load_refcount_block(bs, refcount_block_offset) < 0)
            return 1;
    }
    block_index = cluster_index &
        ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
    return be16_to_cpu(s->refcount_block_cache[block_index]);
}

/* return < 0 if error */
/*
 * Scans forward from s->free_cluster_index for a run of consecutive free
 * clusters large enough for `size` bytes, advancing the index past the
 * run.  Returns the byte offset of the first cluster found.
 * NOTE(review): despite the comment above, no visible path here returns a
 * negative value.
 */
static int64_t alloc_clusters_noref(struct disk_driver *bs, int64_t size)
{
    BDRVQcowState *s = bs->private;
    int i, nb_clusters;

    nb_clusters = (size + s->cluster_size - 1) >> s->cluster_bits;
    for(;;) {
        if (get_refcount(bs, s->free_cluster_index) == 0) {
            s->free_cluster_index++;
            for(i = 1; i < nb_clusters; i++) {
                /* run broken: jump into the else branch to keep scanning */
                if (get_refcount(bs, s->free_cluster_index) != 0)
                    goto not_found;
                s->free_cluster_index++;
            }
#ifdef DEBUG_ALLOC2
            DPRINTF("alloc_clusters: size=%ld -> %ld\n", size,
                    (s->free_cluster_index - nb_clusters) <<
                    s->cluster_bits);
#endif
            return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
        } else {
        not_found:
            s->free_cluster_index++;
        }
    }
}

/*
 * Allocates clusters for `size` bytes and records them via
 * update_refcount(..., 1).  Returns the byte offset of the allocation
 * (see alloc_clusters_noref()).
 */
static int64_t alloc_clusters(struct disk_driver *bs, int64_t size)
{
    int64_t offset;

    offset = alloc_clusters_noref(bs, size);
    update_refcount(bs, offset, size, 1);
    return offset;
}

/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors.
size must be <= cluster_size */static int64_t alloc_bytes(struct disk_driver *bs, int size){ BDRVQcowState *s = bs->private; int64_t offset, cluster_offset; int free_in_cluster; assert(size > 0 && size <= s->cluster_size); if (s->free_byte_offset == 0) { s->free_byte_offset = alloc_clusters(bs, s->cluster_size); }redo: free_in_cluster = s->cluster_size - (s->free_byte_offset & (s->cluster_size - 1)); if (size <= free_in_cluster) { /* enough space in current cluster */ offset = s->free_byte_offset; s->free_byte_offset += size; free_in_cluster -= size; if (free_in_cluster == 0) s->free_byte_offset = 0; if ((offset & (s->cluster_size - 1)) != 0) update_cluster_refcount(bs, offset >> s->cluster_bits, 1); } else { offset = alloc_clusters(bs, s->cluster_size); cluster_offset = s->free_byte_offset & ~(s->cluster_size - 1);