[Qemu-devel] [RFC PATCH v2 05/23] qcow2: Factor out handle_alloc()
From: Kevin Wolf
Subject: [Qemu-devel] [RFC PATCH v2 05/23] qcow2: Factor out handle_alloc()
Date: Wed, 13 Feb 2013 14:21:55 +0100
Signed-off-by: Kevin Wolf <address@hidden>
---
block/qcow2-cluster.c | 240 +++++++++++++++++++++++++++++++------------------
trace-events | 1 +
2 files changed, 152 insertions(+), 89 deletions(-)
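
Note for readers skimming the diff: the return-value contract documented on the new
handle_alloc() below (0 with *bytes cleared when nothing could be allocated, 1 with
*host_offset and a possibly shortened *bytes on success, -errno on failure) is easiest
to read from the caller's side. The following is a minimal sketch of that calling
pattern, modelled on the new call site in qcow2_alloc_cluster_offset(); the
initialisation of cluster_offset and the surrounding variables are illustrative
assumptions, not part of the patch:

    uint64_t cluster_offset = 0;   /* 0: no preferred host offset (assumed) */
    uint64_t cur_bytes = nb_clusters * s->cluster_size;

    ret = handle_alloc(bs, offset, &cluster_offset, &cur_bytes, m,
                       nb_clusters, keep_clusters, n_start, n_end);
    if (ret < 0) {
        return ret;                /* -errno: allocation failed */
    } else if (ret == 0) {
        /* Nothing could be allocated; cur_bytes has been set to 0 */
        nb_clusters = 0;
    } else {
        /* ret == 1: cluster_offset now points at the first newly allocated
         * cluster and cur_bytes covers the (possibly shortened) new area */
        nb_clusters = size_to_clusters(s, cur_bytes);
    }
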
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index 5c4b7cc..919cb39 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -856,6 +856,146 @@ static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
}
/*
+ * Allocates new clusters for an area that either is not yet allocated or needs a
+ * copy on write. If *host_offset is non-zero, clusters are only allocated if
+ * the new allocation can match the specified host offset.
+ *
+ * Note that guest_offset may not be cluster aligned.
+ *
+ * Returns:
+ * 0: if no clusters could be allocated. *bytes is set to 0,
+ * *host_offset is left unchanged.
+ *
+ * 1: if new clusters were allocated. *bytes may be decreased if the
+ * new allocation doesn't cover all of the requested area.
+ * *host_offset is updated to contain the host offset of the first
+ * newly allocated cluster.
+ *
+ * -errno: in error cases
+ *
+ * TODO Get rid of nb_clusters, keep_clusters, n_start, n_end
+ * TODO Make *bytes actually behave as specified above
+ */
+static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
+ uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m,
+ unsigned int nb_clusters, int keep_clusters, int n_start, int n_end)
+{
+ BDRVQcowState *s = bs->opaque;
+ int l2_index;
+ uint64_t *l2_table;
+ uint64_t entry;
+ int ret;
+
+ uint64_t alloc_offset;
+ uint64_t alloc_cluster_offset;
+ uint64_t keep_bytes = keep_clusters * s->cluster_size;
+
+ trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
+ *bytes);
+ assert(*bytes > 0);
+
+ /* Find L2 entry for the first involved cluster */
+ ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
+ if (ret < 0) {
+ return ret;
+ }
+
+ entry = be64_to_cpu(l2_table[l2_index + keep_clusters]);
+
+ /* For the moment, overwrite compressed clusters one by one */
+ if (entry & QCOW_OFLAG_COMPRESSED) {
+ nb_clusters = 1;
+ } else {
+ nb_clusters = count_cow_clusters(s, nb_clusters, l2_table,
+ l2_index + keep_clusters);
+ }
+
+ ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (nb_clusters == 0) {
+ *bytes = 0;
+ return 0;
+ }
+
+ /* Calculate start and size of allocation */
+ alloc_offset = guest_offset + keep_bytes;
+
+ if (keep_clusters == 0) {
+ alloc_cluster_offset = 0;
+ } else {
+ alloc_cluster_offset = *host_offset + keep_bytes;
+ }
+
+ /* Allocate, if necessary at a given offset in the image file */
+ ret = do_alloc_cluster_offset(bs, alloc_offset, &alloc_cluster_offset,
+ &nb_clusters);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ /* save info needed for meta data update */
+ if (nb_clusters > 0) {
+ /*
+ * requested_sectors: Number of sectors from the start of the first
+ * newly allocated cluster to the end of the (possibly shortened
+ * before) write request.
+ *
+ * avail_sectors: Number of sectors from the start of the first
+ * newly allocated cluster to the end of the last newly allocated cluster.
+ *
+ * nb_sectors: The number of sectors from the start of the first
+ * newly allocated cluster to the end of the area that the write
+ * request actually writes to (excluding COW at the end)
+ */
+ int requested_sectors = n_end - keep_clusters * s->cluster_sectors;
+ int avail_sectors = nb_clusters
+ << (s->cluster_bits - BDRV_SECTOR_BITS);
+ int alloc_n_start = keep_clusters == 0 ? n_start : 0;
+ int nb_sectors = MIN(requested_sectors, avail_sectors);
+
+ if (keep_clusters == 0) {
+ *host_offset = alloc_cluster_offset;
+ }
+
+ *m = g_malloc0(sizeof(**m));
+
+ **m = (QCowL2Meta) {
+ .alloc_offset = alloc_cluster_offset,
+ .offset = alloc_offset & ~(s->cluster_size - 1),
+ .nb_clusters = nb_clusters,
+ .nb_available = nb_sectors,
+
+ .cow_start = {
+ .offset = 0,
+ .nb_sectors = alloc_n_start,
+ },
+ .cow_end = {
+ .offset = nb_sectors * BDRV_SECTOR_SIZE,
+ .nb_sectors = avail_sectors - nb_sectors,
+ },
+ };
+ qemu_co_queue_init(&(*m)->dependent_requests);
+ QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);
+
+ *bytes = nb_clusters * s->cluster_size;
+ } else {
+ *bytes = 0;
+ return 0;
+ }
+
+ return 1;
+
+fail:
+ if (*m && (*m)->nb_clusters > 0) {
+ QLIST_REMOVE(*m, next_in_flight);
+ }
+ return ret;
+}
+
+/*
* alloc_cluster_offset
*
* For a given offset on the virtual disk, find the cluster offset in qcow2
@@ -974,93 +1114,21 @@ again:
}
/* If there is something left to allocate, do that now */
- if (nb_clusters > 0) {
- uint64_t alloc_offset;
- uint64_t alloc_cluster_offset;
- uint64_t keep_bytes = keep_clusters * s->cluster_size;
-
- ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
- if (ret < 0) {
- return ret;
- }
-
- /* For the moment, overwrite compressed clusters one by one */
- uint64_t entry = be64_to_cpu(l2_table[l2_index + keep_clusters]);
- if (entry & QCOW_OFLAG_COMPRESSED) {
- nb_clusters = 1;
- } else {
- nb_clusters = count_cow_clusters(s, nb_clusters, l2_table,
- l2_index + keep_clusters);
- }
-
- ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
- if (ret < 0) {
- return ret;
- }
-
- /* Calculate start and size of allocation */
- alloc_offset = offset + keep_bytes;
-
- if (keep_clusters == 0) {
- alloc_cluster_offset = 0;
- } else {
- alloc_cluster_offset = cluster_offset + keep_bytes;
- }
-
- /* Allocate, if necessary at a given offset in the image file */
- ret = do_alloc_cluster_offset(bs, alloc_offset, &alloc_cluster_offset,
- &nb_clusters);
- if (ret < 0) {
- goto fail;
- }
-
- /* save info needed for meta data update */
- if (nb_clusters > 0) {
- /*
- * requested_sectors: Number of sectors from the start of the first
- * newly allocated cluster to the end of the (possibly shortened
- * before) write request.
- *
- * avail_sectors: Number of sectors from the start of the first
- * newly allocated to the end of the last newly allocated cluster.
- *
- * nb_sectors: The number of sectors from the start of the first
- * newly allocated cluster to the end of the aread that the write
- * request actually writes to (excluding COW at the end)
- */
- int requested_sectors = n_end - keep_clusters * s->cluster_sectors;
- int avail_sectors = nb_clusters
- << (s->cluster_bits - BDRV_SECTOR_BITS);
- int alloc_n_start = keep_clusters == 0 ? n_start : 0;
- int nb_sectors = MIN(requested_sectors, avail_sectors);
-
- if (keep_clusters == 0) {
- cluster_offset = alloc_cluster_offset;
- }
+ if (nb_clusters == 0) {
+ goto done;
+ }
- *m = g_malloc0(sizeof(**m));
-
- **m = (QCowL2Meta) {
- .alloc_offset = alloc_cluster_offset,
- .offset = alloc_offset & ~(s->cluster_size - 1),
- .nb_clusters = nb_clusters,
- .nb_available = nb_sectors,
-
- .cow_start = {
- .offset = 0,
- .nb_sectors = alloc_n_start,
- },
- .cow_end = {
- .offset = nb_sectors * BDRV_SECTOR_SIZE,
- .nb_sectors = avail_sectors - nb_sectors,
- },
- };
- qemu_co_queue_init(&(*m)->dependent_requests);
- QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);
- }
+ cur_bytes = nb_clusters * s->cluster_size;
+ ret = handle_alloc(bs, offset, &cluster_offset, &cur_bytes, m,
+ nb_clusters, keep_clusters, n_start, n_end);
+ if (ret < 0) {
+ return ret;
}
+ nb_clusters = size_to_clusters(s, cur_bytes);
+
/* Some cleanup work */
+done:
sectors = (keep_clusters + nb_clusters) << (s->cluster_bits - 9);
if (sectors > n_end) {
sectors = n_end;
@@ -1071,12 +1139,6 @@ again:
*host_offset = cluster_offset;
return 0;
-
-fail:
- if (*m && (*m)->nb_clusters > 0) {
- QLIST_REMOVE(*m, next_in_flight);
- }
- return ret;
}
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
diff --git a/trace-events b/trace-events
index 1011f27..68b4638 100644
--- a/trace-events
+++ b/trace-events
@@ -481,6 +481,7 @@ qcow2_writev_done_part(void *co, int cur_nr_sectors) "co %p cur_nr_sectors %d"
qcow2_writev_data(void *co, uint64_t offset) "co %p offset %" PRIx64
qcow2_alloc_clusters_offset(void *co, uint64_t offset, int n_start, int n_end) "co %p offet %" PRIx64 " n_start %d n_end %d"
+qcow2_handle_alloc(void *co, uint64_t guest_offset, uint64_t host_offset, uint64_t bytes) "co %p guest_offet %" PRIx64 " host_offset %" PRIx64 " bytes %" PRIx64
qcow2_do_alloc_clusters_offset(void *co, uint64_t guest_offset, uint64_t host_offset, int nb_clusters) "co %p guest_offet %" PRIx64 " host_offset %" PRIx64 " nb_clusters %d"
qcow2_cluster_alloc_phys(void *co) "co %p"
qcow2_cluster_link_l2(void *co, int nb_clusters) "co %p nb_clusters %d"
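
For reference, the requested_sectors / avail_sectors / nb_sectors bookkeeping that
moves into handle_alloc() can be checked with a small standalone program. Every
concrete number below (64k clusters, 512-byte sectors, a write starting 5 sectors
into the first newly allocated cluster and ending 200 sectors after it, two new
clusters, nothing reused) is an assumption chosen for illustration, not taken from
the patch:

    /* Standalone illustration of the COW bookkeeping in handle_alloc() */
    #include <stdio.h>

    #define BDRV_SECTOR_BITS  9
    #define BDRV_SECTOR_SIZE  (1 << BDRV_SECTOR_BITS)
    #define MIN(a, b)         ((a) < (b) ? (a) : (b))

    int main(void)
    {
        int cluster_bits    = 16;                                     /* 64k clusters */
        int cluster_sectors = 1 << (cluster_bits - BDRV_SECTOR_BITS); /* 128 */

        int keep_clusters = 0;      /* no clusters reused in front */
        int nb_clusters   = 2;      /* two clusters newly allocated */
        int n_start       = 5;      /* write starts 5 sectors into cluster 0 */
        int n_end         = 200;    /* write ends 200 sectors after cluster 0 */

        int requested_sectors = n_end - keep_clusters * cluster_sectors;          /* 200 */
        int avail_sectors     = nb_clusters << (cluster_bits - BDRV_SECTOR_BITS); /* 256 */
        int alloc_n_start     = keep_clusters == 0 ? n_start : 0;                 /* 5 */
        int nb_sectors        = MIN(requested_sectors, avail_sectors);            /* 200 */

        /* COW before the write: sectors [0, 5) of the first new cluster */
        printf("cow_start: offset 0, nb_sectors %d\n", alloc_n_start);
        /* COW after the write: sectors [200, 256) of the last new cluster */
        printf("cow_end:   offset %d, nb_sectors %d\n",
               nb_sectors * BDRV_SECTOR_SIZE, avail_sectors - nb_sectors);
        return 0;
    }
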
--
1.7.6.5