qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PATCH for-2.6 13/14] qemu-img: In "map" output, support external file name


From: Fam Zheng
Subject: [Qemu-devel] [PATCH for-2.6 13/14] qemu-img: In "map" output, support external file name
Date: Tue, 24 Nov 2015 13:22:10 +0800

The new "file" output parameter of bdrv_get_block_status tells which
file the valid offset refers to; use this information to output the
filename.

The iotest 122 reference output is updated accordingly.

Signed-off-by: Fam Zheng <address@hidden>
---
 qemu-img.c                 | 12 ++++++++----
 tests/qemu-iotests/122.out | 16 ++++++++--------
 2 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/qemu-img.c b/qemu-img.c
index 97be910..666af66 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -2142,7 +2142,7 @@ typedef struct MapEntry {
     int64_t start;
     int64_t length;
     int64_t offset;
-    BlockDriverState *bs;
+    BlockDriverState *file;
 } MapEntry;
 
 static void dump_map_entry(OutputFormat output_format, MapEntry *e,
@@ -2155,12 +2155,13 @@ static void dump_map_entry(OutputFormat output_format, 
MapEntry *e,
     case OFORMAT_HUMAN:
         if ((e->flags & BDRV_BLOCK_DATA) &&
             !(e->flags & BDRV_BLOCK_OFFSET_VALID)) {
-            error_report("File contains external, encrypted or compressed 
clusters.");
+            error_report("File contains encrypted or compressed clusters.");
             exit(1);
         }
         if ((e->flags & (BDRV_BLOCK_DATA|BDRV_BLOCK_ZERO)) == BDRV_BLOCK_DATA) 
{
             printf("%#-16"PRIx64"%#-16"PRIx64"%#-16"PRIx64"%s\n",
-                   e->start, e->length, e->offset, e->bs->filename);
+                   e->start, e->length, e->offset,
+                   e->file ? e->file->filename : "");
         }
         /* This format ignores the distinction between 0, ZERO and ZERO|DATA.
          * Modify the flags here to allow more coalescing.
@@ -2186,6 +2187,9 @@ static void dump_map_entry(OutputFormat output_format, 
MapEntry *e,
         qdict_put(dict, "data", qbool_from_bool(e->flags & BDRV_BLOCK_DATA));
         if (e->flags & BDRV_BLOCK_OFFSET_VALID) {
             qdict_put(dict, "offset", qint_from_int(e->offset));
+            if (e->file) {
+                qdict_put(dict, "file", qstring_from_str(e->file->filename));
+            }
         }
         str = qobject_to_json(QOBJECT(dict));
         printf("%s\n", qstring_get_str(str));
@@ -2236,7 +2240,7 @@ static int get_block_status(BlockDriverState *bs, int64_t 
sector_num,
     e->flags = ret & ~BDRV_BLOCK_OFFSET_MASK;
     e->offset = ret & BDRV_BLOCK_OFFSET_MASK;
     e->depth = depth;
-    e->bs = bs;
+    e->file = file;
     return 0;
 }
 
diff --git a/tests/qemu-iotests/122.out b/tests/qemu-iotests/122.out
index 3c119a8..cb249ce 100644
--- a/tests/qemu-iotests/122.out
+++ b/tests/qemu-iotests/122.out
@@ -114,7 +114,7 @@ read 3145728/3145728 bytes at offset 0
 3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 read 63963136/63963136 bytes at offset 3145728
 61 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-[{"length": 6291456, "start": 0, "zero": false, "offset": 327680, "depth": 0, 
"data": true}
+[{"length": 6291456, "start": 0, "zero": false, "offset": 327680, "depth": 0, 
"file": "TEST_DIR/t.IMGFMT.orig", "data": true}
 ,{"length": 60817408, "start": 6291456, "zero": true, "depth": 0, "data": 
false}
 ]
 
@@ -140,7 +140,7 @@ read 30408704/30408704 bytes at offset 3145728
 29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 read 33554432/33554432 bytes at offset 33554432
 32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-[{"length": 67108864, "start": 0, "zero": false, "offset": 327680, "depth": 0, 
"data": true}
+[{"length": 67108864, "start": 0, "zero": false, "offset": 327680, "depth": 0, 
"file": "TEST_DIR/t.IMGFMT.orig", "data": true}
 ]
 
 convert -c -S 0 with source backing file:
@@ -160,7 +160,7 @@ read 30408704/30408704 bytes at offset 3145728
 29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 read 33554432/33554432 bytes at offset 33554432
 32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-[{"length": 67108864, "start": 0, "zero": false, "offset": 327680, "depth": 0, 
"data": true}
+[{"length": 67108864, "start": 0, "zero": false, "offset": 327680, "depth": 0, 
"file": "TEST_DIR/t.IMGFMT.orig", "data": true}
 ]
 
 convert -c -S 0 -B ...
@@ -186,11 +186,11 @@ wrote 1024/1024 bytes at offset 17408
 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 
 convert -S 4k
-[{"length": 1024, "start": 0, "zero": false, "offset": 8192, "depth": 0, 
"data": true}
+[{"length": 1024, "start": 0, "zero": false, "offset": 8192, "depth": 0, 
"file": "TEST_DIR/t.IMGFMT.orig", "data": true}
 ,{"length": 7168, "start": 1024, "zero": true, "depth": 0, "data": false}
-,{"length": 1024, "start": 8192, "zero": false, "offset": 9216, "depth": 0, 
"data": true}
+,{"length": 1024, "start": 8192, "zero": false, "offset": 9216, "depth": 0, 
"file": "TEST_DIR/t.IMGFMT.orig", "data": true}
 ,{"length": 8192, "start": 9216, "zero": true, "depth": 0, "data": false}
-,{"length": 1024, "start": 17408, "zero": false, "offset": 10240, "depth": 0, 
"data": true}
+,{"length": 1024, "start": 17408, "zero": false, "offset": 10240, "depth": 0, 
"file": "TEST_DIR/t.IMGFMT.orig", "data": true}
 ,{"length": 67090432, "start": 18432, "zero": true, "depth": 0, "data": false}
 ]
 
@@ -204,9 +204,9 @@ convert -c -S 4k
 ]
 
 convert -S 8k
-[{"length": 9216, "start": 0, "zero": false, "offset": 8192, "depth": 0, 
"data": true}
+[{"length": 9216, "start": 0, "zero": false, "offset": 8192, "depth": 0, 
"file": "TEST_DIR/t.IMGFMT.orig", "data": true}
 ,{"length": 8192, "start": 9216, "zero": true, "depth": 0, "data": false}
-,{"length": 1024, "start": 17408, "zero": false, "offset": 17408, "depth": 0, 
"data": true}
+,{"length": 1024, "start": 17408, "zero": false, "offset": 17408, "depth": 0, 
"file": "TEST_DIR/t.IMGFMT.orig", "data": true}
 ,{"length": 67090432, "start": 18432, "zero": true, "depth": 0, "data": false}
 ]
 
-- 
2.4.3




reply via email to

[Prev in Thread] Current Thread [Next in Thread]