Re: [Qemu-devel] [PATCH v2] vmdk: improve streamOptimized vmdk support


From: Milos Vyletel
Subject: Re: [Qemu-devel] [PATCH v2] vmdk: improve streamOptimized vmdk support
Date: Tue, 5 Aug 2014 12:44:01 -0400

On Tue, Aug 5, 2014 at 1:27 AM, Fam Zheng <address@hidden> wrote:
> On Mon, 08/04 13:10, Milos Vyletel wrote:
>> VMDK's streamOptimized format is different from regular sparse format.
>> L1(GD) and L2(GT) tables are not predefined but rather generated and
>> written during image creation mainly because there is no way to tell
>> how much space the data will occupy once it is compressed. Also, the
>> locations of the header and the L1 and L2 tables differ.
>>
>> - L2 tables (grain tables) are written after all grains they point to
>> - L1 tables are written after all grains and L2 tables
>> - footer at the end is used instead of header in first sector
>>
>> Images generated by qemu-img could not be imported (as part of an OVA
>> archive) into either VMWare or OVM because of errors.
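(For reference, a rough sketch of the stream layout being described above, using a
marker struct like the one this patch adds; this is an illustration only, with field
meanings taken from the VMDK 5.0 technote:)

#include <stdint.h>

#define SECTOR_SIZE 512

/* one-sector metadata marker used by streamOptimized streams */
typedef struct {
    uint64_t val;                   /* e.g. sectors of metadata that follow */
    uint32_t size;                  /* 0 for metadata markers */
    uint32_t type;                  /* grain table, grain directory, footer, EOS */
    uint8_t  pad[SECTOR_SIZE - 16];
} Marker;

/*
 * Resulting stream order, start to end:
 *   header (gd_offset = GD_AT_END) and embedded descriptor,
 *   compressed grains (each preceded by a grain marker),
 *   grain-table marker + grain table, written after the grains it points to,
 *   grain-directory marker + grain directory, written after all grain tables,
 *   footer marker + footer (a copy of the header with the real gd_offset),
 *   end-of-stream marker.
 */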
>
> Does putting a monolithicSparse into the OVA work in this case?

It does not. I did not try to import it into OVM, but the ESXi server
complained about the format of the vmdk in the OVA archive (I don't have
the exact error message anymore). I did look at the OVF specification and
it does not state that the vmdk must be streamOptimized, but it looks like
VMware is enforcing it.

I can try to create an OVA with a monolithicSparse image and retry if you
want to know the exact error I got.

>
>>
>> - VMWare during OVA import:
>> Not a supported disk format (sparse VMDK too old)
>>
>> - OVM's vbox-img during conversion:
>> vbox-img: error: Error while copying the image: VERR_EOF
>>
>> This patch fixes streamOptimized support in qemu, which was not fully
>> compatible with the VMDK specification as defined in the latest available
>> version at https://www.vmware.com/support/developer/vddk/vmdk_50_technote.pdf.
>>
>> Qemu-generated images are identical to the ones generated by VMWare and
>> OVM (vbox-img), with the exception of the DescriptorFile, but that is
>> expected (the CID and some additional DDB entries differ). They were also
>> successfully imported into VMWare vCloud, ESXi and Oracle OVM.
>>
>> Signed-off-by: Milos Vyletel <address@hidden>
>> ---
>> v2 changes:
>> - updated commit message description with errors received
>> - style/grammar fixes (clean checkpatch pass)
>> - removed l2_table pointer from VmdkExtent struct
>> - fixed memory leak in vmdk_write_footer()
>>
>>  block/vmdk.c |  365 ++++++++++++++++++++++++++++++++++++++++++++++------------
>>  1 files changed, 290 insertions(+), 75 deletions(-)
>>
>> diff --git a/block/vmdk.c b/block/vmdk.c
>> index 0517bba..ecaca8f 100644
>> --- a/block/vmdk.c
>> +++ b/block/vmdk.c
>> @@ -81,6 +81,21 @@ typedef struct {
>>      uint16_t compressAlgorithm;
>>  } QEMU_PACKED VMDK4Header;
>>
>> +typedef struct {
>> +    uint64_t val;
>> +    uint32_t size;
>> +    uint32_t type;
>> +    uint8_t pad[BDRV_SECTOR_SIZE - sizeof(uint64_t) - 2*sizeof(uint32_t)];
>> +} QEMU_PACKED VMDK4MetaMarker;
>> +
>> +typedef struct {
>> +    VMDK4MetaMarker footer_marker;
>> +    uint32_t magic;
>> +    VMDK4Header header;
>> +    uint8_t pad[BDRV_SECTOR_SIZE - sizeof(uint32_t) - sizeof(VMDK4Header)];
>> +    VMDK4MetaMarker eos_marker;
>> +} QEMU_PACKED VMDK4Footer;
>> +
>>  #define L2_CACHE_SIZE 16
>>
>>  typedef struct VmdkExtent {
>> @@ -89,12 +104,14 @@ typedef struct VmdkExtent {
>>      bool compressed;
>>      bool has_marker;
>>      bool has_zero_grain;
>> +    bool has_footer;
>>      int version;
>>      int64_t sectors;
>>      int64_t end_sector;
>>      int64_t flat_start_offset;
>>      int64_t l1_table_offset;
>>      int64_t l1_backup_table_offset;
>> +    uint32_t l1_index;
>>      uint32_t *l1_table;
>>      uint32_t *l1_backup_table;
>>      unsigned int l1_size;
>> @@ -107,6 +124,8 @@ typedef struct VmdkExtent {
>>
>>      int64_t cluster_sectors;
>>      char *type;
>> +
>> +    VMDK4Footer footer;
>
> This adds hundreds of bytes to the in-memory structure, but most of them are
> unnecessary. I'm dubious about that. Fields in the footer all have
> counterparts in the header, which are also fields of VmdkExtent. So please
> reuse them instead of introducing redundancy.
>
> When we close the extent, we could reuse the same logic as in
> vmdk_create_extent to generate the footer dynamically, instead of writing
> back at close time what was read at open time. That way the footer here is
> not needed.
>

Good point. I'll remove the footer from the VmdkExtent structure.
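Something along these lines, perhaps (untested sketch only; it assumes the
VMDK4Footer/VMDK4MetaMarker structs from this patch plus the existing VmdkExtent
fields, and the remaining header fields would be filled the same way as in
vmdk_create_extent):

static void vmdk_build_footer(VmdkExtent *extent, VMDK4Footer *footer)
{
    VMDK4Header *h = &footer->header;

    memset(footer, 0, sizeof(*footer));

    /* one-sector footer marker, then the footer, then the EOS marker */
    footer->footer_marker.val  = cpu_to_le64(1);
    footer->footer_marker.type = cpu_to_le32(MARKER_FOOTER);
    footer->eos_marker.type    = cpu_to_le32(MARKER_END_OF_STREAM);

    footer->magic = cpu_to_be32(VMDK4_MAGIC);

    /* rebuild the header from what VmdkExtent already tracks */
    h->version           = cpu_to_le32(extent->version);
    h->flags             = cpu_to_le32(VMDK4_FLAG_COMPRESS | VMDK4_FLAG_MARKER
                                       | VMDK4_FLAG_NL_DETECT);
    h->compressAlgorithm = cpu_to_le16(VMDK4_COMPRESSION_DEFLATE);
    h->capacity          = cpu_to_le64(extent->sectors);
    h->granularity       = cpu_to_le64(extent->cluster_sectors);
    h->num_gtes_per_gt   = cpu_to_le32(extent->l2_size);
    /* the grain directory has just been written at l1_table_offset */
    h->gd_offset         = cpu_to_le64(extent->l1_table_offset
                                       / BDRV_SECTOR_SIZE);
    /* desc_offset, desc_size, grain_offset, etc. as in vmdk_create_extent */
}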

>>  } VmdkExtent;
>>
>>  typedef struct BDRVVmdkState {
>> @@ -125,7 +144,6 @@ typedef struct BDRVVmdkState {
>>
>>  typedef struct VmdkMetaData {
>>      uint32_t offset;
>> -    unsigned int l1_index;
>>      unsigned int l2_index;
>>      unsigned int l2_offset;
>>      int valid;
>> @@ -555,14 +573,50 @@ static char *vmdk_read_desc(BlockDriverState *file, uint64_t desc_offset,
>>      return buf;
>>  }
>>
>> +static int vmdk_read_footer(BlockDriverState *bs,
>> +                            VMDK4Footer *footer)
>> +{
>> +    int ret;
>> +
>> +    /*
>> +    * footer starts 3 sectors from end
>> +    * - footer marker
>> +    * - footer
>> +    * - end-of-stream marker
>> +    */
>> +    ret = bdrv_pread(bs->file,
>> +        (bs->file->total_sectors - 3) * BDRV_SECTOR_SIZE,
>> +        footer, sizeof(*footer));
>> +    if (ret < 0) {
>> +        goto out;
>> +    }
>> +
>> +    /* Some sanity checks for the footer */
>> +    if (be32_to_cpu(footer->magic) != VMDK4_MAGIC ||
>> +        le32_to_cpu(footer->footer_marker.size) != 0  ||
>> +        le32_to_cpu(footer->footer_marker.type) != MARKER_FOOTER ||
>> +        le64_to_cpu(footer->eos_marker.val) != 0  ||
>> +        le32_to_cpu(footer->eos_marker.size) != 0  ||
>> +        le32_to_cpu(footer->eos_marker.type) != MARKER_END_OF_STREAM) {
>> +        ret = -EINVAL;
>> +        goto out;
>> +    }
>> +
>> +    ret = VMDK_OK;
>> + out:
>> +    return ret;
>> +}
>> +
>>  static int vmdk_open_vmdk4(BlockDriverState *bs,
>>                             BlockDriverState *file,
>>                             int flags, Error **errp)
>>  {
>>      int ret;
>> +    bool has_footer = false;
>>      uint32_t magic;
>>      uint32_t l1_size, l1_entry_sectors;
>>      VMDK4Header header;
>> +    VMDK4Footer footer;
>>      VmdkExtent *extent;
>>      BDRVVmdkState *s = bs->opaque;
>>      int64_t l1_backup_offset = 0;
>> @@ -593,48 +647,13 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
>>
>>      if (le64_to_cpu(header.gd_offset) == VMDK4_GD_AT_END) {
>>          /*
>> -         * The footer takes precedence over the header, so read it in. The
>> -         * footer starts at offset -1024 from the end: One sector for the
>> -         * footer, and another one for the end-of-stream marker.
>> +         * The footer takes precedence over the header, so read it in.
>>           */
>> -        struct {
>> -            struct {
>> -                uint64_t val;
>> -                uint32_t size;
>> -                uint32_t type;
>> -                uint8_t pad[512 - 16];
>> -            } QEMU_PACKED footer_marker;
>> -
>> -            uint32_t magic;
>> -            VMDK4Header header;
>> -            uint8_t pad[512 - 4 - sizeof(VMDK4Header)];
>> -
>> -            struct {
>> -                uint64_t val;
>> -                uint32_t size;
>> -                uint32_t type;
>> -                uint8_t pad[512 - 16];
>> -            } QEMU_PACKED eos_marker;
>> -        } QEMU_PACKED footer;
>> -
>> -        ret = bdrv_pread(file,
>> -            bs->file->total_sectors * 512 - 1536,
>> -            &footer, sizeof(footer));
>> +        ret = vmdk_read_footer(bs, &footer);
>>          if (ret < 0) {
>>              return ret;
>>          }
>> -
>> -        /* Some sanity checks for the footer */
>> -        if (be32_to_cpu(footer.magic) != VMDK4_MAGIC ||
>> -            le32_to_cpu(footer.footer_marker.size) != 0  ||
>> -            le32_to_cpu(footer.footer_marker.type) != MARKER_FOOTER ||
>> -            le64_to_cpu(footer.eos_marker.val) != 0  ||
>> -            le32_to_cpu(footer.eos_marker.size) != 0  ||
>> -            le32_to_cpu(footer.eos_marker.type) != MARKER_END_OF_STREAM)
>> -        {
>> -            return -EINVAL;
>> -        }
>> -
>> +        has_footer = true;
>>          header = footer.header;
>>      }
>>
>> @@ -645,11 +664,15 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
>>          error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
>>                    bs->device_name, "vmdk", buf);
>>          return -ENOTSUP;
>> -    } else if (le32_to_cpu(header.version) == 3 && (flags & BDRV_O_RDWR)) {
>> +    } else if (le32_to_cpu(header.version) == 3 &&
>> +               (flags & BDRV_O_RDWR) &&
>> +               !(le64_to_cpu(header.flags) & VMDK4_FLAG_COMPRESS)) {
>>          /* VMware KB 2064959 explains that version 3 added support for
>>           * persistent changed block tracking (CBT), and backup software can
>>           * read it as version=1 if it doesn't care about the changed area
>> -         * information. So we are safe to enable read only. */
>> +         * information. So we are safe to enable read only.
>> +         * Note that this does not apply to streamOptimized images which
>> +         * are written only once and are used as a transport format */
>>          error_setg(errp, "VMDK version 3 must be read only");
>>          return -EINVAL;
>>      }
>> @@ -689,11 +712,21 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
>>      if (ret < 0) {
>>          return ret;
>>      }
>> +    if (has_footer) {
>> +        extent->has_footer = has_footer;
>> +        extent->footer = footer;
>> +    }
>> +
>>      extent->compressed =
>>          le16_to_cpu(header.compressAlgorithm) == VMDK4_COMPRESSION_DEFLATE;
>>      if (extent->compressed) {
>>          g_free(s->create_type);
>>          s->create_type = g_strdup("streamOptimized");
>> +
>> +        if (flags & BDRV_O_RDWR) {
>> +            bdrv_truncate(file,
>> +                    le64_to_cpu(header.grain_offset) * BDRV_SECTOR_SIZE);
>
> Looks dangerous. What if has_footer == false, as for images created by an
> older implementation?
>
Will check for has_footer.
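Something like this, perhaps (untested sketch, in the context of
vmdk_open_vmdk4 where file, flags, header, extent and ret are in scope):

    /* only drop the stale metadata tail when a footer was actually found,
     * so images from older writers without a footer are left untouched */
    if (extent->compressed && has_footer && (flags & BDRV_O_RDWR)) {
        ret = bdrv_truncate(file,
                le64_to_cpu(header.grain_offset) * BDRV_SECTOR_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

(Also checking the bdrv_truncate() return value while at it.)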

>> +        }
>>      }
>>      extent->has_marker = le32_to_cpu(header.flags) & VMDK4_FLAG_MARKER;
>>      extent->version = le32_to_cpu(header.version);
>> @@ -998,6 +1031,12 @@ static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data)
>>      uint32_t offset;
>>      QEMU_BUILD_BUG_ON(sizeof(offset) != sizeof(m_data->offset));
>>      offset = cpu_to_le32(m_data->offset);
>> +
>> +    /* do not update on streamOptimized */
>> +    if (extent->compressed) {
>> +        return VMDK_OK;
>> +    }
>> +
>>      /* update L2 table */
>>      if (bdrv_pwrite_sync(
>>                  extent->file,
>> @@ -1008,7 +1047,7 @@ static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data)
>>      }
>>      /* update backup L2 table */
>>      if (extent->l1_backup_table_offset != 0) {
>> -        m_data->l2_offset = extent->l1_backup_table[m_data->l1_index];
>> +        m_data->l2_offset = extent->l1_backup_table[extent->l1_index];
>>          if (bdrv_pwrite_sync(
>>                      extent->file,
>>                      ((int64_t)m_data->l2_offset * 512)
>> @@ -1024,6 +1063,113 @@ static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data)
>>      return VMDK_OK;
>>  }
>>
>> +static int vmdk_write_footer(BlockDriverState *bs,
>> +                             VMDK4Footer *footer,
>> +                             VmdkExtent *extent)
>> +{
>> +    int i, ret, gd_buf_size;
>> +    uint32_t *gd_buf = NULL;
>> +    uint32_t grains, gd_sectors, gt_size, gt_count;
>> +    uint64_t offset;
>> +    VMDK4Header header;
>> +    VMDK4MetaMarker gd_marker;
>> +
>> +    header = footer->header;
>> +    offset = le64_to_cpu(header.gd_offset);
>> +
>> +    grains = DIV_ROUND_UP(header.capacity, header.granularity);
>> +    gt_size = DIV_ROUND_UP(header.num_gtes_per_gt * sizeof(uint32_t),
>> +                           BDRV_SECTOR_SIZE);
>> +    gt_count = DIV_ROUND_UP(grains, header.num_gtes_per_gt);
>> +    gd_sectors = DIV_ROUND_UP(gt_count * sizeof(uint32_t), BDRV_SECTOR_SIZE);
>> +
>> +    /* write grain directory marker */
>> +    memset(&gd_marker, 0, sizeof(gd_marker));
>> +    gd_marker.val = cpu_to_le64(gd_sectors);
>> +    gd_marker.type = cpu_to_le32(MARKER_GRAIN_DIRECTORY);
>> +
>> +    ret = bdrv_pwrite(bs, offset * BDRV_SECTOR_SIZE,
>> +                      &gd_marker, sizeof(gd_marker));
>> +    if (ret < 0) {
>> +        return ret;
>> +    }
>> +    offset += sizeof(gd_marker) / BDRV_SECTOR_SIZE;
>> +
>> +    /* write grain directory */
>> +    gd_buf_size = gd_sectors * BDRV_SECTOR_SIZE;
>> +    gd_buf = g_malloc0(gd_buf_size);
>> +    if (extent) {
>> +        /* copy over L1 table if we have it */
>> +        for (i = 0; i < gt_count; i++) {
>> +            gd_buf[i] = cpu_to_le32(extent->l1_table[i]);
>> +        }
>> +    }
>> +    ret = bdrv_pwrite(bs, offset * BDRV_SECTOR_SIZE, gd_buf, gd_buf_size);
>> +    if (ret < 0) {
>> +        goto exit;
>> +    }
>> +
>> +    /* save real gd_offset */
>> +    footer->header.gd_offset = cpu_to_le64(offset);
>> +    offset += gd_sectors;
>> +
>> +    /* write footer */
>> +    ret = bdrv_pwrite(bs, offset * BDRV_SECTOR_SIZE, footer, sizeof(*footer));
>> +    if (ret < 0) {
>> +        goto exit;
>> +    }
>> +
>> +    ret = 0;
>> + exit:
>> +    g_free(gd_buf);
>> +    return ret;
>> +}
>> +
>> +static int vmdk_write_grain_table(VmdkExtent *extent)
>> +{
>> +    int i, ret;
>> +    uint64_t offset;
>> +    uint32_t *l2_table = NULL;
>> +    VMDK4MetaMarker gtm;
>> +
>> +    for (i = 0; i < L2_CACHE_SIZE; i++) {
>> +        if (extent->l1_table[extent->l1_index] == extent->l2_cache_offsets[i]) {
>> +            l2_table = extent->l2_cache + (i * extent->l2_size);
>> +        }
>> +    }
>> +    if (!l2_table) {
>> +        ret = -EINVAL;
>> +        goto out;
>> +    }
>> +
>> +    offset = le64_to_cpu(extent->footer.header.gd_offset) << 9;
>> +
>> +    memset(&gtm, 0, sizeof(gtm));
>> +    gtm.val = cpu_to_le32((extent->l2_size * sizeof(uint32_t)) >> 9);
>> +    gtm.type = cpu_to_le32(MARKER_GRAIN_TABLE);
>> +    if (bdrv_pwrite(extent->file, offset, &gtm, sizeof(gtm)) < 0) {
>> +        ret = -EIO;
>> +        goto out;
>> +    }
>> +
>> +    offset += sizeof(gtm);
>> +    extent->l1_table[extent->l1_index] = offset >> 9;
>> +    if (bdrv_pwrite(extent->file, offset,
>> +                    l2_table, extent->l2_size * sizeof(uint32_t)
>> +                   ) != extent->l2_size * sizeof(uint32_t)) {
>> +        ret = -EIO;
>> +        goto out;
>> +    }
>> +
>> +    offset += extent->l2_size * sizeof(uint32_t);
>> +    extent->l1_table_offset = offset;
>> +    extent->footer.header.gd_offset = cpu_to_le64(offset >> 9);
>> +
>> +    ret = 0;
>> + out:
>> +    return ret;
>> +}
>> +
>>  static int get_cluster_offset(BlockDriverState *bs,
>>                                      VmdkExtent *extent,
>>                                      VmdkMetaData *m_data,
>> @@ -1032,7 +1178,7 @@ static int get_cluster_offset(BlockDriverState *bs,
>>                                      uint64_t *cluster_offset)
>>  {
>>      unsigned int l1_index, l2_offset, l2_index;
>> -    int min_index, i, j;
>> +    int min_index, i, j, ret;
>>      uint32_t min_count, *l2_table;
>>      bool zeroed = false;
>>
>> @@ -1046,6 +1192,22 @@ static int get_cluster_offset(BlockDriverState *bs,
>>
>>      offset -= (extent->end_sector - extent->sectors) * SECTOR_SIZE;
>>      l1_index = (offset >> 9) / extent->l1_entry_sectors;
>> +    if (extent->compressed && !extent->l1_table[l1_index]) {
>> +        if (l1_index) {
>> +            /* grain (L2) tables follow compressed data so first L2 table will
>> +             * be written when we move to next index or when we close image.
>> +             * that is why we need to save l1_index in extent itself for easy
>> +             * access from both here and vmdk_close */
>> +            ret = vmdk_write_grain_table(extent);
>> +            if (ret < 0) {
>> +                return ret;
>> +            }
>> +        }
>> +        /* allocate new L2; set it to GD offset for now */
>> +        extent->l1_table[l1_index] = extent->l1_table_offset;
>> +    }
>> +    extent->l1_index = l1_index;
>> +
>>      if (l1_index >= extent->l1_size) {
>>          return VMDK_ERROR;
>>      }
>> @@ -1092,7 +1254,6 @@ static int get_cluster_offset(BlockDriverState *bs,
>>
>>      if (m_data) {
>>          m_data->valid = 1;
>> -        m_data->l1_index = l1_index;
>>          m_data->l2_index = l2_index;
>>          m_data->offset = *cluster_offset;
>>          m_data->l2_offset = l2_offset;
>> @@ -1208,6 +1369,7 @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
>>      uLongf buf_len;
>>      const uint8_t *write_buf = buf;
>>      int write_len = nb_sectors * 512;
>> +    uint64_t gd_offset;
>>
>>      if (extent->compressed) {
>>          if (!extent->has_marker) {
>> @@ -1234,6 +1396,13 @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
>>          ret = ret < 0 ? ret : -EIO;
>>          goto out;
>>      }
>> +    if (extent->compressed) {
>> +        /* update GD offset after each write */
>> +        gd_offset = bdrv_getlength(extent->file);
>> +        extent->l1_table_offset = gd_offset;
>> +        gd_offset /= BDRV_SECTOR_SIZE;
>> +        extent->footer.header.gd_offset = cpu_to_le64(gd_offset);
>> +    }
>>      ret = 0;
>>   out:
>>      g_free(data);
>> @@ -1532,10 +1701,12 @@ static int vmdk_create_extent(const char *filename, int64_t filesize,
>>      int ret, i;
>>      BlockDriverState *bs = NULL;
>>      VMDK4Header header;
>> +    VMDK4Footer footer;
>>      Error *local_err = NULL;
>>      uint32_t tmp, magic, grains, gd_sectors, gt_size, gt_count;
>>      uint32_t *gd_buf = NULL;
>>      int gd_buf_size;
>> +    uint64_t grain_offset, rgd_offset, gd_offset;
>>
>>      ret = bdrv_create_file(filename, opts, &local_err);
>>      if (ret < 0) {
>> @@ -1560,28 +1731,38 @@ static int vmdk_create_extent(const char *filename, int64_t filesize,
>>      }
>>      magic = cpu_to_be32(VMDK4_MAGIC);
>>      memset(&header, 0, sizeof(header));
>> -    header.version = zeroed_grain ? 2 : 1;
>> -    header.flags = VMDK4_FLAG_RGD | VMDK4_FLAG_NL_DETECT
>> -                   | (compress ? VMDK4_FLAG_COMPRESS | VMDK4_FLAG_MARKER : 0)
>> +    memset(&footer, 0, sizeof(footer));
>> +
>> +    header.version = (compress ? 3 : zeroed_grain ? 2 : 1);
>> +    header.flags = VMDK4_FLAG_NL_DETECT
>> +                   | (compress ? VMDK4_FLAG_COMPRESS | VMDK4_FLAG_MARKER
>> +                               : VMDK4_FLAG_RGD)
>>                     | (zeroed_grain ? VMDK4_FLAG_ZERO_GRAIN : 0);
>>      header.compressAlgorithm = compress ? VMDK4_COMPRESSION_DEFLATE : 0;
>>      header.capacity = filesize / BDRV_SECTOR_SIZE;
>>      header.granularity = 128;
>>      header.num_gtes_per_gt = BDRV_SECTOR_SIZE;
>>
>> -    grains = DIV_ROUND_UP(filesize / BDRV_SECTOR_SIZE, header.granularity);
>> +    grains = DIV_ROUND_UP(header.capacity, header.granularity);
>>      gt_size = DIV_ROUND_UP(header.num_gtes_per_gt * sizeof(uint32_t),
>>                             BDRV_SECTOR_SIZE);
>>      gt_count = DIV_ROUND_UP(grains, header.num_gtes_per_gt);
>>      gd_sectors = DIV_ROUND_UP(gt_count * sizeof(uint32_t), BDRV_SECTOR_SIZE);
>>
>>      header.desc_offset = 1;
>> -    header.desc_size = 20;
>> -    header.rgd_offset = header.desc_offset + header.desc_size;
>> -    header.gd_offset = header.rgd_offset + gd_sectors + (gt_size * gt_count);
>> -    header.grain_offset =
>> +    header.desc_size = (compress ? 2 : 20);
>> +    rgd_offset = header.desc_offset + header.desc_size;
>> +    header.rgd_offset = ((header.flags & VMDK4_FLAG_RGD) ? rgd_offset : 0);
>> +    gd_offset = rgd_offset + gd_sectors + (gt_size * gt_count);
>> +    header.gd_offset = (compress ? VMDK4_GD_AT_END : gd_offset);
>> +    grain_offset =
>>          ROUND_UP(header.gd_offset + gd_sectors + (gt_size * gt_count),
>>                   header.granularity);
>> +    /* streamOptimized reserves first 128 sectors */
>> +    header.grain_offset = (compress ? header.granularity : grain_offset);
>> +    /* streamOptimized's grain directory is at the end */
>> +    gd_offset = header.grain_offset + 1;
>> +
>>      /* swap endianness for all header fields */
>>      header.version = cpu_to_le32(header.version);
>>      header.flags = cpu_to_le32(header.flags);
>> @@ -1618,30 +1799,55 @@ static int vmdk_create_extent(const char *filename, int64_t filesize,
>>          goto exit;
>>      }
>>
>> -    /* write grain directory */
>> -    gd_buf_size = gd_sectors * BDRV_SECTOR_SIZE;
>> -    gd_buf = g_malloc0(gd_buf_size);
>> -    for (i = 0, tmp = le64_to_cpu(header.rgd_offset) + gd_sectors;
>> -         i < gt_count; i++, tmp += gt_size) {
>> -        gd_buf[i] = cpu_to_le32(tmp);
>> -    }
>> -    ret = bdrv_pwrite(bs, le64_to_cpu(header.rgd_offset) * BDRV_SECTOR_SIZE,
>> -                      gd_buf, gd_buf_size);
>> -    if (ret < 0) {
>> -        error_set(errp, QERR_IO_ERROR);
>> -        goto exit;
>> -    }
>> +    if (compress) {
>> +        /* footer marker */
>> +        footer.footer_marker.val = cpu_to_le64(1);
>> +        footer.footer_marker.type = cpu_to_le32(MARKER_FOOTER);
>>
>> -    /* write backup grain directory */
>> -    for (i = 0, tmp = le64_to_cpu(header.gd_offset) + gd_sectors;
>> -         i < gt_count; i++, tmp += gt_size) {
>> -        gd_buf[i] = cpu_to_le32(tmp);
>> -    }
>> -    ret = bdrv_pwrite(bs, le64_to_cpu(header.gd_offset) * BDRV_SECTOR_SIZE,
>> -                      gd_buf, gd_buf_size);
>> -    if (ret < 0) {
>> -        error_set(errp, QERR_IO_ERROR);
>> -        goto exit;
>> +        /* header */
>> +        footer.magic = cpu_to_be32(VMDK4_MAGIC);
>> +        footer.header = header;
>> +
>> +        /* grain directory offset */
>> +        footer.header.gd_offset = cpu_to_le64(gd_offset);
>> +
>> +        /* EOS marker */
>> +        footer.eos_marker.type = cpu_to_le32(MARKER_END_OF_STREAM);
>> +
>> +        ret = vmdk_write_footer(bs, &footer, NULL);
>> +        if (ret < 0) {
>> +            error_set(errp, QERR_IO_ERROR);
>> +            goto exit;
>> +        }
>> +    } else {
>> +        /* write redundant grain directory (if applicable) */
>> +        if (le64_to_cpu(header.rgd_offset)) {
>> +            gd_buf_size = gd_sectors * BDRV_SECTOR_SIZE;
>> +            gd_buf = g_malloc0(gd_buf_size);
>> +            for (i = 0, tmp = le64_to_cpu(header.rgd_offset) + gd_sectors;
>> +                 i < gt_count; i++, tmp += gt_size) {
>> +                gd_buf[i] = cpu_to_le32(tmp);
>> +            }
>> +            ret = bdrv_pwrite(bs, le64_to_cpu(header.rgd_offset) *
>> +                                  BDRV_SECTOR_SIZE,
>> +                              gd_buf, gd_buf_size);
>> +            if (ret < 0) {
>> +                error_set(errp, QERR_IO_ERROR);
>> +                goto exit;
>> +            }
>> +        }
>> +
>> +        /* write grain directory */
>> +        for (i = 0, tmp = le64_to_cpu(header.gd_offset) + gd_sectors;
>> +             i < gt_count; i++, tmp += gt_size) {
>> +            gd_buf[i] = cpu_to_le32(tmp);
>> +        }
>> +        ret = bdrv_pwrite(bs, le64_to_cpu(header.gd_offset) * BDRV_SECTOR_SIZE,
>> +                          gd_buf, gd_buf_size);
>> +        if (ret < 0) {
>> +            error_set(errp, QERR_IO_ERROR);
>> +            goto exit;
>> +        }
>>      }
>>
>>      ret = 0;
>> @@ -1912,6 +2118,15 @@ exit:
>>  static void vmdk_close(BlockDriverState *bs)
>>  {
>>      BDRVVmdkState *s = bs->opaque;
>> +    VmdkExtent *extent = &s->extents[0];
>> +
>> +    if (extent->compressed) {
>> +        while (extent < &s->extents[s->num_extents]) {
>> +            vmdk_write_grain_table(extent);
>> +            vmdk_write_footer(extent->file, &extent->footer, extent);
>> +            extent++;
>> +        }
>> +    }
>>
>>      vmdk_free_extents(bs);
>>      g_free(s->create_type);
>> --
>> 1.7.1
>>


