Re: [PATCH v4 04/11] vfio: Support for RamDiscardMgr in the !vIOMMU case


From: David Hildenbrand
Subject: Re: [PATCH v4 04/11] vfio: Support for RamDiscardMgr in the !vIOMMU case
Date: Thu, 14 Jan 2021 16:57:04 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Thunderbird/78.5.0

On 14.01.21 16:54, David Hildenbrand wrote:
> On 14.01.21 00:27, Alex Williamson wrote:
>> On Thu,  7 Jan 2021 14:34:16 +0100
>> David Hildenbrand <david@redhat.com> wrote:
>>
>>> Implement support for RamDiscardMgr, to prepare for virtio-mem
>>> support. Instead of mapping the whole memory section, we only map
>>> "populated" parts and update the mapping when notified about
>>> discarding/population of memory via the RamDiscardListener. Similarly, when
>>> syncing the dirty bitmaps, sync only the actually mapped (populated) parts
>>> by replaying via the notifier.
>>>
>>> Using virtio-mem with vfio is still blocked via
>>> ram_block_discard_disable()/ram_block_discard_require() after this patch.
>>>
>>> Cc: Paolo Bonzini <pbonzini@redhat.com>
>>> Cc: "Michael S. Tsirkin" <mst@redhat.com>
>>> Cc: Alex Williamson <alex.williamson@redhat.com>
>>> Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
>>> Cc: Igor Mammedov <imammedo@redhat.com>
>>> Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
>>> Cc: Peter Xu <peterx@redhat.com>
>>> Cc: Auger Eric <eric.auger@redhat.com>
>>> Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
>>> Cc: teawater <teawaterz@linux.alibaba.com>
>>> Cc: Marek Kedzierski <mkedzier@redhat.com>
>>> Signed-off-by: David Hildenbrand <david@redhat.com>
>>> ---
>>>  hw/vfio/common.c              | 200 ++++++++++++++++++++++++++++++++++
>>>  include/hw/vfio/vfio-common.h |  12 ++
>>>  2 files changed, 212 insertions(+)
>>>
>>> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
>>> index 6ff1daa763..2bd219cf1d 100644
>>> --- a/hw/vfio/common.c
>>> +++ b/hw/vfio/common.c
>>> @@ -654,6 +654,136 @@ out:
>>>      rcu_read_unlock();
>>>  }
>>>  
>>> +static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
>>> +                                            const MemoryRegion *mr,
>>> +                                            ram_addr_t offset, ram_addr_t size)
>>> +{
>>> +    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
>>> +                                                listener);
>>> +    const hwaddr mr_start = MAX(offset, vrdl->offset_within_region);
>>> +    const hwaddr mr_end = MIN(offset + size,
>>> +                              vrdl->offset_within_region + vrdl->size);
>>> +    const hwaddr iova = mr_start - vrdl->offset_within_region +
>>> +                        vrdl->offset_within_address_space;
>>> +    int ret;
>>> +
>>> +    if (mr_start >= mr_end) {
>>> +        return;
>>> +    }
>>> +
>>> +    /* Unmap with a single call. */
>>> +    ret = vfio_dma_unmap(vrdl->container, iova, mr_end - mr_start, NULL);
>>> +    if (ret) {
>>> +        error_report("%s: vfio_dma_unmap() failed: %s", __func__,
>>> +                     strerror(-ret));
>>> +    }
>>> +}
>>> +
>>> +static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
>>> +                                            const MemoryRegion *mr,
>>> +                                            ram_addr_t offset, ram_addr_t size)
>>> +{
>>> +    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
>>> +                                                listener);
>>> +    const hwaddr mr_end = MIN(offset + size,
>>> +                              vrdl->offset_within_region + vrdl->size);
>>> +    hwaddr mr_start = MAX(offset, vrdl->offset_within_region);
>>> +    hwaddr mr_next, iova;
>>> +    void *vaddr;
>>> +    int ret;
>>> +
>>> +    /*
>>> +     * Map in (aligned within memory region) minimum granularity, so we can
>>> +     * unmap in minimum granularity later.
>>> +     */
>>> +    for (; mr_start < mr_end; mr_start = mr_next) {
>>> +        mr_next = ROUND_UP(mr_start + 1, vrdl->granularity);
>>> +        mr_next = MIN(mr_next, mr_end);
>>> +
>>> +        iova = mr_start - vrdl->offset_within_region +
>>> +               vrdl->offset_within_address_space;
>>> +        vaddr = memory_region_get_ram_ptr(vrdl->mr) + mr_start;
>>> +
>>> +        ret = vfio_dma_map(vrdl->container, iova, mr_next - mr_start,
>>> +                           vaddr, mr->readonly);
>>> +        if (ret) {
>>> +            /* Rollback */
>>> +            vfio_ram_discard_notify_discard(rdl, mr, offset, size);
>>> +            return ret;
>>> +        }
>>> +    }
>>> +    return 0;
>>> +}
>>> +
>>> +static void vfio_ram_discard_notify_discard_all(RamDiscardListener *rdl,
>>> +                                                const MemoryRegion *mr)
>>> +{
>>> +    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
>>> +                                                listener);
>>> +    int ret;
>>> +
>>> +    /* Unmap with a single call. */
>>> +    ret = vfio_dma_unmap(vrdl->container, vrdl->offset_within_address_space,
>>> +                         vrdl->size, NULL);
>>> +    if (ret) {
>>> +        error_report("%s: vfio_dma_unmap() failed: %s", __func__,
>>> +                     strerror(-ret));
>>> +    }
>>> +}
>>> +
>>> +static void vfio_register_ram_discard_notifier(VFIOContainer *container,
>>> +                                               MemoryRegionSection *section)
>>> +{
>>> +    RamDiscardMgr *rdm = memory_region_get_ram_discard_mgr(section->mr);
>>> +    RamDiscardMgrClass *rdmc = RAM_DISCARD_MGR_GET_CLASS(rdm);
>>> +    VFIORamDiscardListener *vrdl;
>>> +
>>> +    vrdl = g_new0(VFIORamDiscardListener, 1);
>>> +    vrdl->container = container;
>>> +    vrdl->mr = section->mr;
>>> +    vrdl->offset_within_region = section->offset_within_region;
>>> +    vrdl->offset_within_address_space = section->offset_within_address_space;
>>> +    vrdl->size = int128_get64(section->size);
>>> +    vrdl->granularity = rdmc->get_min_granularity(rdm, section->mr);
>>> +
>>> +    /* Ignore some corner cases not relevant in practice. */
>>> +    g_assert(QEMU_IS_ALIGNED(vrdl->offset_within_region, TARGET_PAGE_SIZE));
>>> +    g_assert(QEMU_IS_ALIGNED(vrdl->offset_within_address_space,
>>> +                             TARGET_PAGE_SIZE));
>>> +    g_assert(QEMU_IS_ALIGNED(vrdl->size, TARGET_PAGE_SIZE));
>>
>> Should probably toss in a test of vrdl->granularity vs
>> container->pgsizes too, right?  Looks good otherwise:
> 
> Makes sense as a sanity check. What about
> 
> g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));

^ and, to actually compare against the container's supported page sizes
as suggested:

g_assert(vrdl->granularity >= 1 << ctz64(container->pgsizes));
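
For reference, a sketch of how the two checks might sit together in
vfio_register_ram_discard_notifier() from the patch above, reusing the
names from the diff. The 1ULL shift width is an assumption here, to
keep the shift well-defined even for very large page sizes; a plain 1
would also work in practice:

    vrdl->granularity = rdmc->get_min_granularity(rdm, section->mr);

    /* The minimum granularity must be a non-zero power of two ... */
    g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));
    /*
     * ... and no smaller than the smallest page size the container
     * supports: container->pgsizes is a bitmap of supported page
     * sizes, so ctz64() picks out the smallest one. Mapping in this
     * granularity guarantees we can also unmap in this granularity
     * later.
     */
    g_assert(vrdl->granularity >= 1ULL << ctz64(container->pgsizes));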

-- 
Thanks,

David / dhildenb
