From: Alex Williamson
Subject: Re: [Qemu-devel] [Resend RFC PATCH 4/4] VFIO: Read IOMMU fault info from kernel space when get fault event
Date: Mon, 20 Feb 2017 14:09:08 -0700

On Mon, 20 Feb 2017 09:28:07 +0800
Lan Tianyu <address@hidden> wrote:

> This patch implements a fault event handler that uses a new VFIO cmd to
> get fault info and notify the vIOMMU device model.
> 
> Signed-off-by: Lan Tianyu <address@hidden>
> ---
>  hw/vfio/common.c           | 51 ++++++++++++++++++++++++++++++++++++++++++++++
>  linux-headers/linux/vfio.h | 22 ++++++++++++++++++++
>  2 files changed, 73 insertions(+)
> 
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index 628b424..4f76e26 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -297,6 +297,57 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
>  
>  static void vfio_iommu_fault(void *opaque)
>  {
> +    VFIOContainer *container = opaque;
> +    struct vfio_iommu_type1_get_fault_info *info;
> +    struct vfio_iommu_fault_info *fault_info;
> +    MemoryRegion *mr = container->space->as->root;
> +    int count = 0, i, ret;
> +    IOMMUFaultInfo tmp;
> +
> +    if (!event_notifier_test_and_clear(&container->fault_notifier)) {
> +        return;
> +    }
> +
> +    info = g_malloc0(sizeof(*info));
> +    if (!info) {
> +        error_report("vfio: Fail to allocate memory");
> +        return;
> +    }
> +
> +    info->argsz = sizeof(*info);
> +
> +    ret = ioctl(container->fd, VFIO_IOMMU_GET_FAULT_INFO, info);
> +    if (ret && ret != -ENOSPC) {
> +        error_report("vfio: Can't get fault info");
> +        goto err_exit;
> +    }
> +
> +    count = info->count;
> +    if (count <= 0) {
> +        goto err_exit;
> +    }
> +
> +    info = g_realloc(info, sizeof(*info) + count * sizeof(*fault_info));
> +    info->argsz = sizeof(*info) + count * sizeof(*fault_info);
> +    fault_info = info->fault_info;
> +
> +    ret = ioctl(container->fd, VFIO_IOMMU_GET_FAULT_INFO, info);
> +    if (ret) {
> +        error_report("vfio: Can't get fault info");
> +        goto err_exit;
> +    }
> +
> +    for (i = 0; i < info->count; i++) {
> +        tmp.addr = fault_info[i].addr;
> +        tmp.sid = fault_info[i].sid;
> +        tmp.fault_reason = fault_info[i].fault_reason;
> +        tmp.is_write = fault_info[i].is_write;
> +
> +        memory_region_iommu_fault_notify(mr, &tmp);
> +    }

Are there service requirements for handling these faults?  Can the
device wait indefinitely?  Can userspace handling of such faults meet
the device service requirements?  Is userspace handling sufficient for
the device's performance needs?  Do we get one eventfd per fault entry?
How do we know if the faults have overflowed?  Would an overflow be
fatal or would there be a retry mechanism?
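
Taking the overflow question concretely: the proposed struct
vfio_iommu_type1_get_fault_info already carries a 'flags' field that this
patch leaves unused.  Purely as a hypothetical sketch (nothing below is
defined by this RFC), the kernel could advertise dropped entries through a
flag bit there, and userspace would then decide whether that's fatal or
just triggers a resync:

#include <stdio.h>
#include <linux/vfio.h>   /* with the RFC additions applied */

/* Hypothetical flag, NOT part of this RFC: set by the kernel when fault
 * entries had to be dropped because its internal queue overflowed. */
#define VFIO_IOMMU_FAULT_INFO_OVERFLOW (1 << 0)

static void check_fault_overflow(const struct vfio_iommu_type1_get_fault_info *info)
{
    if (info->flags & VFIO_IOMMU_FAULT_INFO_OVERFLOW) {
        /* Some faults were lost; the vIOMMU model would need a recovery
         * path here (retry, invalidate-and-resync, or treat it as fatal). */
        fprintf(stderr, "vfio: IOMMU fault queue overflowed, entries dropped\n");
    }
}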

> +
> +err_exit:
> +    g_free(info);
>  }
>  
>  static int vfio_set_iommu_fault_notifier(struct VFIOContainer *container)
> diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h
> index ca890ee..8b172f5 100644
> --- a/linux-headers/linux/vfio.h
> +++ b/linux-headers/linux/vfio.h
> @@ -550,6 +550,28 @@ struct vfio_iommu_type1_set_fault_eventfd {
>  
>  #define VFIO_IOMMU_SET_FAULT_EVENTFD _IO(VFIO_TYPE, VFIO_BASE + 17)
>  
> +/*
> + * VFIO_IOMMU_GET_FAULT_INFO         _IO(VFIO_TYPE, VFIO_BASE + 18)
> + *
> + * Return IOMMU fault info to userspace.
> + */
> +
> +struct vfio_iommu_fault_info {
> +     __u64   addr;
> +     __u16   sid;
> +     __u8    fault_reason;
> +     __u8    is_write:1;
> +};
> +
> +struct vfio_iommu_type1_get_fault_info {
> +     __u32   argsz;
> +     __u32   flags;
> +     __u32   count;
> +     struct vfio_iommu_fault_info fault_info[];
> +};
> +
> +#define VFIO_IOMMU_GET_FAULT_INFO    _IO(VFIO_TYPE, VFIO_BASE + 18)
> +
>  /* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
>  
>  /*
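
For reference, here is roughly what a standalone userspace consumer of the
proposed interface looks like, following the same two-call argsz handshake
as the QEMU code above (the -ENOSPC convention for the undersized first
call is inferred from that code, since the kernel side isn't shown here;
error handling is trimmed):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>   /* with the RFC additions applied */

static int dump_iommu_faults(int container_fd)
{
    struct vfio_iommu_type1_get_fault_info hdr = { .argsz = sizeof(hdr) };
    struct vfio_iommu_type1_get_fault_info *info;
    size_t sz;
    __u32 i;

    /* Sizing call: buffer too small for any entries, kernel fills in 'count'. */
    if (ioctl(container_fd, VFIO_IOMMU_GET_FAULT_INFO, &hdr) < 0 &&
        errno != ENOSPC) {
        return -errno;
    }
    if (!hdr.count) {
        return 0;
    }

    /* Data call: buffer resized to hold 'count' fault entries. */
    sz = sizeof(*info) + hdr.count * sizeof(struct vfio_iommu_fault_info);
    info = calloc(1, sz);
    if (!info) {
        return -ENOMEM;
    }
    info->argsz = sz;

    if (ioctl(container_fd, VFIO_IOMMU_GET_FAULT_INFO, info) < 0) {
        free(info);
        return -errno;
    }

    for (i = 0; i < info->count; i++) {
        printf("fault: addr=0x%llx sid=0x%x reason=%u %s\n",
               (unsigned long long)info->fault_info[i].addr,
               info->fault_info[i].sid,
               info->fault_info[i].fault_reason,
               info->fault_info[i].is_write ? "write" : "read");
    }

    free(info);
    return 0;
}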
