From: Peter Maydell
Subject: Re: [Qemu-devel] [PATCH 20/40] pci: use memory core for iommu support
Date: Tue, 7 May 2013 19:30:07 +0100

On 7 May 2013 15:16, Paolo Bonzini <address@hidden> wrote:
> From: Avi Kivity <address@hidden>
>
> Use the new iommu support in the memory core for iommu support.  The only
> user, spapr, is also converted, but it still provides a DMAContext
> interface until the non-PCI bits switch to AddressSpace.
>
> Cc: Michael S. Tsirkin <address@hidden>
> Signed-off-by: Avi Kivity <address@hidden>
> Signed-off-by: Paolo Bonzini <address@hidden>
> ---
>  hw/pci/pci.c             |   53 ++++++++++++++++++++++++++--------------------
>  hw/ppc/spapr_pci.c       |   12 +++++++---
>  include/hw/pci/pci.h     |    7 ++++-
>  include/hw/pci/pci_bus.h |    5 ++-
>  4 files changed, 46 insertions(+), 31 deletions(-)
>
> diff --git a/hw/pci/pci.c b/hw/pci/pci.c
> index 16ed118..3eb397c 100644
> --- a/hw/pci/pci.c
> +++ b/hw/pci/pci.c
> @@ -279,6 +279,16 @@ int pci_find_domain(const PCIBus *bus)
>      return -1;
>  }
>
> +static MemoryRegion *pci_default_iommu(PCIBus *bus, void *opaque, int devfn)
> +{
> +    /* FIXME: inherit memory region from bus creator */
> +    return get_system_memory();
> +}

This seems a bit misnamed since it doesn't actually need to
return an iommu MemoryRegion (and in fact in most cases it
won't). What it's actually returning is "this is the memory
region representing the view for bus master DMA devices to
DMA into", I think.

Also, technically speaking get_system_memory() is never the
right answer, though in practice it's good enough for our
purposes. (Returning get_system_memory() would allow a bus
master DMA device to access back into the PCI bus by DMAing
to the address in system memory where the PCI host controller
is mapped, which I'm guessing is not possible on real
hardware.)

As you kind of imply with the FIXME comment here, I think
what we probably actually eventually want is for pci_bus_init()
and friends to have a parameter for the DMA MemoryRegion
(but what this patch does is fine for now).
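Roughly this shape, I mean (just a sketch; the extra parameter
and the way the opaque pointer is used here are hypothetical,
not something this patch does):

    /* Hypothetical default hook: the bus creator passes in the
     * MemoryRegion for bus master DMA as the opaque pointer, and
     * we simply hand it back for every devfn.
     */
    static MemoryRegion *pci_default_iommu(PCIBus *bus, void *opaque,
                                           int devfn)
    {
        MemoryRegion *dma_mr = opaque;
        return dma_mr;
    }

    static void pci_bus_init(PCIBus *bus, DeviceState *parent,
                             const char *name,
                             MemoryRegion *address_space_mem,
                             MemoryRegion *address_space_io,
                             MemoryRegion *address_space_dma, /* new */
                             uint8_t devfn_min)
    {
        ...
        pci_setup_iommu(bus, pci_default_iommu, NULL, address_space_dma);
        ...
    }

That would also let boards whose DMA view genuinely differs from
get_system_memory() express it without registering their own hook.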

Once these patches go in I could use this to do the
versatile_pci SMAP registers, though that's more for
completeness than anything else.

> +
> +static void pci_default_iommu_dtor(MemoryRegion *mr)
> +{
> +}
> +
>  static void pci_bus_init(PCIBus *bus, DeviceState *parent,
>                           const char *name,
>                           MemoryRegion *address_space_mem,
> @@ -289,6 +299,7 @@ static void pci_bus_init(PCIBus *bus, DeviceState *parent,
>      bus->devfn_min = devfn_min;
>      bus->address_space_mem = address_space_mem;
>      bus->address_space_io = address_space_io;
> +    pci_setup_iommu(bus, pci_default_iommu, NULL, NULL);
>
>      /* host bridge */
>      QLIST_INIT(&bus->child);
> @@ -801,21 +812,15 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus,
>                       PCI_SLOT(devfn), PCI_FUNC(devfn), name, bus->devices[devfn]->name);
>          return NULL;
>      }
> +
>      pci_dev->bus = bus;
> -    if (bus->dma_context_fn) {
> -        pci_dev->dma = bus->dma_context_fn(bus, bus->dma_context_opaque, devfn);
> -    } else {
> -        /* FIXME: Make dma_context_fn use MemoryRegions instead, so this path is
> -         * taken unconditionally */
> -        /* FIXME: inherit memory region from bus creator */
> -        memory_region_init_alias(&pci_dev->bus_master_enable_region, "bus master",
> -                                 get_system_memory(), 0,
> -                                 memory_region_size(get_system_memory()));
> -        memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
> -        address_space_init(&pci_dev->bus_master_as, &pci_dev->bus_master_enable_region);
> -        pci_dev->dma = g_new(DMAContext, 1);
> -        dma_context_init(pci_dev->dma, &pci_dev->bus_master_as);
> -    }
> +    pci_dev->iommu = bus->iommu_fn(bus, bus->iommu_opaque, devfn);
> +    memory_region_init_alias(&pci_dev->bus_master_enable_region, "bus master",
> +                             pci_dev->iommu, 0, memory_region_size(pci_dev->iommu));
> +    memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
> +    address_space_init(&pci_dev->bus_master_as, &pci_dev->bus_master_enable_region);
> +    pci_dev->dma = g_new(DMAContext, 1);
> +    dma_context_init(pci_dev->dma, &pci_dev->bus_master_as);
>
>      pci_dev->devfn = devfn;
>      pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);
> @@ -870,12 +875,12 @@ static void do_pci_unregister_device(PCIDevice *pci_dev)
>      pci_dev->bus->devices[pci_dev->devfn] = NULL;
>      pci_config_free(pci_dev);
>
> -    if (!pci_dev->bus->dma_context_fn) {
> -        address_space_destroy(&pci_dev->bus_master_as);
> -        memory_region_destroy(&pci_dev->bus_master_enable_region);
> -        g_free(pci_dev->dma);
> -        pci_dev->dma = NULL;
> -    }
> +    address_space_destroy(&pci_dev->bus_master_as);
> +    memory_region_del_subregion(&pci_dev->bus_master_enable_region, pci_dev->iommu);
> +    pci_dev->bus->iommu_dtor_fn(pci_dev->iommu);
> +    memory_region_destroy(&pci_dev->bus_master_enable_region);
> +    g_free(pci_dev->dma);
> +    pci_dev->dma = NULL;
>  }
>
>  static void pci_unregister_io_regions(PCIDevice *pci_dev)
> @@ -2234,10 +2239,12 @@ static void pci_device_class_init(ObjectClass *klass, void *data)
>      k->props = pci_props;
>  }
>
> -void pci_setup_iommu(PCIBus *bus, PCIDMAContextFunc fn, void *opaque)
> +void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn, PCIIOMMUDestructorFunc dtor,
> +                     void *opaque)
>  {
> -    bus->dma_context_fn = fn;
> -    bus->dma_context_opaque = opaque;
> +    bus->iommu_fn = fn;
> +    bus->iommu_dtor_fn = dtor ? dtor : pci_default_iommu_dtor;
> +    bus->iommu_opaque = opaque;
>  }
>
>  static const TypeInfo pci_device_type_info = {
> diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
> index eb64a8f..ffbb45e 100644
> --- a/hw/ppc/spapr_pci.c
> +++ b/hw/ppc/spapr_pci.c
> @@ -506,12 +506,11 @@ static const MemoryRegionOps spapr_msi_ops = {
>  /*
>   * PHB PCI device
>   */
> -static DMAContext *spapr_pci_dma_context_fn(PCIBus *bus, void *opaque,
> -                                            int devfn)
> +static MemoryRegion *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
>  {
>      sPAPRPHBState *phb = opaque;
>
> -    return spapr_tce_get_dma(phb->tcet);
> +    return spapr_tce_get_iommu(phb->tcet);
>  }
>
>  static int spapr_phb_init(SysBusDevice *s)
> @@ -651,7 +655,7 @@ static int spapr_phb_init(SysBusDevice *s)
>          fprintf(stderr, "Unable to create TCE table for %s\n", sphb->dtbusname);
>          return -1;
>      }
> -    pci_setup_iommu(bus, spapr_pci_dma_context_fn, sphb);
> +    pci_setup_iommu(bus, spapr_pci_dma_iommu, NULL, sphb);
>
>      QLIST_INSERT_HEAD(&spapr->phbs, sphb, list);
>
> diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
> index 8d075ab..7e7b0f4 100644
> --- a/include/hw/pci/pci.h
> +++ b/include/hw/pci/pci.h
> @@ -242,6 +242,7 @@ struct PCIDevice {
>      PCIIORegion io_regions[PCI_NUM_REGIONS];
>      AddressSpace bus_master_as;
>      MemoryRegion bus_master_enable_region;
> +    MemoryRegion *iommu;
>      DMAContext *dma;
>
>      /* do not access the following fields */
> @@ -401,9 +402,11 @@ int pci_read_devaddr(Monitor *mon, const char *addr, int *domp, int *busp,
>
>  void pci_device_deassert_intx(PCIDevice *dev);
>
> -typedef DMAContext *(*PCIDMAContextFunc)(PCIBus *, void *, int);
> +typedef MemoryRegion *(*PCIIOMMUFunc)(PCIBus *, void *, int);
> +typedef void (*PCIIOMMUDestructorFunc)(MemoryRegion *mr);
>
> -void pci_setup_iommu(PCIBus *bus, PCIDMAContextFunc fn, void *opaque);
> +void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn, PCIIOMMUDestructorFunc dtor,
> +                     void *opaque);

I'm not entirely convinced by this API. Some doc comments
might help...
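For instance, something along these lines (wording only a
suggestion):

    /* A PCIIOMMUFunc returns the MemoryRegion into which the given
     * device (selected by devfn) does its bus master DMA; despite
     * the name, this need not be an IOMMU region.  The
     * PCIIOMMUDestructorFunc, if non-NULL, is called when the
     * device is unregistered, so that per-device regions can be
     * freed; passing NULL means no cleanup is needed.
     */
    void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn,
                         PCIIOMMUDestructorFunc dtor, void *opaque);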

thanks
-- PMM
