
From: Alexey Kardashevskiy
Subject: Re: [Qemu-devel] [RFC PATCH 06/10] spapr_rtas: Add Dynamic DMA windows (DDW) RTAS calls support
Date: Tue, 12 Aug 2014 01:34:49 +1000
User-agent: Mozilla/5.0 (X11; Linux i686 on x86_64; rv:31.0) Gecko/20100101 Thunderbird/31.0

On 08/11/2014 09:51 PM, Alexander Graf wrote:
> 
> On 31.07.14 11:34, Alexey Kardashevskiy wrote:
>> This adds support for the Dynamic DMA Windows (DDW) option defined by
>> the SPAPR specification, which allows additional DMA window(s)
>> supporting page sizes other than 4K.
>>
>> The existing implementation of DDW in the guest tries to create one huge
>> DMA window with 64K or 16MB pages and map the entire guest RAM into it. If it
>> succeeds, the guest switches to dma_direct_ops and never calls
>> TCE hypercalls (H_PUT_TCE,...) again. This enables VFIO devices to use
>> the entire RAM and not waste time on map/unmap.
>>
>> This adds 4 RTAS handlers:
>> * ibm,query-pe-dma-window
>> * ibm,create-pe-dma-window
>> * ibm,remove-pe-dma-window
>> * ibm,reset-pe-dma-window
>> These are registered from the type_init() callback.
>>
>> These RTAS handlers are implemented in a separate file to avoid polluting
>> spapr_iommu.c with PHB-specific code.
>>
>> Signed-off-by: Alexey Kardashevskiy <address@hidden>
>> ---
>>   hw/ppc/Makefile.objs        |   3 +
>>   hw/ppc/spapr_rtas_ddw.c     | 296 ++++++++++++++++++++++++++++++++++++++++++++
>>   include/hw/pci-host/spapr.h |  18 +++
>>   include/hw/ppc/spapr.h      |   6 +-
>>   trace-events                |   4 +
>>   5 files changed, 326 insertions(+), 1 deletion(-)
>>   create mode 100644 hw/ppc/spapr_rtas_ddw.c
>>
>> diff --git a/hw/ppc/Makefile.objs b/hw/ppc/Makefile.objs
>> index edd44d0..9773294 100644
>> --- a/hw/ppc/Makefile.objs
>> +++ b/hw/ppc/Makefile.objs
>> @@ -7,6 +7,9 @@ obj-$(CONFIG_PSERIES) += spapr_pci.o
>>   ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES)$(CONFIG_LINUX), yyy)
>>   obj-y += spapr_pci_vfio.o
>>   endif
>> +ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES), yy)
>> +obj-y += spapr_rtas_ddw.o
>> +endif
>>   # PowerPC 4xx boards
>>   obj-y += ppc405_boards.o ppc4xx_devs.o ppc405_uc.o ppc440_bamboo.o
>>   obj-y += ppc4xx_pci.o
>> diff --git a/hw/ppc/spapr_rtas_ddw.c b/hw/ppc/spapr_rtas_ddw.c
>> new file mode 100644
>> index 0000000..943af2c
>> --- /dev/null
>> +++ b/hw/ppc/spapr_rtas_ddw.c
>> @@ -0,0 +1,296 @@
>> +/*
>> + * QEMU sPAPR Dynamic DMA windows support
>> + *
>> + * Copyright (c) 2014 Alexey Kardashevskiy, IBM Corporation.
>> + *
>> + *  This program is free software; you can redistribute it and/or modify
>> + *  it under the terms of the GNU General Public License as published by
>> + *  the Free Software Foundation; either version 2 of the License,
>> + *  or (at your option) any later version.
>> + *
>> + *  This program is distributed in the hope that it will be useful,
>> + *  but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>> + *  GNU General Public License for more details.
>> + *
>> + *  You should have received a copy of the GNU General Public License
>> + *  along with this program; if not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#include "hw/ppc/spapr.h"
>> +#include "hw/pci-host/spapr.h"
>> +#include "trace.h"
>> +
>> +static inline uint32_t spapr_iommu_fixmask(uint32_t cur_mask,
>> +                                           struct ppc_one_seg_page_size *sps,
>> +                                           uint32_t query_mask,
>> +                                           int shift,
>> +                                           uint32_t add_mask)
>> +{
>> +    if ((sps->page_shift == shift) && (query_mask & add_mask)) {
>> +        cur_mask |= add_mask;
>> +    }
>> +    return cur_mask;
>> +}
>> +
>> +static void rtas_ibm_query_pe_dma_window(PowerPCCPU *cpu,
>> +                                         sPAPREnvironment *spapr,
>> +                                         uint32_t token, uint32_t nargs,
>> +                                         target_ulong args,
>> +                                         uint32_t nret, target_ulong rets)
>> +{
>> +    CPUPPCState *env = &cpu->env;
>> +    sPAPRPHBState *sphb;
>> +    sPAPRPHBClass *spc;
>> +    uint64_t buid;
>> +    uint32_t addr, pgmask = 0;
>> +    uint32_t windows_available = 0, page_size_mask = 0;
>> +    long ret, i;
>> +
>> +    if ((nargs != 3) || (nret != 5)) {
>> +        goto param_error_exit;
>> +    }
>> +
>> +    buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
>> +    addr = rtas_ld(args, 0);
>> +    sphb = spapr_pci_find_phb(spapr, buid);
>> +    if (!sphb) {
>> +        goto param_error_exit;
>> +    }
>> +
>> +    spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
>> +    if (!spc->ddw_query) {
>> +        goto hw_error_exit;
>> +    }
>> +
>> +    ret = spc->ddw_query(sphb, &windows_available, &page_size_mask);
>> +    trace_spapr_iommu_ddw_query(buid, addr, windows_available,
>> +                                page_size_mask, pgmask, ret);
>> +    if (ret) {
>> +        goto hw_error_exit;
>> +    }
>> +
>> +    /* DBG! */
>> +    if (!(page_size_mask & DDW_PGSIZE_16M)) {
>> +        goto hw_error_exit;
>> +    }
>> +
>> +    /* Work out biggest possible page size */
>> +    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
>> +        int j;
>> +        struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
>> +        const struct { int shift; uint32_t mask; } masks[] = {
>> +            { 12, DDW_PGSIZE_4K },
>> +            { 16, DDW_PGSIZE_64K },
>> +            { 24, DDW_PGSIZE_16M },
>> +            { 25, DDW_PGSIZE_32M },
>> +            { 26, DDW_PGSIZE_64M },
>> +            { 27, DDW_PGSIZE_128M },
>> +            { 28, DDW_PGSIZE_256M },
>> +            { 34, DDW_PGSIZE_16G },
>> +        };
>> +        for (j = 0; j < ARRAY_SIZE(masks); ++j) {
>> +            pgmask = spapr_iommu_fixmask(pgmask, sps, page_size_mask,
>> +                                         masks[j].shift, masks[j].mask);
>> +        }
>> +    }
>> +
>> +    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
>> +    rtas_st(rets, 1, windows_available);
>> +    /* Return the maximum number of TCEs, as if all RAM were 4K pages */
>> +    rtas_st(rets, 2, ram_size >> SPAPR_TCE_PAGE_SHIFT);
>> +    rtas_st(rets, 3, pgmask);
>> +    rtas_st(rets, 4, pgmask); /* DMA migration mask */
>> +    return;
>> +
>> +hw_error_exit:
>> +    rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
>> +    return;
>> +
>> +param_error_exit:
>> +    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
>> +}
>> +
>> +static void rtas_ibm_create_pe_dma_window(PowerPCCPU *cpu,
>> +                                          sPAPREnvironment *spapr,
>> +                                          uint32_t token, uint32_t nargs,
>> +                                          target_ulong args,
>> +                                          uint32_t nret, target_ulong rets)
>> +{
>> +    sPAPRPHBState *sphb;
>> +    sPAPRPHBClass *spc;
>> +    sPAPRTCETable *tcet = NULL;
>> +    uint32_t addr, page_shift, window_shift, liobn;
>> +    uint64_t buid;
>> +    long ret;
>> +
>> +    if ((nargs != 5) || (nret != 4)) {
>> +        goto param_error_exit;
>> +    }
>> +
>> +    buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
>> +    addr = rtas_ld(args, 0);
>> +    sphb = spapr_pci_find_phb(spapr, buid);
>> +    if (!sphb) {
>> +        goto param_error_exit;
>> +    }
>> +
>> +    spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
>> +    if (!spc->ddw_create) {
>> +        goto hw_error_exit;
>> +    }
>> +
>> +    page_shift = rtas_ld(args, 3);
>> +    window_shift = rtas_ld(args, 4);
>> +    liobn = sphb->dma_liobn + 0x10000;
> 
> What offset is this?

Some new LIOBN. It could be +1 as well; it may be worth defining the offset as a macro.
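For example (SPAPR_PCI_DDW_LIOBN_OFF is just a name made up here, it is not
something in this patch):

    /* Offset added to the default 32bit window's LIOBN to derive the DDW one */
    #define SPAPR_PCI_DDW_LIOBN_OFF    0x10000

    liobn = sphb->dma_liobn + SPAPR_PCI_DDW_LIOBN_OFF;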


> 
>> +
>> +    ret = spc->ddw_create(sphb, page_shift, window_shift, liobn, &tcet);
>> +    trace_spapr_iommu_ddw_create(buid, addr, 1 << page_shift,
>> +                                 1 << window_shift,
> 
> 1ULL? Otherwise 16G pages (and windows) won't work.


Right. Thanks. I'll fix. 16G *pages* are not supported anyway though.
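Roughly, the fixed call would look like this (the trailing trace arguments are
cut off in the quote above, so they are left out here as well):

    ret = spc->ddw_create(sphb, page_shift, window_shift, liobn, &tcet);
    /* 1ULL so a 16G page or window size (shift >= 32) does not overflow int */
    trace_spapr_iommu_ddw_create(buid, addr, 1ULL << page_shift,
                                 1ULL << window_shift,
                                 /* remaining arguments as in the patch */ ...);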



-- 
Alexey


