qemu-devel

Re: [Qemu-devel] [V4 1/4] hw/i386: Introduce AMD IO MMU


From: David kiarie
Subject: Re: [Qemu-devel] [V4 1/4] hw/i386: Introduce AMD IO MMU
Date: Mon, 15 Feb 2016 06:54:56 +0300

On Mon, Feb 15, 2016 at 6:41 AM, David kiarie <address@hidden> wrote:
> On Thu, Feb 4, 2016 at 6:03 PM, Michael S. Tsirkin <address@hidden> wrote:
>> On Mon, Jan 18, 2016 at 06:25:42PM +0300, David Kiarie wrote:
>>> Add AMD IO MMU emulation to Qemu in addition to Intel IO MMU.
>>> The IO MMU does basic translation, error checking and has a
>>> minimal IOTLB implementation.
>>>
>>> Signed-off-by: David Kiarie <address@hidden>
>>> ---
>>>  hw/i386/Makefile.objs |    1 +
>>>  hw/i386/amd_iommu.c   | 1409 +++++++++++++++++++++++++++++++++++++++++++++++++
>>>  hw/i386/amd_iommu.h   |  399 ++++++++++++++
>>>  include/hw/pci/pci.h  |    2 +
>>>  4 files changed, 1811 insertions(+)
>>>  create mode 100644 hw/i386/amd_iommu.c
>>>  create mode 100644 hw/i386/amd_iommu.h
>>>
>>> diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs
>>> index b52d5b8..2f1a265 100644
>>> --- a/hw/i386/Makefile.objs
>>> +++ b/hw/i386/Makefile.objs
>>> @@ -3,6 +3,7 @@ obj-y += multiboot.o
>>>  obj-y += pc.o pc_piix.o pc_q35.o
>>>  obj-y += pc_sysfw.o
>>>  obj-y += intel_iommu.o
>>> +obj-y += amd_iommu.o
>>>  obj-$(CONFIG_XEN) += ../xenpv/ xen/
>>>
>>>  obj-y += kvmvapic.o
>>> diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
>>> new file mode 100644
>>> index 0000000..20111fe
>>> --- /dev/null
>>> +++ b/hw/i386/amd_iommu.c
>>> @@ -0,0 +1,1409 @@
>>> +/*
>>> + * QEMU emulation of AMD IOMMU (AMD-Vi)
>>> + *
>>> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
>>> + * Copyright (C) 2015 David Kiarie, <address@hidden>
>>> + *
>>> + * This program is free software; you can redistribute it and/or modify
>>> + * it under the terms of the GNU General Public License as published by
>>> + * the Free Software Foundation; either version 2 of the License, or
>>> + * (at your option) any later version.
>>> +
>>> + * This program is distributed in the hope that it will be useful,
>>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>>> + * GNU General Public License for more details.
>>> +
>>> + * You should have received a copy of the GNU General Public License along
>>> + * with this program; if not, see <http://www.gnu.org/licenses/>.
>>> + *
>>> + * Cache implementation inspired by hw/i386/intel_iommu.c
>>
>> Link to hardware spec?
>>
>>> + *
>>> + */
>>> +#include "hw/i386/amd_iommu.h"
>>> +
>>> +//#define DEBUG_AMD_IOMMU
>>> +#ifdef DEBUG_AMD_IOMMU
>>> +enum {
>>> +    DEBUG_GENERAL, DEBUG_CAPAB, DEBUG_MMIO, DEBUG_ELOG,
>>> +    DEBUG_CACHE, DEBUG_COMMAND, DEBUG_MMU
>>> +};
>>> +
>>> +#define IOMMU_DBGBIT(x)   (1 << DEBUG_##x)
>>> +static int iommu_dbgflags = IOMMU_DBGBIT(MMIO);
>>> +
>>> +#define IOMMU_DPRINTF(what, fmt, ...) do { \
>>> +    if (iommu_dbgflags & IOMMU_DBGBIT(what)) { \
>>> +        fprintf(stderr, "(amd-iommu)%s: " fmt "\n", __func__, \
>>> +                ## __VA_ARGS__); } \
>>> +    } while (0)
>>> +#else
>>> +#define IOMMU_DPRINTF(what, fmt, ...) do {} while (0)
>>> +#endif
>>> +
>>> +/* configure MMIO registers at startup/reset */
>>> +static void amd_iommu_set_quad(AMDIOMMUState *s, hwaddr addr, uint64_t val,
>>> +                               uint64_t romask, uint64_t w1cmask)
>>> +{
>>> +    stq_le_p(&s->mmior[addr], val);
>>> +    stq_le_p(&s->romask[addr], romask);
>>> +    stq_le_p(&s->w1cmask[addr], w1cmask);
>>> +}
>>> +
>>> +static uint16_t amd_iommu_readw(AMDIOMMUState *s, hwaddr addr)
>>> +{
>>> +    return lduw_le_p(&s->mmior[addr]);
>>> +}
>>> +
>>> +static uint32_t amd_iommu_readl(AMDIOMMUState *s, hwaddr addr)
>>> +{
>>> +    return ldl_le_p(&s->mmior[addr]);
>>> +}
>>> +
>>> +static uint64_t amd_iommu_readq(AMDIOMMUState *s, hwaddr addr)
>>> +{
>>> +    return ldq_le_p(&s->mmior[addr]);
>>> +}
>>> +
>>> +/* internal write */
>>> +static void amd_iommu_writeq_raw(AMDIOMMUState *s, uint64_t val, hwaddr addr)
>>> +{
>>> +    stq_le_p(&s->mmior[addr], val);
>>> +}
>>> +
>>> +/* external write */
>>> +static void amd_iommu_writew(AMDIOMMUState *s, hwaddr addr, uint16_t val)
>>> +{
>>> +    uint16_t romask = lduw_le_p(&s->romask[addr]);
>>> +    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
>>> +    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
>>> +    stw_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & oldval));
>>> +}
>>> +
>>> +static void amd_iommu_writel(AMDIOMMUState *s, hwaddr addr, uint32_t val)
>>> +{
>>> +    uint32_t romask = ldl_le_p(&s->romask[addr]);
>>> +    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
>>> +    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
>>> +    stl_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & oldval));
>>> +}
>>> +
>>> +static void amd_iommu_writeq(AMDIOMMUState *s, hwaddr addr, uint64_t val)
>>> +{
>>> +    uint64_t romask = ldq_le_p(&s->romask[addr]);
>>> +    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
>>> +    uint32_t oldval = ldq_le_p(&s->mmior[addr]);
>>> +    stq_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & oldval));
>>> +}
>>> +
>>> +static void amd_iommu_log_event(AMDIOMMUState *s, uint16_t *evt)
>>> +{
>>> +    /* event logging not enabled */
>>> +    if (!s->evtlog_enabled || *(uint64_t *)&s->mmior[MMIO_STATUS]
>>> +       | MMIO_STATUS_EVT_OVF) {
>>
>> Pls always put |,+ etc as last character on line, not first one.
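>> I.e. something along the lines of (style only, untested):
>>
>>     if (!s->evtlog_enabled || *(uint64_t *)&s->mmior[MMIO_STATUS] |
>>        MMIO_STATUS_EVT_OVF) {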
>>
>>> +        return;
>>> +    }
>>> +
>>> +    /* event log buffer full */
>>> +    if (s->evtlog_tail >= s->evtlog_len) {
>>> +        *(uint64_t *)&s->mmior[MMIO_STATUS] |= MMIO_STATUS_EVT_OVF;
>>> +        /* generate interrupt */
>>> +    }
>>> +
>>> +    if (dma_memory_write(&address_space_memory, s->evtlog_len + s->evtlog_tail,
>>> +       &evt, EVENT_LEN)) {
>>> +        IOMMU_DPRINTF(ELOG, "error: fail to write at address 0x%"PRIx64
>>> +                      " + offset 0x%"PRIx32, s->evtlog, s->evtlog_tail);
>>> +    }
>>> +
>>> +     s->evtlog_tail += EVENT_LEN;
>>> +     *(uint64_t *)&s->mmior[MMIO_STATUS] |= MMIO_STATUS_COMP_INT;
>>
>> I did not look at spec yet - how does guest know log has been written?
>> dma_memory_write might not be atomic - could this be a problem?
>>
>>> +}
>>> +
>>> +/* log an error encountered page-walking
>>> + *
>>> + * @addr: virtual address in translation request
>>> + */
>>> +static void amd_iommu_page_fault(AMDIOMMUState *s, uint16_t devid,
>>> +                                 dma_addr_t addr, uint8_t info)
>>> +{
>>> +    IOMMU_DPRINTF(ELOG, "");
>>> +
>>> +    uint16_t evt[8];
>>> +    uint8_t status;
>>> +
>>> +    info |= EVENT_IOPF_I;
>>> +
>>> +    /* encode information */
>>> +    *(uint16_t *)&evt[0] = devid;
>>> +    *(uint16_t *)&evt[3] = info;
>>> +    *(uint64_t *)&evt[4] = cpu_to_le64(addr);
>>
>> Endian-ness seems wrong?
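>> Presumably this wants the little-endian store helpers so the entry
>> lands in guest memory in a fixed byte order - untested sketch, assuming
>> the field layout here is otherwise right:
>>
>>     stw_le_p(&evt[0], devid);
>>     stq_le_p(&evt[4], addr);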
>>
>>> +
>>> +    /* log a page fault */
>>> +    amd_iommu_log_event(s, evt);
>>> +
>>> +    /* Abort the translation */
>>> +    status = pci_get_word(s->dev.config + PCI_STATUS);
>>> +    pci_set_word(s->dev.config + PCI_STATUS,
>>> +                 status | PCI_STATUS_SIG_TARGET_ABORT);
>>
>> Use pci_word_test_and_set_mask for this.
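>> E.g. (untested):
>>
>>     pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
>>                                PCI_STATUS_SIG_TARGET_ABORT);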
>>
>>> +}
>>> +/*
>>> + * log a master abort accessing device table
>>> + *  @devtab : address of device table entry
>>> + *  @info : error flags
>>> + */
>>> +static void amd_iommu_log_devtab_error(AMDIOMMUState *s, uint16_t devid,
>>> +                                       dma_addr_t devtab, uint8_t info)
>>> +{
>>> +
>>> +    IOMMU_DPRINTF(ELOG, "");
>>> +
>>> +    uint16_t evt[8];
>>> +    uint8_t status;
>>> +
>>> +    info |= EVENT_DEV_TAB_HW_ERROR;
>>> +
>>> +    /* encode information */
>>> +    *(uint16_t *)&evt[0] = devid;
>>> +    *(uint8_t *)&evt[3]  = info;
>>> +    *(uint64_t *)&evt[4] = cpu_to_le64(devtab);
>>> +
>>> +    amd_iommu_log_event(s, evt);
>>> +
>>> +    /* Abort the translation */
>>> +    status = pci_get_word(s->dev.config + PCI_STATUS);
>>> +    pci_set_word(s->dev.config + PCI_STATUS,
>>> +                 status | PCI_STATUS_SIG_TARGET_ABORT);
>>> +}
>>> +
>>> +/* log a master abort encountered during a page-walk
>>> + *  @addr : address that couldn't be accessed
>>> + */
>>> +static void amd_iommu_log_pagetab_error(AMDIOMMUState *s, uint16_t devid,
>>> +                                        dma_addr_t addr, uint16_t info)
>>> +{
>>> +    IOMMU_DPRINTF(ELOG, "");
>>> +
>>> +    uint16_t evt[8];
>>> +    uint8_t status;
>>> +
>>> +    info |= EVENT_PAGE_TAB_HW_ERROR;
>>> +
>>> +    /* encode information */
>>> +    *(uint16_t *)&evt[0] = devid;
>>> +    *(uint8_t *)&evt[3]  = info;
>>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>>> +
>>> +    amd_iommu_log_event(s, evt);
>>> +
>>> +    /* Abort the translation */
>>> +    status = pci_get_word(s->dev.config + PCI_STATUS);
>>> +    pci_set_word(s->dev.config + PCI_STATUS,
>>> +                status | PCI_STATUS_SIG_TARGET_ABORT);
>>> +
>>> +}
>>> +
>>> +/* log an event trying to access command buffer
>>> + *   @addr : address that couldn't be accessed
>>> + */
>>> +static void amd_iommu_log_command_error(AMDIOMMUState *s, dma_addr_t addr)
>>> +{
>>> +    IOMMU_DPRINTF(ELOG, "");
>>> +
>>> +    uint16_t evt[8];
>>> +
>>> +    /* encode information */
>>> +    *(uint8_t *)&evt[3]  = (uint8_t)EVENT_COMMAND_HW_ERROR;
>>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>>> +
>>> +    amd_iommu_log_event(s, evt);
>>> +
>>> +    /* Abort the translation */
>>> +    uint8_t status = pci_get_word(s->dev.config + PCI_STATUS);
>>> +    pci_set_word(s->dev.config + PCI_STATUS,
>>> +                 status | PCI_STATUS_SIG_TARGET_ABORT);
>>> +}
>>> +
>>> +/* log an illegal command event
>>> + *   @addr : address of illegal command
>>> + */
>>> +static void amd_iommu_log_illegalcom_error(AMDIOMMUState *s, uint8_t info,
>>> +                                           dma_addr_t addr)
>>> +{
>>> +    IOMMU_DPRINTF(ELOG, "");
>>> +
>>> +    uint16_t evt[8];
>>> +
>>> +    /* encode information */
>>> +    *(uint8_t *)&evt[3]  = (uint8_t)EVENT_ILLEGAL_COMMAND_ERROR;
>>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>>> +
>>> +    amd_iommu_log_event(s, evt);
>>> +}
>>> +
>>> +/* log an error accessing device table
>>> + *
>>> + *  @devid : device owning the table entry
>>> + *  @devtab : address of device table entry
>>> + *  @info : error flags
>>> + */
>>> +static void amd_iommu_log_illegaldevtab_error(AMDIOMMUState *s, uint16_t devid,
>>> +                                              dma_addr_t addr, uint16_t info)
>>> +{
>>> +    IOMMU_DPRINTF(ELOG, "");
>>> +
>>> +    uint16_t evt[8];
>>> +
>>> +    info |= EVENT_ILLEGAL_DEVTAB_ENTRY;
>>> +
>>> +    *(uint16_t *)&evt[0] = devid;
>>> +    *(uint8_t *)&evt[3]  = info;
>>> +    *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
>>> +
>>> +    amd_iommu_log_event(s, evt);
>>> +}
>>> +
>>> +static gboolean amd_iommu_uint64_equal(gconstpointer v1, gconstpointer v2)
>>> +{
>>> +    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
>>> +}
>>> +
>>> +static guint amd_iommu_uint64_hash(gconstpointer v)
>>> +{
>>> +    return (guint)*(const uint64_t *)v;
>>> +}
>>> +
>>> +static IOMMUIOTLBEntry *amd_iommu_iotlb_lookup(AMDIOMMUState *s, hwaddr addr,
>>> +                                               uint64_t devid)
>>> +{
>>> +    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
>>> +                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>>> +    return g_hash_table_lookup(s->iotlb, &key);
>>> +}
>>> +
>>> +static void amd_iommu_iotlb_reset(AMDIOMMUState *s)
>>> +{
>>> +    assert(s->iotlb);
>>> +    g_hash_table_remove_all(s->iotlb);
>>> +}
>>> +
>>> +static gboolean amd_iommu_iotlb_remove_by_devid(gpointer key, gpointer value,
>>> +                                                gpointer user_data)
>>> +{
>>> +    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
>>> +    uint16_t devid = *(uint16_t *)user_data;
>>> +    return entry->devid == devid;
>>> +}
>>> +
>>> +static void amd_iommu_iotlb_remove_page(AMDIOMMUState *s, hwaddr addr,
>>> +                                        uint64_t devid)
>>> +{
>>> +    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
>>> +                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>>> +    g_hash_table_remove(s->iotlb, &key);
>>> +}
>>> +
>>> +/* extract device id */
>>> +static inline uint16_t devid_extract(uint8_t *cmd)
>>> +{
>>> +    return (uint16_t)cmd[2] & INVAL_DEV_ID_MASK;
>>> +}
>>> +
>>> +static void amd_iommu_invalidate_iotlb(AMDIOMMUState *s, uint64_t *cmd)
>>> +{
>>> +    uint16_t devid = devid_extract((uint8_t *)cmd);
>>> +    /* if invalidation of more than one page requested */
>>> +    if (INVAL_ALL(cmd[0])) {
>>> +        g_hash_table_foreach_remove(s->iotlb, amd_iommu_iotlb_remove_by_devid,
>>> +                                    &devid);
>>> +    } else {
>>> +        hwaddr addr = (hwaddr)(cmd[1] & INVAL_ADDR_MASK);
>>> +        amd_iommu_iotlb_remove_page(s, addr, devid);
>>> +    }
>>> +}
>>> +
>>> +static void amd_iommu_update_iotlb(AMDIOMMUState *s, uint16_t devid,
>>> +                                   uint64_t gpa, uint64_t spa, uint64_t perms,
>>> +                                   uint16_t domid)
>>> +{
>>> +    IOMMUIOTLBEntry *entry = g_malloc(sizeof(*entry));
>>> +    uint64_t *key = g_malloc(sizeof(key));
>>> +    uint64_t gfn = gpa >> IOMMU_PAGE_SHIFT_4K;
>>> +
>>> +    IOMMU_DPRINTF(CACHE, " update iotlb devid: %02x:%02x.%x gpa 0x%"PRIx64
>>> +                  " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
>>> +                  PCI_FUNC(devid), gpa, spa);
>>> +
>>> +    if (g_hash_table_size(s->iotlb) >= IOMMU_IOTLB_MAX_SIZE) {
>>> +        IOMMU_DPRINTF(CACHE, "iotlb exceeds size limit - reset");
>>> +        amd_iommu_iotlb_reset(s);
>>> +    }
>>> +
>>> +    entry->gfn = gfn;
>>> +    entry->domid = domid;
>>> +    entry->perms = perms;
>>> +    entry->translated_addr = spa;
>>> +    *key = gfn | ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
>>> +    g_hash_table_replace(s->iotlb, key, entry);
>>> +}
>>> +
>>> +/* execute a completion wait command */
>>> +static void amd_iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +    unsigned int addr;
>>> +
>>> +    /* completion store */
>>> +    if (cmd[0] & COM_COMPLETION_STORE_MASK) {
>>> +        addr = le64_to_cpu(*(uint64_t *)cmd) & COM_STORE_ADDRESS_MASK;
>>> +        if (dma_memory_write(&address_space_memory, addr, cmd + 8, 8)) {
>>> +            IOMMU_DPRINTF(ELOG, "error: fail to write at address 0%x"PRIx64,
>>> +                          addr);
>>> +        }
>>> +    }
>>> +
>>> +    /* set completion interrupt */
>>> +    if (cmd[0] & COM_COMPLETION_INTR) {
>>> +        s->mmior[MMIO_STATUS] |= MMIO_STATUS_COMP_INT;
>>> +    }
>>> +}
>>> +
>>> +/* get command type */
>>> +static uint8_t opcode(uint8_t *cmd)
>>> +{
>>> +    return cmd[CMDBUF_ID_BYTE] >> CMDBUF_ID_RSHIFT;
>>> +}
>>> +
>>> +/* linux seems to be using reserved bits so I just log without aborting */
>>> +static void iommu_inval_devtab_entry(AMDIOMMUState *s, uint8_t *cmd,
>>> +                                     uint8_t type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    /* This command should invalidate internal caches, of which there are none */
>>> +    if (*(uint64_t *)&cmd[0] & CMD_INVAL_DEV_RSVD || *(uint64_t *)&cmd[2]) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
>>> +    }
>>> +#ifdef DEBUG_AMD_IOMMU
>>> +    uint16_t devid = devid_extract(cmd);
>>> +#endif
>>> +    IOMMU_DPRINTF(COMMAND, "device table entry for devid: %02x:%02x.%x"
>>> +                  "invalidated", PCI_BUS_NUM(devid), PCI_SLOT(devid),
>>> +                  PCI_FUNC(devid));
>>> +}
>>> +
>>> +static void iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    if (*(uint32_t *)&cmd[1] & COMPLETION_WAIT_RSVD) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
>>> +    }
>>> +    /* pretend to wait for command execution to complete */
>>> +    IOMMU_DPRINTF(COMMAND, "completion wait requested with store address 0x%"
>>> +                  PRIx64 " and store data 0x%"PRIx64, (cmd[0] &
>>> +                  COM_STORE_ADDRESS_MASK), *(uint64_t *)(cmd + 8));
>>> +    amd_iommu_completion_wait(s, cmd);
>>> +}
>>> +
>>> +static void iommu_complete_ppr(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    if ((*(uint64_t *)&cmd[0] & COMPLETE_PPR_RQ_RSVD)
>>> +       || *(uint64_t *)&cmd[3] & 0xffff000000000000) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
>>> +    }
>>> +
>>> +    IOMMU_DPRINTF(COMMAND, "Execution of PPR queue requested");
>>> +}
>>> +
>>> +static void iommu_inval_all(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    if ((*(uint64_t *)&cmd[0] & INVAL_IOMMU_ALL_RSVD)
>>> +       || *(uint64_t *)&cmd[2]) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
>>> +    }
>>> +
>>> +    amd_iommu_iotlb_reset(s);
>>> +    IOMMU_DPRINTF(COMMAND, "Invalidation of all IOMMU cache requested");
>>> +}
>>> +
>>> +static inline uint16_t domid_extract(uint64_t *cmd)
>>> +{
>>> +    return (uint16_t)cmd[0] & INVAL_PAGES_DOMID;
>>> +}
>>> +
>>> +static gboolean amd_iommu_iotlb_remove_by_domid(gpointer key, gpointer value,
>>> +                                                gpointer user_data)
>>> +{
>>> +    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
>>> +    uint16_t domid = *(uint16_t *)user_data;
>>> +    return entry->domid == domid;
>>> +}
>>> +
>>> +/* we don't have devid - we can't remove pages by address */
>>> +static void iommu_inval_pages(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +    uint16_t domid = domid_extract((uint64_t *)cmd);
>>> +
>>> +    if (*(uint64_t *)&cmd[0] & INVAL_IOMMU_PAGES_RSVD
>>> +        || *(uint32_t *)&cmd[2] & 0x00000ff0) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
>>> +    }
>>> +
>>> +    g_hash_table_foreach_remove(s->iotlb, amd_iommu_iotlb_remove_by_domid,
>>> +                                &domid);
>>> +
>>> +    IOMMU_DPRINTF(COMMAND, "IOMMU pages for domain 0x%"PRIx16 "invalidated",
>>> +                  domid);
>>> +}
>>> +
>>> +static void iommu_prefetch_pages(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    if ((*(uint64_t *)&cmd[0] & PRF_IOMMU_PAGES_RSVD)
>>> +        || (*(uint32_t *)&cmd[3] & 0x00000fd4)) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
>>> +    }
>>> +
>>> +    IOMMU_DPRINTF(COMMAND, "Pre-fetch of IOMMU pages requested");
>>> +}
>>> +
>>> +static void iommu_inval_inttable(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    if ((*(uint64_t *)&cmd[0] & INVAL_INTR_TABLE_RSVD)
>>> +        || *(uint64_t *)&cmd[2]) {
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
>>> +        return;
>>> +    }
>>> +
>>> +    IOMMU_DPRINTF(COMMAND, "interrupt table invalidated");
>>> +}
>>> +
>>> +static void iommu_inval_iotlb(AMDIOMMUState *s, uint8_t *cmd, uint8_t type)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    if (*(uint32_t *)&cmd[2] & INVAL_IOTLB_PAGES_RSVD) {
>>
>> Again endian-ness seems wrong.
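>> cmd comes from guest memory, so something like this might be wanted
>> (untested):
>>
>>     if (ldl_le_p(&cmd[2]) & INVAL_IOTLB_PAGES_RSVD) {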
>>
>>> +        amd_iommu_log_illegalcom_error(s, type, s->cmdbuf + s->cmdbuf_head);
>>> +        return;
>>> +    }
>>> +
>>> +    amd_iommu_invalidate_iotlb(s, (uint64_t *)cmd);
>>> +    IOMMU_DPRINTF(COMMAND, "IOTLB pages invalidated");
>>> +}
>>> +
>>> +/* not honouring reserved bits is regarded as an illegal command */
>>> +static void amd_iommu_cmdbuf_exec(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint8_t type;
>>> +    uint8_t cmd[IOMMU_COMMAND_SIZE];
>>> +
>>> +    memset(cmd, 0, IOMMU_COMMAND_SIZE);
>>> +
>>> +    if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head, cmd,
>>> +       IOMMU_COMMAND_SIZE)) {
>>> +        IOMMU_DPRINTF(COMMAND, "error: fail to access memory at 0x%"PRIx64
>>> +                      " + 0x%"PRIu8, s->cmdbuf, s->cmdbuf_head);
>>> +        amd_iommu_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
>>> +        return;
>>> +    }
>>> +
>>> +    type = opcode(cmd);
>>> +
>>> +    switch (type) {
>>> +    case CMD_COMPLETION_WAIT:
>>> +        iommu_completion_wait(s, cmd, type);
>>> +        break;
>>> +
>>> +    case CMD_INVAL_DEVTAB_ENTRY:
>>> +        iommu_inval_devtab_entry(s, cmd, type);
>>> +        break;
>>> +
>>> +    case CMD_INVAL_IOMMU_PAGES:
>>> +        iommu_inval_pages(s, cmd, type);
>>> +        break;
>>> +
>>> +    case CMD_INVAL_IOTLB_PAGES:
>>> +        iommu_inval_iotlb(s, cmd, type);
>>> +        break;
>>> +
>>> +    case CMD_INVAL_INTR_TABLE:
>>> +        iommu_inval_inttable(s, cmd, type);
>>> +        break;
>>> +
>>> +    case CMD_PREFETCH_IOMMU_PAGES:
>>> +        iommu_prefetch_pages(s, cmd, type);
>>> +        break;
>>> +
>>> +    case CMD_COMPLETE_PPR_REQUEST:
>>> +        iommu_complete_ppr(s, cmd, type);
>>> +        break;
>>> +
>>> +    case CMD_INVAL_IOMMU_ALL:
>>> +        iommu_inval_all(s, cmd, type);
>>> +        break;
>>> +
>>> +    default:
>>> +        IOMMU_DPRINTF(COMMAND, "unhandled command %d", type);
>>> +        /* log illegal command */
>>> +        amd_iommu_log_illegalcom_error(s, type,
>>> +                                       s->cmdbuf + s->cmdbuf_head);
>>> +        break;
>>> +    }
>>> +
>>> +}
>>> +
>>> +static void amd_iommu_cmdbuf_run(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t *mmio_cmdbuf_head = (uint64_t *)s->mmior + MMIO_COMMAND_HEAD;
>>
>> Is MMIO_COMMAND_HEAD really in units of 8 bytes?
>> Please add extra () to make it clear what is going on here.
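>> I.e. as written the cast binds first, so this is
>>
>>     ((uint64_t *)s->mmior) + MMIO_COMMAND_HEAD
>>
>> which scales the offset by 8 bytes; a plain byte offset would need
>>
>>     (uint64_t *)(s->mmior + MMIO_COMMAND_HEAD)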
>>
>>> +
>>> +    if (!s->cmdbuf_enabled) {
>>> +        IOMMU_DPRINTF(COMMAND, "error: IO MMU trying to execute commands with "
>>> +                      "command buffer disabled. IO MMU control value 0x%"PRIx64,
>>> +                      amd_iommu_readq(s, MMIO_CONTROL));
>>> +        return;
>>> +    }
>>> +
>>> +    while (s->cmdbuf_head != s->cmdbuf_tail) {
>>> +        /* check if there is work to do. */
>>> +        IOMMU_DPRINTF(COMMAND, "command buffer head at 0x%"PRIx32 " command "
>>> +                      "buffer tail at 0x%"PRIx32" command buffer base at 0x%"
>>> +                      PRIx64, s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
>>> +         amd_iommu_cmdbuf_exec(s);
>>> +         s->cmdbuf_head += IOMMU_COMMAND_SIZE;
>>> +         amd_iommu_writeq_raw(s, s->cmdbuf_head, MMIO_COMMAND_HEAD);
>>> +
>>> +        /* wrap head pointer */
>>> +        if (s->cmdbuf_head >= s->cmdbuf_len * IOMMU_COMMAND_SIZE) {
>>> +            s->cmdbuf_head = 0;
>>> +        }
>>> +    }
>>> +
>>> +    *mmio_cmdbuf_head = cpu_to_le64(s->cmdbuf_head);
>>> +}
>>> +
>>> +/* System Software might never read from some of these fields but anyway */
>>> +static uint64_t amd_iommu_mmio_read(void *opaque, hwaddr addr, unsigned size)
>>> +{
>>> +    AMDIOMMUState *s = opaque;
>>> +
>>> +    uint64_t val = -1;
>>> +    if (addr + size > MMIO_SIZE) {
>>> +        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIX64
>>> +                      ", got 0x%"PRIx64 " %d", (uint64_t)MMIO_SIZE, addr, size);
>>> +        return (uint64_t)-1;
>>> +    }
>>> +
>>> +    if (size == 2) {
>>> +        val = amd_iommu_readw(s, addr);
>>> +    } else if (size == 4) {
>>> +        val = amd_iommu_readl(s, addr);
>>> +    } else if (size == 8) {
>>> +        val = amd_iommu_readq(s, addr);
>>> +    }
>>> +
>>> +    switch (addr & ~0x07) {
>>> +    case MMIO_DEVICE_TABLE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                       addr & ~0x07);
>>> +        break;
>>> +
>>> +    case MMIO_COMMAND_BASE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_BASE read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case MMIO_EVENT_BASE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case MMIO_CONTROL:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_CONTROL read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                       addr & ~0x07);
>>> +        break;
>>> +
>>> +    case MMIO_EXCL_BASE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_BASE read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case MMIO_EXCL_LIMIT:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case MMIO_COMMAND_HEAD:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case MMIO_COMMAND_TAIL:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_TAIL read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case MMIO_EVENT_HEAD:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case MMIO_EVENT_TAIL:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case MMIO_STATUS:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_STATUS read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                      addr & ~0x07);
>>> +        break;
>>> +
>>> +    case MMIO_EXT_FEATURES:
>>> +        IOMMU_DPRINTF(MMU, "MMIO_EXT_FEATURES read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64 "value 0x%"PRIx64,
>>> +                      addr, size, addr & ~0x07, val);
>>> +        break;
>>> +
>>> +    default:
>>> +        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO read addr 0x%"PRIx64
>>> +                      ", size %d offset 0x%"PRIx64, addr, size,
>>> +                       addr & ~0x07);
>>> +    }
>>> +    return val;
>>> +}
>>> +
>>> +static void iommu_handle_control_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +    /*
>>> +     * read whatever is already written in case
>>> +     * software is writing in chunks of less than 8 bytes
>>> +     */
>>> +    unsigned long control = amd_iommu_readq(s, MMIO_CONTROL);
>>> +    s->enabled = !!(control & MMIO_CONTROL_IOMMUEN);
>>> +
>>> +    s->ats_enabled = !!(control & MMIO_CONTROL_HTTUNEN);
>>> +    s->evtlog_enabled = s->enabled && !!(control & MMIO_CONTROL_EVENTLOGEN);
>>> +
>>> +    s->evtlog_intr = !!(control & MMIO_CONTROL_EVENTINTEN);
>>> +    s->completion_wait_intr = !!(control & MMIO_CONTROL_COMWAITINTEN);
>>> +    s->cmdbuf_enabled = s->enabled && !!(control & MMIO_CONTROL_CMDBUFLEN);
>>> +
>>> +    /* update the flags depending on the control register */
>>> +    if (s->cmdbuf_enabled) {
>>> +        (*(uint64_t *)&s->mmior[MMIO_STATUS]) |= MMIO_STATUS_CMDBUF_RUN;
>>> +    } else {
>>> +        (*(uint64_t *)&s->mmior[MMIO_STATUS]) &= ~MMIO_STATUS_CMDBUF_RUN;
>>> +    }
>>> +    if (s->evtlog_enabled) {
>>> +        (*(uint64_t *)&s->mmior[MMIO_STATUS]) |= MMIO_STATUS_EVT_RUN;
>>> +    } else {
>>> +        (*(uint64_t *)&s->mmior[MMIO_STATUS]) &= ~MMIO_STATUS_EVT_RUN;
>>> +    }
>>> +
>>> +    IOMMU_DPRINTF(COMMAND, "MMIO_STATUS state 0x%"PRIx64, control);
>>> +
>>> +    amd_iommu_cmdbuf_run(s);
>>> +}
>>> +
>>> +static inline void iommu_handle_devtab_write(AMDIOMMUState *s)
>>> +
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, MMIO_DEVICE_TABLE);
>>> +    s->devtab = (dma_addr_t)(val & MMIO_DEVTAB_BASE_MASK);
>>> +
>>> +    /* set device table length */
>>> +    s->devtab_len = ((val & MMIO_DEVTAB_SIZE_MASK) + 1 *
>>> +                    (MMIO_DEVTAB_SIZE_UNIT / MMIO_DEVTAB_ENTRY_SIZE));
>>> +}
>>> +
>>> +static inline void iommu_handle_cmdhead_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    s->cmdbuf_head = (dma_addr_t)amd_iommu_readq(s, MMIO_COMMAND_HEAD)
>>> +                     & MMIO_CMDBUF_HEAD_MASK;
>>> +    amd_iommu_cmdbuf_run(s);
>>> +}
>>> +
>>> +static inline void iommu_handle_cmdbase_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    s->cmdbuf = (dma_addr_t)amd_iommu_readq(s, MMIO_COMMAND_BASE)
>>> +                & MMIO_CMDBUF_BASE_MASK;
>>> +    s->cmdbuf_len = 1UL << (s->mmior[MMIO_CMDBUF_SIZE_BYTE]
>>> +                    & MMIO_CMDBUF_SIZE_MASK);
>>> +    s->cmdbuf_head = s->cmdbuf_tail = 0;
>>> +
>>> +}
>>> +
>>> +static inline void iommu_handle_cmdtail_write(AMDIOMMUState *s)
>>> +{
>>> +    s->cmdbuf_tail = amd_iommu_readq(s, MMIO_COMMAND_TAIL)
>>> +                     & MMIO_CMDBUF_TAIL_MASK;
>>> +    amd_iommu_cmdbuf_run(s);
>>> +}
>>> +
>>> +static inline void iommu_handle_excllim_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, MMIO_EXCL_LIMIT);
>>> +    s->excl_limit = (val & MMIO_EXCL_LIMIT_MASK) | MMIO_EXCL_LIMIT_LOW;
>>> +}
>>> +
>>> +static inline void iommu_handle_evtbase_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, MMIO_EVENT_BASE);
>>> +    s->evtlog = val & MMIO_EVTLOG_BASE_MASK;
>>> +    s->evtlog_len = 1UL << (*(uint64_t *)&s->mmior[MMIO_EVTLOG_SIZE_BYTE]
>>> +                    & MMIO_EVTLOG_SIZE_MASK);
>>> +}
>>> +
>>> +static inline void iommu_handle_evttail_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, MMIO_EVENT_TAIL);
>>> +    s->evtlog_tail = val & MMIO_EVTLOG_TAIL_MASK;
>>> +}
>>> +
>>> +static inline void iommu_handle_evthead_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, MMIO_EVENT_HEAD);
>>> +    s->evtlog_head = val & MMIO_EVTLOG_HEAD_MASK;
>>> +}
>>> +
>>> +static inline void iommu_handle_pprbase_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, MMIO_PPR_BASE);
>>> +    s->ppr_log = val & MMIO_PPRLOG_BASE_MASK;
>>> +    s->pprlog_len = 1UL << (*(uint64_t *)&s->mmior[MMIO_PPRLOG_SIZE_BYTE]
>>> +                    & MMIO_PPRLOG_SIZE_MASK);
>>> +}
>>> +
>>> +static inline void iommu_handle_pprhead_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, MMIO_PPR_HEAD);
>>> +    s->pprlog_head = val & MMIO_PPRLOG_HEAD_MASK;
>>> +}
>>> +
>>> +static inline void iommu_handle_pprtail_write(AMDIOMMUState *s)
>>> +{
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    uint64_t val = amd_iommu_readq(s, MMIO_PPR_TAIL);
>>> +    s->pprlog_tail = val & MMIO_PPRLOG_TAIL_MASK;
>>> +}
>>> +
>>> +/* FIXME: something might go wrong if System Software writes in chunks
>>> + * of one byte but linux writes in chunks of 4 bytes so currently it
>>> + * works correctly with linux but will definitely be busted if software
>>> + * reads/writes 8 bytes
>>> + */
>>> +static void amd_iommu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
>>> +                                 unsigned size)
>>> +{
>>> +
>>> +    IOMMU_DPRINTF(COMMAND, "");
>>> +
>>> +    AMDIOMMUState *s = opaque;
>>> +    unsigned long offset = addr & 0x07;
>>> +
>>> +    if (addr + size > MMIO_SIZE) {
>>> +        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIx64
>>> +                      ", got 0x%"PRIx64 " %d", (uint64_t)MMIO_SIZE, addr, size);
>>> +        return;
>>> +    }
>>> +
>>> +    switch (addr & ~0x07) {
>>> +    case MMIO_CONTROL:
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr,  val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +
>>> +        IOMMU_DPRINTF(COMMAND, "MMIO_CONTROL write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        iommu_handle_control_write(s);
>>> +        break;
>>> +
>>> +    case MMIO_DEVICE_TABLE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +
>>> +       /*  set device table address
>>> +        *   This also suffers from inability to tell whether software
>>> +        *   is done writing
>>> +        */
>>> +
>>> +        if (offset || (size == 8)) {
>>> +            iommu_handle_devtab_write(s);
>>> +        }
>>> +        break;
>>> +
>>> +    case MMIO_COMMAND_HEAD:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +
>>> +        iommu_handle_cmdhead_write(s);
>>> +        break;
>>> +
>>> +    case MMIO_COMMAND_BASE:
>>> +        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_BASE write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +
>>> +        /* FIXME - make sure System Software has finished writing in case
>>> +         * it writes in chunks of less than 8 bytes in a robust way. As for
>>> +         * now, this hack works for the linux driver
>>> +         */
>>> +        if (offset || (size == 8)) {
>>> +            iommu_handle_cmdbase_write(s);
>>> +        }
>>> +        break;
>>> +
>>> +    case MMIO_COMMAND_TAIL:
>>> +        IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_TAIL write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_cmdtail_write(s);
>>> +        break;
>>> +
>>> +    case MMIO_EVENT_BASE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_evtbase_write(s);
>>> +        break;
>>> +
>>> +    case MMIO_EVENT_HEAD:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_evthead_write(s);
>>> +        break;
>>> +
>>> +    case MMIO_EVENT_TAIL:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_evttail_write(s);
>>> +        break;
>>> +
>>> +    case MMIO_EXCL_LIMIT:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_excllim_write(s);
>>> +        break;
>>> +
>>> +        /* PPR log base - unused for now */
>>> +    case MMIO_PPR_BASE:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_BASE write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_pprbase_write(s);
>>> +        break;
>>> +        /* PPR log head - also unused for now */
>>> +    case MMIO_PPR_HEAD:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_HEAD write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                       addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_pprhead_write(s);
>>> +        break;
>>> +        /* PPR log tail - unused for now */
>>> +    case MMIO_PPR_TAIL:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_PPR_TAIL write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +        iommu_handle_pprtail_write(s);
>>> +        break;
>>> +
>>> +    case MMIO_EXT_FEATURES:
>>> +        IOMMU_DPRINTF(MMIO, "MMIO_EXT_FEATURES write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +        if (size == 2) {
>>> +            amd_iommu_writew(s, addr, val);
>>> +        } else if (size == 4) {
>>> +            amd_iommu_writel(s, addr, val);
>>> +        } else if (size == 8) {
>>> +            amd_iommu_writeq(s, addr, val);
>>> +        }
>>> +
>>> +            /* ignore write to ext_features */
>>> +    default:
>>> +        IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO write addr 0x%"PRIx64
>>> +                      ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
>>> +                      addr, size, val, offset);
>>> +    }
>>> +
>>> +}
>>> +
>>> +static inline uint64_t amd_iommu_get_perms(uint64_t entry)
>>> +{
>>> +    return (entry & (DEV_PERM_READ | DEV_PERM_WRITE)) >> DEV_PERM_SHIFT;
>>> +}
>>> +
>>> +AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn)
>>> +{
>>> +    AMDIOMMUState *s = opaque;
>>> +    AMDIOMMUAddressSpace **iommu_as;
>>> +    int bus_num = pci_bus_num(bus);
>>> +
>>> +    /* just in case */
>>> +    assert(0 <= bus_num && bus_num <= PCI_BUS_MAX);
>>> +    assert(0 <= devfn && devfn <= PCI_DEVFN_MAX);
>>> +
>>> +    iommu_as = s->address_spaces[bus_num];
>>> +
>>> +    /* allocate memory during the first run */
>>> +    if (!iommu_as) {
>>> +        iommu_as = g_malloc0(sizeof(AMDIOMMUAddressSpace *) * PCI_DEVFN_MAX);
>>> +        s->address_spaces[bus_num] = iommu_as;
>>> +    }
>>> +
>>> +    /* set up IOMMU region */
>>> +    if (!iommu_as[devfn]) {
>>> +        iommu_as[devfn] = g_malloc0(sizeof(AMDIOMMUAddressSpace));
>>> +        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
>>> +        iommu_as[devfn]->devfn = (uint8_t)devfn;
>>> +        iommu_as[devfn]->iommu_state = s;
>>> +
>>> +        memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
>>> +                                 &s->iommu_ops, "amd-iommu", UINT64_MAX);
>>> +        address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
>>> +                           "amd-iommu");
>>> +    }
>>> +    return &iommu_as[devfn]->as;
>>> +}
>>> +
>>> +/* validate a device table entry */
>>> +static bool amd_iommu_validate_dte(AMDIOMMUState *s, uint16_t devid,
>>> +                                   uint64_t *dte)
>>> +{
>>> +    if ((dte[0] & DTE_LOWER_QUAD_RESERVED)
>>> +        || (dte[1] & DTE_MIDDLE_QUAD_RESERVED)
>>> +        || (dte[2] & DTE_UPPER_QUAD_RESERVED) || dte[3]) {
>>> +        amd_iommu_log_illegaldevtab_error(s, devid,
>>> +                                s->devtab + devid * DEVTAB_ENTRY_SIZE, 0);
>>> +        return false;
>>> +    }
>>> +
>>> +    return dte[0] & DEV_VALID && (dte[0] & DEV_TRANSLATION_VALID)
>>> +           && (dte[0] & DEV_PT_ROOT_MASK);
>>> +}
>>> +
>>> +/* get a device table entry given the devid */
>>> +static bool amd_iommu_get_dte(AMDIOMMUState *s, int devid, uint64_t *entry)
>>> +{
>>> +    uint32_t offset = devid * DEVTAB_ENTRY_SIZE;
>>> +
>>> +    IOMMU_DPRINTF(MMU, "Device Table at 0x%"PRIx64, s->devtab);
>>> +
>>> +    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
>>> +                        DEVTAB_ENTRY_SIZE)) {
>>> +        IOMMU_DPRINTF(MMU, "error: fail to access Device Entry devtab 0x%"PRIx64
>>> +                      "offset 0x%"PRIx32, s->devtab, offset);
>>> +        /* log error accessing dte */
>>> +        amd_iommu_log_devtab_error(s, devid, s->devtab + offset, 0);
>>> +        return false;
>>> +    }
>>> +
>>> +    if (!amd_iommu_validate_dte(s, devid, entry)) {
>>> +        IOMMU_DPRINTF(MMU,
>>> +                      "Pte entry at 0x%"PRIx64" is invalid", entry[0]);
>>> +        return false;
>>> +    }
>>> +
>>> +    return true;
>>> +}
>>> +
>>> +/* get pte translation mode */
>>> +static inline uint8_t get_pte_translation_mode(uint64_t pte)
>>> +{
>>> +    return (pte >> DEV_MODE_RSHIFT) & DEV_MODE_MASK;
>>> +}
>>> +
>>> +static int amd_iommu_page_walk(AMDIOMMUAddressSpace *as, uint64_t *dte,
>>> +                               IOMMUTLBEntry *ret, unsigned perms,
>>> +                               hwaddr addr)
>>> +{
>>> +    uint8_t level, oldlevel;
>>> +    unsigned present;
>>> +    uint64_t pte, pte_addr;
>>> +    uint64_t pte_perms;
>>> +    pte = dte[0];
>>> +
>>> +    level = get_pte_translation_mode(pte);
>>> +
>>> +    if (level >= 7 || level == 0) {
>>> +        IOMMU_DPRINTF(MMU, "error: translation level 0x%"PRIu8 " detected"
>>> +                      "while translating 0x%"PRIx64, level, addr);
>>> +        return -1;
>>> +    }
>>> +
>>> +    while (level > 0) {
>>> +        pte_perms = amd_iommu_get_perms(pte);
>>> +        present = pte & 1;
>>> +        if (!present || perms != (perms & pte_perms)) {
>>> +            amd_iommu_page_fault(as->iommu_state, as->devfn, addr, perms);
>>> +            IOMMU_DPRINTF(MMU, "error: page fault accessing virtual addr 0x%"
>>> +                          PRIx64, addr);
>>> +            return -1;
>>> +        }
>>> +
>>> +        /* go to the next lower level */
>>> +        pte_addr = pte & DEV_PT_ROOT_MASK;
>>> +        /* add offset and load pte */
>>> +        pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
>>> +        pte = ldq_phys(&address_space_memory, pte_addr);
>>> +        oldlevel = level;
>>> +        level = get_pte_translation_mode(pte);
>>> +
>>> +        /* PT is corrupted or not there */
>>> +        if (level != oldlevel - 1) {
>>> +            return -1;
>>> +        }
>>> +    }
>>> +
>>> +    ret->iova = addr & IOMMU_PAGE_MASK_4K;
>>> +    ret->translated_addr = (pte & DEV_PT_ROOT_MASK) & IOMMU_PAGE_MASK_4K;
>>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>> +    ret->perm = IOMMU_RW;
>>> +    return 0;
>>> +}
>>> +
>>> +/* TODO : Mark addresses as Accessed and Dirty */
>>> +static void amd_iommu_do_translate(AMDIOMMUAddressSpace *as, hwaddr addr,
>>> +                                   bool is_write, IOMMUTLBEntry *ret)
>>> +{
>>> +    AMDIOMMUState *s = as->iommu_state;
>>> +    uint16_t devid = PCI_DEVID(as->bus_num, as->devfn);
>>> +    IOMMUIOTLBEntry *iotlb_entry;
>>> +    uint8_t err;
>>> +    uint64_t entry[4];
>>> +
>>> +    /* try getting a cache entry first */
>>> +    iotlb_entry = amd_iommu_iotlb_lookup(s, addr, as->devfn);
>>> +
>>> +    if (iotlb_entry) {
>>> +        IOMMU_DPRINTF(CACHE, "hit  iotlb devid: %02x:%02x.%x gpa 0x%"PRIx64
>>> +                      " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
>>> +                      PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
>>> +        ret->iova = addr & IOMMU_PAGE_MASK_4K;
>>> +        ret->translated_addr = iotlb_entry->translated_addr;
>>> +        ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>> +        ret->perm = iotlb_entry->perms;
>>> +        return;
>>> +    } else {
>>> +        if (!amd_iommu_get_dte(s, devid, entry)) {
>>> +            goto out;
>>> +        }
>>> +
>>> +        err = amd_iommu_page_walk(as, entry, ret,
>>> +                                  is_write ? IOMMU_PERM_WRITE : IOMMU_PERM_READ,
>>> +                                  addr);
>>> +        if (err) {
>>> +            IOMMU_DPRINTF(MMU, "error: hardware error accessing page tables"
>>> +                          " while translating addr 0x%"PRIx64, addr);
>>> +            amd_iommu_log_pagetab_error(s, as->devfn, addr, 0);
>>> +            goto out;
>>> +        }
>>> +
>>> +        amd_iommu_update_iotlb(s, as->devfn, addr, ret->translated_addr,
>>> +                               ret->perm, entry[1] & DEV_DOMID_ID_MASK);
>>> +        return;
>>> +    }
>>> +
>>> +out:
>>> +    ret->iova = addr;
>>> +    ret->translated_addr = addr & IOMMU_PAGE_MASK_4K;
>>> +    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
>>> +    ret->perm = IOMMU_RW;
>>> +    return;
>>> +}
>>> +
>>> +static IOMMUTLBEntry amd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
>>> +                                         bool is_write)
>>> +{
>>> +    IOMMU_DPRINTF(GENERAL, "");
>>> +
>>> +    AMDIOMMUAddressSpace *as = container_of(iommu, AMDIOMMUAddressSpace, iommu);
>>> +    AMDIOMMUState *s = as->iommu_state;
>>> +
>>> +    IOMMUTLBEntry ret = {
>>> +        .target_as = &address_space_memory,
>>> +        .iova = addr,
>>> +        .translated_addr = 0,
>>> +        .addr_mask = ~(hwaddr)0,
>>> +        .perm = IOMMU_NONE,
>>> +    };
>>> +
>>> +    if (!s->enabled) {
>>> +        /* IOMMU disabled - corresponds to iommu=off not
>>> +         * failure to provide any parameter
>>> +         */
>>> +        ret.iova = addr & IOMMU_PAGE_MASK_4K;
>>> +        ret.translated_addr = addr & IOMMU_PAGE_MASK_4K;
>>> +        ret.addr_mask = ~IOMMU_PAGE_MASK_4K;
>>> +        ret.perm = IOMMU_RW;
>>> +        return ret;
>>> +    }
>>> +
>>> +    amd_iommu_do_translate(as, addr, is_write, &ret);
>>> +    IOMMU_DPRINTF(MMU, "devid: %02x:%02x.%x gpa 0x%"PRIx64 " hpa 0x%"PRIx64,
>>> +                  as->bus_num, PCI_SLOT(as->devfn), PCI_FUNC(as->devfn), addr,
>>> +                  ret.translated_addr);
>>> +
>>> +    return ret;
>>> +}
>>> +
>>> +static const MemoryRegionOps mmio_mem_ops = {
>>> +    .read = amd_iommu_mmio_read,
>>> +    .write = amd_iommu_mmio_write,
>>> +    .endianness = DEVICE_LITTLE_ENDIAN,
>>> +    .impl = {
>>> +        .min_access_size = 1,
>>> +        .max_access_size = 8,
>>> +    }
>>> +};
>>> +
>>> +static void amd_iommu_init(AMDIOMMUState *s)
>>> +{
>>> +    amd_iommu_iotlb_reset(s);
>>> +
>>> +    s->iommu_ops.translate = amd_iommu_translate;
>>> +
>>> +    s->devtab_len = 0;
>>> +    s->cmdbuf_len = 0;
>>> +    s->cmdbuf_head = 0;
>>> +    s->cmdbuf_tail = 0;
>>> +    s->evtlog_head = 0;
>>> +    s->evtlog_tail = 0;
>>> +    s->excl_enabled = false;
>>> +    s->excl_allow = false;
>>> +    s->mmio_enabled = false;
>>> +    s->enabled = false;
>>> +    s->ats_enabled = false;
>>> +    s->cmdbuf_enabled = false;
>>> +
>>> +    /* reset MMIO */
>>> +    memset(s->mmior, 0, MMIO_SIZE);
>>> +    amd_iommu_set_quad(s, MMIO_EXT_FEATURES, EXT_FEATURES, 0xffffffffffffffef,
>>> +                       0);
>>> +    amd_iommu_set_quad(s, MMIO_STATUS, 0, 0x98, 0x67);
>>> +    /* reset device ident */
>>> +    pci_config_set_vendor_id(s->dev.config, PCI_VENDOR_ID_AMD);
>>> +    pci_config_set_device_id(s->dev.config, PCI_DEVICE_ID_RD890_IOMMU);
>>> +    pci_config_set_prog_interface(s->dev.config, 00);
>>> +    pci_config_set_class(s->dev.config, 0x0806);
>>> +
>>> +    /* add msi and hypertransport capabilities */
>>> +    pci_add_capability(&s->dev, PCI_CAP_ID_MSI, 0, CAPAB_REG_SIZE);
>>> +    pci_add_capability(&s->dev, PCI_CAP_ID_HT, 0, CAPAB_REG_SIZE);
>>> +
>>> +    /* reset IOMMU specific capabilities  */
>>> +    pci_set_long(s->dev.config + s->capab_offset, CAPAB_FEATURES);
>>> +    pci_set_long(s->dev.config + s->capab_offset + CAPAB_BAR_LOW,
>>> +                 s->mmio.addr & ~(0xffff0000));
>>> +    pci_set_long(s->dev.config + s->capab_offset + CAPAB_BAR_HIGH,
>>> +                (s->mmio.addr & ~(0xffff)) >> 16);
>>> +    pci_set_long(s->dev.config + s->capab_offset + CAPAB_RANGE, 0xff000000);
>>> +    pci_set_long(s->dev.config + s->capab_offset + CAPAB_MISC, MAX_PH_ADDR |
>>> +                 MAX_GVA_ADDR | MAX_VA_ADDR);
>>> +}
>>> +
>>> +static void amd_iommu_reset(DeviceState *dev)
>>> +{
>>> +    AMDIOMMUState *s = AMD_IOMMU_DEVICE(dev);
>>> +
>>> +    amd_iommu_init(s);
>>> +}
>>> +
>>> +static void amd_iommu_realize(PCIDevice *dev, Error **error)
>>> +{
>>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>>> +
>>> +    s->iotlb = g_hash_table_new_full(amd_iommu_uint64_hash,
>>> +                                     amd_iommu_uint64_equal, g_free, g_free);
>>> +
>>> +    s->capab_offset = pci_add_capability(dev, CAPAB_ID_SEC, 0, CAPAB_SIZE);
>>> +
>>> +    amd_iommu_init(s);
>>> +
>>> +    /* set up MMIO */
>>> +    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "mmio",
>>> +                          MMIO_SIZE);
>>> +
>>> +    if (s->mmio.addr == IOMMU_BASE_ADDR) {
>>> +        return;
>>> +    }
>>> +
>>> +    s->mmio.addr = IOMMU_BASE_ADDR;
>>> +    memory_region_add_subregion(get_system_memory(), IOMMU_BASE_ADDR, &s->mmio);
>>> +}
>>> +
>>> +static const VMStateDescription vmstate_amd_iommu = {
>>> +    .name = "amd-iommu",
>>> +    .fields  = (VMStateField[]) {
>>> +        VMSTATE_PCI_DEVICE(dev, AMDIOMMUState),
>>> +        VMSTATE_END_OF_LIST()
>>> +    }
>>> +};
>>> +
>>> +static Property amd_iommu_properties[] = {
>>> +    DEFINE_PROP_UINT32("version", AMDIOMMUState, version, 2),
>>> +    DEFINE_PROP_END_OF_LIST(),
>>> +};
>>> +
>>> +static void amd_iommu_uninit(PCIDevice *dev)
>>> +{
>>> +    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
>>> +    amd_iommu_iotlb_reset(s);
>>> +}
>>> +
>>> +static void amd_iommu_class_init(ObjectClass *klass, void* data)
>>> +{
>>> +    DeviceClass *dc = DEVICE_CLASS(klass);
>>> +    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
>>> +
>>> +    k->realize = amd_iommu_realize;
>>> +    k->exit = amd_iommu_uninit;
>>> +
>>> +    dc->reset = amd_iommu_reset;
>>> +    dc->vmsd = &vmstate_amd_iommu;
>>> +    dc->props = amd_iommu_properties;
>>> +}
>>> +
>>> +static const TypeInfo amd_iommu = {
>>> +    .name = TYPE_AMD_IOMMU_DEVICE,
>>> +    .parent = TYPE_PCI_DEVICE,
>>> +    .instance_size = sizeof(AMDIOMMUState),
>>> +    .class_init = amd_iommu_class_init
>>> +};
>>> +
>>> +static void amd_iommu_register_types(void)
>>> +{
>>> +    type_register_static(&amd_iommu);
>>> +}
>>> +
>>> +type_init(amd_iommu_register_types);
>>> diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
>>> new file mode 100644
>>> index 0000000..e08d6b4
>>> --- /dev/null
>>> +++ b/hw/i386/amd_iommu.h
>>> @@ -0,0 +1,399 @@
>>> +/*
>>> + * QEMU emulation of an AMD IOMMU (AMD-Vi)
>>> + *
>>> + * Copyright (C) 2011 Eduard - Gabriel Munteanu
>>> + * Copyright (C) 2015 David Kiarie, <address@hidden>
>>> + *
>>> + * This program is free software; you can redistribute it and/or modify
>>> + * it under the terms of the GNU General Public License as published by
>>> + * the Free Software Foundation; either version 2 of the License, or
>>> + * (at your option) any later version.
>>> +
>>> + * This program is distributed in the hope that it will be useful,
>>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>>> + * GNU General Public License for more details.
>>> +
>>> + * You should have received a copy of the GNU General Public License along
>>> + * with this program; if not, see <http://www.gnu.org/licenses/>.
>>> + */
>>> +
>>> +#ifndef AMD_IOMMU_H_
>>> +#define AMD_IOMMU_H_
>>> +
>>> +#include "hw/hw.h"
>>> +#include "hw/pci/pci.h"
>>> +#include "hw/sysbus.h"
>>> +#include "sysemu/dma.h"
>>
>> Pls prefix macros and global variables/functions with amd_iommu_
>> to avoid collision with common headers.
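>> e.g. (names are only a suggestion):
>>
>>     #define AMD_IOMMU_CAPAB_HEADER   0x00
>>     #define AMD_IOMMU_MMIO_CONTROL   0x0018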
>>
>>> +
>>> +/* Capability registers */
>>> +#define CAPAB_HEADER            0x00
>>> +#define   CAPAB_REV_TYPE        0x02
>>> +#define   CAPAB_FLAGS           0x03
>>> +#define CAPAB_BAR_LOW           0x04
>>> +#define CAPAB_BAR_HIGH          0x08
>>> +#define CAPAB_RANGE             0x0C
>>> +#define CAPAB_MISC              0x10
>>> +#define CAPAB_MISC1             0x14
>>> +
>>> +#define CAPAB_SIZE              0x18
>>> +#define CAPAB_REG_SIZE          0x04
>>> +
>>> +/* Capability header data */
>>> +#define CAPAB_ID_SEC            0xff
>>> +#define CAPAB_FLAT_EXT          (1 << 28)
>>> +#define CAPAB_EFR_SUP           (1 << 27)
>>> +#define CAPAB_FLAG_NPCACHE      (1 << 26)
>>> +#define CAPAB_FLAG_HTTUNNEL     (1 << 25)
>>> +#define CAPAB_FLAG_IOTLBSUP     (1 << 24)
>>> +#define CAPAB_INIT_REV          (1 << 19)
>>> +#define CAPAB_INIT_TYPE         (3 << 16)
>>> +#define CAPAB_INIT_REV_TYPE     (CAPAB_REV | CAPAB_TYPE)
>>> +#define CAPAB_INIT_FLAGS        (CAPAB_FLAG_NPCACHE | CAPAB_FLAG_HTTUNNEL)
>>> +#define CAPAB_INIT_MISC         ((64 << 15) | (48 << 8))
>>> +#define CAPAB_BAR_MASK          (~((1UL << 14) - 1))
>>> +
>>> +/* MMIO registers */
>>> +#define MMIO_DEVICE_TABLE       0x0000
>>> +#define MMIO_COMMAND_BASE       0x0008
>>> +#define MMIO_EVENT_BASE         0x0010
>>> +#define MMIO_CONTROL            0x0018
>>> +#define MMIO_EXCL_BASE          0x0020
>>> +#define MMIO_EXCL_LIMIT         0x0028
>>> +#define MMIO_EXT_FEATURES       0x0030
>>> +#define MMIO_COMMAND_HEAD       0x2000
>>> +#define MMIO_COMMAND_TAIL       0x2008
>>> +#define MMIO_EVENT_HEAD         0x2010
>>> +#define MMIO_EVENT_TAIL         0x2018
>>> +#define MMIO_STATUS             0x2020
>>> +#define MMIO_PPR_BASE           0x0038
>>> +#define MMIO_PPR_HEAD           0x2030
>>> +#define MMIO_PPR_TAIL           0x2038
>>> +
>>> +#define MMIO_SIZE               0x4000
>>> +
>>> +#define MMIO_DEVTAB_SIZE_MASK   ((1ULL << 12) - 1)
>>> +#define MMIO_DEVTAB_BASE_MASK   (((1ULL << 52) - 1) & ~MMIO_DEVTAB_SIZE_MASK)
>>> +#define MMIO_DEVTAB_ENTRY_SIZE  32
>>> +#define MMIO_DEVTAB_SIZE_UNIT   4096
>>> +
>>> +/* some of these are similar but kept separate for readability */
>>> +#define MMIO_CMDBUF_SIZE_BYTE       (MMIO_COMMAND_BASE + 7)
>>> +#define MMIO_CMDBUF_SIZE_MASK       0x0F
>>> +#define MMIO_CMDBUF_BASE_MASK       MMIO_DEVTAB_BASE_MASK
>>> +#define MMIO_CMDBUF_DEFAULT_SIZE    8
>>> +#define MMIO_CMDBUF_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>>> +#define MMIO_CMDBUF_TAIL_MASK       MMIO_EVTLOG_HEAD_MASK
>>> +
>>> +#define MMIO_EVTLOG_SIZE_BYTE       (MMIO_EVENT_BASE + 7)
>>> +#define MMIO_EVTLOG_SIZE_MASK       MMIO_CMDBUF_SIZE_MASK
>>> +#define MMIO_EVTLOG_BASE_MASK       MMIO_CMDBUF_BASE_MASK
>>> +#define MMIO_EVTLOG_DEFAULT_SIZE    MMIO_CMDBUF_DEFAULT_SIZE
>>> +#define MMIO_EVTLOG_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
>>> +#define MMIO_EVTLOG_TAIL_MASK       MMIO_EVTLOG_HEAD_MASK
>>> +
>>> +#define MMIO_PPRLOG_SIZE_BYTE       (MMIO_EVENT_BASE + 7)
>>> +#define MMIO_PPRLOG_HEAD_MASK       MMIO_EVTLOG_HEAD_MASK
>>> +#define MMIO_PPRLOG_TAIL_MASK       MMIO_EVTLOG_HEAD_MASK
>>> +#define MMIO_PPRLOG_BASE_MASK       MMIO_EVTLOG_BASE_MASK
>>> +#define MMIO_PPRLOG_SIZE_MASK       MMIO_EVTLOG_SIZE_MASK
>>> +
>>> +#define MMIO_EXCL_BASE_MASK         MMIO_DEVTAB_BASE_MASK
>>> +#define MMIO_EXCL_ENABLED_MASK      (1ULL << 0)
>>> +#define MMIO_EXCL_ALLOW_MASK        (1ULL << 1)
>>> +#define MMIO_EXCL_LIMIT_MASK        MMIO_DEVTAB_BASE_MASK
>>> +#define MMIO_EXCL_LIMIT_LOW         0xFFF
>>> +
>>> +/* mmio control register flags */
>>> +#define MMIO_CONTROL_IOMMUEN        (1ULL << 0)
>>> +#define MMIO_CONTROL_HTTUNEN        (1ULL << 1)
>>> +#define MMIO_CONTROL_EVENTLOGEN     (1ULL << 2)
>>> +#define MMIO_CONTROL_EVENTINTEN     (1ULL << 3)
>>> +#define MMIO_CONTROL_COMWAITINTEN   (1ULL << 4)
>>> +#define MMIO_CONTROL_PASSPW         (1ULL << 7)
>>> +#define MMIO_CONTROL_REPASSPW       (1ULL << 9)
>>> +#define MMIO_CONTROL_COHERENT       (1ULL << 10)
>>> +#define MMIO_CONTROL_ISOC           (1ULL << 11)
>>> +#define MMIO_CONTROL_CMDBUFLEN      (1ULL << 12)
>>> +#define MMIO_CONTROL_PPRLOGEN       (1ULL << 13)
>>> +#define MMIO_CONTROL_PPRINTEN       (1ULL << 14)
>>> +#define MMIO_CONTROL_PPREN          (1ULL << 15)
>>> +#define MMIO_CONTROL_GAEN           (1ULL << 16)
>>> +#define MMIO_CONTROL_GTEN           (1ULL << 17)
>>> +
>>> +/* MMIO status register bits */
>>> +#define MMIO_STATUS_PPR_OVFE    (1 << 18)
>>> +#define MMIO_STATUS_PPR_OVFEB   (1 << 17)
>>> +#define MMIO_STATUS_EVT_ACTIVE  (1 << 16)
>>> +#define MMIO_STATUS_EVT_OVFB    (1 << 15)
>>> +#define MMIO_STATUS_PPR_ACTIVE  (1 << 12)
>>> +#define MMIO_STATUS_PPR_OVFB    (1 << 11)
>>> +#define MMIO_STATUS_GA_INT      (1 << 10)
>>> +#define MMIO_STATUS_GA_RUN      (1 << 9)
>>> +#define MMIO_STATUS_GA_OVF      (1 << 8)
>>> +#define MMIO_STATUS_PPR_RUN     (1 << 7)
>>> +#define MMIO_STATUS_PPR_INT     (1 << 6)
>>> +#define MMIO_STATUS_PPR_OVF     (1 << 5)
>>> +#define MMIO_STATUS_CMDBUF_RUN  (1 << 4)
>>> +#define MMIO_STATUS_EVT_RUN     (1 << 3)
>>> +#define MMIO_STATUS_COMP_INT    (1 << 2)
>>> +#define MMIO_STATUS_EVT_INT     (1 << 1)
>>> +#define MMIO_STATUS_EVT_OVF     (1 << 0)
>>> +
>>> +#define CMDBUF_ID_BYTE              0x07
>>> +#define CMDBUF_ID_RSHIFT            4
>>> +
>>> +#define CMD_COMPLETION_WAIT         0x01
>>> +#define CMD_INVAL_DEVTAB_ENTRY      0x02
>>> +#define CMD_INVAL_IOMMU_PAGES       0x03
>>> +#define CMD_INVAL_IOTLB_PAGES       0x04
>>> +#define CMD_INVAL_INTR_TABLE        0x05
>>> +#define CMD_PREFETCH_IOMMU_PAGES    0x06
>>> +#define CMD_COMPLETE_PPR_REQUEST    0x07
>>> +#define CMD_INVAL_IOMMU_ALL         0x08
>>> +
>>> +#define DEVTAB_ENTRY_SIZE           32
>>> +
>>> +/* Device table entry bits 0:63 */
>>> +#define DEV_VALID                   (1ULL << 0)
>>> +#define DEV_TRANSLATION_VALID       (1ULL << 1)
>>> +#define DEV_MODE_MASK               0x7
>>> +#define DEV_MODE_RSHIFT             9
>>> +#define DEV_PT_ROOT_MASK            0xFFFFFFFFFF000
>>> +#define DEV_PT_ROOT_RSHIFT          12
>>> +#define DEV_PERM_SHIFT              61
>>> +#define DEV_PERM_READ               (1ULL << 61)
>>> +#define DEV_PERM_WRITE              (1ULL << 62)
>>> +
>>> +/* Device table entry bits 64:127 */
>>> +#define DEV_DOMID_ID_MASK          ((1ULL << 16) - 1)
>>> +#define DEV_IOTLB_SUPPORT           (1ULL << 17)
>>> +#define DEV_SUPPRESS_PF             (1ULL << 18)
>>> +#define DEV_SUPPRESS_ALL_PF         (1ULL << 19)
>>> +#define DEV_IOCTL_MASK              (~3)
>>> +#define DEV_IOCTL_RSHIFT            20
>>> +#define   DEV_IOCTL_DENY            0
>>> +#define   DEV_IOCTL_PASSTHROUGH     1
>>> +#define   DEV_IOCTL_TRANSLATE       2
>>> +#define DEV_CACHE                   (1ULL << 37)
>>> +#define DEV_SNOOP_DISABLE           (1ULL << 38)
>>> +#define DEV_EXCL                    (1ULL << 39)
>>> +
>>> +/* Event codes and flags, as stored in the info field */
>>> +#define EVENT_ILLEGAL_DEVTAB_ENTRY  (0x1U << 12)
>>> +#define EVENT_IOPF                  (0x2U << 12)
>>> +#define   EVENT_IOPF_I              (1U << 3)
>>> +#define   EVENT_IOPF_PR             (1U << 4)
>>> +#define   EVENT_IOPF_RW             (1U << 5)
>>> +#define   EVENT_IOPF_PE             (1U << 6)
>>> +#define   EVENT_IOPF_RZ             (1U << 7)
>>> +#define   EVENT_IOPF_TR             (1U << 8)
>>> +#define EVENT_DEV_TAB_HW_ERROR      (0x3U << 12)
>>> +#define EVENT_PAGE_TAB_HW_ERROR     (0x4U << 12)
>>> +#define EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12)
>>> +#define EVENT_COMMAND_HW_ERROR      (0x6U << 12)
>>> +#define EVENT_IOTLB_INV_TIMEOUT     (0x7U << 12)
>>> +#define EVENT_INVALID_DEV_REQUEST   (0x8U << 12)
>>> +
>>> +#define EVENT_LEN                   16
>>> +
>>> +#define IOMMU_PERM_READ             (1 << 0)
>>> +#define IOMMU_PERM_WRITE            (1 << 1)
>>> +#define IOMMU_PERM_RW               (IOMMU_PERM_READ | IOMMU_PERM_WRITE)
>>> +
>>> +/* AMD RD890 Chipset */
>>> +#define PCI_DEVICE_ID_RD890_IOMMU   0x20
>>> +
>>> +#define FEATURE_PREFETCH            (1ULL << 0)
>>> +#define FEATURE_PPR                 (1ULL << 1)
>>> +#define FEATURE_NX                  (1ULL << 3)
>>> +#define FEATURE_GT                  (1ULL << 4)
>>> +#define FEATURE_IA                  (1ULL << 6)
>>> +#define FEATURE_GA                  (1ULL << 7)
>>> +#define FEATURE_HE                  (1ULL << 8)
>>> +#define FEATURE_PC                  (1ULL << 9)
>>> +
>>> +/* reserved DTE bits */
>>> +#define DTE_LOWER_QUAD_RESERVED  0x80300000000000fc
>>> +#define DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
>>> +#define DTE_UPPER_QUAD_RESERVED  0x08f0000000000000
>>> +
>>> +/* IOMMU paging mode */
>>> +#define GATS_MODE                 (6ULL <<  12)
>>> +#define HATS_MODE                 (6ULL <<  10)
>>> +
>>> +/* PCI SIG constants */
>>> +#define PCI_BUS_MAX 256
>>> +#define PCI_SLOT_MAX 32
>>> +#define PCI_FUNC_MAX 8
>>> +#define PCI_DEVFN_MAX 256
>>> +
>>> +/* IOTLB */
>>> +#define IOMMU_IOTLB_MAX_SIZE 1024
>>> +#define IOMMU_DEVID_SHIFT    36
>>> +
>>> +/* extended feature support */
>>> +#define EXT_FEATURES (FEATURE_PREFETCH | FEATURE_PPR | FEATURE_NX \
>>> +        | FEATURE_IA | FEATURE_GT | FEATURE_GA | FEATURE_HE | GATS_MODE | HATS_MODE)
>>> +
>>> +/* capabilities header */
>>> +#define CAPAB_FEATURES (CAPAB_FLAT_EXT | CAPAB_FLAG_NPCACHE | \
>>> +        CAPAB_FLAG_IOTLBSUP | CAPAB_ID_SEC | CAPAB_INIT_TYPE | \
>>> +        CAPAB_FLAG_HTTUNNEL |  CAPAB_EFR_SUP)
>>> +
>>> +/* command constants */
>>> +#define COM_STORE_ADDRESS_MASK 0xffffffffffff8
>>> +#define COM_COMPLETION_STORE_MASK 0x1
>>> +#define COM_COMPLETION_INTR 0x2
>>> +#define COM_COMPLETION_DATA_OFF 0x8
>>> +#define IOMMU_COMMAND_SIZE 0x10
>>> +
>>> +/* IOMMU default address */
>>> +#define IOMMU_BASE_ADDR 0xfed80000
>>> +
>>> +/* page management constants */
>>> +#define IOMMU_PAGE_SHIFT 12
>>> +#define IOMMU_PAGE_SIZE  (1ULL << IOMMU_PAGE_SHIFT)
>>> +
>>> +#define IOMMU_PAGE_SHIFT_4K 12
>>> +#define IOMMU_PAGE_MASK_4K  (~((1ULL << IOMMU_PAGE_SHIFT_4K) - 1))
>>> +#define IOMMU_PAGE_SHIFT_2M 21
>>> +#define IOMMU_PAGE_MASK_2M  (~((1ULL << IOMMU_PAGE_SHIFT_2M) - 1))
>>> +#define IOMMU_PAGE_SHIFT_1G 30
>>> +#define IOMMU_PAGE_MASK_1G (~((1ULL << IOMMU_PAGE_SHIFT_1G) - 1))
>>> +
>>> +#define PCI_SLOT(devfn)      (((devfn) >> 3) & 0x1f)
>>> +#define PCI_FUNC(devfn)      ((devfn) & 0x07)
>>> +
>>> +#define MAX_VA_ADDR          (48UL << 5)
>>> +#define MAX_PH_ADDR          (40UL << 8)
>>> +#define MAX_GVA_ADDR         (48UL << 15)
>>> +
>>> +/* invalidation command device id */
>>> +#define INVAL_DEV_ID_SHIFT  32
>>> +#define INVAL_DEV_ID_MASK   (~((1UL << INVAL_DEV_ID_SHIFT) - 1))
>>> +
>>> +/* invalidation address */
>>> +#define INVAL_ADDR_MASK_SHIFT 12
>>> +#define INVAL_ADDR_MASK     (~((1UL << INVAL_ADDR_MASK_SHIFT) - 1))
>>> +
>>> +/* invalidation S bit mask */
>>> +#define INVAL_ALL(val) ((val) & (0x1))
>>> +
>>> +/* reserved bits */
>>> +#define COMPLETION_WAIT_RSVD    0x0ff000000
>>> +#define CMD_INVAL_DEV_RSVD      0xffff00000fffffff
>>> +#define INVAL_IOMMU_PAGES_RSVD  0xfff000000fff0000
>>> +#define INVAL_IOTLB_PAGES_RSVD  0x00000ff4
>>> +#define INVAL_INTR_TABLE_RSVD   0xffff00000fffffff
>>> +#define PRF_IOMMU_PAGES_RSVD    0x00ff00000ff00000
>>> +#define COMPLETE_PPR_RQ_RSVD    0xffff00000ff00000
>>> +#define INVAL_IOMMU_ALL_RSVD    0x0fffffff00000000
>>> +
>>> +/* command masks - inval iommu pages */
>>> +#define INVAL_PAGES_PASID       (~((1UL << 20) - 1))
>>> +#define INVAL_PAGES_DOMID       (((1UL << 16) - 1) << 32)
>>> +#define INVAL_PAGES_ADDRESS     (~((1UL << 12) - 1))
>>> +#define INVAL_PAGES_SBIT        (1UL << 0)
>>> +#define INVAL_PAGES_PDE         (1UL << 1)
>>> +#define INVAL_PAGES_GN          (1UL << 2)
>>> +
>>> +/* masks - inval iotlb pages */
>>> +#define INVAL_IOTLB_DEVID       (~((1UL << 16) - 1))
>>> +#define INVAL_IOTLB_PASID_LOW   (0xff << 15)
>>> +#define INVAL_IOTLB_MAXPEND     (0xff << 23)
>>> +#define INVAL_IOTLB_QUEUEID     (~((1UL << 16) - 1))
>>> +#define INVAL_IOTLB_PASID_HIGH  (0xff << 46)
>>> +#define INVAL_IOTLB_GN          INVAL_PAGES_GN
>>> +#define INVAL_IOTLB_S           INVAL_PAGES_SBIT
>>> +#define INVAL_IOTLB_ADDRESS     INVAL_PAGES_ADDRESS
>>> +#define INVAL_IOTLB_MAKEPASID(low, high)
>>> +
>>> +/* masks - prefetch pages   */
>>> +#define PREFETCH_PAGES_DEVID     INVAL_IOTLB_DEVID
>>> +#define PREFETCH_PAGES_PFCOUNT   INVAL_IOTLB_MAXPEND
>>> +
>>> +#define TYPE_AMD_IOMMU_DEVICE "amd-iommu"
>>> +#define AMD_IOMMU_DEVICE(obj)\
>>> +    OBJECT_CHECK(AMDIOMMUState, (obj), TYPE_AMD_IOMMU_DEVICE)
>>> +
>>> +#define AMD_IOMMU_STR "amd"
>>> +
>>> +typedef struct AMDIOMMUState AMDIOMMUState;
>>> +
>>> +typedef struct AMDIOMMUAddressSpace {
>>> +    uint8_t bus_num;            /* bus number                           */
>>> +    uint8_t devfn;              /* device function                      */
>>> +    AMDIOMMUState *iommu_state; /* IOMMU - one per machine              */
>>> +    MemoryRegion iommu;         /* Device's iommu region                */
>>> +    AddressSpace as;            /* device's corresponding address space */
>>> +} AMDIOMMUAddressSpace;
>>> +
>>> +struct AMDIOMMUState {
>>> +    PCIDevice dev;               /* The PCI device itself        */
>>> +
>>> +    uint32_t version;
>>> +
>>> +    uint32_t capab_offset;       /* capability offset pointer    */
>>> +    uint64_t mmio_addr;
>>> +    uint8_t *capab;              /* capabilities registers       */
>>> +
>>> +    bool enabled;                /* IOMMU enabled                */
>>> +    bool ats_enabled;            /* address translation enabled  */
>>> +    bool cmdbuf_enabled;         /* command buffer enabled       */
>>> +    bool evtlog_enabled;         /* event log enabled            */
>>> +    bool excl_enabled;
>>> +
>>> +    dma_addr_t devtab;           /* base address device table    */
>>> +    size_t devtab_len;           /* device table length          */
>>> +
>>> +    dma_addr_t cmdbuf;           /* command buffer base address  */
>>> +    uint64_t cmdbuf_len;         /* command buffer length        */
>>> +    uint32_t cmdbuf_head;        /* current IOMMU read position  */
>>> +    uint32_t cmdbuf_tail;        /* next Software write position */
>>> +    bool completion_wait_intr;
>>> +
>>> +    dma_addr_t evtlog;           /* base address event log       */
>>> +    bool evtlog_intr;
>>> +    uint32_t evtlog_len;         /* event log length             */
>>> +    uint32_t evtlog_head;        /* current IOMMU write position */
>>> +    uint32_t evtlog_tail;        /* current Software read position */
>>> +
>>> +    /* unused for now */
>>> +    dma_addr_t excl_base;        /* base DVA - IOMMU exclusion range */
>>> +    dma_addr_t excl_limit;       /* limit of IOMMU exclusion range   */
>>> +    bool excl_allow;             /* translate accesses to the exclusion range */
>>> +    bool excl_enable;            /* exclusion range enabled          */
>>> +
>>> +    dma_addr_t ppr_log;          /* base address ppr log */
>>> +    uint32_t pprlog_len;         /* ppr log len  */
>>> +    uint32_t pprlog_head;        /* ppr log head */
>>> +    uint32_t pprlog_tail;        /* ppr log tail */
>>> +
>>> +    MemoryRegion mmio;           /* MMIO region                  */
>>> +    uint8_t mmior[MMIO_SIZE];    /* read/write MMIO              */
>>> +    uint8_t w1cmask[MMIO_SIZE];  /* read/write 1 clear mask      */
>>> +    uint8_t romask[MMIO_SIZE];   /* MMIO read/only mask          */
>>> +    bool mmio_enabled;
>>> +
>>> +    /* IOMMU function */
>>> +    MemoryRegionIOMMUOps iommu_ops;
>>> +
>>> +    /* for each served device */
>>> +    AMDIOMMUAddressSpace **address_spaces[PCI_BUS_MAX];
>>> +
>>> +    /* IOTLB */
>>> +    GHashTable *iotlb;
>>> +};
>>> +
>>> +typedef struct IOMMUIOTLBEntry {
>>> +    uint64_t gfn;
>>> +    uint16_t domid;
>>> +    uint64_t devid;
>>> +    uint64_t perms;
>>> +    uint64_t translated_addr;
>>> +} IOMMUIOTLBEntry;
>>> +
>>
>> Unlike VT-d, the public interface seems to use none of the above.
>> So why expose it in the public header?
>>
>
> AMDIOMMUState is referenced in q35.c, so I'm not moving it. I moved the
> other struct, though.

Okay, a forward declaration will do; you're right.
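
For what it's worth, the split could look roughly like this (a minimal
sketch, assuming q35.c only needs the typedef and the address-space
hook):

    /* hw/i386/amd_iommu.h - only what external users such as q35.c need */
    typedef struct AMDIOMMUState AMDIOMMUState;
    AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn);

    /* hw/i386/amd_iommu.c - the full definition stays private */
    struct AMDIOMMUState {
        PCIDevice dev;               /* The PCI device itself */
        /* ... remaining fields as in this patch ... */
    };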

>
>>> +AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn);
>>
>> Interface must be documented in header.
>> Internal functions should preferably have a bit of
>> documentation too, but that's less important there.
>>
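
For reference, a documented declaration could read something like this
(wording is only a sketch, assuming the usual pci_setup_iommu() hookup):

    /*
     * Return the DMA address space for the device at @devfn on @bus.
     *
     * @opaque is the AMDIOMMUState registered with pci_setup_iommu();
     * the returned AddressSpace is the one through which the IOMMU
     * translates DMA from this device.
     */
    AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn);
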
>>> +
>>> +#endif
>>> diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
>>> index dedf277..61deace 100644
>>> --- a/include/hw/pci/pci.h
>>> +++ b/include/hw/pci/pci.h
>>> @@ -15,6 +15,8 @@
>>>
>>>  /* PCI bus */
>>>
>>> +#define PCI_BUS_NUM(x)          (((x) >> 8) & 0xff)
>>> +#define PCI_DEVID(bus, devfn)   ((((uint16_t)(bus)) << 8) | (devfn))
>>>  #define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
>>>  #define PCI_SLOT(devfn)         (((devfn) >> 3) & 0x1f)
>>>  #define PCI_FUNC(devfn)         ((devfn) & 0x07)
>>> --
>>> 2.1.4


