Re: [Qemu-devel] [PATCH v2 3/9] hw: arm: SMMUv3 emulation model
From: Edgar E. Iglesias
Subject: Re: [Qemu-devel] [PATCH v2 3/9] hw: arm: SMMUv3 emulation model
Date: Sun, 25 Sep 2016 18:37:05 +0200
User-agent: Mutt/1.5.24 (2015-08-30)
On Mon, Aug 22, 2016 at 09:47:34PM +0530, Prem Mallappa wrote:
> Big patch adds SMMUv3 model to Qemu
> - As per SMMUv3 spec 16.0
> - Works with SMMUv3 driver in Linux 4.7rc1
> - Only LPAE mode translation supported
> - BE mode is not supported yet
> - Stage1, Stage2 and S1+S2
> - Suspend/resume not tested
Thanks Prem,
I'm going to look at the PCI parts and get back to you with
comments on that.
I've put another round of comments inline:
>
> Signed-off-by: Prem Mallappa <address@hidden>
> ---
> hw/arm/smmu-common.c | 152 +++++
> hw/arm/smmu-common.h | 141 +++++
> hw/arm/smmu-v3.c | 1369 ++++++++++++++++++++++++++++++++++++++++++++++
> hw/arm/smmuv3-internal.h | 432 +++++++++++++++
> hw/vfio/common.c | 2 +-
> 5 files changed, 2095 insertions(+), 1 deletion(-)
> create mode 100644 hw/arm/smmu-common.c
> create mode 100644 hw/arm/smmu-common.h
> create mode 100644 hw/arm/smmu-v3.c
> create mode 100644 hw/arm/smmuv3-internal.h
>
> diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
> new file mode 100644
> index 0000000..bf2039b
> --- /dev/null
> +++ b/hw/arm/smmu-common.c
> @@ -0,0 +1,152 @@
> +/*
> + * Copyright (C) 2014-2016 Broadcom Corporation
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, write to the Free Software
> + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
> + *
> + * Author: Prem Mallappa <address@hidden>
> + *
> + */
> +
> +#include "qemu/osdep.h"
> +#include "sysemu/sysemu.h"
> +#include "exec/address-spaces.h"
> +
> +#include "smmu-common.h"
> +
> +inline MemTxResult smmu_read_sysmem(hwaddr addr, void *buf, int len,
> + bool secure)
> +{
> + MemTxAttrs attrs = {.unspecified = 1, .secure = secure};
> +
> + switch (len) {
> + case 4:
> + *(uint32_t *)buf = ldl_le_phys(&address_space_memory, addr);
> + break;
> + case 8:
> + *(uint64_t *)buf = ldq_le_phys(&address_space_memory, addr);
> + break;
> + default:
> + return address_space_rw(&address_space_memory, addr,
> + attrs, buf, len, false);
> + }
> + return MEMTX_OK;
> +}
> +
> +inline void
> +smmu_write_sysmem(hwaddr addr, void *buf, int len, bool secure)
> +{
> + MemTxAttrs attrs = {.unspecified = 1, .secure = secure};
> +
> + switch (len) {
> + case 4:
> + stl_le_phys(&address_space_memory, addr, *(uint32_t *)buf);
> + break;
> + case 8:
> + stq_le_phys(&address_space_memory, addr, *(uint64_t *)buf);
> + break;
> + default:
> + address_space_rw(&address_space_memory, addr,
> + attrs, buf, len, true);
> + }
> +}
Thinking about this, I think you should just remove these functions and
always call dma_memory_read/write directly.
It would be nice if you could add a property/link so that machine code
can specify the MemoryRegion/address space to be used. You'll need a
link to allow setup of the MemoryRegion and also some code to create
an address space from the selected MR.
You can have a look at the following code to see how it's done:
exec.c cpu_exec_init() see object_property_add_link
cpus.c qemu_init_vcpu() see address_space_init_shareable
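Something along these lines is what I have in mind (untested sketch; the
"dma-mr" property name and the dma_mr/dma_as field names are just
placeholders):

    /* in SMMUV3State: */
    MemoryRegion *dma_mr;
    AddressSpace *dma_as;

    /* in instance_init(obj), let the machine wire up the MR: */
    SMMUV3State *s = SMMU_V3_DEV(obj);

    object_property_add_link(obj, "dma-mr", TYPE_MEMORY_REGION,
                             (Object **)&s->dma_mr,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);

    /* in realize, turn the linked MR into an address space
     * (falling back to system memory if the machine set nothing): */
    if (s->dma_mr) {
        s->dma_as = address_space_init_shareable(s->dma_mr, "smmu-dma");
    } else {
        s->dma_as = &address_space_memory;
    }

    /* the table-walk and queue code then simply does: */
    dma_memory_read(s->dma_as, addr, &desc, sizeof(desc));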
> +
> +SMMUTransErr
> +smmu_translate_64(SMMUTransCfg *cfg, uint32_t *pagesize,
> + uint32_t *perm, bool is_write)
> +{
> + int ret, level;
> + int stage = cfg->stage;
> + int granule_sz = cfg->granule_sz[stage];
> + int va_size = cfg->va_size[stage];
> + hwaddr va, addr, mask;
> + hwaddr *outaddr;
> +
> +
> + va = addr = cfg->va; /* or ipa in Stage2 */
> + SMMU_DPRINTF(TT_1, "stage:%d\n", stage);
> + assert(va_size == 64); /* We don't support 32-bit yet */
> + /* same location, for clarity */
> + outaddr = &cfg->pa;
> +
> + level = 4 - (va_size - cfg->tsz[stage] - 4) / granule_sz;
> +
> + mask = (1ULL << (granule_sz + 3)) - 1;
> +
> + addr = extract64(cfg->ttbr[stage], 0, 48);
> + addr &= ~((1ULL << (va_size - cfg->tsz[stage] -
> + (granule_sz * (4 - level)))) - 1);
> +
> + for (;;) {
> + uint64_t desc;
> +#ifdef ARM_SMMU_DEBUG
> + uint64_t ored = (va >> (granule_sz * (4 - level))) & mask;
> + SMMU_DPRINTF(TT_1,
> + "Level: %d va:%lx addr:%lx ored:%lx\n",
> + level, va, addr, ored);
> +#endif
> + addr |= (va >> (granule_sz * (4 - level))) & mask;
> + addr &= ~7ULL;
> +
> + if (smmu_read_sysmem(addr, &desc, sizeof(desc), false)) {
> + ret = SMMU_TRANS_ERR_WALK_EXT_ABRT;
> + SMMU_DPRINTF(CRIT, "Translation table read error lvl:%d\n", level);
> + break;
> + }
> +
> + SMMU_DPRINTF(TT_1,
> + "Level: %d gran_sz:%d mask:%lx addr:%lx desc:%lx\n",
> + level, granule_sz, mask, addr, desc);
> +
> + if (!(desc & 1) ||
> + (!(desc & 2) && (level == 3))) {
> + ret = SMMU_TRANS_ERR_TRANS;
> + break;
> + }
> +
> + /* We call again to resolve address at this 'level' */
> + if (cfg->s2_needed) {
> + uint32_t perm_s2, pagesize_s2;
> + SMMUTransCfg s2cfg = *cfg;
> +
> + s2cfg.stage++;
> + s2cfg.va = desc;
> + s2cfg.s2_needed = false;
> +
> + ret = smmu_translate_64(&s2cfg, &pagesize_s2,
> + &perm_s2, is_write);
> + if (ret) {
> + break;
> + }
> +
> + desc = (uint64_t)s2cfg.pa;
> + SMMU_DPRINTF(TT_2, "addr:%lx pagesize:%x\n", addr, *pagesize);
> + }
> +
> + addr = desc & 0xffffffff000ULL;
> + if ((desc & 2) && (level < 3)) {
> + level++;
> + continue;
> + }
> + *pagesize = (1ULL << ((granule_sz * (4 - level)) + 3));
> + addr |= (va & (*pagesize - 1));
> + SMMU_DPRINTF(TT_1, "addr:%lx pagesize:%x\n", addr, *pagesize);
> + break;
> + }
> +
> + if (ret == 0) {
> + *outaddr = addr;
> + }
> +
> + return ret;
> +}
> diff --git a/hw/arm/smmu-common.h b/hw/arm/smmu-common.h
> new file mode 100644
> index 0000000..91f7194
> --- /dev/null
> +++ b/hw/arm/smmu-common.h
> @@ -0,0 +1,141 @@
> +/*
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, write to the Free Software
> + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
> + *
> + * Copyright (C) 2015-2016 Broadcom Corporation
> + *
> + * Author: Prem Mallappa <address@hidden>
> + *
> + */
> +#ifndef HW_ARM_SMMU_COMMON_H
> +#define HW_ARM_SMMU_COMMON_H
> +
> +#include <qemu/log.h>
> +#include <hw/sysbus.h>
> +
> +#define TYPE_SMMU_DEV_BASE "smmu-base"
> +#define TYPE_SMMU_V3_DEV "smmuv3"
> +
> +typedef struct SMMUState {
> + /* <private> */
> + SysBusDevice dev;
> +
> + uint32_t cid[4]; /* Coresight registers */
> + uint32_t pid[8];
> +
> + MemoryRegion iomem;
> +} SMMUState;
> +
> +#define SMMU_SYS_DEV(obj) OBJECT_CHECK(SMMUState, (obj), TYPE_SMMU_DEV_BASE)
> +
> +typedef enum {
> + SMMU_TRANS_ERR_WALK_EXT_ABRT = 0x1, /* Translation walk external abort */
> + SMMU_TRANS_ERR_TRANS = 0x10, /* Translation fault */
> + SMMU_TRANS_ERR_ADDR_SZ, /* Address Size fault */
> + SMMU_TRANS_ERR_ACCESS, /* Access fault */
> + SMMU_TRANS_ERR_PERM, /* Permission fault */
> + SMMU_TRANS_ERR_TLB_CONFLICT = 0x20, /* TLB Conflict */
> +} SMMUTransErr;
> +
> +
> +/*
> + * This needs to be populated by SMMUv2 and SMMUv3
> + * each do it in their own way
> + * translate functions use it to call translations
> + */
> +typedef struct SMMUTransCfg {
> + hwaddr va; /* Input to S1 */
> + int stage;
> + uint32_t oas[3];
> + uint32_t tsz[3];
> + uint64_t ttbr[3];
> + uint32_t granule[3];
> + uint32_t va_size[3];
> + uint32_t granule_sz[3];
> +
> + hwaddr pa; /* Output from S1, Final PA */
> + bool s2_needed;
> +} SMMUTransCfg;
> +
> +struct SMMUTransReq {
> + uint32_t stage;
> + SMMUTransCfg cfg[2];
> +};
> +
> +typedef struct {
> + /* <private> */
> + SysBusDeviceClass parent_class;
> +
> + /* public */
> + SMMUTransErr (*translate_32)(SMMUTransCfg *cfg, uint32_t *pagesize,
> + uint32_t *perm, bool is_write);
> + SMMUTransErr (*translate_64)(SMMUTransCfg *cfg, uint32_t *pagesize,
> + uint32_t *perm, bool is_write);
> +} SMMUBaseClass;
> +
> +#define SMMU_DEVICE_GET_CLASS(obj) \
> + OBJECT_GET_CLASS(SMMUBaseClass, (obj), TYPE_SMMU_DEV_BASE)
> +
> +/* #define ARM_SMMU_DEBUG */
> +#ifdef ARM_SMMU_DEBUG
> +
> +extern uint32_t dbg_bits;
> +
> +#define HERE() printf("%s:%d\n", __func__, __LINE__)
Can you please remove HERE?
> +
> +enum {
> + SMMU_DBG_PANIC, SMMU_DBG_CRIT, SMMU_DBG_WARN, /* error level */
> + SMMU_DBG_DBG1, SMMU_DBG_DBG2, SMMU_DBG_INFO, /* info level */
> + SMMU_DBG_CMDQ, /* Just command queue */
> + SMMU_DBG_STE, SMMU_DBG_CD, /* Specific parts STE/CD */
> + SMMU_DBG_TT_1, SMMU_DBG_TT_2, /* Translation Stage 1/2 */
> + SMMU_DBG_IRQ, /* IRQ */
> +};
> +
> +#define DBG_BIT(bit) (1 << SMMU_DBG_##bit)
> +
> +#define IS_DBG_ENABLED(bit) (dbg_bits & (1 << SMMU_DBG_##bit))
> +
> +#define DBG_DEFAULT (DBG_BIT(PANIC) | DBG_BIT(CRIT) | DBG_BIT(IRQ))
> +#define DBG_EXTRA (DBG_BIT(STE) | DBG_BIT(CD) | DBG_BIT(TT_1))
> +#define DBG_VERBOSE1 DBG_BIT(DBG1)
> +#define DBG_VERBOSE2 (DBG_VERBOSE1 | DBG_BIT(DBG1))
> +#define DBG_VERBOSE3 (DBG_VERBOSE2 | DBG_BIT(DBG2))
> +#define DBG_VERBOSE4 (DBG_VERBOSE3 | DBG_BIT(INFO))
> +
> +#define SMMU_DPRINTF(lvl, fmt, ...) \
> + do { \
> + if (dbg_bits & DBG_BIT(lvl)) { \
> + qemu_log_mask(CPU_LOG_IOMMU, \
> + "(smmu)%s: " fmt , \
> + __func__, \
> + ## __VA_ARGS__); \
> + } \
> + } while (0)
> +
> +#else
> +#define IS_DBG_ENABLED(bit) false
> +#define SMMU_DPRINTF(lvl, fmt, ...)
> +
> +#endif /* SMMU_DEBUG */
> +
> +SMMUTransErr smmu_translate_64(SMMUTransCfg *cfg, uint32_t *pagesize,
> + uint32_t *perm, bool is_write);
> +
> +SMMUTransErr smmu_translate_32(SMMUTransCfg *cfg, uint32_t *pagesize,
> + uint32_t *perm, bool is_write);
> +
> +MemTxResult smmu_read_sysmem(hwaddr addr, void *buf, int len, bool secure);
> +void smmu_write_sysmem(hwaddr addr, void *buf, int len, bool secure);
> +
> +#endif /* HW_ARM_SMMU_COMMON */
> diff --git a/hw/arm/smmu-v3.c b/hw/arm/smmu-v3.c
> new file mode 100644
> index 0000000..7260468
> --- /dev/null
> +++ b/hw/arm/smmu-v3.c
> @@ -0,0 +1,1369 @@
> +/*
> + * Copyright (C) 2014-2016 Broadcom Corporation
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, write to the Free Software
> + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
> + *
> + * Author: Prem Mallappa <address@hidden>
> + *
> + */
> +
> +#include "qemu/osdep.h"
> +#include "hw/boards.h"
> +#include "sysemu/sysemu.h"
> +#include "hw/sysbus.h"
> +#include "hw/pci/pci.h"
> +#include "exec/address-spaces.h"
> +
> +#include "hw/arm/smmu.h"
> +#include "smmu-common.h"
> +#include "smmuv3-internal.h"
> +
> +#define SMMU_NREGS 0x200
> +#define PCI_BUS_MAX 256
> +#define PCI_DEVFN_MAX 256
> +
> +#ifdef ARM_SMMU_DEBUG
> +uint32_t dbg_bits = \
> + DBG_DEFAULT | \
> + DBG_VERBOSE3 | \
> + DBG_EXTRA | \
> + DBG_VERBOSE1;
> +#else
> +const uint32_t dbg_bits;
> +#endif
> +
> +typedef struct SMMUDevice SMMUDevice;
> +
> +struct SMMUDevice {
> + void *smmu;
> + PCIBus *bus;
> + int devfn;
> + MemoryRegion iommu;
> + AddressSpace as;
> +};
> +
> +typedef struct SMMUPciBus SMMUPciBus;
> +struct SMMUPciBus {
> + PCIBus *bus;
> + SMMUDevice *pbdev[0]; /* Parent array is sparse, so dynamically alloc */
> +};
> +
> +typedef struct SMMUV3State SMMUV3State;
> +
> +struct SMMUV3State {
> + SMMUState smmu_state;
> +
> +#define SMMU_FEATURE_2LVL_STE (1 << 0)
> + /* Local cache of most-frequently used register */
> + uint32_t features;
> + uint16_t sid_size;
> + uint16_t sid_split;
> + uint64_t strtab_base;
> +
> + uint64_t regs[SMMU_NREGS];
> +
> + qemu_irq irq[4];
> +
> + SMMUQueue cmdq, evtq, priq;
> +
> + /* IOMMU Address space */
> + MemoryRegion iommu;
> + AddressSpace iommu_as;
> + /*
> + * Bus number is not populated in the beginning, hence we need
> + * a mechanism to retrieve the corresponding address space for each
> + * pci device.
> + */
> + GHashTable *smmu_as_by_busptr;
> +};
> +
> +#define SMMU_V3_DEV(obj) OBJECT_CHECK(SMMUV3State, (obj), TYPE_SMMU_V3_DEV)
> +
> +static void smmu_write64_reg(SMMUV3State *s, uint32_t addr, uint64_t val)
> +{
> + addr >>= 2;
> + s->regs[addr] = val & 0xFFFFFFFFULL;
> + s->regs[addr + 1] = val & ~0xFFFFFFFFULL;
> +}
> +
> +static void smmu_write_reg(SMMUV3State *s, uint32_t addr, uint64_t val)
> +{
> + s->regs[addr >> 2] = val;
> +}
> +
> +static inline uint32_t smmu_read_reg(SMMUV3State *s, uint32_t addr)
> +{
> + return s->regs[addr >> 2];
> +}
> +
> +static inline uint64_t smmu_read64_reg(SMMUV3State *s, uint32_t addr)
> +{
> + addr >>= 2;
> + return s->regs[addr] | (s->regs[addr + 1] << 32);
> +}
> +
> +#define smmu_read32_reg smmu_read_reg
> +#define smmu_write32_reg smmu_write_reg
> +
> +static inline int smmu_enabled(SMMUV3State *s)
> +{
> + return (smmu_read32_reg(s, SMMU_REG_CR0) & SMMU_CR0_SMMU_ENABLE) != 0;
> +}
> +
> +typedef enum {
> + CMD_Q_EMPTY,
> + CMD_Q_FULL,
> + CMD_Q_INUSE,
> +} SMMUQStatus;
> +
> +static inline SMMUQStatus
> +__smmu_queue_status(SMMUV3State *s, SMMUQueue *q)
> +{
> + uint32_t prod = Q_IDX(q, q->prod), cons = Q_IDX(q, q->cons);
> + if ((prod == cons) && (q->wrap.prod != q->wrap.cons)) {
> + return CMD_Q_FULL;
> + } else if ((prod == cons) && (q->wrap.prod == q->wrap.cons)) {
> + return CMD_Q_EMPTY;
> + }
> + return CMD_Q_INUSE;
> +}
> +#define smmu_is_q_full(s, q) (__smmu_queue_status(s, q) == CMD_Q_FULL)
> +#define smmu_is_q_empty(s, q) (__smmu_queue_status(s, q) == CMD_Q_EMPTY)
> +
> +static int __smmu_q_enabled(SMMUV3State *s, uint32_t q)
> +{
> + return smmu_read32_reg(s, SMMU_REG_CR0) & q;
> +}
> +#define smmu_cmd_q_enabled(s) __smmu_q_enabled(s, SMMU_CR0_CMDQ_ENABLE)
> +#define smmu_evt_q_enabled(s) __smmu_q_enabled(s, SMMU_CR0_EVTQ_ENABLE)
> +
> +static inline int __smmu_irq_enabled(SMMUV3State *s, uint32_t q)
> +{
> + return smmu_read64_reg(s, SMMU_REG_IRQ_CTRL) & q;
> +}
> +#define smmu_evt_irq_enabled(s) \
> + __smmu_irq_enabled(s, SMMU_IRQ_CTRL_EVENT_EN)
> +#define smmu_gerror_irq_enabled(s) \
> + __smmu_irq_enabled(s, SMMU_IRQ_CTRL_GERROR_EN)
> +#define smmu_pri_irq_enabled(s) \
> + __smmu_irq_enabled(s, SMMU_IRQ_CTRL_PRI_EN)
Please drop the __ prefix on these functions; identifiers with a leading
underscore are reserved and we usually avoid them.
I don't think smmu_evt_irq_enabled() is very useful;
smmu_irq_enabled(s, SMMU_IRQ_CTRL_EVENT_EN) is readable enough.
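E.g. something like (untested sketch, reusing the register accessors from this
patch):

    static inline bool smmu_irq_enabled(SMMUV3State *s, uint32_t mask)
    {
        return smmu_read64_reg(s, SMMU_REG_IRQ_CTRL) & mask;
    }

and then call smmu_irq_enabled(s, SMMU_IRQ_CTRL_EVENT_EN) etc. directly at the
call sites.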
> +
> +
> +static inline int is_cd_valid(SMMUV3State *s, Ste *ste, Cd *cd)
> +{
> + return CD_VALID(cd);
> +}
> +
> +static inline int is_ste_valid(SMMUV3State *s, Ste *ste)
> +{
> + return STE_VALID(ste);
> +}
> +
> +static inline int is_ste_bypass(SMMUV3State *s, Ste *ste)
> +{
> + return STE_CONFIG(ste) == STE_CONFIG_BYPASS;
> +}
> +
> +static inline uint16_t smmu_get_sid(SMMUDevice *sdev)
> +{
> + return ((pci_bus_num(sdev->bus) & 0xff) << 8) | sdev->devfn;
> +}
> +
> +static void smmu_coresight_regs_init(SMMUV3State *sv3)
> +{
> + SMMUState *s = SMMU_SYS_DEV(sv3);
> + int i;
> +
> + /* Primecell ID registers */
> + s->cid[0] = 0x0D;
> + s->cid[1] = 0xF0;
> + s->cid[2] = 0x05;
> + s->cid[3] = 0xB1;
> +
> + for (i = 0; i < ARRAY_SIZE(s->pid); i++) {
> + s->pid[i] = 0x1;
> + }
> +}
> +
> +/*
> + * smmu_irq_update:
> + * update corresponding register,
> + * return > 0 when IRQ is supposed to be raised
> + * Spec req:
> + * - Raise irq only when it is not active already,
> + * blindly toggling bits may actually clear the error
> + */
> +static int
> +smmu_irq_update(SMMUV3State *s, int irq, uint64_t data)
> +{
> + uint32_t error = 0;
> +
> + switch (irq) {
> + case SMMU_IRQ_EVTQ:
> + if (smmu_evt_irq_enabled(s)) {
> + error = SMMU_GERROR_EVENTQ;
> + }
> + break;
> + case SMMU_IRQ_CMD_SYNC:
> + if (smmu_gerror_irq_enabled(s)) {
> + uint32_t err_type = (uint32_t)data;
> + if (err_type) {
> + uint32_t regval = smmu_read32_reg(s, SMMU_REG_CMDQ_CONS);
> + smmu_write32_reg(s, SMMU_REG_CMDQ_CONS,
> + regval | err_type << SMMU_CMD_CONS_ERR_SHIFT);
> + }
> + error = SMMU_GERROR_CMDQ;
> + }
> + break;
> + case SMMU_IRQ_PRIQ:
> + if (smmu_pri_irq_enabled(s)) {
> + error = SMMU_GERROR_PRIQ;
> + }
> + break;
> + }
> + SMMU_DPRINTF(IRQ, "<< error:%x\n", error);
> +
> + if (error && smmu_gerror_irq_enabled(s)) {
> + uint32_t gerror = smmu_read32_reg(s, SMMU_REG_GERROR);
> + uint32_t gerrorn = smmu_read32_reg(s, SMMU_REG_GERRORN);
> + SMMU_DPRINTF(IRQ, "<<<< error:%x gerror:%x gerrorn:%x\n",
> + error, gerror, gerrorn);
> + if (!((gerror ^ gerrorn) & error)) {
> + smmu_write32_reg(s, SMMU_REG_GERROR, gerror ^ error);
> + }
> + }
> +
> + return error;
> +}
> +
> +static void smmu_irq_raise(SMMUV3State *s, int irq, uint64_t data)
> +{
> + SMMU_DPRINTF(IRQ, "irq:%d\n", irq);
> + if (smmu_irq_update(s, irq, data)) {
> + qemu_irq_raise(s->irq[irq]);
> + }
> +}
> +
> +static MemTxResult smmu_q_read(SMMUV3State *s, SMMUQueue *q, void *data)
> +{
> + uint64_t addr = Q_ENTRY(q, Q_IDX(q, q->cons));
> +
> + q->cons++;
> + if (q->cons == q->entries) {
> + q->cons = 0;
> + q->wrap.cons++; /* this will toggle */
> + }
> +
> + return smmu_read_sysmem(addr, data, q->ent_size, false);
> +}
> +
> +static MemTxResult smmu_q_write(SMMUV3State *s, SMMUQueue *q, void *data)
> +{
> + uint64_t addr = Q_ENTRY(q, Q_IDX(q, q->prod));
> +
> + if (q->prod == q->entries) {
> + q->prod = 0;
> + q->wrap.prod++; /* this will toggle */
> + }
> +
> + q->prod++;
> +
> + smmu_write_sysmem(addr, data, q->ent_size, false);
> +
> + return MEMTX_OK;
> +}
> +
> +static MemTxResult smmu_read_cmdq(SMMUV3State *s, Cmd *cmd)
> +{
> + SMMUQueue *q = &s->cmdq;
> + MemTxResult ret = smmu_q_read(s, q, cmd);
> + uint32_t val = 0;
> +
> + val |= (q->wrap.cons << q->shift) | q->cons;
> +
> + /* Update consumer pointer */
> + smmu_write32_reg(s, SMMU_REG_CMDQ_CONS, val);
> +
> + return ret;
> +}
> +
> +#define SMMU_CMDQ_ERR(s) ((smmu_read32_reg(s, SMMU_REG_GERROR) ^ \
> + smmu_read32_reg(s, SMMU_REG_GERRORN)) & \
> + SMMU_GERROR_CMDQ)
> +
> +static int smmu_cmdq_consume(SMMUV3State *s)
> +{
> + uint32_t error = SMMU_CMD_ERR_NONE;
> +
> + SMMU_DPRINTF(CMDQ, "CMDQ_ERR: %d\n", SMMU_CMDQ_ERR(s));
> +
> + if (!smmu_cmd_q_enabled(s))
> + goto out_while;
> +
> + while (!SMMU_CMDQ_ERR(s) && !smmu_is_q_empty(s, &s->cmdq)) {
> + Cmd cmd;
> +#ifdef ARM_SMMU_DEBUG
> + SMMUQueue *q = &s->cmdq;
> +#endif
> + if (smmu_read_cmdq(s, &cmd) != MEMTX_OK) {
> + error = SMMU_CMD_ERR_ABORT;
> + goto out_while;
> + }
> +
> + SMMU_DPRINTF(DBG2, "CMDQ base: %lx cons:%d prod:%d val:%x wrap:%d\n",
> + q->base, q->cons, q->prod, cmd.word[0], q->wrap.cons);
> +
> + switch (CMD_TYPE(&cmd)) {
> + case SMMU_CMD_CFGI_STE:
> + case SMMU_CMD_CFGI_STE_RANGE:
> + break;
> + case SMMU_CMD_TLBI_NSNH_ALL: /* TLB not implemented */
> + case SMMU_CMD_TLBI_EL2_ALL: /* Fallthrough */
> + case SMMU_CMD_TLBI_EL3_ALL:
> + case SMMU_CMD_TLBI_NH_ALL:
> + case SMMU_CMD_TLBI_S2_IPA:
> + break;
> + case SMMU_CMD_SYNC: /* Fallthrough */
> + if (CMD_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
> + smmu_irq_raise(s, SMMU_IRQ_CMD_SYNC, SMMU_CMD_ERR_NONE);
> + }
> + break;
> + case SMMU_CMD_PREFETCH_CONFIG:
> + break;
> + case SMMU_CMD_TLBI_NH_ASID:
> + case SMMU_CMD_TLBI_NH_VA: /* too many of this is sent */
> + break;
> +
> + default:
> + error = SMMU_CMD_ERR_ILLEGAL;
> + SMMU_DPRINTF(CRIT, "Unknown Command type: %x, ignoring\n",
> + CMD_TYPE(&cmd));
> + if (IS_DBG_ENABLED(CD)) {
> + dump_cmd(&cmd);
> + }
> + break;
> + }
> +
> + if (error != SMMU_CMD_ERR_NONE) {
> + SMMU_DPRINTF(INFO, "CMD Error\n");
> + goto out_while;
> + }
> + }
> +
> +out_while:
> + if (error) {
> + smmu_irq_raise(s, SMMU_IRQ_GERROR, error);
> + }
> +
> + SMMU_DPRINTF(CMDQ, "prod_wrap:%d, prod:%x cons_wrap:%d cons:%x\n",
> + s->cmdq.wrap.prod, s->cmdq.prod,
> + s->cmdq.wrap.cons, s->cmdq.cons);
> +
> + return 0;
> +}
> +
> +static inline bool
> +smmu_is_irq_pending(SMMUV3State *s, int irq)
> +{
> + return smmu_read32_reg(s, SMMU_REG_GERROR) ^
> + smmu_read32_reg(s, SMMU_REG_GERRORN);
> +}
> +
> +/*
> + * GERROR is updated when raising an interrupt, GERRORN will be updated
> + * by s/w and should match GERROR before normal operation resumes.
> + */
> +static void smmu_irq_clear(SMMUV3State *s, uint64_t gerrorn)
> +{
> + int irq = SMMU_IRQ_GERROR;
> + uint32_t toggled;
> +
> + toggled = smmu_read32_reg(s, SMMU_REG_GERRORN) ^ gerrorn;
> +
> + while (toggled) {
> + irq = ctz32(toggled);
> +
> + qemu_irq_lower(s->irq[irq]);
> +
> + toggled &= toggled - 1;
> + }
> +}
> +
> +static int smmu_evtq_update(SMMUV3State *s)
> +{
> + if (!smmu_enabled(s)) {
> + return 0;
> + }
> +
> + if (!smmu_is_q_empty(s, &s->evtq)) {
> + if (smmu_evt_irq_enabled(s))
> + smmu_irq_raise(s, SMMU_IRQ_EVTQ, 0);
> + }
> +
> + if (smmu_is_q_empty(s, &s->evtq)) {
> + smmu_irq_clear(s, SMMU_GERROR_EVENTQ);
> + }
> +
> + return 1;
> +}
> +
> +static void smmu_create_event(SMMUV3State *s, hwaddr iova,
> + uint32_t sid, bool is_write, int error);
> +
> +static void smmu_update(SMMUV3State *s)
> +{
> + int error = 0;
> +
> + /* SMMU starts processing commands even when not enabled */
> + if (!smmu_enabled(s)) {
> + goto check_cmdq;
> + }
> +
> + /* EVENT Q updates takes more priority */
> + if ((smmu_evt_q_enabled(s)) && !smmu_is_q_empty(s, &s->evtq)) {
> + SMMU_DPRINTF(CRIT, "q empty:%d prod:%d cons:%d p.wrap:%d
> p.cons:%d\n",
> + smmu_is_q_empty(s, &s->evtq), s->evtq.prod,
> + s->evtq.cons, s->evtq.wrap.prod, s->evtq.wrap.cons);
> + error = smmu_evtq_update(s);
> + }
> +
> + if (error) {
> + /* TODO: May be in future we create proper event queue entry */
> + /* an error condition is not a recoverable event, like other devices */
> + SMMU_DPRINTF(CRIT, "An unfavourable condition\n");
> + smmu_create_event(s, 0, 0, 0, error);
> + }
> +
> +check_cmdq:
> + if (smmu_cmd_q_enabled(s) && !SMMU_CMDQ_ERR(s)) {
> + smmu_cmdq_consume(s);
> + } else {
> + SMMU_DPRINTF(INFO, "cmdq not enabled or error :%x\n",
> SMMU_CMDQ_ERR(s));
> + }
> +
> +}
> +
> +static void smmu_update_irq(SMMUV3State *s, uint64_t addr, uint64_t val)
> +{
> + smmu_irq_clear(s, val);
> +
> + smmu_write32_reg(s, SMMU_REG_GERRORN, val);
> +
> + SMMU_DPRINTF(IRQ, "irq pend: %d gerror:%x gerrorn:%x\n",
> + smmu_is_irq_pending(s, 0),
> + smmu_read32_reg(s, SMMU_REG_GERROR),
> + smmu_read32_reg(s, SMMU_REG_GERRORN));
> +
> + /* Clear only when no more left */
> + if (!smmu_is_irq_pending(s, 0)) {
> + qemu_irq_lower(s->irq[0]);
> + }
> +}
> +
> +#define SMMU_ID_REG_INIT(s, reg, d) do { \
> + s->regs[reg >> 2] = d; \
> + } while (0)
> +
> +static void smmuv3_id_reg_init(SMMUV3State *s)
> +{
> + uint32_t data =
> + 1 << 27 | /* 2 Level stream id */
> + 1 << 26 | /* Term Model */
> + 1 << 24 | /* Stall model not supported */
> + 1 << 18 | /* VMID 16 bits */
> + 1 << 16 | /* PRI */
> + 1 << 12 | /* ASID 16 bits */
> + 1 << 10 | /* ATS */
> + 1 << 9 | /* HYP */
> + 2 << 6 | /* HTTU */
> + 1 << 4 | /* COHACC */
> + 2 << 2 | /* TTF=Arch64 */
> + 1 << 1 | /* Stage 1 */
> + 1 << 0; /* Stage 2 */
> +
> + SMMU_ID_REG_INIT(s, SMMU_REG_IDR0, data);
> +
> +#define SMMU_SID_SIZE 16
> +#define SMMU_QUEUE_SIZE_LOG2 19
> + data =
> + 1 << 27 | /* Attr Types override */
> + SMMU_QUEUE_SIZE_LOG2 << 21 | /* Cmd Q size */
> + SMMU_QUEUE_SIZE_LOG2 << 16 | /* Event Q size */
> + SMMU_QUEUE_SIZE_LOG2 << 11 | /* PRI Q size */
> + 0 << 6 | /* SSID not supported */
> + SMMU_SID_SIZE << 0 ; /* SID size */
> +
> + SMMU_ID_REG_INIT(s, SMMU_REG_IDR1, data);
> +
> + data =
> + 1 << 6 | /* Granule 64K */
> + 1 << 4 | /* Granule 4K */
> + 4 << 0; /* OAS = 44 bits */
> +
> + SMMU_ID_REG_INIT(s, SMMU_REG_IDR5, data);
> +
> +}
> +
> +static void smmuv3_init(SMMUV3State *s)
> +{
> + smmu_coresight_regs_init(s);
> +
> + smmuv3_id_reg_init(s); /* Update ID regs alone */
> +
> + s->sid_size = SMMU_SID_SIZE;
> +
> + s->cmdq.entries = (smmu_read32_reg(s, SMMU_REG_IDR1) >> 21) & 0x1f;
> + s->cmdq.ent_size = sizeof(Cmd);
> + s->evtq.entries = (smmu_read32_reg(s, SMMU_REG_IDR1) >> 16) & 0x1f;
> + s->evtq.ent_size = sizeof(Evt);
> +}
> +
> +/*
> + * All SMMU data structures are little endian, and are aligned to 8 bytes
> + * L1STE/STE/L1CD/CD, Queue entries in CMDQ/EVTQ/PRIQ
> + */
> +static inline int smmu_get_ste(SMMUV3State *s, hwaddr addr, Ste *buf)
> +{
> + return dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
> +}
> +
> +/*
> + * For now we only support CD with a single entry, 'ssid' is used to identify
> + * otherwise
> + */
> +static inline int smmu_get_cd(SMMUV3State *s, Ste *ste, uint32_t ssid, Cd *buf)
> +{
> + hwaddr addr = STE_CTXPTR(ste);
> +
> + if (STE_S1CDMAX(ste) != 0) {
> + SMMU_DPRINTF(CRIT, "Multilevel Ctx Descriptor not supported yet\n");
> + }
> +
> + return dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
> +}
> +
> +static int
> +is_ste_consistent(SMMUV3State *s, Ste *ste)
> +{
> + uint32_t _config = STE_CONFIG(ste) & 0x7,
> + idr0 = smmu_read32_reg(s, SMMU_REG_IDR0),
> + idr5 = smmu_read32_reg(s, SMMU_REG_IDR5);
> +
> + uint32_t httu = extract32(idr0, 6, 2);
> + bool config[] = {_config & 0x1,
> + _config & 0x2,
> + _config & 0x3};
> + bool granule_supported;
> +
> + bool s1p = idr0 & SMMU_IDR0_S1P,
> + s2p = idr0 & SMMU_IDR0_S2P,
> + hyp = idr0 & SMMU_IDR0_HYP,
> + cd2l = idr0 & SMMU_IDR0_CD2L,
> + idr0_vmid = idr0 & SMMU_IDR0_VMID16,
> + ats = idr0 & SMMU_IDR0_ATS,
> + ttf0 = (idr0 >> 2) & 0x1,
> + ttf1 = (idr0 >> 3) & 0x1;
> +
> + int ssidsz = (smmu_read32_reg(s, SMMU_REG_IDR1) >> 6) & 0x1f;
> +
> + uint32_t ste_vmid = STE_S2VMID(ste),
> + ste_eats = STE_EATS(ste),
> + ste_s2s = STE_S2S(ste),
> + ste_s1fmt = STE_S1FMT(ste),
> + aa64 = STE_S2AA64(ste),
> + ste_s1cdmax = STE_S1CDMAX(ste);
> +
> + uint8_t ste_strw = STE_STRW(ste);
> + uint64_t oas, max_pa;
> + bool strw_ign;
> + bool addr_out_of_range;
> +
> + if (!STE_VALID(ste)) {
> + SMMU_DPRINTF(STE, "STE NOT valid\n");
> + return false;
> + }
> +
> + switch (STE_S2TG(ste)) {
> + case 1:
> + granule_supported = 0x4; break;
> + case 2:
> + granule_supported = 0x2; break;
> + case 0:
> + granule_supported = 0x1; break;
> + }
> + granule_supported &= (idr5 >> 4);
> +
> + if (!config[2]) {
> + if ((!s1p && config[0]) ||
> + (!s2p && config[1]) ||
> + (s2p && config[1])) {
> + SMMU_DPRINTF(STE, "STE inconsistant, S2P mismatch\n");
> + return false;
> + }
> + if (!ssidsz && ste_s1cdmax && config[0] && !cd2l &&
> + (ste_s1fmt == 1 || ste_s1fmt == 2)) {
> + SMMU_DPRINTF(STE, "STE inconsistant, CD mismatch\n");
> + return false;
> + }
> + if (ats && ((_config & 0x3) == 0) &&
> + ((ste_eats == 2 && (_config != 0x7 || ste_s2s)) ||
> + (ste_eats == 1 && !ste_s2s))) {
> + SMMU_DPRINTF(STE, "STE inconsistant, EATS/S2S mismatch\n");
> + return false;
> + }
> + if (config[0] && (ssidsz && (ste_s1cdmax > ssidsz))) {
> + SMMU_DPRINTF(STE, "STE inconsistant, SSID out of range\n");
> + return false;
> + }
> + }
> +
> + oas = MIN(STE_S2PS(ste), idr5 & 0x7);
> +
> + if (oas == 3) {
> + max_pa = deposit64(0, 0, 42, ~0UL);
> + } else {
> + max_pa = deposit64(0, 0, (32 + (oas * 4)), ~0UL);
> + }
> +
> + strw_ign = (!s1p || !hyp || (_config == 4));
> +
> + addr_out_of_range = (int64_t)(max_pa - STE_S2TTB(ste)) < 0;
> +
> + if (config[1] && (
> + (aa64 && !granule_supported) ||
> + (!aa64 && !ttf0) ||
> + (aa64 && !ttf1) ||
> + ((STE_S2HA(ste) || STE_S2HD(ste)) && !aa64) ||
> + ((STE_S2HA(ste) || STE_S2HD(ste)) && !httu) ||
> + (STE_S2HD(ste) && (httu == 1)) ||
> + addr_out_of_range)) {
> + SMMU_DPRINTF(STE, "STE inconsistant\n");
> + SMMU_DPRINTF(STE, "config[1]:%d gran:%d addr:%d\n"
> + " aa64:%d ttf0:%d ttf1:%d s2ha:%d s2hd:%d httu:%d\n",
> + config[1], granule_supported,
> + addr_out_of_range, aa64, ttf0, ttf1, STE_S2HA(ste),
> + STE_S2HD(ste), httu);
> + SMMU_DPRINTF(STE, "maxpa:%lx s2ttb:%lx\n", max_pa, STE_S2TTB(ste));
> + return false;
> + }
> + if (s2p && (config[0] == 0 && config[1]) &&
> + (strw_ign || !ste_strw) && !idr0_vmid && !(ste_vmid >> 8)) {
> + SMMU_DPRINTF(STE, "STE inconsistant, VMID out of range\n");
> + return false;
> + }
> +
> + return true;
> +}
> +
> +static int tg2granule(int bits, bool tg1)
> +{
> + switch (bits) {
> + case 1:
> + return tg1 ? 14 : 16;
> + case 2:
> + return tg1 ? 14 : 12;
> + case 3:
> + return tg1 ? 16 : 12;
> + default:
> + return 12;
> + }
> +}
> +
> +static inline int oas2bits(int oas)
> +{
> + switch (oas) {
> + case 2:
> + return 40;
> + case 3:
> + return 42;
> + case 4:
> + return 44;
> + case 5:
> + default: return 48;
> + }
> +}
> +
> +#define STM2U64(stm) ({ \
> + uint64_t hi, lo; \
> + hi = (stm)->word[1]; \
> + lo = (stm)->word[0] & ~(uint64_t)0x1f; \
> + hi << 32 | lo; \
> + })
> +
> +#define STMSPAN(stm) (1 << (extract32((stm)->word[0], 0, 4) - 1))
> +
> +static int smmu_find_ste(SMMUV3State *s, uint16_t sid, Ste *ste)
> +{
> + hwaddr addr;
> +
> + SMMU_DPRINTF(STE, "SID:%x\n", sid);
> + /* Check SID range */
> + if (sid > (1 << s->sid_size)) {
> + return SMMU_EVT_C_BAD_SID;
> + }
> + SMMU_DPRINTF(STE, "features:%x\n", s->features);
> + if (s->features & SMMU_FEATURE_2LVL_STE) {
> + int span;
> + hwaddr stm_addr;
> + STEDesc stm;
> + int l1_ste_offset, l2_ste_offset;
> + SMMU_DPRINTF(STE, "no. ste: %x\n", s->sid_split);
> +
> + l1_ste_offset = sid >> s->sid_split;
> + l2_ste_offset = sid & ((1 << s->sid_split) - 1);
> + SMMU_DPRINTF(STE, "l1_off:%x, l2_off:%x\n", l1_ste_offset,
> + l2_ste_offset);
> + stm_addr = (hwaddr)(s->strtab_base + l1_ste_offset * sizeof(stm));
> + smmu_read_sysmem(stm_addr, &stm, sizeof(stm), false);
> +
> + SMMU_DPRINTF(STE, "strtab_base:%lx stm_addr:%lx\n"
> + "l1_ste_offset:%x l1(64):%#016lx\n",
> + s->strtab_base, stm_addr, l1_ste_offset,
> + STM2U64(&stm));
> +
> + span = STMSPAN(&stm);
> + SMMU_DPRINTF(STE, "l2_ste_offset:%x ~ span:%d\n", l2_ste_offset,
> span);
> + if (l2_ste_offset > span) {
> + SMMU_DPRINTF(CRIT, "l2_ste_offset > span\n");
> + return SMMU_EVT_C_BAD_STE;
> + }
> + addr = STM2U64(&stm) + l2_ste_offset * sizeof(*ste);
> + } else {
> + addr = s->strtab_base + sid * sizeof(*ste);
> + }
> + SMMU_DPRINTF(STE, "ste:%lx\n", addr);
> + if (smmu_get_ste(s, addr, ste)) {
> + SMMU_DPRINTF(CRIT, "Unable to Fetch STE\n");
> + return SMMU_EVT_F_UUT;
> + }
> +
> + return 0;
> +}
> +
> +static void smmu_cfg_populate_s2(SMMUTransCfg *cfg, Ste *ste)
> +{ /* stage 2 cfg */
> + bool s2a64 = STE_S2AA64(ste);
> + const int stage = 2;
> +
> + cfg->granule[stage] = STE_S2TG(ste);
> + cfg->tsz[stage] = STE_S2T0SZ(ste);
> + cfg->ttbr[stage] = STE_S2TTB(ste);
> + cfg->oas[stage] = oas2bits(STE_S2PS(ste));
> +
> + if (s2a64) {
> + cfg->tsz[stage] = MIN(cfg->tsz[stage], 39);
> + cfg->tsz[stage] = MAX(cfg->tsz[stage], 16);
> + }
> + cfg->va_size[stage] = STE_S2AA64(ste) ? 64 : 32;
> + cfg->granule_sz[stage] = tg2granule(cfg->granule[stage], 0) - 3;
> +}
> +
> +static void smmu_cfg_populate_s1(SMMUTransCfg *cfg, Cd *cd)
> +{ /* stage 1 cfg */
> + bool s1a64 = CD_AARCH64(cd);
> + const int stage = 1;
> +
> + cfg->granule[stage] = (CD_EPD0(cd)) ? CD_TG1(cd) : CD_TG0(cd);
> + cfg->tsz[stage] = (CD_EPD0(cd)) ? CD_T1SZ(cd) : CD_T0SZ(cd);
> + cfg->ttbr[stage] = (CD_EPD0(cd)) ? CD_TTB1(cd) : CD_TTB0(cd);
> + cfg->oas[stage] = oas2bits(CD_IPS(cd));
> +
> + if (s1a64) {
> + cfg->tsz[stage] = MIN(cfg->tsz[stage], 39);
> + cfg->tsz[stage] = MAX(cfg->tsz[stage], 16);
> + }
> + cfg->va_size[stage] = CD_AARCH64(cd) ? 64 : 32;
> + cfg->granule_sz[stage] = tg2granule(cfg->granule[stage], CD_EPD0(cd)) - 3;
> +}
> +
> +static SMMUEvtErr smmu_walk_pgtable(SMMUV3State *s, Ste *ste, Cd *cd,
> + IOMMUTLBEntry *tlbe, bool is_write)
> +{
> + SMMUState *sys = SMMU_SYS_DEV(s);
> + SMMUBaseClass *sbc = SMMU_DEVICE_GET_CLASS(sys);
> + SMMUTransCfg _cfg = {};
> + SMMUTransCfg *cfg = &_cfg;
> + SMMUEvtErr retval = 0;
> + uint32_t ste_cfg = STE_CONFIG(ste);
> + uint32_t page_size = 0, perm = 0;
> + hwaddr pa; /* Input address, output address */
> + int stage = 0;
> +
> + SMMU_DPRINTF(DBG1, "ste_cfg :%x\n", ste_cfg);
> + /* Both Bypass, we don't need to do anything */
> + if (is_ste_bypass(s, ste)) {
> + return 0;
> + }
> +
> + SMMU_DPRINTF(TT_1, "Input addr: %lx ste_config:%d\n",
> + tlbe->iova, ste_cfg);
> +
> + if (ste_cfg & STE_CONFIG_S1TR) {
> + stage = cfg->stage = 1;
> + smmu_cfg_populate_s1(cfg, cd);
> +
> + cfg->oas[stage] = MIN(oas2bits(smmu_read32_reg(s, SMMU_REG_IDR5) & 0xf),
> + cfg->oas[stage]);
> + /* fix ttbr - make top bits zero*/
> + cfg->ttbr[stage] = extract64(cfg->ttbr[stage], 0, cfg->oas[stage]);
> + cfg->s2_needed = (STE_CONFIG(ste) == STE_CONFIG_S1TR_S2TR) ? 1 : 0;
> +
> + SMMU_DPRINTF(DBG1, "S1 populated\n ");
> + }
> +
> + if (ste_cfg & STE_CONFIG_S2TR) {
> + stage = 2;
> + if (cfg->stage) { /* S1+S2 */
> + cfg->s2_needed = true;
> + } else /* Stage2 only */
> + cfg->stage = stage;
> +
> + /* Stage2 only configuration */
> + smmu_cfg_populate_s2(cfg, ste);
> +
> + cfg->oas[stage] = MIN(oas2bits(smmu_read32_reg(s, SMMU_REG_IDR5) & 0xf),
> + cfg->oas[stage]);
> + /* fix ttbr - make top bits zero*/
> + cfg->ttbr[stage] = extract64(cfg->ttbr[stage], 0, cfg->oas[stage]);
> +
> + SMMU_DPRINTF(DBG1, "S2 populated\n ");
> + }
> +
> + cfg->va = tlbe->iova;
> +
> + if ((cfg->stage == 1 && CD_AARCH64(cd)) ||
> + STE_S2AA64(ste)) {
> + SMMU_DPRINTF(DBG1, "Translate 64\n");
> + retval = sbc->translate_64(cfg, &page_size, &perm,
> + is_write);
> + } else {
> + SMMU_DPRINTF(DBG1, "Translate 32\n");
> + retval = sbc->translate_32(cfg, &page_size, &perm, is_write);
> + }
> +
> + if (retval != 0) {
> + SMMU_DPRINTF(CRIT, "FAILED Stage1 translation\n");
> + goto exit;
> + }
> + pa = cfg->pa;
> +
> + SMMU_DPRINTF(TT_1, "DONE: o/p addr:%lx mask:%x is_write:%d\n ",
> + pa, page_size - 1, is_write);
> + tlbe->translated_addr = pa;
> + tlbe->addr_mask = page_size - 1;
> + tlbe->perm = perm;
> +
> +exit:
> + dump_smmutranscfg(cfg);
> + return retval;
> +}
> +
> +static MemTxResult smmu_write_evtq(SMMUV3State *s, Evt *evt)
> +{
> + SMMUQueue *q = &s->evtq;
> + int ret = smmu_q_write(s, q, evt);
> + uint32_t val = 0;
> +
> + val |= (q->wrap.prod << q->shift) | q->prod;
> +
> + smmu_write32_reg(s, SMMU_REG_EVTQ_PROD, val);
> +
> + return ret;
> +}
> +
> +/*
> + * Events created on the EventQ
> + */
> +static void smmu_create_event(SMMUV3State *s, hwaddr iova,
> + uint32_t sid, bool is_write, int error)
> +{
> + SMMUQueue *q = &s->evtq;
> + uint64_t head;
> + Evt evt;
> +
> + if (!smmu_evt_q_enabled(s)) {
> + return;
> + }
> +
> + EVT_SET_TYPE(&evt, error);
> + EVT_SET_SID(&evt, sid);
> +
> + switch (error) {
> + case SMMU_EVT_F_UUT:
> + case SMMU_EVT_C_BAD_STE:
> + break;
> + case SMMU_EVT_C_BAD_CD:
> + case SMMU_EVT_F_CD_FETCH:
> + break;
> + case SMMU_EVT_F_TRANS_FORBIDDEN:
> + case SMMU_EVT_F_WALK_EXT_ABRT:
> + EVT_SET_INPUT_ADDR(&evt, iova);
> + default:
> + break;
> + }
> +
> + smmu_write_evtq(s, &evt);
> +
> + head = Q_IDX(q, q->prod);
> +
> + if (smmu_is_q_full(s, &s->evtq)) {
> + head = q->prod ^ (1 << 31); /* Set overflow */
> + }
> +
> + smmu_write32_reg(s, SMMU_REG_EVTQ_PROD, head);
> +
> + smmu_irq_raise(s, SMMU_IRQ_EVTQ, (uint64_t)&evt);
> +}
> +
> +/*
> + * TR - Translation Request
> + * TT - Translated Transaction
> + * OT - Other Transaction
> + */
> +static IOMMUTLBEntry
> +smmuv3_translate(MemoryRegion *mr, hwaddr addr, bool is_write)
> +{
> + SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
> + SMMUV3State *s = sdev->smmu;
> + uint16_t sid = 0, config;
> + Ste ste;
> + Cd cd;
> + SMMUEvtErr error = 0;
> +
> + IOMMUTLBEntry ret = {
> + .target_as = &address_space_memory,
> + .iova = addr,
> + .translated_addr = addr,
> + .addr_mask = ~(hwaddr)0,
> + .perm = IOMMU_NONE,
> + };
> +
> + /* SMMU Bypass, We allow traffic through if SMMU is disabled */
> + if (!smmu_enabled(s)) {
> + SMMU_DPRINTF(CRIT, "SMMU Not enabled.. bypassing addr:%lx\n", addr);
> + goto bypass;
> + }
> +
> + sid = smmu_get_sid(sdev);
> + SMMU_DPRINTF(TT_1, "SID:%x bus:%d ste_base:%lx\n",
> + sid, pci_bus_num(sdev->bus), s->strtab_base);
> +
> + /* Fetch & Check STE */
> + error = smmu_find_ste(s, sid, &ste);
> + if (error) {
> + goto error_out; /* F_STE_FETCH or F_CFG_CONFLICT */
> + }
> +
> + if (IS_DBG_ENABLED(STE)) {
> + dump_ste(&ste);
> + }
> +
> + if (is_ste_valid(s, &ste) && is_ste_bypass(s, &ste)) {
> + goto bypass;
> + }
> +
> + SMMU_DPRINTF(STE, "STE is not bypass\n");
> + if (!is_ste_consistent(s, &ste)) {
> + error = SMMU_EVT_C_BAD_STE;
> + goto error_out;
> + }
> + SMMU_DPRINTF(INFO, "Valid STE Found\n");
> +
> + /* Stream Bypass */
> + config = STE_CONFIG(&ste) & 0x3;
> +
> + if (config & (STE_CONFIG_S1TR)) {
> + smmu_get_cd(s, &ste, 0, &cd); /* We don't have SSID yet, so 0 */
> + SMMU_DPRINTF(CRIT, "GET_CD CTXPTR:%p\n", (void *)STE_CTXPTR(&ste));
> + if (1 || IS_DBG_ENABLED(CD)) {
> + dump_cd(&cd);
> + }
> +
> + if (!is_cd_valid(s, &ste, &cd)) {
> + error = SMMU_EVT_C_BAD_CD;
> + goto error_out;
> + }
> + }
> +
> + /* Walk Stage1, if S2 is enabled, S2 walked for Every access on S1 */
> + error = smmu_walk_pgtable(s, &ste, &cd, &ret, is_write);
> +
> + SMMU_DPRINTF(INFO, "DONE walking tables \n");
> +
> +error_out:
> + if (error) { /* Post the Error using Event Q */
> + SMMU_DPRINTF(CRIT, "Translation Error: %x\n", error);
> + smmu_create_event(s, ret.iova, sid, is_write, error);
> + goto out;
> + }
> +
> +bypass:
> + ret.perm = is_write ? IOMMU_RW : IOMMU_RO;
> +
> +out:
> + return ret;
> +}
> +
> +static const MemoryRegionIOMMUOps smmu_iommu_ops = {
> + .translate = smmuv3_translate,
> +};
> +
> +static AddressSpace *smmu_init_pci_iommu(PCIBus *bus, void *opaque, int devfn)
> +{
> + SMMUV3State *s = opaque;
> + SMMUState *sys = SMMU_SYS_DEV(s);
> + uintptr_t key = (uintptr_t)bus;
> + SMMUPciBus *sbus = g_hash_table_lookup(s->smmu_as_by_busptr, &key);
> + SMMUDevice *sdev;
> +
> + if (!sbus) {
> + sbus = g_malloc0(sizeof(SMMUPciBus) +
> + sizeof(SMMUDevice) * PCI_DEVFN_MAX);
> + sbus->bus = bus;
> + g_hash_table_insert(s->smmu_as_by_busptr, &key, sbus);
> + }
> +
> + sdev = sbus->pbdev[devfn];
> + if (!sdev) {
> + sdev = sbus->pbdev[devfn] = g_malloc0(sizeof(SMMUDevice));
> +
> + sdev->smmu = s;
> + sdev->bus = bus;
> + sdev->devfn = devfn;
> +
> + memory_region_init_iommu(&sdev->iommu, OBJECT(sys),
> + &smmu_iommu_ops, TYPE_SMMU_V3_DEV, UINT64_MAX);
> + address_space_init(&sdev->as, &sdev->iommu, TYPE_SMMU_V3_DEV);
> + }
> +
> + return &sdev->as;
> +}
> +
> +static inline void smmu_update_base_reg(SMMUV3State *s, uint64_t *base, uint64_t val)
> +{
> + *base = val & ~(SMMU_BASE_RA | 0x3fULL);
> +}
> +
> +static void smmu_update_qreg(SMMUV3State *s, SMMUQueue *q, hwaddr reg,
> + uint32_t off, uint64_t val, unsigned size)
> +{
> + if (size == 8 && off == 0) {
> + smmu_write64_reg(s, reg, val);
> + } else
> + smmu_write_reg(s, reg, val);
> +
> + switch (off) {
> + case 0: /* BASE register */
> + val = smmu_read64_reg(s, reg);
> + q->shift = val & 0x1f;
> + q->entries = 1 << (q->shift);
> + smmu_update_base_reg(s, &q->base, val);
> + break;
> +
> + case 4: /* CONS */
> + q->cons = Q_IDX(q, val);
> + q->wrap.cons = val >> q->shift;
> + SMMU_DPRINTF(DBG2, "cons written : %d val:%lx\n", q->cons, val);
> + break;
> +
> + case 8: /* PROD */
> + q->prod = Q_IDX(q, val);
> + q->wrap.prod = val >> q->shift;
> + break;
> + }
> +
> + switch (reg) {
> + case SMMU_REG_CMDQ_PROD: /* should be only for CMDQ_PROD */
> + case SMMU_REG_CMDQ_CONS: /* but we do it anyway */
> + smmu_update(s);
> + break;
> + }
> +}
> +
> +static void smmu_write_mmio_fixup(SMMUV3State *s, hwaddr *addr)
> +{
> + switch (*addr) {
> + case 0x100a8: case 0x100ac: /* Aliasing => page0 registers */
> + case 0x100c8: case 0x100cc:
> + *addr ^= (hwaddr)0x10000;
> + }
> +}
> +
> +static void smmu_write_mmio(void *opaque, hwaddr addr,
> + uint64_t val, unsigned size)
> +{
> + SMMUState *sys = opaque;
> + SMMUV3State *s = SMMU_V3_DEV(sys);
> + bool update = false;
> +
> + smmu_write_mmio_fixup(s, &addr);
> +
> + SMMU_DPRINTF(DBG2, "addr: %lx val:%lx\n", addr, val);
> +
> + switch (addr) {
> + case 0xFDC ... 0xFFC:
> + case SMMU_REG_IDR0 ... SMMU_REG_IDR5:
> + SMMU_DPRINTF(CRIT, "write to RO/Unimpl reg %lx val64:%lx\n",
> + addr, val);
> + return;
> +
> + case SMMU_REG_GERRORN:
> + smmu_update_irq(s, addr, val);
> + return;
> +
> + case SMMU_REG_CR0:
> + smmu_write32_reg(s, SMMU_REG_CR0_ACK, val);
> + update = true;
> + break;
> +
> + case SMMU_REG_IRQ_CTRL:
> + smmu_write32_reg(s, SMMU_REG_IRQ_CTRL_ACK, val);
> + update = true;
> + break;
> +
> + case SMMU_REG_STRTAB_BASE:
> + smmu_update_base_reg(s, &s->strtab_base, val);
> + return;
> +
> + case SMMU_REG_STRTAB_BASE_CFG:
> + if (((val >> 16) & 0x3) == 0x1) {
> + s->sid_split = (val >> 6) & 0x1f;
> + s->features |= SMMU_FEATURE_2LVL_STE;
> + }
> + break;
> +
> + case SMMU_REG_CMDQ_PROD:
> + case SMMU_REG_CMDQ_CONS:
> + case SMMU_REG_CMDQ_BASE:
> + case SMMU_REG_CMDQ_BASE + 4:
> + smmu_update_qreg(s, &s->cmdq, addr, addr - SMMU_REG_CMDQ_BASE,
> + val, size);
> + return;
> +
> + case SMMU_REG_EVTQ_CONS: /* fallthrough */
> + {
> + SMMUQueue *evtq = &s->evtq;
> + evtq->cons = Q_IDX(evtq, val);
> + evtq->wrap.cons = Q_WRAP(evtq, val);
> +
> + SMMU_DPRINTF(IRQ, "Before clearing interrupt "
> + "prod:%x cons:%x prod.w:%d cons.w:%d\n",
> + evtq->prod, evtq->cons, evtq->wrap.prod, evtq->wrap.cons);
> + if (smmu_is_q_empty(s, &s->evtq)) {
> + SMMU_DPRINTF(IRQ, "Clearing interrupt"
> + " prod:%x cons:%x prod.w:%d cons.w:%d\n",
> + evtq->prod, evtq->cons, evtq->wrap.prod,
> + evtq->wrap.cons);
> + qemu_irq_lower(s->irq[SMMU_IRQ_EVTQ]);
> + }
> + }
> + case SMMU_REG_EVTQ_BASE:
> + case SMMU_REG_EVTQ_BASE + 4:
> + case SMMU_REG_EVTQ_PROD:
> + smmu_update_qreg(s, &s->evtq, addr, addr - SMMU_REG_EVTQ_BASE,
> + val, size);
> + return;
> +
> + case SMMU_REG_PRIQ_CONS:
> + case SMMU_REG_PRIQ_BASE:
> + case SMMU_REG_PRIQ_BASE + 4:
> + case SMMU_REG_PRIQ_PROD:
> + smmu_update_qreg(s, &s->priq, addr, addr - SMMU_REG_PRIQ_BASE,
> + val, size);
> + return;
> + }
> +
> + if (size == 8) {
> + smmu_write_reg(s, addr, val);
> + } else {
> + smmu_write32_reg(s, addr, (uint32_t)val);
> + }
> +
> + if (update)
> + smmu_update(s);
> +}
> +
> +static uint64_t smmu_read_mmio(void *opaque, hwaddr addr, unsigned size)
> +{
> + SMMUState *sys = opaque;
> + SMMUV3State *s = SMMU_V3_DEV(sys);
> + uint64_t val;
> +
> + smmu_write_mmio_fixup(s, &addr);
> +
> + /* Primecell/Corelink ID registers */
> + switch (addr) {
> + case 0xFF0 ... 0xFFC:
> + val = (uint64_t)sys->cid[(addr - 0xFF0) >> 2];
> + break;
> +
> + case 0xFDC ... 0xFE4:
> + val = (uint64_t)sys->pid[(addr - 0xFDC) >> 2];
> + break;
> +
> + default:
> + val = (uint64_t)smmu_read32_reg(s, addr);
> + break;
> +
> + case SMMU_REG_STRTAB_BASE ... SMMU_REG_CMDQ_BASE:
> + case SMMU_REG_EVTQ_BASE:
> + case SMMU_REG_PRIQ_BASE ... SMMU_REG_PRIQ_IRQ_CFG1:
> + val = smmu_read64_reg(s, addr);
> + break;
> + }
> +
> + SMMU_DPRINTF(DBG2, "addr: %lx val:%lx\n", addr, val);
> + SMMU_DPRINTF(DBG2, "cmdq cons:%d\n", s->cmdq.cons);
> + return val;
> +}
> +
> +static const MemoryRegionOps smmu_mem_ops = {
> + .read = smmu_read_mmio,
> + .write = smmu_write_mmio,
> + .endianness = DEVICE_LITTLE_ENDIAN,
> + .valid = {
> + .min_access_size = 4,
> + .max_access_size = 8,
> + },
> +};
> +
> +static void smmu_init_irq(SMMUV3State *s, SysBusDevice *dev)
> +{
> + int i;
> +
> + for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
> + sysbus_init_irq(dev, &s->irq[i]);
> + }
> +}
> +
> +static void smmu_init_iommu_as(SMMUV3State *sys)
> +{
> + SMMUState *s = SMMU_SYS_DEV(sys);
> + PCIBus *pcibus = pci_find_primary_bus();
> +
> + if (pcibus) {
> + SMMU_DPRINTF(CRIT, "Found PCI bus, setting up iommu\n");
> + pci_setup_iommu(pcibus, smmu_init_pci_iommu, s);
> + } else {
> + SMMU_DPRINTF(CRIT, "No PCI bus, SMMU is not registered\n");
> + }
> +}
> +
> +static void smmu_reset(DeviceState *dev)
> +{
> + SMMUV3State *s = SMMU_V3_DEV(dev);
> + smmuv3_init(s);
> +}
> +
> +static int smmu_populate_internal_state(void *opaque, int version_id)
> +{
> + SMMUV3State *s = opaque;
> + /* SMMUState *sys = SMMU_SYS_DEV(s); */
> +
> + smmu_update(s);
> +
> + return 0;
> +}
> +
> +static gboolean smmu_uint64_equal(gconstpointer v1, gconstpointer v2)
> +{
> + return *((const uint64_t *)v1) == *((const uint64_t *)v2);
> +}
> +
> +static guint smmu_uint64_hash(gconstpointer v)
> +{
> + return (guint)*(const uint64_t *)v;
> +}
> +
> +static void smmu_realize(DeviceState *d, Error **errp)
> +{
> + SMMUState *sys = SMMU_SYS_DEV(d);
> + SMMUV3State *s = SMMU_V3_DEV(sys);
> + SysBusDevice *dev = SYS_BUS_DEVICE(d);
> +
> + /* Register Access */
> + memory_region_init_io(&sys->iomem, OBJECT(s),
> + &smmu_mem_ops, sys, TYPE_SMMU_V3_DEV, 0x20000);
> +
> + s->smmu_as_by_busptr = g_hash_table_new_full(smmu_uint64_hash,
> + smmu_uint64_equal,
> + g_free, g_free);
> + sysbus_init_mmio(dev, &sys->iomem);
> +
> + smmu_init_irq(s, dev);
> +
> + smmu_init_iommu_as(s);
> +}
> +
> +static const VMStateDescription vmstate_smmu = {
> + .name = "smmu",
> + .version_id = 1,
> + .minimum_version_id = 1,
> + .post_load = smmu_populate_internal_state,
> + .fields = (VMStateField[]) {
> + VMSTATE_UINT32_ARRAY(cid, SMMUState, 4),
> + VMSTATE_UINT32_ARRAY(pid, SMMUState, 8),
> + VMSTATE_UINT64_ARRAY(regs, SMMUV3State, SMMU_NREGS),
> + VMSTATE_END_OF_LIST(),
> + },
> +};
> +
> +static void smmu_class_init(ObjectClass *klass, void *data)
> +{
> + DeviceClass *dc = DEVICE_CLASS(klass);
> + SMMUBaseClass *sbc = SMMU_DEVICE_CLASS(klass);
> +
> + sbc->translate_64 = smmu_translate_64;
> +
> + dc->reset = smmu_reset;
> + dc->vmsd = &vmstate_smmu;
> + dc->realize = smmu_realize;
> +}
> +
> +static void smmu_base_instance_init(Object *obj)
> +{
> + /* Nothing much to do here as of now */
> +}
> +
> +static void smmu_instance_init(Object *obj)
> +{
> + /* Nothing much to do here as of now */
> +}
> +
> +static const TypeInfo smmu_base_info = {
> + .name = TYPE_SMMU_DEV_BASE,
> + .parent = TYPE_SYS_BUS_DEVICE,
> + .instance_size = sizeof(SMMUV3State),
> + .instance_init = smmu_base_instance_init,
> + .class_size = sizeof(SMMUBaseClass),
> + .abstract = true,
> +};
> +
> +static void smmu_register_types(void)
> +{
> + TypeInfo type_info = {
> + .name = TYPE_SMMU_V3_DEV,
> + .parent = TYPE_SMMU_DEV_BASE,
> + .class_data = NULL,
> + .class_init = smmu_class_init,
> + .instance_init = smmu_instance_init,
> + };
> +
> + type_register_static(&smmu_base_info);
> +
> + type_register(&type_info);
> +}
> +
> +type_init(smmu_register_types)
> +
> diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
> new file mode 100644
> index 0000000..8d34f2a
> --- /dev/null
> +++ b/hw/arm/smmuv3-internal.h
> @@ -0,0 +1,432 @@
> +/*
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, write to the Free Software
> + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
> + *
> + * Copyright (C) 2014-2015 Broadcom Corporation
> + *
> + * Author: Prem Mallappa <address@hidden>
> + *
> + */
> +
> +#ifndef HW_ARM_SMMU_V3_INTERNAL_H
> +#define HW_ARM_SMMU_V3_INTERNAL_H
> +
> +/*****************************
> + * MMIO Register
> + *****************************/
> +enum {
> + SMMU_REG_IDR0 = 0x0,
For all regs, I think you should prefix the names with R_ and also divide
the offsets by 4, e.g.:
R_SMMU_REG_IDR1 = 0x4 / 4,
That way you can index s->regs[R_SMMU_REG_IDR1] directly and remove
smmu_read32_reg.
If you use the REG32 and FIELD macros from the register API, you'll
also be able to use the FIELD_ family of macros (e.g. ARRAY_FIELD_EX32)
to extract fields from the regs.
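Roughly like this (untested sketch; field positions taken from the
SMMU_IDR0_* defines below, and the include path may differ depending on
where REG32/FIELD live in your tree):

    #include "hw/registerfields.h"   /* or hw/register.h */

    REG32(IDR0, 0x0)
        FIELD(IDR0, S2P, 0, 1)
        FIELD(IDR0, S1P, 1, 1)
        FIELD(IDR0, TTF, 2, 2)
    REG32(IDR1, 0x4)
    REG32(CR0,  0x20)

    /* with regs[] turned into a uint32_t array indexed by word: */
    uint32_t idr0 = s->regs[R_IDR0];
    bool s1p = FIELD_EX32(idr0, IDR0, S1P);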
> +
> +#define SMMU_IDR0_S2P (1 << 0)
> +#define SMMU_IDR0_S1P (1 << 1)
> +#define SMMU_IDR0_TTF (0x3 << 2)
> +#define SMMU_IDR0_HTTU (0x3 << 6)
> +#define SMMU_IDR0_HYP (1 << 9)
> +#define SMMU_IDR0_ATS (1 << 10)
> +#define SMMU_IDR0_VMID16 (1 << 18)
> +#define SMMU_IDR0_CD2L (1 << 19)
> +
> + SMMU_REG_IDR1 = 0x4,
> + SMMU_REG_IDR2 = 0x8,
> + SMMU_REG_IDR3 = 0xc,
> + SMMU_REG_IDR4 = 0x10,
> + SMMU_REG_IDR5 = 0x14,
> + SMMU_REG_IIDR = 0x1c,
> + SMMU_REG_CR0 = 0x20,
> +
> +#define SMMU_CR0_SMMU_ENABLE (1 << 0)
> +#define SMMU_CR0_PRIQ_ENABLE (1 << 1)
> +#define SMMU_CR0_EVTQ_ENABLE (1 << 2)
> +#define SMMU_CR0_CMDQ_ENABLE (1 << 3)
> +#define SMMU_CR0_ATS_CHECK (1 << 4)
> +
> + SMMU_REG_CR0_ACK = 0x24,
> + SMMU_REG_CR1 = 0x28,
> + SMMU_REG_CR2 = 0x2c,
> +
> + SMMU_REG_STATUSR = 0x40,
> +
> + SMMU_REG_IRQ_CTRL = 0x50,
> + SMMU_REG_IRQ_CTRL_ACK = 0x54,
> +
> +#define SMMU_IRQ_CTRL_GERROR_EN (1 << 0)
> +#define SMMU_IRQ_CTRL_EVENT_EN (1 << 1)
> +#define SMMU_IRQ_CTRL_PRI_EN (1 << 2)
> +
> + SMMU_REG_GERROR = 0x60,
> +
> +#define SMMU_GERROR_CMDQ (1 << 0)
> +#define SMMU_GERROR_EVENTQ (1 << 2)
> +#define SMMU_GERROR_PRIQ (1 << 3)
> +#define SMMU_GERROR_MSI_CMDQ (1 << 4)
> +#define SMMU_GERROR_MSI_EVENTQ (1 << 5)
> +#define SMMU_GERROR_MSI_PRIQ (1 << 6)
> +#define SMMU_GERROR_MSI_GERROR (1 << 7)
> +#define SMMU_GERROR_SFM_ERR (1 << 8)
> +
> + SMMU_REG_GERRORN = 0x64,
> + SMMU_REG_GERROR_IRQ_CFG0 = 0x68,
> + SMMU_REG_GERROR_IRQ_CFG1 = 0x70,
> + SMMU_REG_GERROR_IRQ_CFG2 = 0x74,
> +
> + /* SMMU_BASE_RA Applies to STRTAB_BASE, CMDQ_BASE and EVTQ_BASE */
> +#define SMMU_BASE_RA (1ULL << 62)
> + SMMU_REG_STRTAB_BASE = 0x80,
> + SMMU_REG_STRTAB_BASE_CFG = 0x88,
> +
> + SMMU_REG_CMDQ_BASE = 0x90,
> + SMMU_REG_CMDQ_PROD = 0x98,
> + SMMU_REG_CMDQ_CONS = 0x9c,
> + /* CMD Consumer (CONS) */
> +#define SMMU_CMD_CONS_ERR_SHIFT 24
> +#define SMMU_CMD_CONS_ERR_BITS 7
> +
> + SMMU_REG_EVTQ_BASE = 0xa0,
> + SMMU_REG_EVTQ_PROD = 0xa8,
> + SMMU_REG_EVTQ_CONS = 0xac,
> + SMMU_REG_EVTQ_IRQ_CFG0 = 0xb0,
> + SMMU_REG_EVTQ_IRQ_CFG1 = 0xb8,
> + SMMU_REG_EVTQ_IRQ_CFG2 = 0xbc,
> +
> + SMMU_REG_PRIQ_BASE = 0xc0,
> + SMMU_REG_PRIQ_PROD = 0xc8,
> + SMMU_REG_PRIQ_CONS = 0xcc,
> + SMMU_REG_PRIQ_IRQ_CFG0 = 0xd0,
> + SMMU_REG_PRIQ_IRQ_CFG1 = 0xd8,
> + SMMU_REG_PRIQ_IRQ_CFG2 = 0xdc,
> +
> + SMMU_ID_REGS_OFFSET = 0xfd0,
> +
> + /* Secure registers are not used for now */
> + SMMU_SECURE_OFFSET = 0x8000,
> +};
> +
> +/*****************************
> + * STE fields
> + *****************************/
> +#define STE_VALID(x) extract32((x)->word[0], 0, 1) /* 0 */
> +#define STE_CONFIG(x) (extract32((x)->word[0], 1, 3) & 0x7)
> +enum {
> + STE_CONFIG_NONE = 0,
> + STE_CONFIG_BYPASS = 4, /* S1 Bypass, S2 Bypass */
> + STE_CONFIG_S1TR = 1, /* S1 Translate, S2 Bypass */
> + STE_CONFIG_S2TR = 2, /* S1 Bypass, S2 Translate */
> + STE_CONFIG_S1TR_S2TR = 3, /* S1 Translate, S2 Translate */
> +};
> +#define STE_S1FMT(x) extract32((x)->word[0], 4, 2)
> +#define STE_S1CDMAX(x) extract32((x)->word[1], 8, 2)
> +#define STE_EATS(x) extract32((x)->word[2], 28, 2)
> +#define STE_STRW(x) extract32((x)->word[2], 30, 2)
> +#define STE_S2VMID(x) extract32((x)->word[4], 0, 16) /* 4 */
> +#define STE_S2T0SZ(x) extract32((x)->word[5], 0, 6) /* 5 */
> +#define STE_S2TG(x) extract32((x)->word[5], 14, 2)
> +#define STE_S2PS(x) extract32((x)->word[5], 16, 3)
> +#define STE_S2AA64(x) extract32((x)->word[5], 19, 1)
> +#define STE_S2HD(x) extract32((x)->word[5], 24, 1)
> +#define STE_S2HA(x) extract32((x)->word[5], 25, 1)
> +#define STE_S2S(x) extract32((x)->word[5], 26, 1)
> +#define STE_CTXPTR(x) \
> + ({ \
> + unsigned long addr; \
> + addr = (uint64_t)extract32((x)->word[1], 0, 16) << 32; \
> + addr |= (uint64_t)((x)->word[0] & 0xffffffc0); \
> + addr; \
> + })
> +
> +#define STE_S2TTB(x) \
> + ({ \
> + unsigned long addr; \
> + addr = (uint64_t)extract32((x)->word[7], 0, 16) << 32; \
> + addr |= (uint64_t)((x)->word[6] & 0xfffffff0); \
> + addr; \
> + })
> +
> +/*****************************
> + * CD fields
> + *****************************/
> +#define CD_VALID(x) extract32((x)->word[0], 30, 1)
> +#define CD_ASID(x) extract32((x)->word[1], 16, 16)
> +#define CD_TTB(x, sel) \
> + ({ \
> + uint64_t hi, lo; \
> + hi = extract32((x)->word[(sel) * 2 + 3], 0, 16); \
> + hi <<= 32; \
> + lo = (x)->word[(sel) * 2 + 2] & ~0xf; \
> + hi | lo; \
> + })
> +
> +#define CD_TSZ(x, sel) extract32((x)->word[0], (16 * (sel)) + 0, 6)
> +#define CD_TG(x, sel) extract32((x)->word[0], (16 * (sel)) + 6, 2)
> +#define CD_EPD(x, sel) extract32((x)->word[0], (16 * (sel)) + 14, 1)
> +
> +#define CD_T0SZ(x) CD_TSZ((x), 0)
> +#define CD_T1SZ(x) CD_TSZ((x), 1)
> +#define CD_TG0(x) CD_TG((x), 0)
> +#define CD_TG1(x) CD_TG((x), 1)
> +#define CD_EPD0(x) CD_EPD((x), 0)
> +#define CD_EPD1(x) CD_EPD((x), 1)
> +#define CD_IPS(x) extract32((x)->word[1], 0, 3)
> +#define CD_AARCH64(x) extract32((x)->word[1], 9, 1)
> +#define CD_TTB0(x) CD_TTB((x), 0)
> +#define CD_TTB1(x) CD_TTB((x), 1)
> +
> +#define CDM_VALID(x) ((x)->word[0] & 0x1)
> +
> +/*****************************
> + * Commands
> + *****************************/
> +enum {
> + SMMU_CMD_PREFETCH_CONFIG = 0x01,
> + SMMU_CMD_PREFETCH_ADDR,
> + SMMU_CMD_CFGI_STE,
> + SMMU_CMD_CFGI_STE_RANGE,
> + SMMU_CMD_CFGI_CD,
> + SMMU_CMD_CFGI_CD_ALL,
> + SMMU_CMD_TLBI_NH_ALL = 0x10,
> + SMMU_CMD_TLBI_NH_ASID,
> + SMMU_CMD_TLBI_NH_VA,
> + SMMU_CMD_TLBI_NH_VAA,
> + SMMU_CMD_TLBI_EL3_ALL = 0x18,
> + SMMU_CMD_TLBI_EL3_VA = 0x1a,
> + SMMU_CMD_TLBI_EL2_ALL = 0x20,
> + SMMU_CMD_TLBI_EL2_ASID,
> + SMMU_CMD_TLBI_EL2_VA,
> + SMMU_CMD_TLBI_EL2_VAA, /* 0x23 */
> + SMMU_CMD_TLBI_S12_VMALL = 0x28,
> + SMMU_CMD_TLBI_S2_IPA = 0x2a,
> + SMMU_CMD_TLBI_NSNH_ALL = 0x30,
> + SMMU_CMD_ATC_INV = 0x40,
> + SMMU_CMD_PRI_RESP,
> + SMMU_CMD_RESUME = 0x44,
> + SMMU_CMD_STALL_TERM,
> + SMMU_CMD_SYNC, /* 0x46 */
> +};
> +
> +/*****************************
> + * CMDQ fields
> + *****************************/
> +
> +enum { /* Command Errors */
> + SMMU_CMD_ERR_NONE = 0,
> + SMMU_CMD_ERR_ILLEGAL,
> + SMMU_CMD_ERR_ABORT
> +};
> +
> +enum { /* Command completion notification */
> + CMD_SYNC_SIG_NONE,
> + CMD_SYNC_SIG_IRQ,
> + CMD_SYNC_SIG_SEV,
> +};
> +
> +#define CMD_TYPE(x) extract32((x)->word[0], 0, 8)
> +#define CMD_SEC(x) extract32((x)->word[0], 9, 1)
> +#define CMD_SEV(x) extract32((x)->word[0], 10, 1)
> +#define CMD_AC(x) extract32((x)->word[0], 12, 1)
> +#define CMD_AB(x) extract32((x)->word[0], 13, 1)
> +#define CMD_CS(x) extract32((x)->word[0], 12, 2)
> +#define CMD_SSID(x) extract32((x)->word[0], 16, 16)
> +#define CMD_SID(x) ((x)->word[1])
> +#define CMD_VMID(x) extract32((x)->word[1], 0, 16)
> +#define CMD_ASID(x) extract32((x)->word[1], 16, 16)
> +#define CMD_STAG(x) extract32((x)->word[2], 0, 16)
> +#define CMD_RESP(x) extract32((x)->word[2], 11, 2)
> +#define CMD_GRPID(x) extract32((x)->word[3], 0, 8)
> +#define CMD_SIZE(x) extract32((x)->word[3], 0, 16)
> +#define CMD_LEAF(x) extract32((x)->word[3], 0, 1)
> +#define CMD_SPAN(x) extract32((x)->word[3], 0, 5)
> +#define CMD_ADDR(x) ({ \
> + uint64_t addr = (uint64_t)(x)->word[3]; \
> + addr <<= 32; \
> + addr |= extract32((x)->word[3], 12, 20); \
> + addr; \
> + })
> +
> +/*****************************
> + * EVTQ fields
> + *****************************/
> +#define EVT_Q_OVERFLOW (1 << 31)
> +
> +#define EVT_SET_TYPE(x, t) deposit32((x)->word[0], 0, 8, t)
> +#define EVT_SET_SID(x, s) ((x)->word[1] = s)
> +#define EVT_SET_INPUT_ADDR(x, addr) ({ \
> + (x)->word[5] = (uint32_t)(addr >> 32); \
> + (x)->word[4] = (uint32_t)(addr & 0xffffffff); \
> + addr; \
> + })
> +
> +/*****************************
> + * Events
> + *****************************/
> +enum evt_err {
> + SMMU_EVT_F_UUT = 0x1,
> + SMMU_EVT_C_BAD_SID,
> + SMMU_EVT_F_STE_FETCH,
> + SMMU_EVT_C_BAD_STE,
> + SMMU_EVT_F_BAD_ATS_REQ,
> + SMMU_EVT_F_STREAM_DISABLED,
> + SMMU_EVT_F_TRANS_FORBIDDEN,
> + SMMU_EVT_C_BAD_SSID,
> + SMMU_EVT_F_CD_FETCH,
> + SMMU_EVT_C_BAD_CD,
> + SMMU_EVT_F_WALK_EXT_ABRT,
> + SMMU_EVT_F_TRANS = 0x10,
> + SMMU_EVT_F_ADDR_SZ,
> + SMMU_EVT_F_ACCESS,
> + SMMU_EVT_F_PERM,
> + SMMU_EVT_F_TLB_CONFLICT = 0x20,
> + SMMU_EVT_F_CFG_CONFLICT = 0x21,
> + SMMU_EVT_E_PAGE_REQ = 0x24,
> +};
> +
> +typedef enum evt_err SMMUEvtErr;
> +
> +
> +/*****************************
> + * SMMU Data structures
> + *****************************/
> +#define ARM_SMMU_FEAT_PASSID_SUPPORT (1 << 24) /* Some random bits for now */
> +#define ARM_SMMU_FEAT_CD_2LVL (1 << 25)
> +
> +struct SMMUQueue {
> + hwaddr base;
> + uint32_t prod;
> + uint32_t cons;
> + union {
> + struct {
> + uint8_t prod:1;
> + uint8_t cons:1;
Hi, Peter generally doesn't like bitfields. I'd stay away from
them unless you have a good case. Just change them to bool.
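I.e. something like (sketch):

    struct {
        bool prod;
        bool cons;
    } wrap;

and then flip them explicitly, e.g. q->wrap.prod = !q->wrap.prod; instead of
relying on the 1-bit ++ overflow to toggle.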
> + };
> + uint8_t unused;
> + } wrap;
> +
> + uint16_t entries; /* Number of entries */
> + uint8_t ent_size; /* Size of entry in bytes */
> + uint8_t shift; /* Size in log2 */
> +};
> +typedef struct SMMUQueue SMMUQueue;
> +
> +#define Q_ENTRY(q, idx) (q->base + q->ent_size * idx)
> +#define Q_WRAP(q, pc) ((pc) >> (q)->shift)
> +#define Q_IDX(q, pc) ((pc) & ((1 << (q)->shift) - 1))
You probably want 1U << (q)->shift
> +
> +struct __smmu_data2 {
> + uint32_t word[2];
> +};
> +
> +struct __smmu_data8 {
> + uint32_t word[8];
> +};
> +
> +struct __smmu_data16 {
> + uint32_t word[16];
> +};
> +
> +struct __smmu_data4 {
> + uint32_t word[4];
> +};
> +
> +typedef struct __smmu_data2 STEDesc; /* STE Level 1 Descriptor */
> +typedef struct __smmu_data16 Ste; /* Stream Table Entry(STE) */
> +typedef struct __smmu_data2 CDDesc; /* CD Level 1 Descriptor */
> +typedef struct __smmu_data16 Cd; /* Context Descriptor(CD) */
> +
> +typedef struct __smmu_data4 Cmd; /* Command Entry */
> +typedef struct __smmu_data8 Evt; /* Event Entry */
> +typedef struct __smmu_data4 Pri; /* PRI entry */
For all of these, I think it would be more useful if you would declare
structs with actual fields representing the data structures.
You can then declare load functions that load the STE from memory and
decode the fields.
E.g.:

typedef struct SMMUv3_STEDesc {
    bool valid;
    .... etc...
} SMMUv3_STEDesc;

void smmuv3_load_ste(AddressSpace *as, dma_addr_t addr, SMMUv3_STEDesc *ste)
{
    uint32_t buf[16];

    dma_memory_read(as, addr, buf, sizeof(buf));
    ste->valid = extract32(buf[0], 0, 1);
}
Then, instead of doing STE_VALID(x), for example, you can just use ste->valid.
> +
> +
> +/*****************************
> + * Broadcom Specific register and bits
> + *****************************/
> +#define SMMU_REG_CNTL (0x410 << 2)
> +#define SMMU_REG_CNTL_1 (0x411 << 2)
> +#define SMMU_REG_INTERRUPT (0x412 << 2)
> +/* BIT encoding is same as SMMU_REG_INTERRUPT, except for last 4 bits */
> +#define SMMU_REG_INTERRUPT_EN (0x413 << 2)
> +
> +#define SMMU_INTR_BMI_ERR (1 << 6) /* Smmu BMI Rd Wr Error*/
> +#define SMMU_INTR_BSI_ERR (1 << 5) /* Smmu BSI Rd Wr Error*/
> +#define SMMU_INTR_SBU_INTR (1 << 4) /* SBU interrupt 0 */
> +#define SMMU_INTR_CMD_SYNC (1 << 3) /* CmdSync completion set to interrupt */
> +#define SMMU_INTR_EVENT (1 << 2) /* high till EventQ.PROD != EventQ.CONS */
> +#define SMMU_INTR_PRI (1 << 1) /* PriQ. high till PriQ.PROD != PriQ.CONS */
> +#define SMMU_INTR_GERROR (1 << 0) /* cleared when GERRORN is written */
> +
> +/*****************************
> + * QEMu related
> + *****************************/
> +
> +typedef struct {
> + SMMUBaseClass smmu_base_class;
> +} SMMUV3Class;
> +
> +#define SMMU_DEVICE_CLASS(klass) \
> + OBJECT_CLASS_CHECK(SMMUBaseClass, (klass), TYPE_SMMU_DEV_BASE)
> +
> +#define SMMU_V3_DEVICE_GET_CLASS(obj) \
> + OBJECT_GET_CLASS(SMMUBaseClass, (obj), TYPE_SMMU_V3_DEV)
> +
> +#ifdef ARM_SMMU_DEBUG
> +static inline void dump_ste(Ste *ste)
> +{
> + int i;
> +
> + for (i = 0; i < ARRAY_SIZE(ste->word); i += 2) {
> + SMMU_DPRINTF(STE, "STE[%2d]: %#010x\t STE[%2d]: %#010x\n",
> + i, ste->word[i], i + 1, ste->word[i + 1]);
> + }
> +}
> +
> +static inline void dump_cd(Cd *cd)
> +{
> + int i;
> + for (i = 0; i < ARRAY_SIZE(cd->word); i += 2) {
> + SMMU_DPRINTF(CD, "CD[%2d]: %#010x\t CD[%2d]: %#010x\n",
> + i, cd->word[i], i + 1, cd->word[i + 1]);
> + }
> +}
> +
> +static inline void dump_evt(Evt *e)
> +{}
> +
> +static inline void dump_cmd(Cmd *cmd)
> +{
> + int i;
> + for (i = 0; i < ARRAY_SIZE(cmd->word); i += 2) {
> + SMMU_DPRINTF(CMDQ, "CMD[%2d]: %#010x\t CMD[%2d]: %#010x\n",
> + i, cmd->word[i], i + 1, cmd->word[i + 1]);
> + }
> +}
> +
> +static void dump_smmutranscfg(SMMUTransCfg *cfg)
> +{
> + int i;
> + SMMU_DPRINTF(TT_1, "TransCFG stage:%d va:%lx pa:%lx s2_needed:%d\n",
> + cfg->stage, cfg->va, cfg->pa, cfg->s2_needed);
> + for (i = 1; i <= 2; i++) {
> + SMMU_DPRINTF(TT_1, "TransCFG i:%d oas:%x tsz:%x ttbr:%lx granule:%x"
> + " va_size:%x gran_sz:%x\n", i, cfg->oas[i], cfg->tsz[i],
> + cfg->ttbr[i], cfg->granule[i], cfg->va_size[i],
> + cfg->granule_sz[i]);
> + }
> +}
> +
> +#else
> +#define dump_ste(...) do {} while (0)
> +#define dump_cd(...) do {} while (0)
> +#define dump_evt(...) do {} while (0)
> +#define dump_cmd(...) do {} while (0)
> +static void dump_smmutranscfg(SMMUTransCfg *cfg) {}
> +#endif
> +
> +#endif
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index e51ed3a..96da537 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -412,10 +412,10 @@ static void vfio_listener_region_add(MemoryListener *listener,
>
> ret = vfio_dma_map(container, iova, int128_get64(llsize),
> vaddr, section->readonly);
> - if (ret) {
> error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
> "0x%"HWADDR_PRIx", %p) = %d (%m)",
> container, iova, int128_get64(llsize), vaddr, ret);
> + if (ret) {
> goto fail;
> }
Shouldn't this be in a separate patch?
>
> --
> 2.9.3
>