Re: [PATCH v2 1/2] hw/riscv: Add fw_cfg support to virt


From: Bin Meng
Subject: Re: [PATCH v2 1/2] hw/riscv: Add fw_cfg support to virt
Date: Sun, 28 Feb 2021 14:06:29 +0800

On Fri, Feb 26, 2021 at 12:31 PM Asherah Connor <ashe@kivikakk.ee> wrote:
>
> Provides fw_cfg for the virt machine on riscv. This enables using
> e.g. ramfb later.
>
> Signed-off-by: Asherah Connor <ashe@kivikakk.ee>
> ---
>
> Changes in v2:
> * Add DMA support (needed for writes).
>
>  hw/riscv/Kconfig        |  1 +
>  hw/riscv/virt.c         | 27 +++++++++++++++++++++++++++
>  include/hw/riscv/virt.h |  4 +++-
>  3 files changed, 31 insertions(+), 1 deletion(-)
>
> diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig
> index facb0cbacc..afaa5e58bb 100644
> --- a/hw/riscv/Kconfig
> +++ b/hw/riscv/Kconfig
> @@ -33,6 +33,7 @@ config RISCV_VIRT
>      select SIFIVE_PLIC
>      select SIFIVE_TEST
>      select VIRTIO_MMIO
> +    select FW_CFG_DMA
>
>  config SIFIVE_E
>      bool
> diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
> index 2299b3a6be..a10f218c43 100644
> --- a/hw/riscv/virt.c
> +++ b/hw/riscv/virt.c
> @@ -56,6 +56,7 @@ static const struct MemmapEntry {
>      [VIRT_PLIC] =        {  0xc000000, VIRT_PLIC_SIZE(VIRT_CPUS_MAX * 2) },
>      [VIRT_UART0] =       { 0x10000000,         0x100 },
>      [VIRT_VIRTIO] =      { 0x10001000,        0x1000 },
> +    [VIRT_FW_CFG] =      { 0x10100000,          0x18 },
>      [VIRT_FLASH] =       { 0x20000000,     0x4000000 },
>      [VIRT_PCIE_ECAM] =   { 0x30000000,    0x10000000 },
>      [VIRT_PCIE_MMIO] =   { 0x40000000,    0x40000000 },
> @@ -488,6 +489,28 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
>      return dev;
>  }
>
> +static FWCfgState *create_fw_cfg(const RISCVVirtState *s)
> +{
> +    hwaddr base = virt_memmap[VIRT_FW_CFG].base;
> +    hwaddr size = virt_memmap[VIRT_FW_CFG].size;
> +    FWCfgState *fw_cfg;
> +    char *nodename;
> +
> +    fw_cfg = fw_cfg_init_mem_wide(base + 8, base, 8, base + 16,
> +                                  &address_space_memory);
> +    fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)MACHINE(s)->smp.cpus);
> +
> +    nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base);
> +    qemu_fdt_add_subnode(s->fdt, nodename);
> +    qemu_fdt_setprop_string(s->fdt, nodename,
> +                            "compatible", "qemu,fw-cfg-mmio");
> +    qemu_fdt_setprop_sized_cells(s->fdt, nodename, "reg",
> +                                 2, base, 2, size);
> +    qemu_fdt_setprop(s->fdt, nodename, "dma-coherent", NULL, 0);
> +    g_free(nodename);
> +    return fw_cfg;
> +}
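
Side note: if I'm reading fw_cfg_init_mem_wide() right, the arguments
above give this register layout (my annotation, not part of the
patch), which also explains the 0x18 bytes reserved in the memmap:

    /*
     * fw_cfg_init_mem_wide(ctl_addr, data_addr, data_width, dma_addr,
     *                      dma_as), called with (base + 8, base, 8,
     *                      base + 16, &address_space_memory):
     *
     *   base + 0x00  data register, 8 bytes wide
     *   base + 0x08  selector (control) register
     *   base + 0x10  DMA address register, 8 bytes
     *
     * => 0x18 bytes total, matching virt_memmap[VIRT_FW_CFG].size
     */
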
> +
>  static void virt_machine_init(MachineState *machine)
>  {
>      const struct MemmapEntry *memmap = virt_memmap;
> @@ -652,6 +675,10 @@ static void virt_machine_init(MachineState *machine)
>          start_addr = virt_memmap[VIRT_FLASH].base;
>      }
>
> +    /* init fw_cfg */

I guess this is put here because riscv_load_fdt() touches the device
tree, so fw_cfg has to be created before that? Maybe a comment is
needed, to keep whoever later wants to move this code around from
breaking the ordering?
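
Something along these lines would do (wording is just a suggestion):

    /*
     * Init fw_cfg.  Must be done before riscv_load_fdt(), otherwise
     * the fdt loaded into dram won't contain the fw-cfg node that
     * create_fw_cfg() adds.
     */
    s->fw_cfg = create_fw_cfg(s);
    rom_set_fw(s->fw_cfg);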

> +    s->fw_cfg = create_fw_cfg(s);
> +    rom_set_fw(s->fw_cfg);
> +
>      /* Compute the fdt load address in dram */
>      fdt_load_addr = riscv_load_fdt(memmap[VIRT_DRAM].base,
>                                     machine->ram_size, s->fdt);
> diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
> index 84b7a3848f..3b81a2e3f6 100644
> --- a/include/hw/riscv/virt.h
> +++ b/include/hw/riscv/virt.h
> @@ -40,6 +40,7 @@ struct RISCVVirtState {
>      RISCVHartArrayState soc[VIRT_SOCKETS_MAX];
>      DeviceState *plic[VIRT_SOCKETS_MAX];
>      PFlashCFI01 *flash[2];
> +    FWCfgState *fw_cfg;
>
>      void *fdt;
>      int fdt_size;
> @@ -58,7 +59,8 @@ enum {
>      VIRT_DRAM,
>      VIRT_PCIE_MMIO,
>      VIRT_PCIE_PIO,
> -    VIRT_PCIE_ECAM
> +    VIRT_PCIE_ECAM,
> +    VIRT_FW_CFG

nit: insert this before VIRT_FLASH, so the enum stays in memmap
address order,
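i.e. something like this (neighbouring entries written from memory,
please double-check against the actual header):

    VIRT_VIRTIO,
    VIRT_FW_CFG,
    VIRT_FLASH,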

>  };
>
>  enum {
> --

Regards,
Bin


