[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v6 21/36] multi-process: PCI BAR read/write handling for proxy & remote endpoints
From: |
Dr. David Alan Gilbert |
Subject: |
Re: [PATCH v6 21/36] multi-process: PCI BAR read/write handling for proxy & remote endpoints |
Date: |
Thu, 9 Apr 2020 19:55:52 +0100 |
User-agent: |
Mutt/1.13.4 (2020-02-15) |
* address@hidden (address@hidden) wrote:
> From: Jagannathan Raman <address@hidden>
>
> Proxy device object implements handler for PCI BAR writes and reads.
> The handler uses BAR_WRITE/BAR_READ message to communicate to the
> remote process with the BAR address and value to be written/read.
> The remote process implements handler for BAR_WRITE/BAR_READ
> message.
>
> Signed-off-by: Jagannathan Raman <address@hidden>
> Signed-off-by: Elena Ufimtseva <address@hidden>
> Signed-off-by: John G Johnson <address@hidden>
Again please see my comments on v5
> ---
> hw/proxy/qemu-proxy.c | 64 ++++++++++++++++++++++++++++++
> include/hw/proxy/qemu-proxy.h | 20 +++++++++-
> include/io/mpqemu-link.h | 12 ++++++
> io/mpqemu-link.c | 6 +++
> remote/remote-main.c | 73 +++++++++++++++++++++++++++++++++++
> 5 files changed, 173 insertions(+), 2 deletions(-)
>
> diff --git a/hw/proxy/qemu-proxy.c b/hw/proxy/qemu-proxy.c
> index 87cf39c672..7fd0a312a5 100644
> --- a/hw/proxy/qemu-proxy.c
> +++ b/hw/proxy/qemu-proxy.c
> @@ -169,3 +169,67 @@ static void pci_proxy_dev_register_types(void)
>
> type_init(pci_proxy_dev_register_types)
>
> +static void send_bar_access_msg(PCIProxyDev *dev, MemoryRegion *mr,
> + bool write, hwaddr addr, uint64_t *val,
> + unsigned size, bool memory)
> +{
> + MPQemuLinkState *mpqemu_link = dev->mpqemu_link;
> + MPQemuMsg msg;
> + int wait;
> +
> + memset(&msg, 0, sizeof(MPQemuMsg));
> +
> + msg.bytestream = 0;
> + msg.size = sizeof(msg.data1);
> + msg.data1.bar_access.addr = mr->addr + addr;
> + msg.data1.bar_access.size = size;
> + msg.data1.bar_access.memory = memory;
> +
> + if (write) {
> + msg.cmd = BAR_WRITE;
> + msg.data1.bar_access.val = *val;
> + } else {
> + wait = GET_REMOTE_WAIT;
> +
> + msg.cmd = BAR_READ;
> + msg.num_fds = 1;
> + msg.fds[0] = wait;
> + }
> +
> + mpqemu_msg_send(&msg, mpqemu_link->dev);
> +
> + if (!write) {
> + *val = wait_for_remote(wait);
> + PUT_REMOTE_WAIT(wait);
> + }
> +}
> +
> +void proxy_default_bar_write(void *opaque, hwaddr addr, uint64_t val,
> + unsigned size)
> +{
> + ProxyMemoryRegion *pmr = opaque;
> +
> + send_bar_access_msg(pmr->dev, &pmr->mr, true, addr, &val, size,
> + pmr->memory);
> +}
> +
> +uint64_t proxy_default_bar_read(void *opaque, hwaddr addr, unsigned size)
> +{
> + ProxyMemoryRegion *pmr = opaque;
> + uint64_t val;
> +
> + send_bar_access_msg(pmr->dev, &pmr->mr, false, addr, &val, size,
> + pmr->memory);
> +
> + return val;
> +}
> +
> +const MemoryRegionOps proxy_default_ops = {
> + .read = proxy_default_bar_read,
> + .write = proxy_default_bar_write,
> + .endianness = DEVICE_NATIVE_ENDIAN,
> + .impl = {
> + .min_access_size = 1,
> + .max_access_size = 1,
> + },
> +};
> diff --git a/include/hw/proxy/qemu-proxy.h b/include/hw/proxy/qemu-proxy.h
> index d7eaf26f29..9e4127eccb 100644
> --- a/include/hw/proxy/qemu-proxy.h
> +++ b/include/hw/proxy/qemu-proxy.h
> @@ -26,14 +26,25 @@
> #define PCI_PROXY_DEV_GET_CLASS(obj) \
> OBJECT_GET_CLASS(PCIProxyDevClass, (obj), TYPE_PCI_PROXY_DEV)
>
> -typedef struct PCIProxyDev {
> +typedef struct PCIProxyDev PCIProxyDev;
> +
> +typedef struct ProxyMemoryRegion {
> + PCIProxyDev *dev;
> + MemoryRegion mr;
> + bool memory;
> + bool present;
> + uint8_t type;
> +} ProxyMemoryRegion;
> +
> +struct PCIProxyDev {
> PCIDevice parent_dev;
>
> MPQemuLinkState *mpqemu_link;
>
> int socket;
>
> -} PCIProxyDev;
> + ProxyMemoryRegion region[PCI_NUM_REGIONS];
> +};
>
> typedef struct PCIProxyDevClass {
> PCIDeviceClass parent_class;
> @@ -43,4 +54,9 @@ typedef struct PCIProxyDevClass {
> char *command;
> } PCIProxyDevClass;
>
> +void proxy_default_bar_write(void *opaque, hwaddr addr, uint64_t val,
> + unsigned size);
> +
> +uint64_t proxy_default_bar_read(void *opaque, hwaddr addr, unsigned size);
> +
> #endif /* QEMU_PROXY_H */
> diff --git a/include/io/mpqemu-link.h b/include/io/mpqemu-link.h
> index 7228a1915e..41cf092f9e 100644
> --- a/include/io/mpqemu-link.h
> +++ b/include/io/mpqemu-link.h
> @@ -31,6 +31,8 @@
> /**
> * mpqemu_cmd_t:
> * SYNC_SYSMEM Shares QEMU's RAM with remote device's RAM
> + * BAR_WRITE Writes to PCI BAR region
> + * BAR_READ Reads from PCI BAR region
> *
> * proc_cmd_t enum type to specify the command to be executed on the remote
> * device.
> @@ -41,6 +43,8 @@ typedef enum {
> CONNECT_DEV,
> PCI_CONFIG_WRITE,
> PCI_CONFIG_READ,
> + BAR_WRITE,
> + BAR_READ,
> MAX,
> } mpqemu_cmd_t;
>
> @@ -56,6 +60,13 @@ typedef struct {
> ram_addr_t offsets[REMOTE_MAX_FDS];
> } sync_sysmem_msg_t;
>
> +typedef struct {
> + hwaddr addr;
> + uint64_t val;
> + unsigned size;
> + bool memory;
> +} bar_access_msg_t;
> +
> /**
> * MPQemuMsg:
> * @cmd: The remote command
> @@ -78,6 +89,7 @@ typedef struct {
> union {
> uint64_t u64;
> sync_sysmem_msg_t sync_sysmem;
> + bar_access_msg_t bar_access;
> } data1;
>
> int fds[REMOTE_MAX_FDS];
> diff --git a/io/mpqemu-link.c b/io/mpqemu-link.c
> index 643c0588ce..2b67ef6410 100644
> --- a/io/mpqemu-link.c
> +++ b/io/mpqemu-link.c
> @@ -367,6 +367,12 @@ bool mpqemu_msg_valid(MPQemuMsg *msg)
> return false;
> }
> break;
> + case BAR_WRITE:
> + case BAR_READ:
> + if (msg->size != sizeof(msg->data1)) {
> + return false;
> + }
> + break;
> default:
> break;
> }
> diff --git a/remote/remote-main.c b/remote/remote-main.c
> index b5ed31f63b..4f512aa5a8 100644
> --- a/remote/remote-main.c
> +++ b/remote/remote-main.c
> @@ -33,6 +33,7 @@
> #include "sysemu/sysemu.h"
> #include "block/block.h"
> #include "exec/ramlist.h"
> +#include "exec/memattrs.h"
>
> static void process_msg(GIOCondition cond, MPQemuLinkState *link,
> MPQemuChannel *chan);
> @@ -102,6 +103,66 @@ exit:
> notify_proxy(wait, ret);
> }
>
> +/* TODO: confirm memtx attrs. */
> +static void process_bar_write(MPQemuMsg *msg, Error **errp)
> +{
> + bar_access_msg_t *bar_access = &msg->data1.bar_access;
> + AddressSpace *as =
> + bar_access->memory ? &address_space_memory : &address_space_io;
> + MemTxResult res;
> +
> + res = address_space_rw(as, bar_access->addr, MEMTXATTRS_UNSPECIFIED,
> +                           (uint8_t *)&bar_access->val, bar_access->size, true);
> +
> + if (res != MEMTX_OK) {
> + error_setg(errp, "Could not perform address space write operation,"
> + " inaccessible address: %lx.", bar_access->addr);
> + }
> +}
> +
> +static void process_bar_read(MPQemuMsg *msg, Error **errp)
> +{
> + bar_access_msg_t *bar_access = &msg->data1.bar_access;
> + AddressSpace *as;
> + int wait = msg->fds[0];
> + MemTxResult res;
> + uint64_t val = 0;
> +
> + as = bar_access->memory ? &address_space_memory : &address_space_io;
> +
> + assert(bar_access->size <= sizeof(uint64_t));
> +
> + res = address_space_rw(as, bar_access->addr, MEMTXATTRS_UNSPECIFIED,
> + (uint8_t *)&val, bar_access->size, false);
> +
> + if (res != MEMTX_OK) {
> + error_setg(errp, "Could not perform address space read operation,"
> + " inaccessible address: %lx.", bar_access->addr);
> + val = (uint64_t)-1;
> + goto fail;
> + }
> +
> + switch (bar_access->size) {
> + case 4:
> + val = *((uint32_t *)&val);
> + break;
> + case 2:
> + val = *((uint16_t *)&val);
> + break;
> + case 1:
> + val = *((uint8_t *)&val);
> + break;
> + default:
> + error_setg(errp, "Invalid PCI BAR read size");
> + return;
> + }
> +
> +fail:
> + notify_proxy(wait, val);
> +
> + PUT_REMOTE_WAIT(wait);
> +}
> +
> static void process_msg(GIOCondition cond, MPQemuLinkState *link,
> MPQemuChannel *chan)
> {
> @@ -131,6 +192,18 @@ static void process_msg(GIOCondition cond, MPQemuLinkState *link,
> case PCI_CONFIG_READ:
> process_config_read(LINK_TO_DEV(link), msg);
> break;
> + case BAR_WRITE:
> + process_bar_write(msg, &err);
> + if (err) {
> + goto finalize_loop;
> + }
> + break;
> + case BAR_READ:
> + process_bar_read(msg, &err);
> + if (err) {
> + goto finalize_loop;
> + }
> + break;
> default:
> error_setg(&err, "Unknown command");
> goto finalize_loop;
> --
> 2.25.GIT
>
--
Dr. David Alan Gilbert / address@hidden / Manchester, UK
- [PATCH v6 10/36] multi-process: build system for remote device process, (continued)
- [PATCH v6 10/36] multi-process: build system for remote device process, elena . ufimtseva, 2020/04/06
- [PATCH v6 12/36] multi-process: add functions to synchronize proxy and remote endpoints, elena . ufimtseva, 2020/04/06
- [PATCH v6 15/36] multi-process: setup memory manager for remote device, elena . ufimtseva, 2020/04/06
- [PATCH v6 17/36] multi-process: introduce proxy object, elena . ufimtseva, 2020/04/06
- [PATCH v6 16/36] multi-process: remote process initialization, elena . ufimtseva, 2020/04/06
- [PATCH v6 14/36] multi-process: setup a machine object for remote device process, elena . ufimtseva, 2020/04/06
- [PATCH v6 21/36] multi-process: PCI BAR read/write handling for proxy & remote endpoints, elena . ufimtseva, 2020/04/06
- Re: [PATCH v6 21/36] multi-process: PCI BAR read/write handling for proxy & remote endpoints,
Dr. David Alan Gilbert <=
- [PATCH v6 18/36] multi-process: Initialize Proxy Object's communication channel, elena . ufimtseva, 2020/04/06
- [PATCH v6 13/36] multi-process: setup PCI host bridge for remote device, elena . ufimtseva, 2020/04/06
- [PATCH v6 19/36] multi-process: Connect Proxy Object with device in the remote process, elena . ufimtseva, 2020/04/06
- [PATCH v6 22/36] multi-process: Synchronize remote memory, elena . ufimtseva, 2020/04/06
- [PATCH v6 26/36] multi-process: add parse_cmdline in remote process, elena . ufimtseva, 2020/04/06
- [PATCH v6 20/36] multi-process: Forward PCI config space accesses to the remote process, elena . ufimtseva, 2020/04/06
- [PATCH v6 25/36] multi-process: Introduce build flags to separate remote process code, elena . ufimtseva, 2020/04/06
- [PATCH v6 28/36] multi-process: send heartbeat messages to remote, elena . ufimtseva, 2020/04/06
- [PATCH v6 29/36] multi-process: handle heartbeat messages in remote process, elena . ufimtseva, 2020/04/06
- [PATCH v6 31/36] multi-process/mon: choose HMP commands based on target, elena . ufimtseva, 2020/04/06