[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH, RFC] Compile most virtio devices only once
From: |
Blue Swirl |
Subject: |
[Qemu-devel] [PATCH, RFC] Compile most virtio devices only once |
Date: |
Sun, 20 Sep 2009 21:36:47 +0300 |
Concentrate CPU and machine dependencies to virtio.c.
Signed-off-by: Blue Swirl <address@hidden>
---
The concentration seems to break device boundaries. Maybe some other
approach would be cleaner?
Only compile tested. At least kvm_enabled() will be broken if
!defined(NEED_CPU_H).
Makefile.hw | 4 +++-
Makefile.target | 5 ++---
balloon.h | 2 --
hw/virtio-balloon.c | 27 +++++++++++++++------------
hw/virtio-blk.c | 6 +++---
hw/virtio-console.c | 4 ++--
hw/virtio-net.c | 29 ++++++++++++++++++++++-------
hw/virtio-pci.c | 2 +-
hw/virtio.c | 9 +++++++++
hw/virtio.h | 3 +++
kvm.h | 8 ++++++++
11 files changed, 68 insertions(+), 31 deletions(-)
diff --git a/Makefile.hw b/Makefile.hw
index 830902b..f4c4ea1 100644
--- a/Makefile.hw
+++ b/Makefile.hw
@@ -12,7 +12,6 @@ QEMU_CFLAGS+=-I.. -I$(SRC_PATH)/fpu
obj-y =
obj-y += loader.o
-obj-y += virtio.o
obj-y += fw_cfg.o
obj-y += watchdog.o
obj-y += nand.o ecc.o
@@ -30,6 +29,9 @@ obj-y += ne2000.o
# SCSI layer
obj-y += lsi53c895a.o esp.o
+# virtio devices
+obj-y += virtio-blk.o virtio-pci.o virtio-balloon.o virtio-net.o virtio-console.o
+
obj-y += dma-helpers.o sysbus.o qdev-addr.o isa-bus.o
all: $(HWLIB)
diff --git a/Makefile.target b/Makefile.target
index e5db55b..5415495 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -157,9 +157,8 @@ ifdef CONFIG_SOFTMMU
obj-y = vl.o monitor.o pci.o isa_mmio.o machine.o \
gdbstub.o gdbstub-xml.o
-# virtio has to be here due to weird dependency between PCI and virtio-net.
-# need to fix this properly
-obj-y += virtio-blk.o virtio-balloon.o virtio-net.o virtio-console.o virtio-pci.o
+# virtio has to be here due to certain CPU and machine dependencies
+obj-y += virtio.o
obj-$(CONFIG_KVM) += kvm.o kvm-all.o
LIBS+=-lz
diff --git a/balloon.h b/balloon.h
index 60b4a5d..54efb02 100644
--- a/balloon.h
+++ b/balloon.h
@@ -14,8 +14,6 @@
#ifndef _QEMU_BALLOON_H
#define _QEMU_BALLOON_H
-#include "cpu-defs.h"
-
typedef ram_addr_t (QEMUBalloonEvent)(void *opaque, ram_addr_t target);
void qemu_add_balloon_handler(QEMUBalloonEvent *func, void *opaque);
diff --git a/hw/virtio-balloon.c b/hw/virtio-balloon.c
index cfd3b41..8f7cea7 100644
--- a/hw/virtio-balloon.c
+++ b/hw/virtio-balloon.c
@@ -13,9 +13,7 @@
#include "qemu-common.h"
#include "virtio.h"
-#include "pc.h"
#include "sysemu.h"
-#include "cpu.h"
#include "balloon.h"
#include "virtio-balloon.h"
#include "kvm.h"
@@ -37,11 +35,11 @@ static VirtIOBalloon *to_virtio_balloon(VirtIODevice *vdev)
return (VirtIOBalloon *)vdev;
}
-static void balloon_page(void *addr, int deflate)
+static void balloon_page(VirtIODevice *vdev, void *addr, int deflate)
{
#if defined(__linux__)
- if (!kvm_enabled() || kvm_has_sync_mmu())
- madvise(addr, TARGET_PAGE_SIZE,
+ if (vdev->balloon_use_madvise)
+ madvise(addr, vdev->page_size,
deflate ? MADV_WILLNEED : MADV_DONTNEED);
#endif
}
@@ -86,17 +84,20 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
elem.out_sg, elem.out_num) == 4) {
ram_addr_t pa;
ram_addr_t addr;
+ uint32_t temp;
- pa = (ram_addr_t)ldl_p(&pfn) << VIRTIO_BALLOON_PFN_SHIFT;
+ /* We assume host and target CPU endianness match */
+ cpu_physical_memory_read(pfn, (uint8_t *)&temp, sizeof(temp));
+ pa = (ram_addr_t)temp << VIRTIO_BALLOON_PFN_SHIFT;
offset += 4;
addr = cpu_get_physical_page_desc(pa);
- if ((addr & ~TARGET_PAGE_MASK) != IO_MEM_RAM)
+ if ((addr & (vdev->page_size - 1)) != IO_MEM_RAM)
continue;
/* Using qemu_get_ram_ptr is bending the rules a bit, but
should be OK because we only want a single page. */
- balloon_page(qemu_get_ram_ptr(addr), !!(vq == s->dvq));
+ balloon_page(vdev, qemu_get_ram_ptr(addr), !!(vq == s->dvq));
}
virtqueue_push(vq, &elem, offset);
@@ -133,15 +134,17 @@ static ram_addr_t virtio_balloon_to_target(void *opaque, ram_addr_t target)
{
VirtIOBalloon *dev = opaque;
- if (target > ram_size)
- target = ram_size;
+ if (target > dev->vdev.ram_size) {
+ target = dev->vdev.ram_size;
+ }
if (target) {
- dev->num_pages = (ram_size - target) >> VIRTIO_BALLOON_PFN_SHIFT;
+ dev->num_pages = (dev->vdev.ram_size - target) >>
+ VIRTIO_BALLOON_PFN_SHIFT;
virtio_notify_config(&dev->vdev);
}
- return ram_size - (dev->actual << VIRTIO_BALLOON_PFN_SHIFT);
+ return dev->vdev.ram_size - (dev->actual << VIRTIO_BALLOON_PFN_SHIFT);
}
static void virtio_balloon_save(QEMUFile *f, void *opaque)
diff --git a/hw/virtio-blk.c b/hw/virtio-blk.c
index 2d6d71a..e512f60 100644
--- a/hw/virtio-blk.c
+++ b/hw/virtio-blk.c
@@ -417,9 +417,9 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
bdrv_get_geometry(s->bs, &capacity);
bdrv_get_geometry_hint(s->bs, &cylinders, &heads, &secs);
memset(&blkcfg, 0, sizeof(blkcfg));
- stq_raw(&blkcfg.capacity, capacity);
- stl_raw(&blkcfg.seg_max, 128 - 2);
- stw_raw(&blkcfg.cylinders, cylinders);
+ blkcfg.capacity = capacity;
+ blkcfg.seg_max = 128 - 2;
+ blkcfg.cylinders = cylinders;
blkcfg.heads = heads;
blkcfg.sectors = secs;
blkcfg.size_max = 0;
diff --git a/hw/virtio-console.c b/hw/virtio-console.c
index 57f8f89..83f6966 100644
--- a/hw/virtio-console.c
+++ b/hw/virtio-console.c
@@ -69,8 +69,8 @@ static int vcon_can_read(void *opaque)
* We fall back to a one byte per read if there is not enough room.
* It would be cool to have a function that returns the available byte
* instead of checking for a limit */
- if (virtqueue_avail_bytes(s->ivq, TARGET_PAGE_SIZE, 0))
- return TARGET_PAGE_SIZE;
+ if (virtqueue_avail_bytes(s->ivq, s->vdev.page_size, 0))
+ return s->vdev.page_size;
if (virtqueue_avail_bytes(s->ivq, 1, 0))
return 1;
return 0;
diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 218f985..d85fee9 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -165,7 +165,8 @@ static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
exit(1);
}
- on = ldub_p(elem->out_sg[1].iov_base);
+ cpu_physical_memory_read((target_phys_addr_t)elem->out_sg[1].iov_base, &on,
+ sizeof(on));
if (cmd == VIRTIO_NET_CTRL_RX_MODE_PROMISC)
n->promisc = on;
@@ -189,6 +190,7 @@ static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd, VirtQueueElement *elem)
{
struct virtio_net_ctrl_mac mac_data;
+ uint32_t temp;
if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET || elem->out_num != 3 ||
elem->out_sg[1].iov_len < sizeof(mac_data) ||
@@ -201,7 +203,9 @@ static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
n->mac_table.multi_overflow = 0;
memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
- mac_data.entries = ldl_le_p(elem->out_sg[1].iov_base);
+ cpu_physical_memory_read((target_phys_addr_t)elem->out_sg[1].iov_base,
+ (uint8_t *)&temp, sizeof(temp));
+ mac_data.entries = le32_to_cpu(temp);
if (sizeof(mac_data.entries) +
(mac_data.entries * ETH_ALEN) > elem->out_sg[1].iov_len)
@@ -217,7 +221,9 @@ static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
n->mac_table.first_multi = n->mac_table.in_use;
- mac_data.entries = ldl_le_p(elem->out_sg[2].iov_base);
+ cpu_physical_memory_read((target_phys_addr_t)elem->out_sg[2].iov_base,
+ (uint8_t *)&temp, sizeof(temp));
+ mac_data.entries = le32_to_cpu(temp);
if (sizeof(mac_data.entries) +
(mac_data.entries * ETH_ALEN) > elem->out_sg[2].iov_len)
@@ -247,7 +253,9 @@ static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
return VIRTIO_NET_ERR;
}
- vid = lduw_le_p(elem->out_sg[1].iov_base);
+ cpu_physical_memory_read((target_phys_addr_t)elem->out_sg[1].iov_base,
+ (uint8_t *)&vid, sizeof(vid));
+ vid = le16_to_cpu(vid);
if (vid >= MAX_VLAN)
return VIRTIO_NET_ERR;
@@ -268,6 +276,7 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
struct virtio_net_ctrl_hdr ctrl;
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
VirtQueueElement elem;
+ uint8_t temp;
while (virtqueue_pop(vq, &elem)) {
if ((elem.in_num < 1) || (elem.out_num < 1)) {
@@ -281,8 +290,12 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
exit(1);
}
- ctrl.class = ldub_p(elem.out_sg[0].iov_base);
- ctrl.cmd = ldub_p(elem.out_sg[0].iov_base + sizeof(ctrl.class));
+ cpu_physical_memory_read((target_phys_addr_t)elem.out_sg[0].iov_base,
+ (uint8_t *)&temp, 1);
+ ctrl.class = temp;
+ cpu_physical_memory_read((target_phys_addr_t)elem.out_sg[0].iov_base +
+ sizeof(ctrl.class), (uint8_t *)&temp, 1);
+ ctrl.cmd = temp;
if (ctrl.class == VIRTIO_NET_CTRL_RX_MODE)
status = virtio_net_handle_rx_mode(n, ctrl.cmd, &elem);
@@ -291,7 +304,9 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
else if (ctrl.class == VIRTIO_NET_CTRL_VLAN)
status = virtio_net_handle_vlan_table(n, ctrl.cmd, &elem);
- stb_p(elem.in_sg[elem.in_num - 1].iov_base, status);
+ cpu_physical_memory_write((target_phys_addr_t)elem.in_sg[elem.in_num - 1].iov_base,
+ (uint8_t *)&status, 1);
virtqueue_push(vq, &elem, sizeof(status));
virtio_notify(vdev, vq);
diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
index bd5a7c4..4318787 100644
--- a/hw/virtio-pci.c
+++ b/hw/virtio-pci.c
@@ -412,7 +412,7 @@ static void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev,
config[0x3d] = 1;
if (vdev->nvectors && !msix_init(&proxy->pci_dev, vdev->nvectors, 1, 0,
- TARGET_PAGE_SIZE)) {
+ vdev->page_size)) {
pci_register_bar(&proxy->pci_dev, 1,
msix_bar_size(&proxy->pci_dev),
PCI_ADDRESS_SPACE_MEM,
diff --git a/hw/virtio.c b/hw/virtio.c
index 337ff27..efd1214 100644
--- a/hw/virtio.c
+++ b/hw/virtio.c
@@ -15,6 +15,7 @@
#include "virtio.h"
#include "sysemu.h"
+#include "kvm.h"
/* The alignment to use between consumer and producer parts of vring.
* x86 pagesize again. */
@@ -714,6 +715,14 @@ VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
else
vdev->config = NULL;
+ /* CPU and machine dependencies */
+ vdev->page_size = TARGET_PAGE_SIZE;
+ if (!kvm_enabled() || kvm_has_sync_mmu()) {
+ vdev->balloon_use_madvise = 1;
+ } else {
+ vdev->balloon_use_madvise = 0;
+ }
+ vdev->ram_size = ram_size;
return vdev;
}
diff --git a/hw/virtio.h b/hw/virtio.h
index c441a93..a705277 100644
--- a/hw/virtio.h
+++ b/hw/virtio.h
@@ -108,6 +108,9 @@ struct VirtIODevice
const VirtIOBindings *binding;
void *binding_opaque;
uint16_t device_id;
+ target_phys_addr_t page_size;
+ int balloon_use_madvise;
+ uint64_t ram_size;
};
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
diff --git a/kvm.h b/kvm.h
index e7d5beb..4a6c83b 100644
--- a/kvm.h
+++ b/kvm.h
@@ -14,7 +14,9 @@
#ifndef QEMU_KVM_H
#define QEMU_KVM_H
+#ifdef NEED_CPU_H
#include "config.h"
+#endif
#include "qemu-queue.h"
#ifdef CONFIG_KVM
@@ -31,9 +33,11 @@ struct kvm_run;
int kvm_init(int smp_cpus);
+#ifdef NEED_CPU_H
int kvm_init_vcpu(CPUState *env);
int kvm_cpu_exec(CPUState *env);
+#endif
void kvm_set_phys_mem(target_phys_addr_t start_addr,
ram_addr_t size,
@@ -53,12 +57,14 @@ void kvm_setup_guest_memory(void *start, size_t size);
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
+#ifdef NEED_CPU_H
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
target_ulong len, int type);
int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
target_ulong len, int type);
void kvm_remove_all_breakpoints(CPUState *current_env);
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap);
+#endif
int kvm_pit_in_kernel(void);
int kvm_irqchip_in_kernel(void);
@@ -72,6 +78,7 @@ int kvm_ioctl(KVMState *s, int type, ...);
int kvm_vm_ioctl(KVMState *s, int type, ...);
+#ifdef NEED_CPU_H
int kvm_vcpu_ioctl(CPUState *env, int type, ...);
int kvm_get_mp_state(CPUState *env);
@@ -138,5 +145,6 @@ static inline void cpu_synchronize_state(CPUState *env)
kvm_cpu_synchronize_state(env);
}
}
+#endif
#endif
--
1.6.2.4
[Prev in Thread] |
Current Thread |
[Next in Thread] |
- [Qemu-devel] [PATCH, RFC] Compile most virtio devices only once,
Blue Swirl <=