qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [RFC PATCH 5/5] net: Introduce e1000e device emulation


From: Leonid Bloch
Subject: [Qemu-devel] [RFC PATCH 5/5] net: Introduce e1000e device emulation
Date: Sun, 25 Oct 2015 19:00:08 +0200

From: Dmitry Fleytman <address@hidden>

This patch introduces emulation for the Intel 82574 adapter, AKA e1000e.

This implementation is based on the e1000 emulation code, and
utilizes the TX/RX packet abstractions initially developed for the
vmxnet3 device. Although some parts of the introduced code are
common with the e1000, the differences are substantial enough so
that the only shared resources for the two devices are the
definitions in hw/net/e1000_regs.h.

Similarly to vmxnet3, the new device uses virtio headers for task
offloads (for backends that support virtio extensions). Usage of
virtio headers may be forcibly disabled via a boolean device property
"vnet" (which is enabled by default). In such case task offloads
will be performed in software, in the same way it is done on
backends that do not support virtio headers.

The device code is split into two parts:

  1. hw/net/e1000e.c: QEMU-specific code for a network device;
  2. hw/net/e1000e_core.[hc]: Device emulation according to the spec.

The new device name is e1000e.

Intel specification for 82574 controller is available at
http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf

Signed-off-by: Dmitry Fleytman <address@hidden>
Signed-off-by: Leonid Bloch <address@hidden>
---
 default-configs/pci.mak |    1 +
 hw/net/Makefile.objs    |    3 +-
 hw/net/e1000e.c         |  531 ++++++++++++
 hw/net/e1000e_core.c    | 2081 +++++++++++++++++++++++++++++++++++++++++++++++
 hw/net/e1000e_core.h    |  181 +++++
 trace-events            |   68 ++
 6 files changed, 2864 insertions(+), 1 deletion(-)
 create mode 100644 hw/net/e1000e.c
 create mode 100644 hw/net/e1000e_core.c
 create mode 100644 hw/net/e1000e_core.h

diff --git a/default-configs/pci.mak b/default-configs/pci.mak
index 58a2c0a..5fd4fcd 100644
--- a/default-configs/pci.mak
+++ b/default-configs/pci.mak
@@ -17,6 +17,7 @@ CONFIG_VMW_PVSCSI_SCSI_PCI=y
 CONFIG_MEGASAS_SCSI_PCI=y
 CONFIG_RTL8139_PCI=y
 CONFIG_E1000_PCI=y
+CONFIG_E1000E_PCI=y
 CONFIG_VMXNET3_PCI=y
 CONFIG_IDE_CORE=y
 CONFIG_IDE_QDEV=y
diff --git a/hw/net/Makefile.objs b/hw/net/Makefile.objs
index 34039fc..67d8efe 100644
--- a/hw/net/Makefile.objs
+++ b/hw/net/Makefile.objs
@@ -6,7 +6,8 @@ common-obj-$(CONFIG_NE2000_PCI) += ne2000.o
 common-obj-$(CONFIG_EEPRO100_PCI) += eepro100.o
 common-obj-$(CONFIG_PCNET_PCI) += pcnet-pci.o
 common-obj-$(CONFIG_PCNET_COMMON) += pcnet.o
-common-obj-$(CONFIG_E1000_PCI) += e1000.o
+common-obj-$(CONFIG_E1000_PCI) += e1000.o e1000e_core.o
+common-obj-$(CONFIG_E1000E_PCI) += e1000e.o e1000e_core.o
 common-obj-$(CONFIG_RTL8139_PCI) += rtl8139.o
 common-obj-$(CONFIG_VMXNET3_PCI) += net_tx_pkt.o net_rx_pkt.o
 common-obj-$(CONFIG_VMXNET3_PCI) += vmxnet3.o
diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c
new file mode 100644
index 0000000..d16bd60
--- /dev/null
+++ b/hw/net/e1000e.c
@@ -0,0 +1,531 @@
+/*
+* QEMU INTEL 82574 GbE NIC emulation
+*
+* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
+*
+* Developed by Daynix Computing LTD (http://www.daynix.com)
+*
+* Authors:
+* Dmitry Fleytman <address@hidden>
+* Leonid Bloch <address@hidden>
+* Yan Vugenfirer <address@hidden>
+*
+* This work is licensed under the terms of the GNU GPL, version 2.
+* See the COPYING file in the top-level directory.
+*
+*/
+
+#include "hw/hw.h"
+#include "hw/pci/pci.h"
+#include "net/net.h"
+#include "net/tap.h"
+#include "sysemu/sysemu.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+
+#include "hw/net/e1000_regs.h"
+
+#include "e1000e_core.h"
+
+#include "trace.h"
+
+#define TYPE_E1000E "e1000e"
+#define E1000E(obj) OBJECT_CHECK(E1000EState, (obj), TYPE_E1000E)
+
+typedef struct {
+    PCIDevice parent_obj;
+    NICState *nic;
+    NICConf conf;
+
+    MemoryRegion mmio;
+    MemoryRegion io;
+    MemoryRegion msix;
+
+    uint32_t intr_state;
+    bool use_vnet;
+
+    E1000ECore core;
+
+} E1000EState;
+
+#define E1000E_MMIO_IDX     0
+#define E1000E_IO_IDX       1
+#define E1000E_MSIX_IDX     2
+
+#define E1000E_MMIO_SIZE    (128*1024)
+#define E1000E_IO_SIZE      (32)
+#define E1000E_MSIX_SIZE    (16*1024)
+
+#define E1000E_MSIX_TABLE   (0x0000)
+#define E1000E_MSIX_PBA     (0x2000)
+
+#define E1000E_USE_MSI     BIT(0)
+#define E1000E_USE_MSIX    BIT(1)
+
+static uint64_t
+e1000e_mmio_read(void *opaque, hwaddr addr, unsigned size)
+{
+    E1000EState *s = opaque;
+    return e1000e_core_read(&s->core, addr, size);
+}
+
+static void
+e1000e_mmio_write(void *opaque, hwaddr addr,
+                  uint64_t val, unsigned size)
+{
+    E1000EState *s = opaque;
+    e1000e_core_write(&s->core, addr, val, size);
+}
+
+static uint64_t
+e1000e_io_read(void *opaque, hwaddr addr, unsigned size)
+{
+    /* TODO: Implement me */
+    trace_e1000e_wrn_io_read(addr, size);
+    return 0;
+}
+
+static void
+e1000e_io_write(void *opaque, hwaddr addr,
+                uint64_t val, unsigned size)
+{
+    /* TODO: Implement me */
+    trace_e1000e_wrn_io_write(addr, size, val);
+}
+
+static const MemoryRegionOps mmio_ops = {
+    .read = e1000e_mmio_read,
+    .write = e1000e_mmio_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .impl = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+};
+
+static const MemoryRegionOps io_ops = {
+    .read = e1000e_io_read,
+    .write = e1000e_io_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .impl = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+};
+
+static int
+_e1000e_can_receive(NetClientState *nc)
+{
+    E1000EState *s = qemu_get_nic_opaque(nc);
+    return e1000e_can_receive(&s->core);
+}
+
+static ssize_t
+_e1000e_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
+{
+    E1000EState *s = qemu_get_nic_opaque(nc);
+    return e1000e_receive_iov(&s->core, iov, iovcnt);
+}
+
+static ssize_t
+_e1000e_receive(NetClientState *nc, const uint8_t *buf, size_t size)
+{
+    E1000EState *s = qemu_get_nic_opaque(nc);
+    return e1000e_receive(&s->core, buf, size);
+}
+
+static void
+e1000e_set_link_status(NetClientState *nc)
+{
+    E1000EState *s = qemu_get_nic_opaque(nc);
+    e1000e_core_set_link_status(&s->core);
+}
+
+static NetClientInfo net_e1000e_info = {
+    .type = NET_CLIENT_OPTIONS_KIND_NIC,
+    .size = sizeof(NICState),
+    .can_receive = _e1000e_can_receive,
+    .receive = _e1000e_receive,
+    .receive_iov = _e1000e_receive_iov,
+    .link_status_changed = e1000e_set_link_status,
+};
+
+/*
+* EEPROM (NVM) contents documented in Table 36, section 6.1.
+*/
+static const uint16_t e1000e_eeprom_template[64] = {
+  /*        Address        |    Compat.    | ImVer |   Compat.     */
+    0x0000, 0x0000, 0x0000, 0x0420, 0xf746, 0x2010, 0xffff, 0xffff,
+  /*      PBA      |ICtrl1 | SSID  | SVID  | DevID |-------|ICtrl2 */
+    0x0000, 0x0000, 0x026b, 0x0000, 0x8086, 0x0000, 0x0000, 0x8058,
+  /*    NVM words 1,2,3    |-------------------------------|PCI-EID*/
+    0x0000, 0x2001, 0x7e7c, 0xffff, 0x1000, 0x00c8, 0x0000, 0x2704,
+  /* PCIe Init. Conf 1,2,3 |PCICtrl|PHY|LD1|-------| RevID | LD0,2 */
+    0x6cc9, 0x3150, 0x070e, 0x460b, 0x2d84, 0x0100, 0xf000, 0x0706,
+  /* FLPAR |FLANADD|LAN-PWR|FlVndr |ICtrl3 |APTSMBA|APTRxEP|APTSMBC*/
+    0x6000, 0x0080, 0x0f04, 0x7fff, 0x4f01, 0xc600, 0x0000, 0x20ff,
+  /* APTIF | APTMC |APTuCP |LSWFWID|MSWFWID|NC-SIMC|NC-SIC | VPDP  */
+    0x0028, 0x0003, 0x0000, 0x0000, 0x0000, 0x0003, 0x0000, 0xffff,
+  /*                            SW Section                         */
+    0x0100, 0xc000, 0x121c, 0xc007, 0xffff, 0xffff, 0xffff, 0xffff,
+  /*                      SW Section                       |CHKSUM */
+    0xffff, 0xffff, 0xffff, 0xffff, 0x0000, 0x0120, 0xffff, 0x0000,
+};
+
+static void _e1000e_core_reinitialize(E1000EState *s)
+{
+    s->core.owner = &s->parent_obj;
+    s->core.owner_nic = s->nic;
+}
+
+static void
+_e1000e_init_msi(E1000EState *s)
+{
+    int res;
+
+    res = msi_init(PCI_DEVICE(s),
+                   0xD0,   /* MSI capability offset              */
+                   1,      /* MAC MSI interrupts                 */
+                   true,   /* 64-bit message addresses supported */
+                   false); /* Per vector mask supported          */
+
+    if (res > 0) {
+        s->intr_state |= E1000E_USE_MSI;
+    } else {
+        trace_e1000e_msi_init_fail(res);
+    }
+}
+
+static void
+_e1000e_cleanup_msi(E1000EState *s)
+{
+    if (s->intr_state & E1000E_USE_MSI) {
+        msi_uninit(PCI_DEVICE(s));
+    }
+}
+
+static void
+_e1000e_unuse_msix_vectors(E1000EState *s, int num_vectors)
+{
+    int i;
+    for (i = 0; i < num_vectors; i++) {
+        msix_vector_unuse(PCI_DEVICE(s), i);
+    }
+}
+
+static bool
+_e1000e_use_msix_vectors(E1000EState *s, int num_vectors)
+{
+    int i;
+    for (i = 0; i < num_vectors; i++) {
+        int res = msix_vector_use(PCI_DEVICE(s), i);
+        if (res < 0) {
+            trace_e1000e_msix_use_vector_fail(i, res);
+            _e1000e_unuse_msix_vectors(s, i);
+            return false;
+        }
+    }
+    return true;
+}
+
+static void
+_e1000e_init_msix(E1000EState *s)
+{
+    PCIDevice *d = PCI_DEVICE(s);
+    int res = msix_init(PCI_DEVICE(s), E1000E_MSIX_VEC_NUM,
+                        &s->msix,
+                        E1000E_MSIX_IDX, E1000E_MSIX_TABLE,
+                        &s->msix,
+                        E1000E_MSIX_IDX, E1000E_MSIX_PBA,
+                        0xA0);
+
+    if (0 > res) {
+        trace_e1000e_msix_init_fail(res);
+    } else {
+        if (!_e1000e_use_msix_vectors(s, E1000E_MSIX_VEC_NUM)) {
+            msix_uninit(d, &s->msix, &s->msix);
+        } else {
+            s->intr_state |= E1000E_USE_MSIX;
+        }
+    }
+}
+
+static void
+_e1000e_cleanup_msix(E1000EState *s)
+{
+    if (s->intr_state & E1000E_USE_MSIX) {
+        _e1000e_unuse_msix_vectors(s, E1000E_MSIX_VEC_NUM);
+        msix_uninit(PCI_DEVICE(s), &s->msix, &s->msix);
+    }
+}
+
+static void
+_e1000e_init_net_peer(E1000EState *s, PCIDevice *pci_dev, uint8_t *macaddr)
+{
+    DeviceState *dev = DEVICE(pci_dev);
+    NetClientState *nc;
+
+    s->nic = qemu_new_nic(&net_e1000e_info, &s->conf,
+        object_get_typename(OBJECT(s)), dev->id, s);
+
+    qemu_format_nic_info_str(qemu_get_queue(s->nic), macaddr);
+
+    nc = qemu_get_queue(s->nic);
+
+    s->core.has_vnet = (s->use_vnet && nc->peer) ?
+        qemu_has_vnet_hdr(nc->peer) : false;
+
+    trace_e1000e_cfg_support_virtio(s->core.has_vnet);
+
+    if (s->core.has_vnet) {
+        qemu_set_vnet_hdr_len(nc->peer, sizeof(struct virtio_net_hdr));
+        qemu_using_vnet_hdr(nc->peer, true);
+        qemu_set_offload(nc->peer, 1, 0, 0, 0, 0);
+    }
+}
+
+static void e1000e_pci_realize(PCIDevice *pci_dev, Error **errp)
+{
+    E1000EState *s = E1000E(pci_dev);
+    uint8_t *macaddr;
+
+    trace_e1000e_cb_pci_realize();
+
+    pci_dev->config[PCI_CACHE_LINE_SIZE] = 0x10;
+    pci_dev->config[PCI_INTERRUPT_PIN] = 1;
+
+    /* Define IO/MMIO regions */
+    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_ops, s,
+                          "e1000e-mmio", E1000E_MMIO_SIZE);
+    pci_register_bar(pci_dev, E1000E_MMIO_IDX,
+                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio);
+
+    memory_region_init_io(&s->io, OBJECT(s), &io_ops, s,
+                          "e1000e-io", E1000E_IO_SIZE);
+    pci_register_bar(pci_dev, E1000E_IO_IDX,
+                     PCI_BASE_ADDRESS_SPACE_IO, &s->io);
+
+    memory_region_init(&s->msix, OBJECT(s), "e1000e-msix",
+                       E1000E_MSIX_SIZE);
+    pci_register_bar(pci_dev, E1000E_MSIX_IDX,
+                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix);
+
+    /* Create networking backend */
+    qemu_macaddr_default_if_unset(&s->conf.macaddr);
+    macaddr = s->conf.macaddr.a;
+
+    _e1000e_init_net_peer(s, pci_dev, macaddr);
+
+    _e1000e_init_msi(s);
+    _e1000e_init_msix(s);
+
+    /* Initialize registers interface */
+    _e1000e_core_reinitialize(s);
+
+    e1000e_core_pci_realize(&s->core,
+                           e1000e_eeprom_template,
+                           sizeof(e1000e_eeprom_template),
+                           macaddr);
+}
+
+static void e1000e_pci_uninit(PCIDevice *pci_dev)
+{
+    E1000EState *s = E1000E(pci_dev);
+
+    trace_e1000e_cb_pci_uninit();
+
+    e1000e_core_pci_uninit(&s->core);
+    qemu_del_nic(s->nic);
+
+    _e1000e_cleanup_msix(s);
+    _e1000e_cleanup_msi(s);
+}
+
+static void
+e1000e_write_config(PCIDevice *pci_dev, uint32_t addr, uint32_t val, int len)
+{
+    trace_e1000e_cb_write_config();
+
+    pci_default_write_config(pci_dev, addr, val, len);
+    msi_write_config(pci_dev, addr, val, len);
+    msix_write_config(pci_dev, addr, val, len);
+}
+
+static void e1000e_qdev_reset(DeviceState *dev)
+{
+    E1000EState *s = E1000E(dev);
+    uint8_t *macaddr = s->conf.macaddr.a;
+
+    trace_e1000e_cb_qdev_reset();
+
+    e1000e_core_reset(&s->core, macaddr, E1000_PHY_ID2_82574x);
+    qemu_format_nic_info_str(qemu_get_queue(s->nic), macaddr);
+}
+
+static void e1000e_pre_save(void *opaque)
+{
+    E1000EState *s = opaque;
+
+    trace_e1000e_cb_pre_save();
+
+    e1000e_core_pre_save(&s->core);
+}
+
+static int e1000e_post_load(void *opaque, int version_id)
+{
+    E1000EState *s = opaque;
+
+    _e1000e_core_reinitialize(s);
+
+    trace_e1000e_cb_post_load();
+
+    return e1000e_core_post_load(&s->core);
+}
+
+static bool e1000e_mit_state_needed(void *opaque)
+{
+    E1000EState *s = opaque;
+
+    return s->core.compat_flags & E1000_FLAG_MIT;
+}
+
+static const VMStateDescription vmstate_e1000e_mit_state = {
+    .name = "e1000e/mit_state",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(core.mac[RDTR], E1000EState),
+        VMSTATE_UINT32(core.mac[RADV], E1000EState),
+        VMSTATE_UINT32(core.mac[TADV], E1000EState),
+        VMSTATE_UINT32(core.mac[ITR], E1000EState),
+        VMSTATE_BOOL(core.mit_irq_level, E1000EState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_e1000e = {
+    .name = "e1000e",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .pre_save = e1000e_pre_save,
+    .post_load = e1000e_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_PCI_DEVICE(parent_obj, E1000EState),
+        VMSTATE_MSIX(parent_obj, E1000EState),
+
+        VMSTATE_UINT32(intr_state, E1000EState),
+        VMSTATE_UINT32(core.rxbuf_min_shift, E1000EState),
+        VMSTATE_UINT8(core.rx_desc_len, E1000EState),
+        VMSTATE_UINT32_ARRAY(core.rxbuf_sizes, E1000EState,
+                             E1000_PSRCTL_BUFFS_PER_DESC),
+        VMSTATE_UINT32(core.rx_desc_buf_size, E1000EState),
+        VMSTATE_UINT32(core.eecd_state.val_in, E1000EState),
+        VMSTATE_UINT16(core.eecd_state.bitnum_in, E1000EState),
+        VMSTATE_UINT16(core.eecd_state.bitnum_out, E1000EState),
+        VMSTATE_UINT16(core.eecd_state.reading, E1000EState),
+        VMSTATE_UINT32(core.eecd_state.old_eecd, E1000EState),
+        VMSTATE_UINT16_ARRAY(core.eeprom, E1000EState, E1000E_EEPROM_SIZE),
+        VMSTATE_UINT16_ARRAY(core.phy, E1000EState, E1000E_PHY_SIZE),
+        VMSTATE_UINT32_ARRAY(core.mac, E1000EState, E1000E_MAC_SIZE),
+
+        VMSTATE_UINT8(core.tx[0].sum_needed, E1000EState),
+        VMSTATE_UINT8(core.tx[0].ipcss, E1000EState),
+        VMSTATE_UINT8(core.tx[0].ipcso, E1000EState),
+        VMSTATE_UINT16(core.tx[0].ipcse, E1000EState),
+        VMSTATE_UINT8(core.tx[0].tucss, E1000EState),
+        VMSTATE_UINT8(core.tx[0].tucso, E1000EState),
+        VMSTATE_UINT16(core.tx[0].tucse, E1000EState),
+        VMSTATE_UINT8(core.tx[0].hdr_len, E1000EState),
+        VMSTATE_UINT16(core.tx[0].mss, E1000EState),
+        VMSTATE_UINT32(core.tx[0].paylen, E1000EState),
+        VMSTATE_INT8(core.tx[0].ip, E1000EState),
+        VMSTATE_INT8(core.tx[0].tcp, E1000EState),
+        VMSTATE_BOOL(core.tx[0].tse, E1000EState),
+        VMSTATE_BOOL(core.tx[0].cptse, E1000EState),
+        VMSTATE_BOOL(core.tx[0].skip_cp, E1000EState),
+
+        VMSTATE_UINT8(core.tx[1].sum_needed, E1000EState),
+        VMSTATE_UINT8(core.tx[1].ipcss, E1000EState),
+        VMSTATE_UINT8(core.tx[1].ipcso, E1000EState),
+        VMSTATE_UINT16(core.tx[1].ipcse, E1000EState),
+        VMSTATE_UINT8(core.tx[1].tucss, E1000EState),
+        VMSTATE_UINT8(core.tx[1].tucso, E1000EState),
+        VMSTATE_UINT16(core.tx[1].tucse, E1000EState),
+        VMSTATE_UINT8(core.tx[1].hdr_len, E1000EState),
+        VMSTATE_UINT16(core.tx[1].mss, E1000EState),
+        VMSTATE_UINT32(core.tx[1].paylen, E1000EState),
+        VMSTATE_INT8(core.tx[1].ip, E1000EState),
+        VMSTATE_INT8(core.tx[1].tcp, E1000EState),
+        VMSTATE_BOOL(core.tx[1].tse, E1000EState),
+        VMSTATE_BOOL(core.tx[1].cptse, E1000EState),
+        VMSTATE_BOOL(core.tx[1].skip_cp, E1000EState),
+
+        VMSTATE_BOOL(core.has_vnet, E1000EState),
+
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (VMStateSubsection[]) {
+        {
+            .vmsd = &vmstate_e1000e_mit_state,
+            .needed = e1000e_mit_state_needed,
+        }, {
+            /* empty */
+        }
+    }
+};
+
+static Property e1000e_properties[] = {
+    DEFINE_NIC_PROPERTIES(E1000EState, conf),
+    DEFINE_PROP_BIT("autonegotiation", E1000EState,
+                    core.compat_flags, E1000_FLAG_AUTONEG_BIT, true),
+    DEFINE_PROP_BIT("mitigation", E1000EState,
+                    core.compat_flags, E1000_FLAG_MIT_BIT, true),
+    DEFINE_PROP_BOOL("vnet", E1000EState, use_vnet, true),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void e1000e_class_init(ObjectClass *class, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(class);
+    PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
+
+    c->realize = e1000e_pci_realize;
+    c->exit = e1000e_pci_uninit;
+    c->vendor_id = PCI_VENDOR_ID_INTEL;
+    c->device_id = E1000_DEV_ID_82574L;
+    c->revision = 0;
+    c->class_id = PCI_CLASS_NETWORK_ETHERNET;
+    c->subsystem_vendor_id = PCI_VENDOR_ID_INTEL;
+    c->subsystem_id = 0;
+    c->config_write = e1000e_write_config;
+
+    dc->desc = "Intel 82574L GbE Controller";
+    dc->reset = e1000e_qdev_reset;
+    dc->vmsd = &vmstate_e1000e;
+    dc->props = e1000e_properties;
+
+    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
+}
+
+static void e1000e_instance_init(Object *obj)
+{
+    E1000EState *s = E1000E(obj);
+    device_add_bootindex_property(obj, &s->conf.bootindex,
+                                  "bootindex", "/address@hidden",
+                                  DEVICE(obj), NULL);
+}
+
+static const TypeInfo e1000e_info = {
+    .name = TYPE_E1000E,
+    .parent = TYPE_PCI_DEVICE,
+    .instance_size = sizeof(E1000EState),
+    .class_init = e1000e_class_init,
+    .instance_init = e1000e_instance_init,
+};
+
+static void e1000e_register_types(void)
+{
+    type_register_static(&e1000e_info);
+}
+
+type_init(e1000e_register_types)
diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c
new file mode 100644
index 0000000..f099784
--- /dev/null
+++ b/hw/net/e1000e_core.c
@@ -0,0 +1,2081 @@
+/*
+* Core code for QEMU e1000e emulation
+*
+* Software developer's manuals:
+* 
http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
+*
+* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
+* Developed by Daynix Computing LTD (http://www.daynix.com)
+*
+* Authors:
+* Dmitry Fleytman <address@hidden>
+* Leonid Bloch <address@hidden>
+* Yan Vugenfirer <address@hidden>
+*
+* Based on work done by:
+* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+* Copyright (c) 2008 Qumranet
+* Based on work done by:
+* Copyright (c) 2007 Dan Aloni
+* Copyright (c) 2004 Antony T Curtis
+*
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "hw/hw.h"
+#include "hw/pci/pci.h"
+#include "net/net.h"
+#include "net/tap.h"
+#include "net/checksum.h"
+#include "sysemu/sysemu.h"
+#include "qemu/iov.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+
+#include "net_tx_pkt.h"
+#include "net_rx_pkt.h"
+
+#include "e1000_regs.h"
+#include "e1000e_core.h"
+
+#include "trace.h"
+
+static const uint8_t E1000E_MAX_TX_FRAGS = 64;
+
+static void
+set_interrupt_cause(E1000ECore *core, uint32_t val);
+
+static inline int
+vlan_enabled(E1000ECore *core)
+{
+    return ((core->mac[CTRL] & E1000_CTRL_VME) != 0);
+}
+
+static inline int
+is_vlan_txd(uint32_t txd_lower)
+{
+    return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
+}
+
+static inline void
+inc_reg_if_not_full(E1000ECore *core, int index)
+{
+    if (core->mac[index] != 0xffffffff) {
+        core->mac[index]++;
+    }
+}
+
+static void
+grow_8reg_if_not_full(E1000ECore *core, int index, int size)
+{
+    uint64_t sum = core->mac[index] | (uint64_t)core->mac[index+1] << 32;
+
+    if (sum + size < sum) {
+        sum = ~0ULL;
+    } else {
+        sum += size;
+    }
+    core->mac[index] = sum;
+    core->mac[index+1] = sum >> 32;
+}
+
+static void
+increase_size_stats(E1000ECore *core, const int *size_regs, int size)
+{
+    if (size > 1023) {
+        inc_reg_if_not_full(core, size_regs[5]);
+    } else if (size > 511) {
+        inc_reg_if_not_full(core, size_regs[4]);
+    } else if (size > 255) {
+        inc_reg_if_not_full(core, size_regs[3]);
+    } else if (size > 127) {
+        inc_reg_if_not_full(core, size_regs[2]);
+    } else if (size > 64) {
+        inc_reg_if_not_full(core, size_regs[1]);
+    } else if (size == 64) {
+        inc_reg_if_not_full(core, size_regs[0]);
+    }
+}
+
+static inline void
+process_ts_option(E1000ECore *core, struct e1000_tx_desc *dp)
+{
+    if (le32_to_cpu(dp->upper.data) & E1000_TXD_EXTCMD_TSTAMP) {
+        trace_e1000e_wrn_no_ts_support();
+    }
+}
+
+static inline void
+process_snap_option(E1000ECore *core, uint32_t cmd_and_length)
+{
+    if (cmd_and_length & E1000_TXD_CMD_SNAP) {
+        trace_e1000e_wrn_no_snap_support();
+    }
+}
+
+static void
+_e1000e_setup_tx_offloads(E1000ECore *core, struct e1000_tx *tx)
+{
+    if (tx->tse && tx->cptse) {
+        net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->mss);
+        net_tx_pkt_update_ip_checksums(tx->tx_pkt);
+        inc_reg_if_not_full(core, TSCTC);
+        return;
+    }
+
+    if (tx->sum_needed & E1000_TXD_POPTS_TXSM) {
+        net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0);
+    }
+
+    if (tx->sum_needed & E1000_TXD_POPTS_IXSM) {
+        net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
+    }
+}
+
+static bool
+_e1000e_tx_pkt_send(E1000ECore *core, struct e1000_tx *tx)
+{
+    NetClientState *queue = qemu_get_queue(core->owner_nic);
+
+    _e1000e_setup_tx_offloads(core, tx);
+
+    net_tx_pkt_dump(tx->tx_pkt);
+
+    if ((core->phy[PHY_CTRL] & MII_CR_LOOPBACK) ||
+        ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) {
+        return net_tx_pkt_send_loopback(tx->tx_pkt, queue);
+    } else {
+        return net_tx_pkt_send(tx->tx_pkt, queue);
+    }
+}
+
+static void
+_e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt)
+{
+    static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
+                                    PTC1023, PTC1522 };
+
+    size_t tot_len = net_tx_pkt_get_total_len(tx_pkt);
+
+    increase_size_stats(core, PTCregs, tot_len);
+    inc_reg_if_not_full(core, TPT);
+    grow_8reg_if_not_full(core, TOTL, tot_len);
+
+    switch (net_tx_pkt_get_packet_type(tx_pkt)) {
+    case ETH_PKT_BCAST:
+        inc_reg_if_not_full(core, BPTC);
+        break;
+    case ETH_PKT_MCAST:
+        inc_reg_if_not_full(core, MPTC);
+        break;
+    case ETH_PKT_UCAST:
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    core->mac[GPTC] = core->mac[TPT];
+    core->mac[GOTCL] = core->mac[TOTL];
+    core->mac[GOTCH] = core->mac[TOTH];
+}
+
+static void
+process_tx_desc(E1000ECore *core,
+                struct e1000_tx *tx,
+                struct e1000_tx_desc *dp)
+{
+    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
+    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
+    unsigned int split_size = txd_lower & 0xffff, op;
+    uint64_t addr;
+    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
+    bool eop = txd_lower & E1000_TXD_CMD_EOP;
+
+    core->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
+    if (dtype == E1000_TXD_CMD_DEXT) {    /* context descriptor */
+        op = le32_to_cpu(xp->cmd_and_length);
+        tx->ipcss = xp->lower_setup.ip_fields.ipcss;
+        tx->ipcso = xp->lower_setup.ip_fields.ipcso;
+        tx->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
+        tx->tucss = xp->upper_setup.tcp_fields.tucss;
+        tx->tucso = xp->upper_setup.tcp_fields.tucso;
+        tx->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
+        tx->paylen = op & 0xfffff;
+        tx->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
+        tx->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
+        tx->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
+        tx->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
+        tx->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
+        if (tx->tucso == 0) { /* this is probably wrong */
+            trace_e1000e_tx_cso_zero();
+            tx->tucso = tx->tucss + (tx->tcp ? 16 : 6);
+        }
+        process_snap_option(core, op);
+        return;
+    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
+        /* data descriptor */
+        tx->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
+        tx->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
+        process_ts_option(core, dp);
+    } else {
+        /* legacy descriptor */
+        process_ts_option(core, dp);
+        tx->cptse = 0;
+    }
+
+    addr = le64_to_cpu(dp->buffer_addr);
+
+    if (!tx->skip_cp) {
+        if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, addr, split_size)) {
+            tx->skip_cp = true;
+        }
+    }
+
+    if (eop) {
+        if (!tx->skip_cp) {
+            net_tx_pkt_parse(tx->tx_pkt);
+            if (vlan_enabled(core) && is_vlan_txd(txd_lower)) {
+                net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt,
+                    le16_to_cpu(core->mac[VET]),
+                    le16_to_cpu(dp->upper.fields.special));
+            }
+            if (_e1000e_tx_pkt_send(core, tx)) {
+                _e1000e_on_tx_done_update_stats(core, tx->tx_pkt);
+            }
+        }
+
+        tx->skip_cp = false;
+        net_tx_pkt_reset(tx->tx_pkt);
+
+        tx->sum_needed = 0;
+        tx->cptse = 0;
+    }
+}
+
+static inline uint32_t
+_e1000e_tx_wb_interrupt_cause(E1000ECore *core)
+{
+    return msix_enabled(core->owner) ? E1000_ICR_TXQ0 : E1000_ICR_TXDW;
+}
+
+static inline uint32_t
+_e1000e_rx_wb_interrupt_cause(E1000ECore *core)
+{
+    return msix_enabled(core->owner) ? E1000_ICR_RXQ0 : E1000_ICS_RXT0;
+}
+
+static uint32_t
+txdesc_writeback(E1000ECore *core, dma_addr_t base, struct e1000_tx_desc *dp)
+{
+    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);
+
+    if (!(txd_lower & E1000_TXD_CMD_RS)) {
+        return 0;
+    }
+
+    txd_upper = le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD;
+
+    dp->upper.data = cpu_to_le32(txd_upper);
+    pci_dma_write(core->owner, base + ((char *)&dp->upper - (char *)dp),
+                  &dp->upper, sizeof(dp->upper));
+    return _e1000e_tx_wb_interrupt_cause(core);
+}
+
+typedef struct tx_ring_st {
+    int tdbah;
+    int tdbal;
+    int tdlen;
+    int tdh;
+    int tdt;
+
+    struct e1000_tx *tx;
+} tx_ring;
+
+static inline int
+_e1000e_mq_reg(int reg_idx, int queue_idx)
+{
+    return reg_idx + (0x100 >> 2) * queue_idx;
+}
+
+static inline int
+_e1000e_mq_queue_idx(int base_reg_idx, int reg_idx)
+{
+    return (reg_idx - base_reg_idx) / (0x100 >> 2);
+}
+
+static inline void
+_e1000e_tx_ring_init(E1000ECore *core, tx_ring *txr, int idx)
+{
+    txr->tdbah = _e1000e_mq_reg(TDBAH, idx);
+    txr->tdbal = _e1000e_mq_reg(TDBAL, idx);
+    txr->tdlen = _e1000e_mq_reg(TDLEN, idx);
+    txr->tdh   = _e1000e_mq_reg(TDH,   idx);
+    txr->tdt   = _e1000e_mq_reg(TDT,   idx);
+
+    txr->tx    = &core->tx[idx];
+}
+
+static uint64_t tx_desc_base(E1000ECore *core, const tx_ring *txr)
+{
+    uint64_t bah = core->mac[txr->tdbah];
+    uint64_t bal = core->mac[txr->tdbal] & ~0xf;
+
+    return (bah << 32) + bal;
+}
+
+static void
+start_xmit(E1000ECore *core, const tx_ring *txr)
+{
+    dma_addr_t base;
+    struct e1000_tx_desc desc;
+    uint32_t tdh_start = core->mac[txr->tdh], cause = E1000_ICS_TXQE;
+
+    if (!(core->mac[TCTL] & E1000_TCTL_EN)) {
+        trace_e1000e_tx_disabled();
+        return;
+    }
+
+    while (core->mac[txr->tdh] != core->mac[txr->tdt]) {
+        base = tx_desc_base(core, txr) +
+               sizeof(struct e1000_tx_desc) * core->mac[txr->tdh];
+        pci_dma_read(core->owner, base, &desc, sizeof(desc));
+
+        trace_e1000e_tx_descr(core->mac[txr->tdh],
+               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
+               desc.upper.data);
+
+        process_tx_desc(core, txr->tx, &desc);
+        cause |= txdesc_writeback(core, base, &desc);
+
+        if (++core->mac[txr->tdh] * sizeof(desc) >= core->mac[txr->tdlen]) {
+            core->mac[txr->tdh] = 0;
+        }
+        /*
+         * the following could happen only if guest sw assigns
+         * bogus values to TDT/TDLEN.
+         * there's nothing too intelligent we could do about this.
+         */
+        if (core->mac[txr->tdh] == tdh_start) {
+            trace_e1000e_tdh_wraparound(tdh_start,
+                                        core->mac[txr->tdt],
+                                        core->mac[txr->tdlen]);
+            break;
+        }
+    }
+
+    set_interrupt_cause(core, cause);
+}
+
+static bool
+_e1000e_has_rxbufs(E1000ECore *core, size_t total_size)
+{
+    int bufs;
+    /* Fast-path short packets */
+    if (total_size <= core->rx_desc_buf_size) {
+        return core->mac[RDH] != core->mac[RDT];
+    }
+    if (core->mac[RDH] < core->mac[RDT]) {
+        bufs = core->mac[RDT] - core->mac[RDH];
+    } else if (core->mac[RDH] > core->mac[RDT]) {
+        bufs = core->mac[RDLEN] / core->rx_desc_len +
+            core->mac[RDT] - core->mac[RDH];
+    } else {
+        return false;
+    }
+    return total_size <= bufs * core->rx_desc_buf_size;
+}
+
+static void
+start_recv(E1000ECore *core)
+{
+    if (_e1000e_has_rxbufs(core, 1)) {
+        qemu_flush_queued_packets(qemu_get_queue(core->owner_nic));
+    }
+}
+
+int
+e1000e_can_receive(E1000ECore *core)
+{
+    return (core->mac[STATUS] & E1000_STATUS_LU) &&
+        (core->mac[RCTL] & E1000_RCTL_EN) &&
+        (core->owner->config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
+        _e1000e_has_rxbufs(core, 1);
+}
+
+ssize_t
+e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size)
+{
+    const struct iovec iov = {
+        .iov_base = (uint8_t *)buf,
+        .iov_len = size
+    };
+
+    return e1000e_receive_iov(core, &iov, 1);
+}
+
+static inline int
+vlan_rx_filter_enabled(E1000ECore *core)
+{
+    return ((core->mac[RCTL] & E1000_RCTL_VFE) != 0);
+}
+
+static inline int
+is_vlan_packet(E1000ECore *core, const uint8_t *buf)
+{
+    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
+        le16_to_cpu(core->mac[VET]));
+}
+
+static bool
+receive_filter(E1000ECore *core, const uint8_t *buf, int size)
+{
+    static const int mta_shift[] = {4, 3, 2, 0};
+    uint32_t f, rctl = core->mac[RCTL], ra[2], *rp;
+
+    if (is_vlan_packet(core, buf) && vlan_rx_filter_enabled(core)) {
+        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
+        uint32_t vfta = le32_to_cpup((uint32_t *)(core->mac + VFTA) +
+                                     ((vid >> 5) & 0x7f));
+        if ((vfta & (1 << (vid & 0x1f))) == 0) {
+            return 0;
+        }
+    }
+
+    switch (net_rx_pkt_get_packet_type(core->tx[0].rx_pkt)) {
+    case ETH_PKT_UCAST:
+        if (rctl & E1000_RCTL_UPE) {
+            return true; /* promiscuous ucast */
+        }
+        break;
+
+    case ETH_PKT_BCAST:
+        if (rctl & E1000_RCTL_BAM) {
+            return true; /* broadcast enabled */
+        }
+        break;
+
+    case ETH_PKT_MCAST:
+        if (rctl & E1000_RCTL_MPE) {
+            return true; /* promiscuous mcast */
+        }
+        break;
+
+    default:
+        g_assert_not_reached();
+    }
+
+    for (rp = core->mac + RA; rp < core->mac + RA + 32; rp += 2) {
+        if (!(rp[1] & E1000_RAH_AV)) {
+            continue;
+        }
+        ra[0] = cpu_to_le32(rp[0]);
+        ra[1] = cpu_to_le32(rp[1]);
+        if (!memcmp(buf, (uint8_t *)ra, 6)) {
+            trace_e1000e_rx_flt_ucast_match((int)(rp - core->mac - RA) / 2,
+                                           MAC_ARG(buf));
+            return 1;
+        }
+    }
+    trace_e1000e_rx_flt_ucast_mismatch(MAC_ARG(buf));
+
+    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
+    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
+    if (core->mac[MTA + (f >> 5)] & (1 << (f & 0x1f))) {
+        inc_reg_if_not_full(core, MPRC);
+        return 1;
+    }
+
+    trace_e1000e_rx_flt_inexact_mismatch(MAC_ARG(buf),
+                                        (rctl >> E1000_RCTL_MO_SHIFT) & 3,
+                                        f >> 5,
+                                        core->mac[MTA + (f >> 5)]);
+
+    return 0;
+}
+
+static uint64_t rx_desc_base(E1000ECore *core)
+{
+    uint64_t bah = core->mac[RDBAH];
+    uint64_t bal = core->mac[RDBAL] & ~0xf;
+
+    return (bah << 32) + bal;
+}
+
+/* FCS aka Ethernet CRC-32. We don't get it from backends and can't
+ * fill it in, just pad descriptor length by 4 bytes unless guest
+ * told us to strip it off the packet. */
+static inline int
+fcs_len(E1000ECore *core)
+{
+    return (core->mac[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
+}
+
+static void
+read_legacy_rx_descriptor(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr)
+{
+    struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
+    *buff_addr = le64_to_cpu(d->buffer_addr);
+}
+
+static void
+read_extended_rx_descriptor(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr)
+{
+    union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc;
+    *buff_addr = le64_to_cpu(d->read.buffer_addr);
+}
+
+static void
+read_ps_rx_descriptor(E1000ECore *core, uint8_t *desc,
+                      hwaddr (*buff_addr)[MAX_PS_BUFFERS])
+{
+    int i;
+    union e1000_rx_desc_packet_split *d =
+        (union e1000_rx_desc_packet_split *) desc;
+
+    for (i = 0; i < MAX_PS_BUFFERS; i++) {
+        (*buff_addr)[i] = le64_to_cpu(d->read.buffer_addr[i]);
+    }
+
+    trace_e1000e_rx_desc_ps_read((*buff_addr)[0], (*buff_addr)[1],
+                                 (*buff_addr)[2], (*buff_addr)[3]);
+}
+
+static void
+read_rx_descriptor(E1000ECore *core, uint8_t *desc,
+                   hwaddr (*buff_addr)[MAX_PS_BUFFERS])
+{
+    if (core->mac[RFCTL] & E1000_RFCTL_EXTEN) {
+        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
+            read_ps_rx_descriptor(core, desc, buff_addr);
+        } else {
+            read_extended_rx_descriptor(core, desc, &(*buff_addr)[0]);
+            (*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0;
+        }
+    } else {
+        read_legacy_rx_descriptor(core, desc, &(*buff_addr)[0]);
+        (*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0;
+    }
+}
+
+static void
+_e1000e_build_rx_metadata(struct NetRxPkt *pkt,
+                          bool is_eop,
+                          uint32_t *status_flags,
+                          uint16_t *ip_id,
+                          uint16_t *vlan_tag)
+{
+    struct virtio_net_hdr *vhdr;
+    bool isip4, isip6, istcp, isudp;
+    uint32_t pkt_type;
+
+    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
+
+    *status_flags = E1000_RXD_STAT_DD;
+
+    /* No additional metadata needed for non-EOP descriptors */
+    if (!is_eop) {
+        return;
+    }
+
+    *status_flags |= E1000_RXD_STAT_EOP;
+
+    /* VLAN state */
+    if (net_rx_pkt_is_vlan_stripped(pkt)) {
+        *status_flags |= E1000_RXD_STAT_VP;
+        *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt));
+    }
+
+    /* Packet parsing results */
+    if (isip4) {
+        *status_flags |= E1000_RXD_STAT_IPIDV;
+        *ip_id = net_rx_pkt_get_ip_id(pkt);
+    }
+
+    if (istcp && net_rx_pkt_is_tcp_ack(pkt)) {
+        *status_flags |= E1000_RXD_STAT_ACK;
+    }
+
+    if (istcp || isudp) {
+        pkt_type = isip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP;
+    } else if (isip4 || isip6) {
+        pkt_type = isip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6;
+    } else {
+        pkt_type = E1000_RXD_PKT_MAC;
+    }
+
+    *status_flags |= E1000_RXD_PKT_TYPE(pkt_type);
+
+    /* RX CSO information */
+    if (!net_rx_pkt_has_virt_hdr(pkt)) {
+        return;
+    }
+
+    vhdr = net_rx_pkt_get_vhdr(pkt);
+
+    if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
+        !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
+        return;
+    }
+
+    *status_flags |= (isip4 ? E1000_RXD_STAT_IPIDV : 0) |
+                     (istcp ? E1000_RXD_STAT_TCPCS : 0) |
+                     (isudp ? E1000_RXD_STAT_UDPCS : 0);
+}
+
+static void
+write_legacy_rx_descriptor(E1000ECore *core, uint8_t *desc,
+                           struct NetRxPkt *pkt, uint16_t length)
+{
+    uint32_t status_flags;
+    uint16_t ip_id;
+
+    struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
+
+    d->length = cpu_to_le16(length);
+
+    _e1000e_build_rx_metadata(pkt, pkt != NULL,
+                              &status_flags, &ip_id, &d->special);
+
+    d->status = (uint8_t) status_flags;
+}
+
+static void
+write_extended_rx_descriptor(E1000ECore *core, uint8_t *desc,
+                             struct NetRxPkt *pkt, uint16_t length)
+{
+    union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc;
+
+    d->wb.upper.length = cpu_to_le16(length);
+
+    _e1000e_build_rx_metadata(pkt, pkt != NULL,
+                              &d->wb.upper.status_error,
+                              &d->wb.lower.hi_dword.csum_ip.ip_id,
+                              &d->wb.upper.vlan);
+}
+
+static void
+write_ps_rx_descriptor(E1000ECore *core, uint8_t *desc,
+                       struct NetRxPkt *pkt,
+                       uint16_t (*written)[MAX_PS_BUFFERS])
+{
+    int i;
+    union e1000_rx_desc_packet_split *d =
+        (union e1000_rx_desc_packet_split *) desc;
+
+    d->wb.middle.length0 = cpu_to_le16((*written)[0]);
+
+    for (i = 0; i < PS_PAGE_BUFFERS; i++) {
+        d->wb.upper.length[i] = cpu_to_le16((*written)[i + 1]);
+    }
+
+    _e1000e_build_rx_metadata(pkt, pkt != NULL,
+                              &d->wb.middle.status_error,
+                              &d->wb.lower.hi_dword.csum_ip.ip_id,
+                              &d->wb.middle.vlan);
+
+    trace_e1000e_rx_desc_ps_write((*written)[0], (*written)[1],
+                                  (*written)[2], (*written)[3]);
+}
+
+static void
+write_rx_descriptor(E1000ECore *core, uint8_t *desc,
+                    struct NetRxPkt *pkt,
+                    uint16_t (*written)[MAX_PS_BUFFERS])
+{
+    if (core->mac[RFCTL] & E1000_RFCTL_EXTEN) {
+        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
+            write_ps_rx_descriptor(core, desc, pkt, written);
+        } else {
+            write_extended_rx_descriptor(core, desc,
+                                         pkt, (*written)[0]);
+        }
+    } else {
+        write_legacy_rx_descriptor(core, desc, pkt, (*written)[0]);
+    }
+}
+
+typedef struct ba_state_st {
+    uint16_t written[MAX_PS_BUFFERS];
+    uint8_t cur_idx;
+} ba_state;
+
+static void
+write_to_rx_buffers(E1000ECore *core,
+                    hwaddr (*ba)[MAX_PS_BUFFERS],
+                    ba_state *bastate,
+                    const char *data,
+                    dma_addr_t data_len)
+{
+    while (data_len > 0) {
+        uint32_t cur_buf_len = core->rxbuf_sizes[bastate->cur_idx];
+        uint32_t cur_buf_bytes_left = cur_buf_len -
+                                      bastate->written[bastate->cur_idx];
+        uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left);
+
+        trace_e1000e_rx_desc_buff_write(bastate->cur_idx,
+                                        (*ba)[bastate->cur_idx],
+                                        bastate->written[bastate->cur_idx],
+                                        data,
+                                        bytes_to_write);
+
+        pci_dma_write(core->owner,
+            (*ba)[bastate->cur_idx] + bastate->written[bastate->cur_idx],
+            data, bytes_to_write);
+
+        bastate->written[bastate->cur_idx] += bytes_to_write;
+        data += bytes_to_write;
+        data_len -= bytes_to_write;
+
+        if (bastate->written[bastate->cur_idx] == cur_buf_len) {
+            bastate->cur_idx++;
+        }
+
+        assert(bastate->cur_idx < MAX_PS_BUFFERS);
+    }
+}
+
+static void
+_e1000e_update_rx_stats(E1000ECore *core,
+                        size_t data_size,
+                        size_t data_fcs_size)
+{
+    static const int PRCregs[6] = { PRC64, PRC127, PRC255, PRC511,
+                                    PRC1023, PRC1522 };
+
+    increase_size_stats(core, PRCregs, data_fcs_size);
+    inc_reg_if_not_full(core, TPR);
+    core->mac[GPRC] = core->mac[TPR];
+    /* TOR - Total Octets Received:
+    * This register includes bytes received in a packet from the <Destination
+    * Address> field through the <CRC> field, inclusively.
+    * Always include FCS length (4) in size.
+    */
+    grow_8reg_if_not_full(core, TORL, data_size + 4);
+    core->mac[GORCL] = core->mac[TORL];
+    core->mac[GORCH] = core->mac[TORH];
+
+    switch (net_rx_pkt_get_packet_type(core->tx[0].rx_pkt)) {
+    case ETH_PKT_BCAST:
+        inc_reg_if_not_full(core, BPRC);
+        break;
+
+    case ETH_PKT_MCAST:
+        inc_reg_if_not_full(core, MPRC);
+        break;
+
+    default:
+        break;
+    }
+}
+
+static bool
+_e1000e_write_paket_to_guest(E1000ECore *core, struct NetRxPkt *pkt)
+{
+    PCIDevice *d = core->owner;
+    dma_addr_t base;
+    uint8_t desc[E1000_MAX_RX_DESC_LEN];
+    size_t desc_size;
+    size_t desc_offset = 0;
+    size_t iov_ofs = 0;
+    uint32_t rdh_start = core->mac[RDH];
+
+    struct iovec *iov = net_rx_pkt_get_iovec(pkt);
+    size_t size = net_rx_pkt_get_total_len(pkt);
+    size_t total_size = size + fcs_len(core);
+
+    if (!_e1000e_has_rxbufs(core, total_size)) {
+        set_interrupt_cause(core, E1000_ICS_RXO);
+        return false;
+    }
+
+    do {
+        hwaddr ba[MAX_PS_BUFFERS];
+        ba_state bastate = { 0 };
+        bool is_last = false;
+
+        desc_size = total_size - desc_offset;
+
+        if (desc_size > core->rx_desc_buf_size) {
+            desc_size = core->rx_desc_buf_size;
+        }
+        base = rx_desc_base(core) + core->rx_desc_len * core->mac[RDH];
+        pci_dma_read(d, base, &desc, core->rx_desc_len);
+
+        read_rx_descriptor(core, desc, &ba);
+
+        if (ba[0]) {
+            if (desc_offset < size) {
+                static const uint32_t fcs_pad;
+                size_t iov_copy;
+                size_t copy_size = size - desc_offset;
+                if (copy_size > core->rx_desc_buf_size) {
+                    copy_size = core->rx_desc_buf_size;
+                }
+                do {
+                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
+
+                    write_to_rx_buffers(core, &ba, &bastate,
+                                        iov->iov_base + iov_ofs, iov_copy);
+
+                    copy_size -= iov_copy;
+                    iov_ofs += iov_copy;
+                    if (iov_ofs == iov->iov_len) {
+                        iov++;
+                        iov_ofs = 0;
+                    }
+                } while (copy_size);
+
+                /* Simulate FCS checksum presence */
+                write_to_rx_buffers(core, &ba, &bastate,
+                                    (const char *) &fcs_pad, fcs_len(core));
+            }
+            desc_offset += desc_size;
+            if (desc_offset >= total_size) {
+                is_last = true;
+            }
+        } else { /* as per intel docs; skip descriptors with null buf addr */
+            trace_e1000e_rx_null_descriptor();
+        }
+
+        write_rx_descriptor(core, desc, is_last ? core->tx[0].rx_pkt : NULL,
+                            &bastate.written);
+        pci_dma_write(d, base, &desc, core->rx_desc_len);
+
+        if (++core->mac[RDH] * core->rx_desc_len >= core->mac[RDLEN]) {
+            core->mac[RDH] = 0;
+        }
+        /* see comment in start_xmit; same here */
+        if (core->mac[RDH] == rdh_start) {
+            trace_e1000e_rx_err_wraparound(rdh_start,
+                                          core->mac[RDT],
+                                          core->mac[RDLEN]);
+            set_interrupt_cause(core, E1000_ICS_RXO);
+            return false;
+        }
+    } while (desc_offset < total_size);
+
+    _e1000e_update_rx_stats(core, size, total_size);
+
+    return true;
+}
+
+ssize_t
+e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt)
+{
+    /* this is the size past which hardware will
+       drop packets when setting LPE=0 */
+    static const int MAXIMUM_ETHERNET_VLAN_SIZE = 1522;
+    /* this is the size past which hardware will
+       drop packets when setting LPE=1 */
+    static const int MAXIMUM_ETHERNET_LPE_SIZE = 16384;
+
+    static const int MAXIMUM_ETHERNET_HDR_LEN = (14 + 4);
+
+    /* Min. octets in an ethernet frame sans FCS */
+    static const int MIN_BUF_SIZE = 60;
+
+    unsigned int n, rdt;
+    uint8_t min_buf[MIN_BUF_SIZE];
+    struct iovec min_iov;
+    uint8_t *filter_buf;
+    size_t size, orig_size;
+    size_t iov_ofs = 0;
+
+    if (!(core->mac[STATUS] & E1000_STATUS_LU)) {
+        return -1;
+    }
+
+    if (!(core->mac[RCTL] & E1000_RCTL_EN)) {
+        return -1;
+    }
+
+    /* Pull virtio header in */
+    if (core->has_vnet) {
+        net_rx_pkt_set_vhdr_iovec(core->tx[0].rx_pkt, iov, iovcnt);
+        iov_ofs = sizeof(struct virtio_net_hdr);
+    }
+
+    filter_buf = iov->iov_base + iov_ofs;
+    orig_size = iov_size(iov, iovcnt);
+    size = orig_size - iov_ofs;
+
+    /* Pad to minimum Ethernet frame length */
+    if (size < sizeof(min_buf)) {
+        iov_to_buf(iov, iovcnt, iov_ofs, min_buf, size);
+        memset(&min_buf[size], 0, sizeof(min_buf) - size);
+        inc_reg_if_not_full(core, RUC);
+        min_iov.iov_base = filter_buf = min_buf;
+        min_iov.iov_len = size = sizeof(min_buf);
+        iovcnt = 1;
+        iov = &min_iov;
+        iov_ofs = 0;
+    } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
+        /* This is very unlikely, but may happen. */
+        iov_to_buf(iov, iovcnt, iov_ofs, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
+        filter_buf = min_buf;
+    }
+
+    /* Discard oversized packets if !LPE and !SBP. */
+    if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
+        (size > MAXIMUM_ETHERNET_VLAN_SIZE
+        && !(core->mac[RCTL] & E1000_RCTL_LPE)))
+        && !(core->mac[RCTL] & E1000_RCTL_SBP)) {
+        inc_reg_if_not_full(core, ROC);
+        return orig_size;
+    }
+
+    net_rx_pkt_set_packet_type(core->tx[0].rx_pkt,
+        get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf)));
+
+    if (!receive_filter(core, filter_buf, size)) {
+        return orig_size;
+    }
+
+    net_rx_pkt_attach_iovec_ex(core->tx[0].rx_pkt, iov, iovcnt, iov_ofs,
+                                  vlan_enabled(core),
+                                  le16_to_cpu(core->mac[VET]));
+
+    if (!_e1000e_write_paket_to_guest(core, core->tx[0].rx_pkt)) {
+        return -1;
+    }
+
+    n = _e1000e_rx_wb_interrupt_cause(core);
+    rdt = core->mac[RDT];
+    if (rdt < core->mac[RDH]) {
+        rdt += core->mac[RDLEN] / core->rx_desc_len;
+    }
+    if (((rdt - core->mac[RDH]) * core->rx_desc_len) <= core->mac[RDLEN] >>
+        core->rxbuf_min_shift) {
+        n |= E1000_ICS_RXDMT0;
+    }
+
+    set_interrupt_cause(core, n);
+
+    return orig_size;
+}
+
+static void
+e1000_link_down(E1000ECore *core)
+{
+    core->mac[STATUS] &= ~E1000_STATUS_LU;
+    core->phy[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
+    core->phy[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
+    core->phy[PHY_LP_ABILITY] &= ~MII_LPAR_LPACK;
+}
+
+static void
+e1000_link_up(E1000ECore *core)
+{
+    core->mac[STATUS] |= E1000_STATUS_LU;
+    core->phy[PHY_STATUS] |= MII_SR_LINK_STATUS;
+}
+
+static bool
+have_autoneg(E1000ECore *core)
+{
+    return (core->compat_flags & E1000_FLAG_AUTONEG) &&
+        (core->phy[PHY_CTRL] & MII_CR_AUTO_NEG_EN);
+}
+
+static void
+set_phy_ctrl(E1000ECore *core, int index, uint16_t val)
+{
+    /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */
+    core->phy[PHY_CTRL] = val & ~(0x3f |
+                                  MII_CR_RESET |
+                                  MII_CR_RESTART_AUTO_NEG);
+
+    /*
+     * QEMU 1.3 does not support link auto-negotiation emulation, so if we
+     * migrate during auto negotiation, after migration the link will be
+     * down.
+     */
+    if (have_autoneg(core) && (val & MII_CR_RESTART_AUTO_NEG)) {
+        e1000_link_down(core);
+        trace_e1000e_core_start_link_negotiation();
+        timer_mod(core->autoneg_timer,
+                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
+    }
+}
+
+static void
+set_phy_page(E1000ECore *core, int index, uint16_t val)
+{
+    core->phy[PHY_PAGE] = val & PHY_PAGE_RW_MASK;
+}
+
+void
+e1000e_core_set_link_status(E1000ECore *core)
+{
+    NetClientState *nc = qemu_get_queue(core->owner_nic);
+    uint32_t old_status = core->mac[STATUS];
+
+    if (nc->link_down) {
+        e1000_link_down(core);
+    } else {
+        if (have_autoneg(core) &&
+            !(core->phy[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
+            /* emulate auto-negotiation if supported */
+            timer_mod(core->autoneg_timer,
+                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
+        } else {
+            e1000_link_up(core);
+        }
+    }
+
+    if (core->mac[STATUS] != old_status) {
+        set_interrupt_cause(core, E1000_ICR_LSC);
+    }
+}
+
+static void
+set_ctrl(E1000ECore *core, int index, uint32_t val)
+{
+    /* RST is self clearing */
+    core->mac[CTRL] = val & ~E1000_CTRL_RST;
+}
+
+static uint32_t
+parse_rxbufsize_e1000(uint32_t rctl)
+{
+    rctl &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
+            E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
+            E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
+    switch (rctl) {
+    case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
+        return 16384;
+    case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
+        return 8192;
+    case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
+        return 4096;
+    case E1000_RCTL_SZ_1024:
+        return 1024;
+    case E1000_RCTL_SZ_512:
+        return 512;
+    case E1000_RCTL_SZ_256:
+        return 256;
+    }
+    return 2048;
+}
+
+static void
+calc_per_desc_buf_size(E1000ECore *core)
+{
+    int i;
+    core->rx_desc_buf_size = 0;
+
+    for (i = 0; i < ARRAY_SIZE(core->rxbuf_sizes); i++) {
+        core->rx_desc_buf_size += core->rxbuf_sizes[i];
+    }
+}
+
+static void
+parse_rxbufsize(E1000ECore *core)
+{
+    uint32_t rctl = core->mac[RCTL];
+
+    memset(core->rxbuf_sizes, 0, sizeof(core->rxbuf_sizes));
+
+    if (rctl & E1000_RCTL_DTYP_MASK) {
+        uint32_t bsize;
+
+        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE0_MASK;
+        core->rxbuf_sizes[0] = (bsize >> E1000_PSRCTL_BSIZE0_SHIFT) * 128;
+
+        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE1_MASK;
+        core->rxbuf_sizes[1] = (bsize >> E1000_PSRCTL_BSIZE1_SHIFT) * 1024;
+
+        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE2_MASK;
+        core->rxbuf_sizes[2] = (bsize >> E1000_PSRCTL_BSIZE2_SHIFT) * 1024;
+
+        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE3_MASK;
+        core->rxbuf_sizes[3] = (bsize >> E1000_PSRCTL_BSIZE3_SHIFT) * 1024;
+    } else if (rctl & E1000_RCTL_FLXBUF_MASK) {
+        int flxbuf = rctl & E1000_RCTL_FLXBUF_MASK;
+        core->rxbuf_sizes[0] = (flxbuf >> E1000_RCTL_FLXBUF_SHIFT) * 1024;
+    } else {
+        core->rxbuf_sizes[0] = parse_rxbufsize_e1000(rctl);
+    }
+
+    trace_e1000e_rx_desc_buff_sizes(core->rxbuf_sizes[0], core->rxbuf_sizes[1],
+                                    core->rxbuf_sizes[2], 
core->rxbuf_sizes[3]);
+
+    calc_per_desc_buf_size(core);
+}
+
+static void
+calc_rxdesclen(E1000ECore *core)
+{
+    if (core->mac[RFCTL] & E1000_RFCTL_EXTEN) {
+        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
+            core->rx_desc_len = sizeof(union e1000_rx_desc_packet_split);
+        } else {
+            core->rx_desc_len = sizeof(union e1000_rx_desc_extended);
+        }
+    } else {
+        core->rx_desc_len = sizeof(struct e1000_rx_desc);
+    }
+}
+
+static void
+set_rx_control(E1000ECore *core, int index, uint32_t val)
+{
+    core->mac[RCTL] = val;
+    trace_e1000e_core_set_rxctl(core->mac[RDT], core->mac[RCTL]);
+
+    if (val & E1000_RCTL_EN) {
+        parse_rxbufsize(core);
+        calc_rxdesclen(core);
+        core->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
+        qemu_flush_queued_packets(qemu_get_queue(core->owner_nic));
+    }
+}
+
+static void(*phyreg_writeops[])(E1000ECore *, int, uint16_t) = {
+    [PHY_CTRL] = set_phy_ctrl, [PHY_PAGE] = set_phy_page
+};
+
+enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };
+
+/* Helper function, *curr == 0 means the value is not set */
+static inline void
+mit_update_delay(uint32_t *curr, uint32_t value)
+{
+    if (value && (*curr == 0 || value < *curr)) {
+        *curr = value;
+    }
+}
+
+static void
+clear_ims_bits(E1000ECore *core, uint32_t bits)
+{
+    core->mac[IMS] &= ~bits;
+}
+
+static void
+_e1000e_msix_notify_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg)
+{
+    if (E1000_IVAR_ENTRY_VALID(int_cfg)) {
+        uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg);
+        if (vec < E1000E_MSIX_VEC_NUM) {
+            msix_notify(core->owner, vec);
+            trace_e1000e_irq_msix_notify_vec(vec);
+        } else {
+            trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg);
+        }
+    } else {
+        trace_e1000e_wrn_msix_invalid(cause, int_cfg);
+    }
+
+    if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_EIAME) {
+        clear_ims_bits(core, core->mac[IAM] & cause);
+    }
+
+    core->mac[ICR] &= ~(core->mac[EIAC] & E1000_EIAC_MASK);
+}
+
+static void
+_e1000e_msix_notify(E1000ECore *core, uint32_t causes)
+{
+    if (causes & E1000_ICR_RXQ0) {
+        _e1000e_msix_notify_one(core, E1000_ICR_RXQ0,
+                                E1000_IVAR_RXQ0(core->mac[IVAR]));
+    }
+
+    if (causes & E1000_ICR_RXQ1) {
+        _e1000e_msix_notify_one(core, E1000_ICR_RXQ1,
+                                E1000_IVAR_RXQ1(core->mac[IVAR]));
+    }
+
+    if (causes & E1000_ICR_TXQ0) {
+        _e1000e_msix_notify_one(core, E1000_ICR_TXQ0,
+                                E1000_IVAR_TXQ0(core->mac[IVAR]));
+    }
+
+    if (causes & E1000_ICR_TXQ1) {
+        _e1000e_msix_notify_one(core, E1000_ICR_TXQ1,
+                                E1000_IVAR_TXQ1(core->mac[IVAR]));
+    }
+
+    if (causes & ~(E1000_ICR_RXQ0 |
+                   E1000_ICR_RXQ1 |
+                   E1000_ICR_TXQ0 |
+                   E1000_ICR_TXQ1))
+    {
+        _e1000e_msix_notify_one(core, E1000_ICR_OTHER,
+                                E1000_IVAR_OTHER(core->mac[IVAR]));
+    }
+}
+
+static void
+_e1000e_fix_icr_asserted(E1000ECore *core)
+{
+    core->mac[ICR] &= ~E1000_ICR_ASSERTED;
+    if (core->mac[ICR]) {
+        core->mac[ICR] |= E1000_ICR_ASSERTED;
+    }
+}
+
+static void
+_e1000e_update_interrupt_state(E1000ECore *core)
+{
+    uint32_t pending_ints;
+    uint32_t mit_delay;
+
+    _e1000e_fix_icr_asserted(core);
+
+    /*
+     * Make sure ICR and ICS registers have the same value.
+     * The spec says that the ICS register is write-only.  However in practice,
+     * on real hardware ICS is readable, and for reads it has the same value as
+     * ICR (except that ICS does not have the clear on read behaviour of ICR).
+     *
+     * The VxWorks PRO/1000 driver uses this behaviour.
+     */
+    core->mac[ICS] = core->mac[ICR];
+
+    pending_ints = (core->mac[IMS] & core->mac[ICR]);
+    if (!core->mit_irq_level && pending_ints) {
+        /*
+         * Here we detect a potential raising edge. We postpone raising the
+         * interrupt line if we are inside the mitigation delay window
+         * (s->mit_timer_on == 1).
+         * We provide a partial implementation of interrupt mitigation,
+         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
+         * RADV and TADV, 256ns units for ITR). RDTR is only used to enable
+         * RADV; relative timers based on TIDV and RDTR are not implemented.
+         */
+        if (core->mit_timer_on) {
+            return;
+        }
+        if (core->compat_flags & E1000_FLAG_MIT) {
+            /* Compute the next mitigation delay according to pending
+             * interrupts and the current values of RADV (provided
+             * RDTR!=0), TADV and ITR.
+             * Then rearm the timer.
+             */
+            mit_delay = 0;
+            if (core->mit_ide &&
+                    (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
+                mit_update_delay(&mit_delay, core->mac[TADV] * 4);
+            }
+            if (core->mac[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
+                mit_update_delay(&mit_delay, core->mac[RADV] * 4);
+            }
+            mit_update_delay(&mit_delay, core->mac[ITR]);
+
+            if (mit_delay) {
+                core->mit_timer_on = 1;
+                timer_mod(core->mit_timer,
+                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+                          mit_delay * 256);
+            }
+            core->mit_ide = 0;
+        }
+    }
+
+    core->mit_irq_level = (pending_ints != 0);
+
+    trace_e1000e_irq_legacy_notify(core->mit_irq_level);
+    if (core->mit_irq_level) {
+        inc_reg_if_not_full(core, IAC);
+    }
+
+    pci_set_irq(core->owner, core->mit_irq_level);
+}
+
+static void
+send_msi(E1000ECore *core, uint32_t causes)
+{
+    causes &= core->mac[IMS] & ~E1000_ICR_ASSERTED;
+
+    if (causes == 0) {
+        return;
+    }
+
+    if (msix_enabled(core->owner)) {
+        trace_e1000e_irq_msix_notify(causes);
+        _e1000e_msix_notify(core, causes);
+        return;
+    }
+
+    if (msi_enabled(core->owner)) {
+        trace_e1000e_irq_msi_notify(causes);
+        msi_notify(core->owner, 0);
+        return;
+    }
+}
+
+static void
+set_interrupt_cause(E1000ECore *core, uint32_t val)
+{
+    trace_e1000e_irq_set_cause(val);
+
+    core->mac[ICR] |= val;
+    _e1000e_update_interrupt_state(core);
+    send_msi(core, val);
+}
+
+static void
+_e1000e_mit_timer(void *opaque)
+{
+    E1000ECore *core = opaque;
+
+    core->mit_timer_on = 0;
+    /* Call set_interrupt_cause to update the irq level (if necessary). */
+    set_interrupt_cause(core, core->mac[ICR]);
+}
+
+static void
+_e1000e_autoneg_timer(void *opaque)
+{
+    E1000ECore *core = opaque;
+    if (!qemu_get_queue(core->owner_nic)->link_down) {
+        e1000_link_up(core);
+        core->phy[PHY_LP_ABILITY] |= MII_LPAR_LPACK;
+        core->phy[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
+        trace_e1000e_core_link_negotiation_done();
+        set_interrupt_cause(core, E1000_ICR_LSC); /* signal link status change
+                                                   * to guest */
+    }
+}
+
+static const char phy_regcap[0x20] = {
+    [PHY_STATUS]        = PHY_R,  [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
+    [PHY_ID1]           = PHY_R,  [M88E1000_PHY_SPEC_CTRL] =     PHY_RW,
+    [PHY_CTRL]          = PHY_RW, [PHY_1000T_CTRL] =             PHY_RW,
+    [PHY_LP_ABILITY]    = PHY_R,  [PHY_1000T_STATUS] =           PHY_R,
+    [PHY_AUTONEG_ADV]   = PHY_RW, [M88E1000_RX_ERR_CNTR] =       PHY_R,
+    [PHY_ID2]           = PHY_R,  [M88E1000_PHY_SPEC_STATUS] =   PHY_R,
+    [PHY_AUTONEG_EXP]   = PHY_R,  [PHY_PAGE] =                   PHY_RW,
+    [PHY_OEM_BITS]      = PHY_RW, [PHY_BIAS_1] =                 PHY_RW,
+    [PHY_BIAS_2]        = PHY_RW
+};
+
+static bool
+phy_reg_check_cap(E1000ECore *core, uint32_t addr, char cap)
+{
+    return phy_regcap[addr] & cap;
+}
+
+static void
+phy_reg_write(E1000ECore *core, uint32_t addr, uint16_t data)
+{
+    if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
+        phyreg_writeops[addr](core, addr, data);
+    } else {
+        core->phy[addr] = data;
+    }
+}
+
+static void
+set_mdic(E1000ECore *core, int index, uint32_t val)
+{
+    uint32_t data = val & E1000_MDIC_DATA_MASK;
+    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+
+    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */
+        val = core->mac[MDIC] | E1000_MDIC_ERROR;
+    } else if (val & E1000_MDIC_OP_READ) {
+        trace_e1000e_core_mdic_read(addr, data);
+        if (!phy_reg_check_cap(core, addr, PHY_R)) {
+            trace_e1000e_core_mdic_read_unhandled(addr);
+            val |= E1000_MDIC_ERROR;
+        } else {
+            val = (val ^ data) | core->phy[addr];
+        }
+    } else if (val & E1000_MDIC_OP_WRITE) {
+        trace_e1000e_core_mdic_write(addr, data);
+        if (!phy_reg_check_cap(core, addr, PHY_W)) {
+            trace_e1000e_core_mdic_write_unhandled(addr);
+            val |= E1000_MDIC_ERROR;
+        } else {
+            phy_reg_write(core, addr, data);
+        }
+    }
+    core->mac[MDIC] = val | E1000_MDIC_READY;
+
+    if (val & E1000_MDIC_INT_EN) {
+        set_interrupt_cause(core, E1000_ICR_MDAC);
+    }
+}
+
+static uint32_t
+get_eecd(E1000ECore *core, int index)
+{
+    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | core->eecd_state.old_eecd;
+
+    trace_e1000e_core_eeeprom_read(core->eecd_state.bitnum_out,
+                                  core->eecd_state.reading);
+    if (!core->eecd_state.reading ||
+        ((core->eeprom[(core->eecd_state.bitnum_out >> 4) & 0x3f] >>
+          ((core->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1) {
+        ret |= E1000_EECD_DO;
+    }
+    return ret;
+}
+
+static uint32_t
+flash_eerd_read(E1000ECore *core, int x)
+{
+    unsigned int index, r = core->mac[EERD] & ~E1000_EEPROM_RW_REG_START;
+
+    if ((core->mac[EERD] & E1000_EEPROM_RW_REG_START) == 0) {
+        return core->mac[EERD];
+    }
+
+    index = r >> E1000_EEPROM_RW_ADDR_SHIFT;
+    if (index > EEPROM_CHECKSUM_REG) {
+        return E1000_EEPROM_RW_REG_DONE | r;
+    }
+
+    return (core->eeprom[index] << E1000_EEPROM_RW_REG_DATA) |
+           E1000_EEPROM_RW_REG_DONE | r;
+}
+
+static void
+set_rdt(E1000ECore *core, int index, uint32_t val)
+{
+    core->mac[index] = val & 0xffff;
+    start_recv(core);
+}
+
+static void
+set_16bit(E1000ECore *core, int index, uint32_t val)
+{
+    core->mac[index] = val & 0xffff;
+}
+
+static void
+set_dlen(E1000ECore *core, int index, uint32_t val)
+{
+    core->mac[index] = val & 0xfff80;
+}
+
+static void
+set_tctl(E1000ECore *core, int index, uint32_t val)
+{
+    tx_ring txr;
+    core->mac[index] = val;
+
+    _e1000e_tx_ring_init(core, &txr, 0);
+    start_xmit(core, &txr);
+
+    if (core->mac[TARC1] & E1000_TARC_ENABLE) {
+            _e1000e_tx_ring_init(core, &txr, 1);
+            start_xmit(core, &txr);
+    }
+}
+
+static void
+set_tdt(E1000ECore *core, int index, uint32_t val)
+{
+    tx_ring txr;
+
+    core->mac[index] = val & 0xffff;
+
+    _e1000e_tx_ring_init(core, &txr, _e1000e_mq_queue_idx(TDT, index));
+    start_xmit(core, &txr);
+}
+
+static void
+set_ics(E1000ECore *core, int index, uint32_t val)
+{
+    trace_e1000e_core_set_ics(val);
+    set_interrupt_cause(core, val);
+}
+
+static void
+set_icr(E1000ECore *core, int index, uint32_t val)
+{
+    trace_e1000e_core_icr_write(val);
+
+    if ((core->mac[ICR] & E1000_ICR_ASSERTED) &&
+        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
+        clear_ims_bits(core, core->mac[IAM]);
+    }
+
+    core->mac[ICR] &= ~val;
+    _e1000e_update_interrupt_state(core);
+}
+
+static void
+set_imc(E1000ECore *core, int index, uint32_t val)
+{
+    clear_ims_bits(core, val);
+    _e1000e_update_interrupt_state(core);
+}
+
+static void
+set_ims(E1000ECore *core, int index, uint32_t val)
+{
+    core->mac[IMS] |= val;
+    _e1000e_update_interrupt_state(core);
+}
+
+static uint32_t
+mac_readreg(E1000ECore *core, int index)
+{
+    return core->mac[index];
+}
+
+static uint32_t
+mac_ics_read(E1000ECore *core, int index)
+{
+    uint32_t val = core->mac[ICS];
+    trace_e1000e_core_read_ics(val);
+    return val;
+}
+
+static uint32_t
+mac_ims_read(E1000ECore *core, int index)
+{
+    return core->mac[IMS];
+}
+
+static uint32_t
+mac_low11_read(E1000ECore *core, int index)
+{
+    return core->mac[index] & 0x7ff;
+}
+
+static uint32_t
+mac_low13_read(E1000ECore *core, int index)
+{
+    return core->mac[index] & 0x1fff;
+}
+
+static uint32_t
+mac_swsm_read(E1000ECore *core, int index)
+{
+    uint32_t val = core->mac[SWSM];
+    core->mac[SWSM] = val | 1;
+    return val;
+}
+
+static uint32_t
+mac_icr_read(E1000ECore *core, int index)
+{
+    uint32_t ret = core->mac[ICR];
+
+    if (core->mac[IMS] == 0) {
+        core->mac[ICR] = 0;
+    }
+
+    if ((core->mac[ICR] & E1000_ICR_ASSERTED) &&
+        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
+        core->mac[ICR] = 0;
+        clear_ims_bits(core, core->mac[IAM]);
+    }
+
+    _e1000e_update_interrupt_state(core);
+    trace_e1000e_core_icr_read(ret);
+    return ret;
+}
+
+static uint32_t
+mac_read_clr4(E1000ECore *core, int index)
+{
+    uint32_t ret = core->mac[index];
+
+    core->mac[index] = 0;
+    return ret;
+}
+
+static uint32_t
+mac_read_clr8(E1000ECore *core, int index)
+{
+    uint32_t ret = core->mac[index];
+
+    core->mac[index] = 0;
+    core->mac[index-1] = 0;
+    return ret;
+}
+
+static uint32_t
+get_status(E1000ECore *core, int index)
+{
+    bool gio_disable = core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE;
+    uint32_t mask = gio_disable ? ~E1000_STATUS_GIO_MASTER_ENABLE : ~0L;
+
+    return core->mac[STATUS] & mask;
+}
+
+static uint32_t
+get_tarc(E1000ECore *core, int index)
+{
+    return core->mac[index] & ((BIT(11) - 1) |
+                                BIT(27)      |
+                                BIT(28)      |
+                                BIT(29)      |
+                                BIT(30));
+}
+
+static uint32_t
+get_pbs(E1000ECore *core, int index)
+{
+    return core->mac[index] & 0x3f;
+}
+
+static void
+mac_writereg(E1000ECore *core, int index, uint32_t val)
+{
+    uint32_t macaddr[2];
+
+    core->mac[index] = val;
+
+    if (index == RA + 1) {
+        macaddr[0] = cpu_to_le32(core->mac[RA]);
+        macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
+        qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
+                                 (uint8_t *)macaddr);
+    }
+}
+
+static void
+set_eecd(E1000ECore *core, int index, uint32_t val)
+{
+    g_warning("e1000e EECD write not implemented");
+}
+
+static void
+set_eerd(E1000ECore *core, int index, uint32_t val)
+{
+    uint32_t addr = (val >> E1000_NVM_RW_ADDR_SHIFT) & E1000_NVM_ADDR_MASK;
+    uint32_t data;
+
+    if (!(val & E1000_NVM_RW_REG_START)) {
+        return;
+    }
+
+    if (addr > EEPROM_CHECKSUM_REG) {
+        return;
+    }
+
+    data = core->eeprom[addr];
+
+    core->mac[EERD] = E1000_NVM_RW_REG_DONE             |
+                      (addr << E1000_NVM_RW_ADDR_SHIFT) |
+                      (data << E1000_NVM_RW_REG_DATA);
+}
+
+static void
+set_psrctl(E1000ECore *core, int index, uint32_t val)
+{
+    if ((val & E1000_PSRCTL_BSIZE0_MASK) == 0) {
+        hw_error("e1000e: PSRCTL.BSIZE0 cannot be zero");
+    }
+
+    if ((val & E1000_PSRCTL_BSIZE1_MASK) == 0) {
+        hw_error("e1000e: PSRCTL.BSIZE1 cannot be zero");
+    }
+
+    core->mac[PSRCTL] = val;
+}
+
+static void
+set_rxcsum(E1000ECore *core, int index, uint32_t val)
+{
+    qemu_set_offload(qemu_get_queue(core->owner_nic),
+                     !!(val & E1000_RXCSUM_TUOFLD),
+                     0, 0, 0, 0);
+
+    core->mac[RXCSUM] = val;
+}
+
+static void
+set_gcr(E1000ECore *core, int index, uint32_t val)
+{
+    uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS;
+    core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits;
+}
+
+
+#define getreg(x)    [x] = mac_readreg
+
+static uint32_t (*macreg_readops[])(E1000ECore *, int) = {
+    getreg(PBA),      getreg(RCTL),     getreg(TDH),      getreg(TXDCTL),
+    getreg(WUFC),     getreg(TDT),      getreg(CTRL),     getreg(LEDCTL),
+    getreg(MANC),     getreg(MDIC),     getreg(STATUS),   getreg(TORL),
+    getreg(TOTL),     getreg(FCRUC),    getreg(TCTL),     getreg(RDH),
+    getreg(RDT),      getreg(VET),      getreg(AIT),      getreg(TDBAL),
+    getreg(TDBAH),    getreg(RDBAH),    getreg(RDBAL),    getreg(TDLEN),
+    getreg(TDLEN1),   getreg(TDBAL1),   getreg(TDBAH1),   getreg(TDH1),
+    getreg(TDT1),     getreg(RDLEN),    getreg(RDTR),     getreg(RADV),
+    getreg(TADV),     getreg(ITR),      getreg(SCC),      getreg(ECOL),
+    getreg(MCC),      getreg(LATECOL),  getreg(COLC),     getreg(DC),
+    getreg(TNCRS),    getreg(SEC),      getreg(CEXTERR),  getreg(RLEC),
+    getreg(XONRXC),   getreg(XONTXC),   getreg(XOFFRXC),  getreg(XOFFTXC),
+    getreg(WUC),      getreg(WUS),      getreg(IPAV),     getreg(RFC),
+    getreg(RJC),      getreg(GORCL),    getreg(GOTCL),    getreg(RNBC),
+    getreg(TSCTFC),   getreg(MGTPRC),   getreg(MGTPDC),   getreg(MGTPTC),
+    getreg(EECD),     getreg(EERD),     getreg(GCR),      getreg(TIMINCA),
+    getreg(IAM),      getreg(EIAC),     getreg(IVAR),     getreg(CTRL_EXT),
+    getreg(RFCTL),    getreg(PSRCTL),   getreg(POEMB),    getreg(MFUTP01),
+    getreg(MFUTP23),  getreg(MANC2H),   getreg(MFVAL),    getreg(FACTPS),
+    getreg(RXCSUM),   getreg(FUNCTAG),  getreg(GSCL_1),   getreg(EXTCNF_CTRL),
+    getreg(GSCL_2),   getreg(GSCL_3),   getreg(GSCL_4),   getreg(GSCN_0),
+    getreg(GSCN_1),   getreg(GSCN_2),   getreg(GSCN_3),   getreg(GCR2),
+
+    [TOTH] = mac_read_clr8,   [TORH] = mac_read_clr8,   [GOTCH] = 
mac_read_clr8,
+    [GORCH] = mac_read_clr8,
+    [PRC64] = mac_read_clr4,  [PRC127] = mac_read_clr4, [PRC255] = 
mac_read_clr4,
+    [PRC511] = mac_read_clr4, [PRC1023] = mac_read_clr4, [PRC1522] = 
mac_read_clr4,
+    [PTC64] = mac_read_clr4,  [PTC127] = mac_read_clr4, [PTC255] = 
mac_read_clr4,
+    [PTC511] = mac_read_clr4, [PTC1023] = mac_read_clr4, [PTC1522] = 
mac_read_clr4,
+    [GPRC] = mac_read_clr4,   [GPTC] = mac_read_clr4,   [TPR] = mac_read_clr4,
+    [TPT] = mac_read_clr4,    [RUC] = mac_read_clr4,    [ROC] = mac_read_clr4,
+    [BPRC] = mac_read_clr4,   [MPRC] = mac_read_clr4,   [MPTC] = mac_read_clr4,
+    [BPTC] = mac_read_clr4,   [IAC] = mac_read_clr4,    [TSCTC] = 
mac_read_clr4,
+    [ICR] = mac_icr_read,     [EECD] = get_eecd,        [EERD] = 
flash_eerd_read,
+    [ICS] = mac_ics_read,
+    [IMS] = mac_ims_read,
+    [RDFH] = mac_low13_read,  [RDFT] = mac_low13_read,
+    [RDFHS] = mac_low13_read, [RDFTS] = mac_low13_read, [RDFPC] = 
mac_low13_read,
+    [TDFH] = mac_low11_read,  [TDFT] = mac_low11_read,
+    [TDFHS] = mac_low13_read, [TDFTS] = mac_low13_read, [TDFPC] = 
mac_low13_read,
+    [TDFH] = mac_low13_read,  [TDFT] = mac_low13_read,  [TDFHS] = 
mac_low13_read,
+    [TDFTS] = mac_low13_read,
+    [STATUS] = get_status,    [TARC0] = get_tarc,       [TARC1] = get_tarc,
+    [PBS] = get_pbs,          [SWSM] = mac_swsm_read,
+
+    [CRCERRS ... MPC] = &mac_readreg,
+    [IP6AT ... IP6AT+3] = &mac_readreg, [IP4AT ... IP4AT+6] = &mac_readreg,
+    [FFLT ... FFLT+6] = &mac_low11_read,
+    [RA ... RA+31] = &mac_readreg,
+    [WUPM ... WUPM+31] = &mac_readreg,
+    [MTA ... MTA+127] = &mac_readreg,
+    [VFTA ... VFTA+127] = &mac_readreg,
+    [FFMT ... FFMT+254] = &mac_readreg, [FFVT ... FFVT+254] = &mac_readreg,
+    [PBM ... PBM+16383] = &mac_readreg,
+    [MDEF ... MDEF + 7] = &mac_readreg,
+    [FFLT ... FFLT + 10] = &mac_readreg,
+    [FTFT ... FTFT + 254] = &mac_readreg,
+    [PBM ... PBM + 10239] = &mac_readreg,
+};
+enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
+
+#define putreg(x)    [x] = mac_writereg
+static void (*macreg_writeops[])(E1000ECore *, int, uint32_t) = {
+    putreg(PBA),      putreg(EERD),     putreg(SWSM),     putreg(WUFC),
+    putreg(TDBAL),    putreg(TDBAH),    putreg(TXDCTL),   putreg(RDBAH),
+    putreg(RDBAL),    putreg(LEDCTL),   putreg(VET),      putreg(FCRUC),
+    putreg(AIT),      putreg(TDFH),     putreg(TDFT),     putreg(TDFHS),
+    putreg(TDFTS),    putreg(TDFPC),    putreg(WUC),      putreg(WUS),
+    putreg(RDFH),     putreg(RDFT),     putreg(RDFHS),    putreg(RDFTS),
+    putreg(RDFPC),    putreg(IPAV),     putreg(TDBAL1),   putreg(TDBAH1),
+    putreg(TIMINCA),  putreg(IAM),      putreg(EIAC),     putreg(IVAR),
+    putreg(CTRL_EXT), putreg(RFCTL),    putreg(TARC0),    putreg(TARC1),
+    putreg(TDFH),     putreg(TDFT),     putreg(TDFHS),    putreg(TDFTS),
+    putreg(POEMB),    putreg(PBS),      putreg(MFUTP01),  putreg(MFUTP23),
+    putreg(MANC),     putreg(MANC2H),   putreg(MFVAL),    putreg(EXTCNF_CTRL),
+    putreg(FACTPS),   putreg(FUNCTAG),  putreg(GSCL_1),   putreg(GSCL_2),
+    putreg(GSCL_3),   putreg(GSCL_4),   putreg(GSCN_0),   putreg(GSCN_1),
+    putreg(GSCN_2),   putreg(GSCN_3),   putreg(GCR2),
+
+    [TDLEN1] = set_dlen, [TDH1] = set_16bit,     [TDT1] = set_tdt,
+    [TDLEN] = set_dlen, [RDLEN] = set_dlen,      [TCTL] = set_tctl,
+    [TDT] = set_tdt,    [MDIC] = set_mdic,       [ICS] = set_ics,
+    [TDH] = set_16bit,  [RDH] = set_16bit,       [RDT] = set_rdt,
+    [IMC] = set_imc,    [IMS] = set_ims,         [ICR] = set_icr,
+    [EECD] = set_eecd,  [RCTL] = set_rx_control, [CTRL] = set_ctrl,
+    [RDTR] = set_16bit, [RADV] = set_16bit,      [TADV] = set_16bit,
+    [ITR] = set_16bit,  [EERD] = set_eerd,       [GCR] = set_gcr,
+    [PSRCTL] = set_psrctl, [RXCSUM] = set_rxcsum,
+
+    [IP6AT ... IP6AT+3] = &mac_writereg, [IP4AT ... IP4AT+6] = &mac_writereg,
+    [FFLT ... FFLT+6] = &mac_writereg,
+    [RA ... RA+31] = &mac_writereg,
+    [WUPM ... WUPM+31] = &mac_writereg,
+    [MTA ... MTA+127] = &mac_writereg,
+    [VFTA ... VFTA+127] = &mac_writereg,
+    [FFMT ... FFMT+254] = &mac_writereg, [FFVT ... FFVT+254] = &mac_writereg,
+    [PBM ... PBM+16383] = &mac_writereg,
+    [PBM ... PBM + 10239] = &mac_writereg,
+    [MDEF ... MDEF + 7] = &mac_writereg,
+    [FFLT ... FFLT + 10] = &mac_writereg,
+    [FTFT ... FTFT + 254] = &mac_writereg,
+};
+
+enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
+
+void
+e1000e_core_write(E1000ECore *core, hwaddr addr, uint64_t val, unsigned size)
+{
+    unsigned int index = (addr & 0x1ffff) >> 2;
+
+    if (index < NWRITEOPS && macreg_writeops[index]) {
+        trace_e1000e_core_write(index << 2, size, val);
+        macreg_writeops[index](core, index, val);
+    } else if (index < NREADOPS && macreg_readops[index]) {
+        trace_e1000e_wrn_regs_write_ro(index << 2, size, val);
+    } else {
+        trace_e1000e_wrn_regs_write_unknown(index << 2, size, val);
+    }
+}
+
+uint64_t
+e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size)
+{
+    uint64_t val;
+    unsigned int index = (addr & 0x1ffff) >> 2;
+
+    if (index < NREADOPS && macreg_readops[index]) {
+        val = macreg_readops[index](core, index);
+        trace_e1000e_core_read(index << 2, size, val);
+        return val;
+    }
+    trace_e1000e_wrn_regs_read_unknown(index << 2, size);
+    return 0;
+}
+
+static void
+e1000e_core_prepare_eeprom(E1000ECore      *core,
+                          const uint16_t *templ,
+                          uint32_t        templ_size,
+                          const uint8_t  *macaddr)
+{
+    PCIDeviceClass *pdc = PCI_DEVICE_GET_CLASS(core->owner);
+    uint16_t checksum = 0;
+    int i;
+
+    memmove(core->eeprom, templ, templ_size);
+
+    for (i = 0; i < 3; i++) {
+        core->eeprom[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
+    }
+
+    core->eeprom[11] = core->eeprom[13] = pdc->device_id;
+
+    for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+        checksum += core->eeprom[i];
+    }
+
+    checksum = (uint16_t) EEPROM_SUM - checksum;
+
+    core->eeprom[EEPROM_CHECKSUM_REG] = checksum;
+}
+
+void
+e1000e_core_pci_realize(E1000ECore      *core,
+                       const uint16_t *eeprom_templ,
+                       uint32_t        eeprom_size,
+                       const uint8_t  *macaddr)
+{
+    int i;
+
+    core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
+                                       _e1000e_autoneg_timer, core);
+    core->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                   _e1000e_mit_timer, core);
+
+    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
+        net_tx_pkt_init(&core->tx[i].tx_pkt,
+            E1000E_MAX_TX_FRAGS, core->has_vnet);
+        net_rx_pkt_init(&core->tx[i].rx_pkt,
+            core->has_vnet);
+    }
+
+    e1000e_core_prepare_eeprom(core, eeprom_templ, eeprom_size, macaddr);
+}
+
+void
+e1000e_core_pci_uninit(E1000ECore *core)
+{
+    int i;
+
+    timer_del(core->autoneg_timer);
+    timer_free(core->autoneg_timer);
+    timer_del(core->mit_timer);
+    timer_free(core->mit_timer);
+
+    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
+        net_tx_pkt_reset(core->tx[i].tx_pkt);
+        net_tx_pkt_uninit(core->tx[i].tx_pkt);
+        net_rx_pkt_uninit(core->tx[i].rx_pkt);
+    }
+}
+
+/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
+static const uint16_t phy_reg_init[] = {
+    [PHY_CTRL] =   MII_CR_SPEED_SELECT_MSB |
+                   MII_CR_FULL_DUPLEX |
+                   MII_CR_AUTO_NEG_EN,
+
+    [PHY_STATUS] = MII_SR_EXTENDED_CAPS |
+                   MII_SR_LINK_STATUS |   /* link initially up */
+                   MII_SR_AUTONEG_CAPS |
+                   /* MII_SR_AUTONEG_COMPLETE: initially NOT completed */
+                   MII_SR_PREAMBLE_SUPPRESS |
+                   MII_SR_EXTENDED_STATUS |
+                   MII_SR_10T_HD_CAPS |
+                   MII_SR_10T_FD_CAPS |
+                   MII_SR_100X_HD_CAPS |
+                   MII_SR_100X_FD_CAPS,
+
+    [PHY_ID1] = 0x141,
+    /* [PHY_ID2] configured per DevId, from e1000_reset() */
+    [PHY_AUTONEG_ADV] = 0xde1,
+    [PHY_LP_ABILITY] = 0x1e0,
+    [PHY_1000T_CTRL] = 0x0e00,
+    [PHY_1000T_STATUS] = 0x3c00,
+    [M88E1000_PHY_SPEC_CTRL] = 0x360,
+    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
+    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,
+};
+
+static const uint32_t mac_reg_init[] = {
+    [PBA] =     0x00100030,
+    [LEDCTL] =  0x602,
+    [CTRL] =    E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
+                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
+    [STATUS] =  0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
+                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
+                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
+                E1000_STATUS_LU,
+    [PSRCTL]  = (2 << E1000_PSRCTL_BSIZE0_SHIFT) |
+                (4 << E1000_PSRCTL_BSIZE1_SHIFT) |
+                (4 << E1000_PSRCTL_BSIZE2_SHIFT),
+    [TARC0]   = 0x3 | E1000_TARC_ENABLE,
+    [TARC1]   = 0x3 | E1000_TARC_ENABLE,
+    [EECD]    = E1000_EECD_AUTO_RD,
+    [EERD]    = E1000_NVM_RW_REG_DONE,
+    [GCR]     = E1000_L0S_ADJUST |
+                E1000_L1_ENTRY_LATENCY_MSB |
+                E1000_L1_ENTRY_LATENCY_LSB,
+    [TDFH]    = 0x600,
+    [TDFT]    = 0x600,
+    [TDFHS]   = 0x600,
+    [TDFTS]   = 0x600,
+    [POEMB]   = 0x30D,
+    [PBS]     = 0x028,
+    [MANC]    = E1000_MANC_DIS_IP_CHK_ARP,
+    [FACTPS]  = E1000_FACTPS_LAN0_ON | 0x20000000,
+    [SWSM]    = 1,
+    [RXCSUM]  = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD
+};
+
+void
+e1000e_core_reset(E1000ECore *core, uint8_t *macaddr, uint16_t phy_id2)
+{
+    int i;
+
+    timer_del(core->autoneg_timer);
+    timer_del(core->mit_timer);
+    core->mit_timer_on = 0;
+    core->mit_irq_level = 0;
+    core->mit_ide = 0;
+
+    memset(core->phy, 0, sizeof core->phy);
+    memmove(core->phy, phy_reg_init, sizeof phy_reg_init);
+    core->phy[PHY_ID2] = phy_id2;
+    memset(core->mac, 0, sizeof core->mac);
+    memmove(core->mac, mac_reg_init, sizeof mac_reg_init);
+
+    core->rxbuf_min_shift = 1;
+
+    if (qemu_get_queue(core->owner_nic)->link_down) {
+        e1000_link_down(core);
+    }
+
+    /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
+    core->mac[RA] = 0;
+    core->mac[RA + 1] = E1000_RAH_AV;
+    for (i = 0; i < 4; i++) {
+        core->mac[RA] |= macaddr[i] << (8 * i);
+        core->mac[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
+        net_tx_pkt_reset(core->tx[i].tx_pkt);
+        core->tx[i].sum_needed = 0;
+        core->tx[i].ipcss = 0;
+        core->tx[i].ipcso = 0;
+        core->tx[i].ipcse = 0;
+        core->tx[i].tucss = 0;
+        core->tx[i].tucso = 0;
+        core->tx[i].tucse = 0;
+        core->tx[i].hdr_len = 0;
+        core->tx[i].mss = 0;
+        core->tx[i].paylen = 0;
+        core->tx[i].ip = 0;
+        core->tx[i].tcp = 0;
+        core->tx[i].tse = 0;
+        core->tx[i].cptse = 0;
+        core->tx[i].skip_cp = 0;
+    }
+}
+
+void e1000e_core_pre_save(E1000ECore *core)
+{
+    int i;
+    NetClientState *nc = qemu_get_queue(core->owner_nic);
+
+    /* If the mitigation timer is active, emulate a timeout now. */
+    if (core->mit_timer_on) {
+        _e1000e_mit_timer(core);
+    }
+
+    /*
+    * If link is down and auto-negotiation is supported and ongoing,
+    * complete auto-negotiation immediately. This allows us to look
+    * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
+    */
+    if (nc->link_down && have_autoneg(core)) {
+        core->phy[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
+        if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
+            core->tx[i].skip_cp = true;
+        }
+    }
+}
+
+int
+e1000e_core_post_load(E1000ECore *core)
+{
+    int i;
+
+    NetClientState *nc = qemu_get_queue(core->owner_nic);
+
+    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
+        net_tx_pkt_init(&core->tx[i].tx_pkt,
+            E1000E_MAX_TX_FRAGS, core->has_vnet);
+        net_rx_pkt_init(&core->tx[i].rx_pkt,
+            core->has_vnet);
+    }
+
+    if (!(core->compat_flags & E1000_FLAG_MIT)) {
+        core->mac[ITR] = core->mac[RDTR] = core->mac[RADV] =
+            core->mac[TADV] = 0;
+        core->mit_irq_level = false;
+    }
+    core->mit_ide = 0;
+    core->mit_timer_on = false;
+
+    /* nc.link_down can't be migrated, so infer link_down according
+     * to link status bit in core.mac[STATUS].
+     * Alternatively, restart link negotiation if it was in progress. */
+    nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;
+
+    if (have_autoneg(core) &&
+        !(core->phy[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
+        nc->link_down = false;
+        timer_mod(core->autoneg_timer,
+                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
+    }
+
+    return 0;
+}
diff --git a/hw/net/e1000e_core.h b/hw/net/e1000e_core.h
new file mode 100644
index 0000000..78e4834
--- /dev/null
+++ b/hw/net/e1000e_core.h
@@ -0,0 +1,181 @@
+/*
+* Core code for QEMU e1000e emulation
+*
+* Software developer's manuals:
+* 
http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
+*
+* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
+* Developed by Daynix Computing LTD (http://www.daynix.com)
+*
+* Authors:
+* Dmitry Fleytman <address@hidden>
+* Leonid Bloch <address@hidden>
+* Yan Vugenfirer <address@hidden>
+*
+* Based on work done by:
+* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+* Copyright (c) 2008 Qumranet
+* Based on work done by:
+* Copyright (c) 2007 Dan Aloni
+* Copyright (c) 2004 Antony T Curtis
+*
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#define E1000E_PHY_SIZE     (0x20)
+#define E1000E_MAC_SIZE     (0x8000)
+#define E1000E_EEPROM_SIZE  (64)
+#define E1000E_MSIX_VEC_NUM (5)
+#define E1000E_NUM_TX_RINGS (2)
+
+typedef struct E1000Regs_st E1000ECore;
+
+enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
+
+typedef struct E1000Regs_st {
+    uint32_t mac[E1000E_MAC_SIZE];
+    uint16_t phy[E1000E_PHY_SIZE];
+    uint16_t eeprom[E1000E_EEPROM_SIZE];
+
+    uint32_t rxbuf_sizes[E1000_PSRCTL_BUFFS_PER_DESC];
+    uint32_t rx_desc_buf_size;
+    uint32_t rxbuf_min_shift;
+    uint8_t rx_desc_len;
+
+    struct {
+        uint32_t val_in;    /* shifted in from guest driver */
+        uint16_t bitnum_in;
+        uint16_t bitnum_out;
+        uint16_t reading;
+        uint32_t old_eecd;
+    } eecd_state;
+
+    QEMUTimer *autoneg_timer;
+    QEMUTimer *mit_timer;      /* Mitigation timer. */
+    bool mit_timer_on;         /* Mitigation timer is running. */
+    bool mit_irq_level;        /* Tracks interrupt pin level. */
+    uint32_t mit_ide;          /* Tracks E1000_TXD_CMD_IDE bit. */
+
+    /* Compatibility flags for migration to/from qemu 1.3.0 and older */
+#define E1000_FLAG_AUTONEG_BIT 0
+#define E1000_FLAG_MIT_BIT 1
+#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
+#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
+    uint32_t compat_flags;
+
+    struct e1000_tx {
+        unsigned char sum_needed;
+        uint8_t ipcss;
+        uint8_t ipcso;
+        uint16_t ipcse;
+        uint8_t tucss;
+        uint8_t tucso;
+        uint16_t tucse;
+        uint8_t hdr_len;
+        uint16_t mss;
+        uint32_t paylen;
+        int8_t ip;
+        int8_t tcp;
+        bool tse;
+        bool cptse;
+
+        struct NetTxPkt *tx_pkt;
+        bool skip_cp;
+
+        struct NetRxPkt *rx_pkt;
+    } tx[E1000E_NUM_TX_RINGS];
+
+    bool has_vnet;
+
+    NICState *owner_nic;
+    PCIDevice *owner;
+    void (*owner_start_recv)(PCIDevice *d);
+} E1000ECore;
+
+#define defreg(x)   x = (E1000_##x>>2)
+enum {
+    defreg(CTRL),    defreg(EECD),    defreg(EERD),    defreg(GPRC),
+    defreg(GPTC),    defreg(ICR),     defreg(ICS),     defreg(IMC),
+    defreg(IMS),     defreg(LEDCTL),  defreg(MANC),    defreg(MDIC),
+    defreg(MPC),     defreg(PBA),     defreg(RCTL),    defreg(RDBAH),
+    defreg(RDBAL),   defreg(RDH),     defreg(RDLEN),   defreg(RDT),
+    defreg(STATUS),  defreg(SWSM),    defreg(TCTL),    defreg(TDBAH),
+    defreg(TDBAL),   defreg(TDH),     defreg(TDLEN),   defreg(TDT),
+    defreg(TDLEN1),  defreg(TDBAL1),  defreg(TDBAH1),  defreg(TDH1),
+    defreg(TDT1),    defreg(TORH),    defreg(TORL),    defreg(TOTH),
+    defreg(TOTL),    defreg(TPR),     defreg(TPT),     defreg(TXDCTL),
+    defreg(WUFC),    defreg(RA),      defreg(MTA),     defreg(CRCERRS),
+    defreg(VFTA),    defreg(VET),     defreg(RDTR),    defreg(RADV),
+    defreg(TADV),    defreg(ITR),     defreg(SCC),     defreg(ECOL),
+    defreg(MCC),     defreg(LATECOL), defreg(COLC),    defreg(DC),
+    defreg(TNCRS),   defreg(SEC),     defreg(CEXTERR), defreg(RLEC),
+    defreg(XONRXC),  defreg(XONTXC),  defreg(XOFFRXC), defreg(XOFFTXC),
+    defreg(FCRUC),   defreg(AIT),     defreg(TDFH),    defreg(TDFT),
+    defreg(TDFHS),   defreg(TDFTS),   defreg(TDFPC),   defreg(WUC),
+    defreg(WUS),     defreg(POEMB),   defreg(PBS),     defreg(RDFH),
+    defreg(RDFT),    defreg(RDFHS),   defreg(RDFTS),   defreg(RDFPC),
+    defreg(PBM),     defreg(IPAV),    defreg(IP4AT),   defreg(IP6AT),
+    defreg(WUPM),    defreg(FFLT),    defreg(FFMT),    defreg(FFVT),
+    defreg(TARC0),   defreg(TARC1),   defreg(IAM),     defreg(EXTCNF_CTRL),
+    defreg(GCR),     defreg(TIMINCA), defreg(EIAC),    defreg(CTRL_EXT),
+    defreg(IVAR),    defreg(MFUTP01), defreg(MFUTP23), defreg(MANC2H),
+    defreg(MFVAL),   defreg(MDEF),    defreg(FACTPS),  defreg(FTFT),
+    defreg(RUC),     defreg(ROC),     defreg(RFC),     defreg(RJC),
+    defreg(PRC64),   defreg(PRC127),  defreg(PRC255),  defreg(PRC511),
+    defreg(PRC1023), defreg(PRC1522), defreg(PTC64),   defreg(PTC127),
+    defreg(PTC255),  defreg(PTC511),  defreg(PTC1023), defreg(PTC1522),
+    defreg(GORCL),   defreg(GORCH),   defreg(GOTCL),   defreg(GOTCH),
+    defreg(RNBC),    defreg(BPRC),    defreg(MPRC),    defreg(RFCTL),
+    defreg(PSRCTL),  defreg(MPTC),    defreg(BPTC),    defreg(TSCTFC),
+    defreg(IAC),     defreg(MGTPRC),  defreg(MGTPDC),  defreg(MGTPTC),
+    defreg(TSCTC),   defreg(RXCSUM),  defreg(FUNCTAG), defreg(GSCL_1),
+    defreg(GSCL_2),  defreg(GSCL_3),  defreg(GSCL_4),  defreg(GSCN_0),
+    defreg(GSCN_1),  defreg(GSCN_2),  defreg(GSCN_3),  defreg(GCR2)
+};
+
+void
+e1000e_core_write(E1000ECore *core, hwaddr addr, uint64_t val, unsigned size);
+
+uint64_t
+e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size);
+
+void
+e1000e_core_pci_realize(E1000ECore      *regs,
+                       const uint16_t *eeprom_templ,
+                       uint32_t        eeprom_size,
+                       const uint8_t  *macaddr);
+
+void
+e1000e_core_reset(E1000ECore *core, uint8_t *macaddr, uint16_t phy_id2);
+
+void
+e1000e_core_pre_save(E1000ECore *core);
+
+int
+e1000e_core_post_load(E1000ECore *core);
+
+void
+e1000e_core_set_link_status(E1000ECore *core);
+
+void
+e1000e_core_pci_uninit(E1000ECore *core);
+
+int
+e1000e_can_receive(E1000ECore *core);
+
+ssize_t
+e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size);
+
+ssize_t
+e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt);
diff --git a/trace-events b/trace-events
index 30eba92..39ccb21 100644
--- a/trace-events
+++ b/trace-events
@@ -1590,3 +1590,71 @@ i8257_unregistered_dma(int nchan, int dma_pos, int 
dma_len) "unregistered DMA ch
 cpu_set_state(int cpu_index, uint8_t state) "setting cpu %d state to %" PRIu8
 cpu_halt(int cpu_index) "halting cpu %d"
 cpu_unhalt(int cpu_index) "unhalting cpu %d"
+
+# hw/net/e1000e_core.c
+e1000e_core_write(uint64_t index, uint32_t size, uint64_t val) "Write to 
register 0x%"PRIx64", %d byte(s), value: 0x%"PRIx64
+e1000e_core_read(uint64_t index, uint32_t size, uint64_t val) "Read from 
register 0x%"PRIx64", %d byte(s), value: 0x%"PRIx64
+e1000e_core_set_rxctl(uint32_t rdt, uint32_t rctl) "RCTL: %d, mac[RCTL] = 0x%x"
+e1000e_core_set_ics(uint32_t val) "set_ics 0x%x\n"
+e1000e_core_read_ics(uint32_t val) "read_ics 0x%x\n"
+e1000e_core_mdic_read(uint32_t addr, uint32_t data) "MDIC read reg 0x%x, value 
0x%x"
+e1000e_core_mdic_read_unhandled(uint32_t addr) "MDIC read reg 0x%x unhandled"
+e1000e_core_mdic_write(uint32_t addr, uint32_t data) "MDIC write reg 0x%x, 
value 0x%x"
+e1000e_core_mdic_write_unhandled(uint32_t addr) "MDIC write reg 0x%x unhandled"
+e1000e_core_eeeprom_read(uint16_t bit, uint16_t reading) "reading eeprom bit 
%d (reading %d)"
+e1000e_core_eeeprom_write(uint16_t bit_in, uint16_t bit_out, uint16_t reading) 
"eeprom bitnum in %d out %d, reading %d"
+e1000e_core_icr_write(uint32_t val) "ICR write value 0x%x"
+e1000e_core_icr_read(uint32_t val) "ICR read value 0x%x"
+e1000e_core_start_link_negotiation(void) "Start link auto negotiation"
+e1000e_core_link_negotiation_done(void) "Auto negotiation is completed"
+
+e1000e_wrn_regs_write_ro(uint64_t index, uint32_t size, uint64_t val) 
"WARNING: Write to RO register 0x%"PRIx64", %d byte(s), value: 0x%"PRIx64
+e1000e_wrn_regs_write_unknown(uint64_t index, uint32_t size, uint64_t val) 
"WARNING: Write to unknown register 0x%"PRIx64", %d byte(s), value: 0x%"PRIx64
+e1000e_wrn_regs_read_unknown(uint64_t index, uint32_t size) "WARNING: Read 
from unknown register 0x%"PRIx64", %d byte(s)"
+e1000e_wrn_no_ts_support(void) "WARNING: Guest requested TX timestamping which 
is not supported"
+e1000e_wrn_no_snap_support(void) "WARNING: Guest requested TX SNAP header 
update which is not supported"
+
+e1000e_tx_disabled(void) "TX Disabled"
+e1000e_tx_descr(uint32_t head, void *addr, uint32_t lower, uint32_t upper) 
"index %d: %p : %x %x"
+e1000e_tdh_wraparound(uint32_t start, uint32_t tail, uint32_t len) "TDH 
wraparound @%x, TDT %x, TDLEN %x"
+e1000e_tx_cso_zero(void) "TCP/UDP: cso 0!"
+
+e1000e_rx_null_descriptor(void) "Null RX descriptor!!"
+
+e1000e_rx_err_wraparound(uint32_t start, uint32_t rdt, uint32_t rdlen) "RDH 
wraparound @%x, RDT %x, RDLEN %x"
+
+e1000e_rx_flt_ucast_match(uint32_t idx, uint8_t b0, uint8_t b1, uint8_t b2, 
uint8_t b3, uint8_t b4, uint8_t b5) "unicast match[%d]: 
%02x:%02x:%02x:%02x:%02x:%02x"
+e1000e_rx_flt_ucast_mismatch(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, 
uint8_t b4, uint8_t b5) "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x"
+e1000e_rx_flt_inexact_mismatch(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, 
uint8_t b4, uint8_t b5, uint32_t mo, uint32_t mta, uint32_t mta_val) "inexact 
mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x"
+
+e1000e_rx_desc_ps_read(uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3) 
"buffers: [0x%"PRIx64", 0x%"PRIx64", 0x%"PRIx64", 0x%"PRIx64"]"
+e1000e_rx_desc_ps_write(uint16_t a0, uint16_t a1, uint16_t a2, uint16_t a3) 
"bytes written: [%u, %u, %u, %u]"
+e1000e_rx_desc_buff_sizes(uint32_t b0, uint32_t b1, uint32_t b2, uint32_t b3) 
"buffer sizes: [%u, %u, %u, %u]"
+
+e1000e_rx_desc_buff_write(uint8_t idx, uint64_t addr, uint16_t offset, const 
void* source, uint32_t len) "buffer #%u, addr: 0x%"PRIx64", offset: %u, from: 
%p, length: %u"
+
+e1000e_irq_set_cause(uint32_t cause) "IRQ cause set 0x%x"
+e1000e_irq_msi_notify(uint32_t cause) "MSI notify 0x%x"
+e1000e_irq_msix_notify(uint32_t cause) "MSI-X notify 0x%x"
+e1000e_irq_legacy_notify(bool level) "IRQ line state: %d"
+e1000e_irq_msix_notify_vec(uint32_t vector) "MSI-X notify vector 0x%x"
+
+e1000e_wrn_msix_vec_wrong(uint32_t cause, uint32_t cfg) "Invalid configuration 
for cause 0x%x: 0x%x"
+e1000e_wrn_msix_invalid(uint32_t cause, uint32_t cfg) "Invalid entry for cause 
0x%x: 0x%x"
+
+# hw/net/e1000e.c
+e1000e_cb_pci_realize(void) "E1000E PCI realize entry"
+e1000e_cb_pci_uninit(void) "E1000E PCI unit entry"
+e1000e_cb_write_config(void) "E1000E write config entry"
+e1000e_cb_qdev_reset(void) "E1000E qdev reset entry"
+e1000e_cb_pre_save(void) "E1000E pre save entry"
+e1000e_cb_post_load(void) "E1000E post load entry"
+
+e1000e_wrn_io_read(uint64_t addr, uint32_t size) "IO unknown read from 
0x%"PRIx64", %d byte(s)"
+e1000e_wrn_io_write(uint64_t addr, uint32_t size, uint64_t val) "IO unknown 
write to 0x%"PRIx64", %d byte(s), value: 0x%"PRIx64
+
+e1000e_msi_init_fail(int32_t res) "Failed to initialize MSI, error %d"
+e1000e_msix_init_fail(int32_t res) "Failed to initialize MSI-X, error %d"
+e1000e_msix_use_vector_fail(uint32_t vec, int32_t res) "Failed to use MSI-X 
vector %d, error %d"
+
+e1000e_cfg_support_virtio(bool support) "Virtio header supported: %d"
-- 
2.4.3




reply via email to

[Prev in Thread] Current Thread [Next in Thread]