.minimum_version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_UINT16(vlan, struct igb_tx),
- VMSTATE_UINT16(mss, struct igb_tx),
+ VMSTATE_UINT32(ctx.vlan_macip_lens, struct igb_tx),
+ VMSTATE_UINT32(ctx.seqnum_seed, struct igb_tx),
+ VMSTATE_UINT32(ctx.type_tucmd_mlhl, struct igb_tx),
+ VMSTATE_UINT32(ctx.mss_l4len_idx, struct igb_tx),
VMSTATE_BOOL(tse, struct igb_tx),
VMSTATE_BOOL(ixsm, struct igb_tx),
VMSTATE_BOOL(txsm, struct igb_tx),
diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
index 41d1abae03..dbe24739d0 100644
--- a/hw/net/igb_core.c
+++ b/hw/net/igb_core.c
@@ -390,7 +390,8 @@ static bool
igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx)
{
if (tx->tse) {
- if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->mss)) {
+ uint32_t mss = tx->ctx.mss_l4len_idx >> 16;
+ if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, mss)) {
return false;
}
@@ -551,8 +552,10 @@ igb_process_tx_desc(IGBCore *core,
E1000_ADVTXD_DTYP_CTXT) {
/* advanced transmit context descriptor */
tx_ctx_desc = (struct e1000_adv_tx_context_desc *)tx_desc;
- tx->vlan = le32_to_cpu(tx_ctx_desc->vlan_macip_lens) >> 16;
- tx->mss = le32_to_cpu(tx_ctx_desc->mss_l4len_idx) >> 16;
+ tx->ctx.vlan_macip_lens = le32_to_cpu(tx_ctx_desc->vlan_macip_lens);
+ tx->ctx.seqnum_seed = le32_to_cpu(tx_ctx_desc->seqnum_seed);
+ tx->ctx.type_tucmd_mlhl = le32_to_cpu(tx_ctx_desc->type_tucmd_mlhl);
+ tx->ctx.mss_l4len_idx = le32_to_cpu(tx_ctx_desc->mss_l4len_idx);
return;
} else {
/* unknown descriptor type */
@@ -576,8 +579,9 @@ igb_process_tx_desc(IGBCore *core,
if (cmd_type_len & E1000_TXD_CMD_EOP) {
if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
if (cmd_type_len & E1000_TXD_CMD_VLE) {
- net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, tx->vlan,
- core->mac[VET] & 0xffff);
+ uint16_t vlan = tx->ctx.vlan_macip_lens >> 16;
+ uint16_t vet = core->mac[VET] & 0xffff;
+ net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan, vet);
}
if (igb_tx_pkt_send(core, tx, queue_index)) {
igb_on_tx_done_update_stats(core, tx->tx_pkt);
@@ -4027,8 +4031,7 @@ static void igb_reset(IGBCore *core, bool sw)
for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
tx = &core->tx[i];
net_tx_pkt_reset(tx->tx_pkt, NULL);
- tx->vlan = 0;
- tx->mss = 0;
+ memset(&tx->ctx, 0, sizeof(tx->ctx));
tx->tse = false;
tx->ixsm = false;
tx->txsm = false;
diff --git a/hw/net/igb_core.h b/hw/net/igb_core.h
index 814c1e264b..3483edc655 100644
--- a/hw/net/igb_core.h
+++ b/hw/net/igb_core.h
@@ -72,8 +72,7 @@ struct IGBCore {
QEMUTimer *autoneg_timer;
struct igb_tx {
- uint16_t vlan; /* VLAN Tag */
- uint16_t mss; /* Maximum Segment Size */
+ struct e1000_adv_tx_context_desc ctx;
bool tse; /* TCP/UDP Segmentation Enable */
bool ixsm; /* Insert IP Checksum */
bool txsm; /* Insert TCP/UDP Checksum */