[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v5 8/8] igb: respect VMVIR and VMOLR for VLAN
From: Sriram Yagnaraman
Subject: [PATCH v5 8/8] igb: respect VMVIR and VMOLR for VLAN
Date: Thu, 2 Feb 2023 08:26:48 +0100
Add support for stripping/inserting VLAN for VFs.
Had to move CSUM calculation back into the for loop, since packet data
is pulled inside the loop based on strip VLAN decision for every VF.
net_rx_pkt_fix_l4_csum should be extended to accept a buffer instead for
igb. Work for a future patch.
Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
---
hw/net/igb_core.c | 54 ++++++++++++++++++++++++++++++++++++++---------
1 file changed, 44 insertions(+), 10 deletions(-)
diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
index 42e3517695..d85f39a25f 100644
--- a/hw/net/igb_core.c
+++ b/hw/net/igb_core.c
@@ -386,6 +386,25 @@ igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx,
info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
}
+static inline bool
+igb_tx_insert_vlan(IGBCore *core, uint16_t qn,
+ struct igb_tx *tx, bool desc_vle)
+{
+ if (core->mac[MRQC] & 1) {
+ uint16_t pool = qn % IGB_NUM_VM_POOLS;
+
+ if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_DEFAULT) {
+ /* always insert default VLAN */
+ desc_vle = true;
+ tx->vlan = core->mac[VMVIR0 + pool] & 0xffff;
+ } else if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_NEVER) {
+ return false;
+ }
+ }
+
+ return desc_vle && e1000x_vlan_enabled(core->mac);
+}
+
static bool
igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx)
{
@@ -581,7 +600,8 @@ igb_process_tx_desc(IGBCore *core,
if (cmd_type_len & E1000_TXD_CMD_EOP) {
if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
- if (cmd_type_len & E1000_TXD_CMD_VLE) {
+ if (igb_tx_insert_vlan(core, queue_index, tx,
+ !!(cmd_type_len & E1000_TXD_CMD_VLE))) {
net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, tx->vlan,
core->mac[VET] & 0xffff);
}
@@ -1536,6 +1556,20 @@ igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt,
igb_update_rx_stats(core, rxi, size, total_size);
}
+static bool
+igb_rx_strip_vlan(IGBCore *core, const E1000E_RingInfo *rxi)
+{
+ if (core->mac[MRQC] & 1) {
+ uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS;
+ /* Sec 7.10.3.8: CTRL.VME is ignored, only VMOLR/RPLOLR is used */
+ return (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) ?
+ core->mac[RPLOLR] & E1000_RPLOLR_STRVLAN :
+ core->mac[VMOLR0 + pool] & E1000_VMOLR_STRVLAN;
+ }
+
+ return e1000x_vlan_enabled(core->mac);
+}
+
static inline void
igb_rx_fix_l4_csum(IGBCore *core, struct NetRxPkt *pkt)
{
@@ -1616,10 +1650,7 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
ehdr = PKT_GET_ETH_HDR(filter_buf);
net_rx_pkt_set_packet_type(core->rx_pkt, get_eth_packet_type(ehdr));
-
- net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
- e1000x_vlan_enabled(core->mac),
- core->mac[VET] & 0xffff);
+ net_rx_pkt_set_protocols(core->rx_pkt, filter_buf, size);
queues = igb_receive_assign(core, ehdr, size, &rss_info, external_tx);
if (!queues) {
@@ -1627,11 +1658,6 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
return orig_size;
}
- total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
- e1000x_fcs_len(core->mac);
-
- igb_rx_fix_l4_csum(core, core->rx_pkt);
-
for (i = 0; i < IGB_NUM_QUEUES; i++) {
if (!(queues & BIT(i)) ||
!(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
@@ -1639,12 +1665,20 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
}
igb_rx_ring_init(core, &rxr, i);
+ net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
+ igb_rx_strip_vlan(core, rxr.i),
+ core->mac[VET] & 0xffff);
+
+ total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
+ e1000x_fcs_len(core->mac);
+
if (!igb_has_rxbufs(core, rxr.i, total_size)) {
n |= E1000_ICS_RXO;
trace_e1000e_rx_not_written_to_guest(rxr.i->idx);
continue;
}
+ igb_rx_fix_l4_csum(core, core->rx_pkt);
igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);
core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx);
--
2.34.1
- [PATCH v5 0/8] igb: merge changes from <address@hidden>, Sriram Yagnaraman, 2023/02/02
- [PATCH v5 8/8] igb: respect VMVIR and VMOLR for VLAN, Sriram Yagnaraman <=
- [PATCH v5 4/8] igb: implement VFRE and VFTE registers, Sriram Yagnaraman, 2023/02/02
- [PATCH v5 7/8] igb: implement VF Tx and Rx stats, Sriram Yagnaraman, 2023/02/02
- [PATCH v5 2/8] igb: handle PF/VF reset properly, Sriram Yagnaraman, 2023/02/02
- [PATCH v5 3/8] igb: add ICR_RXDW, Sriram Yagnaraman, 2023/02/02
- [PATCH v5 6/8] igb: respect E1000_VMOLR_RSSE, Sriram Yagnaraman, 2023/02/02
- [PATCH v5 5/8] igb: check oversized packets for VMDq, Sriram Yagnaraman, 2023/02/02
- [PATCH v5 1/8] MAINTAINERS: Add Sriram Yagnaraman as a igb reviewer, Sriram Yagnaraman, 2023/02/02