[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v5 2/7] virtio-net: implement RSS configuration command
From: |
Yuri Benditovich |
Subject: |
[PATCH v5 2/7] virtio-net: implement RSS configuration command |
Date: |
Wed, 18 Mar 2020 11:15:20 +0200 |
Optionally report RSS feature.
Handle RSS configuration command and keep RSS parameters
in virtio-net device context.
Signed-off-by: Yuri Benditovich <address@hidden>
---
hw/net/trace-events | 3 +
hw/net/virtio-net.c | 189 +++++++++++++++++++++++++++++----
include/hw/virtio/virtio-net.h | 13 +++
3 files changed, 185 insertions(+), 20 deletions(-)
diff --git a/hw/net/trace-events b/hw/net/trace-events
index a1da98a643..a84b9c3d9f 100644
--- a/hw/net/trace-events
+++ b/hw/net/trace-events
@@ -371,6 +371,9 @@ virtio_net_announce_notify(void) ""
virtio_net_announce_timer(int round) "%d"
virtio_net_handle_announce(int round) "%d"
virtio_net_post_load_device(void) ""
+virtio_net_rss_disable(void)
+virtio_net_rss_error(const char *msg, uint32_t value) "%s, value 0x%08x"
+virtio_net_rss_enable(uint32_t p1, uint16_t p2, uint8_t p3) "hashes 0x%x, table of %d, key of %d"
# tulip.c
tulip_reg_write(uint64_t addr, const char *name, int size, uint64_t val) "addr 0x%02"PRIx64" (%s) size %d value 0x%08"PRIx64
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 90b01221e9..6d21922746 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -142,6 +142,16 @@ struct virtio_net_rss_config {
tso/gso/gro 'off'. */
#define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000
+#define VIRTIO_NET_RSS_SUPPORTED_HASHES (VIRTIO_NET_RSS_HASH_TYPE_IPv4 | \
+ VIRTIO_NET_RSS_HASH_TYPE_TCPv4 | \
+ VIRTIO_NET_RSS_HASH_TYPE_UDPv4 | \
+ VIRTIO_NET_RSS_HASH_TYPE_IPv6 | \
+ VIRTIO_NET_RSS_HASH_TYPE_TCPv6 | \
+ VIRTIO_NET_RSS_HASH_TYPE_UDPv6 | \
+ VIRTIO_NET_RSS_HASH_TYPE_IP_EX | \
+ VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \
+ VIRTIO_NET_RSS_HASH_TYPE_UDP_EX)
+
/* temporary until standard header include it */
#if !defined(VIRTIO_NET_HDR_F_RSC_INFO)
@@ -173,6 +183,8 @@ static VirtIOFeature feature_sizes[] = {
.end = endof(struct virtio_net_config, mtu)},
{.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
.end = endof(struct virtio_net_config, duplex)},
+ {.flags = 1ULL << VIRTIO_NET_F_RSS,
+ .end = endof(struct virtio_net_config_with_rss, supported_hash_types)},
{}
};
@@ -195,28 +207,33 @@ static int vq2q(int queue_index)
static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
VirtIONet *n = VIRTIO_NET(vdev);
- struct virtio_net_config netcfg;
-
- virtio_stw_p(vdev, &netcfg.status, n->status);
- virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
- virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
- memcpy(netcfg.mac, n->mac, ETH_ALEN);
- virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
- netcfg.duplex = n->net_conf.duplex;
+ struct virtio_net_config_with_rss netcfg;
+
+ virtio_stw_p(vdev, &netcfg.cfg.status, n->status);
+ virtio_stw_p(vdev, &netcfg.cfg.max_virtqueue_pairs, n->max_queues);
+ virtio_stw_p(vdev, &netcfg.cfg.mtu, n->net_conf.mtu);
+ memcpy(netcfg.cfg.mac, n->mac, ETH_ALEN);
+ virtio_stl_p(vdev, &netcfg.cfg.speed, n->net_conf.speed);
+ netcfg.cfg.duplex = n->net_conf.duplex;
+ netcfg.rss_max_key_size = VIRTIO_NET_RSS_MAX_KEY_SIZE;
+ virtio_stw_p(vdev, &netcfg.rss_max_indirection_table_length,
+ VIRTIO_NET_RSS_MAX_TABLE_LEN);
+ virtio_stl_p(vdev, &netcfg.supported_hash_types,
+ VIRTIO_NET_RSS_SUPPORTED_HASHES);
memcpy(config, &netcfg, n->config_size);
}
static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
VirtIONet *n = VIRTIO_NET(vdev);
- struct virtio_net_config netcfg = {};
+ struct virtio_net_config_with_rss netcfg = {};
memcpy(&netcfg, config, n->config_size);
if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
- memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
- memcpy(n->mac, netcfg.mac, ETH_ALEN);
+ memcmp(netcfg.cfg.mac, n->mac, ETH_ALEN)) {
+ memcpy(n->mac, netcfg.cfg.mac, ETH_ALEN);
qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
}
}
@@ -766,6 +783,7 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev,
uint64_t features,
return features;
}
+ virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
features = vhost_net_get_features(get_vhost_net(nc->peer), features);
vdev->backend_features = features;
@@ -925,6 +943,7 @@ static void virtio_net_set_features(VirtIODevice *vdev,
uint64_t features)
}
virtio_net_set_multiqueue(n,
+ virtio_has_feature(features, VIRTIO_NET_F_RSS) ||
virtio_has_feature(features, VIRTIO_NET_F_MQ));
virtio_net_set_mrg_rx_bufs(n,
@@ -1201,25 +1220,152 @@ static int virtio_net_handle_announce(VirtIONet *n,
uint8_t cmd,
}
}
+/*
+ * Turn RSS off for this device.  The trace point fires only on an
+ * actual enabled -> disabled transition, so repeated calls stay quiet.
+ */
+static void virtio_net_disable_rss(VirtIONet *n)
+{
+    if (n->rss_data.enabled) {
+        trace_virtio_net_rss_disable();
+    }
+    n->rss_data.enabled = false;
+}
+
+/*
+ * Handle the VIRTIO_NET_CTRL_MQ_RSS_CONFIG control command: parse the
+ * variable-length RSS configuration supplied by the guest, validate each
+ * field and store the accepted parameters in n->rss_data.
+ *
+ * Returns the number of queues requested by the guest on success, or 0
+ * (never a valid queue count) after disabling RSS on any error; every
+ * failure path is traced with a message and the offending value.
+ */
+static uint16_t virtio_net_handle_rss(VirtIONet *n,
+                                      struct iovec *iov, unsigned int iov_cnt)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    struct virtio_net_rss_config cfg;
+    size_t s, offset = 0, size_get;
+    uint16_t queues, i;
+    /* Tail of the command after the indirection table:
+     * max_tx_vq (uint16_t) followed by hash_key_length (uint8_t). */
+    struct {
+        uint16_t us;
+        uint8_t b;
+    } QEMU_PACKED temp;
+    const char *err_msg = "";
+    uint32_t err_value = 0;
+
+    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_RSS)) {
+        err_msg = "RSS is not negotiated";
+        goto error;
+    }
+    /* Fixed-size head of the command, up to the indirection table */
+    size_get = offsetof(struct virtio_net_rss_config, indirection_table);
+    s = iov_to_buf(iov, iov_cnt, offset, &cfg, size_get);
+    if (s != size_get) {
+        err_msg = "Short command buffer";
+        err_value = (uint32_t)s;
+        goto error;
+    }
+    n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
+    n->rss_data.indirections_len =
+        virtio_lduw_p(vdev, &cfg.indirection_table_mask);
+    /* The field is a mask, so the table length is mask + 1 */
+    n->rss_data.indirections_len++;
+    if (!is_power_of_2(n->rss_data.indirections_len)) {
+        err_msg = "Invalid size of indirection table";
+        err_value = n->rss_data.indirections_len;
+        goto error;
+    }
+    if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
+        err_msg = "Too large indirection table";
+        err_value = n->rss_data.indirections_len;
+        goto error;
+    }
+    n->rss_data.default_queue =
+        virtio_lduw_p(vdev, &cfg.unclassified_queue);
+    if (n->rss_data.default_queue >= n->max_queues) {
+        err_msg = "Invalid default queue";
+        err_value = n->rss_data.default_queue;
+        goto error;
+    }
+    offset += size_get;
+    size_get = sizeof(uint16_t) * n->rss_data.indirections_len;
+    g_free(n->rss_data.indirections_table);
+    n->rss_data.indirections_table = g_malloc(size_get);
+    /* NOTE(review): g_malloc() aborts on allocation failure, so this
+     * check cannot trigger in practice; kept as defensive code. */
+    if (!n->rss_data.indirections_table) {
+        err_msg = "Can't allocate indirections table";
+        err_value = n->rss_data.indirections_len;
+        goto error;
+    }
+    s = iov_to_buf(iov, iov_cnt, offset,
+                   n->rss_data.indirections_table, size_get);
+    if (s != size_get) {
+        err_msg = "Short indirection table buffer";
+        err_value = (uint32_t)s;
+        goto error;
+    }
+    /* Convert each table entry from guest endianness in place */
+    for (i = 0; i < n->rss_data.indirections_len; ++i) {
+        uint16_t val = n->rss_data.indirections_table[i];
+        n->rss_data.indirections_table[i] = virtio_lduw_p(vdev, &val);
+    }
+    offset += size_get;
+    size_get = sizeof(temp);
+    s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get);
+    if (s != size_get) {
+        err_msg = "Can't get queues";
+        err_value = (uint32_t)s;
+        goto error;
+    }
+    queues = virtio_lduw_p(vdev, &temp.us);
+    if (queues == 0 || queues > n->max_queues) {
+        err_msg = "Invalid number of queues";
+        err_value = queues;
+        goto error;
+    }
+    if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
+        err_msg = "Invalid key size";
+        err_value = temp.b;
+        goto error;
+    }
+    if (!temp.b && n->rss_data.hash_types) {
+        err_msg = "No key provided";
+        err_value = 0;
+        goto error;
+    }
+    if (!temp.b && !n->rss_data.hash_types) {
+        /* No key and no hash types: the guest is turning RSS off */
+        virtio_net_disable_rss(n);
+        return queues;
+    }
+    offset += size_get;
+    size_get = temp.b;
+    s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get);
+    if (s != size_get) {
+        err_msg = "Can't get key buffer";
+        err_value = (uint32_t)s;
+        goto error;
+    }
+    n->rss_data.enabled = true;
+    trace_virtio_net_rss_enable(n->rss_data.hash_types,
+                                n->rss_data.indirections_len,
+                                temp.b);
+    return queues;
+error:
+    trace_virtio_net_rss_error(err_msg, err_value);
+    virtio_net_disable_rss(n);
+    return 0;
+}
+
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
struct iovec *iov, unsigned int iov_cnt)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- struct virtio_net_ctrl_mq mq;
- size_t s;
uint16_t queues;
- s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
- if (s != sizeof(mq)) {
- return VIRTIO_NET_ERR;
- }
+ virtio_net_disable_rss(n);
+ if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
+ queues = virtio_net_handle_rss(n, iov, iov_cnt);
+ } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ struct virtio_net_ctrl_mq mq;
+ size_t s;
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ)) {
+ return VIRTIO_NET_ERR;
+ }
+ s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
+ if (s != sizeof(mq)) {
+ return VIRTIO_NET_ERR;
+ }
+ queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
- if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ } else {
return VIRTIO_NET_ERR;
}
- queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
-
if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
queues > n->max_queues ||
@@ -3173,6 +3319,7 @@ static void virtio_net_device_unrealize(DeviceState *dev,
Error **errp)
g_free(n->vqs);
qemu_del_nic(n->nic);
virtio_net_rsc_cleanup(n);
+ g_free(n->rss_data.indirections_table);
virtio_cleanup(vdev);
}
@@ -3274,6 +3421,8 @@ static Property virtio_net_properties[] = {
DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
+ DEFINE_PROP_BIT64("rss", VirtIONet, host_features,
+ VIRTIO_NET_F_RSS, false),
DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
VIRTIO_NET_F_RSC_EXT, false),
DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index 96c68d4a92..d3fad7c8f3 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -126,6 +126,18 @@ typedef struct VirtioNetRscChain {
/* Maximum packet size we can receive from tap device: header + 64k */
#define VIRTIO_NET_MAX_BUFSIZE (sizeof(struct virtio_net_hdr) + (64 * KiB))
+#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
+#define VIRTIO_NET_RSS_MAX_TABLE_LEN 128
+
+typedef struct VirtioNetRssData {
+ bool enabled;
+ uint32_t hash_types;
+ uint8_t key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
+ uint16_t indirections_len;
+ uint16_t *indirections_table;
+ uint16_t default_queue;
+} VirtioNetRssData;
+
typedef struct VirtIONetQueue {
VirtQueue *rx_vq;
VirtQueue *tx_vq;
@@ -199,6 +211,7 @@ struct VirtIONet {
bool failover;
DeviceListener primary_listener;
Notifier migration_state;
+ VirtioNetRssData rss_data;
};
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
--
2.17.1
- [PATCH v5 0/7] reference implementation of RSS and hash report, Yuri Benditovich, 2020/03/18
- [PATCH v5 1/7] virtio-net: introduce RSS and hash report features, Yuri Benditovich, 2020/03/18
- [PATCH v5 2/7] virtio-net: implement RSS configuration command,
Yuri Benditovich <=
- [PATCH v5 4/7] tap: allow extended virtio header with hash info, Yuri Benditovich, 2020/03/18
- [PATCH v5 3/7] virtio-net: implement RX RSS processing, Yuri Benditovich, 2020/03/18
- [PATCH v5 6/7] vmstate.h: provide VMSTATE_VARRAY_UINT16_ALLOC macro, Yuri Benditovich, 2020/03/18
- Re: [PATCH v5 6/7] vmstate.h: provide VMSTATE_VARRAY_UINT16_ALLOC macro, Dr. David Alan Gilbert, 2020/03/18
- Re: [PATCH v5 6/7] vmstate.h: provide VMSTATE_VARRAY_UINT16_ALLOC macro, Juan Quintela, 2020/03/18
- [PATCH v5 5/7] virtio-net: reference implementation of hash report, Yuri Benditovich, 2020/03/18