Re: [Qemu-devel] [PATCH] hw/net/virtio-net: Allocating Large sized arrays to heap


From: Zhou Jie
Subject: Re: [Qemu-devel] [PATCH] hw/net/virtio-net: Allocating Large sized arrays to heap
Date: Wed, 27 Apr 2016 08:45:39 +0800
User-agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Thunderbird/38.7.2

On 2016/4/26 20:42, Michael S. Tsirkin wrote:
> On Tue, Apr 26, 2016 at 04:05:24PM +0800, Zhou Jie wrote:
>> virtio_net_flush_tx has a huge stack usage of approximately 16392 bytes.
>> Move the large arrays to the heap to reduce stack usage.
>>
>> Signed-off-by: Zhou Jie <address@hidden>
>
> I don't think it's appropriate for trivial.
> Also - what's the point, exactly?
I think functions should not have very large stack frames.
For a 64-bit machine it will be 32k.
But according to what Christian Borntraeger said, it doesn't matter.
So, considering that virtio_net_flush_tx is on a hot path,
I agree not to change this.
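
For reference, the 16392-byte and 32k figures correspond to the two
on-stack iovec arrays in virtio_net_flush_tx; a minimal standalone
sketch, assuming VIRTQUEUE_MAX_SIZE is 1024 and the usual struct iovec
layout (8 bytes on 32-bit hosts, 16 bytes on 64-bit hosts), reproduces
them:

#include <stdio.h>
#include <sys/uio.h>

#define VIRTQUEUE_MAX_SIZE 1024

int main(void)
{
    /* sg[VIRTQUEUE_MAX_SIZE] plus sg2[VIRTQUEUE_MAX_SIZE + 1] */
    size_t frame = sizeof(struct iovec) * VIRTQUEUE_MAX_SIZE
                 + sizeof(struct iovec) * (VIRTQUEUE_MAX_SIZE + 1);

    /* 16392 bytes with 8-byte iovecs, 32784 (~32k) with 16-byte ones */
    printf("iovec arrays on the stack: %zu bytes\n", frame);
    return 0;
}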

Sincerely,
Zhou Jie


---
  hw/net/virtio-net.c | 15 +++++++++++----
  1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 5798f87..cab7bbc 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1213,6 +1213,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
      VirtIONet *n = q->n;
      VirtIODevice *vdev = VIRTIO_DEVICE(n);
      VirtQueueElement *elem;
+    struct iovec *sg = NULL, *sg2 = NULL;
      int32_t num_packets = 0;
      int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
      if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
@@ -1224,10 +1225,12 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
          return num_packets;
      }

+    sg = g_new(struct iovec, VIRTQUEUE_MAX_SIZE);
+    sg2 = g_new(struct iovec, VIRTQUEUE_MAX_SIZE + 1);
      for (;;) {
          ssize_t ret;
          unsigned int out_num;
-        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
+        struct iovec *out_sg;
          struct virtio_net_hdr_mrg_rxbuf mhdr;

          elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
@@ -1252,7 +1255,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
                  virtio_net_hdr_swap(vdev, (void *) &mhdr);
                  sg2[0].iov_base = &mhdr;
                  sg2[0].iov_len = n->guest_hdr_len;
-                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
+                out_num = iov_copy(&sg2[1], VIRTQUEUE_MAX_SIZE,
                                     out_sg, out_num,
                                     n->guest_hdr_len, -1);
                  if (out_num == VIRTQUEUE_MAX_SIZE) {
@@ -1269,10 +1272,10 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
           */
          assert(n->host_hdr_len <= n->guest_hdr_len);
          if (n->host_hdr_len != n->guest_hdr_len) {
-            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
+            unsigned sg_num = iov_copy(sg, VIRTQUEUE_MAX_SIZE,
                                         out_sg, out_num,
                                         0, n->host_hdr_len);
-            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
+            sg_num += iov_copy(sg + sg_num, VIRTQUEUE_MAX_SIZE - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
              out_num = sg_num;
@@ -1284,6 +1287,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
          if (ret == 0) {
              virtio_queue_set_notification(q->tx_vq, 0);
              q->async_tx.elem = elem;
+            g_free(sg);
+            g_free(sg2);
              return -EBUSY;
          }

@@ -1296,6 +1301,8 @@ drop:
              break;
          }
      }
+    g_free(sg);
+    g_free(sg2);
      return num_packets;
  }

--
2.5.5
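
A side note on the ARRAY_SIZE -> VIRTQUEUE_MAX_SIZE changes in the hunks
above: once sg and sg2 become heap pointers, a sizeof-based ARRAY_SIZE
silently yields the wrong count (sizeof a pointer divided by sizeof an
element), so the explicit element count has to be passed to iov_copy
instead. A minimal standalone illustration, using a plain sizeof-based
macro rather than QEMU's exact definition:

#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

int main(void)
{
    struct iovec on_stack[1024];
    struct iovec *on_heap = malloc(1024 * sizeof(*on_heap));

    printf("%zu\n", ARRAY_SIZE(on_stack)); /* 1024, as intended */
    printf("%zu\n", ARRAY_SIZE(on_heap));  /* sizeof(pointer) / sizeof(iovec),
                                              i.e. 0 on a 64-bit host */
    free(on_heap);
    return 0;
}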






--
------------------------------------------------
周潔
Dept 1
No. 6 Wenzhu Road,
Nanjing, 210012, China
TEL:+86+25-86630566-8557
FUJITSU INTERNAL:7998-8557
address@hidden
------------------------------------------------




