qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH v2 for-2.4 12/12] axienet: Flush queued packets


From: Jason Wang
Subject: Re: [Qemu-devel] [PATCH v2 for-2.4 12/12] axienet: Flush queued packets when rx is done
Date: Thu, 16 Jul 2015 10:58:42 +0800
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:31.0) Gecko/20100101 Thunderbird/31.7.0


On 07/15/2015 06:19 PM, Fam Zheng wrote:
> eth_can_rx checks s->rxsize and returns false if it is non-zero. Because
> of the .can_receive semantics change, this will make the incoming queue
> disabled by peer, until it is explicitly flushed. So we should flush it
> when s->rxsize becomes zero.
>
> Squash eth_can_rx semantics into eth_rx and drop .can_receive()
> callback, also add flush when rx buffer becomes available again after a
> packet gets queued.
>
> The other conditions, "!axienet_rx_resetting(s) &&
> axienet_rx_enabled(s)" are OK because enet_write already calls
> qemu_flush_queued_packets when the register bits are changed.
>
> Signed-off-by: Fam Zheng <address@hidden>
> ---
>  hw/net/xilinx_axienet.c | 17 +++++++++++++----
>  1 file changed, 13 insertions(+), 4 deletions(-)
>
> diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c
> index 9205770..d63c423 100644
> --- a/hw/net/xilinx_axienet.c
> +++ b/hw/net/xilinx_axienet.c
> @@ -401,6 +401,9 @@ struct XilinxAXIEnet {
>  
>      uint8_t rxapp[CONTROL_PAYLOAD_SIZE];
>      uint32_t rxappsize;
> +
> +    /* Whether axienet_eth_rx_notify should flush incoming queue. */
> +    bool need_flush;
>  };
>  
>  static void axienet_rx_reset(XilinxAXIEnet *s)
> @@ -658,10 +661,8 @@ static const MemoryRegionOps enet_ops = {
>      .endianness = DEVICE_LITTLE_ENDIAN,
>  };
>  
> -static int eth_can_rx(NetClientState *nc)
> +static int eth_can_rx(XilinxAXIEnet *s)
>  {
> -    XilinxAXIEnet *s = qemu_get_nic_opaque(nc);
> -
>      /* RX enabled?  */
>      return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s);
>  }
> @@ -701,6 +702,10 @@ static void axienet_eth_rx_notify(void *opaque)
>          s->rxpos += ret;
>          if (!s->rxsize) {
>              s->regs[R_IS] |= IS_RX_COMPLETE;
> +            if (s->need_flush) {
> +                s->need_flush = false;
> +                qemu_flush_queued_packets(qemu_get_queue(s->nic));
> +            }
>          }
>      }
>      enet_update_irq(s);
> @@ -721,6 +726,11 @@ static ssize_t eth_rx(NetClientState *nc, const uint8_t 
> *buf, size_t size)
>  
>      DENET(qemu_log("%s: %zd bytes\n", __func__, size));
>  
> +    if (!eth_can_rx(s)) {
> +        s->need_flush = true;
> +        return 0;
> +    }
> +

axienet_eth_rx_notify() was only called by eth_rx(). So when
s->need_flush is true, we won't ever reach axienet_eth_rx_notify()?

>      unicast = ~buf[0] & 0x1;
>      broadcast = memcmp(buf, sa_bcast, 6) == 0;
>      multicast = !unicast && !broadcast;
> @@ -925,7 +935,6 @@ xilinx_axienet_data_stream_push(StreamSlave *obj, uint8_t 
> *buf, size_t size)
>  static NetClientInfo net_xilinx_enet_info = {
>      .type = NET_CLIENT_OPTIONS_KIND_NIC,
>      .size = sizeof(NICState),
> -    .can_receive = eth_can_rx,
>      .receive = eth_rx,
>  };
>  




reply via email to

[Prev in Thread] Current Thread [Next in Thread]