[lwip-users] Raw TCP Client - wait for ACK after each packet causes slowdown


From: Adrian Figueroa
Subject: [lwip-users] Raw TCP Client - wait for ACK after each packet causes slowdown
Date: Mon, 26 Jun 2017 07:39:13 +0000

Hello!

I wrote a TCP client (on an STM32 microcontroller) that should send a large amount of data to a Windows host PC (the TCP server). However, the transfer is quite slow because the client waits for the ACK of each transmitted packet before sending the next one.

Data is sent like this:

1. The client connects to the server.
2. It sends data with tcp_write.
3. It waits for the tcp_sent callback, which then calls tcp_write for the next packet.

I thought the tcp_sent callback would also be triggered without an ACK, as soon as there is enough free space in tcp_sndbuf, but this does not seem to be the case.
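
For reference, this is roughly the pattern I expected to be able to use: keep tcp_sndbuf filled from the callbacks instead of writing a single segment per ACK. This is only a simplified sketch; send_as_much_as_possible and the remaining_ptr/remaining_len arguments stand in for my real dbuf bookkeeping (curr_dp/curr_dlen).

#include "lwip/tcp.h"   /* tcp_write(), tcp_output(), tcp_sndbuf() */
#include "lwip/def.h"   /* LWIP_MIN() */

/* Sketch: enqueue as much data as tcp_sndbuf() currently allows,
   then push it out with tcp_output(). */
static err_t send_as_much_as_possible(struct tcp_pcb *pcb,
                                      const u8_t **remaining_ptr,
                                      u32_t *remaining_len)
{
  err_t err = ERR_OK;

  while (*remaining_len > 0 && tcp_sndbuf(pcb) > 0) {
    u16_t chunk = (u16_t)LWIP_MIN(*remaining_len, tcp_sndbuf(pcb));

    /* TCP_WRITE_FLAG_COPY: lwIP copies the data into its own buffers,
       so the source buffer may be reused right away. */
    err = tcp_write(pcb, *remaining_ptr, chunk, TCP_WRITE_FLAG_COPY);
    if (err == ERR_MEM) {
      /* Send queue is full; retry the rest from the next tcp_sent callback. */
      err = ERR_OK;
      break;
    }
    if (err != ERR_OK) {
      break;
    }

    *remaining_ptr += chunk;
    *remaining_len -= chunk;
  }

  /* Hand the enqueued segments to the output path now. */
  tcp_output(pcb);
  return err;
}

My understanding is that tcp_write only enqueues data and tcp_output asks lwIP to transmit whatever has been enqueued so far.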

Find my whole code attached below.

Please note that the following options are used:

#define TCP_WND_UPDATE_THRESHOLD 536
#define TCP_SND_BUF 5360
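
(For context: with lwIP's default TCP_MSS of 536, this amounts to a window update threshold of one MSS and a send buffer of ten full-size segments, i.e. roughly the equivalent of the following, if I read opt.h correctly:)

/* assuming the default TCP_MSS of 536 */
#define TCP_MSS                  536
#define TCP_SND_BUF              (10 * TCP_MSS)   /* = 5360 */
#define TCP_WND_UPDATE_THRESHOLD TCP_MSS          /* = 536  */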

 

Thanks in advance,
Adrian

CODE:

#include "tcp_trx.h"

#include "tcp.h"

#include "lwip.h"

#include "dhcp.h"

 

struct netif *gnetifp;

 

err_t tcp_connect_callback(void *arg, struct tcp_pcb *tpcb, err_t error);

void  tcp_err_callback(void *arg, err_t err);

err_t tcp_poll_callback(void *arg, struct tcp_pcb *tpcb);

 

err_t conn_send_next(struct conn_state *cs);

err_t conn_sent(void *arg, struct tcp_pcb *tpcb, u16_t len);

 

err_t tcp_frame_transmit(void *data, struct tcp_pcb *tpcb, uint32_t length);

 

err_t tcp_connect_callback(void *arg, struct tcp_pcb *tpcb, err_t error)
{
  // connection established
  struct conn_state *cs;

  cs = (struct conn_state *)arg;

  cs->state = CS_CONN_OK;

  tcp_nagle_disable(cs->pcb);

  return error;
}

err_t tcp_poll_callback(void *arg, struct tcp_pcb *tpcb)
{
  err_t ret_err = ERR_OK;
  struct conn_state *cs;

  cs = (struct conn_state *)arg;

  if (cs != NULL)
  {
    switch (cs->state)
    {
      case CS_CONN_ERR:
        /* Retry connection to server */
        tpcb = tcp_new();
        cs->pcb = tpcb;
        cs->retries++;
        cs->state = CS_CONN_RETRY;
        // TODO: Reconnect if server is down and back up
        tcp_arg(cs->pcb, cs);
        // tcp_recv(tpcb, echo_recv);
        tcp_err(cs->pcb, tcp_err_callback);
        tcp_poll(cs->pcb, tcp_poll_callback, 0);
        tcp_sent(cs->pcb, conn_sent);
        tcp_bind(cs->pcb, IP_ADDR_ANY, 0);
        tcp_connect(cs->pcb, &(cs->ip), cs->port, tcp_connect_callback);
        break;

      case CS_CONN_RETRY:
        /* Wait for connection retry. */
        /* Fall back to CS_CONN_ERR or continue with CS_CONN_OK */
        break;

      case CS_CONN_OK:
        cs->retries = 0;
        cs->state = CS_READY;
        break;

      case CS_READY:
        /* Check data availability */
        if (cs->state_dat == DS_SENT) {
          cs->state_dat = DS_NONE;
        }
        else if (cs->state_dat == DS_READY)
        {
          cs->state = CS_SEND;
        }
        break;

      case CS_SEND:
        conn_sent(cs, NULL, 0);
        cs->state = CS_SENDING;
        cs->state_dat = DS_SENDING;
        break;

      case CS_SENDING:
        /* Wait for transmission to finish */
        break;

      case CS_SENT:
        cs->state = CS_READY;
        cs->state_dat = DS_SENT;
        break;
    }
  }
  return ret_err;
}

err_t conn_sent(void *arg, struct tcp_pcb *tpcb, u16_t len)
{
  struct conn_state *cs;

  volatile uint16_t freebuffff;

  /* tpcb may be NULL when this is called directly from the poll callback
     (CS_SEND), so only read the send buffer size if a pcb is available */
  freebuffff = (tpcb != NULL) ? tcp_sndbuf(tpcb) : 0;

  LWIP_UNUSED_ARG(len);
  LWIP_UNUSED_ARG(tpcb);

  cs = (struct conn_state *)arg;

  err_t err = conn_send_next(cs);

  if (err == ERR_OK)
  {
    cs->state = CS_SENT;
  }

  return err;
}

err_t conn_send_next(struct conn_state *cs)
{
  struct dbuf *dtr;
  struct pbuf *ptr;

  err_t wr_err = ERR_INPROGRESS;

  // pointer to data structure
  dtr = cs->d;
  // pointer to pbuf structure
  ptr = cs->p;

  // TODO: tcp_sndbuf(tpcb) ???
  /* enqueue data for transmission */

  if (dtr->curr_dlen) // buffer not yet written entirely
  {
    uint32_t len;
    uint32_t max_len = cs->pcb->mss;

    if (dtr->curr_dlen < max_len)
    { // last packet
      len = dtr->curr_dlen;
    }
    else
    { // full size packet
      len = max_len;
    }

    ptr->payload = dtr->curr_dp;
    ptr->len = len;

    wr_err = tcp_write(cs->pcb, ptr->payload, ptr->len, 0); // TODO: copy? buffer management

    if (wr_err == ERR_OK)
    {
      // update pointers
      dtr->curr_dlen -= len;
      dtr->curr_dp += len;

      wr_err = ERR_INPROGRESS;
    }
  }
  else
  {
    wr_err = ERR_OK;
  }

  return wr_err;
}

void tcp_err_callback(void *arg, err_t err)
{
  struct conn_state *cs;

  LWIP_UNUSED_ARG(err);

  cs = (struct conn_state *)arg;

  cs->state = CS_CONN_ERR;

  if (cs->pcb != NULL)
  {
    /* lwIP has already freed the pcb when the error callback runs,
       so it must not be passed to tcp_close() here */
    cs->pcb = NULL;
  }
}

struct conn_state * init_tcp_connection(uint16_t port, uint8_t a3, uint8_t a2, uint8_t a1, uint8_t a0)
{
  ip4_addr_t ip_dest;
  IP4_ADDR(&ip_dest, a3, a2, a1, a0);

  struct tcp_pcb *tpcb;
  struct conn_state *cs;

  while (0 == dhcp_supplied_address(gnetifp))
  {
    /* TODO: Improve dhcp error handling */
    // FIXME: use netif_set_status_callback()
    // wait
    MX_LWIP_Process();
  }

  tpcb = tcp_new();

  cs = (struct conn_state *)mem_malloc(sizeof(struct conn_state));
  if (cs != NULL)
  {
    cs->state = CS_INIT;
    cs->pcb = tpcb;
    cs->retries = 0;
    cs->ip = ip_dest;
    cs->port = port;
    cs->p = NULL;
    /* pass newly allocated cs to our callbacks */
    tcp_arg(cs->pcb, cs);
    // tcp_recv(tpcb, echo_recv);
    tcp_err(cs->pcb, tcp_err_callback);
    tcp_poll(cs->pcb, tcp_poll_callback, 0);
    tcp_sent(cs->pcb, conn_sent);
    tcp_bind(cs->pcb, IP_ADDR_ANY, 0);
    tcp_connect(cs->pcb, &(cs->ip), cs->port, tcp_connect_callback);
    // ret_err = ERR_OK;
  } // else ...

  return cs;
}

 

