[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v3 31/45] Implement GENET RX path
From: |
Sergey Kambalin |
Subject: |
[PATCH v3 31/45] Implement GENET RX path |
Date: |
Sun, 3 Dec 2023 15:48:56 -0600 |
Signed-off-by: Sergey Kambalin <sergey.kambalin@auriga.com>
---
hw/net/bcm2838_genet.c | 265 ++++++++++++++++++++++++++++++++-
include/hw/net/bcm2838_genet.h | 1 +
2 files changed, 265 insertions(+), 1 deletion(-)
diff --git a/hw/net/bcm2838_genet.c b/hw/net/bcm2838_genet.c
index 4c9b39a3ca..61c1981e10 100644
--- a/hw/net/bcm2838_genet.c
+++ b/hw/net/bcm2838_genet.c
@@ -511,6 +511,25 @@ static bool
bcm2838_genet_tdma_ring_active(BCM2838GenetState *s,
return active;
}
+static bool bcm2838_genet_rdma_ring_active(BCM2838GenetState *s,
+ unsigned int ring_index)
+{
+ uint32_t ring_mask = 1 << ring_index;
+
+ bool dma_en = FIELD_EX32(s->regs.rdma.ctrl, GENET_DMA_CTRL, EN) != 0;
+ bool ring_en = (FIELD_EX32(s->regs.rdma.ring_cfg, GENET_DMA_RING_CFG, EN)
+ & ring_mask) != 0;
+ bool ring_buf_en = (FIELD_EX32(s->regs.rdma.ctrl,
+ GENET_DMA_CTRL, RING_BUF_EN)
+ & ring_mask) != 0;
+ bool active = dma_en && ring_en && ring_buf_en;
+
+ trace_bcm2838_genet_rx_dma_ring_active(ring_index,
+ active ? "active" : "halted");
+
+ return active;
+}
+
static void bcm2838_genet_tdma(BCM2838GenetState *s, hwaddr offset,
uint64_t value)
{
@@ -721,9 +740,251 @@ static const MemoryRegionOps bcm2838_genet_ops = {
.valid = {.min_access_size = 4},
};
+static int32_t bcm2838_genet_filter(BCM2838GenetState *s, const void *buf,
+ size_t size)
+{
+ qemu_log_mask(LOG_UNIMP,
+ "Packet filtration with HFB isn't implemented yet");
+ return -1;
+}
+
+static int32_t bcm2838_genet_filter2ring(BCM2838GenetState *s,
+ uint32_t filter_idx)
+{
+ qemu_log_mask(LOG_UNIMP,
+ "Packet filtration with HFB isn't implemented yet");
+ return -1;
+}
+
/*
 * Return true when the frame's destination MAC is the Ethernet broadcast
 * address ff:ff:ff:ff:ff:ff.  Frames too short to hold a destination
 * address are never broadcast.
 */
static bool is_packet_broadcast(const uint8_t *buf, size_t size)
{
    static const uint8_t bcst_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

    return size >= sizeof(bcst_addr)
           && memcmp(buf, bcst_addr, sizeof(bcst_addr)) == 0;
}
+
/*
 * Return true when the frame's destination MAC has the I/G (multicast)
 * bit set — bit 0 of the first address octet.
 *
 * Fix: the original read buf[0] without consulting @size, so a
 * zero-length frame caused an out-of-bounds read.  Empty frames are now
 * reported as non-multicast.
 */
static bool is_packet_multicast(const uint8_t *buf, size_t size)
{
    if (size < 1) {
        return false;
    }
    return (buf[0] & 0x01) != 0;
}
+
/*
 * Push one received frame into guest memory through RX DMA ring @ring_idx.
 *
 * The frame is split into ring-buffer-sized chunks.  Each chunk is staged
 * in s->rx_packet prefixed by a BCM2838GenetXmitStatus status block plus
 * 2 alignment bytes (presumably matching the Linux genet driver's RX
 * status-block layout — TODO confirm against the driver), written to the
 * guest address held in the current descriptor, then accounted for by
 * bumping the producer index and write pointer.  Finally the per-ring RX
 * interrupt status bit is latched (the caller raises the IRQ lines).
 *
 * Returns the number of payload bytes consumed, or -1 if the ring is
 * not active.
 */
static ssize_t bcm2838_genet_rdma(BCM2838GenetState *s, uint32_t ring_idx,
                                  const void *buf, size_t size)
{
    /* Descriptor registers are 32-bit words; pointers count in words. */
    const size_t DESC_WORD_SIZE =
        sizeof(BCM2838GenetRdmaDesc) / sizeof(uint32_t);

    ssize_t len = 0;
    BCM2838GenetRegsRdma *rdma = &s->regs.rdma;
    BCM2838GenetRdmaRing *ring = &rdma->rings[ring_idx];
    /* Current descriptor index from the split 48-bit write pointer. */
    hwaddr write_index =
        (ring->write_ptr + ((hwaddr)ring->write_ptr_hi << 32)) /
            DESC_WORD_SIZE;
    BCM2838GenetRdmaDesc *desc = &rdma->descs[write_index];

    /* First and last descriptor indices of this ring, for wrap-around. */
    const hwaddr START_INDEX =
        (ring->start_addr + ((hwaddr)ring->start_addr_hi << 32))
            / DESC_WORD_SIZE;
    const hwaddr END_INDEX =
        (ring->end_addr + ((hwaddr)ring->end_addr_hi << 32)) / DESC_WORD_SIZE;

    if (!bcm2838_genet_rdma_ring_active(s, ring_idx)) {
        return -1;
    }

    /*
     * Mark start-of-packet on the first descriptor.
     * NOTE(review): SOP is never cleared on subsequent descriptors of a
     * multi-descriptor frame, so a stale SOP already in the descriptor
     * word would be preserved — confirm this is intended.
     */
    desc->length_status = FIELD_DP32(desc->length_status,
                                     GENET_RDMA_LENGTH_STATUS, SOP, 1);

    while (len < size) {
        size_t l = size - len;                          /* bytes remaining */
        size_t buf_size = ring->ring_buf_size & 0xffff; /* per-buffer size */
        uint8_t *dma_buf = s->rx_packet;                /* staging buffer */
        hwaddr dma_buf_addr =
            desc->address_lo + ((hwaddr)desc->address_hi << 32);
        MemTxResult mem_tx_result = MEMTX_OK;
        /* Payload goes after the status block plus 2 alignment bytes. */
        uint8_t *frame_buf = dma_buf + sizeof(BCM2838GenetXmitStatus) + 2;
        BCM2838GenetXmitStatus *xmit_status =
            (BCM2838GenetXmitStatus *)dma_buf;
        struct iovec iov;
        bool isip4, isip6;
        size_t l3hdr_off, l4hdr_off, l5hdr_off;
        eth_ip6_hdr_info ip6hdr_info;
        eth_ip4_hdr_info ip4hdr_info;
        eth_l4_hdr_info l4hdr_info;

        bool crc_fwd = FIELD_EX32(s->regs.umac.cmd, GENET_UMAC_CMD, CRC_FWD);
        size_t buflength;
        uint32_t prod_index;

        /*
         * Clamp the chunk to the ring buffer size.
         * NOTE(review): this compares against the raw ring_buf_size
         * register while buf_size above masks the low 16 bits — confirm
         * the unmasked value is correct here.  There is also no check
         * that the chunk plus status-block header fits in s->rx_packet
         * (MAX_FRAME_SIZE) — verify ring_buf_size is suitably bounded.
         */
        if (l > ring->ring_buf_size) {
            l = ring->ring_buf_size;
        }

        memcpy(frame_buf, buf + len, l);
        iov.iov_base = frame_buf;
        iov.iov_len = l;
        /* Parse L3/L4 headers so the RX checksum status can be filled in. */
        eth_get_protocols(&iov, 1, 0,
                          &isip4, &isip6,
                          &l3hdr_off, &l4hdr_off, &l5hdr_off,
                          &ip6hdr_info, &ip4hdr_info, &l4hdr_info);

        len += l;

        /* End-of-packet once the whole frame has been consumed. */
        desc->length_status = FIELD_DP32(desc->length_status,
                                         GENET_RDMA_LENGTH_STATUS,
                                         EOP, !!(len >= size));

        /* Reported length covers header, payload and (optionally) FCS. */
        buflength = l + sizeof(BCM2838GenetXmitStatus) + 2;
        if (crc_fwd) {
            buflength += 4; /* guest also receives the 4-byte FCS */
        }

        desc->length_status = FIELD_DP32(desc->length_status,
                                         GENET_RDMA_LENGTH_STATUS,
                                         BUFLENGTH, buflength);

        desc->length_status = FIELD_DP32(desc->length_status,
                                         GENET_RDMA_LENGTH_STATUS,
                                         BROADCAST,
                                         !!is_packet_broadcast(frame_buf, l));
        desc->length_status = FIELD_DP32(desc->length_status,
                                         GENET_RDMA_LENGTH_STATUS,
                                         MULTICAST,
                                         !!is_packet_multicast(frame_buf, l));

        /* Report the IPv4 header checksum; 0 for non-IPv4 frames. */
        xmit_status->rx_csum = 0;
        if (isip4) {
            xmit_status->rx_csum = ip4hdr_info.ip4_hdr.ip_sum;
        }
        xmit_status->length_status = desc->length_status;

        /*
         * Copy status block + frame chunk to guest memory.
         * NOTE(review): writes buf_size bytes regardless of buflength —
         * a short final chunk copies uninitialized tail bytes of
         * rx_packet, and buf_size < buflength would truncate the frame;
         * confirm this matches hardware behavior.
         */
        mem_tx_result = address_space_write(&s->dma_as, dma_buf_addr,
                                            MEMTXATTRS_UNSPECIFIED,
                                            dma_buf, buf_size);
        if (mem_tx_result != MEMTX_OK) {
            desc->length_status = FIELD_DP32(desc->length_status,
                                             GENET_RDMA_LENGTH_STATUS,
                                             RXERR, 1);
        }

        /* Abort on any RX error latched in the descriptor. */
        if (FIELD_EX32(desc->length_status,
                       GENET_RDMA_LENGTH_STATUS, RXERR) != 0) {
            break;
        }

        /* One descriptor consumed: bump the 16-bit producer index ... */
        prod_index = FIELD_EX32(ring->prod_index, GENET_DMA_PROD_INDEX, INDEX);
        ring->prod_index = FIELD_DP32(ring->prod_index,
                                      GENET_DMA_PROD_INDEX,
                                      INDEX, ++prod_index);
        /* ... and advance to the next descriptor, wrapping at ring end. */
        if (++write_index > END_INDEX) {
            write_index = START_INDEX;
        }
        desc = &rdma->descs[write_index];
        ring->write_ptr = write_index * DESC_WORD_SIZE;
        ring->write_ptr_hi = ((hwaddr)write_index * DESC_WORD_SIZE) >> 32;
    }

    /* Latch the RX-complete interrupt status bit for this ring. */
    if (ring_idx == BCM2838_GENET_DMA_RING_DEFAULT) {
        s->regs.intrl0.stat = FIELD_DP32(s->regs.intrl0.stat,
                                         GENET_INTRL_0, RXDMA_MBDONE, 1);
    } else {
        uint32_t rx_intrs =
            FIELD_EX32(s->regs.intrl1.stat, GENET_INTRL_1, RX_INTRS);
        rx_intrs |= 1 << ring_idx;

        s->regs.intrl1.stat = FIELD_DP32(s->regs.intrl1.stat,
                                         GENET_INTRL_1, RX_INTRS, rx_intrs);
    }

    return len;
}
+
+static ssize_t bcm2838_genet_receive(NetClientState *nc, const uint8_t *buf,
+ size_t size)
+{
+ BCM2838GenetState *s = (BCM2838GenetState *)qemu_get_nic_opaque(nc);
+ ssize_t bytes_received = -1;
+ int32_t filter_index = -1;
+ int32_t ring_index = -1;
+
+ if (FIELD_EX32(s->regs.rdma.ctrl, GENET_DMA_CTRL, EN) != 0) {
+ filter_index = bcm2838_genet_filter(s, buf, size);
+
+ if (filter_index >= 0) {
+ ring_index = bcm2838_genet_filter2ring(s, filter_index);
+ } else {
+ ring_index = BCM2838_GENET_DMA_RING_CNT - 1;
+ }
+
+ if (size <= MAX_PACKET_SIZE) {
+ bytes_received = bcm2838_genet_rdma(s, ring_index, buf, size);
+ }
+ }
+
+ bcm2838_genet_set_irq_default(s);
+ bcm2838_genet_set_irq_prio(s);
+
+ return bytes_received;
+}
+
/*
 * Synchronize the emulated PHY's link state with QEMU's netdev link state.
 *
 * On a down transition, ANEGCOMPLETE and LSTATUS are cleared and the
 * LINK_DOWN interrupt status bit is latched.  On an up transition,
 * auto-negotiation is completed immediately with a fixed link partner
 * (1 Gbps half+full duplex, symmetric/asymmetric pause) and LINK_UP is
 * latched.  No change is made when the states already agree.  The
 * default-priority IRQ line is refreshed in all cases.
 */
static void bcm2838_genet_phy_update_link(BCM2838GenetState *s)
{
    bool qemu_link_down = qemu_get_queue(s->nic)->link_down != 0;

    bool lstatus = FIELD_EX32(s->phy_regs.bmsr, GENET_PHY_BMSR, LSTATUS) != 0;

    if (qemu_link_down && lstatus) {
        /* Backend went down while the PHY still reports link up. */
        trace_bcm2838_genet_phy_update_link("down");

        s->phy_regs.bmsr = FIELD_DP32(s->phy_regs.bmsr,
                                      GENET_PHY_BMSR, ANEGCOMPLETE, 0);
        s->phy_regs.bmsr = FIELD_DP32(s->phy_regs.bmsr,
                                      GENET_PHY_BMSR, LSTATUS, 0);
        s->regs.intrl0.stat = FIELD_DP32(s->regs.intrl0.stat,
                                         GENET_INTRL_0, LINK_DOWN, 1);
    } else if (!qemu_link_down && !lstatus) {
        /* Backend came up while the PHY still reports link down. */
        trace_bcm2838_genet_phy_update_link("up");

        /*
         * Complete auto-negotiation (fixed link partner's abilities for now:
         * 1Gbps with flow control)
         */
        s->phy_regs.stat1000 = FIELD_DP32(s->phy_regs.stat1000,
                                          GENET_PHY_STAT_1000, HALF, 1);
        s->phy_regs.stat1000 = FIELD_DP32(s->phy_regs.stat1000,
                                          GENET_PHY_STAT_1000, FULL, 1);

        s->phy_regs.lpa = FIELD_DP32(s->phy_regs.lpa,
                                     GENET_PHY_LPA, PAUSE_CAP, 1);
        s->phy_regs.lpa = FIELD_DP32(s->phy_regs.lpa,
                                     GENET_PHY_LPA, PAUSE_ASYM, 1);
        s->phy_regs.lpa = FIELD_DP32(s->phy_regs.lpa, GENET_PHY_LPA, LPACK, 1);

        s->phy_regs.bmsr = FIELD_DP32(s->phy_regs.bmsr,
                                      GENET_PHY_BMSR, ANEGCOMPLETE, 1);
        s->phy_regs.bmsr = FIELD_DP32(s->phy_regs.bmsr,
                                      GENET_PHY_BMSR, LSTATUS, 1);

        s->regs.intrl0.stat = FIELD_DP32(s->regs.intrl0.stat,
                                         GENET_INTRL_0, LINK_UP, 1);
    }

    bcm2838_genet_set_irq_default(s);
}
+static void bcm2838_genet_set_link(NetClientState *nc)
+{
+ BCM2838GenetState *s = qemu_get_nic_opaque(nc);
+
+ bcm2838_genet_phy_update_link(s);
+}
+
static NetClientInfo bcm2838_genet_client_info = {
.type = NET_CLIENT_DRIVER_NIC,
- .size = sizeof(NICState)
+ .size = sizeof(NICState),
+ .receive = bcm2838_genet_receive,
+ .link_status_changed = bcm2838_genet_set_link,
};
static void bcm2838_genet_realize(DeviceState *dev, Error **errp)
@@ -777,6 +1038,8 @@ static void bcm2838_genet_phy_reset(BCM2838GenetState *s)
s->phy_aux_ctl_shd_regs.misc = 0x1E;
trace_bcm2838_genet_phy_reset("done");
+
+ bcm2838_genet_phy_update_link(s);
}
static void bcm2838_genet_reset(DeviceState *d)
diff --git a/include/hw/net/bcm2838_genet.h b/include/hw/net/bcm2838_genet.h
index f96ea3a145..f044d0d17e 100644
--- a/include/hw/net/bcm2838_genet.h
+++ b/include/hw/net/bcm2838_genet.h
@@ -420,6 +420,7 @@ struct BCM2838GenetState {
qemu_irq irq_prio;
uint8_t tx_packet[MAX_FRAME_SIZE];
+ uint8_t rx_packet[MAX_FRAME_SIZE];
};
#endif /* BCM2838_GENET_H */
--
2.34.1
- [PATCH v3 21/45] Add clock_isp stub, (continued)
- [PATCH v3 21/45] Add clock_isp stub, Sergey Kambalin, 2023/12/03
- [PATCH v3 22/45] Add GENET stub, Sergey Kambalin, 2023/12/03
- [PATCH v3 23/45] Add GENET register structs. Part 1, Sergey Kambalin, 2023/12/03
- [PATCH v3 24/45] Add GENET register structs. Part 2, Sergey Kambalin, 2023/12/03
- [PATCH v3 25/45] Add GENET register structs. Part 3, Sergey Kambalin, 2023/12/03
- [PATCH v3 26/45] Add GENET register structs. Part 4, Sergey Kambalin, 2023/12/03
- [PATCH v3 27/45] Add GENET register access macros, Sergey Kambalin, 2023/12/03
- [PATCH v3 28/45] Implement GENET register ops, Sergey Kambalin, 2023/12/03
- [PATCH v3 29/45] Implement GENET MDIO, Sergey Kambalin, 2023/12/03
- [PATCH v3 30/45] Implement GENET TX path, Sergey Kambalin, 2023/12/03
- [PATCH v3 31/45] Implement GENET RX path,
Sergey Kambalin <=
- [PATCH v3 32/45] Enable BCM2838 GENET controller, Sergey Kambalin, 2023/12/03
- [PATCH v3 33/45] Connect RNG200, PCIE and GENET to GIC, Sergey Kambalin, 2023/12/03
- [PATCH v3 34/45] Add Rpi4b boot tests, Sergey Kambalin, 2023/12/03
- [PATCH v3 35/45] Add mailbox test stub, Sergey Kambalin, 2023/12/03
- [PATCH v3 36/45] Add mailbox test constants, Sergey Kambalin, 2023/12/03
- [PATCH v3 37/45] Add mailbox tests tags. Part 1, Sergey Kambalin, 2023/12/03
- [PATCH v3 38/45] Add mailbox tests tags. Part 2, Sergey Kambalin, 2023/12/03
- [PATCH v3 39/45] Add mailbox tests tags. Part 3, Sergey Kambalin, 2023/12/03
- [PATCH v3 40/45] Add mailbox property tests. Part 1, Sergey Kambalin, 2023/12/03
- [PATCH v3 41/45] Add mailbox property tests. Part 2, Sergey Kambalin, 2023/12/03