qemu-ppc
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-ppc] [PATCH] ppc/xive: introduce helpers to access the XIVE struct


From: Cédric Le Goater
Subject: [Qemu-ppc] [PATCH] ppc/xive: introduce helpers to access the XIVE structures
Date: Tue, 18 Dec 2018 22:38:03 +0100

The XIVE internal structures (EAS, END, NVT) are architected to be Big
Endian. The XIVE models, today, access the different fields of these
structures using wrappers around the GETFIELD/SETFIELD macros. The
definitions of these macros depend on the host, which seems unnecessary
for guest-only values.

Remove GETFIELD/SETFIELD and replace them with explicit XIVE handlers
doing the endianness conversion.

Signed-off-by: Cédric Le Goater <address@hidden>
---
 include/hw/ppc/xive_regs.h | 28 +++++++++++++---
 target/ppc/cpu.h           | 12 -------
 hw/intc/spapr_xive.c       | 48 +++++++++++++--------------
 hw/intc/xive.c             | 68 +++++++++++++++++++-------------------
 4 files changed, 82 insertions(+), 74 deletions(-)

diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h
index 85557e730cd8..78875b156980 100644
--- a/include/hw/ppc/xive_regs.h
+++ b/include/hw/ppc/xive_regs.h
@@ -126,8 +126,18 @@ typedef struct XiveEAS {
 #define xive_eas_is_valid(eas)   (be64_to_cpu((eas)->w) & EAS_VALID)
 #define xive_eas_is_masked(eas)  (be64_to_cpu((eas)->w) & EAS_MASKED)
 
-#define GETFIELD_BE64(m, v)      GETFIELD(m, be64_to_cpu(v))
-#define SETFIELD_BE64(m, v, val) cpu_to_be64(SETFIELD(m, be64_to_cpu(v), val))
+static inline uint64_t xive_get_field64(uint64_t mask, uint64_t word)
+{
+    return (be64_to_cpu(word) & mask) >> ctz64(mask);
+}
+
+static inline uint64_t xive_set_field64(uint64_t mask, uint64_t word,
+                                        uint64_t value)
+{
+    uint64_t tmp =
+        (be64_to_cpu(word) & ~mask) | ((value << ctz64(mask)) & mask);
+    return cpu_to_be64(tmp);
+}
 
 /* Event Notification Descriptor (END) */
 typedef struct XiveEND {
@@ -183,8 +193,18 @@ typedef struct XiveEND {
 #define xive_end_is_backlog(end)  (be32_to_cpu((end)->w0) & END_W0_BACKLOG)
 #define xive_end_is_escalate(end) (be32_to_cpu((end)->w0) & 
END_W0_ESCALATE_CTL)
 
-#define GETFIELD_BE32(m, v)       GETFIELD(m, be32_to_cpu(v))
-#define SETFIELD_BE32(m, v, val)  cpu_to_be32(SETFIELD(m, be32_to_cpu(v), val))
+static inline uint32_t xive_get_field32(uint32_t mask, uint32_t word)
+{
+    return (be32_to_cpu(word) & mask) >> ctz32(mask);
+}
+
+static inline uint32_t xive_set_field32(uint32_t mask, uint32_t word,
+                                        uint32_t value)
+{
+    uint32_t tmp =
+        (be32_to_cpu(word) & ~mask) | ((value << ctz32(mask)) & mask);
+    return cpu_to_be32(tmp);
+}
 
 /* Notification Virtual Target (NVT) */
 typedef struct XiveNVT {
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 527181c0f09f..d5f99f1fc7b9 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -78,18 +78,6 @@
                                  PPC_BIT32(bs))
 #define PPC_BITMASK8(bs, be)    ((PPC_BIT8(bs) - PPC_BIT8(be)) | PPC_BIT8(bs))
 
-#if HOST_LONG_BITS == 32
-# define MASK_TO_LSH(m)          (__builtin_ffsll(m) - 1)
-#elif HOST_LONG_BITS == 64
-# define MASK_TO_LSH(m)          (__builtin_ffsl(m) - 1)
-#else
-# error Unknown sizeof long
-#endif
-
-#define GETFIELD(m, v)          (((v) & (m)) >> MASK_TO_LSH(m))
-#define SETFIELD(m, v, val)                             \
-        (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m)))
-
 /*****************************************************************************/
 /* Exception vectors definitions                                             */
 enum {
diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
index 3ceabe668b16..87424de26c1c 100644
--- a/hw/intc/spapr_xive.c
+++ b/hw/intc/spapr_xive.c
@@ -119,12 +119,12 @@ static int spapr_xive_target_to_end(uint32_t target, 
uint8_t prio,
 static void spapr_xive_end_pic_print_info(sPAPRXive *xive, XiveEND *end,
                                           Monitor *mon)
 {
-    uint32_t qindex = GETFIELD_BE32(END_W1_PAGE_OFF, end->w1);
-    uint32_t qgen = GETFIELD_BE32(END_W1_GENERATION, end->w1);
-    uint32_t qsize = GETFIELD_BE32(END_W0_QSIZE, end->w0);
+    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
+    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
+    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
     uint32_t qentries = 1 << (qsize + 10);
-    uint32_t nvt = GETFIELD_BE32(END_W6_NVT_INDEX, end->w6);
-    uint8_t priority = GETFIELD_BE32(END_W7_F0_PRIORITY, end->w7);
+    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
+    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
 
     monitor_printf(mon, "%3d/%d % 6d/%5d ^%d",
                    spapr_xive_nvt_to_target(0, nvt),
@@ -155,10 +155,10 @@ void spapr_xive_pic_print_info(sPAPRXive *xive, Monitor 
*mon)
                        pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                        xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ',
                        xive_eas_is_masked(eas) ? "M" : " ",
-                       (int) GETFIELD_BE64(EAS_END_DATA, eas->w));
+                       (int) xive_get_field64(EAS_END_DATA, eas->w));
 
         if (!xive_eas_is_masked(eas)) {
-            uint32_t end_idx = GETFIELD_BE64(EAS_END_INDEX, eas->w);
+            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
             XiveEND *end;
 
             assert(end_idx < xive->nr_ends);
@@ -741,11 +741,11 @@ static target_ulong h_int_set_source_config(PowerPCCPU 
*cpu,
         return H_P3;
     }
 
-    new_eas.w = SETFIELD_BE64(EAS_END_BLOCK, new_eas.w, end_blk);
-    new_eas.w = SETFIELD_BE64(EAS_END_INDEX, new_eas.w, end_idx);
+    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
+    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);
 
     if (flags & SPAPR_XIVE_SRC_SET_EISN) {
-        new_eas.w = SETFIELD_BE64(EAS_END_DATA, new_eas.w, eisn);
+        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
     }
 
 out:
@@ -810,22 +810,22 @@ static target_ulong h_int_get_source_config(PowerPCCPU 
*cpu,
     }
 
     /* EAS_END_BLOCK is unused on sPAPR */
-    end_idx = GETFIELD_BE64(EAS_END_INDEX, eas.w);
+    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);
 
     assert(end_idx < xive->nr_ends);
     end = &xive->endt[end_idx];
 
-    nvt_blk = GETFIELD_BE32(END_W6_NVT_BLOCK, end->w6);
-    nvt_idx = GETFIELD_BE32(END_W6_NVT_INDEX, end->w6);
+    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
+    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
     args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
 
     if (xive_eas_is_masked(&eas)) {
         args[1] = 0xff;
     } else {
-        args[1] = GETFIELD_BE32(END_W7_F0_PRIORITY, end->w7);
+        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
     }
 
-    args[2] = GETFIELD_BE64(EAS_END_DATA, eas.w);
+    args[2] = xive_get_field64(EAS_END_DATA, eas.w);
 
     return H_SUCCESS;
 }
@@ -894,7 +894,7 @@ static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
 
     args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
     if (xive_end_is_enqueue(end)) {
-        args[1] = GETFIELD_BE32(END_W0_QSIZE, end->w0) + 12;
+        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
     } else {
         args[1] = 0;
     }
@@ -987,7 +987,7 @@ static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
         end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
         end.w3 = cpu_to_be32(qpage & 0xffffffff);
         end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
-        end.w0 = SETFIELD_BE32(END_W0_QSIZE, end.w0, qsize - 12);
+        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
         break;
     case 0:
         /* reset queue and disable queueing */
@@ -1026,9 +1026,9 @@ static target_ulong h_int_set_queue_config(PowerPCCPU 
*cpu,
     /* Ensure the priority and target are correctly set (they will not
      * be right after allocation)
      */
-    end.w6 = SETFIELD_BE32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
-        SETFIELD_BE32(END_W6_NVT_INDEX, 0ul, nvt_idx);
-    end.w7 = SETFIELD_BE32(END_W7_F0_PRIORITY, 0ul, priority);
+    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
+        xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
+    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);
 
     if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
         end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
@@ -1040,7 +1040,7 @@ static target_ulong h_int_set_queue_config(PowerPCCPU 
*cpu,
      * offset counter starts at 0.
      */
     end.w1 = cpu_to_be32(END_W1_GENERATION) |
-        SETFIELD_BE32(END_W1_PAGE_OFF, 0ul, 0ul);
+        xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
     end.w0 |= cpu_to_be32(END_W0_VALID);
 
     /* TODO: issue syncs required to ensure all in-flight interrupts
@@ -1132,7 +1132,7 @@ static target_ulong h_int_get_queue_config(PowerPCCPU 
*cpu,
     if (xive_end_is_enqueue(end)) {
         args[1] = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
             | be32_to_cpu(end->w3);
-        args[2] = GETFIELD_BE32(END_W0_QSIZE, end->w0) + 12;
+        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
     } else {
         args[1] = 0;
         args[2] = 0;
@@ -1141,10 +1141,10 @@ static target_ulong h_int_get_queue_config(PowerPCCPU 
*cpu,
     /* TODO: do we need any locking on the END ? */
     if (flags & SPAPR_XIVE_END_DEBUG) {
         /* Load the event queue generation number into the return flags */
-        args[0] |= (uint64_t)GETFIELD_BE32(END_W1_GENERATION, end->w1) << 62;
+        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 
62;
 
         /* Load R7 with the event queue offset counter */
-        args[3] = GETFIELD_BE32(END_W1_PAGE_OFF, end->w1);
+        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
     } else {
         args[3] = 0;
     }
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 53d2f191e8a3..f8510e426f63 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -982,8 +982,8 @@ void xive_end_queue_pic_print_info(XiveEND *end, uint32_t 
width, Monitor *mon)
 {
     uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
         | be32_to_cpu(end->w3);
-    uint32_t qsize = GETFIELD_BE32(END_W0_QSIZE, end->w0);
-    uint32_t qindex = GETFIELD_BE32(END_W1_PAGE_OFF, end->w1);
+    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
+    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
     uint32_t qentries = 1 << (qsize + 10);
     int i;
 
@@ -1012,13 +1012,13 @@ void xive_end_pic_print_info(XiveEND *end, uint32_t 
end_idx, Monitor *mon)
 {
     uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
         | be32_to_cpu(end->w3);
-    uint32_t qindex = GETFIELD_BE32(END_W1_PAGE_OFF, end->w1);
-    uint32_t qgen = GETFIELD_BE32(END_W1_GENERATION, end->w1);
-    uint32_t qsize = GETFIELD_BE32(END_W0_QSIZE, end->w0);
+    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
+    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
+    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
     uint32_t qentries = 1 << (qsize + 10);
 
-    uint32_t nvt = GETFIELD_BE32(END_W6_NVT_INDEX, end->w6);
-    uint8_t priority = GETFIELD_BE32(END_W7_F0_PRIORITY, end->w7);
+    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
+    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
 
     if (!xive_end_is_valid(end)) {
         return;
@@ -1041,9 +1041,9 @@ static void xive_end_enqueue(XiveEND *end, uint32_t data)
 {
     uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
         | be32_to_cpu(end->w3);
-    uint32_t qsize = GETFIELD_BE32(END_W0_QSIZE, end->w0);
-    uint32_t qindex = GETFIELD_BE32(END_W1_PAGE_OFF, end->w1);
-    uint32_t qgen = GETFIELD_BE32(END_W1_GENERATION, end->w1);
+    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
+    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
+    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
 
     uint64_t qaddr = qaddr_base + (qindex << 2);
     uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
@@ -1058,9 +1058,9 @@ static void xive_end_enqueue(XiveEND *end, uint32_t data)
     qindex = (qindex + 1) & (qentries - 1);
     if (qindex == 0) {
         qgen ^= 1;
-        end->w1 = SETFIELD_BE32(END_W1_GENERATION, end->w1, qgen);
+        end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
     }
-    end->w1 = SETFIELD_BE32(END_W1_PAGE_OFF, end->w1, qindex);
+    end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
 }
 
 /*
@@ -1139,13 +1139,13 @@ static int xive_presenter_tctx_match(XiveTCTX *tctx, 
uint8_t format,
 
         /* HV POOL ring */
         if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
-            cam == GETFIELD_BE32(TM_QW2W2_POOL_CAM, qw2w2)) {
+            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
             return TM_QW2_HV_POOL;
         }
 
         /* OS ring */
         if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
-            cam == GETFIELD_BE32(TM_QW1W2_OS_CAM, qw1w2)) {
+            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
             return TM_QW1_OS;
         }
     } else {
@@ -1153,9 +1153,9 @@ static int xive_presenter_tctx_match(XiveTCTX *tctx, 
uint8_t format,
 
         /* USER ring */
         if  ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
-             (cam == GETFIELD_BE32(TM_QW1W2_OS_CAM, qw1w2)) &&
+             (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
              (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
-             (logic_serv == GETFIELD_BE32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
+             (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
             return TM_QW0_USER;
         }
     }
@@ -1313,8 +1313,8 @@ static void xive_router_end_notify(XiveRouter *xrtr, 
uint8_t end_blk,
      *   F=1 : User level Event-Based Branch (EBB) notification, no
      *         priority
      */
-    format = GETFIELD_BE32(END_W6_FORMAT_BIT, end.w6);
-    priority = GETFIELD_BE32(END_W7_F0_PRIORITY, end.w7);
+    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
+    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);
 
     /* The END is masked */
     if (format == 0 && priority == 0xff) {
@@ -1326,11 +1326,11 @@ static void xive_router_end_notify(XiveRouter *xrtr, 
uint8_t end_blk,
      * even futher coalescing in the Router
      */
     if (!xive_end_is_notify(&end)) {
-        uint8_t pq = GETFIELD_BE32(END_W1_ESn, end.w1);
+        uint8_t pq = xive_get_field32(END_W1_ESn, end.w1);
         bool notify = xive_esb_trigger(&pq);
 
-        if (pq != GETFIELD_BE32(END_W1_ESn, end.w1)) {
-            end.w1 = SETFIELD_BE32(END_W1_ESn, end.w1, pq);
+        if (pq != xive_get_field32(END_W1_ESn, end.w1)) {
+            end.w1 = xive_set_field32(END_W1_ESn, end.w1, pq);
             xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
         }
 
@@ -1344,11 +1344,11 @@ static void xive_router_end_notify(XiveRouter *xrtr, 
uint8_t end_blk,
      * Follows IVPE notification
      */
     xive_presenter_notify(xrtr, format,
-                          GETFIELD_BE32(END_W6_NVT_BLOCK, end.w6),
-                          GETFIELD_BE32(END_W6_NVT_INDEX, end.w6),
-                          GETFIELD_BE32(END_W7_F0_IGNORE, end.w7),
+                          xive_get_field32(END_W6_NVT_BLOCK, end.w6),
+                          xive_get_field32(END_W6_NVT_INDEX, end.w6),
+                          xive_get_field32(END_W7_F0_IGNORE, end.w7),
                           priority,
-                          GETFIELD_BE32(END_W7_F1_LOG_SERVER_ID, end.w7));
+                          xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));
 
     /* TODO: Auto EOI. */
 }
@@ -1385,9 +1385,9 @@ static void xive_router_notify(XiveNotifier *xn, uint32_t 
lisn)
      * The event trigger becomes an END trigger
      */
     xive_router_end_notify(xrtr,
-                           GETFIELD_BE64(EAS_END_BLOCK, eas.w),
-                           GETFIELD_BE64(EAS_END_INDEX, eas.w),
-                           GETFIELD_BE64(EAS_END_DATA,  eas.w));
+                           xive_get_field64(EAS_END_BLOCK, eas.w),
+                           xive_get_field64(EAS_END_INDEX, eas.w),
+                           xive_get_field64(EAS_END_DATA,  eas.w));
 }
 
 static void xive_router_class_init(ObjectClass *klass, void *data)
@@ -1419,9 +1419,9 @@ void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, 
Monitor *mon)
 
     monitor_printf(mon, "  %08x %s end:%02x/%04x data:%08x\n",
                    lisn, xive_eas_is_masked(eas) ? "M" : " ",
-                   (uint8_t)  GETFIELD_BE64(EAS_END_BLOCK, eas->w),
-                   (uint32_t) GETFIELD_BE64(EAS_END_INDEX, eas->w),
-                   (uint32_t) GETFIELD_BE64(EAS_END_DATA, eas->w));
+                   (uint8_t)  xive_get_field64(EAS_END_BLOCK, eas->w),
+                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
+                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
 }
 
 /*
@@ -1454,7 +1454,7 @@ static uint64_t xive_end_source_read(void *opaque, hwaddr 
addr, unsigned size)
     }
 
     end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
-    pq = GETFIELD_BE32(end_esmask, end.w1);
+    pq = xive_get_field32(end_esmask, end.w1);
 
     switch (offset) {
     case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
@@ -1479,8 +1479,8 @@ static uint64_t xive_end_source_read(void *opaque, hwaddr 
addr, unsigned size)
         return -1;
     }
 
-    if (pq != GETFIELD_BE32(end_esmask, end.w1)) {
-        end.w1 = SETFIELD_BE32(end_esmask, end.w1, pq);
+    if (pq != xive_get_field32(end_esmask, end.w1)) {
+        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
         xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
     }
 
-- 
2.17.2




reply via email to

[Prev in Thread] Current Thread [Next in Thread]