From: Joel Stanley
Subject: [Qemu-arm] [PATCH v2] aspeed_scu: Implement RNG register
Date: Tue, 29 May 2018 00:52:25 +0930

The ASPEED SoCs contain a single register that returns random data when
read. This models that register so that guests can use it.

The random number data register has a corresponding control register;
however, the hardware returns a different number on every read regardless
of the state of the enable bit, so the model follows this behaviour.
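
For context, guests consume this register with plain MMIO reads. A minimal
bare-metal sketch, assuming the SCU base of 0x1e6e2000 and the RNG data
register at offset 0x78 (the layout the aspeed device trees use); the names
here are illustrative only:

    #include <stdint.h>

    /* Assumed layout: SCU at 0x1e6e2000, RNG data register at offset 0x78. */
    #define ASPEED_SCU_BASE 0x1e6e2000UL
    #define SCU_RNG_DATA    0x78

    static inline uint32_t scu_rng_read(void)
    {
        /* Each read returns a fresh 32-bit random value, whether or not
         * the control register has enabled the RNG. */
        return *(volatile uint32_t *)(ASPEED_SCU_BASE + SCU_RNG_DATA);
    }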

Signed-off-by: Joel Stanley <address@hidden>
---
v2:
 - Remove call to qcrypto_random_init as this is done in main()
---
 hw/misc/aspeed_scu.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c
index 5e6d5744eeca..29e58527793b 100644
--- a/hw/misc/aspeed_scu.c
+++ b/hw/misc/aspeed_scu.c
@@ -16,6 +16,7 @@
 #include "qapi/visitor.h"
 #include "qemu/bitops.h"
 #include "qemu/log.h"
+#include "crypto/random.h"
 #include "trace.h"
 
 #define TO_REG(offset) ((offset) >> 2)
@@ -154,6 +155,19 @@ static const uint32_t ast2500_a1_resets[ASPEED_SCU_NR_REGS] = {
      [BMC_DEV_ID]      = 0x00002402U
 };
 
+static uint32_t aspeed_scu_get_random(void)
+{
+    Error *err = NULL;
+    uint32_t num;
+
+    if (qcrypto_random_bytes((uint8_t *)&num, sizeof(num), &err)) {
+        error_report_err(err);
+        exit(1);
+    }
+
+    return num;
+}
+
 static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size)
 {
     AspeedSCUState *s = ASPEED_SCU(opaque);
@@ -167,6 +181,9 @@ static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size)
     }
 
     switch (reg) {
+    case RNG_DATA:
+        s->regs[RNG_DATA] = aspeed_scu_get_random();
+        break;
     case WAKEUP_EN:
         qemu_log_mask(LOG_GUEST_ERROR,
                       "%s: Read of write-only offset 0x%" HWADDR_PRIx "\n",
-- 
2.17.0
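
As a quick check that the model behaves as intended, two back-to-back reads
of the data register should (with overwhelming probability) differ. A minimal
qtest-style sketch, assuming the ast2500-evb machine and the RNG data
register at 0x1e6e2078 (SCU base 0x1e6e2000 + offset 0x78); the test function
and its registration are hypothetical, not part of this patch:

    #include "qemu/osdep.h"
    #include "libqtest.h"

    /* Hypothetical test: assumes the AST2500 SCU maps its RNG data
     * register at 0x1e6e2078. */
    static void test_rng_data(void)
    {
        QTestState *s = qtest_init("-machine ast2500-evb");
        uint32_t first = qtest_readl(s, 0x1e6e2078);
        uint32_t second = qtest_readl(s, 0x1e6e2078);

        /* Probabilistic check: two identical consecutive 32-bit draws
         * occur with chance ~2^-32. */
        g_assert_cmpuint(first, !=, second);
        qtest_quit(s);
    }

Comparing two reads rather than checking against a fixed value keeps the
check independent of the host's entropy source.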



