From: Stefan Berger
Subject: Re: [Qemu-devel] [Qemu-ppc] [PATCH v2 2/2] tpm_spapr: Support suspend and resume
Date: Fri, 31 Aug 2018 09:29:40 -0400
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Thunderbird/52.8.0

On 07/26/2018 01:31 AM, David Gibson wrote:
On Tue, Dec 12, 2017 at 03:44:03PM -0500, Stefan Berger wrote:
Signed-off-by: Stefan Berger <address@hidden>
---
  hw/tpm/tpm_spapr.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++----
  1 file changed, 57 insertions(+), 4 deletions(-)

diff --git a/hw/tpm/tpm_spapr.c b/hw/tpm/tpm_spapr.c
index ef5e62d..0b8a424 100644
--- a/hw/tpm/tpm_spapr.c
+++ b/hw/tpm/tpm_spapr.c
@@ -248,9 +248,8 @@ static int tpm_spapr_do_crq(struct VIOsPAPRDevice *dev, uint8_t *crq_data)
      return H_SUCCESS;
  }
-static void tpm_spapr_request_completed(TPMIf *ti)
+static void _tpm_spapr_request_completed(SPAPRvTPMState *s)
Don't start identifiers with _, please, they're reserved by the
standard library.

  {
-    SPAPRvTPMState *s = VIO_SPAPR_VTPM(ti);
      TPMSpaprCRQ *crq = &s->crq;
      uint32_t len;
      int rc;
@@ -281,6 +280,13 @@ static void tpm_spapr_request_completed(TPMIf *ti)
      }
  }
+static void tpm_spapr_request_completed(TPMIf *ti)
+{
+    SPAPRvTPMState *s = VIO_SPAPR_VTPM(ti);
+
+    _tpm_spapr_request_completed(s);
+}
I don't see much value in this wrapper - there's only one caller,
local to this code, so it can do the cast itself.

ok.
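
A minimal sketch of what folding the two comments above together could look like: keep the single, non-underscore-prefixed tpm_spapr_request_completed() with its TPMIf signature and body unchanged, and let the one local caller, tpm_spapr_post_load(), do the QOM cast itself. TPM_IF() is assumed here to be the usual QOM cast macro from sysemu/tpm.h; this is an illustration, not the reposted patch.

    /* tpm_spapr_request_completed(TPMIf *ti) keeps its original body */

    static int tpm_spapr_post_load(void *opaque, int version_id)
    {
        SPAPRvTPMState *s = opaque;

        if (s->state == SPAPR_VTPM_STATE_EXECUTION) {
            /* deliver the buffered TPM response to the guest via DMA */
            tpm_spapr_request_completed(TPM_IF(s));
        }

        return 0;
    }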

+
  static int tpm_spapr_do_startup_tpm(SPAPRvTPMState *s, size_t buffersize)
  {
      return tpm_backend_startup_tpm(s->be_driver, buffersize);
@@ -313,9 +319,56 @@ static enum TPMVersion tpm_spapr_get_version(TPMIf *ti)
      return tpm_backend_get_tpm_version(s->be_driver);
  }
+/* persistent state handling */
+
+static int tpm_spapr_pre_save(void *opaque)
+{
+    SPAPRvTPMState *s = opaque;
+
+    /*
+     * Synchronize with backend completion.
+     */
+    tpm_backend_wait_cmd_completed(s->be_driver);
+
+    /*
+     * we cannot deliver the results to the VM (in state
+     * SPAPR_VTPM_STATE_EXECUTION) since DMA would touch VM memory
+     */
+
+    return 0;
+}
+
+static int tpm_spapr_post_load(void *opaque,
+                               int version_id __attribute__((unused)))
Use the G_GNUC_UNUSED macro instead, please.

I will remove the attribute altogether since it's not used anywhere else, either.
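
For reference, a quick sketch of the three spellings in question, assuming GLib's G_GNUC_UNUSED, which expands to __attribute__((unused)) under GCC:

    /* as posted in the patch */
    static int tpm_spapr_post_load(void *opaque,
                                   int version_id __attribute__((unused)));

    /* QEMU/GLib style suggested in the review */
    static int tpm_spapr_post_load(void *opaque, int version_id G_GNUC_UNUSED);

    /* with the attribute dropped altogether, as intended for the repost */
    static int tpm_spapr_post_load(void *opaque, int version_id);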


+{
+    SPAPRvTPMState *s = opaque;
+
+    if (s->state == SPAPR_VTPM_STATE_EXECUTION) {
+        /*
+         * now we can deliver the results to the VM via DMA
+         */
+        _tpm_spapr_request_completed(s);
+    }
+
+    return 0;
+}
+
  static const VMStateDescription vmstate_spapr_vtpm = {
-    .name = "tpm_spapr",
-    .unmigratable = 1,
+    .name = "tpm-spapr",
+    .version_id = 1,
+    .minimum_version_id = 0,
+    .minimum_version_id_old = 0,
+    .pre_save = tpm_spapr_pre_save,
+    .post_load = tpm_spapr_post_load,
+    .fields = (VMStateField[]) {
This should include VMSTATE_SPAPR_VIO(), which will cover several of
the fields you list explicitly below.

True. I thought I had tried this 'back then' and for some reason it wouldn't recover completely while saving/restoring during firmware execution, but now it works...

I will repost soon. A while ago I added a new 2nd patch that has the function we need to call before suspend [tpm_backend_wait_cmd_completed(s->be_driver)] return a boolean, which is true if a response was received from the TPM and needs to be delivered to the VM after resume.
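
For concreteness, a sketch of what David's VMSTATE_SPAPR_VIO() suggestion could look like, assuming the macro from hw/ppc/spapr_vio.h that migrates the embedded VIOsPAPRDevice, including its CRQ registers, so the explicit vdev.crq.* entries below become redundant; only the .fields array is the point here:

    static const VMStateDescription vmstate_spapr_vtpm = {
        .name = "tpm-spapr",
        .version_id = 1,
        .minimum_version_id = 0,
        .pre_save = tpm_spapr_pre_save,
        .post_load = tpm_spapr_post_load,
        .fields = (VMStateField[]) {
            VMSTATE_SPAPR_VIO(vdev, SPAPRvTPMState), /* covers crq.qladdr/qsize/qnext */
            VMSTATE_BUFFER(crq.raw, SPAPRvTPMState),
            VMSTATE_UINT8(state, SPAPRvTPMState),
            VMSTATE_BUFFER(buffer, SPAPRvTPMState),
            VMSTATE_END_OF_LIST(),
        }
    };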


+        VMSTATE_BUFFER(crq.raw, SPAPRvTPMState),
+        VMSTATE_UINT64(vdev.crq.qladdr, SPAPRvTPMState),
+        VMSTATE_UINT32(vdev.crq.qsize, SPAPRvTPMState),
+        VMSTATE_UINT32(vdev.crq.qnext, SPAPRvTPMState),
+        VMSTATE_UINT8(state, SPAPRvTPMState),
+        VMSTATE_BUFFER(buffer, SPAPRvTPMState),
+        VMSTATE_END_OF_LIST(),
+    }
  };
static Property tpm_spapr_properties[] = {
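
A rough sketch of the plan described above for the repost (illustrative only, not the actual follow-up patch): assume tpm_backend_wait_cmd_completed() is changed to return true when a response arrived from the TPM while waiting, and assume a hypothetical migrated bool field, response_pending, in SPAPRvTPMState:

    static int tpm_spapr_pre_save(void *opaque)
    {
        SPAPRvTPMState *s = opaque;

        /* wait for any outstanding command; remember whether a response
         * still has to be delivered to the guest after resume */
        s->response_pending = tpm_backend_wait_cmd_completed(s->be_driver);

        return 0;
    }

    static int tpm_spapr_post_load(void *opaque, int version_id)
    {
        SPAPRvTPMState *s = opaque;

        if (s->response_pending) {
            /* now it is safe to DMA the buffered response into guest memory */
            tpm_spapr_request_completed(TPM_IF(s));
            s->response_pending = false;
        }

        return 0;
    }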




