[PATCH] hvf: Enable 1G page support
From: Alexander Graf
Subject: [PATCH] hvf: Enable 1G page support
Date: Fri, 21 Apr 2023 00:52:58 +0200
Hvf on x86 only supported 2MiB large pages, but never bothered to strip
out the 1GiB page size capability from -cpu host. With QEMU 8.0.0 this
became a problem because OVMF started to use 1GiB pages by default.

Let's just unconditionally add 1GiB page walk support to the walker.
With this fix applied, I can successfully run OVMF again.
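
To illustrate what the walker has to do, here is a minimal standalone
sketch (compose_gpa and PTE_ADDR_MASK are made-up names for illustration,
not part of the patch) of how the guest-physical address is composed for
each page size, using the walker's largeness convention (0 = 4KiB,
1 = 2MiB, 2 = 1GiB):

    #include <stdint.h>

    /* Physical-address bits above 51 are masked off, mirroring the
     * PAE_PTE_*_MASK macros in the patch below. */
    #define PTE_ADDR_MASK(shift) ((-1llu << (shift)) & ((1llu << 52) - 1))

    static uint64_t compose_gpa(uint64_t pte, uint64_t gva, int largeness)
    {
        switch (largeness) {
        case 2:  /* 1GiB page: PTE supplies bits 51:30, GVA bits 29:0 */
            return (pte & PTE_ADDR_MASK(30)) | (gva & ((1llu << 30) - 1));
        case 1:  /* 2MiB page: PTE supplies bits 51:21, GVA bits 20:0 */
            return (pte & PTE_ADDR_MASK(21)) | (gva & ((1llu << 21) - 1));
        default: /* 4KiB page: PTE supplies bits 51:12, GVA bits 11:0 */
            return (pte & PTE_ADDR_MASK(12)) | (gva & ((1llu << 12) - 1));
        }
    }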
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1603
Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reported-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Reported-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
On my test VM, Linux dies later on with issues in interrupt delivery. But
those are unrelated to this patch; I confirmed that I get the same behavior
with 1GiB page support disabled.
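
For anyone wanting to reproduce the original failure, an invocation along
these lines (firmware and disk image paths are placeholders) boots OVMF
under hvf; without this patch, it fails once OVMF starts using 1GiB pages:

    qemu-system-x86_64 \
        -accel hvf \
        -cpu host \
        -m 4096 \
        -drive if=pflash,format=raw,readonly=on,file=OVMF_CODE.fd \
        -drive if=pflash,format=raw,file=OVMF_VARS.fd \
        -drive file=disk.img,format=raw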
---
target/i386/hvf/x86_mmu.c | 30 ++++++++++++++++++++----------
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c
index 96d117567e..1d860651c6 100644
--- a/target/i386/hvf/x86_mmu.c
+++ b/target/i386/hvf/x86_mmu.c
@@ -38,6 +38,7 @@
 #define LEGACY_PTE_PAGE_MASK (0xffffffffllu << 12)
 #define PAE_PTE_PAGE_MASK ((-1llu << 12) & ((1llu << 52) - 1))
 #define PAE_PTE_LARGE_PAGE_MASK ((-1llu << (21)) & ((1llu << 52) - 1))
+#define PAE_PTE_SUPER_PAGE_MASK ((-1llu << (30)) & ((1llu << 52) - 1))
 
 struct gpt_translation {
     target_ulong gva;
@@ -96,7 +97,7 @@ static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
 
 /* test page table entry */
 static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
-                          int level, bool *is_large, bool pae)
+                          int level, int *largeness, bool pae)
 {
     uint64_t pte = pt->pte[level];
@@ -118,9 +119,9 @@ static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
         goto exit;
     }
 
-    if (1 == level && pte_large_page(pte)) {
+    if (level && pte_large_page(pte)) {
         pt->err_code |= MMU_PAGE_PT;
-        *is_large = true;
+        *largeness = level;
     }
     if (!level) {
         pt->err_code |= MMU_PAGE_PT;
@@ -152,9 +153,18 @@ static inline uint64_t pse_pte_to_page(uint64_t pte)
     return ((pte & 0x1fe000) << 19) | (pte & 0xffc00000);
 }
 
-static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae)
+static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae,
+                                      int largeness)
 {
-    VM_PANIC_ON(!pte_large_page(pt->pte[1]))
+    VM_PANIC_ON(!pte_large_page(pt->pte[largeness]))
+
+    /* 1GiB large page */
+    if (pae && largeness == 2) {
+        return (pt->pte[2] & PAE_PTE_SUPER_PAGE_MASK) | (pt->gva & 0x3fffffff);
+    }
+
+    VM_PANIC_ON(largeness != 1)
+
     /* 2Mb large page */
     if (pae) {
         return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff);
@@ -170,7 +180,7 @@ static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code,
                      struct gpt_translation *pt, bool pae)
 {
     int top_level, level;
-    bool is_large = false;
+    int largeness = 0;
     target_ulong cr3 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR3);
     uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
@@ -186,19 +196,19 @@ static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code,
     for (level = top_level; level > 0; level--) {
         get_pt_entry(cpu, pt, level, pae);
 
-        if (!test_pt_entry(cpu, pt, level - 1, &is_large, pae)) {
+        if (!test_pt_entry(cpu, pt, level - 1, &largeness, pae)) {
            return false;
        }
 
-        if (is_large) {
+        if (largeness) {
            break;
        }
    }
 
-    if (!is_large) {
+    if (!largeness) {
        pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);
    } else {
-        pt->gpa = large_page_gpa(pt, pae);
+        pt->gpa = large_page_gpa(pt, pae, largeness);
    }
 
    return true;
--
2.39.2 (Apple Git-143)