[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 8/9] separate initialization of kernel and user PTP tables
From: |
Luca Dariz |
Subject: |
[PATCH 8/9] separate initialization of kernel and user PTP tables |
Date: |
Sun, 12 Feb 2023 18:28:17 +0100 |
* i386/i386/vm_param.h: temporarily fix kernel upper address
* i386/intel/pmap.c: split kernel and user L3 map initialization. For
simplicity in handling the different configurations, on 32-bit
(+PAE) the name PDPNUM_KERNEL is used in place of PDPNUM, while only
on x86_64 the PDPNUM_USER and PDPNUM_KERNEL are treated differently.
Also, change iterating over PTP tables in case the kernel map is not
right after the user map.
* i386/intel/pmap.h: define PDPNUM_USER and PDPNUM_KERNEL and move
PDPSHIFT to simplify ifdefs.
---
i386/i386/vm_param.h | 2 +-
i386/intel/pmap.c | 62 ++++++++++++++++++++++++++++++++++----------
i386/intel/pmap.h | 8 +++---
3 files changed, 52 insertions(+), 20 deletions(-)
diff --git a/i386/i386/vm_param.h b/i386/i386/vm_param.h
index 5e7f149a..c2e623a6 100644
--- a/i386/i386/vm_param.h
+++ b/i386/i386/vm_param.h
@@ -77,7 +77,7 @@
/* This is the kernel address range in linear addresses. */
#ifdef __x86_64__
#define LINEAR_MIN_KERNEL_ADDRESS VM_MIN_KERNEL_ADDRESS
-#define LINEAR_MAX_KERNEL_ADDRESS (0x00007fffffffffffUL)
+#define LINEAR_MAX_KERNEL_ADDRESS (0x00000000ffffffffUL)
#else
/* On x86, the kernel virtual address space is actually located
at high linear addresses. */
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index a9ff6f3e..7d4ad341 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -604,17 +604,22 @@ static void pmap_bootstrap_pae(void)
#endif
kernel_pmap->l4base = (pt_entry_t*)phystokv(pmap_grab_page());
memset(kernel_pmap->l4base, 0, INTEL_PGBYTES);
+#else
+ const int PDPNUM_KERNEL = PDPNUM;
#endif /* x86_64 */
- // TODO: allocate only the PDPTE for kernel virtual space
- // this means all directmap and the stupid limit above it
- init_alloc_aligned(PDPNUM * INTEL_PGBYTES, &addr);
+ init_alloc_aligned(PDPNUM_KERNEL * INTEL_PGBYTES, &addr);
kernel_page_dir = (pt_entry_t*)phystokv(addr);
+ memset(kernel_page_dir, 0, PDPNUM_KERNEL * INTEL_PGBYTES);
pdp_kernel = (pt_entry_t*)phystokv(pmap_grab_page());
memset(pdp_kernel, 0, INTEL_PGBYTES);
- for (int i = 0; i < PDPNUM; i++)
- WRITE_PTE(&pdp_kernel[i],
+ for (int i = 0; i < PDPNUM_KERNEL; i++) {
+ int pdp_index = i;
+#ifdef __x86_64__
+ pdp_index += lin2pdpnum(VM_MIN_KERNEL_ADDRESS);
+#endif
+ WRITE_PTE(&pdp_kernel[pdp_index],
pa_to_pte(_kvtophys((void *) kernel_page_dir
+ i * INTEL_PGBYTES))
| INTEL_PTE_VALID
@@ -622,6 +627,7 @@ static void pmap_bootstrap_pae(void)
| INTEL_PTE_WRITE
#endif
);
+ }
#ifdef __x86_64__
/* only fill the kernel pdpte during bootstrap */
@@ -749,12 +755,12 @@ void pmap_bootstrap(void)
pmap_bootstrap_pae();
#else /* PAE */
kernel_pmap->dirbase = kernel_page_dir =
(pt_entry_t*)phystokv(pmap_grab_page());
-#endif /* PAE */
{
unsigned i;
for (i = 0; i < NPDES; i++)
kernel_page_dir[i] = 0;
}
+#endif /* PAE */
#ifdef MACH_PV_PAGETABLES
pmap_bootstrap_xen()
@@ -1260,6 +1266,10 @@ pmap_page_table_page_dealloc(vm_offset_t pa)
*/
pmap_t pmap_create(vm_size_t size)
{
+#ifdef __x86_64__
+ // needs to be reworked if we want to dynamically allocate PDPs
+ const int PDPNUM = PDPNUM_KERNEL;
+#endif
pt_entry_t *page_dir[PDPNUM], *pdp_kernel;
int i;
pmap_t p;
@@ -1328,8 +1338,12 @@ pmap_t pmap_create(vm_size_t size)
memset(pdp_kernel, 0, INTEL_PGBYTES);
{
- for (i = 0; i < PDPNUM; i++)
- WRITE_PTE(&pdp_kernel[i],
+ for (i = 0; i < PDPNUM; i++) {
+ int pdp_index = i;
+#ifdef __x86_64__
+ pdp_index += lin2pdpnum(VM_MIN_KERNEL_ADDRESS);
+#endif
+ WRITE_PTE(&pdp_kernel[pdp_index],
pa_to_pte(kvtophys((vm_offset_t) page_dir[i]))
| INTEL_PTE_VALID
#if (defined(__x86_64__) && !defined(MACH_HYP)) || defined(MACH_PV_PAGETABLES)
@@ -1339,19 +1353,39 @@ pmap_t pmap_create(vm_size_t size)
#endif /* __x86_64__ */
#endif
);
+ }
}
#ifdef __x86_64__
- // TODO alloc only PDPTE for the user range VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS
- // and keep the same for kernel range, in l4 table we have different entries
p->l4base = (pt_entry_t *) kmem_cache_alloc(&l4_cache);
if (p->l4base == NULL)
panic("pmap_create");
memset(p->l4base, 0, INTEL_PGBYTES);
WRITE_PTE(&p->l4base[lin2l4num(VM_MIN_KERNEL_ADDRESS)],
- pa_to_pte(kvtophys((vm_offset_t) pdp_kernel)) | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_USER);
+ pa_to_pte(kvtophys((vm_offset_t) pdp_kernel)) | INTEL_PTE_VALID | INTEL_PTE_WRITE);
#if lin2l4num(VM_MIN_KERNEL_ADDRESS) != lin2l4num(VM_MAX_USER_ADDRESS)
- // TODO kernel vm and user vm are not in the same l4 entry, so add the user one
+ // kernel vm and user vm are not in the same l4 entry, so add the user one
+ // TODO alloc only PDPTE for the user range VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS
+ // and keep the same for kernel range, in l4 table we have different entries
+ pt_entry_t *pdp_user = (pt_entry_t *) kmem_cache_alloc(&pdpt_cache);
+ if (pdp_user == NULL) {
+ panic("pmap create");
+ }
+ memset(pdp_user, 0, INTEL_PGBYTES);
+ WRITE_PTE(&p->l4base[lin2l4num(VM_MIN_USER_ADDRESS)],
+ pa_to_pte(kvtophys((vm_offset_t) pdp_user)) | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_USER);
+#endif /* lin2l4num(VM_MIN_KERNEL_ADDRESS) != lin2l4num(VM_MAX_USER_ADDRESS) */
+ for (int i = 0; i < PDPNUM_USER; i++) {
+ pt_entry_t *user_page_dir = (pt_entry_t *) kmem_cache_alloc(&pd_cache);
+ memset(user_page_dir, 0, INTEL_PGBYTES);
+ WRITE_PTE(&pdp_user[i + lin2pdpnum(VM_MIN_USER_ADDRESS)], // pdp_user
+ pa_to_pte(kvtophys(user_page_dir))
+ | INTEL_PTE_VALID
+#if (defined(__x86_64__) && !defined(MACH_HYP)) || defined(MACH_PV_PAGETABLES)
+ | INTEL_PTE_WRITE | INTEL_PTE_USER
#endif
+ );
+ }
+
#ifdef MACH_PV_PAGETABLES
// FIXME: use kmem_cache_alloc instead
if (kmem_alloc_wired(kernel_map,
@@ -1435,7 +1469,7 @@ void pmap_destroy(pmap_t p)
}
#if PAE
- for (i = 0; i <= lin2pdpnum(LINEAR_MIN_KERNEL_ADDRESS); i++) {
+ for (i = 0; i < lin2pdpnum(VM_MAX_USER_ADDRESS); i++) {
#ifdef __x86_64__
#ifdef USER32
/* In this case we know we have one PDP for user space */
@@ -2444,7 +2478,7 @@ void pmap_collect(pmap_t p)
return;
#if PAE
- for (i = 0; i <= lin2pdpnum(LINEAR_MIN_KERNEL_ADDRESS); i++) {
+ for (i = 0; i < lin2pdpnum(VM_MAX_USER_ADDRESS); i++) {
#ifdef __x86_64__
#ifdef USER32
/* In this case we know we have one PDP for user space */
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index 1c6a0d30..34c7cc89 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -74,16 +74,14 @@ typedef phys_addr_t pt_entry_t;
#ifdef __x86_64__
#define L4SHIFT 39 /* L4 shift */
#define L4MASK 0x1ff /* mask for L4 index */
-#endif
-#define PDPSHIFT 30 /* page directory pointer */
-#ifdef __x86_64__
-/* Enough for 8GiB addressing space. */
-#define PDPNUM 8 /* number of page directory pointers */
+#define PDPNUM_KERNEL (((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) >> PDPSHIFT) + 1)
+#define PDPNUM_USER (((VM_MAX_USER_ADDRESS - VM_MIN_USER_ADDRESS) >> PDPSHIFT) + 1)
#define PDPMASK 0x1ff /* mask for page directory pointer index */
#else
#define PDPNUM 4 /* number of page directory pointers */
#define PDPMASK 3 /* mask for page directory pointer index */
#endif
+#define PDPSHIFT 30 /* page directory pointer */
#define PDESHIFT 21 /* page descriptor shift */
#define PDEMASK 0x1ff /* mask for page descriptor index */
#define PTESHIFT 12 /* page table shift */
--
2.30.2
- [PATCH 2/9] fix x86_64 asm for higher kernel addresses, (continued)
- [PATCH 2/9] fix x86_64 asm for higher kernel addresses, Luca Dariz, 2023/02/12
- [PATCH 3/9] factor out xen-specific bootstrap, Luca Dariz, 2023/02/12
- [PATCH 4/9] factor out PAE-specific bootstrap, Luca Dariz, 2023/02/12
- [PATCH 7/9] extend data types to hold a 64-bit address, Luca Dariz, 2023/02/12
- [PATCH 9/9] move kernel virtual address space to upper addresses, Luca Dariz, 2023/02/12
- [PATCH 8/9] separate initialization of kernel and user PTP tables,
Luca Dariz <=
- Re: [PATCH 0/9 gnumach] move kernel vm map to high addresses on x86_64, Luca, 2023/02/12