From: David Hildenbrand
Subject: [PATCH v4 06/14] util/mmap-alloc: Factor out activating of memory to mmap_activate()
Date: Fri, 19 Mar 2021 11:12:22 +0100
We want to activate memory within a reserved memory region, to make it
accessible. Let's factor that out.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: Murilo Opsfelder Araujo <muriloo@linux.ibm.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
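Not part of the patch: a minimal, self-contained sketch (Linux-specific, illustrative names and sizes only) of the reserve-then-activate pattern that mmap_reserve()/mmap_activate() implement, i.e. reserve address space with PROT_NONE first, then map accessible memory into that reservation with MAP_FIXED.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    const size_t size = 2 * 1024 * 1024;

    /* Reserve address space only; nothing is accessible yet. */
    void *reserved = mmap(NULL, size, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    if (reserved == MAP_FAILED) {
        perror("reserve");
        return 1;
    }

    /* Activate the reservation in place: MAP_FIXED replaces the
     * PROT_NONE mapping with accessible anonymous memory. */
    void *active = mmap(reserved, size, PROT_READ | PROT_WRITE,
                        MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (active == MAP_FAILED) {
        perror("activate");
        munmap(reserved, size);
        return 1;
    }

    memset(active, 0, size);    /* memory is now usable */
    munmap(active, size);
    return 0;
}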
util/mmap-alloc.c | 94 +++++++++++++++++++++++++----------------------
1 file changed, 50 insertions(+), 44 deletions(-)
diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
index 223d66219c..0e2bd7bc0e 100644
--- a/util/mmap-alloc.c
+++ b/util/mmap-alloc.c
@@ -114,6 +114,52 @@ static void *mmap_reserve(size_t size, int fd)
return mmap(0, size, PROT_NONE, flags, fd, 0);
}
+/*
+ * Activate memory in a reserved region from the given fd (if any), to make
+ * it accessible.
+ */
+static void *mmap_activate(void *ptr, size_t size, int fd, bool readonly,
+ bool shared, bool is_pmem, off_t map_offset)
+{
+ const int prot = PROT_READ | (readonly ? 0 : PROT_WRITE);
+ int map_sync_flags = 0;
+ int flags = MAP_FIXED;
+ void *activated_ptr;
+
+ flags |= fd == -1 ? MAP_ANONYMOUS : 0;
+ flags |= shared ? MAP_SHARED : MAP_PRIVATE;
+ if (shared && is_pmem) {
+ map_sync_flags = MAP_SYNC | MAP_SHARED_VALIDATE;
+ }
+
+ activated_ptr = mmap(ptr, size, prot, flags | map_sync_flags, fd,
+ map_offset);
+ if (activated_ptr == MAP_FAILED && map_sync_flags) {
+ if (errno == ENOTSUP) {
+ char *proc_link = g_strdup_printf("/proc/self/fd/%d", fd);
+ char *file_name = g_malloc0(PATH_MAX);
+ int len = readlink(proc_link, file_name, PATH_MAX - 1);
+
+ if (len < 0) {
+ len = 0;
+ }
+ file_name[len] = '\0';
+ fprintf(stderr, "Warning: requesting persistence across crashes "
+ "for backend file %s failed. Proceeding without "
+ "persistence, data might become corrupted in case of host "
+ "crash.\n", file_name);
+ g_free(proc_link);
+ g_free(file_name);
+ }
+ /*
+ * If mmap failed with MAP_SHARED_VALIDATE | MAP_SYNC, we will try
+ * again without these flags to handle backwards compatibility.
+ */
+ activated_ptr = mmap(ptr, size, prot, flags, fd, map_offset);
+ }
+ return activated_ptr;
+}
+
static inline size_t mmap_guard_pagesize(int fd)
{
#if defined(__powerpc64__) && defined(__linux__)
@@ -133,13 +179,8 @@ void *qemu_ram_mmap(int fd,
off_t map_offset)
{
const size_t guard_pagesize = mmap_guard_pagesize(fd);
- int prot;
- int flags;
- int map_sync_flags = 0;
- size_t offset;
- size_t total;
- void *guardptr;
- void *ptr;
+ size_t offset, total;
+ void *ptr, *guardptr;
/*
* Note: this always allocates at least one extra page of virtual address
@@ -156,45 +197,10 @@ void *qemu_ram_mmap(int fd,
/* Always align to host page size */
assert(align >= guard_pagesize);
- flags = MAP_FIXED;
- flags |= fd == -1 ? MAP_ANONYMOUS : 0;
- flags |= shared ? MAP_SHARED : MAP_PRIVATE;
- if (shared && is_pmem) {
- map_sync_flags = MAP_SYNC | MAP_SHARED_VALIDATE;
- }
-
offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;
- prot = PROT_READ | (readonly ? 0 : PROT_WRITE);
-
- ptr = mmap(guardptr + offset, size, prot,
- flags | map_sync_flags, fd, map_offset);
-
- if (ptr == MAP_FAILED && map_sync_flags) {
- if (errno == ENOTSUP) {
- char *proc_link, *file_name;
- int len;
- proc_link = g_strdup_printf("/proc/self/fd/%d", fd);
- file_name = g_malloc0(PATH_MAX);
- len = readlink(proc_link, file_name, PATH_MAX - 1);
- if (len < 0) {
- len = 0;
- }
- file_name[len] = '\0';
- fprintf(stderr, "Warning: requesting persistence across crashes "
- "for backend file %s failed. Proceeding without "
- "persistence, data might become corrupted in case of host "
- "crash.\n", file_name);
- g_free(proc_link);
- g_free(file_name);
- }
- /*
- * if map failed with MAP_SHARED_VALIDATE | MAP_SYNC,
- * we will remove these flags to handle compatibility.
- */
- ptr = mmap(guardptr + offset, size, prot, flags, fd, map_offset);
- }
-
+ ptr = mmap_activate(guardptr + offset, size, fd, readonly, shared, is_pmem,
+ map_offset);
if (ptr == MAP_FAILED) {
munmap(guardptr, total);
return MAP_FAILED;
--
2.29.2