[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH v3 05/12] intel-iommu: introduce vtd_page_walk_i
From: |
Auger Eric |
Subject: |
Re: [Qemu-devel] [PATCH v3 05/12] intel-iommu: introduce vtd_page_walk_info |
Date: |
Fri, 18 May 2018 10:23:58 +0200 |
User-agent: |
Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Thunderbird/45.4.0 |
On 05/17/2018 10:59 AM, Peter Xu wrote:
> During the recursive page walking of IOVA page tables, some stack
> variables are constant variables and never changed during the whole page
> walking procedure. Isolate them into a struct so that we don't need to
> pass those contants down the stack every time and multiple times.
s/contants/constants/
>
> Signed-off-by: Peter Xu <address@hidden>
> ---
> hw/i386/intel_iommu.c | 84 ++++++++++++++++++++++++++-----------------
> 1 file changed, 52 insertions(+), 32 deletions(-)
>
> diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
> index 9a418abfb6..4953d02ed0 100644
> --- a/hw/i386/intel_iommu.c
> +++ b/hw/i386/intel_iommu.c
> @@ -747,9 +747,27 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce,
> uint64_t iova, bool is_write,
>
> typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);
>
> +/**
> + * Constant information used during page walking
page table walk?
> + *
> + * @hook_fn: hook func to be called when detected page
nit: "when detected page"? Not necessarily, as it may also be called on invalid
entries. Perhaps "on leaf ptw entries"?
> + * @private: private data to be passed into hook func
> + * @notify_unmap: whether we should notify invalid entries
> + * @aw: maximum address width
> + */
> +typedef struct {
> + vtd_page_walk_hook hook_fn;
> + void *private;
> + bool notify_unmap;
> + uint8_t aw;
> +} vtd_page_walk_info;
> +
> static int vtd_page_walk_one(IOMMUTLBEntry *entry, int level,
> - vtd_page_walk_hook hook_fn, void *private)
> + vtd_page_walk_info *info)
> {
> + vtd_page_walk_hook hook_fn = info->hook_fn;
> + void *private = info->private;
> +
> assert(hook_fn);
> trace_vtd_page_walk_one(level, entry->iova, entry->translated_addr,
> entry->addr_mask, entry->perm);
> @@ -762,17 +780,13 @@ static int vtd_page_walk_one(IOMMUTLBEntry *entry, int
> level,
> * @addr: base GPA addr to start the walk
> * @start: IOVA range start address
> * @end: IOVA range end address (start <= addr < end)
> - * @hook_fn: hook func to be called when detected page
> - * @private: private data to be passed into hook func
> * @read: whether parent level has read permission
> * @write: whether parent level has write permission
> - * @notify_unmap: whether we should notify invalid entries
> - * @aw: maximum address width
> + * @info: constant information for the page walk
> */
> static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
> - uint64_t end, vtd_page_walk_hook hook_fn,
> - void *private, uint32_t level, bool read,
> - bool write, bool notify_unmap, uint8_t aw)
> + uint64_t end, uint32_t level, bool read,
> + bool write, vtd_page_walk_info *info)
> {
> bool read_cur, write_cur, entry_valid;
> uint32_t offset;
> @@ -822,24 +836,24 @@ static int vtd_page_walk_level(dma_addr_t addr,
> uint64_t start,
>
> if (vtd_is_last_slpte(slpte, level)) {
> /* NOTE: this is only meaningful if entry_valid == true */
> - entry.translated_addr = vtd_get_slpte_addr(slpte, aw);
> - if (!entry_valid && !notify_unmap) {
> + entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw);
> + if (!entry_valid && !info->notify_unmap) {
> trace_vtd_page_walk_skip_perm(iova, iova_next);
> goto next;
> }
> - ret = vtd_page_walk_one(&entry, level, hook_fn, private);
> + ret = vtd_page_walk_one(&entry, level, info);
> if (ret < 0) {
> return ret;
> }
> } else {
> if (!entry_valid) {
> - if (notify_unmap) {
> + if (info->notify_unmap) {
> /*
> * The whole entry is invalid; unmap it all.
> * Translated address is meaningless, zero it.
> */
> entry.translated_addr = 0x0;
> - ret = vtd_page_walk_one(&entry, level, hook_fn, private);
> + ret = vtd_page_walk_one(&entry, level, info);
> if (ret < 0) {
> return ret;
> }
> @@ -848,10 +862,9 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t
> start,
> }
> goto next;
> }
> - ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, aw), iova,
> - MIN(iova_next, end), hook_fn, private,
> - level - 1, read_cur, write_cur,
> - notify_unmap, aw);
> + ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, info->aw),
> + iova, MIN(iova_next, end), level - 1,
> + read_cur, write_cur, info);
> if (ret < 0) {
> return ret;
> }
> @@ -870,28 +883,24 @@ next:
> * @ce: context entry to walk upon
> * @start: IOVA address to start the walk
> * @end: IOVA range end address (start <= addr < end)
> - * @hook_fn: the hook that to be called for each detected area
> - * @private: private data for the hook function
> - * @aw: maximum address width
> + * @info: page walking information struct
> */
> static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
> - vtd_page_walk_hook hook_fn, void *private,
> - bool notify_unmap, uint8_t aw)
> + vtd_page_walk_info *info)
> {
> dma_addr_t addr = vtd_ce_get_slpt_base(ce);
> uint32_t level = vtd_ce_get_level(ce);
>
> - if (!vtd_iova_range_check(start, ce, aw)) {
> + if (!vtd_iova_range_check(start, ce, info->aw)) {
> return -VTD_FR_ADDR_BEYOND_MGAW;
> }
>
> - if (!vtd_iova_range_check(end, ce, aw)) {
> + if (!vtd_iova_range_check(end, ce, info->aw)) {
> /* Fix end so that it reaches the maximum */
> - end = vtd_iova_limit(ce, aw);
> + end = vtd_iova_limit(ce, info->aw);
> }
>
> - return vtd_page_walk_level(addr, start, end, hook_fn, private,
> - level, true, true, notify_unmap, aw);
> + return vtd_page_walk_level(addr, start, end, level, true, true, info);
> }
>
> /* Map a device to its corresponding domain (context-entry) */
> @@ -1446,13 +1455,18 @@ static void
> vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
> vtd_as->devfn, &ce);
> if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
> if (vtd_as_notify_mappings(vtd_as)) {
> + vtd_page_walk_info info = {
> + .hook_fn = vtd_page_invalidate_notify_hook,
> + .private = (void *)&vtd_as->iommu,
> + .notify_unmap = true,
> + .aw = s->aw_bits,
> + };
> +
> /*
> * For MAP-inclusive notifiers, we need to walk the
> * page table to sync the shadow page table.
> */
> - vtd_page_walk(&ce, addr, addr + size,
> - vtd_page_invalidate_notify_hook,
> - (void *)&vtd_as->iommu, true, s->aw_bits);
> + vtd_page_walk(&ce, addr, addr + size, &info);
> } else {
> /*
> * For UNMAP-only notifiers, we don't need to walk the
> @@ -2922,8 +2936,14 @@ static void vtd_iommu_replay(IOMMUMemoryRegion
> *iommu_mr, IOMMUNotifier *n)
> ce.hi, ce.lo);
> if (vtd_as_notify_mappings(vtd_as)) {
> /* This is required only for MAP typed notifiers */
> - vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false,
> - s->aw_bits);
> + vtd_page_walk_info info = {
> + .hook_fn = vtd_replay_hook,
> + .private = (void *)n,
> + .notify_unmap = false,
> + .aw = s->aw_bits,
> + };
> +
> + vtd_page_walk(&ce, 0, ~0ULL, &info);
> }
> } else {
> trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
>
Besides the nit
Reviewed-by: Eric Auger <address@hidden>
Thanks
Eric
- [Qemu-devel] [PATCH v3 00/12] intel-iommu: nested vIOMMU, cleanups, bug fixes, Peter Xu, 2018/05/17
- [Qemu-devel] [PATCH v3 01/12] intel-iommu: send PSI always even if across PDEs, Peter Xu, 2018/05/17
- [Qemu-devel] [PATCH v3 02/12] intel-iommu: remove IntelIOMMUNotifierNode, Peter Xu, 2018/05/17
- [Qemu-devel] [PATCH v3 03/12] intel-iommu: add iommu lock, Peter Xu, 2018/05/17
- [Qemu-devel] [PATCH v3 04/12] intel-iommu: only do page walk for MAP notifiers, Peter Xu, 2018/05/17
- [Qemu-devel] [PATCH v3 05/12] intel-iommu: introduce vtd_page_walk_info, Peter Xu, 2018/05/17
- Re: [Qemu-devel] [PATCH v3 05/12] intel-iommu: introduce vtd_page_walk_info,
Auger Eric <=
- [Qemu-devel] [PATCH v3 06/12] intel-iommu: pass in address space when page walk, Peter Xu, 2018/05/17
- [Qemu-devel] [PATCH v3 07/12] intel-iommu: trace domain id during page walk, Peter Xu, 2018/05/17
- [Qemu-devel] [PATCH v3 08/12] util: implement simple iova tree, Peter Xu, 2018/05/17
- [Qemu-devel] [PATCH v3 09/12] intel-iommu: maintain per-device iova ranges, Peter Xu, 2018/05/17
- [Qemu-devel] [PATCH v3 10/12] intel-iommu: simplify page walk logic, Peter Xu, 2018/05/17
- [Qemu-devel] [PATCH v3 11/12] intel-iommu: new vtd_sync_shadow_page_table_range, Peter Xu, 2018/05/17
- [Qemu-devel] [PATCH v3 12/12] intel-iommu: new sync_shadow_page_table, Peter Xu, 2018/05/17