[PATCH v2] hw/arm/smmuv3: Simplify range invalidation
From: Liu, Renwei
Subject: [PATCH v2] hw/arm/smmuv3: Simplify range invalidation
Date: Mon, 23 Aug 2021 07:50:28 +0000
Simplify range invalidation so that it no longer iterates over all
IOTLB entries multiple times. For instance, an invalidation pattern
like "invalidate 32 4kB pages starting from 0xffacd000" is currently
split into power-of-two sub-ranges and iterates over all IOTLB entries
6 times (num_pages: 1, 2, 16, 8, 4, 1). With the new implementation a
single pass over the IOTLB entries is enough.
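For reference, a minimal standalone sketch of the idea (not the patch
itself; FakeIOTLBEntry, entry_in_range and the example addresses are
made-up stand-ins for the QEMU SMMU structures):

/* Sketch only: drop a cached entry iff it lies entirely inside
 * [inv_iova, inv_iova + range), regardless of how the range is aligned,
 * so one pass over the table is enough. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t iova;      /* start of the cached translation */
    uint64_t addr_mask; /* entry covers addr_mask + 1 bytes */
} FakeIOTLBEntry;

static bool entry_in_range(const FakeIOTLBEntry *e,
                           uint64_t inv_iova, uint64_t range)
{
    return e->iova >= inv_iova &&
           e->iova + e->addr_mask < inv_iova + range;
}

int main(void)
{
    /* "invalidate 32 4kB pages starting from 0xffacd000" */
    uint64_t inv_iova = 0xffacd000ULL;
    uint64_t range    = 32 * 4096ULL;

    FakeIOTLBEntry inside  = { 0xffad0000ULL, 0xfffULL }; /* one 4kB page */
    FakeIOTLBEntry outside = { 0xffb00000ULL, 0xfffULL };

    printf("inside:  %d\n", entry_in_range(&inside,  inv_iova, range)); /* 1 */
    printf("outside: %d\n", entry_in_range(&outside, inv_iova, range)); /* 0 */
    return 0;
}

In the patch the same containment check is applied inside
smmu_hash_remove_by_asid_iova() via g_hash_table_foreach_remove(), so
the separate power-of-two passes collapse into a single one.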
Signed-off-by: Renwei Liu <renwei.liu@verisilicon.com>
---
v2:
- Remove file mode change.
hw/arm/smmu-common.c | 6 +++---
hw/arm/smmu-internal.h | 2 +-
hw/arm/smmuv3.c | 22 ++++------------------
3 files changed, 8 insertions(+), 22 deletions(-)
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index 0459850a93..ccb085f83c 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -142,8 +142,8 @@ static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value,
if (info->asid >= 0 && info->asid != SMMU_IOTLB_ASID(iotlb_key)) {
return false;
}
- return ((info->iova & ~entry->addr_mask) == entry->iova) ||
- ((entry->iova & ~info->mask) == info->iova);
+ return (entry->iova >= info->iova) &&
+ ((entry->iova + entry->addr_mask) < (info->iova + info->range));
}
inline void
@@ -167,7 +167,7 @@ smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
SMMUIOTLBPageInvInfo info = {
.asid = asid, .iova = iova,
- .mask = (num_pages * 1 << granule) - 1};
+ .range = num_pages * 1 << granule};
g_hash_table_foreach_remove(s->iotlb,
smmu_hash_remove_by_asid_iova,
diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h
index 2d75b31953..f0e3a777af 100644
--- a/hw/arm/smmu-internal.h
+++ b/hw/arm/smmu-internal.h
@@ -101,7 +101,7 @@ uint64_t iova_level_offset(uint64_t iova, int inputsize,
typedef struct SMMUIOTLBPageInvInfo {
int asid;
uint64_t iova;
- uint64_t mask;
+ uint64_t range;
} SMMUIOTLBPageInvInfo;
typedef struct SMMUSIDRange {
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 01b60bee49..0b009107d1 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -857,7 +857,7 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
{
- dma_addr_t end, addr = CMD_ADDR(cmd);
+ dma_addr_t addr = CMD_ADDR(cmd);
uint8_t type = CMD_TYPE(cmd);
uint16_t vmid = CMD_VMID(cmd);
uint8_t scale = CMD_SCALE(cmd);
@@ -866,7 +866,6 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
bool leaf = CMD_LEAF(cmd);
uint8_t tg = CMD_TG(cmd);
uint64_t num_pages;
- uint8_t granule;
int asid = -1;
if (type == SMMU_CMD_TLBI_NH_VA) {
@@ -880,23 +879,10 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
return;
}
- /* RIL in use */
-
num_pages = (num + 1) * BIT_ULL(scale);
- granule = tg * 2 + 10;
-
- /* Split invalidations into ^2 range invalidations */
- end = addr + (num_pages << granule) - 1;
-
- while (addr != end + 1) {
- uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
-
- num_pages = (mask + 1) >> granule;
- trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl,
- leaf);
- smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
- smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
- addr += mask + 1;
- }
+ trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
+ smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
+ smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
}
static gboolean
--
2.32.0