
iommu/io-pgtable-arm: Add quirk to quiet WARN_ON()

In situations where the mapping/unmapping sequence can be controlled by
userspace, attempting to map over a region that has not yet been
unmapped is an error, but not one that should spam dmesg.

Now that there is a quirk, we can also drop the selftest_running
flag, and use the quirk instead for selftests.

Acked-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Rob Clark <robdclark@chromium.org>
Link: https://lore.kernel.org/r/20250519175348.11924-6-robdclark@gmail.com
[will: Rename quirk to IO_PGTABLE_QUIRK_NO_WARN per Robin's suggestion]
Signed-off-by: Will Deacon <will@kernel.org>
Rob Clark 2025-05-19 10:51:28 -07:00 committed by Will Deacon
parent be5a2d3f8f
commit 3318f7b5ce
2 changed files with 22 additions and 13 deletions
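
For orientation before the diff: a minimal sketch of how a GPU driver might opt in to the new quirk when allocating its pagetable ops. The function name and the flush-ops symbol are illustrative assumptions; only the cfg fields, the quirk flag, and alloc_io_pgtable_ops() come from the io-pgtable API.

#include <linux/device.h>
#include <linux/io-pgtable.h>
#include <linux/sizes.h>

/* Driver-provided TLB maintenance callbacks (assumed to exist). */
static const struct iommu_flush_ops example_flush_ops;

/*
 * Illustrative setup: userspace controls the map/unmap sequence
 * (e.g. a GPU VM_BIND-style interface), so suppress the WARN_ON()
 * splat for conflicting mappings via IO_PGTABLE_QUIRK_NO_WARN.
 */
static struct io_pgtable_ops *example_alloc_pgtable(struct device *dev,
						    void *cookie)
{
	struct io_pgtable_cfg cfg = {
		.quirks		= IO_PGTABLE_QUIRK_NO_WARN,
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.coherent_walk	= true,
		.tlb		= &example_flush_ops,
		.iommu_dev	= dev,
	};

	return alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
}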

drivers/iommu/io-pgtable-arm.c

@@ -251,8 +251,6 @@ static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
 	       (data->start_level == 1) && (oas == 40);
 }
 
-static bool selftest_running = false;
-
 static dma_addr_t __arm_lpae_dma_addr(void *pages)
 {
 	return (dma_addr_t)virt_to_phys(pages);
@@ -371,7 +369,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 	for (i = 0; i < num_entries; i++)
 		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
 			/* We require an unmap first */
-			WARN_ON(!selftest_running);
+			WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
 			return -EEXIST;
 		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
 			/*
@@ -473,7 +471,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 		cptep = iopte_deref(pte, data);
 	} else if (pte) {
 		/* We require an unmap first */
-		WARN_ON(!selftest_running);
+		WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN));
 		return -EEXIST;
 	}
 
@@ -641,8 +639,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
 	ptep += unmap_idx_start;
 	pte = READ_ONCE(*ptep);
-	if (WARN_ON(!pte))
-		return 0;
+	if (!pte) {
+		WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
+		return -ENOENT;
+	}
 
 	/* If the size matches this level, we're in the right place */
 	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
@@ -652,8 +652,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 		/* Find and handle non-leaf entries */
 		for (i = 0; i < num_entries; i++) {
 			pte = READ_ONCE(ptep[i]);
-			if (WARN_ON(!pte))
+			if (!pte) {
+				WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
 				break;
+			}
 
 			if (!iopte_leaf(pte, lvl, iop->fmt)) {
 				__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
@@ -968,7 +970,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
 			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
 			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
-			    IO_PGTABLE_QUIRK_ARM_HD))
+			    IO_PGTABLE_QUIRK_ARM_HD |
+			    IO_PGTABLE_QUIRK_NO_WARN))
 		return NULL;
 
 	data = arm_lpae_alloc_pgtable(cfg);
@@ -1069,7 +1072,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 	struct arm_lpae_io_pgtable *data;
 	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
 
-	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
+	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB |
+			    IO_PGTABLE_QUIRK_NO_WARN))
 		return NULL;
 
 	data = arm_lpae_alloc_pgtable(cfg);
@@ -1310,7 +1314,6 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
 
 #define __FAIL(ops, i)	({						\
 		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
 		arm_lpae_dump_ops(ops);					\
-		selftest_running = false;				\
 		-EFAULT;						\
 })
@@ -1326,8 +1329,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 	size_t size, mapped;
 	struct io_pgtable_ops *ops;
 
-	selftest_running = true;
-
 	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
 		cfg_cookie = cfg;
 		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
@@ -1416,7 +1417,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 		free_io_pgtable_ops(ops);
 	}
 
-	selftest_running = false;
 	return 0;
 }
 
@@ -1438,6 +1438,7 @@ static int __init arm_lpae_do_selftests(void)
 		.tlb = &dummy_tlb_ops,
 		.coherent_walk = true,
 		.iommu_dev = &dev,
+		.quirks = IO_PGTABLE_QUIRK_NO_WARN,
 	};
 
 	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
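
An aside on the new checks above: the condition reads inverted at first glance. WARN_ON(!(quirks & IO_PGTABLE_QUIRK_NO_WARN)) still warns on a conflicting mapping, but only when the quirk is absent; with the quirk set, the expression is false and the call is silent. A stand-alone sketch of the same pattern (the helper name is illustrative, not from this patch):

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/io-pgtable.h>

/*
 * Illustrative helper: report a conflicting mapping, warning only
 * when the caller has not opted in to IO_PGTABLE_QUIRK_NO_WARN.
 */
static int example_report_conflict(struct io_pgtable_cfg *cfg)
{
	/* False (no warning) when the quirk is set; true otherwise. */
	WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN));
	return -EEXIST;
}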

include/linux/io-pgtable.h

@@ -88,6 +88,13 @@ struct io_pgtable_cfg {
 	 *
 	 * IO_PGTABLE_QUIRK_ARM_HD: Enables dirty tracking in stage 1 pagetable.
	 * IO_PGTABLE_QUIRK_ARM_S2FWB: Use the FWB format for the MemAttrs bits
+	 *
+	 * IO_PGTABLE_QUIRK_NO_WARN: Do not WARN_ON() on conflicting
+	 *	mappings, but silently return -EEXIST.  Normally an attempt
+	 *	to map over an existing mapping would indicate some sort of
+	 *	kernel bug, which would justify the WARN_ON().  But for GPU
+	 *	drivers, this can be under the control of userspace, which
+	 *	deserves an error return but should not spam dmesg.
 	 */
 #define IO_PGTABLE_QUIRK_ARM_NS			BIT(0)
 #define IO_PGTABLE_QUIRK_NO_PERMS		BIT(1)
@@ -97,6 +104,7 @@ struct io_pgtable_cfg {
 #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA		BIT(6)
 #define IO_PGTABLE_QUIRK_ARM_HD			BIT(7)
 #define IO_PGTABLE_QUIRK_ARM_S2FWB		BIT(8)
+#define IO_PGTABLE_QUIRK_NO_WARN		BIT(9)
 	unsigned long			quirks;
 	unsigned long			pgsize_bitmap;
 	unsigned int			ias;
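
To round out the picture, a hedged sketch of the consumer side under the new quirk: a map-over-existing attempt surfaces as a plain -EEXIST that a driver can hand straight back to userspace. The helper name below is an illustrative assumption; map_pages() and its arguments are the existing io_pgtable_ops interface.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/io-pgtable.h>
#include <linux/sizes.h>

/*
 * Illustrative userspace-driven map path: with IO_PGTABLE_QUIRK_NO_WARN
 * set on the pagetable config, a conflicting mapping returns -EEXIST
 * without a WARN_ON() splat, so it can be forwarded as a plain error.
 */
static int example_userspace_map(struct io_pgtable_ops *ops,
				 unsigned long iova, phys_addr_t paddr,
				 size_t size, int prot)
{
	size_t mapped = 0;

	/* map_pages() fails with -EEXIST if any PTE in the range is live */
	return ops->map_pages(ops, iova, paddr, SZ_4K, size / SZ_4K,
			      prot, GFP_KERNEL, &mapped);
}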