mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	documented (hopefully adequately) in the respective changelogs.  Notable
 series include:
 
 - Lucas Stach has provided some page-mapping
   cleanup/consolidation/maintainability work in the series "mm/treewide:
   Remove pXd_huge() API".
 
 - In the series "Allow migrate on protnone reference with
   MPOL_PREFERRED_MANY policy", Donet Tom has optimized mempolicy's
   MPOL_PREFERRED_MANY mode, yielding almost doubled performance in one
   test.
 
 - In their series "Memory allocation profiling" Kent Overstreet and
   Suren Baghdasaryan have contributed a means of determining (via
   /proc/allocinfo) whereabouts in the kernel memory is being allocated:
   number of calls and amount of memory.
 
 - Matthew Wilcox has provided the series "Various significant MM
   patches" which does a number of rather unrelated things, but in largely
   similar code sites.
 
 - In his series "mm: page_alloc: freelist migratetype hygiene" Johannes
   Weiner has fixed the page allocator's handling of migratetype requests,
   with resulting improvements in compaction efficiency.
 
 - In the series "make the hugetlb migration strategy consistent" Baolin
   Wang has fixed a hugetlb migration issue, which should improve hugetlb
   allocation reliability.
 
 - Liu Shixin has hit an I/O meltdown caused by readahead in a
   memory-tight memcg.  Addressed in the series "Fix I/O high when memory
   almost met memcg limit".
 
 - In the series "mm/filemap: optimize folio adding and splitting" Kairui
   Song has optimized pagecache insertion, yielding ~10% performance
   improvement in one test.
 
 - Baoquan He has cleaned up and consolidated the early zone
   initialization code in the series "mm/mm_init.c: refactor
   free_area_init_core()".
 
 - Baoquan has also redone some MM initializatio code in the series
   "mm/init: minor clean up and improvement".
 
 - MM helper cleanups from Christoph Hellwig in his series "remove
   follow_pfn".
 
 - More cleanups from Matthew Wilcox in the series "Various page->flags
   cleanups".
 
 - Vlastimil Babka has contributed maintainability improvements in the
   series "memcg_kmem hooks refactoring".
 
 - More folio conversions and cleanups in Matthew Wilcox's series
 
 	"Convert huge_zero_page to huge_zero_folio"
 	"khugepaged folio conversions"
 	"Remove page_idle and page_young wrappers"
 	"Use folio APIs in procfs"
 	"Clean up __folio_put()"
 	"Some cleanups for memory-failure"
 	"Remove page_mapping()"
 	"More folio compat code removal"
 
 - David Hildenbrand chipped in with "fs/proc/task_mmu: convert hugetlb
   functions to work on folis".
 
 - Code consolidation and cleanup work related to GUP's handling of
   hugetlbs in Peter Xu's series "mm/gup: Unify hugetlb, part 2".
 
 - Rick Edgecombe has developed some fixes to stack guard gaps in the
   series "Cover a guard gap corner case".
 
 - Jinjiang Tu has fixed KSM's behaviour after a fork+exec in the series
   "mm/ksm: fix ksm exec support for prctl".
 
 - Baolin Wang has implemented NUMA balancing for multi-size THPs.  This
   is a simple first-cut implementation for now.  The series is "support
   multi-size THP numa balancing".
 
 - Cleanups to vma handling helper functions from Matthew Wilcox in the
   series "Unify vma_address and vma_pgoff_address".
 
 - Some selftests maintenance work from Dev Jain in the series
   "selftests/mm: mremap_test: Optimizations and style fixes".
 
 - Improvements to the swapping of multi-size THPs from Ryan Roberts in
   the series "Swap-out mTHP without splitting".
 
 - Kefeng Wang has significantly optimized the handling of arm64's
   permission page faults in the series
 
 	"arch/mm/fault: accelerate pagefault when badaccess"
 	"mm: remove arch's private VM_FAULT_BADMAP/BADACCESS"
 
 - GUP cleanups from David Hildenbrand in "mm/gup: consistently call it
   GUP-fast".
 
 - hugetlb fault code cleanups from Vishal Moola in "Hugetlb fault path to
   use struct vm_fault".
 
 - selftests build fixes from John Hubbard in the series "Fix
   selftests/mm build without requiring "make headers"".
 
 - Memory tiering fixes/improvements from Ho-Ren (Jack) Chuang in the
   series "Improved Memory Tier Creation for CPUless NUMA Nodes".  Fixes
   the initialization code so that migration between different memory types
   works as intended.
 
 - David Hildenbrand has improved follow_pte() and fixed an errant driver
   in the series "mm: follow_pte() improvements and acrn follow_pte()
   fixes".
 
 - David also did some cleanup work on large folio mapcounts in his
   series "mm: mapcount for large folios + page_mapcount() cleanups".
 
 - Folio conversions in KSM in Alex Shi's series "transfer page to folio
   in KSM".
 
 - Barry Song has added some sysfs stats for monitoring multi-size THP's
   in the series "mm: add per-order mTHP alloc and swpout counters".
 
 - Some zswap cleanups from Yosry Ahmed in the series "zswap same-filled
   and limit checking cleanups".
 
 - Matthew Wilcox has been looking at buffer_head code and found the
   documentation to be lacking.  The series is "Improve buffer head
   documentation".
 
 - Multi-size THPs get more work, this time from Lance Yang.  His series
   "mm/madvise: enhance lazyfreeing with mTHP in madvise_free" optimizes
   the freeing of these things.
 
 - Kemeng Shi has added more userspace-visible writeback instrumentation
   in the series "Improve visibility of writeback".
 
 - Kemeng Shi then sent some maintenance work on top in the series "Fix
   and cleanups to page-writeback".
 
 - Matthew Wilcox reduces mmap_lock traffic in the anon vma code in the
   series "Improve anon_vma scalability for anon VMAs".  Intel's test bot
   reported an improbable 3x improvement in one test.
 
 - SeongJae Park adds some DAMON feature work in the series
 
 	"mm/damon: add a DAMOS filter type for page granularity access recheck"
 	"selftests/damon: add DAMOS quota goal test"
 
 - Also some maintenance work in the series
 
 	"mm/damon/paddr: simplify page level access re-check for pageout"
 	"mm/damon: misc fixes and improvements"
 
 - David Hildenbrand has disabled some known-to-fail selftests ni the
   series "selftests: mm: cow: flag vmsplice() hugetlb tests as XFAIL".
 
 - memcg metadata storage optimizations from Shakeel Butt in "memcg:
   reduce memory consumption by memcg stats".
 
 - DAX fixes and maintenance work from Vishal Verma in the series
   "dax/bus.c: Fixups for dax-bus locking".
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCZkgQYwAKCRDdBJ7gKXxA
 jrdKAP9WVJdpEcXxpoub/vVE0UWGtffr8foifi9bCwrQrGh5mgEAx7Yf0+d/oBZB
 nvA4E0DcPrUAFy144FNM0NTCb7u9vAw=
 =V3R/
 -----END PGP SIGNATURE-----
Merge tag 'mm-stable-2024-05-17-19-19' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull mm updates from Andrew Morton:
 "The usual shower of singleton fixes and minor series all over MM,
  documented (hopefully adequately) in the respective changelogs.
  Notable series include:
   - Lucas Stach has provided some page-mapping cleanup/consolidation/
     maintainability work in the series "mm/treewide: Remove pXd_huge()
     API".
   - In the series "Allow migrate on protnone reference with
     MPOL_PREFERRED_MANY policy", Donet Tom has optimized mempolicy's
     MPOL_PREFERRED_MANY mode, yielding almost doubled performance in
     one test.
   - In their series "Memory allocation profiling" Kent Overstreet and
     Suren Baghdasaryan have contributed a means of determining (via
     /proc/allocinfo) whereabouts in the kernel memory is being
     allocated: number of calls and amount of memory.
   - Matthew Wilcox has provided the series "Various significant MM
     patches" which does a number of rather unrelated things, but in
     largely similar code sites.
   - In his series "mm: page_alloc: freelist migratetype hygiene"
     Johannes Weiner has fixed the page allocator's handling of
     migratetype requests, with resulting improvements in compaction
     efficiency.
   - In the series "make the hugetlb migration strategy consistent"
     Baolin Wang has fixed a hugetlb migration issue, which should
     improve hugetlb allocation reliability.
   - Liu Shixin has hit an I/O meltdown caused by readahead in a
     memory-tight memcg. Addressed in the series "Fix I/O high when
     memory almost met memcg limit".
   - In the series "mm/filemap: optimize folio adding and splitting"
     Kairui Song has optimized pagecache insertion, yielding ~10%
     performance improvement in one test.
   - Baoquan He has cleaned up and consolidated the early zone
     initialization code in the series "mm/mm_init.c: refactor
     free_area_init_core()".
   - Baoquan has also redone some MM initializatio code in the series
     "mm/init: minor clean up and improvement".
   - MM helper cleanups from Christoph Hellwig in his series "remove
     follow_pfn".
   - More cleanups from Matthew Wilcox in the series "Various
     page->flags cleanups".
   - Vlastimil Babka has contributed maintainability improvements in the
     series "memcg_kmem hooks refactoring".
   - More folio conversions and cleanups in Matthew Wilcox's series:
	"Convert huge_zero_page to huge_zero_folio"
	"khugepaged folio conversions"
	"Remove page_idle and page_young wrappers"
	"Use folio APIs in procfs"
	"Clean up __folio_put()"
	"Some cleanups for memory-failure"
	"Remove page_mapping()"
	"More folio compat code removal"
   - David Hildenbrand chipped in with "fs/proc/task_mmu: convert
     hugetlb functions to work on folis".
   - Code consolidation and cleanup work related to GUP's handling of
     hugetlbs in Peter Xu's series "mm/gup: Unify hugetlb, part 2".
   - Rick Edgecombe has developed some fixes to stack guard gaps in the
     series "Cover a guard gap corner case".
   - Jinjiang Tu has fixed KSM's behaviour after a fork+exec in the
     series "mm/ksm: fix ksm exec support for prctl".
   - Baolin Wang has implemented NUMA balancing for multi-size THPs.
     This is a simple first-cut implementation for now. The series is
     "support multi-size THP numa balancing".
   - Cleanups to vma handling helper functions from Matthew Wilcox in
     the series "Unify vma_address and vma_pgoff_address".
   - Some selftests maintenance work from Dev Jain in the series
     "selftests/mm: mremap_test: Optimizations and style fixes".
   - Improvements to the swapping of multi-size THPs from Ryan Roberts
     in the series "Swap-out mTHP without splitting".
   - Kefeng Wang has significantly optimized the handling of arm64's
     permission page faults in the series
	"arch/mm/fault: accelerate pagefault when badaccess"
	"mm: remove arch's private VM_FAULT_BADMAP/BADACCESS"
   - GUP cleanups from David Hildenbrand in "mm/gup: consistently call
     it GUP-fast".
   - hugetlb fault code cleanups from Vishal Moola in "Hugetlb fault
     path to use struct vm_fault".
   - selftests build fixes from John Hubbard in the series "Fix
     selftests/mm build without requiring "make headers"".
   - Memory tiering fixes/improvements from Ho-Ren (Jack) Chuang in the
     series "Improved Memory Tier Creation for CPUless NUMA Nodes".
     Fixes the initialization code so that migration between different
     memory types works as intended.
   - David Hildenbrand has improved follow_pte() and fixed an errant
     driver in the series "mm: follow_pte() improvements and acrn
     follow_pte() fixes".
   - David also did some cleanup work on large folio mapcounts in his
     series "mm: mapcount for large folios + page_mapcount() cleanups".
   - Folio conversions in KSM in Alex Shi's series "transfer page to
     folio in KSM".
   - Barry Song has added some sysfs stats for monitoring multi-size
     THP's in the series "mm: add per-order mTHP alloc and swpout
     counters".
   - Some zswap cleanups from Yosry Ahmed in the series "zswap
     same-filled and limit checking cleanups".
   - Matthew Wilcox has been looking at buffer_head code and found the
     documentation to be lacking. The series is "Improve buffer head
     documentation".
   - Multi-size THPs get more work, this time from Lance Yang. His
     series "mm/madvise: enhance lazyfreeing with mTHP in madvise_free"
     optimizes the freeing of these things.
   - Kemeng Shi has added more userspace-visible writeback
     instrumentation in the series "Improve visibility of writeback".
   - Kemeng Shi then sent some maintenance work on top in the series
     "Fix and cleanups to page-writeback".
   - Matthew Wilcox reduces mmap_lock traffic in the anon vma code in
     the series "Improve anon_vma scalability for anon VMAs". Intel's
     test bot reported an improbable 3x improvement in one test.
   - SeongJae Park adds some DAMON feature work in the series
	"mm/damon: add a DAMOS filter type for page granularity access recheck"
	"selftests/damon: add DAMOS quota goal test"
   - Also some maintenance work in the series
	"mm/damon/paddr: simplify page level access re-check for pageout"
	"mm/damon: misc fixes and improvements"
   - David Hildenbrand has disabled some known-to-fail selftests ni the
     series "selftests: mm: cow: flag vmsplice() hugetlb tests as
     XFAIL".
   - memcg metadata storage optimizations from Shakeel Butt in "memcg:
     reduce memory consumption by memcg stats".
   - DAX fixes and maintenance work from Vishal Verma in the series
     "dax/bus.c: Fixups for dax-bus locking""
* tag 'mm-stable-2024-05-17-19-19' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (426 commits)
  memcg, oom: cleanup unused memcg_oom_gfp_mask and memcg_oom_order
  selftests/mm: hugetlb_madv_vs_map: avoid test skipping by querying hugepage size at runtime
  mm/hugetlb: add missing VM_FAULT_SET_HINDEX in hugetlb_wp
  mm/hugetlb: add missing VM_FAULT_SET_HINDEX in hugetlb_fault
  selftests: cgroup: add tests to verify the zswap writeback path
  mm: memcg: make alloc_mem_cgroup_per_node_info() return bool
  mm/damon/core: fix return value from damos_wmark_metric_value
  mm: do not update memcg stats for NR_{FILE/SHMEM}_PMDMAPPED
  selftests: cgroup: remove redundant enabling of memory controller
  Docs/mm/damon/maintainer-profile: allow posting patches based on damon/next tree
  Docs/mm/damon/maintainer-profile: change the maintainer's timezone from PST to PT
  Docs/mm/damon/design: use a list for supported filters
  Docs/admin-guide/mm/damon/usage: fix wrong schemes effective quota update command
  Docs/admin-guide/mm/damon/usage: fix wrong example of DAMOS filter matching sysfs file
  selftests/damon: classify tests for functionalities and regressions
  selftests/damon/_damon_sysfs: use 'is' instead of '==' for 'None'
  selftests/damon/_damon_sysfs: find sysfs mount point from /proc/mounts
  selftests/damon/_damon_sysfs: check errors from nr_schemes file reads
  mm/damon/core: initialize ->esz_bp from damos_quota_init_priv()
  selftests/damon: add a test for DAMOS quota goal
  ...
		
	
			
		
			
				
	
	
		
			2176 lines
		
	
	
	
		
			56 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			2176 lines
		
	
	
	
		
			56 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
// SPDX-License-Identifier: GPL-2.0+
 | 
						|
/*
 | 
						|
 * test_xarray.c: Test the XArray API
 | 
						|
 * Copyright (c) 2017-2018 Microsoft Corporation
 | 
						|
 * Copyright (c) 2019-2020 Oracle
 | 
						|
 * Author: Matthew Wilcox <willy@infradead.org>
 | 
						|
 */
 | 
						|
 | 
						|
#include <linux/xarray.h>
 | 
						|
#include <linux/module.h>
 | 
						|
 | 
						|
static unsigned int tests_run;
 | 
						|
static unsigned int tests_passed;
 | 
						|
 | 
						|
static const unsigned int order_limit =
 | 
						|
		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
 | 
						|
 | 
						|
#ifndef XA_DEBUG
 | 
						|
# ifdef __KERNEL__
 | 
						|
void xa_dump(const struct xarray *xa) { }
 | 
						|
# endif
 | 
						|
#undef XA_BUG_ON
 | 
						|
#define XA_BUG_ON(xa, x) do {					\
 | 
						|
	tests_run++;						\
 | 
						|
	if (x) {						\
 | 
						|
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
 | 
						|
		xa_dump(xa);					\
 | 
						|
		dump_stack();					\
 | 
						|
	} else {						\
 | 
						|
		tests_passed++;					\
 | 
						|
	}							\
 | 
						|
} while (0)
 | 
						|
#endif
 | 
						|
 | 
						|
static void *xa_mk_index(unsigned long index)
 | 
						|
{
 | 
						|
	return xa_mk_value(index & LONG_MAX);
 | 
						|
}
 | 
						|
 | 
						|
static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 | 
						|
{
 | 
						|
	return xa_store(xa, index, xa_mk_index(index), gfp);
 | 
						|
}
 | 
						|
 | 
						|
static void xa_insert_index(struct xarray *xa, unsigned long index)
 | 
						|
{
 | 
						|
	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
 | 
						|
				GFP_KERNEL) != 0);
 | 
						|
}
 | 
						|
 | 
						|
static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 | 
						|
{
 | 
						|
	u32 id;
 | 
						|
 | 
						|
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
 | 
						|
				gfp) != 0);
 | 
						|
	XA_BUG_ON(xa, id != index);
 | 
						|
}
 | 
						|
 | 
						|
static void xa_erase_index(struct xarray *xa, unsigned long index)
 | 
						|
{
 | 
						|
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
 | 
						|
}
 | 
						|
 | 
						|
/*
 | 
						|
 * If anyone needs this, please move it to xarray.c.  We have no current
 | 
						|
 * users outside the test suite because all current multislot users want
 | 
						|
 * to use the advanced API.
 | 
						|
 */
 | 
						|
static void *xa_store_order(struct xarray *xa, unsigned long index,
 | 
						|
		unsigned order, void *entry, gfp_t gfp)
 | 
						|
{
 | 
						|
	XA_STATE_ORDER(xas, xa, index, order);
 | 
						|
	void *curr;
 | 
						|
 | 
						|
	do {
 | 
						|
		xas_lock(&xas);
 | 
						|
		curr = xas_store(&xas, entry);
 | 
						|
		xas_unlock(&xas);
 | 
						|
	} while (xas_nomem(&xas, gfp));
 | 
						|
 | 
						|
	return curr;
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_xa_err(struct xarray *xa)
 | 
						|
{
 | 
						|
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
 | 
						|
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
 | 
						|
#ifndef __KERNEL__
 | 
						|
	/* The kernel does not fail GFP_NOWAIT allocations */
 | 
						|
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
 | 
						|
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
 | 
						|
#endif
 | 
						|
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
 | 
						|
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
 | 
						|
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
 | 
						|
// kills the test-suite :-(
 | 
						|
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_xas_retry(struct xarray *xa)
 | 
						|
{
 | 
						|
	XA_STATE(xas, xa, 0);
 | 
						|
	void *entry;
 | 
						|
 | 
						|
	xa_store_index(xa, 0, GFP_KERNEL);
 | 
						|
	xa_store_index(xa, 1, GFP_KERNEL);
 | 
						|
 | 
						|
	rcu_read_lock();
 | 
						|
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
 | 
						|
	xa_erase_index(xa, 1);
 | 
						|
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
 | 
						|
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
 | 
						|
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
 | 
						|
	xas_reset(&xas);
 | 
						|
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
 | 
						|
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
 | 
						|
	XA_BUG_ON(xa, xas.xa_node != NULL);
 | 
						|
	rcu_read_unlock();
 | 
						|
 | 
						|
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
 | 
						|
 | 
						|
	rcu_read_lock();
 | 
						|
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
 | 
						|
	xas.xa_node = XAS_RESTART;
 | 
						|
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
 | 
						|
	rcu_read_unlock();
 | 
						|
 | 
						|
	/* Make sure we can iterate through retry entries */
 | 
						|
	xas_lock(&xas);
 | 
						|
	xas_set(&xas, 0);
 | 
						|
	xas_store(&xas, XA_RETRY_ENTRY);
 | 
						|
	xas_set(&xas, 1);
 | 
						|
	xas_store(&xas, XA_RETRY_ENTRY);
 | 
						|
 | 
						|
	xas_set(&xas, 0);
 | 
						|
	xas_for_each(&xas, entry, ULONG_MAX) {
 | 
						|
		xas_store(&xas, xa_mk_index(xas.xa_index));
 | 
						|
	}
 | 
						|
	xas_unlock(&xas);
 | 
						|
 | 
						|
	xa_erase_index(xa, 0);
 | 
						|
	xa_erase_index(xa, 1);
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_xa_load(struct xarray *xa)
 | 
						|
{
 | 
						|
	unsigned long i, j;
 | 
						|
 | 
						|
	for (i = 0; i < 1024; i++) {
 | 
						|
		for (j = 0; j < 1024; j++) {
 | 
						|
			void *entry = xa_load(xa, j);
 | 
						|
			if (j < i)
 | 
						|
				XA_BUG_ON(xa, xa_to_value(entry) != j);
 | 
						|
			else
 | 
						|
				XA_BUG_ON(xa, entry);
 | 
						|
		}
 | 
						|
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
 | 
						|
	}
 | 
						|
 | 
						|
	for (i = 0; i < 1024; i++) {
 | 
						|
		for (j = 0; j < 1024; j++) {
 | 
						|
			void *entry = xa_load(xa, j);
 | 
						|
			if (j >= i)
 | 
						|
				XA_BUG_ON(xa, xa_to_value(entry) != j);
 | 
						|
			else
 | 
						|
				XA_BUG_ON(xa, entry);
 | 
						|
		}
 | 
						|
		xa_erase_index(xa, i);
 | 
						|
	}
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
 | 
						|
{
 | 
						|
	unsigned int order;
 | 
						|
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;
 | 
						|
 | 
						|
	/* NULL elements have no marks set */
 | 
						|
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
 | 
						|
	xa_set_mark(xa, index, XA_MARK_0);
 | 
						|
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
 | 
						|
 | 
						|
	/* Storing a pointer will not make a mark appear */
 | 
						|
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
 | 
						|
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
 | 
						|
	xa_set_mark(xa, index, XA_MARK_0);
 | 
						|
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
 | 
						|
 | 
						|
	/* Setting one mark will not set another mark */
 | 
						|
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
 | 
						|
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));
 | 
						|
 | 
						|
	/* Storing NULL clears marks, and they can't be set again */
 | 
						|
	xa_erase_index(xa, index);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
 | 
						|
	xa_set_mark(xa, index, XA_MARK_0);
 | 
						|
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
 | 
						|
 | 
						|
	/*
 | 
						|
	 * Storing a multi-index entry over entries with marks gives the
 | 
						|
	 * entire entry the union of the marks
 | 
						|
	 */
 | 
						|
	BUG_ON((index % 4) != 0);
 | 
						|
	for (order = 2; order < max_order; order++) {
 | 
						|
		unsigned long base = round_down(index, 1UL << order);
 | 
						|
		unsigned long next = base + (1UL << order);
 | 
						|
		unsigned long i;
 | 
						|
 | 
						|
		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
 | 
						|
		xa_set_mark(xa, index + 1, XA_MARK_0);
 | 
						|
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
 | 
						|
		xa_set_mark(xa, index + 2, XA_MARK_2);
 | 
						|
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
 | 
						|
		xa_store_order(xa, index, order, xa_mk_index(index),
 | 
						|
				GFP_KERNEL);
 | 
						|
		for (i = base; i < next; i++) {
 | 
						|
			XA_STATE(xas, xa, i);
 | 
						|
			unsigned int seen = 0;
 | 
						|
			void *entry;
 | 
						|
 | 
						|
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
 | 
						|
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
 | 
						|
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));
 | 
						|
 | 
						|
			/* We should see two elements in the array */
 | 
						|
			rcu_read_lock();
 | 
						|
			xas_for_each(&xas, entry, ULONG_MAX)
 | 
						|
				seen++;
 | 
						|
			rcu_read_unlock();
 | 
						|
			XA_BUG_ON(xa, seen != 2);
 | 
						|
 | 
						|
			/* One of which is marked */
 | 
						|
			xas_set(&xas, 0);
 | 
						|
			seen = 0;
 | 
						|
			rcu_read_lock();
 | 
						|
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
 | 
						|
				seen++;
 | 
						|
			rcu_read_unlock();
 | 
						|
			XA_BUG_ON(xa, seen != 1);
 | 
						|
		}
 | 
						|
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
 | 
						|
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
 | 
						|
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
 | 
						|
		xa_erase_index(xa, index);
 | 
						|
		xa_erase_index(xa, next);
 | 
						|
		XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
	}
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_xa_mark_2(struct xarray *xa)
 | 
						|
{
 | 
						|
	XA_STATE(xas, xa, 0);
 | 
						|
	unsigned long index;
 | 
						|
	unsigned int count = 0;
 | 
						|
	void *entry;
 | 
						|
 | 
						|
	xa_store_index(xa, 0, GFP_KERNEL);
 | 
						|
	xa_set_mark(xa, 0, XA_MARK_0);
 | 
						|
	xas_lock(&xas);
 | 
						|
	xas_load(&xas);
 | 
						|
	xas_init_marks(&xas);
 | 
						|
	xas_unlock(&xas);
 | 
						|
	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);
 | 
						|
 | 
						|
	for (index = 3500; index < 4500; index++) {
 | 
						|
		xa_store_index(xa, index, GFP_KERNEL);
 | 
						|
		xa_set_mark(xa, index, XA_MARK_0);
 | 
						|
	}
 | 
						|
 | 
						|
	xas_reset(&xas);
 | 
						|
	rcu_read_lock();
 | 
						|
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
 | 
						|
		count++;
 | 
						|
	rcu_read_unlock();
 | 
						|
	XA_BUG_ON(xa, count != 1000);
 | 
						|
 | 
						|
	xas_lock(&xas);
 | 
						|
	xas_for_each(&xas, entry, ULONG_MAX) {
 | 
						|
		xas_init_marks(&xas);
 | 
						|
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
 | 
						|
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
 | 
						|
	}
 | 
						|
	xas_unlock(&xas);
 | 
						|
 | 
						|
	xa_destroy(xa);
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_xa_mark_3(struct xarray *xa)
 | 
						|
{
 | 
						|
#ifdef CONFIG_XARRAY_MULTI
 | 
						|
	XA_STATE(xas, xa, 0x41);
 | 
						|
	void *entry;
 | 
						|
	int count = 0;
 | 
						|
 | 
						|
	xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
 | 
						|
	xa_set_mark(xa, 0x41, XA_MARK_0);
 | 
						|
 | 
						|
	rcu_read_lock();
 | 
						|
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
 | 
						|
		count++;
 | 
						|
		XA_BUG_ON(xa, entry != xa_mk_index(0x40));
 | 
						|
	}
 | 
						|
	XA_BUG_ON(xa, count != 1);
 | 
						|
	rcu_read_unlock();
 | 
						|
	xa_destroy(xa);
 | 
						|
#endif
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_xa_mark(struct xarray *xa)
 | 
						|
{
 | 
						|
	unsigned long index;
 | 
						|
 | 
						|
	for (index = 0; index < 16384; index += 4)
 | 
						|
		check_xa_mark_1(xa, index);
 | 
						|
 | 
						|
	check_xa_mark_2(xa);
 | 
						|
	check_xa_mark_3(xa);
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_xa_shrink(struct xarray *xa)
 | 
						|
{
 | 
						|
	XA_STATE(xas, xa, 1);
 | 
						|
	struct xa_node *node;
 | 
						|
	unsigned int order;
 | 
						|
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;
 | 
						|
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
 | 
						|
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
 | 
						|
 | 
						|
	/*
 | 
						|
	 * Check that erasing the entry at 1 shrinks the tree and properly
 | 
						|
	 * marks the node as being deleted.
 | 
						|
	 */
 | 
						|
	xas_lock(&xas);
 | 
						|
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
 | 
						|
	node = xas.xa_node;
 | 
						|
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
 | 
						|
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
 | 
						|
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
 | 
						|
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
 | 
						|
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
 | 
						|
	xas_unlock(&xas);
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
 | 
						|
	xa_erase_index(xa, 0);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
 | 
						|
	for (order = 0; order < max_order; order++) {
 | 
						|
		unsigned long max = (1UL << order) - 1;
 | 
						|
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
 | 
						|
		rcu_read_lock();
 | 
						|
		node = xa_head(xa);
 | 
						|
		rcu_read_unlock();
 | 
						|
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
 | 
						|
				NULL);
 | 
						|
		rcu_read_lock();
 | 
						|
		XA_BUG_ON(xa, xa_head(xa) == node);
 | 
						|
		rcu_read_unlock();
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
 | 
						|
		xa_erase_index(xa, ULONG_MAX);
 | 
						|
		XA_BUG_ON(xa, xa->xa_head != node);
 | 
						|
		xa_erase_index(xa, 0);
 | 
						|
	}
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_insert(struct xarray *xa)
 | 
						|
{
 | 
						|
	unsigned long i;
 | 
						|
 | 
						|
	for (i = 0; i < 1024; i++) {
 | 
						|
		xa_insert_index(xa, i);
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
 | 
						|
		xa_erase_index(xa, i);
 | 
						|
	}
 | 
						|
 | 
						|
	for (i = 10; i < BITS_PER_LONG; i++) {
 | 
						|
		xa_insert_index(xa, 1UL << i);
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
 | 
						|
		xa_erase_index(xa, 1UL << i);
 | 
						|
 | 
						|
		xa_insert_index(xa, (1UL << i) - 1);
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
 | 
						|
		xa_erase_index(xa, (1UL << i) - 1);
 | 
						|
	}
 | 
						|
 | 
						|
	xa_insert_index(xa, ~0UL);
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
 | 
						|
	xa_erase_index(xa, ~0UL);
 | 
						|
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_cmpxchg(struct xarray *xa)
 | 
						|
{
 | 
						|
	void *FIVE = xa_mk_value(5);
 | 
						|
	void *SIX = xa_mk_value(6);
 | 
						|
	void *LOTS = xa_mk_value(12345678);
 | 
						|
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
 | 
						|
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
 | 
						|
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
 | 
						|
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
 | 
						|
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
 | 
						|
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
 | 
						|
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
 | 
						|
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
 | 
						|
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
 | 
						|
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
 | 
						|
	xa_erase_index(xa, 12345678);
 | 
						|
	xa_erase_index(xa, 5);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_cmpxchg_order(struct xarray *xa)
 | 
						|
{
 | 
						|
#ifdef CONFIG_XARRAY_MULTI
 | 
						|
	void *FIVE = xa_mk_value(5);
 | 
						|
	unsigned int i, order = 3;
 | 
						|
 | 
						|
	XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL));
 | 
						|
 | 
						|
	/* Check entry FIVE has the order saved */
 | 
						|
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order);
 | 
						|
 | 
						|
	/* Check all the tied indexes have the same entry and order */
 | 
						|
	for (i = 0; i < (1 << order); i++) {
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
 | 
						|
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
 | 
						|
	}
 | 
						|
 | 
						|
	/* Ensure that nothing is stored at index '1 << order' */
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL);
 | 
						|
 | 
						|
	/*
 | 
						|
	 * Additionally, keep the node information and the order at
 | 
						|
	 * '1 << order'
 | 
						|
	 */
 | 
						|
	XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL));
 | 
						|
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
 | 
						|
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
 | 
						|
	}
 | 
						|
 | 
						|
	/* Conditionally replace FIVE entry at index '0' with NULL */
 | 
						|
	XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE);
 | 
						|
 | 
						|
	/* Verify the order is lost at FIVE (and old) entries */
 | 
						|
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0);
 | 
						|
 | 
						|
	/* Verify the order and entries are lost in all the tied indexes */
 | 
						|
	for (i = 0; i < (1 << order); i++) {
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, i) != NULL);
 | 
						|
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
 | 
						|
	}
 | 
						|
 | 
						|
	/* Verify node and order are kept at '1 << order' */
 | 
						|
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
 | 
						|
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
 | 
						|
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
 | 
						|
	}
 | 
						|
 | 
						|
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
#endif
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_reserve(struct xarray *xa)
 | 
						|
{
 | 
						|
	void *entry;
 | 
						|
	unsigned long index;
 | 
						|
	int count;
 | 
						|
 | 
						|
	/* An array with a reserved entry is not empty */
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 | 
						|
	XA_BUG_ON(xa, xa_empty(xa));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 12345678));
 | 
						|
	xa_release(xa, 12345678);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
 | 
						|
	/* Releasing a used entry does nothing */
 | 
						|
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 | 
						|
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
 | 
						|
	xa_release(xa, 12345678);
 | 
						|
	xa_erase_index(xa, 12345678);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
 | 
						|
	/* cmpxchg sees a reserved entry as ZERO */
 | 
						|
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 | 
						|
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
 | 
						|
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
 | 
						|
	xa_release(xa, 12345678);
 | 
						|
	xa_erase_index(xa, 12345678);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
 | 
						|
	/* xa_insert treats it as busy */
 | 
						|
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 | 
						|
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
 | 
						|
			-EBUSY);
 | 
						|
	XA_BUG_ON(xa, xa_empty(xa));
 | 
						|
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
 | 
						|
	/* Can iterate through a reserved entry */
 | 
						|
	xa_store_index(xa, 5, GFP_KERNEL);
 | 
						|
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
 | 
						|
	xa_store_index(xa, 7, GFP_KERNEL);
 | 
						|
 | 
						|
	count = 0;
 | 
						|
	xa_for_each(xa, index, entry) {
 | 
						|
		XA_BUG_ON(xa, index != 5 && index != 7);
 | 
						|
		count++;
 | 
						|
	}
 | 
						|
	XA_BUG_ON(xa, count != 2);
 | 
						|
 | 
						|
	/* If we free a reserved entry, we should be able to allocate it */
 | 
						|
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
 | 
						|
		u32 id;
 | 
						|
 | 
						|
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
 | 
						|
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
 | 
						|
		XA_BUG_ON(xa, id != 8);
 | 
						|
 | 
						|
		xa_release(xa, 6);
 | 
						|
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
 | 
						|
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
 | 
						|
		XA_BUG_ON(xa, id != 6);
 | 
						|
	}
 | 
						|
 | 
						|
	xa_destroy(xa);
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_xas_erase(struct xarray *xa)
 | 
						|
{
 | 
						|
	XA_STATE(xas, xa, 0);
 | 
						|
	void *entry;
 | 
						|
	unsigned long i, j;
 | 
						|
 | 
						|
	for (i = 0; i < 200; i++) {
 | 
						|
		for (j = i; j < 2 * i + 17; j++) {
 | 
						|
			xas_set(&xas, j);
 | 
						|
			do {
 | 
						|
				xas_lock(&xas);
 | 
						|
				xas_store(&xas, xa_mk_index(j));
 | 
						|
				xas_unlock(&xas);
 | 
						|
			} while (xas_nomem(&xas, GFP_KERNEL));
 | 
						|
		}
 | 
						|
 | 
						|
		xas_set(&xas, ULONG_MAX);
 | 
						|
		do {
 | 
						|
			xas_lock(&xas);
 | 
						|
			xas_store(&xas, xa_mk_value(0));
 | 
						|
			xas_unlock(&xas);
 | 
						|
		} while (xas_nomem(&xas, GFP_KERNEL));
 | 
						|
 | 
						|
		xas_lock(&xas);
 | 
						|
		xas_store(&xas, NULL);
 | 
						|
 | 
						|
		xas_set(&xas, 0);
 | 
						|
		j = i;
 | 
						|
		xas_for_each(&xas, entry, ULONG_MAX) {
 | 
						|
			XA_BUG_ON(xa, entry != xa_mk_index(j));
 | 
						|
			xas_store(&xas, NULL);
 | 
						|
			j++;
 | 
						|
		}
 | 
						|
		xas_unlock(&xas);
 | 
						|
		XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
	}
 | 
						|
}
 | 
						|
 | 
						|
#ifdef CONFIG_XARRAY_MULTI
 | 
						|
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
 | 
						|
		unsigned int order)
 | 
						|
{
 | 
						|
	XA_STATE(xas, xa, index);
 | 
						|
	unsigned long min = index & ~((1UL << order) - 1);
 | 
						|
	unsigned long max = min + (1UL << order);
 | 
						|
 | 
						|
	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 | 
						|
 | 
						|
	xas_lock(&xas);
 | 
						|
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
 | 
						|
	xas_unlock(&xas);
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 | 
						|
 | 
						|
	xa_erase_index(xa, min);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
 | 
						|
		unsigned int order)
 | 
						|
{
 | 
						|
	XA_STATE(xas, xa, index);
 | 
						|
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
 | 
						|
 | 
						|
	xas_lock(&xas);
 | 
						|
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
 | 
						|
	XA_BUG_ON(xa, xas.xa_index != index);
 | 
						|
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
 | 
						|
	xas_unlock(&xas);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
 | 
						|
		unsigned int order)
 | 
						|
{
 | 
						|
	XA_STATE(xas, xa, 0);
 | 
						|
	void *entry;
 | 
						|
	int n = 0;
 | 
						|
 | 
						|
	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
 | 
						|
 | 
						|
	xas_lock(&xas);
 | 
						|
	xas_for_each(&xas, entry, ULONG_MAX) {
 | 
						|
		XA_BUG_ON(xa, entry != xa_mk_index(index));
 | 
						|
		n++;
 | 
						|
	}
 | 
						|
	XA_BUG_ON(xa, n != 1);
 | 
						|
	xas_set(&xas, index + 1);
 | 
						|
	xas_for_each(&xas, entry, ULONG_MAX) {
 | 
						|
		XA_BUG_ON(xa, entry != xa_mk_index(index));
 | 
						|
		n++;
 | 
						|
	}
 | 
						|
	XA_BUG_ON(xa, n != 2);
 | 
						|
	xas_unlock(&xas);
 | 
						|
 | 
						|
	xa_destroy(xa);
 | 
						|
}
 | 
						|
#endif
 | 
						|
 | 
						|
static noinline void check_multi_store(struct xarray *xa)
 | 
						|
{
 | 
						|
#ifdef CONFIG_XARRAY_MULTI
 | 
						|
	unsigned long i, j, k;
 | 
						|
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;
 | 
						|
 | 
						|
	/* Loading from any position returns the same value */
 | 
						|
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
 | 
						|
	rcu_read_lock();
 | 
						|
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
 | 
						|
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
 | 
						|
	rcu_read_unlock();
 | 
						|
 | 
						|
	/* Storing adjacent to the value does not alter the value */
 | 
						|
	xa_store(xa, 3, xa, GFP_KERNEL);
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
 | 
						|
	rcu_read_lock();
 | 
						|
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
 | 
						|
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
 | 
						|
	rcu_read_unlock();
 | 
						|
 | 
						|
	/* Overwriting multiple indexes works */
 | 
						|
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
 | 
						|
	rcu_read_lock();
 | 
						|
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
 | 
						|
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
 | 
						|
	rcu_read_unlock();
 | 
						|
 | 
						|
	/* We can erase multiple values with a single store */
 | 
						|
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
 | 
						|
	/* Even when the first slot is empty but the others aren't */
 | 
						|
	xa_store_index(xa, 1, GFP_KERNEL);
 | 
						|
	xa_store_index(xa, 2, GFP_KERNEL);
 | 
						|
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
 | 
						|
	for (i = 0; i < max_order; i++) {
 | 
						|
		for (j = 0; j < max_order; j++) {
 | 
						|
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
 | 
						|
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);
 | 
						|
 | 
						|
			for (k = 0; k < max_order; k++) {
 | 
						|
				void *entry = xa_load(xa, (1UL << k) - 1);
 | 
						|
				if ((i < k) && (j < k))
 | 
						|
					XA_BUG_ON(xa, entry != NULL);
 | 
						|
				else
 | 
						|
					XA_BUG_ON(xa, entry != xa_mk_index(j));
 | 
						|
			}
 | 
						|
 | 
						|
			xa_erase(xa, 0);
 | 
						|
			XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
		}
 | 
						|
	}
 | 
						|
 | 
						|
	for (i = 0; i < 20; i++) {
 | 
						|
		check_multi_store_1(xa, 200, i);
 | 
						|
		check_multi_store_1(xa, 0, i);
 | 
						|
		check_multi_store_1(xa, (1UL << i) + 1, i);
 | 
						|
	}
 | 
						|
	check_multi_store_2(xa, 4095, 9);
 | 
						|
 | 
						|
	for (i = 1; i < 20; i++) {
 | 
						|
		check_multi_store_3(xa, 0, i);
 | 
						|
		check_multi_store_3(xa, 1UL << i, i);
 | 
						|
	}
 | 
						|
#endif
 | 
						|
}
 | 
						|
 | 
						|
#ifdef CONFIG_XARRAY_MULTI
 | 
						|
/* mimics page cache __filemap_add_folio() */
 | 
						|
static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
 | 
						|
						  unsigned long index,
 | 
						|
						  unsigned int order,
 | 
						|
						  void *p)
 | 
						|
{
 | 
						|
	XA_STATE(xas, xa, index);
 | 
						|
	unsigned int nrpages = 1UL << order;
 | 
						|
 | 
						|
	/* users are responsible for index alignemnt to the order when adding */
 | 
						|
	XA_BUG_ON(xa, index & (nrpages - 1));
 | 
						|
 | 
						|
	xas_set_order(&xas, index, order);
 | 
						|
 | 
						|
	do {
 | 
						|
		xas_lock_irq(&xas);
 | 
						|
		xas_store(&xas, p);
 | 
						|
		xas_unlock_irq(&xas);
 | 
						|
		/*
 | 
						|
		 * In our selftest case the only failure we can expect is for
 | 
						|
		 * there not to be enough memory as we're not mimicking the
 | 
						|
		 * entire page cache, so verify that's the only error we can run
 | 
						|
		 * into here. The xas_nomem() which follows will ensure to fix
 | 
						|
		 * that condition for us so to chug on on the loop.
 | 
						|
		 */
 | 
						|
		XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
 | 
						|
	} while (xas_nomem(&xas, GFP_KERNEL));
 | 
						|
 | 
						|
	XA_BUG_ON(xa, xas_error(&xas));
 | 
						|
	XA_BUG_ON(xa, xa_load(xa, index) != p);
 | 
						|
}
 | 
						|
 | 
						|
/* mimics page_cache_delete() */
 | 
						|
static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa,
 | 
						|
							unsigned long index,
 | 
						|
							unsigned int order)
 | 
						|
{
 | 
						|
	XA_STATE(xas, xa, index);
 | 
						|
 | 
						|
	xas_set_order(&xas, index, order);
 | 
						|
	xas_store(&xas, NULL);
 | 
						|
	xas_init_marks(&xas);
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_xa_multi_store_adv_delete(struct xarray *xa,
 | 
						|
						     unsigned long index,
 | 
						|
						     unsigned int order)
 | 
						|
{
 | 
						|
	xa_lock_irq(xa);
 | 
						|
	check_xa_multi_store_adv_del_entry(xa, index, order);
 | 
						|
	xa_unlock_irq(xa);
 | 
						|
}
 | 
						|
 | 
						|
/* mimics page cache filemap_get_entry() */
 | 
						|
static noinline void *test_get_entry(struct xarray *xa, unsigned long index)
 | 
						|
{
 | 
						|
	XA_STATE(xas, xa, index);
 | 
						|
	void *p;
 | 
						|
	static unsigned int loops = 0;
 | 
						|
 | 
						|
	rcu_read_lock();
 | 
						|
repeat:
 | 
						|
	xas_reset(&xas);
 | 
						|
	p = xas_load(&xas);
 | 
						|
	if (xas_retry(&xas, p))
 | 
						|
		goto repeat;
 | 
						|
	rcu_read_unlock();
 | 
						|
 | 
						|
	/*
 | 
						|
	 * This is not part of the page cache, this selftest is pretty
 | 
						|
	 * aggressive and does not want to trust the xarray API but rather
 | 
						|
	 * test it, and for order 20 (4 GiB block size) we can loop over
 | 
						|
	 * over a million entries which can cause a soft lockup. Page cache
 | 
						|
	 * APIs won't be stupid, proper page cache APIs loop over the proper
 | 
						|
	 * order so when using a larger order we skip shared entries.
 | 
						|
	 */
 | 
						|
	if (++loops % XA_CHECK_SCHED == 0)
 | 
						|
		schedule();
 | 
						|
 | 
						|
	return p;
 | 
						|
}
 | 
						|
 | 
						|
static unsigned long some_val = 0xdeadbeef;
 | 
						|
static unsigned long some_val_2 = 0xdeaddead;
 | 
						|
 | 
						|
/* mimics the page cache usage */
 | 
						|
static noinline void check_xa_multi_store_adv(struct xarray *xa,
 | 
						|
					      unsigned long pos,
 | 
						|
					      unsigned int order)
 | 
						|
{
 | 
						|
	unsigned int nrpages = 1UL << order;
 | 
						|
	unsigned long index, base, next_index, next_next_index;
 | 
						|
	unsigned int i;
 | 
						|
 | 
						|
	index = pos >> PAGE_SHIFT;
 | 
						|
	base = round_down(index, nrpages);
 | 
						|
	next_index = round_down(base + nrpages, nrpages);
 | 
						|
	next_next_index = round_down(next_index + nrpages, nrpages);
 | 
						|
 | 
						|
	check_xa_multi_store_adv_add(xa, base, order, &some_val);
 | 
						|
 | 
						|
	for (i = 0; i < nrpages; i++)
 | 
						|
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);
 | 
						|
 | 
						|
	XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);
 | 
						|
 | 
						|
	/* Use order 0 for the next item */
 | 
						|
	check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2);
 | 
						|
	XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);
 | 
						|
 | 
						|
	/* Remove the next item */
 | 
						|
	check_xa_multi_store_adv_delete(xa, next_index, 0);
 | 
						|
 | 
						|
	/* Now use order for a new pointer */
 | 
						|
	check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
 | 
						|
 | 
						|
	for (i = 0; i < nrpages; i++)
 | 
						|
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
 | 
						|
 | 
						|
	check_xa_multi_store_adv_delete(xa, next_index, order);
 | 
						|
	check_xa_multi_store_adv_delete(xa, base, order);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
 | 
						|
	/* starting fresh again */
 | 
						|
 | 
						|
	/* let's test some holes now */
 | 
						|
 | 
						|
	/* hole at base and next_next */
 | 
						|
	check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
 | 
						|
 | 
						|
	for (i = 0; i < nrpages; i++)
 | 
						|
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
 | 
						|
 | 
						|
	for (i = 0; i < nrpages; i++)
 | 
						|
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
 | 
						|
 | 
						|
	for (i = 0; i < nrpages; i++)
 | 
						|
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);
 | 
						|
 | 
						|
	check_xa_multi_store_adv_delete(xa, next_index, order);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
 | 
						|
	/* hole at base and next */
 | 
						|
 | 
						|
	check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2);
 | 
						|
 | 
						|
	for (i = 0; i < nrpages; i++)
 | 
						|
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
 | 
						|
 | 
						|
	for (i = 0; i < nrpages; i++)
 | 
						|
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL);
 | 
						|
 | 
						|
	for (i = 0; i < nrpages; i++)
 | 
						|
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2);
 | 
						|
 | 
						|
	check_xa_multi_store_adv_delete(xa, next_next_index, order);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
}
 | 
						|
#endif
 | 
						|
 | 
						|
static noinline void check_multi_store_advanced(struct xarray *xa)
 | 
						|
{
 | 
						|
#ifdef CONFIG_XARRAY_MULTI
 | 
						|
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
 | 
						|
	unsigned long end = ULONG_MAX/2;
 | 
						|
	unsigned long pos, i;
 | 
						|
 | 
						|
	/*
 | 
						|
	 * About 117 million tests below.
 | 
						|
	 */
 | 
						|
	for (pos = 7; pos < end; pos = (pos * pos) + 564) {
 | 
						|
		for (i = 0; i < max_order; i++) {
 | 
						|
			check_xa_multi_store_adv(xa, pos, i);
 | 
						|
			check_xa_multi_store_adv(xa, pos + 157, i);
 | 
						|
		}
 | 
						|
	}
 | 
						|
#endif
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
 | 
						|
{
 | 
						|
	int i;
 | 
						|
	u32 id;
 | 
						|
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
	/* An empty array should assign %base to the first alloc */
 | 
						|
	xa_alloc_index(xa, base, GFP_KERNEL);
 | 
						|
 | 
						|
	/* Erasing it should make the array empty again */
 | 
						|
	xa_erase_index(xa, base);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
 | 
						|
	/* And it should assign %base again */
 | 
						|
	xa_alloc_index(xa, base, GFP_KERNEL);
 | 
						|
 | 
						|
	/* Allocating and then erasing a lot should not lose base */
 | 
						|
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
 | 
						|
		xa_alloc_index(xa, i, GFP_KERNEL);
 | 
						|
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
 | 
						|
		xa_erase_index(xa, i);
 | 
						|
	xa_alloc_index(xa, base, GFP_KERNEL);
 | 
						|
 | 
						|
	/* Destroying the array should do the same as erasing */
 | 
						|
	xa_destroy(xa);
 | 
						|
 | 
						|
	/* And it should assign %base again */
 | 
						|
	xa_alloc_index(xa, base, GFP_KERNEL);
 | 
						|
 | 
						|
	/* The next assigned ID should be base+1 */
 | 
						|
	xa_alloc_index(xa, base + 1, GFP_KERNEL);
 | 
						|
	xa_erase_index(xa, base + 1);
 | 
						|
 | 
						|
	/* Storing a value should mark it used */
 | 
						|
	xa_store_index(xa, base + 1, GFP_KERNEL);
 | 
						|
	xa_alloc_index(xa, base + 2, GFP_KERNEL);
 | 
						|
 | 
						|
	/* If we then erase base, it should be free */
 | 
						|
	xa_erase_index(xa, base);
 | 
						|
	xa_alloc_index(xa, base, GFP_KERNEL);
 | 
						|
 | 
						|
	xa_erase_index(xa, base + 1);
 | 
						|
	xa_erase_index(xa, base + 2);
 | 
						|
 | 
						|
	for (i = 1; i < 5000; i++) {
 | 
						|
		xa_alloc_index(xa, base + i, GFP_KERNEL);
 | 
						|
	}
 | 
						|
 | 
						|
	xa_destroy(xa);
 | 
						|
 | 
						|
	/* Check that we fail properly at the limit of allocation */
 | 
						|
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
 | 
						|
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
 | 
						|
				GFP_KERNEL) != 0);
 | 
						|
	XA_BUG_ON(xa, id != 0xfffffffeU);
 | 
						|
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
 | 
						|
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
 | 
						|
				GFP_KERNEL) != 0);
 | 
						|
	XA_BUG_ON(xa, id != 0xffffffffU);
 | 
						|
	id = 3;
 | 
						|
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
 | 
						|
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
 | 
						|
				GFP_KERNEL) != -EBUSY);
 | 
						|
	XA_BUG_ON(xa, id != 3);
 | 
						|
	xa_destroy(xa);
 | 
						|
 | 
						|
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
 | 
						|
				GFP_KERNEL) != -EBUSY);
 | 
						|
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
 | 
						|
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
 | 
						|
				GFP_KERNEL) != -EBUSY);
 | 
						|
	xa_erase_index(xa, 3);
 | 
						|
	XA_BUG_ON(xa, !xa_empty(xa));
 | 
						|
}
 | 
						|
 | 
						|
static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
		xa_erase_index(xa, index);
	}

	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}

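/*
 * check_xa_alloc_3() exercises xa_alloc_cyclic(): IDs continue from
 * @next rather than from the lowest free ID, the return value is 1
 * (not 0) when the cursor wraps back to the bottom of the range, and
 * wrapping past UINT_MAX under xa_limit_32b lands back at @base.
 */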
static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}

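/*
 * xa0 hands out IDs starting at 0; the XA_FLAGS_ALLOC1 variant xa1
 * starts at 1, so every allocation test runs with both bases.
 */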
static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);

static noinline void check_xa_alloc(void)
{
	check_xa_alloc_1(&xa0, 0);
	check_xa_alloc_1(&xa1, 1);
	check_xa_alloc_2(&xa0, 0);
	check_xa_alloc_2(&xa1, 1);
	check_xa_alloc_3(&xa0, 0);
	check_xa_alloc_3(&xa1, 1);
}

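/*
 * Replace everything in [start, start + (1 << order)) with a single
 * multi-index entry, using the xas_for_each_conflict() / xas_store() /
 * xas_nomem() retry idiom; @present is the number of conflicting
 * entries the walk is expected to see.
 */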
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}

static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

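/*
 * A multi-index entry is reported at every index it covers: xa_find()
 * returns it whether the search starts at, before or inside its range,
 * while xa_find_after() moves past it to the next entry.
 */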
static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;

	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, index != multi);
	index = multi + 1;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, (index < multi) || (index >= next));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(next));
	XA_BUG_ON(xa, index != next);
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

	xa_erase_index(xa, multi);
	xa_erase_index(xa, next);
	xa_erase_index(xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

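/*
 * Erasing entries while iterating under the RCU read lock must not
 * confuse the iterator; the multi-index entry is removed from inside
 * the xas_for_each() loop that found it.
 */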
static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

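/*
 * xa_find_after() with a start index that lands inside a multi-index
 * entry must not return that entry, since the entry begins at a lower
 * index.
 */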
static noinline void check_multi_find_3(struct xarray *xa)
{
	unsigned int order;

	for (order = 5; order < order_limit; order++) {
		unsigned long index = 1UL << (order - 5);

		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
		xa_erase_index(xa, 0);
	}
}

static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
								XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
								XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

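/*
 * xa_for_each() must see nothing at all in an empty array, and must
 * visit present entries in ascending index order.
 */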
static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index;

	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		xa_store_index(xa, index, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}

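/*
 * A marked search whose start index lies beyond its max must leave
 * the xa_state at XAS_RESTART rather than pointing into the tree.
 */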
static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}

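/*
 * ULONG_MAX is the awkward edge for xa_find_after(): the first call
 * must find the entry stored there, and the second must stop instead
 * of wrapping around to index 0.
 */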
static noinline void check_find_4(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry);

	xa_erase_index(xa, ULONG_MAX);
}

static noinline void check_find(struct xarray *xa)
{
	unsigned i;

	check_find_1(xa);
	check_find_2(xa);
	check_find_3(xa);
	check_find_4(xa);

	for (i = 2; i < 10; i++)
		check_multi_find_1(xa, i);
	check_multi_find_2(xa);
	check_multi_find_3(xa);
}

/* See find_swap_entry() in mm/shmem.c */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}

static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

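/*
 * xas_pause() drops the iterator's position so the walk can be
 * resumed after releasing the lock; pausing after every entry must
 * still visit each multi-index entry exactly once.
 */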
static noinline void check_pause(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int order;
	unsigned long index = 1;
	unsigned int count = 0;

	for (order = 0; order < order_limit; order++) {
		XA_BUG_ON(xa, xa_store_order(xa, index, order,
					xa_mk_index(index), GFP_KERNEL));
		index += 1UL << order;
	}

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	count = 0;
	xas_set(&xas, 0);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
		xas_pause(&xas);
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	xa_destroy(xa);
}

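/*
 * Walking off either end of the array is the edge case here: a lone
 * entry at index 0 (or at ULONG_MAX below) must be returned once,
 * after which the walk sees NULL.
 */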
static noinline void check_move_tiny(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_max(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xas_set(&xas, 0);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	xas_pause(&xas);
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

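/*
 * With entries at 0 and @idx only, single-stepping with xas_next()
 * and xas_prev() must report the index at every step, NULL in the
 * gaps, and wrap between ULONG_MAX and 0 with the index tracking
 * each move.
 */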
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	check_move_tiny(xa);
	check_move_max(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}

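/*
 * Populate (1 << order) consecutive indices one slot at a time:
 * xas_create_range() builds the nodes, then each slot is filled with
 * xas_store()/xas_next() under the lock, retrying on xas_nomem().
 */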
static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

static noinline void check_create_range_1(struct xarray *xa,
		unsigned long index, unsigned order)
{
	unsigned long i;

	xa_store_many_order(xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_2(struct xarray *xa, unsigned order)
{
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}

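/*
 * Like xa_store_many_order(), but with one entry already present in
 * the range: xas_store() must return the old entry at that index and
 * NULL everywhere else.
 */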
static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_5(struct xarray *xa,
		unsigned long index, unsigned int order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	for (i = 0; i < order + 10; i++) {
		do {
			xas_lock(&xas);
			xas_create_range(&xas);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));
	}

	xa_destroy(xa);
}

static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);

		check_create_range_5(xa, 0, order);
		check_create_range_5(xa, (1U << order), order);
	}

	check_create_range_3();
}

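/*
 * xa_store_range() stores one entry over an arbitrary [first, last]
 * span; both ends must load it back and the neighbouring indices
 * must stay empty.
 */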
static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_store_range(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
			__check_store_range(xa, i, j);
			__check_store_range(xa, 128 + i, 128 + j);
			__check_store_range(xa, 4095 + i, 4095 + j);
			__check_store_range(xa, 4096 + i, 4096 + j);
			__check_store_range(xa, 123456 + i, 123456 + j);
			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
		}
	}
}

#ifdef CONFIG_XARRAY_MULTI
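/*
 * Split a multi-index entry of @order into @new_order pieces with
 * xas_split_alloc()/xas_split(), then overwrite each piece under the
 * same lock; loads, marks and marked iteration must all see the new
 * granularity.
 */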
static void check_split_1(struct xarray *xa, unsigned long index,
				unsigned int order, unsigned int new_order)
{
	XA_STATE_ORDER(xas, xa, index, new_order);
	unsigned int i, found;
	void *entry;

	xa_store_order(xa, index, order, xa, GFP_KERNEL);
	xa_set_mark(xa, index, XA_MARK_1);

	xas_split_alloc(&xas, xa, order, GFP_KERNEL);
	xas_lock(&xas);
	xas_split(&xas, xa, order);
	for (i = 0; i < (1 << order); i += (1 << new_order))
		__xa_store(xa, index + i, xa_mk_index(index + i), 0);
	xas_unlock(&xas);

	for (i = 0; i < (1 << order); i++) {
		unsigned int val = index + (i & ~((1 << new_order) - 1));
		XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
	}

	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	xas_set_order(&xas, index, 0);
	found = 0;
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
		found++;
		XA_BUG_ON(xa, xa_is_internal(entry));
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, found != 1 << (order - new_order));

	xa_destroy(xa);
}

static noinline void check_split(struct xarray *xa)
{
	unsigned int order, new_order;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
		for (new_order = 0; new_order < order; new_order++) {
			check_split_1(xa, 0, order, new_order);
			check_split_1(xa, 1UL << order, order, new_order);
			check_split_1(xa, 3UL << order, order, new_order);
		}
	}
}
#else
static void check_split(struct xarray *xa) { }
#endif

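/*
 * Pointers stored at every byte offset of a string have arbitrary
 * low bits; iteration must never mistake such an entry for an error
 * entry.
 */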
static void check_align_1(struct xarray *xa, char *name)
{
	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}

/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
static void check_align_2(struct xarray *xa, char *name)
{
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_align(struct xarray *xa)
{
	char name[] = "Motorola 68000";

	check_align_1(xa, name);
	check_align_1(xa, name + 1);
	check_align_1(xa, name + 2);
	check_align_1(xa, name + 3);
	check_align_2(xa, name);
}

static LIST_HEAD(shadow_nodes);

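/*
 * A scaled-down model of the page cache's shadow-entry accounting:
 * a node whose slots hold only value entries is put on shadow_nodes,
 * much as mm/workingset.c tracks nodes full of shadow entries.
 */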
static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			list_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}

static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xa_delete_node(node, test_update_node);
	}
	xa_unlock(xa);
}

static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}

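/*
 * xa_get_order() reports the order of the entry occupying an index;
 * every index covered by a multi-index entry must report the order
 * it was stored with.
 */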
static noinline void check_get_order(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (i = 0; i < 3; i++)
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xa_store_order(xa, i << order, order,
					xa_mk_index(i << order), GFP_KERNEL);
			for (j = i << order; j < (i + 1) << order; j++)
				XA_BUG_ON(xa, xa_get_order(xa, j) != order);
			xa_erase(xa, i << order);
		}
	}
}

static noinline void check_xas_get_order(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xas_set_order(&xas, i << order, order);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(i));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));

			for (j = i << order; j < (i + 1) << order; j++) {
				xas_set_order(&xas, j, 0);
				rcu_read_lock();
				xas_load(&xas);
				XA_BUG_ON(xa, xas_get_order(&xas) != order);
				rcu_read_unlock();
			}

			xas_lock(&xas);
			xas_set_order(&xas, i << order, order);
			xas_store(&xas, NULL);
			xas_unlock(&xas);
		}
	}
}

static noinline void check_xas_conflict_get_order(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	void *entry;
	int only_once;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j, k;

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xas_set_order(&xas, i << order, order);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(i));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));

			/*
			 * Ensure xas_get_order works with xas_for_each_conflict.
			 */
			j = i << order;
			for (k = 0; k < order; k++) {
				only_once = 0;
				xas_set_order(&xas, j + (1 << k), k);
				xas_lock(&xas);
				xas_for_each_conflict(&xas, entry) {
					XA_BUG_ON(xa, entry != xa_mk_value(i));
					XA_BUG_ON(xa, xas_get_order(&xas) != order);
					only_once++;
				}
				XA_BUG_ON(xa, only_once != 1);
				xas_unlock(&xas);
			}

			if (order < max_order - 1) {
				only_once = 0;
				xas_set_order(&xas, (i & ~1UL) << order, order + 1);
				xas_lock(&xas);
				xas_for_each_conflict(&xas, entry) {
					XA_BUG_ON(xa, entry != xa_mk_value(i));
					XA_BUG_ON(xa, xas_get_order(&xas) != order);
					only_once++;
				}
				XA_BUG_ON(xa, only_once != 1);
				xas_unlock(&xas);
			}

			xas_set_order(&xas, i << order, order);
			xas_lock(&xas);
			xas_store(&xas, NULL);
			xas_unlock(&xas);
		}
	}
}

static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static DEFINE_XARRAY(array);

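/*
 * Module entry point: run every check against a fresh XArray and
 * fail the module load if any assertion tripped.
 */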
static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_insert(&array);
	check_cmpxchg(&array);
	check_cmpxchg_order(&array);
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_multi_store_advanced(&array);
	check_get_order(&array);
	check_xas_get_order(&array);
	check_xas_conflict_get_order(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_pause(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);
	check_split(&array);

	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}

static void xarray_exit(void)
{
}

module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");