VisionFive2 Linux kernel

StarFive Tech Linux kernel for VisionFive (JH7110) boards (mirror).

Repository statistics: more than 9999 commits, 32 branches, 54 tags.
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500    1) // SPDX-License-Identifier: GPL-2.0+
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500    2) /*
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500    3)  * test_xarray.c: Test the XArray API
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500    4)  * Copyright (c) 2017-2018 Microsoft Corporation
c44aa5e8ab58b (Matthew Wilcox (Oracle) 2020-01-17 22:13:21 -0500    5)  * Copyright (c) 2019-2020 Oracle
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500    6)  * Author: Matthew Wilcox <willy@infradead.org>
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500    7)  */
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500    8) 
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500    9) #include <linux/xarray.h>
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500   10) #include <linux/module.h>
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500   11) 
static unsigned int tests_run;		/* total XA_BUG_ON() checks executed */
static unsigned int tests_passed;	/* checks that did not trip */

/* Highest usable order: multi-index entries require CONFIG_XARRAY_MULTI. */
static const unsigned int order_limit =
		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500   17) 
#ifndef XA_DEBUG
# ifdef __KERNEL__
/* Stub: only the XA_DEBUG build provides a real xa_dump() implementation. */
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
/*
 * Test assertion.  Counts every evaluation in tests_run; on failure it
 * reports the location, dumps the XArray and the call stack but does not
 * panic, so the remainder of the suite keeps running.  Passing checks
 * are tallied in tests_passed.
 */
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500   34) 
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500   35) static void *xa_mk_index(unsigned long index)
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500   36) {
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500   37) 	return xa_mk_value(index & LONG_MAX);
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500   38) }
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500   39) 
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500   40) static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500   41) {
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500   42) 	return xa_store(xa, index, xa_mk_index(index), gfp);
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500   43) }
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500   44) 
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500   45) static void xa_insert_index(struct xarray *xa, unsigned long index)
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500   46) {
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500   47) 	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500   48) 				GFP_KERNEL) != 0);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500   49) }
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500   50) 
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400   51) static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400   52) {
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500   53) 	u32 id;
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400   54) 
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500   55) 	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400   56) 				gfp) != 0);
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400   57) 	XA_BUG_ON(xa, id != index);
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400   58) }
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400   59) 
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500   60) static void xa_erase_index(struct xarray *xa, unsigned long index)
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500   61) {
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500   62) 	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500   63) 	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500   64) }
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500   65) 
/*
 * If anyone needs this, please move it to xarray.c.  We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
/*
 * Store @entry over the 2^@order slot range containing @index.
 * Returns the entry previously at @index.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	/* Standard advanced-API loop: drop the lock so xas_nomem() can allocate. */
	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500   85) 
/* Check that xa_err() extracts the right errno from store/erase results. */
static noinline void check_xa_err(struct xarray *xa)
{
	/* Successful operations report no error. */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
	/* Storing an internal entry should fail, but it BUGs instead: */
// kills the test-suite :-(
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  101) 
/*
 * Exercise retry entries: erasing an entry next to a cached xa_state
 * leaves a retry entry visible through xas_reload(), and iteration must
 * skip over explicitly stored XA_RETRY_ENTRYs.
 */
static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	/* Erasing index 1 shrinks the tree, invalidating the cached node. */
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	/* xas_retry() only restarts for genuine retry entries. */
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	/* The stale xa_state now sees an internal (node) entry. */
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		/* Overwrite each retry entry with a real value entry. */
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  146) 
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  147) static noinline void check_xa_load(struct xarray *xa)
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  148) {
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  149) 	unsigned long i, j;
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  150) 
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  151) 	for (i = 0; i < 1024; i++) {
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  152) 		for (j = 0; j < 1024; j++) {
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  153) 			void *entry = xa_load(xa, j);
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  154) 			if (j < i)
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  155) 				XA_BUG_ON(xa, xa_to_value(entry) != j);
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  156) 			else
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  157) 				XA_BUG_ON(xa, entry);
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  158) 		}
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  159) 		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  160) 	}
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  161) 
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  162) 	for (i = 0; i < 1024; i++) {
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  163) 		for (j = 0; j < 1024; j++) {
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  164) 			void *entry = xa_load(xa, j);
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  165) 			if (j >= i)
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  166) 				XA_BUG_ON(xa, xa_to_value(entry) != j);
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  167) 			else
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  168) 				XA_BUG_ON(xa, entry);
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  169) 		}
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  170) 		xa_erase_index(xa, i);
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  171) 	}
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  172) 	XA_BUG_ON(xa, !xa_empty(xa));
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  173) }
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500  174) 
/*
 * Check mark semantics at a single index: marks on empty slots, marks
 * surviving stores, marks cleared by erase, and the union of marks when
 * a multi-index entry is stored over marked entries.
 */
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);	/* callers pass multiples of 4 */
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		/* Two marked neighbours inside the range, one entry just past it. */
		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			/* Every index in the range carries the union of the marks. */
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		/* The entry just past the range picked up no marks. */
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}
9b89a03551446 (Matthew Wilcox          2017-11-10 09:34:31 -0500  253) 
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  254) static noinline void check_xa_mark_2(struct xarray *xa)
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  255) {
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  256) 	XA_STATE(xas, xa, 0);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  257) 	unsigned long index;
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  258) 	unsigned int count = 0;
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  259) 	void *entry;
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  260) 
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  261) 	xa_store_index(xa, 0, GFP_KERNEL);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  262) 	xa_set_mark(xa, 0, XA_MARK_0);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  263) 	xas_lock(&xas);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  264) 	xas_load(&xas);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  265) 	xas_init_marks(&xas);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  266) 	xas_unlock(&xas);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  267) 	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  268) 
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  269) 	for (index = 3500; index < 4500; index++) {
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  270) 		xa_store_index(xa, index, GFP_KERNEL);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  271) 		xa_set_mark(xa, index, XA_MARK_0);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  272) 	}
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  273) 
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  274) 	xas_reset(&xas);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  275) 	rcu_read_lock();
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  276) 	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  277) 		count++;
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  278) 	rcu_read_unlock();
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  279) 	XA_BUG_ON(xa, count != 1000);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  280) 
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  281) 	xas_lock(&xas);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  282) 	xas_for_each(&xas, entry, ULONG_MAX) {
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  283) 		xas_init_marks(&xas);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  284) 		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  285) 		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  286) 	}
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  287) 	xas_unlock(&xas);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  288) 
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  289) 	xa_destroy(xa);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  290) }
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  291) 
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  292) static noinline void check_xa_mark_3(struct xarray *xa)
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  293) {
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  294) #ifdef CONFIG_XARRAY_MULTI
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  295) 	XA_STATE(xas, xa, 0x41);
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  296) 	void *entry;
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  297) 	int count = 0;
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  298) 
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  299) 	xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  300) 	xa_set_mark(xa, 0x41, XA_MARK_0);
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  301) 
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  302) 	rcu_read_lock();
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  303) 	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  304) 		count++;
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  305) 		XA_BUG_ON(xa, entry != xa_mk_index(0x40));
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  306) 	}
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  307) 	XA_BUG_ON(xa, count != 1);
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  308) 	rcu_read_unlock();
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  309) 	xa_destroy(xa);
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  310) #endif
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  311) }
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  312) 
9b89a03551446 (Matthew Wilcox          2017-11-10 09:34:31 -0500  313) static noinline void check_xa_mark(struct xarray *xa)
9b89a03551446 (Matthew Wilcox          2017-11-10 09:34:31 -0500  314) {
9b89a03551446 (Matthew Wilcox          2017-11-10 09:34:31 -0500  315) 	unsigned long index;
9b89a03551446 (Matthew Wilcox          2017-11-10 09:34:31 -0500  316) 
9b89a03551446 (Matthew Wilcox          2017-11-10 09:34:31 -0500  317) 	for (index = 0; index < 16384; index += 4)
9b89a03551446 (Matthew Wilcox          2017-11-10 09:34:31 -0500  318) 		check_xa_mark_1(xa, index);
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  319) 
adb9d9c4ccb1f (Matthew Wilcox          2018-04-09 16:52:21 -0400  320) 	check_xa_mark_2(xa);
04e9e9bb8470b (Matthew Wilcox (Oracle) 2020-06-14 21:52:04 -0400  321) 	check_xa_mark_3(xa);
9b89a03551446 (Matthew Wilcox          2017-11-10 09:34:31 -0500  322) }
9b89a03551446 (Matthew Wilcox          2017-11-10 09:34:31 -0500  323) 
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  324) static noinline void check_xa_shrink(struct xarray *xa)
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  325) {
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  326) 	XA_STATE(xas, xa, 1);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  327) 	struct xa_node *node;
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  328) 	unsigned int order;
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  329) 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  330) 
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  331) 	XA_BUG_ON(xa, !xa_empty(xa));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  332) 	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  333) 	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  334) 
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  335) 	/*
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  336) 	 * Check that erasing the entry at 1 shrinks the tree and properly
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  337) 	 * marks the node as being deleted.
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  338) 	 */
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  339) 	xas_lock(&xas);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  340) 	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  341) 	node = xas.xa_node;
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  342) 	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  343) 	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  344) 	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  345) 	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  346) 	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  347) 	XA_BUG_ON(xa, xas_load(&xas) != NULL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  348) 	xas_unlock(&xas);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  349) 	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  350) 	xa_erase_index(xa, 0);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  351) 	XA_BUG_ON(xa, !xa_empty(xa));
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  352) 
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  353) 	for (order = 0; order < max_order; order++) {
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  354) 		unsigned long max = (1UL << order) - 1;
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  355) 		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  356) 		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  357) 		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  358) 		rcu_read_lock();
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  359) 		node = xa_head(xa);
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  360) 		rcu_read_unlock();
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  361) 		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  362) 				NULL);
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  363) 		rcu_read_lock();
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  364) 		XA_BUG_ON(xa, xa_head(xa) == node);
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  365) 		rcu_read_unlock();
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  366) 		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  367) 		xa_erase_index(xa, ULONG_MAX);
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  368) 		XA_BUG_ON(xa, xa->xa_head != node);
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  369) 		xa_erase_index(xa, 0);
93eb07f72c8d8 (Matthew Wilcox          2018-09-08 12:09:52 -0400  370) 	}
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  371) }
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  372) 
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  373) static noinline void check_insert(struct xarray *xa)
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  374) {
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  375) 	unsigned long i;
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  376) 
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  377) 	for (i = 0; i < 1024; i++) {
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  378) 		xa_insert_index(xa, i);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  379) 		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  380) 		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  381) 		xa_erase_index(xa, i);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  382) 	}
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  383) 
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  384) 	for (i = 10; i < BITS_PER_LONG; i++) {
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  385) 		xa_insert_index(xa, 1UL << i);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  386) 		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  387) 		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  388) 		xa_erase_index(xa, 1UL << i);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  389) 
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  390) 		xa_insert_index(xa, (1UL << i) - 1);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  391) 		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  392) 		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  393) 		xa_erase_index(xa, (1UL << i) - 1);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  394) 	}
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  395) 
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  396) 	xa_insert_index(xa, ~0UL);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  397) 	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  398) 	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  399) 	xa_erase_index(xa, ~0UL);
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  400) 
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  401) 	XA_BUG_ON(xa, !xa_empty(xa));
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  402) }
12fd2aee6db76 (Matthew Wilcox          2019-03-09 22:25:27 -0500  403) 
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  404) static noinline void check_cmpxchg(struct xarray *xa)
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  405) {
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  406) 	void *FIVE = xa_mk_value(5);
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  407) 	void *SIX = xa_mk_value(6);
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  408) 	void *LOTS = xa_mk_value(12345678);
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  409) 
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  410) 	XA_BUG_ON(xa, !xa_empty(xa));
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  411) 	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
fd9dc93e36231 (Matthew Wilcox          2019-02-06 13:07:11 -0500  412) 	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  413) 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  414) 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  415) 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  416) 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  417) 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
062b735912b9f (Matthew Wilcox (Oracle) 2020-03-31 14:23:59 -0400  418) 	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
062b735912b9f (Matthew Wilcox (Oracle) 2020-03-31 14:23:59 -0400  419) 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
062b735912b9f (Matthew Wilcox (Oracle) 2020-03-31 14:23:59 -0400  420) 	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  421) 	xa_erase_index(xa, 12345678);
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  422) 	xa_erase_index(xa, 5);
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  423) 	XA_BUG_ON(xa, !xa_empty(xa));
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  424) }
41aec91f55985 (Matthew Wilcox          2017-11-10 15:34:55 -0500  425) 
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  426) static noinline void check_reserve(struct xarray *xa)
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  427) {
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  428) 	void *entry;
4a31896c5b5a2 (Matthew Wilcox          2018-12-17 14:45:36 -0500  429) 	unsigned long index;
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  430) 	int count;
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  431) 
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  432) 	/* An array with a reserved entry is not empty */
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  433) 	XA_BUG_ON(xa, !xa_empty(xa));
f818b82b80164 (Matthew Wilcox          2019-02-08 14:02:45 -0500  434) 	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  435) 	XA_BUG_ON(xa, xa_empty(xa));
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  436) 	XA_BUG_ON(xa, xa_load(xa, 12345678));
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  437) 	xa_release(xa, 12345678);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  438) 	XA_BUG_ON(xa, !xa_empty(xa));
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  439) 
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  440) 	/* Releasing a used entry does nothing */
f818b82b80164 (Matthew Wilcox          2019-02-08 14:02:45 -0500  441) 	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  442) 	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  443) 	xa_release(xa, 12345678);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  444) 	xa_erase_index(xa, 12345678);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  445) 	XA_BUG_ON(xa, !xa_empty(xa));
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  446) 
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  447) 	/* cmpxchg sees a reserved entry as ZERO */
f818b82b80164 (Matthew Wilcox          2019-02-08 14:02:45 -0500  448) 	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  449) 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  450) 				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  451) 	xa_release(xa, 12345678);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  452) 	xa_erase_index(xa, 12345678);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  453) 	XA_BUG_ON(xa, !xa_empty(xa));
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  454) 
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  455) 	/* xa_insert treats it as busy */
f818b82b80164 (Matthew Wilcox          2019-02-08 14:02:45 -0500  456) 	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
b0606fed6eece (Matthew Wilcox          2019-01-02 13:57:03 -0500  457) 	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
fd9dc93e36231 (Matthew Wilcox          2019-02-06 13:07:11 -0500  458) 			-EBUSY);
b0606fed6eece (Matthew Wilcox          2019-01-02 13:57:03 -0500  459) 	XA_BUG_ON(xa, xa_empty(xa));
b0606fed6eece (Matthew Wilcox          2019-01-02 13:57:03 -0500  460) 	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
4c0608f4a0e76 (Matthew Wilcox          2018-10-30 09:45:55 -0400  461) 	XA_BUG_ON(xa, !xa_empty(xa));
4c0608f4a0e76 (Matthew Wilcox          2018-10-30 09:45:55 -0400  462) 
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  463) 	/* Can iterate through a reserved entry */
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  464) 	xa_store_index(xa, 5, GFP_KERNEL);
f818b82b80164 (Matthew Wilcox          2019-02-08 14:02:45 -0500  465) 	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  466) 	xa_store_index(xa, 7, GFP_KERNEL);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  467) 
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  468) 	count = 0;
4a31896c5b5a2 (Matthew Wilcox          2018-12-17 14:45:36 -0500  469) 	xa_for_each(xa, index, entry) {
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  470) 		XA_BUG_ON(xa, index != 5 && index != 7);
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  471) 		count++;
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  472) 	}
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  473) 	XA_BUG_ON(xa, count != 2);
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  474) 
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  475) 	/* If we free a reserved entry, we should be able to allocate it */
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  476) 	if (xa->xa_flags & XA_FLAGS_ALLOC) {
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  477) 		u32 id;
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  478) 
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  479) 		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  480) 					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  481) 		XA_BUG_ON(xa, id != 8);
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  482) 
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  483) 		xa_release(xa, 6);
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  484) 		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  485) 					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  486) 		XA_BUG_ON(xa, id != 6);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  487) 	}
b38f6c5027068 (Matthew Wilcox          2019-02-20 11:30:49 -0500  488) 
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  489) 	xa_destroy(xa);
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  490) }
9f14d4f1f1045 (Matthew Wilcox          2018-10-01 14:54:59 -0400  491) 
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  492) static noinline void check_xas_erase(struct xarray *xa)
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  493) {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  494) 	XA_STATE(xas, xa, 0);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  495) 	void *entry;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  496) 	unsigned long i, j;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  497) 
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  498) 	for (i = 0; i < 200; i++) {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  499) 		for (j = i; j < 2 * i + 17; j++) {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  500) 			xas_set(&xas, j);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  501) 			do {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  502) 				xas_lock(&xas);
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  503) 				xas_store(&xas, xa_mk_index(j));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  504) 				xas_unlock(&xas);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  505) 			} while (xas_nomem(&xas, GFP_KERNEL));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  506) 		}
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  507) 
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  508) 		xas_set(&xas, ULONG_MAX);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  509) 		do {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  510) 			xas_lock(&xas);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  511) 			xas_store(&xas, xa_mk_value(0));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  512) 			xas_unlock(&xas);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  513) 		} while (xas_nomem(&xas, GFP_KERNEL));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  514) 
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  515) 		xas_lock(&xas);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  516) 		xas_store(&xas, NULL);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  517) 
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  518) 		xas_set(&xas, 0);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  519) 		j = i;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  520) 		xas_for_each(&xas, entry, ULONG_MAX) {
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  521) 			XA_BUG_ON(xa, entry != xa_mk_index(j));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  522) 			xas_store(&xas, NULL);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  523) 			j++;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  524) 		}
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  525) 		xas_unlock(&xas);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  526) 		XA_BUG_ON(xa, !xa_empty(xa));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  527) 	}
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  528) }
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  529) 
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  530) #ifdef CONFIG_XARRAY_MULTI
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  531) static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  532) 		unsigned int order)
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  533) {
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  534) 	XA_STATE(xas, xa, index);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  535) 	unsigned long min = index & ~((1UL << order) - 1);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  536) 	unsigned long max = min + (1UL << order);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  537) 
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  538) 	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  539) 	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  540) 	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  541) 	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  542) 	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  543) 
fffc9a260e38a (Matthew Wilcox          2018-11-19 09:36:29 -0500  544) 	xas_lock(&xas);
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  545) 	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
fffc9a260e38a (Matthew Wilcox          2018-11-19 09:36:29 -0500  546) 	xas_unlock(&xas);
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  547) 	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  548) 	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  549) 	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  550) 	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  551) 
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  552) 	xa_erase_index(xa, min);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  553) 	XA_BUG_ON(xa, !xa_empty(xa));
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  554) }
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  555) 
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  556) static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  557) 		unsigned int order)
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  558) {
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  559) 	XA_STATE(xas, xa, index);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  560) 	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  561) 
fffc9a260e38a (Matthew Wilcox          2018-11-19 09:36:29 -0500  562) 	xas_lock(&xas);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  563) 	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  564) 	XA_BUG_ON(xa, xas.xa_index != index);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  565) 	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
fffc9a260e38a (Matthew Wilcox          2018-11-19 09:36:29 -0500  566) 	xas_unlock(&xas);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  567) 	XA_BUG_ON(xa, !xa_empty(xa));
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  568) }
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  569) 
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  570) static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  571) 		unsigned int order)
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  572) {
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  573) 	XA_STATE(xas, xa, 0);
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  574) 	void *entry;
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  575) 	int n = 0;
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  576) 
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  577) 	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  578) 
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  579) 	xas_lock(&xas);
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  580) 	xas_for_each(&xas, entry, ULONG_MAX) {
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  581) 		XA_BUG_ON(xa, entry != xa_mk_index(index));
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  582) 		n++;
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  583) 	}
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  584) 	XA_BUG_ON(xa, n != 1);
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  585) 	xas_set(&xas, index + 1);
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  586) 	xas_for_each(&xas, entry, ULONG_MAX) {
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  587) 		XA_BUG_ON(xa, entry != xa_mk_index(index));
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  588) 		n++;
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  589) 	}
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  590) 	XA_BUG_ON(xa, n != 2);
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  591) 	xas_unlock(&xas);
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  592) 
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  593) 	xa_destroy(xa);
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  594) }
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  595) #endif
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  596) 
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  597) static noinline void check_multi_store(struct xarray *xa)
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  598) {
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  599) #ifdef CONFIG_XARRAY_MULTI
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  600) 	unsigned long i, j, k;
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  601) 	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  602) 
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  603) 	/* Loading from any position returns the same value */
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  604) 	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  605) 	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  606) 	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  607) 	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  608) 	rcu_read_lock();
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  609) 	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  610) 	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  611) 	rcu_read_unlock();
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  612) 
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  613) 	/* Storing adjacent to the value does not alter the value */
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  614) 	xa_store(xa, 3, xa, GFP_KERNEL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  615) 	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  616) 	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  617) 	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  618) 	rcu_read_lock();
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  619) 	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  620) 	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  621) 	rcu_read_unlock();
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  622) 
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  623) 	/* Overwriting multiple indexes works */
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  624) 	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  625) 	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  626) 	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  627) 	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  628) 	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  629) 	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  630) 	rcu_read_lock();
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  631) 	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  632) 	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  633) 	rcu_read_unlock();
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  634) 
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  635) 	/* We can erase multiple values with a single store */
5404a7f1c21cf (Matthew Wilcox          2018-11-05 09:34:04 -0500  636) 	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  637) 	XA_BUG_ON(xa, !xa_empty(xa));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  638) 
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  639) 	/* Even when the first slot is empty but the others aren't */
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  640) 	xa_store_index(xa, 1, GFP_KERNEL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  641) 	xa_store_index(xa, 2, GFP_KERNEL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  642) 	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  643) 	XA_BUG_ON(xa, !xa_empty(xa));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  644) 
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  645) 	for (i = 0; i < max_order; i++) {
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  646) 		for (j = 0; j < max_order; j++) {
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  647) 			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  648) 			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  649) 
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  650) 			for (k = 0; k < max_order; k++) {
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  651) 				void *entry = xa_load(xa, (1UL << k) - 1);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  652) 				if ((i < k) && (j < k))
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  653) 					XA_BUG_ON(xa, entry != NULL);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  654) 				else
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  655) 					XA_BUG_ON(xa, entry != xa_mk_index(j));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  656) 			}
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  657) 
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  658) 			xa_erase(xa, 0);
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  659) 			XA_BUG_ON(xa, !xa_empty(xa));
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  660) 		}
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  661) 	}
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  662) 
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  663) 	for (i = 0; i < 20; i++) {
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  664) 		check_multi_store_1(xa, 200, i);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  665) 		check_multi_store_1(xa, 0, i);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  666) 		check_multi_store_1(xa, (1UL << i) + 1, i);
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  667) 	}
4f06d6302da68 (Matthew Wilcox          2018-09-09 01:52:17 -0400  668) 	check_multi_store_2(xa, 4095, 9);
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  669) 
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  670) 	for (i = 1; i < 20; i++) {
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  671) 		check_multi_store_3(xa, 0, i);
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  672) 		check_multi_store_3(xa, 1UL << i, i);
4f145cd66a1a7 (Matthew Wilcox          2018-11-29 16:04:35 -0500  673) 	}
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  674) #endif
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  675) }
58d6ea3085f2e (Matthew Wilcox          2017-11-10 15:15:08 -0500  676) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  677) static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  678) {
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  679) 	int i;
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  680) 	u32 id;
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  681) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  682) 	XA_BUG_ON(xa, !xa_empty(xa));
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  683) 	/* An empty array should assign %base to the first alloc */
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  684) 	xa_alloc_index(xa, base, GFP_KERNEL);
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  685) 
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  686) 	/* Erasing it should make the array empty again */
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  687) 	xa_erase_index(xa, base);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  688) 	XA_BUG_ON(xa, !xa_empty(xa));
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  689) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  690) 	/* And it should assign %base again */
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  691) 	xa_alloc_index(xa, base, GFP_KERNEL);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  692) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  693) 	/* Allocating and then erasing a lot should not lose base */
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  694) 	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  695) 		xa_alloc_index(xa, i, GFP_KERNEL);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  696) 	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  697) 		xa_erase_index(xa, i);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  698) 	xa_alloc_index(xa, base, GFP_KERNEL);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  699) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  700) 	/* Destroying the array should do the same as erasing */
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  701) 	xa_destroy(xa);
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  702) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  703) 	/* And it should assign %base again */
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  704) 	xa_alloc_index(xa, base, GFP_KERNEL);
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  705) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  706) 	/* The next assigned ID should be base+1 */
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  707) 	xa_alloc_index(xa, base + 1, GFP_KERNEL);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  708) 	xa_erase_index(xa, base + 1);
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  709) 
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  710) 	/* Storing a value should mark it used */
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  711) 	xa_store_index(xa, base + 1, GFP_KERNEL);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  712) 	xa_alloc_index(xa, base + 2, GFP_KERNEL);
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  713) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  714) 	/* If we then erase base, it should be free */
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  715) 	xa_erase_index(xa, base);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  716) 	xa_alloc_index(xa, base, GFP_KERNEL);
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  717) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  718) 	xa_erase_index(xa, base + 1);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  719) 	xa_erase_index(xa, base + 2);
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  720) 
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  721) 	for (i = 1; i < 5000; i++) {
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  722) 		xa_alloc_index(xa, base + i, GFP_KERNEL);
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  723) 	}
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  724) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  725) 	xa_destroy(xa);
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  726) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  727) 	/* Check that we fail properly at the limit of allocation */
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  728) 	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  729) 				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  730) 				GFP_KERNEL) != 0);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  731) 	XA_BUG_ON(xa, id != 0xfffffffeU);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  732) 	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  733) 				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  734) 				GFP_KERNEL) != 0);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  735) 	XA_BUG_ON(xa, id != 0xffffffffU);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  736) 	id = 3;
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  737) 	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  738) 				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  739) 				GFP_KERNEL) != -EBUSY);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  740) 	XA_BUG_ON(xa, id != 3);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  741) 	xa_destroy(xa);
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500  742) 
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  743) 	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  744) 				GFP_KERNEL) != -EBUSY);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  745) 	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  746) 	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  747) 				GFP_KERNEL) != -EBUSY);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  748) 	xa_erase_index(xa, 3);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  749) 	XA_BUG_ON(xa, !xa_empty(xa));
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  750) }
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  751) 
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  752) static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  753) {
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  754) 	unsigned int i, id;
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  755) 	unsigned long index;
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  756) 	void *entry;
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  757) 
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  758) 	/* Allocate and free a NULL and check xa_empty() behaves */
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  759) 	XA_BUG_ON(xa, !xa_empty(xa));
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  760) 	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  761) 	XA_BUG_ON(xa, id != base);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  762) 	XA_BUG_ON(xa, xa_empty(xa));
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  763) 	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  764) 	XA_BUG_ON(xa, !xa_empty(xa));
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  765) 
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  766) 	/* Ditto, but check destroy instead of erase */
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  767) 	XA_BUG_ON(xa, !xa_empty(xa));
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  768) 	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  769) 	XA_BUG_ON(xa, id != base);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  770) 	XA_BUG_ON(xa, xa_empty(xa));
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  771) 	xa_destroy(xa);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  772) 	XA_BUG_ON(xa, !xa_empty(xa));
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  773) 
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  774) 	for (i = base; i < base + 10; i++) {
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  775) 		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  776) 					GFP_KERNEL) != 0);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  777) 		XA_BUG_ON(xa, id != i);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  778) 	}
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  779) 
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  780) 	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  781) 	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  782) 	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  783) 	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  784) 	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  785) 	XA_BUG_ON(xa, id != 5);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  786) 
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  787) 	xa_for_each(xa, index, entry) {
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  788) 		xa_erase_index(xa, index);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  789) 	}
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  790) 
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  791) 	for (i = base; i < base + 9; i++) {
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  792) 		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  793) 		XA_BUG_ON(xa, xa_empty(xa));
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  794) 	}
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  795) 	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  796) 	XA_BUG_ON(xa, xa_empty(xa));
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  797) 	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  798) 	XA_BUG_ON(xa, !xa_empty(xa));
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  799) 
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  800) 	xa_destroy(xa);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  801) }
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  802) 
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  803) static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  804) {
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  805) 	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  806) 	u32 next = 0;
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  807) 	unsigned int i, id;
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  808) 	unsigned long index;
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  809) 	void *entry;
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  810) 
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  811) 	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  812) 				&next, GFP_KERNEL) != 0);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  813) 	XA_BUG_ON(xa, id != 1);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  814) 
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  815) 	next = 0x3ffd;
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  816) 	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  817) 				&next, GFP_KERNEL) != 0);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  818) 	XA_BUG_ON(xa, id != 0x3ffd);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  819) 	xa_erase_index(xa, 0x3ffd);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  820) 	xa_erase_index(xa, 1);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  821) 	XA_BUG_ON(xa, !xa_empty(xa));
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  822) 
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  823) 	for (i = 0x3ffe; i < 0x4003; i++) {
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  824) 		if (i < 0x4000)
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  825) 			entry = xa_mk_index(i);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  826) 		else
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  827) 			entry = xa_mk_index(i - 0x3fff);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  828) 		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  829) 					&next, GFP_KERNEL) != (id == 1));
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  830) 		XA_BUG_ON(xa, xa_mk_index(id) != entry);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  831) 	}
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  832) 
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  833) 	/* Check wrap-around is handled correctly */
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  834) 	if (base != 0)
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  835) 		xa_erase_index(xa, base);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  836) 	xa_erase_index(xa, base + 1);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  837) 	next = UINT_MAX;
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  838) 	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  839) 				xa_limit_32b, &next, GFP_KERNEL) != 0);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  840) 	XA_BUG_ON(xa, id != UINT_MAX);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  841) 	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  842) 				xa_limit_32b, &next, GFP_KERNEL) != 1);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  843) 	XA_BUG_ON(xa, id != base);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  844) 	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  845) 				xa_limit_32b, &next, GFP_KERNEL) != 0);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  846) 	XA_BUG_ON(xa, id != base + 1);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  847) 
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  848) 	xa_for_each(xa, index, entry)
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  849) 		xa_erase_index(xa, index);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  850) 
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  851) 	XA_BUG_ON(xa, !xa_empty(xa));
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  852) }
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  853) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  854) static DEFINE_XARRAY_ALLOC(xa0);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  855) static DEFINE_XARRAY_ALLOC1(xa1);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  856) 
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  857) static noinline void check_xa_alloc(void)
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  858) {
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  859) 	check_xa_alloc_1(&xa0, 0);
3ccaf57a6a63a (Matthew Wilcox          2018-10-26 14:43:22 -0400  860) 	check_xa_alloc_1(&xa1, 1);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  861) 	check_xa_alloc_2(&xa0, 0);
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500  862) 	check_xa_alloc_2(&xa1, 1);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  863) 	check_xa_alloc_3(&xa0, 0);
2fa044e51a1f3 (Matthew Wilcox          2018-11-06 14:13:35 -0500  864) 	check_xa_alloc_3(&xa1, 1);
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  865) }
371c752dc6694 (Matthew Wilcox          2018-07-04 10:50:12 -0400  866) 
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  867) static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  868) 			unsigned int order, unsigned int present)
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  869) {
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  870) 	XA_STATE_ORDER(xas, xa, start, order);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  871) 	void *entry;
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  872) 	unsigned int count = 0;
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  873) 
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  874) retry:
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  875) 	xas_lock(&xas);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  876) 	xas_for_each_conflict(&xas, entry) {
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  877) 		XA_BUG_ON(xa, !xa_is_value(entry));
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  878) 		XA_BUG_ON(xa, entry < xa_mk_index(start));
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  879) 		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  880) 		count++;
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  881) 	}
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  882) 	xas_store(&xas, xa_mk_index(start));
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  883) 	xas_unlock(&xas);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  884) 	if (xas_nomem(&xas, GFP_KERNEL)) {
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  885) 		count = 0;
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  886) 		goto retry;
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  887) 	}
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  888) 	XA_BUG_ON(xa, xas_error(&xas));
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  889) 	XA_BUG_ON(xa, count != present);
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  890) 	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  891) 	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  892) 			xa_mk_index(start));
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  893) 	xa_erase_index(xa, start);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  894) }
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  895) 
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  896) static noinline void check_store_iter(struct xarray *xa)
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  897) {
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  898) 	unsigned int i, j;
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  899) 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  900) 
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  901) 	for (i = 0; i < max_order; i++) {
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  902) 		unsigned int min = 1 << i;
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  903) 		unsigned int max = (2 << i) - 1;
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  904) 		__check_store_iter(xa, 0, i, 0);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  905) 		XA_BUG_ON(xa, !xa_empty(xa));
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  906) 		__check_store_iter(xa, min, i, 0);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  907) 		XA_BUG_ON(xa, !xa_empty(xa));
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  908) 
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  909) 		xa_store_index(xa, min, GFP_KERNEL);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  910) 		__check_store_iter(xa, min, i, 1);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  911) 		XA_BUG_ON(xa, !xa_empty(xa));
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  912) 		xa_store_index(xa, max, GFP_KERNEL);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  913) 		__check_store_iter(xa, min, i, 1);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  914) 		XA_BUG_ON(xa, !xa_empty(xa));
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  915) 
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  916) 		for (j = 0; j < min; j++)
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  917) 			xa_store_index(xa, j, GFP_KERNEL);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  918) 		__check_store_iter(xa, 0, i, min);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  919) 		XA_BUG_ON(xa, !xa_empty(xa));
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  920) 		for (j = 0; j < min; j++)
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  921) 			xa_store_index(xa, min + j, GFP_KERNEL);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  922) 		__check_store_iter(xa, min, i, min);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  923) 		XA_BUG_ON(xa, !xa_empty(xa));
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  924) 	}
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  925) #ifdef CONFIG_XARRAY_MULTI
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  926) 	xa_store_index(xa, 63, GFP_KERNEL);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  927) 	xa_store_index(xa, 65, GFP_KERNEL);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  928) 	__check_store_iter(xa, 64, 2, 1);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  929) 	xa_erase_index(xa, 63);
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  930) #endif
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  931) 	XA_BUG_ON(xa, !xa_empty(xa));
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  932) }
4e99d4e9579d3 (Matthew Wilcox          2018-06-01 22:46:02 -0400  933) 
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  934) static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  935) {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  936) #ifdef CONFIG_XARRAY_MULTI
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  937) 	unsigned long multi = 3 << order;
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  938) 	unsigned long next = 4 << order;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  939) 	unsigned long index;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  940) 
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  941) 	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  942) 	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
c44aa5e8ab58b (Matthew Wilcox (Oracle) 2020-01-17 22:13:21 -0500  943) 	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  944) 
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  945) 	index = 0;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  946) 	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  947) 			xa_mk_value(multi));
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  948) 	XA_BUG_ON(xa, index != multi);
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  949) 	index = multi + 1;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  950) 	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  951) 			xa_mk_value(multi));
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  952) 	XA_BUG_ON(xa, (index < multi) || (index >= next));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  953) 	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  954) 			xa_mk_value(next));
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  955) 	XA_BUG_ON(xa, index != next);
c44aa5e8ab58b (Matthew Wilcox (Oracle) 2020-01-17 22:13:21 -0500  956) 	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
c44aa5e8ab58b (Matthew Wilcox (Oracle) 2020-01-17 22:13:21 -0500  957) 	XA_BUG_ON(xa, index != next);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  958) 
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  959) 	xa_erase_index(xa, multi);
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500  960) 	xa_erase_index(xa, next);
c44aa5e8ab58b (Matthew Wilcox (Oracle) 2020-01-17 22:13:21 -0500  961) 	xa_erase_index(xa, next + 1);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  962) 	XA_BUG_ON(xa, !xa_empty(xa));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  963) #endif
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  964) }
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  965) 
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  966) static noinline void check_multi_find_2(struct xarray *xa)
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  967) {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  968) 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  969) 	unsigned int i, j;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  970) 	void *entry;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  971) 
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  972) 	for (i = 0; i < max_order; i++) {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  973) 		unsigned long index = 1UL << i;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  974) 		for (j = 0; j < index; j++) {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  975) 			XA_STATE(xas, xa, j + index);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  976) 			xa_store_index(xa, index - 1, GFP_KERNEL);
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500  977) 			xa_store_order(xa, index, i, xa_mk_index(index),
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  978) 					GFP_KERNEL);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  979) 			rcu_read_lock();
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  980) 			xas_for_each(&xas, entry, ULONG_MAX) {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  981) 				xa_erase_index(xa, index);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  982) 			}
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  983) 			rcu_read_unlock();
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  984) 			xa_erase_index(xa, index - 1);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  985) 			XA_BUG_ON(xa, !xa_empty(xa));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  986) 		}
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  987) 	}
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  988) }
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500  989) 
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500  990) static noinline void check_multi_find_3(struct xarray *xa)
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500  991) {
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500  992) 	unsigned int order;
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500  993) 
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500  994) 	for (order = 5; order < order_limit; order++) {
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500  995) 		unsigned long index = 1UL << (order - 5);
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500  996) 
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500  997) 		XA_BUG_ON(xa, !xa_empty(xa));
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500  998) 		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500  999) 		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500 1000) 		xa_erase_index(xa, 0);
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500 1001) 	}
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500 1002) }
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500 1003) 
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1004) static noinline void check_find_1(struct xarray *xa)
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1005) {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1006) 	unsigned long i, j, k;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1007) 
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1008) 	XA_BUG_ON(xa, !xa_empty(xa));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1009) 
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1010) 	/*
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1011) 	 * Check xa_find with all pairs between 0 and 99 inclusive,
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1012) 	 * starting at every index between 0 and 99
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1013) 	 */
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1014) 	for (i = 0; i < 100; i++) {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1015) 		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1016) 		xa_set_mark(xa, i, XA_MARK_0);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1017) 		for (j = 0; j < i; j++) {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1018) 			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1019) 					NULL);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1020) 			xa_set_mark(xa, j, XA_MARK_0);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1021) 			for (k = 0; k < 100; k++) {
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1022) 				unsigned long index = k;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1023) 				void *entry = xa_find(xa, &index, ULONG_MAX,
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1024) 								XA_PRESENT);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1025) 				if (k <= j)
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1026) 					XA_BUG_ON(xa, index != j);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1027) 				else if (k <= i)
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1028) 					XA_BUG_ON(xa, index != i);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1029) 				else
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1030) 					XA_BUG_ON(xa, entry != NULL);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1031) 
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1032) 				index = k;
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1033) 				entry = xa_find(xa, &index, ULONG_MAX,
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1034) 								XA_MARK_0);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1035) 				if (k <= j)
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1036) 					XA_BUG_ON(xa, index != j);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1037) 				else if (k <= i)
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1038) 					XA_BUG_ON(xa, index != i);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1039) 				else
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1040) 					XA_BUG_ON(xa, entry != NULL);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1041) 			}
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1042) 			xa_erase_index(xa, j);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1043) 			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1044) 			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1045) 		}
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1046) 		xa_erase_index(xa, i);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1047) 		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1048) 	}
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1049) 	XA_BUG_ON(xa, !xa_empty(xa));
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1050) }
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1051) 
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1052) static noinline void check_find_2(struct xarray *xa)
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1053) {
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1054) 	void *entry;
4a31896c5b5a2 (Matthew Wilcox          2018-12-17 14:45:36 -0500 1055) 	unsigned long i, j, index;
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1056) 
4a31896c5b5a2 (Matthew Wilcox          2018-12-17 14:45:36 -0500 1057) 	xa_for_each(xa, index, entry) {
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1058) 		XA_BUG_ON(xa, true);
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1059) 	}
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1060) 
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1061) 	for (i = 0; i < 1024; i++) {
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1062) 		xa_store_index(xa, index, GFP_KERNEL);
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1063) 		j = 0;
4a31896c5b5a2 (Matthew Wilcox          2018-12-17 14:45:36 -0500 1064) 		xa_for_each(xa, index, entry) {
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1065) 			XA_BUG_ON(xa, xa_mk_index(index) != entry);
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1066) 			XA_BUG_ON(xa, index != j++);
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1067) 		}
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1068) 	}
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1069) 
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1070) 	xa_destroy(xa);
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1071) }
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1072) 
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1073) static noinline void check_find_3(struct xarray *xa)
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1074) {
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1075) 	XA_STATE(xas, xa, 0);
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1076) 	unsigned long i, j, k;
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1077) 	void *entry;
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1078) 
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1079) 	for (i = 0; i < 100; i++) {
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1080) 		for (j = 0; j < 100; j++) {
490fd30f85957 (Matthew Wilcox          2018-12-17 17:37:25 -0500 1081) 			rcu_read_lock();
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1082) 			for (k = 0; k < 100; k++) {
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1083) 				xas_set(&xas, j);
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1084) 				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1085) 					;
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1086) 				if (j > k)
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1087) 					XA_BUG_ON(xa,
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1088) 						xas.xa_node != XAS_RESTART);
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1089) 			}
490fd30f85957 (Matthew Wilcox          2018-12-17 17:37:25 -0500 1090) 			rcu_read_unlock();
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1091) 		}
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1092) 		xa_store_index(xa, i, GFP_KERNEL);
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1093) 		xa_set_mark(xa, i, XA_MARK_0);
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1094) 	}
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1095) 	xa_destroy(xa);
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1096) }
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1097) 
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1098) static noinline void check_find_4(struct xarray *xa)
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1099) {
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1100) 	unsigned long index = 0;
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1101) 	void *entry;
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1102) 
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1103) 	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1104) 
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1105) 	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1106) 	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1107) 
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1108) 	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1109) 	XA_BUG_ON(xa, entry);
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1110) 
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1111) 	xa_erase_index(xa, ULONG_MAX);
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1112) }
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1113) 
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1114) static noinline void check_find(struct xarray *xa)
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1115) {
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 1116) 	unsigned i;
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 1117) 
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1118) 	check_find_1(xa);
8229706e03e41 (Matthew Wilcox          2018-11-01 16:55:19 -0400 1119) 	check_find_2(xa);
48483614de97c (Matthew Wilcox          2018-12-13 13:57:42 -0500 1120) 	check_find_3(xa);
430f24f94c8a1 (Matthew Wilcox (Oracle) 2020-01-17 17:45:12 -0500 1121) 	check_find_4(xa);
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 1122) 
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 1123) 	for (i = 2; i < 10; i++)
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 1124) 		check_multi_find_1(xa, i);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1125) 	check_multi_find_2(xa);
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500 1126) 	check_multi_find_3(xa);
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1127) }
b803b42823d0d (Matthew Wilcox          2017-11-14 08:30:11 -0500 1128) 
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1129) /* See find_swap_entry() in mm/shmem.c */
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1130) static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1131) {
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1132) 	XA_STATE(xas, xa, 0);
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1133) 	unsigned int checked = 0;
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1134) 	void *entry;
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1135) 
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1136) 	rcu_read_lock();
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1137) 	xas_for_each(&xas, entry, ULONG_MAX) {
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1138) 		if (xas_retry(&xas, entry))
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1139) 			continue;
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1140) 		if (entry == item)
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1141) 			break;
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1142) 		checked++;
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1143) 		if ((checked % 4) != 0)
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1144) 			continue;
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1145) 		xas_pause(&xas);
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1146) 	}
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1147) 	rcu_read_unlock();
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1148) 
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1149) 	return entry ? xas.xa_index : -1;
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1150) }
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1151) 
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1152) static noinline void check_find_entry(struct xarray *xa)
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1153) {
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1154) #ifdef CONFIG_XARRAY_MULTI
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1155) 	unsigned int order;
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1156) 	unsigned long offset, index;
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1157) 
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1158) 	for (order = 0; order < 20; order++) {
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1159) 		for (offset = 0; offset < (1UL << (order + 3));
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1160) 		     offset += (1UL << order)) {
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1161) 			for (index = 0; index < (1UL << (order + 5));
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1162) 			     index += (1UL << order)) {
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1163) 				xa_store_order(xa, index, order,
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1164) 						xa_mk_index(index), GFP_KERNEL);
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1165) 				XA_BUG_ON(xa, xa_load(xa, index) !=
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1166) 						xa_mk_index(index));
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1167) 				XA_BUG_ON(xa, xa_find_entry(xa,
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1168) 						xa_mk_index(index)) != index);
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1169) 			}
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1170) 			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1171) 			xa_destroy(xa);
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1172) 		}
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1173) 	}
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1174) #endif
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1175) 
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1176) 	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1177) 	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1178) 	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1179) 	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1180) 	xa_erase_index(xa, ULONG_MAX);
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1181) 	XA_BUG_ON(xa, !xa_empty(xa));
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1182) }
e21a29552fa3f (Matthew Wilcox          2017-11-22 08:36:00 -0500 1183) 
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1184) static noinline void check_pause(struct xarray *xa)
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1185) {
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1186) 	XA_STATE(xas, xa, 0);
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1187) 	void *entry;
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1188) 	unsigned int order;
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1189) 	unsigned long index = 1;
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1190) 	unsigned int count = 0;
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1191) 
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1192) 	for (order = 0; order < order_limit; order++) {
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1193) 		XA_BUG_ON(xa, xa_store_order(xa, index, order,
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1194) 					xa_mk_index(index), GFP_KERNEL));
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1195) 		index += 1UL << order;
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1196) 	}
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1197) 
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1198) 	rcu_read_lock();
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1199) 	xas_for_each(&xas, entry, ULONG_MAX) {
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1200) 		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1201) 		count++;
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1202) 	}
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1203) 	rcu_read_unlock();
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1204) 	XA_BUG_ON(xa, count != order_limit);
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1205) 
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1206) 	count = 0;
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1207) 	xas_set(&xas, 0);
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1208) 	rcu_read_lock();
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1209) 	xas_for_each(&xas, entry, ULONG_MAX) {
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1210) 		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1211) 		count++;
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1212) 		xas_pause(&xas);
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1213) 	}
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1214) 	rcu_read_unlock();
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1215) 	XA_BUG_ON(xa, count != order_limit);
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1216) 
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1217) 	xa_destroy(xa);
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1218) }
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1219) 
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1220) static noinline void check_move_tiny(struct xarray *xa)
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1221) {
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1222) 	XA_STATE(xas, xa, 0);
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1223) 
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1224) 	XA_BUG_ON(xa, !xa_empty(xa));
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1225) 	rcu_read_lock();
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1226) 	XA_BUG_ON(xa, xas_next(&xas) != NULL);
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1227) 	XA_BUG_ON(xa, xas_next(&xas) != NULL);
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1228) 	rcu_read_unlock();
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1229) 	xa_store_index(xa, 0, GFP_KERNEL);
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1230) 	rcu_read_lock();
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1231) 	xas_set(&xas, 0);
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1232) 	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1233) 	XA_BUG_ON(xa, xas_next(&xas) != NULL);
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1234) 	xas_set(&xas, 0);
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1235) 	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1236) 	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1237) 	rcu_read_unlock();
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1238) 	xa_erase_index(xa, 0);
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1239) 	XA_BUG_ON(xa, !xa_empty(xa));
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1240) }
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1241) 
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1242) static noinline void check_move_max(struct xarray *xa)
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1243) {
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1244) 	XA_STATE(xas, xa, 0);
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1245) 
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1246) 	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1247) 	rcu_read_lock();
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1248) 	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1249) 	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1250) 	rcu_read_unlock();
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1251) 
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1252) 	xas_set(&xas, 0);
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1253) 	rcu_read_lock();
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1254) 	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1255) 	xas_pause(&xas);
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1256) 	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1257) 	rcu_read_unlock();
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1258) 
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1259) 	xa_erase_index(xa, ULONG_MAX);
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1260) 	XA_BUG_ON(xa, !xa_empty(xa));
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1261) }
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1262) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1263) static noinline void check_move_small(struct xarray *xa, unsigned long idx)
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1264) {
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1265) 	XA_STATE(xas, xa, 0);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1266) 	unsigned long i;
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1267) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1268) 	xa_store_index(xa, 0, GFP_KERNEL);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1269) 	xa_store_index(xa, idx, GFP_KERNEL);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1270) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1271) 	rcu_read_lock();
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1272) 	for (i = 0; i < idx * 4; i++) {
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1273) 		void *entry = xas_next(&xas);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1274) 		if (i <= idx)
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1275) 			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1276) 		XA_BUG_ON(xa, xas.xa_index != i);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1277) 		if (i == 0 || i == idx)
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1278) 			XA_BUG_ON(xa, entry != xa_mk_index(i));
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1279) 		else
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1280) 			XA_BUG_ON(xa, entry != NULL);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1281) 	}
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1282) 	xas_next(&xas);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1283) 	XA_BUG_ON(xa, xas.xa_index != i);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1284) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1285) 	do {
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1286) 		void *entry = xas_prev(&xas);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1287) 		i--;
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1288) 		if (i <= idx)
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1289) 			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1290) 		XA_BUG_ON(xa, xas.xa_index != i);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1291) 		if (i == 0 || i == idx)
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1292) 			XA_BUG_ON(xa, entry != xa_mk_index(i));
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1293) 		else
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1294) 			XA_BUG_ON(xa, entry != NULL);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1295) 	} while (i > 0);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1296) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1297) 	xas_set(&xas, ULONG_MAX);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1298) 	XA_BUG_ON(xa, xas_next(&xas) != NULL);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1299) 	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1300) 	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1301) 	XA_BUG_ON(xa, xas.xa_index != 0);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1302) 	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1303) 	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1304) 	rcu_read_unlock();
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1305) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1306) 	xa_erase_index(xa, 0);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1307) 	xa_erase_index(xa, idx);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1308) 	XA_BUG_ON(xa, !xa_empty(xa));
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1309) }
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1310) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1311) static noinline void check_move(struct xarray *xa)
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1312) {
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1313) 	XA_STATE(xas, xa, (1 << 16) - 1);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1314) 	unsigned long i;
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1315) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1316) 	for (i = 0; i < (1 << 16); i++)
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1317) 		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1318) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1319) 	rcu_read_lock();
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1320) 	do {
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1321) 		void *entry = xas_prev(&xas);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1322) 		i--;
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1323) 		XA_BUG_ON(xa, entry != xa_mk_index(i));
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1324) 		XA_BUG_ON(xa, i != xas.xa_index);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1325) 	} while (i != 0);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1326) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1327) 	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1328) 	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1329) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1330) 	do {
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1331) 		void *entry = xas_next(&xas);
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1332) 		XA_BUG_ON(xa, entry != xa_mk_index(i));
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1333) 		XA_BUG_ON(xa, i != xas.xa_index);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1334) 		i++;
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1335) 	} while (i < (1 << 16));
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1336) 	rcu_read_unlock();
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1337) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1338) 	for (i = (1 << 8); i < (1 << 15); i++)
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1339) 		xa_erase_index(xa, i);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1340) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1341) 	i = xas.xa_index;
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1342) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1343) 	rcu_read_lock();
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1344) 	do {
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1345) 		void *entry = xas_prev(&xas);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1346) 		i--;
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1347) 		if ((i < (1 << 8)) || (i >= (1 << 15)))
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1348) 			XA_BUG_ON(xa, entry != xa_mk_index(i));
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1349) 		else
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1350) 			XA_BUG_ON(xa, entry != NULL);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1351) 		XA_BUG_ON(xa, i != xas.xa_index);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1352) 	} while (i != 0);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1353) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1354) 	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1355) 	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1356) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1357) 	do {
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1358) 		void *entry = xas_next(&xas);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1359) 		if ((i < (1 << 8)) || (i >= (1 << 15)))
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1360) 			XA_BUG_ON(xa, entry != xa_mk_index(i));
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1361) 		else
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1362) 			XA_BUG_ON(xa, entry != NULL);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1363) 		XA_BUG_ON(xa, i != xas.xa_index);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1364) 		i++;
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1365) 	} while (i < (1 << 16));
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1366) 	rcu_read_unlock();
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1367) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1368) 	xa_destroy(xa);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1369) 
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1370) 	check_move_tiny(xa);
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1371) 	check_move_max(xa);
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1372) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1373) 	for (i = 0; i < 16; i++)
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1374) 		check_move_small(xa, 1UL << i);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1375) 
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1376) 	for (i = 2; i < 16; i++)
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1377) 		check_move_small(xa, (1UL << i) - 1);
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1378) }
64d3e9a9e0cc5 (Matthew Wilcox          2017-12-01 00:06:52 -0500 1379) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1380) static noinline void xa_store_many_order(struct xarray *xa,
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1381) 		unsigned long index, unsigned order)
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1382) {
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1383) 	XA_STATE_ORDER(xas, xa, index, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1384) 	unsigned int i = 0;
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1385) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1386) 	do {
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1387) 		xas_lock(&xas);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1388) 		XA_BUG_ON(xa, xas_find_conflict(&xas));
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1389) 		xas_create_range(&xas);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1390) 		if (xas_error(&xas))
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1391) 			goto unlock;
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1392) 		for (i = 0; i < (1U << order); i++) {
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1393) 			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1394) 			xas_next(&xas);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1395) 		}
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1396) unlock:
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1397) 		xas_unlock(&xas);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1398) 	} while (xas_nomem(&xas, GFP_KERNEL));
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1399) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1400) 	XA_BUG_ON(xa, xas_error(&xas));
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1401) }
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1402) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1403) static noinline void check_create_range_1(struct xarray *xa,
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1404) 		unsigned long index, unsigned order)
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1405) {
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1406) 	unsigned long i;
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1407) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1408) 	xa_store_many_order(xa, index, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1409) 	for (i = index; i < index + (1UL << order); i++)
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1410) 		xa_erase_index(xa, i);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1411) 	XA_BUG_ON(xa, !xa_empty(xa));
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1412) }
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1413) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1414) static noinline void check_create_range_2(struct xarray *xa, unsigned order)
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1415) {
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1416) 	unsigned long i;
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1417) 	unsigned long nr = 1UL << order;
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1418) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1419) 	for (i = 0; i < nr * nr; i += nr)
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1420) 		xa_store_many_order(xa, i, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1421) 	for (i = 0; i < nr * nr; i++)
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1422) 		xa_erase_index(xa, i);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1423) 	XA_BUG_ON(xa, !xa_empty(xa));
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1424) }
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1425) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1426) static noinline void check_create_range_3(void)
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1427) {
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1428) 	XA_STATE(xas, NULL, 0);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1429) 	xas_set_err(&xas, -EEXIST);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1430) 	xas_create_range(&xas);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1431) 	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1432) }
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1433) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1434) static noinline void check_create_range_4(struct xarray *xa,
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1435) 		unsigned long index, unsigned order)
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1436) {
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1437) 	XA_STATE_ORDER(xas, xa, index, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1438) 	unsigned long base = xas.xa_index;
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1439) 	unsigned long i = 0;
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1440) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1441) 	xa_store_index(xa, index, GFP_KERNEL);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1442) 	do {
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1443) 		xas_lock(&xas);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1444) 		xas_create_range(&xas);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1445) 		if (xas_error(&xas))
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1446) 			goto unlock;
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1447) 		for (i = 0; i < (1UL << order); i++) {
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1448) 			void *old = xas_store(&xas, xa_mk_index(base + i));
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1449) 			if (xas.xa_index == index)
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1450) 				XA_BUG_ON(xa, old != xa_mk_index(base + i));
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1451) 			else
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1452) 				XA_BUG_ON(xa, old != NULL);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1453) 			xas_next(&xas);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1454) 		}
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1455) unlock:
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1456) 		xas_unlock(&xas);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1457) 	} while (xas_nomem(&xas, GFP_KERNEL));
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1458) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1459) 	XA_BUG_ON(xa, xas_error(&xas));
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1460) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1461) 	for (i = base; i < base + (1UL << order); i++)
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1462) 		xa_erase_index(xa, i);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1463) 	XA_BUG_ON(xa, !xa_empty(xa));
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1464) }
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1465) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1466) static noinline void check_create_range(struct xarray *xa)
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1467) {
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1468) 	unsigned int order;
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1469) 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1470) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1471) 	for (order = 0; order < max_order; order++) {
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1472) 		check_create_range_1(xa, 0, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1473) 		check_create_range_1(xa, 1U << order, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1474) 		check_create_range_1(xa, 2U << order, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1475) 		check_create_range_1(xa, 3U << order, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1476) 		check_create_range_1(xa, 1U << 24, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1477) 		if (order < 10)
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1478) 			check_create_range_2(xa, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1479) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1480) 		check_create_range_4(xa, 0, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1481) 		check_create_range_4(xa, 1U << order, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1482) 		check_create_range_4(xa, 2U << order, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1483) 		check_create_range_4(xa, 3U << order, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1484) 		check_create_range_4(xa, 1U << 24, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1485) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1486) 		check_create_range_4(xa, 1, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1487) 		check_create_range_4(xa, (1U << order) + 1, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1488) 		check_create_range_4(xa, (2U << order) + 1, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1489) 		check_create_range_4(xa, (2U << order) - 1, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1490) 		check_create_range_4(xa, (3U << order) + 1, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1491) 		check_create_range_4(xa, (3U << order) - 1, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1492) 		check_create_range_4(xa, (1U << 24) + 1, order);
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1493) 	}
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1494) 
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1495) 	check_create_range_3();
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1496) }
2264f5132fe45 (Matthew Wilcox          2017-12-04 00:11:48 -0500 1497) 
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1498) static noinline void __check_store_range(struct xarray *xa, unsigned long first,
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1499) 		unsigned long last)
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1500) {
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1501) #ifdef CONFIG_XARRAY_MULTI
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1502) 	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1503) 
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1504) 	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
b7677a132a4c2 (Matthew Wilcox          2018-11-05 13:19:54 -0500 1505) 	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1506) 	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1507) 	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1508) 
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1509) 	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1510) #endif
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1511) 
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1512) 	XA_BUG_ON(xa, !xa_empty(xa));
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1513) }
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1514) 
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1515) static noinline void check_store_range(struct xarray *xa)
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1516) {
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1517) 	unsigned long i, j;
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1518) 
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1519) 	for (i = 0; i < 128; i++) {
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1520) 		for (j = i; j < 128; j++) {
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1521) 			__check_store_range(xa, i, j);
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1522) 			__check_store_range(xa, 128 + i, 128 + j);
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1523) 			__check_store_range(xa, 4095 + i, 4095 + j);
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1524) 			__check_store_range(xa, 4096 + i, 4096 + j);
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1525) 			__check_store_range(xa, 123456 + i, 123456 + j);
5404a7f1c21cf (Matthew Wilcox          2018-11-05 09:34:04 -0500 1526) 			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1527) 		}
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1528) 	}
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1529) }
0e9446c35a809 (Matthew Wilcox          2018-08-15 14:13:29 -0400 1530) 
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1531) #ifdef CONFIG_XARRAY_MULTI
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1532) static void check_split_1(struct xarray *xa, unsigned long index,
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1533) 				unsigned int order, unsigned int new_order)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1534) {
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1535) 	XA_STATE_ORDER(xas, xa, index, new_order);
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1536) 	unsigned int i;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1537) 
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1538) 	xa_store_order(xa, index, order, xa, GFP_KERNEL);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1539) 
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1540) 	xas_split_alloc(&xas, xa, order, GFP_KERNEL);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1541) 	xas_lock(&xas);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1542) 	xas_split(&xas, xa, order);
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1543) 	for (i = 0; i < (1 << order); i += (1 << new_order))
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1544) 		__xa_store(xa, index + i, xa_mk_index(index + i), 0);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1545) 	xas_unlock(&xas);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1546) 
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1547) 	for (i = 0; i < (1 << order); i++) {
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1548) 		unsigned int val = index + (i & ~((1 << new_order) - 1));
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1549) 		XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1550) 	}
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1551) 
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1552) 	xa_set_mark(xa, index, XA_MARK_0);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1553) 	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1554) 
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1555) 	xa_destroy(xa);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1556) }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1557) 
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1558) static noinline void check_split(struct xarray *xa)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1559) {
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1560) 	unsigned int order, new_order;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1561) 
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1562) 	XA_BUG_ON(xa, !xa_empty(xa));
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1563) 
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1564) 	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1565) 		for (new_order = 0; new_order < order; new_order++) {
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1566) 			check_split_1(xa, 0, order, new_order);
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1567) 			check_split_1(xa, 1UL << order, order, new_order);
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1568) 			check_split_1(xa, 3UL << order, order, new_order);
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1569) 		}
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1570) 	}
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1571) }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1572) #else
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1573) static void check_split(struct xarray *xa) { }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1574) #endif
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1575) 
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1576) static void check_align_1(struct xarray *xa, char *name)
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1577) {
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1578) 	int i;
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1579) 	unsigned int id;
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1580) 	unsigned long index;
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1581) 	void *entry;
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1582) 
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1583) 	for (i = 0; i < 8; i++) {
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500 1584) 		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
a3e4d3f97ec84 (Matthew Wilcox          2018-12-31 10:41:01 -0500 1585) 					GFP_KERNEL) != 0);
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1586) 		XA_BUG_ON(xa, id != i);
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1587) 	}
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1588) 	xa_for_each(xa, index, entry)
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1589) 		XA_BUG_ON(xa, xa_is_err(entry));
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1590) 	xa_destroy(xa);
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1591) }
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1592) 
4a5c8d898948d (Matthew Wilcox          2019-02-21 17:54:44 -0500 1593) /*
4a5c8d898948d (Matthew Wilcox          2019-02-21 17:54:44 -0500 1594)  * We should always be able to store without allocating memory after
4a5c8d898948d (Matthew Wilcox          2019-02-21 17:54:44 -0500 1595)  * reserving a slot.
4a5c8d898948d (Matthew Wilcox          2019-02-21 17:54:44 -0500 1596)  */
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1597) static void check_align_2(struct xarray *xa, char *name)
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1598) {
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1599) 	int i;
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1600) 
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1601) 	XA_BUG_ON(xa, !xa_empty(xa));
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1602) 
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1603) 	for (i = 0; i < 8; i++) {
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1604) 		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1605) 		xa_erase(xa, 0);
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1606) 	}
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1607) 
4a5c8d898948d (Matthew Wilcox          2019-02-21 17:54:44 -0500 1608) 	for (i = 0; i < 8; i++) {
4a5c8d898948d (Matthew Wilcox          2019-02-21 17:54:44 -0500 1609) 		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
4a5c8d898948d (Matthew Wilcox          2019-02-21 17:54:44 -0500 1610) 		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
4a5c8d898948d (Matthew Wilcox          2019-02-21 17:54:44 -0500 1611) 		xa_erase(xa, 0);
4a5c8d898948d (Matthew Wilcox          2019-02-21 17:54:44 -0500 1612) 	}
4a5c8d898948d (Matthew Wilcox          2019-02-21 17:54:44 -0500 1613) 
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1614) 	XA_BUG_ON(xa, !xa_empty(xa));
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1615) }
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1616) 
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1617) static noinline void check_align(struct xarray *xa)
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1618) {
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1619) 	char name[] = "Motorola 68000";
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1620) 
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1621) 	check_align_1(xa, name);
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1622) 	check_align_1(xa, name + 1);
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1623) 	check_align_1(xa, name + 2);
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1624) 	check_align_1(xa, name + 3);
2fbe967b3eb74 (Matthew Wilcox          2019-02-21 17:36:45 -0500 1625) 	check_align_2(xa, name);
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1626) }
76b4e52995654 (Matthew Wilcox          2018-12-28 23:20:44 -0500 1627) 
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1628) static LIST_HEAD(shadow_nodes);
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1629) 
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1630) static void test_update_node(struct xa_node *node)
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1631) {
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1632) 	if (node->count && node->count == node->nr_values) {
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1633) 		if (list_empty(&node->private_list))
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1634) 			list_add(&shadow_nodes, &node->private_list);
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1635) 	} else {
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1636) 		if (!list_empty(&node->private_list))
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1637) 			list_del_init(&node->private_list);
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1638) 	}
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1639) }
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1640) 
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1641) static noinline void shadow_remove(struct xarray *xa)
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1642) {
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1643) 	struct xa_node *node;
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1644) 
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1645) 	xa_lock(xa);
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1646) 	while ((node = list_first_entry_or_null(&shadow_nodes,
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1647) 					struct xa_node, private_list))) {
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1648) 		XA_BUG_ON(xa, node->array != xa);
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1649) 		list_del_init(&node->private_list);
f82cd2f0b5eb7 (Matthew Wilcox (Oracle) 2020-08-18 09:05:56 -0400 1650) 		xa_delete_node(node, test_update_node);
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1651) 	}
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1652) 	xa_unlock(xa);
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1653) }
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1654) 
/*
 * Exercise the shadow-node tracking done by test_update_node() around
 * stores at @index and @index + 1, then clean up with shadow_remove().
 */
static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	/* Store two value entries; retry on memory allocation. */
	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	/* All slots hold values, so the node must now be tracked. */
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	/* Storing a non-value pointer must take the node off the list. */
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	/* Restoring a value entry must put it back on the list. */
	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	/* shadow_remove() must empty both the list and the array. */
	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}
a97e7904c0806 (Matthew Wilcox          2017-11-24 14:24:59 -0500 1683) 
d6427f8179b5d (Matthew Wilcox          2018-08-28 16:13:16 -0400 1684) /*
d6427f8179b5d (Matthew Wilcox          2018-08-28 16:13:16 -0400 1685)  * Check that the pointer / value / sibling entries are accounted the
d6427f8179b5d (Matthew Wilcox          2018-08-28 16:13:16 -0400 1686)  * way we expect them to be.
d6427f8179b5d (Matthew Wilcox          2018-08-28 16:13:16 -0400 1687)  */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		/* A multi-order pointer entry: counts slots, not values. */
		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		/* count covers the canonical entry plus its siblings. */
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		/*
		 * Add a multi-order value entry of the same order; the
		 * node now holds two entries of equal slot footprint, so
		 * count must be exactly twice nr_values.
		 */
		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		/* Erasing the value entry must zero nr_values. */
		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}
d6427f8179b5d (Matthew Wilcox          2018-08-28 16:13:16 -0400 1716) 
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1717) static noinline void check_get_order(struct xarray *xa)
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1718) {
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1719) 	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1720) 	unsigned int order;
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1721) 	unsigned long i, j;
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1722) 
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1723) 	for (i = 0; i < 3; i++)
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1724) 		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1725) 
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1726) 	for (order = 0; order < max_order; order++) {
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1727) 		for (i = 0; i < 10; i++) {
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1728) 			xa_store_order(xa, i << order, order,
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1729) 					xa_mk_index(i << order), GFP_KERNEL);
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1730) 			for (j = i << order; j < (i + 1) << order; j++)
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1731) 				XA_BUG_ON(xa, xa_get_order(xa, j) != order);
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1732) 			xa_erase(xa, i << order);
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1733) 		}
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1734) 	}
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1735) }
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1736) 
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1737) static noinline void check_destroy(struct xarray *xa)
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1738) {
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1739) 	unsigned long index;
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1740) 
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1741) 	XA_BUG_ON(xa, !xa_empty(xa));
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1742) 
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1743) 	/* Destroying an empty array is a no-op */
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1744) 	xa_destroy(xa);
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1745) 	XA_BUG_ON(xa, !xa_empty(xa));
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1746) 
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1747) 	/* Destroying an array with a single entry */
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1748) 	for (index = 0; index < 1000; index++) {
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1749) 		xa_store_index(xa, index, GFP_KERNEL);
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1750) 		XA_BUG_ON(xa, xa_empty(xa));
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1751) 		xa_destroy(xa);
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1752) 		XA_BUG_ON(xa, !xa_empty(xa));
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1753) 	}
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1754) 
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1755) 	/* Destroying an array with a single entry at ULONG_MAX */
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1756) 	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1757) 	XA_BUG_ON(xa, xa_empty(xa));
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1758) 	xa_destroy(xa);
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1759) 	XA_BUG_ON(xa, !xa_empty(xa));
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1760) 
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1761) #ifdef CONFIG_XARRAY_MULTI
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1762) 	/* Destroying an array with a multi-index entry */
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1763) 	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1764) 	XA_BUG_ON(xa, xa_empty(xa));
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1765) 	xa_destroy(xa);
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1766) 	XA_BUG_ON(xa, !xa_empty(xa));
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1767) #endif
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1768) }
687149fca1f37 (Matthew Wilcox          2017-11-17 08:16:34 -0500 1769) 
static DEFINE_XARRAY(array);

/*
 * Module entry point: run every check in sequence.  XA_BUG_ON()
 * increments tests_run/tests_passed; report the totals and fail module
 * load (-EINVAL) if any check failed.
 */
static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_insert(&array);
	check_cmpxchg(&array);
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_get_order(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_pause(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);
	check_split(&array);

	/* Exercise shadow-node tracking at several tree depths. */
	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}
ad3d6c7263e36 (Matthew Wilcox          2017-11-07 14:57:46 -0500 1806) 
/* Nothing to clean up: the checks leave the arrays empty. */
static void xarray_exit(void)
{
}

module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");