de6cc6515a445 (Thomas Gleixner 2019-05-27 08:55:02 +0200 1) // SPDX-License-Identifier: GPL-2.0-or-later
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 2) /*
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 3) * Copyright (C) 2001 Momchil Velikov
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 4) * Portions Copyright (C) 2001 Christoph Hellwig
cde53535991fb (Christoph Lameter 2008-07-04 09:59:22 -0700 5) * Copyright (C) 2005 SGI, Christoph Lameter
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 6) * Copyright (C) 2006 Nick Piggin
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 7) * Copyright (C) 2012 Konstantin Khlebnikov
6b053b8e5f5e1 (Matthew Wilcox 2016-05-20 17:02:58 -0700 8) * Copyright (C) 2016 Intel, Matthew Wilcox
6b053b8e5f5e1 (Matthew Wilcox 2016-05-20 17:02:58 -0700 9) * Copyright (C) 2016 Intel, Ross Zwisler
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 10) */
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 11)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 12) #include <linux/bitmap.h>
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 13) #include <linux/bitops.h>
460488c58ca8b (Matthew Wilcox 2017-11-28 15:16:24 -0500 14) #include <linux/bug.h>
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 15) #include <linux/cpu.h>
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 16) #include <linux/errno.h>
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 17) #include <linux/export.h>
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 18) #include <linux/idr.h>
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 19) #include <linux/init.h>
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 20) #include <linux/kernel.h>
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 21) #include <linux/kmemleak.h>
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 22) #include <linux/percpu.h>
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 23) #include <linux/preempt.h> /* in_interrupt() */
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 24) #include <linux/radix-tree.h>
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 25) #include <linux/rcupdate.h>
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 26) #include <linux/slab.h>
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 27) #include <linux/string.h>
02c02bf12c5d8 (Matthew Wilcox 2017-11-03 23:09:45 -0400 28) #include <linux/xarray.h>
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 29)
/*
 * Radix tree node cache.
 *
 * Slab cache backing all struct radix_tree_node allocations in this file.
 * NOTE(review): presumably created by the tree's init code at boot — the
 * kmem_cache_create() call is not visible in this chunk; confirm there.
 */
struct kmem_cache *radix_tree_node_cachep;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 34)
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 35) /*
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 36) * The radix tree is variable-height, so an insert operation not only has
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 37) * to build the branch to its corresponding item, it also has to build the
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 38) * branch to existing items if the size has to be increased (by
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 39) * radix_tree_extend).
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 40) *
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 41) * The worst case is a zero height tree with just a single item at index 0,
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 42) * and then inserting an item at index ULONG_MAX. This requires 2 new branches
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 43) * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 44) * Hence:
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 45) */
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 46) #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
5536805292e64 (Nicholas Piggin 2012-05-29 15:07:34 -0700 47)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 48) /*
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 49) * The IDR does not have to be as high as the radix tree since it uses
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 50) * signed integers, not unsigned longs.
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 51) */
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 52) #define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 53) #define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 54) RADIX_TREE_MAP_SHIFT))
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 55) #define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 56)
/*
 * Per-cpu pool of preloaded nodes
 *
 * Refilled by __radix_tree_preload() and consumed by radix_tree_node_alloc();
 * the embedded local_lock serialises access to each CPU's pool.
 */
DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 64)
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 65) static inline struct radix_tree_node *entry_to_node(void *ptr)
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 66) {
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 67) return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 68) }
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 69)
a4db4dcea1b39 (Matthew Wilcox 2016-05-20 17:03:24 -0700 70) static inline void *node_to_entry(void *ptr)
27d20fddc8af5 (Nicholas Piggin 2010-11-11 14:05:19 -0800 71) {
30ff46ccb303f (Matthew Wilcox 2016-05-20 17:03:22 -0700 72) return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
27d20fddc8af5 (Nicholas Piggin 2010-11-11 14:05:19 -0800 73) }
27d20fddc8af5 (Nicholas Piggin 2010-11-11 14:05:19 -0800 74)
02c02bf12c5d8 (Matthew Wilcox 2017-11-03 23:09:45 -0400 75) #define RADIX_TREE_RETRY XA_RETRY_ENTRY
db050f2924fcf (Matthew Wilcox 2016-05-20 17:01:57 -0700 76)
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 77) static inline unsigned long
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 78) get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
db050f2924fcf (Matthew Wilcox 2016-05-20 17:01:57 -0700 79) {
76f070b413556 (Matthew Wilcox 2018-08-18 07:05:50 -0400 80) return parent ? slot - parent->slots : 0;
db050f2924fcf (Matthew Wilcox 2016-05-20 17:01:57 -0700 81) }
db050f2924fcf (Matthew Wilcox 2016-05-20 17:01:57 -0700 82)
/*
 * Descend one level: compute which slot of @parent covers @index, store
 * that slot's contents in *@nodep and return the slot offset.
 */
static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);

	*nodep = (void *)entry;
	return offset;
}
db050f2924fcf (Matthew Wilcox 2016-05-20 17:01:57 -0700 92)
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 93) static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 94) {
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 95) return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 96) }
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 97)
/* Set @tag for the slot at @offset in @node (non-atomic bitop). */
static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 103)
/* Clear @tag for the slot at @offset in @node (non-atomic bitop). */
static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 109)
/* Test whether @tag is set for the slot at @offset in @node. */
static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 115)
/* Set @tag on the root; root tags live above ROOT_TAG_SHIFT in xa_flags. */
static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 120)
/* Clear @tag on the root; root tags live above ROOT_TAG_SHIFT in xa_flags. */
static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 125)
/* Clear every root tag by keeping only the bits below ROOT_TAG_SHIFT. */
static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
}
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 130)
/* Returns non-zero if @tag is set on the root, 0 otherwise. */
static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
	return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
}
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 135)
/* Returns all root tags as a bitmask, with tag 0 in bit 0. */
static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
	return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
}
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 140)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 141) static inline bool is_idr(const struct radix_tree_root *root)
7b60e9ad59a31 (Matthew Wilcox 2016-05-20 17:02:23 -0700 142) {
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 143) return !!(root->xa_flags & ROOT_IS_IDR);
7b60e9ad59a31 (Matthew Wilcox 2016-05-20 17:02:23 -0700 144) }
7b60e9ad59a31 (Matthew Wilcox 2016-05-20 17:02:23 -0700 145)
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 146) /*
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 147) * Returns 1 if any slot in the node has this tag set.
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 148) * Otherwise returns 0.
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 149) */
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 150) static inline int any_tag_set(const struct radix_tree_node *node,
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 151) unsigned int tag)
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 152) {
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 153) unsigned idx;
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 154) for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 155) if (node->tags[tag][idx])
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 156) return 1;
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 157) }
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 158) return 0;
643b52b9c0b4e (Nicholas Piggin 2008-06-12 15:21:52 -0700 159) }
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 160)
/* Set @tag on every slot of @node. */
static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 165)
/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @node: where to begin the search
 * @tag: the tag index
 * @offset: the bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or size if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		/*
		 * Examine the word containing @offset first, shifting away
		 * the bits below @offset.
		 */
		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		/* Round up to the next word boundary, then scan whole words. */
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 200)
/* Offset of the iterator's current index within its leaf node. */
static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
	return iter->index & RADIX_TREE_MAP_MASK;
}
268f42de71812 (Matthew Wilcox 2016-12-14 15:08:55 -0800 205)
/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	/* A node at @shift spans RADIX_TREE_MAP_SIZE << shift indices. */
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}
218ed7503aee0 (Matthew Wilcox 2016-12-14 15:08:43 -0800 213)
/* The maximum index reachable through @node's subtree. */
static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}
218ed7503aee0 (Matthew Wilcox 2016-12-14 15:08:43 -0800 218)
/*
 * The lowest index covered by slot @offset of @node: @index with all
 * bits below @node's level cleared, plus @offset scaled into position
 * by the node's shift.
 */
static unsigned long next_index(unsigned long index,
				const struct radix_tree_node *node,
				unsigned long offset)
{
	return (index & ~node_maxindex(node)) + (offset << node->shift);
}
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 225)
/*
 * Allocate a node and initialise its position fields (@shift, @offset),
 * counters (@count, @nr_values) and back-pointers (@parent, @root).
 *
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
			struct radix_tree_root *root,
			unsigned int shift, unsigned int offset,
			unsigned int count, unsigned int nr_values)
{
	struct radix_tree_node *ret = NULL;

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			/* Pop the head of the preload list (linked via ->parent). */
			ret = rtp->nodes;
			rtp->nodes = ret->parent;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	if (ret) {
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->nr_values = nr_values;
		ret->parent = parent;
		ret->array = root;
	}
	return ret;
}
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 287)
/*
 * RCU callback invoked after a grace period; see radix_tree_node_free(),
 * which queues nodes here via call_rcu().
 */
void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);

	/*
	 * Must only free zeroed nodes into the slab. We can be left with
	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
	 * and tags here.
	 */
	memset(node->slots, 0, sizeof(node->slots));
	memset(node->tags, 0, sizeof(node->tags));
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 304)
/*
 * Defer freeing of @node to radix_tree_node_rcu_free() after an RCU
 * grace period has elapsed.
 */
static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 310)
/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail. On
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	local_lock(&radix_tree_preloads.lock);
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		/*
		 * Drop the lock across the allocation (which may block for
		 * a blocking gfp_mask).  We may migrate to another CPU
		 * meanwhile, so the per-cpu pointer must be re-fetched
		 * after re-taking the lock below.
		 */
		local_unlock(&radix_tree_preloads.lock);
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		local_lock(&radix_tree_preloads.lock);
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			/* Push onto the preload list, linked via ->parent. */
			node->parent = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			/* This CPU's pool was refilled while we slept. */
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 353)
/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail. On
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use: preloading needs a blocking gfp_mask. */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 370)
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 371) /*
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 372) * The same as above function, except we don't guarantee preloading happens.
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 373) * We do it, if we decide it helps. On success, return zero with preemption
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 374) * disabled. On error, return -ENOMEM with preemption not disabled.
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 375) */
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 376) int radix_tree_maybe_preload(gfp_t gfp_mask)
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 377) {
d0164adc89f6b (Mel Gorman 2015-11-06 16:28:21 -0800 378) if (gfpflags_allow_blocking(gfp_mask))
c78c66d1ddfdb (Kirill A. Shutemov 2016-07-26 15:26:02 -0700 379) return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 380) /* Preloading doesn't help anything with this gfp mask, skip it */
cfa6705d89b65 (Sebastian Andrzej Siewior 2020-05-27 22:11:14 +0200 381) local_lock(&radix_tree_preloads.lock);
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 382) return 0;
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 383) }
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 384) EXPORT_SYMBOL(radix_tree_maybe_preload);
5e4c0d974139a (Jan Kara 2013-09-11 14:26:05 -0700 385)
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 386) static unsigned radix_tree_load_root(const struct radix_tree_root *root,
1456a439fc2dc (Matthew Wilcox 2016-05-20 17:02:08 -0700 387) struct radix_tree_node **nodep, unsigned long *maxindex)
1456a439fc2dc (Matthew Wilcox 2016-05-20 17:02:08 -0700 388) {
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 389) struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
1456a439fc2dc (Matthew Wilcox 2016-05-20 17:02:08 -0700 390)
1456a439fc2dc (Matthew Wilcox 2016-05-20 17:02:08 -0700 391) *nodep = node;
1456a439fc2dc (Matthew Wilcox 2016-05-20 17:02:08 -0700 392)
b194d16c27af9 (Matthew Wilcox 2016-05-20 17:03:30 -0700 393) if (likely(radix_tree_is_internal_node(node))) {
4dd6c0987ca43 (Matthew Wilcox 2016-05-20 17:03:27 -0700 394) node = entry_to_node(node);
1456a439fc2dc (Matthew Wilcox 2016-05-20 17:02:08 -0700 395) *maxindex = node_maxindex(node);
c12e51b07b3ac (Matthew Wilcox 2016-05-20 17:03:10 -0700 396) return node->shift + RADIX_TREE_MAP_SHIFT;
1456a439fc2dc (Matthew Wilcox 2016-05-20 17:02:08 -0700 397) }
1456a439fc2dc (Matthew Wilcox 2016-05-20 17:02:08 -0700 398)
1456a439fc2dc (Matthew Wilcox 2016-05-20 17:02:08 -0700 399) *maxindex = 0;
1456a439fc2dc (Matthew Wilcox 2016-05-20 17:02:08 -0700 400) return 0;
1456a439fc2dc (Matthew Wilcox 2016-05-20 17:02:08 -0700 401) }
1456a439fc2dc (Matthew Wilcox 2016-05-20 17:02:08 -0700 402)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 403) /*
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 404) * Extend a radix tree so it can store key @index.
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 405) */
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 406) static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
d0891265bbc98 (Matthew Wilcox 2016-05-20 17:03:19 -0700 407) unsigned long index, unsigned int shift)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 408) {
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 409) void *entry;
d0891265bbc98 (Matthew Wilcox 2016-05-20 17:03:19 -0700 410) unsigned int maxshift;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 411) int tag;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 412)
d0891265bbc98 (Matthew Wilcox 2016-05-20 17:03:19 -0700 413) /* Figure out what the shift should be. */
d0891265bbc98 (Matthew Wilcox 2016-05-20 17:03:19 -0700 414) maxshift = shift;
d0891265bbc98 (Matthew Wilcox 2016-05-20 17:03:19 -0700 415) while (index > shift_maxindex(maxshift))
d0891265bbc98 (Matthew Wilcox 2016-05-20 17:03:19 -0700 416) maxshift += RADIX_TREE_MAP_SHIFT;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 417)
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 418) entry = rcu_dereference_raw(root->xa_head);
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 419) if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 420) goto out;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 421)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 422) do {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 423) struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
d58275bc96ae9 (Matthew Wilcox 2017-01-16 17:10:21 -0500 424) root, shift, 0, 1, 0);
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 425) if (!node)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 426) return -ENOMEM;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 427)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 428) if (is_idr(root)) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 429) all_tag_set(node, IDR_FREE);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 430) if (!root_tag_get(root, IDR_FREE)) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 431) tag_clear(node, IDR_FREE, 0);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 432) root_tag_set(root, IDR_FREE);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 433) }
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 434) } else {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 435) /* Propagate the aggregated tag info to the new child */
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 436) for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 437) if (root_tag_get(root, tag))
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 438) tag_set(node, tag, 0);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 439) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 440) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 441)
d0891265bbc98 (Matthew Wilcox 2016-05-20 17:03:19 -0700 442) BUG_ON(shift > BITS_PER_LONG);
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 443) if (radix_tree_is_internal_node(entry)) {
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 444) entry_to_node(entry)->parent = node;
3159f943aafdb (Matthew Wilcox 2017-11-03 13:30:42 -0400 445) } else if (xa_is_value(entry)) {
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 446) /* Moving a value entry root->xa_head to a node */
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 447) node->nr_values = 1;
f7942430e40f1 (Johannes Weiner 2016-12-12 16:43:41 -0800 448) }
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 449) /*
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 450) * entry was already in the radix tree, so we do not need
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 451) * rcu_assign_pointer here
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 452) */
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 453) node->slots[0] = (void __rcu *)entry;
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 454) entry = node_to_entry(node);
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 455) rcu_assign_pointer(root->xa_head, entry);
d0891265bbc98 (Matthew Wilcox 2016-05-20 17:03:19 -0700 456) shift += RADIX_TREE_MAP_SHIFT;
d0891265bbc98 (Matthew Wilcox 2016-05-20 17:03:19 -0700 457) } while (shift <= maxshift);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 458) out:
d0891265bbc98 (Matthew Wilcox 2016-05-20 17:03:19 -0700 459) return maxshift + RADIX_TREE_MAP_SHIFT;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 460) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 461)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 462) /**
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 463) * radix_tree_shrink - shrink radix tree to minimum height
c95c2d328cd05 (Randy Dunlap 2021-04-16 15:46:26 -0700 464) * @root: radix tree root
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 465) */
1cf56f9d670b8 (Matthew Wilcox 2018-04-09 16:24:45 -0400 466) static inline bool radix_tree_shrink(struct radix_tree_root *root)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 467) {
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 468) bool shrunk = false;
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 469)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 470) for (;;) {
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 471) struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 472) struct radix_tree_node *child;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 473)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 474) if (!radix_tree_is_internal_node(node))
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 475) break;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 476) node = entry_to_node(node);
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 477)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 478) /*
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 479) * The candidate node has more than one child, or its child
3a08cd52c37c7 (Matthew Wilcox 2018-09-22 16:14:30 -0400 480) * is not at the leftmost slot, we cannot shrink.
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 481) */
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 482) if (node->count != 1)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 483) break;
12320d0ff1c9d (Matthew Wilcox 2017-02-13 15:22:48 -0500 484) child = rcu_dereference_raw(node->slots[0]);
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 485) if (!child)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 486) break;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 487)
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 488) /*
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 489) * For an IDR, we must not shrink entry 0 into the root in
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 490) * case somebody calls idr_replace() with a pointer that
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 491) * appears to be an internal entry
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 492) */
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 493) if (!node->shift && is_idr(root))
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 494) break;
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 495)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 496) if (radix_tree_is_internal_node(child))
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 497) entry_to_node(child)->parent = NULL;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 498)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 499) /*
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 500) * We don't need rcu_assign_pointer(), since we are simply
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 501) * moving the node from one part of the tree to another: if it
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 502) * was safe to dereference the old pointer to it
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 503) * (node->slots[0]), it will be safe to dereference the new
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 504) * one (root->xa_head) as far as dependent read barriers go.
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 505) */
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 506) root->xa_head = (void __rcu *)child;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 507) if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 508) root_tag_clear(root, IDR_FREE);
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 509)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 510) /*
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 511) * We have a dilemma here. The node's slot[0] must not be
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 512) * NULLed in case there are concurrent lookups expecting to
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 513) * find the item. However if this was a bottom-level node,
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 514) * then it may be subject to the slot pointer being visible
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 515) * to callers dereferencing it. If item corresponding to
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 516) * slot[0] is subsequently deleted, these callers would expect
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 517) * their slot to become empty sooner or later.
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 518) *
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 519) * For example, lockless pagecache will look up a slot, deref
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 520) * the page pointer, and if the page has 0 refcount it means it
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 521) * was concurrently deleted from pagecache so try the deref
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 522) * again. Fortunately there is already a requirement for logic
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 523) * to retry the entire slot lookup -- the indirect pointer
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 524) * problem (replacing direct root node with an indirect pointer
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 525) * also results in a stale slot). So tag the slot as indirect
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 526) * to force callers to retry.
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 527) */
4d693d08607ab (Johannes Weiner 2016-12-12 16:43:49 -0800 528) node->count = 0;
4d693d08607ab (Johannes Weiner 2016-12-12 16:43:49 -0800 529) if (!radix_tree_is_internal_node(child)) {
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 530) node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
4d693d08607ab (Johannes Weiner 2016-12-12 16:43:49 -0800 531) }
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 532)
ea07b862ac8ef (Johannes Weiner 2017-01-06 19:21:43 -0500 533) WARN_ON_ONCE(!list_empty(&node->private_list));
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 534) radix_tree_node_free(node);
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 535) shrunk = true;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 536) }
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 537)
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 538) return shrunk;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 539) }
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 540)
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 541) static bool delete_node(struct radix_tree_root *root,
1cf56f9d670b8 (Matthew Wilcox 2018-04-09 16:24:45 -0400 542) struct radix_tree_node *node)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 543) {
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 544) bool deleted = false;
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 545)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 546) do {
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 547) struct radix_tree_node *parent;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 548)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 549) if (node->count) {
12320d0ff1c9d (Matthew Wilcox 2017-02-13 15:22:48 -0500 550) if (node_to_entry(node) ==
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 551) rcu_dereference_raw(root->xa_head))
1cf56f9d670b8 (Matthew Wilcox 2018-04-09 16:24:45 -0400 552) deleted |= radix_tree_shrink(root);
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 553) return deleted;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 554) }
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 555)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 556) parent = node->parent;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 557) if (parent) {
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 558) parent->slots[node->offset] = NULL;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 559) parent->count--;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 560) } else {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 561) /*
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 562) * Shouldn't the tags already have all been cleared
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 563) * by the caller?
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 564) */
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 565) if (!is_idr(root))
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 566) root_tag_clear_all(root);
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 567) root->xa_head = NULL;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 568) }
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 569)
ea07b862ac8ef (Johannes Weiner 2017-01-06 19:21:43 -0500 570) WARN_ON_ONCE(!list_empty(&node->private_list));
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 571) radix_tree_node_free(node);
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 572) deleted = true;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 573)
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 574) node = parent;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 575) } while (node);
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 576)
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 577) return deleted;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 578) }
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 579)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 580) /**
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 581) * __radix_tree_create - create a slot in a radix tree
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 582) * @root: radix tree root
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 583) * @index: index key
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 584) * @nodep: returns node
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 585) * @slotp: returns slot
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 586) *
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 587) * Create, if necessary, and return the node and slot for an item
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 588) * at position @index in the radix tree @root.
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 589) *
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 590) * Until there is more than one item in the tree, no nodes are
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 591) * allocated and @root->xa_head is used as a direct slot instead of
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 592) * pointing to a node, in which case *@nodep will be NULL.
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 593) *
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 594) * Returns -ENOMEM, or 0 for success.
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 595) */
74d609585d8bd (Matthew Wilcox 2017-11-17 10:01:45 -0500 596) static int __radix_tree_create(struct radix_tree_root *root,
3a08cd52c37c7 (Matthew Wilcox 2018-09-22 16:14:30 -0400 597) unsigned long index, struct radix_tree_node **nodep,
3a08cd52c37c7 (Matthew Wilcox 2018-09-22 16:14:30 -0400 598) void __rcu ***slotp)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 599) {
89148aa40201d (Matthew Wilcox 2016-05-20 17:03:42 -0700 600) struct radix_tree_node *node = NULL, *child;
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 601) void __rcu **slot = (void __rcu **)&root->xa_head;
49ea6ebcd3080 (Matthew Wilcox 2016-05-20 17:02:11 -0700 602) unsigned long maxindex;
89148aa40201d (Matthew Wilcox 2016-05-20 17:03:42 -0700 603) unsigned int shift, offset = 0;
3a08cd52c37c7 (Matthew Wilcox 2018-09-22 16:14:30 -0400 604) unsigned long max = index;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 605) gfp_t gfp = root_gfp_mask(root);
49ea6ebcd3080 (Matthew Wilcox 2016-05-20 17:02:11 -0700 606)
89148aa40201d (Matthew Wilcox 2016-05-20 17:03:42 -0700 607) shift = radix_tree_load_root(root, &child, &maxindex);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 608)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 609) /* Make sure the tree is high enough. */
49ea6ebcd3080 (Matthew Wilcox 2016-05-20 17:02:11 -0700 610) if (max > maxindex) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 611) int error = radix_tree_extend(root, gfp, max, shift);
49ea6ebcd3080 (Matthew Wilcox 2016-05-20 17:02:11 -0700 612) if (error < 0)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 613) return error;
49ea6ebcd3080 (Matthew Wilcox 2016-05-20 17:02:11 -0700 614) shift = error;
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 615) child = rcu_dereference_raw(root->xa_head);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 616) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 617)
3a08cd52c37c7 (Matthew Wilcox 2018-09-22 16:14:30 -0400 618) while (shift > 0) {
c12e51b07b3ac (Matthew Wilcox 2016-05-20 17:03:10 -0700 619) shift -= RADIX_TREE_MAP_SHIFT;
89148aa40201d (Matthew Wilcox 2016-05-20 17:03:42 -0700 620) if (child == NULL) {
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 621) /* Have to add a child node. */
d58275bc96ae9 (Matthew Wilcox 2017-01-16 17:10:21 -0500 622) child = radix_tree_node_alloc(gfp, node, root, shift,
e8de4340767dd (Matthew Wilcox 2016-12-14 15:09:31 -0800 623) offset, 0, 0);
89148aa40201d (Matthew Wilcox 2016-05-20 17:03:42 -0700 624) if (!child)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 625) return -ENOMEM;
89148aa40201d (Matthew Wilcox 2016-05-20 17:03:42 -0700 626) rcu_assign_pointer(*slot, node_to_entry(child));
89148aa40201d (Matthew Wilcox 2016-05-20 17:03:42 -0700 627) if (node)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 628) node->count++;
89148aa40201d (Matthew Wilcox 2016-05-20 17:03:42 -0700 629) } else if (!radix_tree_is_internal_node(child))
e614523653725 (Matthew Wilcox 2016-03-17 14:21:54 -0700 630) break;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 631)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 632) /* Go a level down */
89148aa40201d (Matthew Wilcox 2016-05-20 17:03:42 -0700 633) node = entry_to_node(child);
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 634) offset = radix_tree_descend(node, &child, index);
89148aa40201d (Matthew Wilcox 2016-05-20 17:03:42 -0700 635) slot = &node->slots[offset];
e614523653725 (Matthew Wilcox 2016-03-17 14:21:54 -0700 636) }
e614523653725 (Matthew Wilcox 2016-03-17 14:21:54 -0700 637)
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 638) if (nodep)
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 639) *nodep = node;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 640) if (slotp)
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 641) *slotp = slot;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 642) return 0;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 643) }
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 644)
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 645) /*
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 646) * Free any nodes below this node. The tree is presumed to not need
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 647) * shrinking, and any user data in the tree is presumed to not need a
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 648) * destructor called on it. If we need to add a destructor, we can
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 649) * add that functionality later. Note that we may not clear tags or
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 650) * slots from the tree as an RCU walker may still have a pointer into
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 651) * this subtree. We could replace the entries with RADIX_TREE_RETRY,
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 652) * but we'll still have to clear those in rcu_free.
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 653) */
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 654) static void radix_tree_free_nodes(struct radix_tree_node *node)
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 655) {
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 656) unsigned offset = 0;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 657) struct radix_tree_node *child = entry_to_node(node);
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 658)
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 659) for (;;) {
12320d0ff1c9d (Matthew Wilcox 2017-02-13 15:22:48 -0500 660) void *entry = rcu_dereference_raw(child->slots[offset]);
02c02bf12c5d8 (Matthew Wilcox 2017-11-03 23:09:45 -0400 661) if (xa_is_node(entry) && child->shift) {
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 662) child = entry_to_node(entry);
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 663) offset = 0;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 664) continue;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 665) }
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 666) offset++;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 667) while (offset == RADIX_TREE_MAP_SIZE) {
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 668) struct radix_tree_node *old = child;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 669) offset = child->offset + 1;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 670) child = child->parent;
dd040b6f6d563 (Matthew Wilcox 2017-01-24 15:18:16 -0800 671) WARN_ON_ONCE(!list_empty(&old->private_list));
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 672) radix_tree_node_free(old);
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 673) if (old == entry_to_node(node))
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 674) return;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 675) }
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 676) }
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 677) }
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 678)
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 679) static inline int insert_entries(struct radix_tree_node *node,
3a08cd52c37c7 (Matthew Wilcox 2018-09-22 16:14:30 -0400 680) void __rcu **slot, void *item, bool replace)
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 681) {
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 682) if (*slot)
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 683) return -EEXIST;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 684) rcu_assign_pointer(*slot, item);
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 685) if (node) {
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 686) node->count++;
3159f943aafdb (Matthew Wilcox 2017-11-03 13:30:42 -0400 687) if (xa_is_value(item))
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 688) node->nr_values++;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 689) }
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 690) return 1;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 691) }
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 692)
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 693) /**
c95c2d328cd05 (Randy Dunlap 2021-04-16 15:46:26 -0700 694) * radix_tree_insert - insert into a radix tree
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 695) * @root: radix tree root
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 696) * @index: index key
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 697) * @item: item to insert
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 698) *
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 699) * Insert an item into the radix tree at position @index.
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 700) */
3a08cd52c37c7 (Matthew Wilcox 2018-09-22 16:14:30 -0400 701) int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
3a08cd52c37c7 (Matthew Wilcox 2018-09-22 16:14:30 -0400 702) void *item)
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 703) {
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 704) struct radix_tree_node *node;
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 705) void __rcu **slot;
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 706) int error;
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 707)
b194d16c27af9 (Matthew Wilcox 2016-05-20 17:03:30 -0700 708) BUG_ON(radix_tree_is_internal_node(item));
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 709)
3a08cd52c37c7 (Matthew Wilcox 2018-09-22 16:14:30 -0400 710) error = __radix_tree_create(root, index, &node, &slot);
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 711) if (error)
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 712) return error;
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 713)
3a08cd52c37c7 (Matthew Wilcox 2018-09-22 16:14:30 -0400 714) error = insert_entries(node, slot, item, false);
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 715) if (error < 0)
175542f575723 (Matthew Wilcox 2016-12-14 15:08:58 -0800 716) return error;
201b6264ff386 (Christoph Lameter 2005-09-06 15:16:46 -0700 717)
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 718) if (node) {
7b60e9ad59a31 (Matthew Wilcox 2016-05-20 17:02:23 -0700 719) unsigned offset = get_slot_offset(node, slot);
7b60e9ad59a31 (Matthew Wilcox 2016-05-20 17:02:23 -0700 720) BUG_ON(tag_get(node, 0, offset));
7b60e9ad59a31 (Matthew Wilcox 2016-05-20 17:02:23 -0700 721) BUG_ON(tag_get(node, 1, offset));
7b60e9ad59a31 (Matthew Wilcox 2016-05-20 17:02:23 -0700 722) BUG_ON(tag_get(node, 2, offset));
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 723) } else {
7b60e9ad59a31 (Matthew Wilcox 2016-05-20 17:02:23 -0700 724) BUG_ON(root_tags_get(root));
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 725) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 726)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 727) return 0;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 728) }
3a08cd52c37c7 (Matthew Wilcox 2018-09-22 16:14:30 -0400 729) EXPORT_SYMBOL(radix_tree_insert);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 730)
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 731) /**
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 732) * __radix_tree_lookup - lookup an item in a radix tree
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 733) * @root: radix tree root
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 734) * @index: index key
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 735) * @nodep: returns node
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 736) * @slotp: returns slot
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 737) *
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 738) * Lookup and return the item at position @index in the radix
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 739) * tree @root.
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 740) *
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 741) * Until there is more than one item in the tree, no nodes are
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 742) * allocated and @root->xa_head is used as a direct slot instead of
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 743) * pointing to a node, in which case *@nodep will be NULL.
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 744) */
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 745) void *__radix_tree_lookup(const struct radix_tree_root *root,
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 746) unsigned long index, struct radix_tree_node **nodep,
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 747) void __rcu ***slotp)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 748) {
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 749) struct radix_tree_node *node, *parent;
858299544efcf (Matthew Wilcox 2016-05-20 17:02:20 -0700 750) unsigned long maxindex;
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 751) void __rcu **slot;
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 752)
858299544efcf (Matthew Wilcox 2016-05-20 17:02:20 -0700 753) restart:
858299544efcf (Matthew Wilcox 2016-05-20 17:02:20 -0700 754) parent = NULL;
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 755) slot = (void __rcu **)&root->xa_head;
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 756) radix_tree_load_root(root, &node, &maxindex);
858299544efcf (Matthew Wilcox 2016-05-20 17:02:20 -0700 757) if (index > maxindex)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 758) return NULL;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 759)
b194d16c27af9 (Matthew Wilcox 2016-05-20 17:03:30 -0700 760) while (radix_tree_is_internal_node(node)) {
858299544efcf (Matthew Wilcox 2016-05-20 17:02:20 -0700 761) unsigned offset;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 762)
4dd6c0987ca43 (Matthew Wilcox 2016-05-20 17:03:27 -0700 763) parent = entry_to_node(node);
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 764) offset = radix_tree_descend(parent, &node, index);
858299544efcf (Matthew Wilcox 2016-05-20 17:02:20 -0700 765) slot = parent->slots + offset;
eff3860bbfedb (Matthew Wilcox 2018-12-06 08:19:13 -0500 766) if (node == RADIX_TREE_RETRY)
eff3860bbfedb (Matthew Wilcox 2018-12-06 08:19:13 -0500 767) goto restart;
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 768) if (parent->shift == 0)
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 769) break;
858299544efcf (Matthew Wilcox 2016-05-20 17:02:20 -0700 770) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 771)
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 772) if (nodep)
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 773) *nodep = parent;
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 774) if (slotp)
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 775) *slotp = slot;
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 776) return node;
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 777) }
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 778)
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 779) /**
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 780) * radix_tree_lookup_slot - lookup a slot in a radix tree
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 781) * @root: radix tree root
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 782) * @index: index key
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 783) *
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 784) * Returns: the slot corresponding to the position @index in the
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 785) * radix tree @root. This is useful for update-if-exists operations.
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 786) *
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 787) * This function can be called under rcu_read_lock iff the slot is not
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 788) * modified by radix_tree_replace_slot, otherwise it must be called
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 789) * exclusive from other writers. Any dereference of the slot must be done
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 790) * using radix_tree_deref_slot.
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 791) */
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 792) void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 793) unsigned long index)
b72b71c6cb6ec (Huang Shijie 2009-06-16 15:33:42 -0700 794) {
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 795) void __rcu **slot;
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 796)
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 797) if (!__radix_tree_lookup(root, index, NULL, &slot))
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 798) return NULL;
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 799) return slot;
a43313668f62a (Hans Reiser 2005-11-07 00:59:29 -0800 800) }
a43313668f62a (Hans Reiser 2005-11-07 00:59:29 -0800 801) EXPORT_SYMBOL(radix_tree_lookup_slot);
a43313668f62a (Hans Reiser 2005-11-07 00:59:29 -0800 802)
a43313668f62a (Hans Reiser 2005-11-07 00:59:29 -0800 803) /**
a43313668f62a (Hans Reiser 2005-11-07 00:59:29 -0800 804) * radix_tree_lookup - perform lookup operation on a radix tree
a43313668f62a (Hans Reiser 2005-11-07 00:59:29 -0800 805) * @root: radix tree root
a43313668f62a (Hans Reiser 2005-11-07 00:59:29 -0800 806) * @index: index key
a43313668f62a (Hans Reiser 2005-11-07 00:59:29 -0800 807) *
a43313668f62a (Hans Reiser 2005-11-07 00:59:29 -0800 808) * Lookup the item at the position @index in the radix tree @root.
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 809) *
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 810) * This function can be called under rcu_read_lock, however the caller
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 811) * must manage lifetimes of leaf nodes (eg. RCU may also be used to free
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 812) * them safely). No RCU barriers are required to access or modify the
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 813) * returned item, however.
a43313668f62a (Hans Reiser 2005-11-07 00:59:29 -0800 814) */
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 815) void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
a43313668f62a (Hans Reiser 2005-11-07 00:59:29 -0800 816) {
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 817) return __radix_tree_lookup(root, index, NULL, NULL);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 818) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 819) EXPORT_SYMBOL(radix_tree_lookup);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 820)
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 821) static void replace_slot(void __rcu **slot, void *item,
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 822) struct radix_tree_node *node, int count, int values)
f7942430e40f1 (Johannes Weiner 2016-12-12 16:43:41 -0800 823) {
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 824) if (node && (count || values)) {
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 825) node->count += count;
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 826) node->nr_values += values;
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 827) }
f7942430e40f1 (Johannes Weiner 2016-12-12 16:43:41 -0800 828)
f7942430e40f1 (Johannes Weiner 2016-12-12 16:43:41 -0800 829) rcu_assign_pointer(*slot, item);
f7942430e40f1 (Johannes Weiner 2016-12-12 16:43:41 -0800 830) }
f7942430e40f1 (Johannes Weiner 2016-12-12 16:43:41 -0800 831)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 832) static bool node_tag_get(const struct radix_tree_root *root,
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 833) const struct radix_tree_node *node,
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 834) unsigned int tag, unsigned int offset)
a90eb3a2a405c (Matthew Wilcox 2016-12-14 15:09:07 -0800 835) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 836) if (node)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 837) return tag_get(node, tag, offset);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 838) return root_tag_get(root, tag);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 839) }
a90eb3a2a405c (Matthew Wilcox 2016-12-14 15:09:07 -0800 840)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 841) /*
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 842) * IDR users want to be able to store NULL in the tree, so if the slot isn't
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 843) * free, don't adjust the count, even if it's transitioning between NULL and
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 844) * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 845) * have empty bits, but it only stores NULL in slots when they're being
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 846) * deleted.
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 847) */
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 848) static int calculate_count(struct radix_tree_root *root,
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 849) struct radix_tree_node *node, void __rcu **slot,
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 850) void *item, void *old)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 851) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 852) if (is_idr(root)) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 853) unsigned offset = get_slot_offset(node, slot);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 854) bool free = node_tag_get(root, node, IDR_FREE, offset);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 855) if (!free)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 856) return 0;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 857) if (!old)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 858) return 1;
a90eb3a2a405c (Matthew Wilcox 2016-12-14 15:09:07 -0800 859) }
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 860) return !!item - !!old;
a90eb3a2a405c (Matthew Wilcox 2016-12-14 15:09:07 -0800 861) }
a90eb3a2a405c (Matthew Wilcox 2016-12-14 15:09:07 -0800 862)
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 863) /**
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 864) * __radix_tree_replace - replace item in a slot
4d693d08607ab (Johannes Weiner 2016-12-12 16:43:49 -0800 865) * @root: radix tree root
4d693d08607ab (Johannes Weiner 2016-12-12 16:43:49 -0800 866) * @node: pointer to tree node
4d693d08607ab (Johannes Weiner 2016-12-12 16:43:49 -0800 867) * @slot: pointer to slot in @node
4d693d08607ab (Johannes Weiner 2016-12-12 16:43:49 -0800 868) * @item: new item to store in the slot.
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 869) *
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 870) * For use with __radix_tree_lookup(). Caller must hold tree write locked
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 871) * across slot lookup and replacement.
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 872) */
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 873) void __radix_tree_replace(struct radix_tree_root *root,
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 874) struct radix_tree_node *node,
1cf56f9d670b8 (Matthew Wilcox 2018-04-09 16:24:45 -0400 875) void __rcu **slot, void *item)
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 876) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 877) void *old = rcu_dereference_raw(*slot);
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 878) int values = !!xa_is_value(item) - !!xa_is_value(old);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 879) int count = calculate_count(root, node, slot, item, old);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 880)
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 881) /*
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 882) * This function supports replacing value entries and
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 883) * deleting entries, but that needs accounting against the
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 884) * node unless the slot is root->xa_head.
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 885) */
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 886) WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 887) (count || values));
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 888) replace_slot(slot, item, node, count, values);
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 889)
4d693d08607ab (Johannes Weiner 2016-12-12 16:43:49 -0800 890) if (!node)
4d693d08607ab (Johannes Weiner 2016-12-12 16:43:49 -0800 891) return;
4d693d08607ab (Johannes Weiner 2016-12-12 16:43:49 -0800 892)
1cf56f9d670b8 (Matthew Wilcox 2018-04-09 16:24:45 -0400 893) delete_node(root, node);
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 894) }
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 895)
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 896) /**
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 897) * radix_tree_replace_slot - replace item in a slot
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 898) * @root: radix tree root
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 899) * @slot: pointer to slot
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 900) * @item: new item to store in the slot.
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 901) *
7b8d046fba91d (Matthew Wilcox 2017-12-01 22:13:06 -0500 902) * For use with radix_tree_lookup_slot() and
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 903) * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 904) * across slot lookup and replacement.
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 905) *
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 906) * NOTE: This cannot be used to switch between non-entries (empty slots),
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 907) * regular entries, and value entries, as that requires accounting
f4b109c6dad54 (Johannes Weiner 2016-12-12 16:43:46 -0800 908) * inside the radix tree node. When switching from one type of entry or
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 909) * deleting, use __radix_tree_lookup() and __radix_tree_replace() or
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 910) * radix_tree_iter_replace().
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 911) */
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 912) void radix_tree_replace_slot(struct radix_tree_root *root,
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 913) void __rcu **slot, void *item)
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 914) {
1cf56f9d670b8 (Matthew Wilcox 2018-04-09 16:24:45 -0400 915) __radix_tree_replace(root, NULL, slot, item);
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 916) }
10257d7196867 (Song Liu 2017-01-11 10:00:51 -0800 917) EXPORT_SYMBOL(radix_tree_replace_slot);
6d75f366b9242 (Johannes Weiner 2016-12-12 16:43:43 -0800 918)
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 919) /**
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 920) * radix_tree_iter_replace - replace item in a slot
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 921) * @root: radix tree root
c95c2d328cd05 (Randy Dunlap 2021-04-16 15:46:26 -0700 922) * @iter: iterator state
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 923) * @slot: pointer to slot
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 924) * @item: new item to store in the slot.
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 925) *
2956c6644bfd9 (Matthew Wilcox 2018-05-19 16:47:47 -0400 926) * For use with radix_tree_for_each_slot().
2956c6644bfd9 (Matthew Wilcox 2018-05-19 16:47:47 -0400 927) * Caller must hold tree write locked.
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 928) */
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 929) void radix_tree_iter_replace(struct radix_tree_root *root,
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 930) const struct radix_tree_iter *iter,
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 931) void __rcu **slot, void *item)
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 932) {
1cf56f9d670b8 (Matthew Wilcox 2018-04-09 16:24:45 -0400 933) __radix_tree_replace(root, iter->node, slot, item);
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 934) }
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 935)
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 936) static void node_tag_set(struct radix_tree_root *root,
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 937) struct radix_tree_node *node,
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 938) unsigned int tag, unsigned int offset)
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 939) {
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 940) while (node) {
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 941) if (tag_get(node, tag, offset))
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 942) return;
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 943) tag_set(node, tag, offset);
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 944) offset = node->offset;
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 945) node = node->parent;
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 946) }
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 947)
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 948) if (!root_tag_get(root, tag))
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 949) root_tag_set(root, tag);
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 950) }
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 951)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 952) /**
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 953) * radix_tree_tag_set - set a tag on a radix tree node
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 954) * @root: radix tree root
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 955) * @index: index key
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 956) * @tag: tag index
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 957) *
daff89f324755 (Jonathan Corbet 2006-03-25 03:08:05 -0800 958) * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
daff89f324755 (Jonathan Corbet 2006-03-25 03:08:05 -0800 959) * corresponding to @index in the radix tree. From
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 960) * the root all the way down to the leaf node.
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 961) *
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 962) * Returns the address of the tagged item. Setting a tag on a not-present
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 963) * item is a bug.
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 964) */
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 965) void *radix_tree_tag_set(struct radix_tree_root *root,
daff89f324755 (Jonathan Corbet 2006-03-25 03:08:05 -0800 966) unsigned long index, unsigned int tag)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 967) {
fb969909dd189 (Ross Zwisler 2016-05-20 17:02:32 -0700 968) struct radix_tree_node *node, *parent;
fb969909dd189 (Ross Zwisler 2016-05-20 17:02:32 -0700 969) unsigned long maxindex;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 970)
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 971) radix_tree_load_root(root, &node, &maxindex);
fb969909dd189 (Ross Zwisler 2016-05-20 17:02:32 -0700 972) BUG_ON(index > maxindex);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 973)
b194d16c27af9 (Matthew Wilcox 2016-05-20 17:03:30 -0700 974) while (radix_tree_is_internal_node(node)) {
fb969909dd189 (Ross Zwisler 2016-05-20 17:02:32 -0700 975) unsigned offset;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 976)
4dd6c0987ca43 (Matthew Wilcox 2016-05-20 17:03:27 -0700 977) parent = entry_to_node(node);
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 978) offset = radix_tree_descend(parent, &node, index);
fb969909dd189 (Ross Zwisler 2016-05-20 17:02:32 -0700 979) BUG_ON(!node);
fb969909dd189 (Ross Zwisler 2016-05-20 17:02:32 -0700 980)
fb969909dd189 (Ross Zwisler 2016-05-20 17:02:32 -0700 981) if (!tag_get(parent, tag, offset))
fb969909dd189 (Ross Zwisler 2016-05-20 17:02:32 -0700 982) tag_set(parent, tag, offset);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 983) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 984)
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 985) /* set the root's tag bit */
fb969909dd189 (Ross Zwisler 2016-05-20 17:02:32 -0700 986) if (!root_tag_get(root, tag))
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 987) root_tag_set(root, tag);
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 988)
fb969909dd189 (Ross Zwisler 2016-05-20 17:02:32 -0700 989) return node;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 990) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 991) EXPORT_SYMBOL(radix_tree_tag_set);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 992)
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 993) static void node_tag_clear(struct radix_tree_root *root,
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 994) struct radix_tree_node *node,
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 995) unsigned int tag, unsigned int offset)
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 996) {
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 997) while (node) {
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 998) if (!tag_get(node, tag, offset))
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 999) return;
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1000) tag_clear(node, tag, offset);
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1001) if (any_tag_set(node, tag))
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1002) return;
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1003)
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1004) offset = node->offset;
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1005) node = node->parent;
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1006) }
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1007)
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1008) /* clear the root's tag bit */
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1009) if (root_tag_get(root, tag))
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1010) root_tag_clear(root, tag);
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1011) }
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1012)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1013) /**
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1014) * radix_tree_tag_clear - clear a tag on a radix tree node
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1015) * @root: radix tree root
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1016) * @index: index key
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1017) * @tag: tag index
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1018) *
daff89f324755 (Jonathan Corbet 2006-03-25 03:08:05 -0800 1019) * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1020) * corresponding to @index in the radix tree. If this causes
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1021) * the leaf node to have no tags set then clear the tag in the
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1022) * next-to-leaf node, etc.
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1023) *
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1024) * Returns the address of the tagged item on success, else NULL. ie:
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1025) * has the same return value and semantics as radix_tree_lookup().
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1026) */
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1027) void *radix_tree_tag_clear(struct radix_tree_root *root,
daff89f324755 (Jonathan Corbet 2006-03-25 03:08:05 -0800 1028) unsigned long index, unsigned int tag)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1029) {
00f47b581105b (Ross Zwisler 2016-05-20 17:02:35 -0700 1030) struct radix_tree_node *node, *parent;
00f47b581105b (Ross Zwisler 2016-05-20 17:02:35 -0700 1031) unsigned long maxindex;
3f649ab728cda (Kees Cook 2020-06-03 13:09:38 -0700 1032) int offset;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1033)
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 1034) radix_tree_load_root(root, &node, &maxindex);
00f47b581105b (Ross Zwisler 2016-05-20 17:02:35 -0700 1035) if (index > maxindex)
00f47b581105b (Ross Zwisler 2016-05-20 17:02:35 -0700 1036) return NULL;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1037)
00f47b581105b (Ross Zwisler 2016-05-20 17:02:35 -0700 1038) parent = NULL;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1039)
b194d16c27af9 (Matthew Wilcox 2016-05-20 17:03:30 -0700 1040) while (radix_tree_is_internal_node(node)) {
4dd6c0987ca43 (Matthew Wilcox 2016-05-20 17:03:27 -0700 1041) parent = entry_to_node(node);
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 1042) offset = radix_tree_descend(parent, &node, index);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1043) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1044)
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1045) if (node)
d604c324524bf (Matthew Wilcox 2016-05-20 17:03:45 -0700 1046) node_tag_clear(root, parent, tag, offset);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1047)
00f47b581105b (Ross Zwisler 2016-05-20 17:02:35 -0700 1048) return node;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1049) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1050) EXPORT_SYMBOL(radix_tree_tag_clear);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1051)
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 1052) /**
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 1053) * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 1054) * @root: radix tree root
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 1055) * @iter: iterator state
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 1056) * @tag: tag to clear
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 1057) */
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 1058) void radix_tree_iter_tag_clear(struct radix_tree_root *root,
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 1059) const struct radix_tree_iter *iter, unsigned int tag)
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 1060) {
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 1061) node_tag_clear(root, iter->node, tag, iter_offset(iter));
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 1062) }
30b888ba950d9 (Matthew Wilcox 2017-01-28 09:55:20 -0500 1063)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1064) /**
32605a18152b2 (Marcelo Tosatti 2005-09-06 15:16:48 -0700 1065) * radix_tree_tag_get - get a tag on a radix tree node
32605a18152b2 (Marcelo Tosatti 2005-09-06 15:16:48 -0700 1066) * @root: radix tree root
32605a18152b2 (Marcelo Tosatti 2005-09-06 15:16:48 -0700 1067) * @index: index key
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1068) * @tag: tag index (< RADIX_TREE_MAX_TAGS)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1069) *
32605a18152b2 (Marcelo Tosatti 2005-09-06 15:16:48 -0700 1070) * Return values:
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1071) *
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 1072) * 0: tag not present or not set
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 1073) * 1: tag set
ce82653d6cfcc (David Howells 2010-04-06 22:36:20 +0100 1074) *
ce82653d6cfcc (David Howells 2010-04-06 22:36:20 +0100 1075) * Note that the return value of this function may not be relied on, even if
ce82653d6cfcc (David Howells 2010-04-06 22:36:20 +0100 1076) * the RCU lock is held, unless tag modification and node deletion are excluded
ce82653d6cfcc (David Howells 2010-04-06 22:36:20 +0100 1077) * from concurrency.
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1078) */
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 1079) int radix_tree_tag_get(const struct radix_tree_root *root,
daff89f324755 (Jonathan Corbet 2006-03-25 03:08:05 -0800 1080) unsigned long index, unsigned int tag)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1081) {
4589ba6d0f439 (Ross Zwisler 2016-05-20 17:02:38 -0700 1082) struct radix_tree_node *node, *parent;
4589ba6d0f439 (Ross Zwisler 2016-05-20 17:02:38 -0700 1083) unsigned long maxindex;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1084)
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 1085) if (!root_tag_get(root, tag))
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 1086) return 0;
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 1087)
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 1088) radix_tree_load_root(root, &node, &maxindex);
4589ba6d0f439 (Ross Zwisler 2016-05-20 17:02:38 -0700 1089) if (index > maxindex)
4589ba6d0f439 (Ross Zwisler 2016-05-20 17:02:38 -0700 1090) return 0;
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 1091)
b194d16c27af9 (Matthew Wilcox 2016-05-20 17:03:30 -0700 1092) while (radix_tree_is_internal_node(node)) {
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 1093) unsigned offset;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1094)
4dd6c0987ca43 (Matthew Wilcox 2016-05-20 17:03:27 -0700 1095) parent = entry_to_node(node);
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 1096) offset = radix_tree_descend(parent, &node, index);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1097)
4589ba6d0f439 (Ross Zwisler 2016-05-20 17:02:38 -0700 1098) if (!tag_get(parent, tag, offset))
3fa36acbced23 (Hugh Dickins 2011-10-31 17:07:02 -0700 1099) return 0;
4589ba6d0f439 (Ross Zwisler 2016-05-20 17:02:38 -0700 1100) if (node == RADIX_TREE_RETRY)
4589ba6d0f439 (Ross Zwisler 2016-05-20 17:02:38 -0700 1101) break;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1102) }
4589ba6d0f439 (Ross Zwisler 2016-05-20 17:02:38 -0700 1103)
4589ba6d0f439 (Ross Zwisler 2016-05-20 17:02:38 -0700 1104) return 1;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1105) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1106) EXPORT_SYMBOL(radix_tree_tag_get);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1107)
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1108) /* Construct iter->tags bit-mask from node->tags[tag] array */
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1109) static void set_iter_tags(struct radix_tree_iter *iter,
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1110) struct radix_tree_node *node, unsigned offset,
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1111) unsigned tag)
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1112) {
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1113) unsigned tag_long = offset / BITS_PER_LONG;
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1114) unsigned tag_bit = offset % BITS_PER_LONG;
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1115)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1116) if (!node) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1117) iter->tags = 1;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1118) return;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1119) }
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1120)
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1121) iter->tags = node->tags[tag][tag_long] >> tag_bit;
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1122)
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1123) /* This never happens if RADIX_TREE_TAG_LONGS == 1 */
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1124) if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1125) /* Pick tags from next element */
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1126) if (tag_bit)
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1127) iter->tags |= node->tags[tag][tag_long + 1] <<
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1128) (BITS_PER_LONG - tag_bit);
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1129) /* Clip chunk size, here only BITS_PER_LONG tags */
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1130) iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1131) }
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1132) }
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1133)
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 1134) void __rcu **radix_tree_iter_resume(void __rcu **slot,
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 1135) struct radix_tree_iter *iter)
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1136) {
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1137) slot++;
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1138) iter->index = __radix_tree_iter_add(iter, 1);
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1139) iter->next_index = iter->index;
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1140) iter->tags = 0;
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1141) return NULL;
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1142) }
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1143) EXPORT_SYMBOL(radix_tree_iter_resume);
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1144)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1145) /**
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1146) * radix_tree_next_chunk - find next chunk of slots for iteration
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1147) *
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1148) * @root: radix tree root
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1149) * @iter: iterator state
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1150) * @flags: RADIX_TREE_ITER_* flags and tag index
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1151) * Returns: pointer to chunk first slot, or NULL if iteration is over
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1152) */
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 1153) void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1154) struct radix_tree_iter *iter, unsigned flags)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1155) {
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 1156) unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
8c1244de00ef9 (Matthew Wilcox 2016-05-20 17:03:36 -0700 1157) struct radix_tree_node *node, *child;
21ef533931f73 (Ross Zwisler 2016-05-20 17:02:26 -0700 1158) unsigned long index, offset, maxindex;
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1159)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1160) if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1161) return NULL;
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1162)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1163) /*
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1164) * Catch next_index overflow after ~0UL. iter->index never overflows
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1165) * during iterating; it can be zero only at the beginning.
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1166) * And we cannot overflow iter->next_index in a single step,
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1167) * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
fffaee365fded (Konstantin Khlebnikov 2012-06-05 21:36:33 +0400 1168) *
fffaee365fded (Konstantin Khlebnikov 2012-06-05 21:36:33 +0400 1169) * This condition also used by radix_tree_next_slot() to stop
91b9677c4c124 (Matthew Wilcox 2016-12-14 15:08:31 -0800 1170) * contiguous iterating, and forbid switching to the next chunk.
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1171) */
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1172) index = iter->next_index;
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1173) if (!index && iter->index)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1174) return NULL;
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1175)
21ef533931f73 (Ross Zwisler 2016-05-20 17:02:26 -0700 1176) restart:
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 1177) radix_tree_load_root(root, &child, &maxindex);
21ef533931f73 (Ross Zwisler 2016-05-20 17:02:26 -0700 1178) if (index > maxindex)
21ef533931f73 (Ross Zwisler 2016-05-20 17:02:26 -0700 1179) return NULL;
8c1244de00ef9 (Matthew Wilcox 2016-05-20 17:03:36 -0700 1180) if (!child)
8c1244de00ef9 (Matthew Wilcox 2016-05-20 17:03:36 -0700 1181) return NULL;
21ef533931f73 (Ross Zwisler 2016-05-20 17:02:26 -0700 1182)
8c1244de00ef9 (Matthew Wilcox 2016-05-20 17:03:36 -0700 1183) if (!radix_tree_is_internal_node(child)) {
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1184) /* Single-slot tree */
21ef533931f73 (Ross Zwisler 2016-05-20 17:02:26 -0700 1185) iter->index = index;
21ef533931f73 (Ross Zwisler 2016-05-20 17:02:26 -0700 1186) iter->next_index = maxindex + 1;
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1187) iter->tags = 1;
268f42de71812 (Matthew Wilcox 2016-12-14 15:08:55 -0800 1188) iter->node = NULL;
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 1189) return (void __rcu **)&root->xa_head;
8c1244de00ef9 (Matthew Wilcox 2016-05-20 17:03:36 -0700 1190) }
21ef533931f73 (Ross Zwisler 2016-05-20 17:02:26 -0700 1191)
8c1244de00ef9 (Matthew Wilcox 2016-05-20 17:03:36 -0700 1192) do {
8c1244de00ef9 (Matthew Wilcox 2016-05-20 17:03:36 -0700 1193) node = entry_to_node(child);
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 1194) offset = radix_tree_descend(node, &child, index);
21ef533931f73 (Ross Zwisler 2016-05-20 17:02:26 -0700 1195)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1196) if ((flags & RADIX_TREE_ITER_TAGGED) ?
8c1244de00ef9 (Matthew Wilcox 2016-05-20 17:03:36 -0700 1197) !tag_get(node, tag, offset) : !child) {
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1198) /* Hole detected */
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1199) if (flags & RADIX_TREE_ITER_CONTIG)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1200) return NULL;
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1201)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1202) if (flags & RADIX_TREE_ITER_TAGGED)
bc412fca6edc2 (Matthew Wilcox 2016-12-14 15:08:40 -0800 1203) offset = radix_tree_find_next_bit(node, tag,
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1204) offset + 1);
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1205) else
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1206) while (++offset < RADIX_TREE_MAP_SIZE) {
12320d0ff1c9d (Matthew Wilcox 2017-02-13 15:22:48 -0500 1207) void *slot = rcu_dereference_raw(
12320d0ff1c9d (Matthew Wilcox 2017-02-13 15:22:48 -0500 1208) node->slots[offset]);
21ef533931f73 (Ross Zwisler 2016-05-20 17:02:26 -0700 1209) if (slot)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1210) break;
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1211) }
8c1244de00ef9 (Matthew Wilcox 2016-05-20 17:03:36 -0700 1212) index &= ~node_maxindex(node);
9e85d81119658 (Matthew Wilcox 2016-05-20 17:03:48 -0700 1213) index += offset << node->shift;
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1214) /* Overflow after ~0UL */
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1215) if (!index)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1216) return NULL;
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1217) if (offset == RADIX_TREE_MAP_SIZE)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1218) goto restart;
8c1244de00ef9 (Matthew Wilcox 2016-05-20 17:03:36 -0700 1219) child = rcu_dereference_raw(node->slots[offset]);
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1220) }
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1221)
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 1222) if (!child)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1223) goto restart;
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 1224) if (child == RADIX_TREE_RETRY)
e157b555945fb (Matthew Wilcox 2016-12-14 15:09:01 -0800 1225) break;
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 1226) } while (node->shift && radix_tree_is_internal_node(child));
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1227)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1228) /* Update the iterator state */
3a08cd52c37c7 (Matthew Wilcox 2018-09-22 16:14:30 -0400 1229) iter->index = (index &~ node_maxindex(node)) | offset;
8c1244de00ef9 (Matthew Wilcox 2016-05-20 17:03:36 -0700 1230) iter->next_index = (index | node_maxindex(node)) + 1;
268f42de71812 (Matthew Wilcox 2016-12-14 15:08:55 -0800 1231) iter->node = node;
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1232)
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1233) if (flags & RADIX_TREE_ITER_TAGGED)
148deab223b23 (Matthew Wilcox 2016-12-14 15:08:49 -0800 1234) set_iter_tags(iter, node, offset, tag);
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1235)
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1236) return node->slots + offset;
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1237) }
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1238) EXPORT_SYMBOL(radix_tree_next_chunk);
78c1d78488a3c (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1239)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1240) /**
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1241) * radix_tree_gang_lookup - perform multiple lookup on a radix tree
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1242) * @root: radix tree root
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1243) * @results: where the results of the lookup are placed
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1244) * @first_index: start the lookup from this key
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1245) * @max_items: place up to this many items at *results
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1246) *
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1247) * Performs an index-ascending scan of the tree for present items. Places
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1248) * them at *@results and returns the number of items which were placed at
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1249) * *@results.
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1250) *
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1251) * The implementation is naive.
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 1252) *
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 1253) * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 1254) * rcu_read_lock. In this case, rather than the returned results being
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1255) * an atomic snapshot of the tree at a single point in time, the
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1256) * semantics of an RCU protected gang lookup are as though multiple
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1257) * radix_tree_lookups have been issued in individual locks, and results
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1258) * stored in 'results'.
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1259) */
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1260) unsigned int
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 1261) radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1262) unsigned long first_index, unsigned int max_items)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1263) {
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1264) struct radix_tree_iter iter;
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 1265) void __rcu **slot;
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1266) unsigned int ret = 0;
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 1267)
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1268) if (unlikely(!max_items))
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 1269) return 0;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1270)
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1271) radix_tree_for_each_slot(slot, root, &iter, first_index) {
46437f9a554fb (Matthew Wilcox 2016-02-02 16:57:52 -0800 1272) results[ret] = rcu_dereference_raw(*slot);
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1273) if (!results[ret])
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1274) continue;
b194d16c27af9 (Matthew Wilcox 2016-05-20 17:03:30 -0700 1275) if (radix_tree_is_internal_node(results[ret])) {
46437f9a554fb (Matthew Wilcox 2016-02-02 16:57:52 -0800 1276) slot = radix_tree_iter_retry(&iter);
46437f9a554fb (Matthew Wilcox 2016-02-02 16:57:52 -0800 1277) continue;
46437f9a554fb (Matthew Wilcox 2016-02-02 16:57:52 -0800 1278) }
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1279) if (++ret == max_items)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1280) break;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1281) }
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 1282)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1283) return ret;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1284) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1285) EXPORT_SYMBOL(radix_tree_gang_lookup);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1286)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1287) /**
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1288) * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1289) * based on a tag
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1290) * @root: radix tree root
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1291) * @results: where the results of the lookup are placed
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1292) * @first_index: start the lookup from this key
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1293) * @max_items: place up to this many items at *results
daff89f324755 (Jonathan Corbet 2006-03-25 03:08:05 -0800 1294) * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1295) *
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1296) * Performs an index-ascending scan of the tree for present items which
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1297) * have the tag indexed by @tag set. Places the items at *@results and
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1298) * returns the number of items which were placed at *@results.
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1299) */
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1300) unsigned int
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 1301) radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
daff89f324755 (Jonathan Corbet 2006-03-25 03:08:05 -0800 1302) unsigned long first_index, unsigned int max_items,
daff89f324755 (Jonathan Corbet 2006-03-25 03:08:05 -0800 1303) unsigned int tag)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1304) {
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1305) struct radix_tree_iter iter;
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 1306) void __rcu **slot;
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1307) unsigned int ret = 0;
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 1308)
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1309) if (unlikely(!max_items))
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 1310) return 0;
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 1311)
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1312) radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
46437f9a554fb (Matthew Wilcox 2016-02-02 16:57:52 -0800 1313) results[ret] = rcu_dereference_raw(*slot);
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1314) if (!results[ret])
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1315) continue;
b194d16c27af9 (Matthew Wilcox 2016-05-20 17:03:30 -0700 1316) if (radix_tree_is_internal_node(results[ret])) {
46437f9a554fb (Matthew Wilcox 2016-02-02 16:57:52 -0800 1317) slot = radix_tree_iter_retry(&iter);
46437f9a554fb (Matthew Wilcox 2016-02-02 16:57:52 -0800 1318) continue;
46437f9a554fb (Matthew Wilcox 2016-02-02 16:57:52 -0800 1319) }
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1320) if (++ret == max_items)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1321) break;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1322) }
7cf9c2c76c1a1 (Nicholas Piggin 2006-12-06 20:33:44 -0800 1323)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1324) return ret;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1325) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1326) EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1327)
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1328) /**
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1329) * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1330) * radix tree based on a tag
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1331) * @root: radix tree root
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1332) * @results: where the results of the lookup are placed
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1333) * @first_index: start the lookup from this key
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1334) * @max_items: place up to this many items at *results
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1335) * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1336) *
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1337) * Performs an index-ascending scan of the tree for present items which
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1338) * have the tag indexed by @tag set. Places the slots at *@results and
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1339) * returns the number of slots which were placed at *@results.
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1340) */
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1341) unsigned int
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 1342) radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 1343) void __rcu ***results, unsigned long first_index,
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 1344) unsigned int max_items, unsigned int tag)
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1345) {
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1346) struct radix_tree_iter iter;
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 1347) void __rcu **slot;
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1348) unsigned int ret = 0;
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1349)
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1350) if (unlikely(!max_items))
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1351) return 0;
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1352)
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1353) radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1354) results[ret] = slot;
cebbd29e1c2f7 (Konstantin Khlebnikov 2012-03-28 14:42:53 -0700 1355) if (++ret == max_items)
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1356) break;
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1357) }
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1358)
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1359) return ret;
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1360) }
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1361) EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
47feff2c8eefe (Nicholas Piggin 2008-07-25 19:45:29 -0700 1362)
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1363) static bool __radix_tree_delete(struct radix_tree_root *root,
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 1364) struct radix_tree_node *node, void __rcu **slot)
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1365) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1366) void *old = rcu_dereference_raw(*slot);
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 1367) int values = xa_is_value(old) ? -1 : 0;
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1368) unsigned offset = get_slot_offset(node, slot);
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1369) int tag;
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1370)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1371) if (is_idr(root))
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1372) node_tag_set(root, node, IDR_FREE, offset);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1373) else
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1374) for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1375) node_tag_clear(root, node, tag, offset);
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1376)
01959dfe771c6 (Matthew Wilcox 2017-11-09 09:23:56 -0500 1377) replace_slot(slot, NULL, node, -1, values);
1cf56f9d670b8 (Matthew Wilcox 2018-04-09 16:24:45 -0400 1378) return node && delete_node(root, node);
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1379) }
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1380)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1381) /**
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1382) * radix_tree_iter_delete - delete the entry at this iterator position
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1383) * @root: radix tree root
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1384) * @iter: iterator state
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1385) * @slot: pointer to slot
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1386) *
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1387) * Delete the entry at the position currently pointed to by the iterator.
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1388) * This may result in the current node being freed; if it is, the iterator
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1389) * is advanced so that it will not reference the freed memory. This
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1390) * function may be called without any locking if there are no other threads
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1391) * which can access this tree.
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1392) */
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1393) void radix_tree_iter_delete(struct radix_tree_root *root,
d7b627277b573 (Matthew Wilcox 2017-02-13 15:58:24 -0500 1394) struct radix_tree_iter *iter, void __rcu **slot)
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1395) {
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1396) if (__radix_tree_delete(root, iter->node, slot))
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1397) iter->index = iter->next_index;
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1398) }
d1b48c1e7184d (Chris Wilson 2017-08-16 09:52:08 +0100 1399) EXPORT_SYMBOL(radix_tree_iter_delete);
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1400)
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1401) /**
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1402) * radix_tree_delete_item - delete an item from a radix tree
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1403) * @root: radix tree root
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1404) * @index: index key
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1405) * @item: expected item
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1406) *
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1407) * Remove @item at @index from the radix tree rooted at @root.
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1408) *
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1409) * Return: the deleted entry, or %NULL if it was not present
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1410) * or the entry at the given @index was not @item.
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1411) */
53c59f262d747 (Johannes Weiner 2014-04-03 14:47:39 -0700 1412) void *radix_tree_delete_item(struct radix_tree_root *root,
53c59f262d747 (Johannes Weiner 2014-04-03 14:47:39 -0700 1413) unsigned long index, void *item)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1414) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1415) struct radix_tree_node *node = NULL;
7a4deea1aa8bd (Matthew Wilcox 2018-05-25 14:47:24 -0700 1416) void __rcu **slot = NULL;
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 1417) void *entry;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1418)
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 1419) entry = __radix_tree_lookup(root, index, &node, &slot);
7a4deea1aa8bd (Matthew Wilcox 2018-05-25 14:47:24 -0700 1420) if (!slot)
7a4deea1aa8bd (Matthew Wilcox 2018-05-25 14:47:24 -0700 1421) return NULL;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1422) if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1423) get_slot_offset(node, slot))))
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 1424) return NULL;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1425)
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 1426) if (item && entry != item)
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 1427) return NULL;
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 1428)
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1429) __radix_tree_delete(root, node, slot);
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 1430)
139e561660fe1 (Johannes Weiner 2014-04-03 14:47:54 -0700 1431) return entry;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1432) }
53c59f262d747 (Johannes Weiner 2014-04-03 14:47:39 -0700 1433) EXPORT_SYMBOL(radix_tree_delete_item);
53c59f262d747 (Johannes Weiner 2014-04-03 14:47:39 -0700 1434)
53c59f262d747 (Johannes Weiner 2014-04-03 14:47:39 -0700 1435) /**
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1436) * radix_tree_delete - delete an entry from a radix tree
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1437) * @root: radix tree root
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1438) * @index: index key
53c59f262d747 (Johannes Weiner 2014-04-03 14:47:39 -0700 1439) *
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1440) * Remove the entry at @index from the radix tree rooted at @root.
53c59f262d747 (Johannes Weiner 2014-04-03 14:47:39 -0700 1441) *
0ac398ef391b5 (Matthew Wilcox 2017-01-28 09:56:22 -0500 1442) * Return: The deleted entry, or %NULL if it was not present.
53c59f262d747 (Johannes Weiner 2014-04-03 14:47:39 -0700 1443) */
53c59f262d747 (Johannes Weiner 2014-04-03 14:47:39 -0700 1444) void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
53c59f262d747 (Johannes Weiner 2014-04-03 14:47:39 -0700 1445) {
53c59f262d747 (Johannes Weiner 2014-04-03 14:47:39 -0700 1446) return radix_tree_delete_item(root, index, NULL);
53c59f262d747 (Johannes Weiner 2014-04-03 14:47:39 -0700 1447) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1448) EXPORT_SYMBOL(radix_tree_delete);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1449)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1450) /**
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1451) * radix_tree_tagged - test whether any items in the tree are tagged
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1452) * @root: radix tree root
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1453) * @tag: tag to test
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1454) */
35534c869c62f (Matthew Wilcox 2016-12-19 17:43:19 -0500 1455) int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1456) {
612d6c19db2fd (Nicholas Piggin 2006-06-23 02:03:22 -0700 1457) return root_tag_get(root, tag);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1458) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1459) EXPORT_SYMBOL(radix_tree_tagged);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1460)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1461) /**
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1462) * idr_preload - preload for idr_alloc()
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1463) * @gfp_mask: allocation mask to use for preloading
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1464) *
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1465) * Preallocate memory to use for the next call to idr_alloc(). This function
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1466) * returns with preemption disabled. It will be enabled by idr_preload_end().
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1467) */
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1468) void idr_preload(gfp_t gfp_mask)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1469) {
bc9ae2247ac92 (Eric Dumazet 2017-09-08 16:15:54 -0700 1470) if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
cfa6705d89b65 (Sebastian Andrzej Siewior 2020-05-27 22:11:14 +0200 1471) local_lock(&radix_tree_preloads.lock);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1472) }
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1473) EXPORT_SYMBOL(idr_preload);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1474)
460488c58ca8b (Matthew Wilcox 2017-11-28 15:16:24 -0500 1475) void __rcu **idr_get_free(struct radix_tree_root *root,
388f79fda74fd (Chris Mi 2017-08-30 02:31:57 -0400 1476) struct radix_tree_iter *iter, gfp_t gfp,
388f79fda74fd (Chris Mi 2017-08-30 02:31:57 -0400 1477) unsigned long max)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1478) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1479) struct radix_tree_node *node = NULL, *child;
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 1480) void __rcu **slot = (void __rcu **)&root->xa_head;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1481) unsigned long maxindex, start = iter->next_index;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1482) unsigned int shift, offset = 0;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1483)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1484) grow:
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1485) shift = radix_tree_load_root(root, &child, &maxindex);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1486) if (!radix_tree_tagged(root, IDR_FREE))
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1487) start = max(start, maxindex + 1);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1488) if (start > max)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1489) return ERR_PTR(-ENOSPC);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1490)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1491) if (start > maxindex) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1492) int error = radix_tree_extend(root, gfp, start, shift);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1493) if (error < 0)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1494) return ERR_PTR(error);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1495) shift = error;
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 1496) child = rcu_dereference_raw(root->xa_head);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1497) }
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 1498) if (start == 0 && shift == 0)
66ee620f06f99 (Matthew Wilcox 2018-06-25 06:56:50 -0400 1499) shift = RADIX_TREE_MAP_SHIFT;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1500)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1501) while (shift) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1502) shift -= RADIX_TREE_MAP_SHIFT;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1503) if (child == NULL) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1504) /* Have to add a child node. */
d58275bc96ae9 (Matthew Wilcox 2017-01-16 17:10:21 -0500 1505) child = radix_tree_node_alloc(gfp, node, root, shift,
d58275bc96ae9 (Matthew Wilcox 2017-01-16 17:10:21 -0500 1506) offset, 0, 0);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1507) if (!child)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1508) return ERR_PTR(-ENOMEM);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1509) all_tag_set(child, IDR_FREE);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1510) rcu_assign_pointer(*slot, node_to_entry(child));
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1511) if (node)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1512) node->count++;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1513) } else if (!radix_tree_is_internal_node(child))
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1514) break;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1515)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1516) node = entry_to_node(child);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1517) offset = radix_tree_descend(node, &child, start);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1518) if (!tag_get(node, IDR_FREE, offset)) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1519) offset = radix_tree_find_next_bit(node, IDR_FREE,
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1520) offset + 1);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1521) start = next_index(start, node, offset);
b7e9728f3d7fc (Matthew Wilcox (Oracle) 2019-11-02 00:25:08 -0400 1522) if (start > max || start == 0)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1523) return ERR_PTR(-ENOSPC);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1524) while (offset == RADIX_TREE_MAP_SIZE) {
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1525) offset = node->offset + 1;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1526) node = node->parent;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1527) if (!node)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1528) goto grow;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1529) shift = node->shift;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1530) }
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1531) child = rcu_dereference_raw(node->slots[offset]);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1532) }
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1533) slot = &node->slots[offset];
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1534) }
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1535)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1536) iter->index = start;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1537) if (node)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1538) iter->next_index = 1 + min(max, (start | node_maxindex(node)));
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1539) else
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1540) iter->next_index = 1;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1541) iter->node = node;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1542) set_iter_tags(iter, node, offset, IDR_FREE);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1543)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1544) return slot;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1545) }
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1546)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1547) /**
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1548) * idr_destroy - release all internal memory from an IDR
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1549) * @idr: idr handle
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1550) *
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1551) * After this function is called, the IDR is empty, and may be reused or
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1552) * the data structure containing it may be freed.
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1553) *
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1554) * A typical clean-up sequence for objects stored in an idr tree will use
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1555) * idr_for_each() to free all objects, if necessary, then idr_destroy() to
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1556) * free the memory used to keep track of those objects.
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1557) */
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1558) void idr_destroy(struct idr *idr)
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1559) {
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 1560) struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1561) if (radix_tree_is_internal_node(node))
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1562) radix_tree_free_nodes(node);
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 1563) idr->idr_rt.xa_head = NULL;
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1564) root_tag_set(&idr->idr_rt, IDR_FREE);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1565) }
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1566) EXPORT_SYMBOL(idr_destroy);
0a835c4f090af (Matthew Wilcox 2016-12-20 10:27:56 -0500 1567)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1568) static void
449dd6984d0e4 (Johannes Weiner 2014-04-03 14:47:56 -0700 1569) radix_tree_node_ctor(void *arg)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1570) {
449dd6984d0e4 (Johannes Weiner 2014-04-03 14:47:56 -0700 1571) struct radix_tree_node *node = arg;
449dd6984d0e4 (Johannes Weiner 2014-04-03 14:47:56 -0700 1572)
449dd6984d0e4 (Johannes Weiner 2014-04-03 14:47:56 -0700 1573) memset(node, 0, sizeof(*node));
449dd6984d0e4 (Johannes Weiner 2014-04-03 14:47:56 -0700 1574) INIT_LIST_HEAD(&node->private_list);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1575) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1576)
d544abd5ff7d8 (Sebastian Andrzej Siewior 2016-11-03 15:50:01 +0100 1577) static int radix_tree_cpu_dead(unsigned int cpu)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1578) {
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1579) struct radix_tree_preload *rtp;
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1580) struct radix_tree_node *node;
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1581)
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1582) /* Free per-cpu pool of preloaded nodes */
d544abd5ff7d8 (Sebastian Andrzej Siewior 2016-11-03 15:50:01 +0100 1583) rtp = &per_cpu(radix_tree_preloads, cpu);
d544abd5ff7d8 (Sebastian Andrzej Siewior 2016-11-03 15:50:01 +0100 1584) while (rtp->nr) {
d544abd5ff7d8 (Sebastian Andrzej Siewior 2016-11-03 15:50:01 +0100 1585) node = rtp->nodes;
1293d5c5f54d5 (Matthew Wilcox 2017-01-16 16:41:29 -0500 1586) rtp->nodes = node->parent;
d544abd5ff7d8 (Sebastian Andrzej Siewior 2016-11-03 15:50:01 +0100 1587) kmem_cache_free(radix_tree_node_cachep, node);
d544abd5ff7d8 (Sebastian Andrzej Siewior 2016-11-03 15:50:01 +0100 1588) rtp->nr--;
2fcd9005cc03a (Matthew Wilcox 2016-05-20 17:03:04 -0700 1589) }
d544abd5ff7d8 (Sebastian Andrzej Siewior 2016-11-03 15:50:01 +0100 1590) return 0;
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1591) }
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1592)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1593) void __init radix_tree_init(void)
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1594) {
d544abd5ff7d8 (Sebastian Andrzej Siewior 2016-11-03 15:50:01 +0100 1595) int ret;
7e7844226f105 (Michal Hocko 2017-05-03 14:53:09 -0700 1596)
7e7844226f105 (Michal Hocko 2017-05-03 14:53:09 -0700 1597) BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
fa290cda102c0 (Matthew Wilcox 2018-04-10 16:36:28 -0700 1598) BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
02c02bf12c5d8 (Matthew Wilcox 2017-11-03 23:09:45 -0400 1599) BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1600) radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1601) sizeof(struct radix_tree_node), 0,
488514d179828 (Christoph Lameter 2008-04-28 02:12:05 -0700 1602) SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
488514d179828 (Christoph Lameter 2008-04-28 02:12:05 -0700 1603) radix_tree_node_ctor);
d544abd5ff7d8 (Sebastian Andrzej Siewior 2016-11-03 15:50:01 +0100 1604) ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
d544abd5ff7d8 (Sebastian Andrzej Siewior 2016-11-03 15:50:01 +0100 1605) NULL, radix_tree_cpu_dead);
d544abd5ff7d8 (Sebastian Andrzej Siewior 2016-11-03 15:50:01 +0100 1606) WARN_ON(ret < 0);
^1da177e4c3f4 (Linus Torvalds 2005-04-16 15:20:36 -0700 1607) }