f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 1) // SPDX-License-Identifier: GPL-2.0+
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 2) /*
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 3) * XArray implementation
c44aa5e8ab58b (Matthew Wilcox (Oracle) 2020-01-17 22:13:21 -0500 4) * Copyright (c) 2017-2018 Microsoft Corporation
c44aa5e8ab58b (Matthew Wilcox (Oracle) 2020-01-17 22:13:21 -0500 5) * Copyright (c) 2018-2020 Oracle
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 6) * Author: Matthew Wilcox <willy@infradead.org>
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 7) */
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 8)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 9) #include <linux/bitmap.h>
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 10) #include <linux/export.h>
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 11) #include <linux/list.h>
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 12) #include <linux/slab.h>
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 13) #include <linux/xarray.h>
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 14)
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 15) /*
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 16) * Coding conventions in this file:
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 17) *
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 18) * @xa is used to refer to the entire xarray.
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 19) * @xas is the 'xarray operation state'. It may be either a pointer to
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 20) * an xa_state, or an xa_state stored on the stack. This is an unfortunate
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 21) * ambiguity.
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 22) * @index is the index of the entry being operated on
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 23) * @mark is an xa_mark_t; a small number indicating one of the mark bits.
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 24) * @node refers to an xa_node; usually the primary one being operated on by
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 25) * this function.
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 26) * @offset is the index into the slots array inside an xa_node.
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 27) * @parent refers to the @xa_node closer to the head than @node.
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 28) * @entry refers to something stored in a slot in the xarray
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 29) */
f8d5d0cc145cc (Matthew Wilcox 2017-11-07 16:30:10 -0500 30)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 31) static inline unsigned int xa_lock_type(const struct xarray *xa)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 32) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 33) return (__force unsigned int)xa->xa_flags & 3;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 34) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 35)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 36) static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 37) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 38) if (lock_type == XA_LOCK_IRQ)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 39) xas_lock_irq(xas);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 40) else if (lock_type == XA_LOCK_BH)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 41) xas_lock_bh(xas);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 42) else
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 43) xas_lock(xas);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 44) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 45)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 46) static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 47) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 48) if (lock_type == XA_LOCK_IRQ)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 49) xas_unlock_irq(xas);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 50) else if (lock_type == XA_LOCK_BH)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 51) xas_unlock_bh(xas);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 52) else
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 53) xas_unlock(xas);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 54) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 55)
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 56) static inline bool xa_track_free(const struct xarray *xa)
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 57) {
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 58) return xa->xa_flags & XA_FLAGS_TRACK_FREE;
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 59) }
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 60)
3ccaf57a6a63a (Matthew Wilcox 2018-10-26 14:43:22 -0400 61) static inline bool xa_zero_busy(const struct xarray *xa)
3ccaf57a6a63a (Matthew Wilcox 2018-10-26 14:43:22 -0400 62) {
3ccaf57a6a63a (Matthew Wilcox 2018-10-26 14:43:22 -0400 63) return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
3ccaf57a6a63a (Matthew Wilcox 2018-10-26 14:43:22 -0400 64) }
3ccaf57a6a63a (Matthew Wilcox 2018-10-26 14:43:22 -0400 65)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 66) static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 67) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 68) if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 69) xa->xa_flags |= XA_FLAGS_MARK(mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 70) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 71)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 72) static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 73) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 74) if (xa->xa_flags & XA_FLAGS_MARK(mark))
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 75) xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 76) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 77)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 78) static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 79) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 80) return node->marks[(__force unsigned)mark];
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 81) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 82)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 83) static inline bool node_get_mark(struct xa_node *node,
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 84) unsigned int offset, xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 85) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 86) return test_bit(offset, node_marks(node, mark));
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 87) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 88)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 89) /* returns true if the bit was set */
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 90) static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 91) xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 92) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 93) return __test_and_set_bit(offset, node_marks(node, mark));
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 94) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 95)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 96) /* returns true if the bit was set */
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 97) static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 98) xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 99) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 100) return __test_and_clear_bit(offset, node_marks(node, mark));
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 101) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 102)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 103) static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 104) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 105) return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 106) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 107)
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 108) static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 109) {
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 110) bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 111) }
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 112)
/* Step @mark to the next mark value; the caller bounds the iteration. */
#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 116)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 117) /*
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 118) * xas_squash_marks() - Merge all marks to the first entry
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 119) * @xas: Array operation state.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 120) *
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 121) * Set a mark on the first entry if any entry has it set. Clear marks on
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 122) * all sibling entries.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 123) */
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 124) static void xas_squash_marks(const struct xa_state *xas)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 125) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 126) unsigned int mark = 0;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 127) unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 128)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 129) if (!xas->xa_sibs)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 130) return;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 131)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 132) do {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 133) unsigned long *marks = xas->xa_node->marks[mark];
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 134) if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 135) continue;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 136) __set_bit(xas->xa_offset, marks);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 137) bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 138) } while (mark++ != (__force unsigned)XA_MARK_MAX);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 139) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 140)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 141) /* extracts the offset within this node from the index */
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 142) static unsigned int get_offset(unsigned long index, struct xa_node *node)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 143) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 144) return (index >> node->shift) & XA_CHUNK_MASK;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 145) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 146)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 147) static void xas_set_offset(struct xa_state *xas)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 148) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 149) xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 150) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 151)
/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	/*
	 * Operator precedence is intentional here: ~XA_CHUNK_MASK is
	 * shifted left, so the resulting mask clears this node's slot
	 * bits *and* every bit below them, aligning xa_index to the
	 * first index covered by slot @offset.
	 */
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 159)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 160) static void xas_advance(struct xa_state *xas)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 161) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 162) xas->xa_offset++;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 163) xas_move_index(xas, xas->xa_offset);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 164) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 165)
/*
 * Record that the requested index lies outside the current extent of
 * the array.  Returns NULL so callers can write 'return set_bounds(xas)'.
 */
static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 171)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 172) /*
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 173) * Starts a walk. If the @xas is already valid, we assume that it's on
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 174) * the right path and just return where we've got to. If we're in an
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 175) * error state, return NULL. If the index is outside the current scope
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 176) * of the xarray, return NULL without changing @xas->xa_node. Otherwise
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 177) * set @xas->xa_node to NULL and return the current head of the array.
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 178) */
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 179) static void *xas_start(struct xa_state *xas)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 180) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 181) void *entry;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 182)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 183) if (xas_valid(xas))
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 184) return xas_reload(xas);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 185) if (xas_error(xas))
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 186) return NULL;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 187)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 188) entry = xa_head(xas->xa);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 189) if (!xa_is_node(entry)) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 190) if (xas->xa_index)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 191) return set_bounds(xas);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 192) } else {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 193) if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 194) return set_bounds(xas);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 195) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 196)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 197) xas->xa_node = NULL;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 198) return entry;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 199) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 200)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 201) static void *xas_descend(struct xa_state *xas, struct xa_node *node)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 202) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 203) unsigned int offset = get_offset(xas->xa_index, node);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 204) void *entry = xa_entry(xas->xa, node, offset);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 205)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 206) xas->xa_node = node;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 207) if (xa_is_sibling(entry)) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 208) offset = xa_to_sibling(entry);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 209) entry = xa_entry(xas->xa, node, offset);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 210) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 211)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 212) xas->xa_offset = offset;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 213) return entry;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 214) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 215)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 216) /**
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 217) * xas_load() - Load an entry from the XArray (advanced).
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 218) * @xas: XArray operation state.
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 219) *
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 220) * Usually walks the @xas to the appropriate state to load the entry
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 221) * stored at xa_index. However, it will do nothing and return %NULL if
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 222) * @xas is in an error state. xas_load() will never expand the tree.
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 223) *
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 224) * If the xa_state is set up to operate on a multi-index entry, xas_load()
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 225) * may return %NULL or an internal entry, even if there are entries
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 226) * present within the range specified by @xas.
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 227) *
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 228) * Context: Any context. The caller should hold the xa_lock or the RCU lock.
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 229) * Return: Usually an entry in the XArray, but see description for exceptions.
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 230) */
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 231) void *xas_load(struct xa_state *xas)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 232) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 233) void *entry = xas_start(xas);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 234)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 235) while (xa_is_node(entry)) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 236) struct xa_node *node = xa_to_node(entry);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 237)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 238) if (xas->xa_shift > node->shift)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 239) break;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 240) entry = xas_descend(xas, node);
76b4e52995654 (Matthew Wilcox 2018-12-28 23:20:44 -0500 241) if (node->shift == 0)
76b4e52995654 (Matthew Wilcox 2018-12-28 23:20:44 -0500 242) break;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 243) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 244) return entry;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 245) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 246) EXPORT_SYMBOL_GPL(xas_load);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 247)
/* Move the radix tree node cache here */
/* Shared with the radix tree implementation; defined outside this file. */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

/* Sentinel stored in node->array while a node waits for RCU freeing. */
#define XA_RCU_FREE ((struct xarray *)1)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 253)
/*
 * Defer freeing @node until after an RCU grace period.  ->array is
 * overwritten with the XA_RCU_FREE sentinel first, marking the node
 * as no longer belonging to any array.
 */
static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 260)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 261) /*
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 262) * xas_destroy() - Free any resources allocated during the XArray operation.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 263) * @xas: XArray operation state.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 264) *
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 265) * This function is now internal-only.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 266) */
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 267) static void xas_destroy(struct xa_state *xas)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 268) {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 269) struct xa_node *next, *node = xas->xa_alloc;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 270)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 271) while (node) {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 272) XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 273) next = rcu_dereference_raw(node->parent);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 274) radix_tree_node_rcu_free(&node->rcu_head);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 275) xas->xa_alloc = node = next;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 276) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 277) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 278)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 279) /**
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 280) * xas_nomem() - Allocate memory if needed.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 281) * @xas: XArray operation state.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 282) * @gfp: Memory allocation flags.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 283) *
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 284) * If we need to add new nodes to the XArray, we try to allocate memory
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 285) * with GFP_NOWAIT while holding the lock, which will usually succeed.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 286) * If it fails, @xas is flagged as needing memory to continue. The caller
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 287) * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 288) * the caller should retry the operation.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 289) *
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 290) * Forward progress is guaranteed as one node is allocated here and
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 291) * stored in the xa_state where it will be found by xas_alloc(). More
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 292) * nodes will likely be found in the slab allocator, but we do not tie
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 293) * them up here.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 294) *
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 295) * Return: true if memory was needed, and was successfully allocated.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 296) */
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 297) bool xas_nomem(struct xa_state *xas, gfp_t gfp)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 298) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 299) if (xas->xa_node != XA_ERROR(-ENOMEM)) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 300) xas_destroy(xas);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 301) return false;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 302) }
7b785645e8f13 (Johannes Weiner 2019-05-24 10:12:46 -0400 303) if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
7b785645e8f13 (Johannes Weiner 2019-05-24 10:12:46 -0400 304) gfp |= __GFP_ACCOUNT;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 305) xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 306) if (!xas->xa_alloc)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 307) return false;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 308) xas->xa_alloc->parent = NULL;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 309) XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 310) xas->xa_node = XAS_RESTART;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 311) return true;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 312) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 313) EXPORT_SYMBOL_GPL(xas_nomem);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 314)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 315) /*
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 316) * __xas_nomem() - Drop locks and allocate memory if needed.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 317) * @xas: XArray operation state.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 318) * @gfp: Memory allocation flags.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 319) *
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 320) * Internal variant of xas_nomem().
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 321) *
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 322) * Return: true if memory was needed, and was successfully allocated.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 323) */
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 324) static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 325) __must_hold(xas->xa->xa_lock)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 326) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 327) unsigned int lock_type = xa_lock_type(xas->xa);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 328)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 329) if (xas->xa_node != XA_ERROR(-ENOMEM)) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 330) xas_destroy(xas);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 331) return false;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 332) }
7b785645e8f13 (Johannes Weiner 2019-05-24 10:12:46 -0400 333) if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
7b785645e8f13 (Johannes Weiner 2019-05-24 10:12:46 -0400 334) gfp |= __GFP_ACCOUNT;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 335) if (gfpflags_allow_blocking(gfp)) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 336) xas_unlock_type(xas, lock_type);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 337) xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 338) xas_lock_type(xas, lock_type);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 339) } else {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 340) xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 341) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 342) if (!xas->xa_alloc)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 343) return false;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 344) xas->xa_alloc->parent = NULL;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 345) XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 346) xas->xa_node = XAS_RESTART;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 347) return true;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 348) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 349)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 350) static void xas_update(struct xa_state *xas, struct xa_node *node)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 351) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 352) if (xas->xa_update)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 353) xas->xa_update(node);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 354) else
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 355) XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 356) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 357)
/*
 * xas_alloc() - Allocate and initialise a new xa_node.
 * @xas: XArray operation state.
 * @shift: Shift to store in the new node.
 *
 * Uses the node cached in @xas->xa_alloc if one is present, otherwise
 * allocates from the radix tree node cache without sleeping (we are
 * called with the xa_lock held).  If @xas currently points at a node,
 * the new node is counted as a child of it.
 *
 * Return: The new node, or NULL.  On allocation failure the error in
 * @xas is set to -ENOMEM.
 */
static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	/* Do not allocate if this operation has already hit an error */
	if (xas_invalid(xas))
		return NULL;

	if (node) {
		/* Consume the preallocated node */
		xas->xa_alloc = NULL;
	} else {
		/* Holding the lock, so we cannot sleep for memory */
		gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;

		if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
			gfp |= __GFP_ACCOUNT;

		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		/* Account the new node as a child of the current node */
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 397)
#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
	unsigned long nr_slots = xas->xa_sibs + 1UL;

	return nr_slots << xas->xa_shift;
}
#endif
0e9446c35a809 (Matthew Wilcox 2018-08-15 14:13:29 -0400 405)
/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
 * multi-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask = xas_size(xas) - 1;
		/* Round up to the last index covered by this entry */
		max |= mask;
		/*
		 * mask == max means the entry would start at index 0;
		 * bump the maximum so the tree grows tall enough that
		 * the entry need not be stored at the root.
		 */
		if (mask == max)
			max++;
	}
#endif

	return max;
}
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 427)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 428) /* The maximum index that can be contained in the array without expanding it */
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 429) static unsigned long max_index(void *entry)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 430) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 431) if (!xa_is_node(entry))
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 432) return 0;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 433) return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 434) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 435)
/*
 * xas_shrink() - Reduce the height of the tree.
 * @xas: Array operation state.
 *
 * As long as the current head node contains only a single entry, in
 * slot 0, that entry can be moved up to become the head of the array
 * and the node freed.  Concurrent RCU readers may still hold a pointer
 * to a freed node, so slot 0 is replaced with a retry entry to send
 * them back to the head of the array.
 */
static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		/* Can only shrink when this node has exactly one child */
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		/*
		 * A non-node entry under a node with non-zero shift
		 * covers multiple indices and cannot move to the head.
		 */
		if (!xa_is_node(entry) && node->shift)
			break;
		/* A busy zero entry at the root becomes an empty array */
		if (xa_is_zero(entry) && xa_zero_busy(xa))
			entry = NULL;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		/* Send concurrent RCU walkers back to the array head */
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		/* The child is the new head; see if it can shrink too */
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 472)
/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if @xas->xa_node
 * still contains any entries (has a non-zero count).  Empty ancestors
 * are deleted as well, and the tree is shrunk if the head node becomes
 * reachable without intermediate nodes.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		/* Nodes which still hold entries cannot be deleted */
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			/* We freed the head node; the array is now empty */
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		/* Unhook from the parent, then see if it emptied too */
		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	/* A surviving head node may allow the tree height to shrink */
	if (!node->parent)
		xas_shrink(xas);
}
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 512)
/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	/* Iterative depth-first walk of the subtree rooted at @top */
	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (node->shift && xa_is_node(entry)) {
			/* Descend into the child before freeing this node */
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		/* Redirect concurrent RCU walkers back to the array head */
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		/* All slots handled: free this node and pop to the parent */
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 553)
/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index.
 *
 * Returns the shift at which the entry should be created, or -ENOMEM
 * if a node could not be allocated.
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		/* Empty array: just report the shift; nothing to expand */
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	/* Add one level at a time until @max fits under the head */
	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		/* The old head becomes slot 0 of the new head node */
		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 624)
/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 * @allow_root: %true if we can store the entry in the root directly
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL.  If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas, bool allow_root)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		/* Starting from the head; grow the tree if necessary */
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		if (!entry && xa_zero_busy(xa))
			entry = XA_ZERO_ENTRY;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		/* Force a node if the entry may not live in the root */
		if (!shift && !allow_root)
			shift = XA_CHUNK_SHIFT;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		/* Resume descent from the node @xas already points at */
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	/* Descend, allocating missing interior nodes, down to @order */
	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			/* A non-node entry blocks further descent */
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 693)
/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas.  Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range.  This is for the benefit of users which have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
	/* Remember the caller's state so it can be restored afterwards */
	unsigned long index = xas->xa_index;
	unsigned char shift = xas->xa_shift;
	unsigned char sibs = xas->xa_sibs;

	/* Start at the last index of the range and walk backwards */
	xas->xa_index |= ((sibs + 1UL) << shift) - 1;
	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
		xas->xa_offset |= sibs;
	/* Create single-index entries regardless of the caller's order */
	xas->xa_shift = 0;
	xas->xa_sibs = 0;

	for (;;) {
		xas_create(xas, true);
		if (xas_error(xas))
			goto restore;
		/* Done once we reach the chunk containing the first index */
		if (xas->xa_index <= (index | XA_CHUNK_MASK))
			goto success;
		xas->xa_index -= XA_CHUNK_SIZE;

		/* Step to the previous slot, ascending past offset 0 */
		for (;;) {
			struct xa_node *node = xas->xa_node;
			xas->xa_node = xa_parent_locked(xas->xa, node);
			xas->xa_offset = node->offset - 1;
			if (node->offset != 0)
				break;
		}
	}

restore:
	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
	xas->xa_index = index;
	return;
success:
	xas->xa_index = index;
	if (xas->xa_node)
		xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);
2264f5132fe45 (Matthew Wilcox 2017-12-04 00:11:48 -0500 743)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 744) static void update_node(struct xa_state *xas, struct xa_node *node,
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 745) int count, int values)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 746) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 747) if (!node || (!count && !values))
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 748) return;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 749)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 750) node->count += count;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 751) node->nr_values += values;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 752) XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 753) XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 754) xas_update(xas, node);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 755) if (count < 0)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 756) xas_delete_node(xas);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 757) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 758)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 759) /**
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 760) * xas_store() - Store this entry in the XArray.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 761) * @xas: XArray operation state.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 762) * @entry: New entry.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 763) *
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 764) * If @xas is operating on a multi-index entry, the entry returned by this
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 765) * function is essentially meaningless (it may be an internal entry or it
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 766) * may be %NULL, even if there are non-NULL entries at some of the indices
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 767) * covered by the range). This is not a problem for any current users,
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 768) * and can be changed if needed.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 769) *
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 770) * Return: The old entry at this index.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 771) */
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 772) void *xas_store(struct xa_state *xas, void *entry)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 773) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 774) struct xa_node *node;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 775) void __rcu **slot = &xas->xa->xa_head;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 776) unsigned int offset, max;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 777) int count = 0;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 778) int values = 0;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 779) void *first, *next;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 780) bool value = xa_is_value(entry);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 781)
4a5c8d898948d (Matthew Wilcox 2019-02-21 17:54:44 -0500 782) if (entry) {
4a5c8d898948d (Matthew Wilcox 2019-02-21 17:54:44 -0500 783) bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
4a5c8d898948d (Matthew Wilcox 2019-02-21 17:54:44 -0500 784) first = xas_create(xas, allow_root);
4a5c8d898948d (Matthew Wilcox 2019-02-21 17:54:44 -0500 785) } else {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 786) first = xas_load(xas);
4a5c8d898948d (Matthew Wilcox 2019-02-21 17:54:44 -0500 787) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 788)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 789) if (xas_invalid(xas))
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 790) return first;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 791) node = xas->xa_node;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 792) if (node && (xas->xa_shift < node->shift))
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 793) xas->xa_sibs = 0;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 794) if ((first == entry) && !xas->xa_sibs)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 795) return first;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 796)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 797) next = first;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 798) offset = xas->xa_offset;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 799) max = xas->xa_offset + xas->xa_sibs;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 800) if (node) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 801) slot = &node->slots[offset];
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 802) if (xas->xa_sibs)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 803) xas_squash_marks(xas);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 804) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 805) if (!entry)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 806) xas_init_marks(xas);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 807)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 808) for (;;) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 809) /*
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 810) * Must clear the marks before setting the entry to NULL,
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 811) * otherwise xas_for_each_marked may find a NULL entry and
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 812) * stop early. rcu_assign_pointer contains a release barrier
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 813) * so the mark clearing will appear to happen before the
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 814) * entry is set to NULL.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 815) */
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 816) rcu_assign_pointer(*slot, entry);
2fbe967b3eb74 (Matthew Wilcox 2019-02-21 17:36:45 -0500 817) if (xa_is_node(next) && (!node || node->shift))
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 818) xas_free_nodes(xas, xa_to_node(next));
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 819) if (!node)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 820) break;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 821) count += !next - !entry;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 822) values += !xa_is_value(first) - !value;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 823) if (entry) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 824) if (offset == max)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 825) break;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 826) if (!xa_is_sibling(entry))
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 827) entry = xa_mk_sibling(xas->xa_offset);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 828) } else {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 829) if (offset == XA_CHUNK_MASK)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 830) break;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 831) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 832) next = xa_entry_locked(xas->xa, node, ++offset);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 833) if (!xa_is_sibling(next)) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 834) if (!entry && (offset > max))
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 835) break;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 836) first = next;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 837) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 838) slot++;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 839) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 840)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 841) update_node(xas, node, count, values);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 842) return first;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 843) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 844) EXPORT_SYMBOL_GPL(xas_store);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 845)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 846) /**
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 847) * xas_get_mark() - Returns the state of this mark.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 848) * @xas: XArray operation state.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 849) * @mark: Mark number.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 850) *
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 851) * Return: true if the mark is set, false if the mark is clear or @xas
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 852) * is in an error state.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 853) */
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 854) bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 855) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 856) if (xas_invalid(xas))
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 857) return false;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 858) if (!xas->xa_node)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 859) return xa_marked(xas->xa, mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 860) return node_get_mark(xas->xa_node, xas->xa_offset, mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 861) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 862) EXPORT_SYMBOL_GPL(xas_get_mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 863)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 864) /**
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 865) * xas_set_mark() - Sets the mark on this entry and its parents.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 866) * @xas: XArray operation state.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 867) * @mark: Mark number.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 868) *
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 869) * Sets the specified mark on this entry, and walks up the tree setting it
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 870) * on all the ancestor entries. Does nothing if @xas has not been walked to
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 871) * an entry, or is in an error state.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 872) */
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 873) void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 874) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 875) struct xa_node *node = xas->xa_node;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 876) unsigned int offset = xas->xa_offset;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 877)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 878) if (xas_invalid(xas))
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 879) return;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 880)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 881) while (node) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 882) if (node_set_mark(node, offset, mark))
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 883) return;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 884) offset = node->offset;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 885) node = xa_parent_locked(xas->xa, node);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 886) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 887)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 888) if (!xa_marked(xas->xa, mark))
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 889) xa_mark_set(xas->xa, mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 890) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 891) EXPORT_SYMBOL_GPL(xas_set_mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 892)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 893) /**
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 894) * xas_clear_mark() - Clears the mark on this entry and its parents.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 895) * @xas: XArray operation state.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 896) * @mark: Mark number.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 897) *
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 898) * Clears the specified mark on this entry, and walks back to the head
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 899) * attempting to clear it on all the ancestor entries. Does nothing if
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 900) * @xas has not been walked to an entry, or is in an error state.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 901) */
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 902) void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 903) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 904) struct xa_node *node = xas->xa_node;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 905) unsigned int offset = xas->xa_offset;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 906)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 907) if (xas_invalid(xas))
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 908) return;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 909)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 910) while (node) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 911) if (!node_clear_mark(node, offset, mark))
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 912) return;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 913) if (node_any_mark(node, mark))
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 914) return;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 915)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 916) offset = node->offset;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 917) node = xa_parent_locked(xas->xa, node);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 918) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 919)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 920) if (xa_marked(xas->xa, mark))
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 921) xa_mark_clear(xas->xa, mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 922) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 923) EXPORT_SYMBOL_GPL(xas_clear_mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 924)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 925) /**
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 926) * xas_init_marks() - Initialise all marks for the entry
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 927) * @xas: Array operations state.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 928) *
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 929) * Initialise all marks for the entry specified by @xas. If we're tracking
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 930) * free entries with a mark, we need to set it on all entries. All other
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 931) * marks are cleared.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 932) *
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 933) * This implementation is not as efficient as it could be; we may walk
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 934) * up the tree multiple times.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 935) */
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 936) void xas_init_marks(const struct xa_state *xas)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 937) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 938) xa_mark_t mark = 0;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 939)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 940) for (;;) {
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 941) if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 942) xas_set_mark(xas, mark);
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 943) else
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 944) xas_clear_mark(xas, mark);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 945) if (mark == XA_MARK_MAX)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 946) break;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 947) mark_inc(mark);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 948) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 949) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 950) EXPORT_SYMBOL_GPL(xas_init_marks);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 951)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 952) #ifdef CONFIG_XARRAY_MULTI
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 953) static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 954) {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 955) unsigned int marks = 0;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 956) xa_mark_t mark = XA_MARK_0;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 957)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 958) for (;;) {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 959) if (node_get_mark(node, offset, mark))
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 960) marks |= 1 << (__force unsigned int)mark;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 961) if (mark == XA_MARK_MAX)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 962) break;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 963) mark_inc(mark);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 964) }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 965)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 966) return marks;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 967) }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 968)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 969) static void node_set_marks(struct xa_node *node, unsigned int offset,
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 970) struct xa_node *child, unsigned int marks)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 971) {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 972) xa_mark_t mark = XA_MARK_0;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 973)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 974) for (;;) {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 975) if (marks & (1 << (__force unsigned int)mark)) {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 976) node_set_mark(node, offset, mark);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 977) if (child)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 978) node_mark_all(child, mark);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 979) }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 980) if (mark == XA_MARK_MAX)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 981) break;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 982) mark_inc(mark);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 983) }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 984) }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 985)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 986) /**
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 987) * xas_split_alloc() - Allocate memory for splitting an entry.
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 988) * @xas: XArray operation state.
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 989) * @entry: New entry which will be stored in the array.
12efebab09e38 (Matthew Wilcox (Oracle) 2020-10-10 11:17:44 -0400 990) * @order: Current entry order.
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 991) * @gfp: Memory allocation flags.
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 992) *
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 993) * This function should be called before calling xas_split().
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 994) * If necessary, it will allocate new nodes (and fill them with @entry)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 995) * to prepare for the upcoming split of an entry of @order size into
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 996) * entries of the order stored in the @xas.
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 997) *
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 998) * Context: May sleep if @gfp flags permit.
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 999) */
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1000) void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1001) gfp_t gfp)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1002) {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1003) unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1004) unsigned int mask = xas->xa_sibs;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1005)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1006) /* XXX: no support for splitting really large entries yet */
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1007) if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1008) goto nomem;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1009) if (xas->xa_shift + XA_CHUNK_SHIFT > order)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1010) return;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1011)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1012) do {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1013) unsigned int i;
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1014) void *sibling = NULL;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1015) struct xa_node *node;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1016)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1017) node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1018) if (!node)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1019) goto nomem;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1020) node->array = xas->xa;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1021) for (i = 0; i < XA_CHUNK_SIZE; i++) {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1022) if ((i & mask) == 0) {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1023) RCU_INIT_POINTER(node->slots[i], entry);
3012110d71f41 (Matthew Wilcox (Oracle) 2020-11-19 08:32:31 -0500 1024) sibling = xa_mk_sibling(i);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1025) } else {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1026) RCU_INIT_POINTER(node->slots[i], sibling);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1027) }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1028) }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1029) RCU_INIT_POINTER(node->parent, xas->xa_alloc);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1030) xas->xa_alloc = node;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1031) } while (sibs-- > 0);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1032)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1033) return;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1034) nomem:
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1035) xas_destroy(xas);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1036) xas_set_err(xas, -ENOMEM);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1037) }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1038) EXPORT_SYMBOL_GPL(xas_split_alloc);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1039)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1040) /**
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1041) * xas_split() - Split a multi-index entry into smaller entries.
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1042) * @xas: XArray operation state.
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1043) * @entry: New entry to store in the array.
12efebab09e38 (Matthew Wilcox (Oracle) 2020-10-10 11:17:44 -0400 1044) * @order: Current entry order.
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1045) *
12efebab09e38 (Matthew Wilcox (Oracle) 2020-10-10 11:17:44 -0400 1046) * The size of the new entries is set in @xas. The value in @entry is
12efebab09e38 (Matthew Wilcox (Oracle) 2020-10-10 11:17:44 -0400 1047) * copied to all the replacement entries.
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1048) *
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1049) * Context: Any context. The caller should hold the xa_lock.
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1050) */
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1051) void xas_split(struct xa_state *xas, void *entry, unsigned int order)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1052) {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1053) unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1054) unsigned int offset, marks;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1055) struct xa_node *node;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1056) void *curr = xas_load(xas);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1057) int values = 0;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1058)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1059) node = xas->xa_node;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1060) if (xas_top(node))
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1061) return;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1062)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1063) marks = node_get_marks(node, xas->xa_offset);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1064)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1065) offset = xas->xa_offset + sibs;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1066) do {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1067) if (xas->xa_shift < node->shift) {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1068) struct xa_node *child = xas->xa_alloc;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1069)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1070) xas->xa_alloc = rcu_dereference_raw(child->parent);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1071) child->shift = node->shift - XA_CHUNK_SHIFT;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1072) child->offset = offset;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1073) child->count = XA_CHUNK_SIZE;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1074) child->nr_values = xa_is_value(entry) ?
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1075) XA_CHUNK_SIZE : 0;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1076) RCU_INIT_POINTER(child->parent, node);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1077) node_set_marks(node, offset, child, marks);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1078) rcu_assign_pointer(node->slots[offset],
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1079) xa_mk_node(child));
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1080) if (xa_is_value(curr))
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1081) values--;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1082) } else {
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1083) unsigned int canon = offset - xas->xa_sibs;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1084)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1085) node_set_marks(node, canon, NULL, marks);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1086) rcu_assign_pointer(node->slots[canon], entry);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1087) while (offset > canon)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1088) rcu_assign_pointer(node->slots[offset--],
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1089) xa_mk_sibling(canon));
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1090) values += (xa_is_value(entry) - xa_is_value(curr)) *
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1091) (xas->xa_sibs + 1);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1092) }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1093) } while (offset-- > xas->xa_offset);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1094)
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1095) node->nr_values += values;
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1096) }
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1097) EXPORT_SYMBOL_GPL(xas_split);
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1098) #endif
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1099)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1100) /**
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1101) * xas_pause() - Pause a walk to drop a lock.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1102) * @xas: XArray operation state.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1103) *
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1104) * Some users need to pause a walk and drop the lock they're holding in
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1105) * order to yield to a higher priority thread or carry out an operation
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1106) * on an entry. Those users should call this function before they drop
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1107) * the lock. It resets the @xas to be suitable for the next iteration
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1108) * of the loop after the user has reacquired the lock. If most entries
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1109) * found during a walk require you to call xas_pause(), the xa_for_each()
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1110) * iterator may be more appropriate.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1111) *
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1112) * Note that xas_pause() only works for forward iteration. If a user needs
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1113) * to pause a reverse iteration, we will need a xas_pause_rev().
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1114) */
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1115) void xas_pause(struct xa_state *xas)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1116) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1117) struct xa_node *node = xas->xa_node;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1118)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1119) if (xas_invalid(xas))
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1120) return;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1121)
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1122) xas->xa_node = XAS_RESTART;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1123) if (node) {
c36d451ad386b (Matthew Wilcox (Oracle) 2020-01-31 06:17:09 -0500 1124) unsigned long offset = xas->xa_offset;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1125) while (++offset < XA_CHUNK_SIZE) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1126) if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1127) break;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1128) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1129) xas->xa_index += (offset - xas->xa_offset) << node->shift;
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1130) if (xas->xa_index == 0)
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1131) xas->xa_node = XAS_BOUNDS;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1132) } else {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1133) xas->xa_index++;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1134) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1135) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1136) EXPORT_SYMBOL_GPL(xas_pause);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1137)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1138) /*
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1139) * __xas_prev() - Find the previous entry in the XArray.
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1140) * @xas: XArray operation state.
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1141) *
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1142) * Helper function for xas_prev() which handles all the complex cases
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1143) * out of line.
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1144) */
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1145) void *__xas_prev(struct xa_state *xas)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1146) {
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1147) void *entry;
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1148)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1149) if (!xas_frozen(xas->xa_node))
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1150) xas->xa_index--;
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1151) if (!xas->xa_node)
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1152) return set_bounds(xas);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1153) if (xas_not_node(xas->xa_node))
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1154) return xas_load(xas);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1155)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1156) if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1157) xas->xa_offset--;
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1158)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1159) while (xas->xa_offset == 255) {
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1160) xas->xa_offset = xas->xa_node->offset - 1;
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1161) xas->xa_node = xa_parent(xas->xa, xas->xa_node);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1162) if (!xas->xa_node)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1163) return set_bounds(xas);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1164) }
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1165)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1166) for (;;) {
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1167) entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1168) if (!xa_is_node(entry))
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1169) return entry;
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1170)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1171) xas->xa_node = xa_to_node(entry);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1172) xas_set_offset(xas);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1173) }
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1174) }
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1175) EXPORT_SYMBOL_GPL(__xas_prev);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1176)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1177) /*
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1178) * __xas_next() - Find the next entry in the XArray.
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1179) * @xas: XArray operation state.
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1180) *
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1181) * Helper function for xas_next() which handles all the complex cases
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1182) * out of line.
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1183) */
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1184) void *__xas_next(struct xa_state *xas)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1185) {
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1186) void *entry;
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1187)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1188) if (!xas_frozen(xas->xa_node))
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1189) xas->xa_index++;
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1190) if (!xas->xa_node)
91abab83839aa (Matthew Wilcox (Oracle) 2019-07-01 17:03:29 -0400 1191) return set_bounds(xas);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1192) if (xas_not_node(xas->xa_node))
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1193) return xas_load(xas);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1194)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1195) if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1196) xas->xa_offset++;
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1197)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1198) while (xas->xa_offset == XA_CHUNK_SIZE) {
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1199) xas->xa_offset = xas->xa_node->offset + 1;
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1200) xas->xa_node = xa_parent(xas->xa, xas->xa_node);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1201) if (!xas->xa_node)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1202) return set_bounds(xas);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1203) }
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1204)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1205) for (;;) {
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1206) entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1207) if (!xa_is_node(entry))
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1208) return entry;
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1209)
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1210) xas->xa_node = xa_to_node(entry);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1211) xas_set_offset(xas);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1212) }
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1213) }
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1214) EXPORT_SYMBOL_GPL(__xas_next);
64d3e9a9e0cc5 (Matthew Wilcox 2017-12-01 00:06:52 -0500 1215)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1216) /**
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1217) * xas_find() - Find the next present entry in the XArray.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1218) * @xas: XArray operation state.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1219) * @max: Highest index to return.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1220) *
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1221) * If the @xas has not yet been walked to an entry, return the entry
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1222) * which has an index >= xas.xa_index. If it has been walked, the entry
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1223) * currently being pointed at has been processed, and so we move to the
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1224) * next entry.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1225) *
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1226) * If no entry is found and the array is smaller than @max, the iterator
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1227) * is set to the smallest index not yet in the array. This allows @xas
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1228) * to be immediately passed to xas_store().
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1229) *
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1230) * Return: The entry, if found, otherwise %NULL.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1231) */
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1232) void *xas_find(struct xa_state *xas, unsigned long max)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1233) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1234) void *entry;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1235)
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1236) if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1237) return NULL;
c44aa5e8ab58b (Matthew Wilcox (Oracle) 2020-01-17 22:13:21 -0500 1238) if (xas->xa_index > max)
c44aa5e8ab58b (Matthew Wilcox (Oracle) 2020-01-17 22:13:21 -0500 1239) return set_bounds(xas);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1240)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1241) if (!xas->xa_node) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1242) xas->xa_index = 1;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1243) return set_bounds(xas);
82a22311b7a68 (Matthew Wilcox (Oracle) 2019-11-07 22:49:11 -0500 1244) } else if (xas->xa_node == XAS_RESTART) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1245) entry = xas_load(xas);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1246) if (entry || xas_not_node(xas->xa_node))
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1247) return entry;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1248) } else if (!xas->xa_node->shift &&
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1249) xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1250) xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1251) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1252)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1253) xas_advance(xas);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1254)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1255) while (xas->xa_node && (xas->xa_index <= max)) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1256) if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1257) xas->xa_offset = xas->xa_node->offset + 1;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1258) xas->xa_node = xa_parent(xas->xa, xas->xa_node);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1259) continue;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1260) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1261)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1262) entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1263) if (xa_is_node(entry)) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1264) xas->xa_node = xa_to_node(entry);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1265) xas->xa_offset = 0;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1266) continue;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1267) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1268) if (entry && !xa_is_sibling(entry))
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1269) return entry;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1270)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1271) xas_advance(xas);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1272) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1273)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1274) if (!xas->xa_node)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1275) xas->xa_node = XAS_BOUNDS;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1276) return NULL;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1277) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1278) EXPORT_SYMBOL_GPL(xas_find);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1279)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1280) /**
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1281) * xas_find_marked() - Find the next marked entry in the XArray.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1282) * @xas: XArray operation state.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1283) * @max: Highest index to return.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1284) * @mark: Mark number to search for.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1285) *
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1286) * If the @xas has not yet been walked to an entry, return the marked entry
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1287) * which has an index >= xas.xa_index. If it has been walked, the entry
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1288) * currently being pointed at has been processed, and so we return the
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1289) * first marked entry with an index > xas.xa_index.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1290) *
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1291) * If no marked entry is found and the array is smaller than @max, @xas is
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1292) * set to the bounds state and xas->xa_index is set to the smallest index
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1293) * not yet in the array. This allows @xas to be immediately passed to
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1294) * xas_store().
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1295) *
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1296) * If no entry is found before @max is reached, @xas is set to the restart
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1297) * state.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1298) *
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1299) * Return: The entry, if found, otherwise %NULL.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1300) */
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1301) void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1302) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1303) bool advance = true;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1304) unsigned int offset;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1305) void *entry;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1306)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1307) if (xas_error(xas))
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1308) return NULL;
c44aa5e8ab58b (Matthew Wilcox (Oracle) 2020-01-17 22:13:21 -0500 1309) if (xas->xa_index > max)
c44aa5e8ab58b (Matthew Wilcox (Oracle) 2020-01-17 22:13:21 -0500 1310) goto max;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1311)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1312) if (!xas->xa_node) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1313) xas->xa_index = 1;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1314) goto out;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1315) } else if (xas_top(xas->xa_node)) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1316) advance = false;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1317) entry = xa_head(xas->xa);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1318) xas->xa_node = NULL;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1319) if (xas->xa_index > max_index(entry))
48483614de97c (Matthew Wilcox 2018-12-13 13:57:42 -0500 1320) goto out;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1321) if (!xa_is_node(entry)) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1322) if (xa_marked(xas->xa, mark))
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1323) return entry;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1324) xas->xa_index = 1;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1325) goto out;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1326) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1327) xas->xa_node = xa_to_node(entry);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1328) xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1329) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1330)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1331) while (xas->xa_index <= max) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1332) if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1333) xas->xa_offset = xas->xa_node->offset + 1;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1334) xas->xa_node = xa_parent(xas->xa, xas->xa_node);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1335) if (!xas->xa_node)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1336) break;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1337) advance = false;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1338) continue;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1339) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1340)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1341) if (!advance) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1342) entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1343) if (xa_is_sibling(entry)) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1344) xas->xa_offset = xa_to_sibling(entry);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1345) xas_move_index(xas, xas->xa_offset);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1346) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1347) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1348)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1349) offset = xas_find_chunk(xas, advance, mark);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1350) if (offset > xas->xa_offset) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1351) advance = false;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1352) xas_move_index(xas, offset);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1353) /* Mind the wrap */
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1354) if ((xas->xa_index - 1) >= max)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1355) goto max;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1356) xas->xa_offset = offset;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1357) if (offset == XA_CHUNK_SIZE)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1358) continue;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1359) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1360)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1361) entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
7e934cf5ace1d (Matthew Wilcox (Oracle) 2020-03-12 17:29:11 -0400 1362) if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
7e934cf5ace1d (Matthew Wilcox (Oracle) 2020-03-12 17:29:11 -0400 1363) continue;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1364) if (!xa_is_node(entry))
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1365) return entry;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1366) xas->xa_node = xa_to_node(entry);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1367) xas_set_offset(xas);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1368) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1369)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1370) out:
48483614de97c (Matthew Wilcox 2018-12-13 13:57:42 -0500 1371) if (xas->xa_index > max)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1372) goto max;
48483614de97c (Matthew Wilcox 2018-12-13 13:57:42 -0500 1373) return set_bounds(xas);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1374) max:
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1375) xas->xa_node = XAS_RESTART;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1376) return NULL;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1377) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1378) EXPORT_SYMBOL_GPL(xas_find_marked);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1379)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1380) /**
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1381) * xas_find_conflict() - Find the next present entry in a range.
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1382) * @xas: XArray operation state.
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1383) *
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1384) * The @xas describes both a range and a position within that range.
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1385) *
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1386) * Context: Any context. Expects xa_lock to be held.
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1387) * Return: The next entry in the range covered by @xas or %NULL.
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1388) */
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1389) void *xas_find_conflict(struct xa_state *xas)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1390) {
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1391) void *curr;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1392)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1393) if (xas_error(xas))
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1394) return NULL;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1395)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1396) if (!xas->xa_node)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1397) return NULL;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1398)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1399) if (xas_top(xas->xa_node)) {
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1400) curr = xas_start(xas);
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1401) if (!curr)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1402) return NULL;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1403) while (xa_is_node(curr)) {
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1404) struct xa_node *node = xa_to_node(curr);
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1405) curr = xas_descend(xas, node);
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1406) }
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1407) if (curr)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1408) return curr;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1409) }
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1410)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1411) if (xas->xa_node->shift > xas->xa_shift)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1412) return NULL;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1413)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1414) for (;;) {
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1415) if (xas->xa_node->shift == xas->xa_shift) {
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1416) if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1417) break;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1418) } else if (xas->xa_offset == XA_CHUNK_MASK) {
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1419) xas->xa_offset = xas->xa_node->offset;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1420) xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1421) if (!xas->xa_node)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1422) break;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1423) continue;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1424) }
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1425) curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1426) if (xa_is_sibling(curr))
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1427) continue;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1428) while (xa_is_node(curr)) {
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1429) xas->xa_node = xa_to_node(curr);
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1430) xas->xa_offset = 0;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1431) curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1432) }
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1433) if (curr)
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1434) return curr;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1435) }
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1436) xas->xa_offset -= xas->xa_sibs;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1437) return NULL;
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1438) }
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1439) EXPORT_SYMBOL_GPL(xas_find_conflict);
4e99d4e9579d3 (Matthew Wilcox 2018-06-01 22:46:02 -0400 1440)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1441) /**
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1442) * xa_load() - Load an entry from an XArray.
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1443) * @xa: XArray.
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1444) * @index: index into array.
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1445) *
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1446) * Context: Any context. Takes and releases the RCU lock.
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1447) * Return: The entry at @index in @xa.
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1448) */
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1449) void *xa_load(struct xarray *xa, unsigned long index)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1450) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1451) XA_STATE(xas, xa, index);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1452) void *entry;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1453)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1454) rcu_read_lock();
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1455) do {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1456) entry = xas_load(&xas);
9f14d4f1f1045 (Matthew Wilcox 2018-10-01 14:54:59 -0400 1457) if (xa_is_zero(entry))
9f14d4f1f1045 (Matthew Wilcox 2018-10-01 14:54:59 -0400 1458) entry = NULL;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1459) } while (xas_retry(&xas, entry));
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1460) rcu_read_unlock();
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1461)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1462) return entry;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1463) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1464) EXPORT_SYMBOL(xa_load);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 1465)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1466) static void *xas_result(struct xa_state *xas, void *curr)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1467) {
9f14d4f1f1045 (Matthew Wilcox 2018-10-01 14:54:59 -0400 1468) if (xa_is_zero(curr))
9f14d4f1f1045 (Matthew Wilcox 2018-10-01 14:54:59 -0400 1469) return NULL;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1470) if (xas_error(xas))
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1471) curr = xas->xa_node;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1472) return curr;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1473) }
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1474)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1475) /**
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1476) * __xa_erase() - Erase this entry from the XArray while locked.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1477) * @xa: XArray.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1478) * @index: Index into array.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1479) *
809ab9371ca0a (Matthew Wilcox 2019-01-26 00:52:26 -0500 1480) * After this function returns, loading from @index will return %NULL.
809ab9371ca0a (Matthew Wilcox 2019-01-26 00:52:26 -0500 1481) * If the index is part of a multi-index entry, all indices will be erased
809ab9371ca0a (Matthew Wilcox 2019-01-26 00:52:26 -0500 1482) * and none of the entries will be part of a multi-index entry.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1483) *
809ab9371ca0a (Matthew Wilcox 2019-01-26 00:52:26 -0500 1484) * Context: Any context. Expects xa_lock to be held on entry.
809ab9371ca0a (Matthew Wilcox 2019-01-26 00:52:26 -0500 1485) * Return: The entry which used to be at this index.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1486) */
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1487) void *__xa_erase(struct xarray *xa, unsigned long index)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1488) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1489) XA_STATE(xas, xa, index);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1490) return xas_result(&xas, xas_store(&xas, NULL));
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1491) }
9ee5a3b7eeb19 (Matthew Wilcox 2018-11-01 22:52:06 -0400 1492) EXPORT_SYMBOL(__xa_erase);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1493)
/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * Removes the entry at @index so that subsequent loads return %NULL.
 * If @index is covered by a multi-index entry, the whole entry is
 * erased and none of the indices remain part of a multi-index entry.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
void *xa_erase(struct xarray *xa, unsigned long index)
{
	void *old;

	xa_lock(xa);
	old = __xa_erase(xa, index);
	xa_unlock(xa);

	return old;
}
EXPORT_SYMBOL(xa_erase);
9c16bb8890545 (Matthew Wilcox 2018-11-05 15:48:49 -0500 1517)
/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	/* Entries reserved for the advanced API may not be stored here. */
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);
	/*
	 * In an allocating array, storing NULL must keep the index
	 * allocated, so substitute the zero entry (loads still see NULL).
	 */
	if (xa_track_free(xa) && !entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_store(&xas, entry);
		/* The index is now occupied, so it is no longer free. */
		if (xa_track_free(xa))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));	/* retry after allocating memory */

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1552)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1553) /**
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1554) * xa_store() - Store this entry in the XArray.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1555) * @xa: XArray.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1556) * @index: Index into array.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1557) * @entry: New entry.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1558) * @gfp: Memory allocation flags.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1559) *
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1560) * After this function returns, loads from this index will return @entry.
8fc75643c5e14 (Matthew Wilcox (Oracle) 2020-10-15 20:05:16 -0700 1561) * Storing into an existing multi-index entry updates the entry of every index.
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1562) * The marks associated with @index are unaffected unless @entry is %NULL.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1563) *
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1564) * Context: Any context. Takes and releases the xa_lock.
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1565) * May sleep if the @gfp flags permit.
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1566) * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1567) * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1568) * failed.
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1569) */
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1570) void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1571) {
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1572) void *curr;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1573)
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1574) xa_lock(xa);
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1575) curr = __xa_store(xa, index, entry, gfp);
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1576) xa_unlock(xa);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1577)
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1578) return curr;
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1579) }
611f318637daa (Matthew Wilcox 2018-11-05 15:56:17 -0500 1580) EXPORT_SYMBOL(xa_store);
58d6ea3085f2e (Matthew Wilcox 2017-11-10 15:15:08 -0500 1581)
/**
 * __xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * If the entry at @index is the same as @old, replace it with @entry.
 * If the return value is equal to @old, then the exchange succeeded.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	/* Entries reserved for the advanced API may not be stored here. */
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_load(&xas);
		/* Pointer comparison: only an exact match is replaced. */
		if (curr == old) {
			xas_store(&xas, entry);
			/*
			 * Only an empty-to-occupied transition consumes a
			 * free slot in an allocating array.
			 */
			if (xa_track_free(xa) && entry && !curr)
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));	/* retry after allocating memory */

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);
41aec91f55985 (Matthew Wilcox 2017-11-10 15:34:55 -0500 1619)
/**
 * __xa_insert() - Store this entry in the XArray if no entry is present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	/* Entries reserved for the advanced API may not be stored here. */
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;
	/* Inserting NULL stores the zero (reserved) entry instead. */
	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_load(&xas);
		if (!curr) {
			xas_store(&xas, entry);
			/* Mark the index as in use in an allocating array. */
			if (xa_track_free(xa))
				xas_clear_mark(&xas, XA_FREE_MARK);
		} else {
			/* Any existing entry, even a reserved one, fails. */
			xas_set_err(&xas, -EBUSY);
		}
	} while (__xas_nomem(&xas, gfp));	/* retry after allocating memory */

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_insert);
b0606fed6eece (Matthew Wilcox 2019-01-02 13:57:03 -0500 1660)
0e9446c35a809 (Matthew Wilcox 2018-08-15 14:13:29 -0400 1661) #ifdef CONFIG_XARRAY_MULTI
/*
 * Set up @xas so that a single store covers as much of @first..@last as
 * possible, starting at @first.  Chooses the largest shift at which
 * @first is aligned and the range still spans whole chunks, then leaves
 * @xas->xa_sibs as the number of extra slots (beyond the first) to fill
 * at that level.  xa_store_range() calls this repeatedly, advancing
 * @first by xas_size() each pass, until the whole range is stored.
 */
static void xas_set_range(struct xa_state *xas, unsigned long first,
		unsigned long last)
{
	unsigned int shift = 0;
	unsigned long sibs = last - first;
	unsigned int offset = XA_CHUNK_MASK;

	xas_set(xas, first);

	/* Ascend while @first is chunk-aligned and the range spans a chunk. */
	while ((first & XA_CHUNK_MASK) == 0) {
		if (sibs < XA_CHUNK_MASK)
			break;
		if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
			break;
		shift += XA_CHUNK_SHIFT;
		/* Remember the residual offset from the first ascent. */
		if (offset == XA_CHUNK_MASK)
			offset = sibs & XA_CHUNK_MASK;
		sibs >>= XA_CHUNK_SHIFT;
		first >>= XA_CHUNK_SHIFT;
	}

	/* Clamp the sibling count so we do not run off the end of a node... */
	offset = first & XA_CHUNK_MASK;
	if (offset + sibs > XA_CHUNK_MASK)
		sibs = XA_CHUNK_MASK - offset;
	/* ...nor past @last (the store must not cover extra indices). */
	if ((((first + sibs + 1) << shift) - 1) > last)
		sibs -= 1;

	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
}
0e9446c35a809 (Matthew Wilcox 2018-08-15 14:13:29 -0400 1692)
/**
 * xa_store_range() - Store this entry at a range of indices in the XArray.
 * @xa: XArray.
 * @first: First index to affect.
 * @last: Last index to affect.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from any index between @first and @last,
 * inclusive will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with these indices are unaffected unless @entry
 * is %NULL.
 *
 * Context: Process context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
 * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
 */
void *xa_store_range(struct xarray *xa, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);
	if (last < first)
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		if (entry) {
			/*
			 * The order of the largest naturally-aligned entry
			 * which ends at @last (__ffs counts trailing zero
			 * bits of last + 1); BITS_PER_LONG if the range
			 * runs to ~0UL.  Presumably this pre-creates every
			 * node the stores below will need -- TODO confirm.
			 */
			unsigned int order = BITS_PER_LONG;
			if (last + 1)
				order = __ffs(last + 1);
			xas_set_order(&xas, last, order);
			xas_create(&xas, true);
			if (xas_error(&xas))
				goto unlock;
		}
		do {
			/* Cover as much of the range as one store allows. */
			xas_set_range(&xas, first, last);
			xas_store(&xas, entry);
			if (xas_error(&xas))
				goto unlock;
			first += xas_size(&xas);
		} while (first <= last);
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
57417cebc96b5 (Matthew Wilcox (Oracle) 2020-10-15 20:05:13 -0700 1746)
/**
 * xa_get_order() - Get the order of an entry.
 * @xa: XArray.
 * @index: Index of the entry.
 *
 * Return: A number between 0 and 63 indicating the order of the entry.
 */
int xa_get_order(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;
	int order = 0;

	rcu_read_lock();
	entry = xas_load(&xas);

	/* An empty slot has order 0. */
	if (!entry)
		goto unlock;

	/* An entry stored directly in the root covers a single index. */
	if (!xas.xa_node)
		goto unlock;

	/*
	 * Count the sibling entries which follow the canonical entry in
	 * this node: each doubling of the slot span adds one to the order.
	 */
	for (;;) {
		unsigned int slot = xas.xa_offset + (1 << order);

		if (slot >= XA_CHUNK_SIZE)
			break;
		if (!xa_is_sibling(xas.xa_node->slots[slot]))
			break;
		order++;
	}

	/* Account for the height of the node holding the entry. */
	order += xas.xa_node->shift;
unlock:
	rcu_read_unlock();

	return order;
}
EXPORT_SYMBOL(xa_get_order);
0e9446c35a809 (Matthew Wilcox 2018-08-15 14:13:29 -0400 1786) #endif /* CONFIG_XARRAY_MULTI */
0e9446c35a809 (Matthew Wilcox 2018-08-15 14:13:29 -0400 1787)
/**
 * __xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range for allocated ID.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated,
 * -EBUSY if there are no free entries in @limit, or -EINVAL if @entry
 * is an internal entry or @xa does not track free entries.
 */
int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);

	/* Entries reserved for the advanced API may not be stored here. */
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;
	/* Allocation only works on arrays which track free entries. */
	if (WARN_ON_ONCE(!xa_track_free(xa)))
		return -EINVAL;

	/* Allocating a NULL stores the zero entry; loads return NULL. */
	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		xas.xa_index = limit.min;
		xas_find_marked(&xas, limit.max, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			/* No free entry in the requested range. */
			xas_set_err(&xas, -EBUSY);
		else
			*id = xas.xa_index;
		/* A no-op if an error was set above. */
		xas_store(&xas, entry);
		xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));	/* retry after allocating memory */

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_alloc);
371c752dc6694 (Matthew Wilcox 2018-07-04 10:50:12 -0400 1832)
/**
 * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping.  1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	u32 min = limit.min;
	int ret;

	/* First search upwards from @next (clamped into @limit). */
	limit.min = max(min, *next);
	ret = __xa_alloc(xa, id, entry, limit, gfp);
	if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
		/*
		 * A previous allocation advanced *next past 0 (see below);
		 * consume the flag and report this success as wrapped.
		 */
		xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
		ret = 1;
	}

	if (ret < 0 && limit.min > min) {
		/* Nothing free above @next; wrap and retry from @limit.min. */
		limit.min = min;
		ret = __xa_alloc(xa, id, entry, limit, gfp);
		if (ret == 0)
			ret = 1;
	}

	if (ret >= 0) {
		*next = *id + 1;
		/* *next overflowed to 0; report the wrap on the next call. */
		if (*next == 0)
			xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
	}
	return ret;
}
EXPORT_SYMBOL(__xa_alloc_cyclic);
2fa044e51a1f3 (Matthew Wilcox 2018-11-06 14:13:35 -0500 1882)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1883) /**
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1884) * __xa_set_mark() - Set this mark on this entry while locked.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1885) * @xa: XArray.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1886) * @index: Index of entry.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1887) * @mark: Mark number.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1888) *
804dfaf01bcc9 (Matthew Wilcox 2018-11-05 16:37:15 -0500 1889) * Attempting to set a mark on a %NULL entry does not succeed.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1890) *
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1891) * Context: Any context. Expects xa_lock to be held on entry.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1892) */
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1893) void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1894) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1895) XA_STATE(xas, xa, index);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1896) void *entry = xas_load(&xas);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1897)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1898) if (entry)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1899) xas_set_mark(&xas, mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1900) }
9ee5a3b7eeb19 (Matthew Wilcox 2018-11-01 22:52:06 -0400 1901) EXPORT_SYMBOL(__xa_set_mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1902)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1903) /**
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1904) * __xa_clear_mark() - Clear this mark on this entry while locked.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1905) * @xa: XArray.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1906) * @index: Index of entry.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1907) * @mark: Mark number.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1908) *
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1909) * Context: Any context. Expects xa_lock to be held on entry.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1910) */
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1911) void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1912) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1913) XA_STATE(xas, xa, index);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1914) void *entry = xas_load(&xas);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1915)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1916) if (entry)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1917) xas_clear_mark(&xas, mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1918) }
9ee5a3b7eeb19 (Matthew Wilcox 2018-11-01 22:52:06 -0400 1919) EXPORT_SYMBOL(__xa_clear_mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1920)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1921) /**
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1922) * xa_get_mark() - Inquire whether this mark is set on this entry.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1923) * @xa: XArray.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1924) * @index: Index of entry.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1925) * @mark: Mark number.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1926) *
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1927) * This function uses the RCU read lock, so the result may be out of date
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1928) * by the time it returns. If you need the result to be stable, use a lock.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1929) *
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1930) * Context: Any context. Takes and releases the RCU lock.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1931) * Return: True if the entry at @index has this mark set, false if it doesn't.
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1932) */
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1933) bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1934) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1935) XA_STATE(xas, xa, index);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1936) void *entry;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1937)
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1938) rcu_read_lock();
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1939) entry = xas_start(&xas);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1940) while (xas_get_mark(&xas, mark)) {
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1941) if (!xa_is_node(entry))
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1942) goto found;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1943) entry = xas_descend(&xas, xa_to_node(entry));
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1944) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1945) rcu_read_unlock();
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1946) return false;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1947) found:
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1948) rcu_read_unlock();
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1949) return true;
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1950) }
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1951) EXPORT_SYMBOL(xa_get_mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1952)
/**
 * xa_set_mark() - Set this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	/* Locked wrapper around __xa_set_mark(). */
	xa_lock(xa);
	__xa_set_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1970)
/**
 * xa_clear_mark() - Clear this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Clearing a mark always succeeds.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	/* Locked wrapper around __xa_clear_mark(). */
	xa_lock(xa);
	__xa_clear_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 1988)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1989) /**
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1990) * xa_find() - Search the XArray for an entry.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1991) * @xa: XArray.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1992) * @indexp: Pointer to an index.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1993) * @max: Maximum index to search to.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1994) * @filter: Selection criterion.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1995) *
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1996) * Finds the entry in @xa which matches the @filter, and has the lowest
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1997) * index that is at least @indexp and no more than @max.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1998) * If an entry is found, @indexp is updated to be the index of the entry.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 1999) * This function is protected by the RCU read lock, so it may not find
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2000) * entries which are being simultaneously added. It will not return an
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2001) * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2002) *
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2003) * Context: Any context. Takes and releases the RCU lock.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2004) * Return: The entry, if found, otherwise %NULL.
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2005) */
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2006) void *xa_find(struct xarray *xa, unsigned long *indexp,
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2007) unsigned long max, xa_mark_t filter)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2008) {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2009) XA_STATE(xas, xa, *indexp);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2010) void *entry;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2011)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2012) rcu_read_lock();
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2013) do {
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2014) if ((__force unsigned int)filter < XA_MAX_MARKS)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2015) entry = xas_find_marked(&xas, max, filter);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2016) else
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2017) entry = xas_find(&xas, max);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2018) } while (xas_retry(&xas, entry));
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2019) rcu_read_unlock();
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2020)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2021) if (entry)
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2022) *indexp = xas.xa_index;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2023) return entry;
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2024) }
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2025) EXPORT_SYMBOL(xa_find);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2026)
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 2027) static bool xas_sibling(struct xa_state *xas)
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 2028) {
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 2029) struct xa_node *node = xas->xa_node;
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 2030) unsigned long mask;
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 2031)
d8e93e3f22d9f (Matthew Wilcox (Oracle) 2020-02-27 07:37:40 -0500 2032) if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 2033) return false;
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 2034) mask = (XA_CHUNK_SIZE << node->shift) - 1;
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500 2035) return (xas->xa_index & mask) >
bd40b17ca49d7 (Matthew Wilcox (Oracle) 2020-01-31 05:07:55 -0500 2036) ((unsigned long)xas->xa_offset << node->shift);
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 2037) }
19c30f4dd0923 (Matthew Wilcox (Oracle) 2020-01-17 22:00:41 -0500 2038)
/**
 * xa_find_after() - Search the XArray for a present entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter and has the lowest
 * index that is above @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may miss entries
 * which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The pointer, if found, otherwise %NULL.
 */
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	/* Search starts one past *indexp since we want a strictly higher index. */
	XA_STATE(xas, xa, *indexp + 1);
	void *entry;

	/* *indexp was ULONG_MAX; the start index wrapped, so nothing is above it. */
	if (xas.xa_index == 0)
		return NULL;

	rcu_read_lock();
	for (;;) {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);

		/* Error or nothing found: give up (entry is NULL here). */
		if (xas_invalid(&xas))
			break;
		/*
		 * A multi-index entry whose canonical index is <= *indexp
		 * may be found via one of its sibling slots; skip it and
		 * keep searching for a genuinely higher entry.
		 */
		if (xas_sibling(&xas))
			continue;
		if (!xas_retry(&xas, entry))
			break;
	}
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find_after);
b803b42823d0d (Matthew Wilcox 2017-11-14 08:30:11 -0500 2086)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2087) static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2088) unsigned long max, unsigned int n)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2089) {
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2090) void *entry;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2091) unsigned int i = 0;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2092)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2093) rcu_read_lock();
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2094) xas_for_each(xas, entry, max) {
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2095) if (xas_retry(xas, entry))
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2096) continue;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2097) dst[i++] = entry;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2098) if (i == n)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2099) break;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2100) }
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2101) rcu_read_unlock();
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2102)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2103) return i;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2104) }
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2105)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2106) static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2107) unsigned long max, unsigned int n, xa_mark_t mark)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2108) {
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2109) void *entry;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2110) unsigned int i = 0;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2111)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2112) rcu_read_lock();
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2113) xas_for_each_marked(xas, entry, max, mark) {
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2114) if (xas_retry(xas, entry))
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2115) continue;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2116) dst[i++] = entry;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2117) if (i == n)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2118) break;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2119) }
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2120) rcu_read_unlock();
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2121)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2122) return i;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2123) }
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2124)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2125) /**
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2126) * xa_extract() - Copy selected entries from the XArray into a normal array.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2127) * @xa: The source XArray to copy from.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2128) * @dst: The buffer to copy entries into.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2129) * @start: The first index in the XArray eligible to be selected.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2130) * @max: The last index in the XArray eligible to be selected.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2131) * @n: The maximum number of entries to copy.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2132) * @filter: Selection criterion.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2133) *
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2134) * Copies up to @n entries that match @filter from the XArray. The
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2135) * copied entries will have indices between @start and @max, inclusive.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2136) *
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2137) * The @filter may be an XArray mark value, in which case entries which are
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2138) * marked with that mark will be copied. It may also be %XA_PRESENT, in
804dfaf01bcc9 (Matthew Wilcox 2018-11-05 16:37:15 -0500 2139) * which case all entries which are not %NULL will be copied.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2140) *
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2141) * The entries returned may not represent a snapshot of the XArray at a
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2142) * moment in time. For example, if another thread stores to index 5, then
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2143) * index 10, calling xa_extract() may return the old contents of index 5
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2144) * and the new contents of index 10. Indices not modified while this
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2145) * function is running will not be skipped.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2146) *
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2147) * If you need stronger guarantees, holding the xa_lock across calls to this
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2148) * function will prevent concurrent modification.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2149) *
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2150) * Context: Any context. Takes and releases the RCU lock.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2151) * Return: The number of entries copied.
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2152) */
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2153) unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2154) unsigned long max, unsigned int n, xa_mark_t filter)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2155) {
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2156) XA_STATE(xas, xa, start);
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2157)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2158) if (!n)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2159) return 0;
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2160)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2161) if ((__force unsigned int)filter < XA_MAX_MARKS)
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2162) return xas_extract_marked(&xas, dst, max, n, filter);
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2163) return xas_extract_present(&xas, dst, max, n);
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2164) }
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2165) EXPORT_SYMBOL(xa_extract);
80a0a1a9a3cde (Matthew Wilcox 2017-11-14 16:42:22 -0500 2166)
/**
 * xa_delete_node() - Private interface for workingset code.
 * @node: Node to be removed from the tree.
 * @update: Function to call to update ancestor nodes.
 *
 * Context: xa_lock must be held on entry and will not be released.
 */
void xa_delete_node(struct xa_node *node, xa_update_node_t update)
{
	/*
	 * Hand-build an xa_state positioned at @node's parent, pointing
	 * at the slot which holds @node, so that storing NULL removes
	 * @node itself (xa_shift selects @node's level, not its contents).
	 */
	struct xa_state xas = {
		.xa = node->array,
		.xa_index = (unsigned long)node->offset <<
				(node->shift + XA_CHUNK_SHIFT),
		.xa_shift = node->shift + XA_CHUNK_SHIFT,
		.xa_offset = node->offset,
		.xa_node = xa_parent_locked(node->array, node),
		.xa_update = update,
	};

	xas_store(&xas, NULL);
}
EXPORT_SYMBOL_GPL(xa_delete_node);	/* For the benefit of the test suite */
f82cd2f0b5eb7 (Matthew Wilcox (Oracle) 2020-08-18 09:05:56 -0400 2189)
/**
 * xa_destroy() - Free all internal data structures.
 * @xa: XArray.
 *
 * After calling this function, the XArray is empty and has freed all memory
 * allocated for its internal data structures.  You are responsible for
 * freeing the objects referenced by the XArray.
 *
 * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
 */
void xa_destroy(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long flags;
	void *entry;

	/* Operate on the whole tree, not any particular node. */
	xas.xa_node = NULL;
	xas_lock_irqsave(&xas, flags);
	/* Detach the head first so concurrent readers see an empty array. */
	entry = xa_head_locked(xa);
	RCU_INIT_POINTER(xa->xa_head, NULL);
	xas_init_marks(&xas);
	if (xa_zero_busy(xa))
		xa_mark_clear(xa, XA_FREE_MARK);
	/* lockdep checks we're still holding the lock in xas_free_nodes() */
	if (xa_is_node(entry))
		xas_free_nodes(&xas, xa_to_node(entry));
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(xa_destroy);
687149fca1f37 (Matthew Wilcox 2017-11-17 08:16:34 -0500 2219)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2220) #ifdef XA_DEBUG
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2221) void xa_dump_node(const struct xa_node *node)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2222) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2223) unsigned i, j;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2224)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2225) if (!node)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2226) return;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2227) if ((unsigned long)node & 3) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2228) pr_cont("node %px\n", node);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2229) return;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2230) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2231)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2232) pr_cont("node %px %s %d parent %px shift %d count %d values %d "
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2233) "array %px list %px %px marks",
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2234) node, node->parent ? "offset" : "max", node->offset,
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2235) node->parent, node->shift, node->count, node->nr_values,
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2236) node->array, node->private_list.prev, node->private_list.next);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2237) for (i = 0; i < XA_MAX_MARKS; i++)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2238) for (j = 0; j < XA_MARK_LONGS; j++)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2239) pr_cont(" %lx", node->marks[i][j]);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2240) pr_cont("\n");
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2241) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2242)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2243) void xa_dump_index(unsigned long index, unsigned int shift)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2244) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2245) if (!shift)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2246) pr_info("%lu: ", index);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2247) else if (shift >= BITS_PER_LONG)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2248) pr_info("0-%lu: ", ~0UL);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2249) else
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2250) pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2251) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2252)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2253) void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2254) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2255) if (!entry)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2256) return;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2257)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2258) xa_dump_index(index, shift);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2259)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2260) if (xa_is_node(entry)) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2261) if (shift == 0) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2262) pr_cont("%px\n", entry);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2263) } else {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2264) unsigned long i;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2265) struct xa_node *node = xa_to_node(entry);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2266) xa_dump_node(node);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2267) for (i = 0; i < XA_CHUNK_SIZE; i++)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2268) xa_dump_entry(node->slots[i],
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2269) index + (i << node->shift), node->shift);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2270) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2271) } else if (xa_is_value(entry))
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2272) pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2273) xa_to_value(entry), entry);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2274) else if (!xa_is_internal(entry))
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2275) pr_cont("%px\n", entry);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2276) else if (xa_is_retry(entry))
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2277) pr_cont("retry (%ld)\n", xa_to_internal(entry));
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2278) else if (xa_is_sibling(entry))
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2279) pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
9f14d4f1f1045 (Matthew Wilcox 2018-10-01 14:54:59 -0400 2280) else if (xa_is_zero(entry))
9f14d4f1f1045 (Matthew Wilcox 2018-10-01 14:54:59 -0400 2281) pr_cont("zero (%ld)\n", xa_to_internal(entry));
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2282) else
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2283) pr_cont("UNKNOWN ENTRY (%px)\n", entry);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2284) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2285)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2286) void xa_dump(const struct xarray *xa)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2287) {
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2288) void *entry = xa->xa_head;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2289) unsigned int shift = 0;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2290)
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2291) pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 2292) xa->xa_flags, xa_marked(xa, XA_MARK_0),
9b89a03551446 (Matthew Wilcox 2017-11-10 09:34:31 -0500 2293) xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2294) if (xa_is_node(entry))
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2295) shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2296) xa_dump_entry(entry, 0, shift);
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2297) }
ad3d6c7263e36 (Matthew Wilcox 2017-11-07 14:57:46 -0500 2298) #endif