// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))
/* The initial readahead hit count is 4, to start with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
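
/*
 * Illustrative example (not part of the original source): with 4KB
 * pages (PAGE_SHIFT == 12), SWAP_RA_WIN_SHIFT is 6 and the single
 * atomic word in vma->swap_readahead_info packs three fields:
 *
 *	bits 12 and up:	page-aligned fault address
 *	bits 6..11:	readahead window size (SWAP_RA_WIN)
 *	bits 0..5:	readahead hit count (SWAP_RA_HITS, max 63)
 *
 * For instance, SWAP_RA_VAL(0x7f0012345000UL, 8, 3) evaluates to
 * 0x7f0012345000 | (8 << 6) | 3 == 0x7f0012345203.
 */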

#define INC_CACHE_INFO(x)	data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)	data_race(swap_cache_info.x += (nr))

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;
	struct swap_info_struct *si;

	for (i = 0; i < MAX_SWAPFILES; i++) {
		swp_entry_t entry = swp_entry(i, 1);

		/* Avoid a get_swap_device() warning for a bad swap entry */
		if (!swp_swap_info(entry))
			continue;
		/* Prevent swapoff from freeing swapper_spaces */
		si = get_swap_device(entry);
		if (!si)
			continue;
		nr = nr_swapper_spaces[i];
		spaces = swapper_spaces[i];
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
		put_swap_device(si);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(total_swapcache_pages);

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = find_get_entry(address_space, idx);
	if (xa_is_value(page))
		return page;
	if (page)
		put_page(page);
	return NULL;
}
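
/*
 * Sketch of the pattern above (illustrative, not a verbatim caller):
 * a swap cache slot holds either a real struct page or a "shadow"
 * value entry left behind by reclaim for workingset detection, and
 * xa_is_value() tells the two apart. A swapin path would typically do:
 *
 *	void *shadow = get_shadow_from_swap_cache(entry);
 *	if (shadow)
 *		workingset_refault(page, shadow);
 *
 * Real pages carry a reference from find_get_entry() and must be
 * dropped with put_page(); value entries carry no reference, hence
 * the asymmetry above.
 */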

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = thp_nr_pages(page);
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	do {
		unsigned long nr_shadows = 0;

		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				nr_shadows++;
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrexceptional -= nr_shadows;
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}
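
/*
 * A minimal sketch of the XArray "store with retry" idiom used above
 * (illustrative only; same locking assumptions as add_to_swap_cache()):
 *
 *	do {
 *		xas_lock_irq(&xas);
 *		xas_store(&xas, item);
 *		xas_unlock_irq(&xas);
 *	} while (xas_nomem(&xas, gfp));
 *
 * xas_nomem() allocates any missing node outside the lock and returns
 * true to request a retry; once it returns false, xas_error() holds
 * the final verdict (0 or -ENOMEM).
 */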

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = thp_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	if (shadow)
		address_space->nrexceptional += nr;
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its PTE should
	 * be dirty. A special case is MADV_FREE pages, whose PTE could have
	 * the dirty bit cleared while the page's SwapBacked bit is still set,
	 * because clearing the dirty bit and the SwapBacked bit is not done
	 * under a common lock. For such a page, unmap will not set the dirty
	 * bit, so page reclaim will not write the page out. This can cause
	 * data corruption when the page is swapped in later. Always setting
	 * the dirty bit for the page solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}
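
/*
 * Typical caller pattern (an illustrative sketch of how reclaim might
 * use add_to_swap(); activate_locked is a hypothetical label, not a
 * verbatim excerpt from vmscan):
 *
 *	if (PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page))
 *		if (!add_to_swap(page))
 *			goto activate_locked;
 *
 * On success the page is still locked, now dirty, and in the swap
 * cache, so the regular pageout path can write it to the swap device.
 */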

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list:
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, thp_nr_pages(page));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		unsigned long nr_shadows = 0;
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
			nr_shadows++;
		}
		address_space->nrexceptional -= nr_shadows;
		xa_unlock_irq(&address_space->i_pages);

		/* Advance to the next swap address space until we pass end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}
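
/*
 * Worked example of the stride arithmetic above (illustrative; assumes
 * SWAP_ADDRESS_SPACE_SHIFT == 14, i.e. one address_space per 2^14 swap
 * slots): for curr == 20000 the three statements compute
 * ((20000 >> 14) + 1) << 14 == 32768, the first slot of the next swap
 * address space, so each loop iteration scans exactly one space.
 */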

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct page *page = find_get_entry(mapping, index);

	if (!page)
		return page;
	if (!xa_is_value(page))
		return find_subpage(page, index);
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(page);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(swp), swp_offset(swp));
	put_swap_device(si);
	return page;
}
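
/*
 * Usage sketch (illustrative): a mincore()-style "is this offset
 * resident?" check can use this helper because it sees through shmem
 * swap entries:
 *
 *	struct page *page = find_get_incore_page(mapping, index);
 *	bool present = page != NULL;
 *
 *	if (page)
 *		put_page(page);
 */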

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
		if (page)
			return page;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swapoff, when the swap slot cache is disabled,
		 * we have to handle the race between putting a swap entry
		 * into the swap cache and marking the swap slot as
		 * SWAP_HAS_CACHE. That is done later in this function;
		 * otherwise swapoff would be aborted when we return NULL here.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		put_page(page);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
		put_swap_page(page, entry);
		goto fail_unlock;
	}

	if (mem_cgroup_charge(page, NULL, gfp_mask)) {
		delete_from_swap_cache(page);
		goto fail_unlock;
	}

	if (shadow)
		workingset_refault(page, shadow);

	/* Caller will initiate read into locked page */
	SetPageWorkingset(page);
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	unlock_page(page);
	put_page(page);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
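
/*
 * Worked example of the heuristic above (illustrative numbers): with
 * hits == 5, pages starts at 7 and is rounded up to the next power of
 * two, 8. If max_pages is 32 that survives the clamp, and a previous
 * window of 12 gives a "don't shrink too fast" floor of 6, so the
 * final window is 8 pages. With hits == 0, a non-adjacent offset and
 * a small previous window, the window collapses to a single page.
 */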

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
 * This is needed to ensure the VMA will not be freed behind our back. In
 * the case of the speculative page fault handler this cannot happen, even
 * if we don't hold the mmap_sem; callees are assumed to read the VMA's
 * fields using READ_ONCE() so they see consistent values.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Test swap type to make sure the dereference is safe */
	if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
		struct inode *inode = si->swap_file->f_mapping->host;
		if (inode_read_congested(inode))
			goto skip;
	}

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}
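
/*
 * Worked example (illustrative): if swapin_nr_pages() returns 8 for a
 * fault at offset 1234, then mask == 7, start_offset == 1232 and
 * end_offset == 1239, i.e. a naturally aligned 8-slot cluster around
 * the faulting entry is submitted as one plugged batch of reads.
 */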

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}
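
/*
 * Sizing example (illustrative; assumes SWAP_ADDRESS_SPACE_PAGES ==
 * 16384): a 100000-page swap device gets DIV_ROUND_UP(100000, 16384)
 * == 7 address_spaces, so its swap cache XArray, and the lock that
 * protects it, is split seven ways to reduce contention.
 */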

void exit_swap_address_space(unsigned int type)
{
	kvfree(swapper_spaces[type]);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(READ_ONCE(vma->vm_start)),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(READ_ONCE(vma->vm_end)),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}
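
/*
 * Clamping example (illustrative): for a fault at virtual pfn 0x1010
 * with a proposed window of [0x100c, 0x1014), a VMA spanning
 * [0x1000, 0x1012) and the containing PMD covering [0x1000, 0x1200),
 * the result is [0x100c, 0x1012): readahead never crosses the VMA or
 * the PMD that holds the fault address.
 */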
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) static void swap_ra_info(struct vm_fault *vmf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) struct vma_swap_readahead *ra_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) unsigned long ra_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) swp_entry_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) unsigned long faddr, pfn, fpfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) unsigned long start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) pte_t *pte, *orig_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) unsigned int max_win, hits, prev_win, win, left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) #ifndef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) pte_t *tpte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) SWAP_RA_ORDER_CEILING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if (max_win == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) ra_info->win = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) faddr = vmf->address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) entry = pte_to_swp_entry(*pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if ((unlikely(non_swap_entry(entry)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) pte_unmap(orig_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) fpfn = PFN_DOWN(faddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) ra_val = GET_SWAP_RA_VAL(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) prev_win = SWAP_RA_WIN(ra_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) hits = SWAP_RA_HITS(ra_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) max_win, prev_win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) atomic_long_set(&vma->swap_readahead_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) SWAP_RA_VAL(faddr, win, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (win == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) pte_unmap(orig_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* Copy the PTEs because the page table may be unmapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (fpfn == pfn + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) else if (pfn == fpfn + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) &start, &end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) left = (win - 1) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) &start, &end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) ra_info->nr_pte = end - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) ra_info->offset = fpfn - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) pte -= ra_info->offset;
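/*
 * Expanding on the "Copy the PTEs" comment above: on 64BIT, kernel page
 * tables stay mapped, so remembering where the window's first pte lives
 * is enough.  Otherwise the mapping taken by pte_offset_map() may be a
 * temporary highmem one that pte_unmap() below tears down, so the pte
 * values are copied out instead -- presumably into an array embedded in
 * struct vma_swap_readahead for the !CONFIG_64BIT case.
 */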
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) ra_info->ptes = pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) tpte = ra_info->ptes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) for (pfn = start; pfn != end; pfn++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) *tpte++ = *pte++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) pte_unmap(orig_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
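
/*
 * Sketch of the window placement above (illustrative values only), for
 * win == 8 around fault pfn F, where pfn is the previous fault address
 * recorded in vma->swap_readahead_info:
 *
 *	fpfn == pfn + 1  (forward scan):   window is [F,     F + 8)
 *	pfn == fpfn + 1  (backward scan):  window is [F - 7, F + 1)
 *	otherwise        (random access):  window is [F - 3, F + 5)
 *
 * each range then being clamped by swap_ra_clamp_pfn().
 */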
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * swap_vma_readahead - swap in pages in the hope we need them soon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * @fentry: swap entry of this memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * @gfp_mask: memory allocation flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * @vmf: fault information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * Returns the struct page for entry and addr, after queueing swapin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * Primitive swap readahead code. We simply read in a few pages whose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * virtual addresses are around the fault address in the same vma.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * Caller must hold read mmap_lock if vmf->vma is not NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct blk_plug plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) pte_t *pte, pentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) swp_entry_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) bool page_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct vma_swap_readahead ra_info = {0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) swap_ra_info(vmf, &ra_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (ra_info.win == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) i++, pte++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) pentry = *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (!is_swap_pte(pentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) entry = pte_to_swp_entry(pentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (unlikely(non_swap_entry(entry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) page = __read_swap_cache_async(entry, gfp_mask, vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) vmf->address, &page_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (page_allocated) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) swap_readpage(page, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (i != ra_info.offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) SetPageReadahead(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) count_vm_event(SWAP_RA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) lru_add_drain();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) ra_info.win == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
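
/*
 * Note on the loop above: the readahead pages are only pulled into the
 * swap cache (swap_readpage() with synchronous == false), batched under
 * a block plug so the block layer can merge the bios; the page actually
 * faulted on is returned through the final read_swap_cache_async(),
 * with do_poll set only when no readahead window was used (win == 1) --
 * presumably because synchronous polling is only worthwhile when no
 * extra I/O was queued.  A minimal sketch of the same plugging pattern:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < n; i++)
 *		queue_one_read(i);	// hypothetical helper
 *	blk_finish_plug(&plug);		// flushes the batched requests
 */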
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * swapin_readahead - swap in pages in the hope we need them soon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * @entry: swap entry of this memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * @gfp_mask: memory allocation flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * @vmf: fault information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * Returns the struct page for entry and addr, after queueing swapin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * It's the main entry function for swap readahead. Depending on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * configuration, it reads ahead using either cluster-based (i.e. physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * disk based) or vma-based (i.e. faulting virtual address based) readahead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return swap_use_vma_readahead() ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) swap_vma_readahead(entry, gfp_mask, vmf) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) swap_cluster_readahead(entry, gfp_mask, vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
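
/*
 * swap_use_vma_readahead() is defined earlier in this file; it
 * presumably reduces to the vma_ra_enabled knob below plus a check that
 * no rotational swap device is in use, i.e. something like
 *
 *	return READ_ONCE(enable_vma_readahead) &&
 *	       !atomic_read(&nr_rotate_swap);
 *
 * (a sketch, not the authoritative test -- see the real helper).
 */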
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) #ifdef CONFIG_SYSFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static ssize_t vma_ra_enabled_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return sysfs_emit(buf, "%s\n", enable_vma_readahead ? "true" : "false");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static ssize_t vma_ra_enabled_store(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /* kstrtobool() accepts 1/0, y/n, t/f, true/false and on/off input. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) ret = kstrtobool(buf, &enable_vma_readahead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) static struct kobj_attribute vma_ra_enabled_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) vma_ra_enabled_store);
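
/*
 * With mm_kobj backing /sys/kernel/mm, the attribute above appears as
 * /sys/kernel/mm/swap/vma_ra_enabled once swap_init_sysfs() below has
 * run.  Illustrative shell session:
 *
 *	# cat /sys/kernel/mm/swap/vma_ra_enabled
 *	true
 *	# echo false > /sys/kernel/mm/swap/vma_ra_enabled
 */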
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) static struct attribute *swap_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) &vma_ra_enabled_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static struct attribute_group swap_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) .attrs = swap_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) static int __init swap_init_sysfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct kobject *swap_kobj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) swap_kobj = kobject_create_and_add("swap", mm_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (!swap_kobj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) pr_err("failed to create swap kobject\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) err = sysfs_create_group(swap_kobj, &swap_attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) pr_err("failed to register swap group\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) goto delete_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) delete_obj:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) kobject_put(swap_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) subsys_initcall(swap_init_sysfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) #endif