61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1) /*
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2) * zsmalloc memory allocator
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 3) *
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 4) * Copyright (C) 2011 Nitin Gupta
31fc00bb788ff mm/zsmalloc.c (Minchan Kim 2014-01-30 15:45:55 -0800 5) * Copyright (C) 2012, 2013 Minchan Kim
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 6) *
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 7) * This code is released using a dual license strategy: BSD/GPL
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 8) * You can choose the license that better fits your requirements.
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 9) *
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 10) * Released under the terms of 3-clause BSD License
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 11) * Released under the terms of GNU General Public License Version 2.0
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 12) */
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 13)
2db51dae56240 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-06-09 17:41:14 -0700 14) /*
2db51dae56240 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-06-09 17:41:14 -0700 15) * Following is how we use various fields and flags of underlying
2db51dae56240 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-06-09 17:41:14 -0700 16) * struct page(s) to form a zspage.
2db51dae56240 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-06-09 17:41:14 -0700 17) *
2db51dae56240 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-06-09 17:41:14 -0700 18) * Usage of struct page fields:
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 19) * page->private: points to zspage
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 20) * page->freelist(index): links together all component pages of a zspage
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 21) * For the huge page, this is always 0, so we use this field
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 22) * to store handle.
fd8544639e3fd mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:54 -0700 23) * page->units: first object offset in a subpage of zspage
2db51dae56240 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-06-09 17:41:14 -0700 24) *
2db51dae56240 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-06-09 17:41:14 -0700 25) * Usage of struct page flags:
2db51dae56240 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-06-09 17:41:14 -0700 26) * PG_private: identifies the first component page
399d8eebe768f mm/zsmalloc.c (Xishi Qiu 2017-02-22 15:45:01 -0800 27) * PG_owner_priv_1: identifies the huge component page
2db51dae56240 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-06-09 17:41:14 -0700 28) *
2db51dae56240 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-06-09 17:41:14 -0700 29) */
2db51dae56240 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-06-09 17:41:14 -0700 30)
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 31) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 32)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 33) #include <linux/module.h>
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 34) #include <linux/kernel.h>
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 35) #include <linux/sched.h>
50d34394cee68 mm/zsmalloc.c (Ingo Molnar 2017-02-05 16:03:58 +0100 36) #include <linux/magic.h>
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 37) #include <linux/bitops.h>
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 38) #include <linux/errno.h>
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 39) #include <linux/highmem.h>
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 40) #include <linux/string.h>
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 41) #include <linux/slab.h>
ca5999fde0a17 mm/zsmalloc.c (Mike Rapoport 2020-06-08 21:32:38 -0700 42) #include <linux/pgtable.h>
65fddcfca8ad1 mm/zsmalloc.c (Mike Rapoport 2020-06-08 21:32:42 -0700 43) #include <asm/tlbflush.h>
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 44) #include <linux/cpumask.h>
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 45) #include <linux/cpu.h>
0cbb613fa82fb drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-02-13 08:47:49 -0600 46) #include <linux/vmalloc.h>
759b26b29885a mm/zsmalloc.c (Sergey Senozhatsky 2015-11-06 16:29:29 -0800 47) #include <linux/preempt.h>
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 48) #include <linux/spinlock.h>
93144ca35041b mm/zsmalloc.c (Aliaksei Karaliou 2018-01-31 16:18:40 -0800 49) #include <linux/shrinker.h>
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 50) #include <linux/types.h>
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 51) #include <linux/debugfs.h>
bcf1647d08996 mm/zsmalloc.c (Minchan Kim 2014-01-30 15:45:50 -0800 52) #include <linux/zsmalloc.h>
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 53) #include <linux/zpool.h>
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 54) #include <linux/mount.h>
8e9231f819e32 mm/zsmalloc.c (David Howells 2019-03-25 16:38:23 +0000 55) #include <linux/pseudo_fs.h>
dd4123f324bba mm/zsmalloc.c (Minchan Kim 2016-07-26 15:26:50 -0700 56) #include <linux/migrate.h>
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 57) #include <linux/wait.h>
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 58) #include <linux/pagemap.h>
cdc346b36e1df mm/zsmalloc.c (Sergey Senozhatsky 2018-01-04 16:18:02 -0800 59) #include <linux/fs.h>
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 60) #include <linux/local_lock.h>
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 61)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 62) #define ZSPAGE_MAGIC 0x58
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 63)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 64) /*
cb152a1a95606 mm/zsmalloc.c (Shijie Luo 2021-05-06 18:05:51 -0700 65) * This must be power of 2 and greater than or equal to sizeof(link_free).
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 66) * These two conditions ensure that any 'struct link_free' itself doesn't
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 67) * span more than 1 page which avoids complex case of mapping 2 pages simply
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 68) * to restore link_free pointer values.
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 69) */
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 70) #define ZS_ALIGN 8
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 71)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 72) /*
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 73) * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 74) * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 75) */
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 76) #define ZS_MAX_ZSPAGE_ORDER 2
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 77) #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 78)
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 79) #define ZS_HANDLE_SIZE (sizeof(unsigned long))
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 80)
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 81) #ifdef CONFIG_PREEMPT_RT
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 82)
/*
 * On PREEMPT_RT the handle's pin bit cannot be spun on, so the handle
 * memory carries a real spinlock next to the encoded object location;
 * it is used to synchronize object access against migration (see the
 * HANDLE_PIN_BIT comment below for the non-RT scheme).
 */
struct zsmalloc_handle {
	unsigned long addr;	/* encoded <PFN, obj_idx> object location */
	spinlock_t lock;	/* serializes user access vs. migration */
};
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 87)
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 88) #define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 89)
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 90) #else
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 91)
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 92) #define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long))
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 93) #endif
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 94)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 95) /*
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 96) * Object location (<PFN>, <obj_idx>) is encoded as
b956b5ac28cd7 mm/zsmalloc.c (Randy Dunlap 2020-08-11 18:33:31 -0700 97) * a single (unsigned long) handle value.
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 98) *
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 99) * Note that object index <obj_idx> starts from 0.
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 100) *
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 101) * This is made more complicated by various memory models and PAE.
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 102) */
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 103)
02390b87a9459 mm/zsmalloc.c (Kirill A. Shutemov 2018-02-14 14:16:49 +0300 104) #ifndef MAX_POSSIBLE_PHYSMEM_BITS
02390b87a9459 mm/zsmalloc.c (Kirill A. Shutemov 2018-02-14 14:16:49 +0300 105) #ifdef MAX_PHYSMEM_BITS
02390b87a9459 mm/zsmalloc.c (Kirill A. Shutemov 2018-02-14 14:16:49 +0300 106) #define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS
02390b87a9459 mm/zsmalloc.c (Kirill A. Shutemov 2018-02-14 14:16:49 +0300 107) #else
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 108) /*
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 109) * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 110) * be PAGE_SHIFT
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 111) */
02390b87a9459 mm/zsmalloc.c (Kirill A. Shutemov 2018-02-14 14:16:49 +0300 112) #define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 113) #endif
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 114) #endif
02390b87a9459 mm/zsmalloc.c (Kirill A. Shutemov 2018-02-14 14:16:49 +0300 115)
02390b87a9459 mm/zsmalloc.c (Kirill A. Shutemov 2018-02-14 14:16:49 +0300 116) #define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 117)
/*
 * The memory allocated for a handle keeps the object position by
 * encoding <page, obj_idx>, and the encoded value has room in its
 * least significant bit (see obj_to_location).
 * We use that bit to synchronize between object access by the
 * user and migration.
 */
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 125) #define HANDLE_PIN_BIT 0
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 126)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 127) /*
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 128) * Head in allocated object should have OBJ_ALLOCATED_TAG
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 129) * to identify the object was allocated or not.
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 130) * It's okay to add the status bit in the least bit because
 * header keeps the handle, which is a 4-byte aligned address, so we
 * have room for at least two bits.
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 133) */
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 134) #define OBJ_ALLOCATED_TAG 1
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 135) #define OBJ_TAG_BITS 1
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 136) #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 137) #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 138)
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 139) #define FULLNESS_BITS 2
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 140) #define CLASS_BITS 8
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 141) #define ISOLATED_BITS 3
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 142) #define MAGIC_VAL_BITS 8
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 143)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 144) #define MAX(a, b) ((a) >= (b) ? (a) : (b))
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 145) /* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 146) #define ZS_MIN_ALLOC_SIZE \
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 147) MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 148) /* each chunk includes extra space to keep handle */
7b60a68529b0d mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:39 -0700 149) #define ZS_MAX_ALLOC_SIZE PAGE_SIZE
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 150)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 151) /*
7eb52512a9778 mm/zsmalloc.c (Weijie Yang 2014-06-04 16:11:08 -0700 152) * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 * - Large number of size classes is potentially wasteful as free pages are
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 155) * spread across these classes
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 156) * - Small number of size classes causes large internal fragmentation
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 157) * - Probably its better to use specific size classes (empirically
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 158) * determined). NOTE: all those class sizes must be set as multiple of
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 159) * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 160) *
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 161) * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 162) * (reason above)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 163) */
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 164) #define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> CLASS_BITS)
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 165) #define ZS_SIZE_CLASSES (DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 166) ZS_SIZE_CLASS_DELTA) + 1)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 167)
/*
 * Fullness groups a zspage may belong to; each size_class keeps one
 * list of zspages per group (see fullness_list[] in struct size_class
 * and the threshold description above fullness_threshold_frac below).
 */
enum fullness_group {
	ZS_EMPTY,		/* no objects allocated */
	ZS_ALMOST_EMPTY,
	ZS_ALMOST_FULL,
	ZS_FULL,		/* every object allocated */
	NR_ZS_FULLNESS,		/* number of groups, not a group itself */
};
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 175)
/*
 * Indices into the per-class counters (struct zs_size_stat).  The
 * first four entries mirror the fullness groups above.
 */
enum zs_stat_type {
	CLASS_EMPTY,
	CLASS_ALMOST_EMPTY,
	CLASS_ALMOST_FULL,
	CLASS_FULL,
	OBJ_ALLOCATED,
	OBJ_USED,
	NR_ZS_STAT_TYPE,	/* number of counters, not a counter itself */
};
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 185)
/* per-size_class statistics counters, indexed by enum zs_stat_type */
struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 189)
57244594195fe mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:27 -0700 190) #ifdef CONFIG_ZSMALLOC_STAT
57244594195fe mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:27 -0700 191) static struct dentry *zs_stat_root;
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 192) #endif
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 193)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 194) #ifdef CONFIG_COMPACTION
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 195) static struct vfsmount *zsmalloc_mnt;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 196) #endif
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 197)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 198) /*
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 199) * We assign a page to ZS_ALMOST_EMPTY fullness group when:
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 200) * n <= N / f, where
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 201) * n = number of allocated objects
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 202) * N = total number of objects zspage can store
6dd9737e31504 mm/zsmalloc.c (Wang Sheng-Hui 2014-10-09 15:29:59 -0700 203) * f = fullness_threshold_frac
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 204) *
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 205) * Similarly, we assign zspage to:
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 206) * ZS_ALMOST_FULL when n > N / f
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 207) * ZS_EMPTY when n == 0
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 208) * ZS_FULL when n == N
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 209) *
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 210) * (see: fix_fullness_group())
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 211) */
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 212) static const int fullness_threshold_frac = 4;
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 213) static size_t huge_class_size;
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 214)
struct size_class {
	spinlock_t lock;	/* per-class lock (NOTE(review): exact coverage not visible in this chunk) */
	/* one list of zspages per fullness group */
	struct list_head fullness_list[NR_ZS_FULLNESS];
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	int objs_per_zspage;	/* objects a full zspage of this class holds */
	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	unsigned int index;	/* this class's index in zs_pool->size_class[] */
	struct zs_size_stat stats;	/* counters, see enum zs_stat_type */
};
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 230)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 231) /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetPageHugeObject(struct page *page)
{
	/* PG_owner_priv_1 identifies the huge component page (see top of file) */
	SetPageOwnerPriv1(page);
}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 236)
static void ClearPageHugeObject(struct page *page)
{
	/* drop the PG_owner_priv_1 "huge object" marking */
	ClearPageOwnerPriv1(page);
}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 241)
static int PageHugeObject(struct page *page)
{
	/* non-zero iff the page was marked via SetPageHugeObject() */
	return PageOwnerPriv1(page);
}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 246)
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 247) /*
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 248) * Placed within free objects to form a singly linked list.
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 249) * For every zspage, zspage->freeobj gives head of this list.
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 250) *
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 251) * This must be power of 2 and less than or equal to ZS_ALIGN
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 252) */
struct link_free {
	union {
		/*
		 * Free object index;
		 * It's valid for non-allocated object
		 */
		unsigned long next;
		/*
		 * Handle of allocated object.
		 * (its least significant bit carries OBJ_ALLOCATED_TAG,
		 * see the comment above OBJ_ALLOCATED_TAG)
		 */
		unsigned long handle;
	};
};
0959c63f11c3b drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-08-08 15:12:17 +0900 266)
struct zs_pool {
	const char *name;	/* pool name */

	/* one size_class per possible object size, see ZS_SIZE_CLASS_DELTA */
	struct size_class *size_class[ZS_SIZE_CLASSES];
	struct kmem_cache *handle_cachep;	/* slab cache for handles */
	struct kmem_cache *zspage_cachep;	/* slab cache for struct zspage */

	atomic_long_t pages_allocated;	/* 0-order pages backing this pool */

	struct zs_pool_stats stats;

	/* Compact classes */
	struct shrinker shrinker;

#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;	/* this pool's debugfs entry */
#endif
#ifdef CONFIG_COMPACTION
	struct inode *inode;
	/* presumably runs async_free_zspage(); see migration_wait below */
	struct work_struct free_work;
	/* A wait queue for when migration races with async_free_zspage() */
	struct wait_queue_head migration_wait;
	atomic_long_t isolated_pages;
	bool destroying;
#endif
};
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 293)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 294) struct zspage {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 295) struct {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 296) unsigned int fullness:FULLNESS_BITS;
85d492f28d056 mm/zsmalloc.c (Minchan Kim 2017-04-13 14:56:40 -0700 297) unsigned int class:CLASS_BITS + 1;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 298) unsigned int isolated:ISOLATED_BITS;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 299) unsigned int magic:MAGIC_VAL_BITS;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 300) };
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 301) unsigned int inuse;
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 302) unsigned int freeobj;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 303) struct page *first_page;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 304) struct list_head list; /* fullness list */
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 305) #ifdef CONFIG_COMPACTION
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 306) rwlock_t lock;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 307) #endif
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 308) };
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 309)
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 310) struct mapping_area {
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 311) local_lock_t lock;
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 312) char *vm_buf; /* copy buffer for objects that span pages */
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 313) char *vm_addr; /* address of kmap_atomic()'ed pages */
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 314) enum zs_mapmode vm_mm; /* mapping mode */
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 315) };
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 316)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 317) #ifdef CONFIG_COMPACTION
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 318) static int zs_register_migration(struct zs_pool *pool);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 319) static void zs_unregister_migration(struct zs_pool *pool);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 320) static void migrate_lock_init(struct zspage *zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 321) static void migrate_read_lock(struct zspage *zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 322) static void migrate_read_unlock(struct zspage *zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 323) static void kick_deferred_free(struct zs_pool *pool);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 324) static void init_deferred_free(struct zs_pool *pool);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 325) static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 326) #else
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 327) static int zsmalloc_mount(void) { return 0; }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 328) static void zsmalloc_unmount(void) {}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 329) static int zs_register_migration(struct zs_pool *pool) { return 0; }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 330) static void zs_unregister_migration(struct zs_pool *pool) {}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 331) static void migrate_lock_init(struct zspage *zspage) {}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 332) static void migrate_read_lock(struct zspage *zspage) {}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 333) static void migrate_read_unlock(struct zspage *zspage) {}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 334) static void kick_deferred_free(struct zs_pool *pool) {}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 335) static void init_deferred_free(struct zs_pool *pool) {}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 336) static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 337) #endif
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 338)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 339) static int create_cache(struct zs_pool *pool)
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 340) {
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 341) pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE,
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 342) 0, 0, NULL);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 343) if (!pool->handle_cachep)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 344) return 1;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 345)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 346) pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 347) 0, 0, NULL);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 348) if (!pool->zspage_cachep) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 349) kmem_cache_destroy(pool->handle_cachep);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 350) pool->handle_cachep = NULL;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 351) return 1;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 352) }
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 353)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 354) return 0;
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 355) }
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 356)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 357) static void destroy_cache(struct zs_pool *pool)
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 358) {
cd10add00c1b3 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:55 -0700 359) kmem_cache_destroy(pool->handle_cachep);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 360) kmem_cache_destroy(pool->zspage_cachep);
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 361) }
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 362)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 363) static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 364) {
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 365) void *p;
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 366)
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 367) p = kmem_cache_alloc(pool->handle_cachep,
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 368) gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 369) #ifdef CONFIG_PREEMPT_RT
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 370) if (p) {
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 371) struct zsmalloc_handle *zh = p;
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 372)
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 373) spin_lock_init(&zh->lock);
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 374) }
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 375) #endif
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 376) return (unsigned long)p;
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 377) }
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 378)
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 379) #ifdef CONFIG_PREEMPT_RT
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 380) static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 381) {
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 382) return (void *)(handle & ~((1 << OBJ_TAG_BITS) - 1));
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 383) }
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 384) #endif
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 385)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 386) static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 387) {
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 388) kmem_cache_free(pool->handle_cachep, (void *)handle);
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 389) }
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 390)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 391) static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 392) {
f0231305acd53 mm/zsmalloc.c (Miaohe Lin 2021-02-25 17:18:27 -0800 393) return kmem_cache_zalloc(pool->zspage_cachep,
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 394) flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
399d8eebe768f mm/zsmalloc.c (Xishi Qiu 2017-02-22 15:45:01 -0800 395) }
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 396)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 397) static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 398) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 399) kmem_cache_free(pool->zspage_cachep, zspage);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 400) }
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 401)
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 402) static void record_obj(unsigned long handle, unsigned long obj)
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 403) {
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 404) #ifdef CONFIG_PREEMPT_RT
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 405) struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 406)
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 407) WRITE_ONCE(zh->addr, obj);
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 408) #else
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 409) /*
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 410) * lsb of @obj represents handle lock while other bits
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 411) * represent object value the handle is pointing so
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 412) * updating shouldn't do store tearing.
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 413) */
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 414) WRITE_ONCE(*(unsigned long *)handle, obj);
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 415) #endif
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 416) }
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 417)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 418) /* zpool driver */
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 419)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 420) #ifdef CONFIG_ZPOOL
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 421)
6f3526d6db7cb mm/zsmalloc.c (Sergey SENOZHATSKY 2015-11-06 16:29:21 -0800 422) static void *zs_zpool_create(const char *name, gfp_t gfp,
786727799a85a mm/zsmalloc.c (Krzysztof Kozlowski 2015-09-08 15:05:03 -0700 423) const struct zpool_ops *zpool_ops,
479305fd71725 mm/zsmalloc.c (Dan Streetman 2015-06-25 15:00:40 -0700 424) struct zpool *zpool)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 425) {
d0d8da2dc49df mm/zsmalloc.c (Sergey Senozhatsky 2016-05-20 16:59:48 -0700 426) /*
d0d8da2dc49df mm/zsmalloc.c (Sergey Senozhatsky 2016-05-20 16:59:48 -0700 427) * Ignore global gfp flags: zs_malloc() may be invoked from
d0d8da2dc49df mm/zsmalloc.c (Sergey Senozhatsky 2016-05-20 16:59:48 -0700 428) * different contexts and its caller must provide a valid
d0d8da2dc49df mm/zsmalloc.c (Sergey Senozhatsky 2016-05-20 16:59:48 -0700 429) * gfp mask.
d0d8da2dc49df mm/zsmalloc.c (Sergey Senozhatsky 2016-05-20 16:59:48 -0700 430) */
d0d8da2dc49df mm/zsmalloc.c (Sergey Senozhatsky 2016-05-20 16:59:48 -0700 431) return zs_create_pool(name);
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 432) }
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 433)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 434) static void zs_zpool_destroy(void *pool)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 435) {
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 436) zs_destroy_pool(pool);
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 437) }
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 438)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 439) static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 440) unsigned long *handle)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 441) {
d0d8da2dc49df mm/zsmalloc.c (Sergey Senozhatsky 2016-05-20 16:59:48 -0700 442) *handle = zs_malloc(pool, size, gfp);
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 443) return *handle ? 0 : -1;
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 444) }
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 445) static void zs_zpool_free(void *pool, unsigned long handle)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 446) {
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 447) zs_free(pool, handle);
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 448) }
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 449)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 450) static void *zs_zpool_map(void *pool, unsigned long handle,
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 451) enum zpool_mapmode mm)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 452) {
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 453) enum zs_mapmode zs_mm;
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 454)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 455) switch (mm) {
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 456) case ZPOOL_MM_RO:
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 457) zs_mm = ZS_MM_RO;
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 458) break;
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 459) case ZPOOL_MM_WO:
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 460) zs_mm = ZS_MM_WO;
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 461) break;
e4a9bc58969ab mm/zsmalloc.c (Joe Perches 2020-04-06 20:08:39 -0700 462) case ZPOOL_MM_RW:
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 463) default:
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 464) zs_mm = ZS_MM_RW;
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 465) break;
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 466) }
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 467)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 468) return zs_map_object(pool, handle, zs_mm);
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 469) }
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 470) static void zs_zpool_unmap(void *pool, unsigned long handle)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 471) {
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 472) zs_unmap_object(pool, handle);
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 473) }
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 474)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 475) static u64 zs_zpool_total_size(void *pool)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 476) {
722cdc17232f0 mm/zsmalloc.c (Minchan Kim 2014-10-09 15:29:50 -0700 477) return zs_get_total_pages(pool) << PAGE_SHIFT;
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 478) }
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 479)
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 480) static struct zpool_driver zs_zpool_driver = {
c165f25d23ecb mm/zsmalloc.c (Hui Zhu 2019-09-23 15:39:37 -0700 481) .type = "zsmalloc",
c165f25d23ecb mm/zsmalloc.c (Hui Zhu 2019-09-23 15:39:37 -0700 482) .owner = THIS_MODULE,
c165f25d23ecb mm/zsmalloc.c (Hui Zhu 2019-09-23 15:39:37 -0700 483) .create = zs_zpool_create,
c165f25d23ecb mm/zsmalloc.c (Hui Zhu 2019-09-23 15:39:37 -0700 484) .destroy = zs_zpool_destroy,
c165f25d23ecb mm/zsmalloc.c (Hui Zhu 2019-09-23 15:39:37 -0700 485) .malloc_support_movable = true,
c165f25d23ecb mm/zsmalloc.c (Hui Zhu 2019-09-23 15:39:37 -0700 486) .malloc = zs_zpool_malloc,
c165f25d23ecb mm/zsmalloc.c (Hui Zhu 2019-09-23 15:39:37 -0700 487) .free = zs_zpool_free,
c165f25d23ecb mm/zsmalloc.c (Hui Zhu 2019-09-23 15:39:37 -0700 488) .map = zs_zpool_map,
c165f25d23ecb mm/zsmalloc.c (Hui Zhu 2019-09-23 15:39:37 -0700 489) .unmap = zs_zpool_unmap,
c165f25d23ecb mm/zsmalloc.c (Hui Zhu 2019-09-23 15:39:37 -0700 490) .total_size = zs_zpool_total_size,
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 491) };
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 492)
137f8cff505ac mm/zsmalloc.c (Kees Cook 2014-08-29 15:18:40 -0700 493) MODULE_ALIAS("zpool-zsmalloc");
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 494) #endif /* CONFIG_ZPOOL */
c795779df29e1 mm/zsmalloc.c (Dan Streetman 2014-08-06 16:08:38 -0700 495)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 496) /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 497) static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 498) .lock = INIT_LOCAL_LOCK(lock),
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 499) };
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 500)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 501) static bool is_zspage_isolated(struct zspage *zspage)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 502) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 503) return zspage->isolated;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 504) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 505)
3457f41476751 mm/zsmalloc.c (Nick Desaulniers 2017-07-10 15:47:26 -0700 506) static __maybe_unused int is_first_page(struct page *page)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 507) {
a27545bf0bab9 drivers/staging/zsmalloc/zsmalloc-main.c (Minchan Kim 2012-04-25 15:23:09 +0900 508) return PagePrivate(page);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 509) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 510)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 511) /* Protected by class->lock */
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 512) static inline int get_zspage_inuse(struct zspage *zspage)
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 513) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 514) return zspage->inuse;
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 515) }
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 516)
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 517)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 518) static inline void mod_zspage_inuse(struct zspage *zspage, int val)
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 519) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 520) zspage->inuse += val;
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 521) }
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 522)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 523) static inline struct page *get_first_page(struct zspage *zspage)
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 524) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 525) struct page *first_page = zspage->first_page;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 526)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 527) VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 528) return first_page;
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 529) }
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 530)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 531) static inline int get_first_obj_offset(struct page *page)
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 532) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 533) return page->units;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 534) }
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 535)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 536) static inline void set_first_obj_offset(struct page *page, int offset)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 537) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 538) page->units = offset;
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 539) }
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 540)
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 541) static inline unsigned int get_freeobj(struct zspage *zspage)
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 542) {
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 543) return zspage->freeobj;
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 544) }
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 545)
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 546) static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 547) {
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 548) zspage->freeobj = obj;
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 549) }
4f42047bbde05 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:17 -0700 550)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 551) static void get_zspage_mapping(struct zspage *zspage,
a42094676f076 mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:36 -0700 552) unsigned int *class_idx,
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 553) enum fullness_group *fullness)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 554) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 555) BUG_ON(zspage->magic != ZSPAGE_MAGIC);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 556)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 557) *fullness = zspage->fullness;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 558) *class_idx = zspage->class;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 559) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 560)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 561) static void set_zspage_mapping(struct zspage *zspage,
a42094676f076 mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:36 -0700 562) unsigned int class_idx,
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 563) enum fullness_group fullness)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 564) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 565) zspage->class = class_idx;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 566) zspage->fullness = fullness;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 567) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 568)
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 569) /*
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 570) * zsmalloc divides the pool into various size classes where each
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 571) * class maintains a list of zspages where each zspage is divided
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 572) * into equal sized chunks. Each allocation falls into one of these
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 573) * classes depending on its size. This function returns index of the
cb152a1a95606 mm/zsmalloc.c (Shijie Luo 2021-05-06 18:05:51 -0700 574) * size class which has chunk size big enough to hold the given size.
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 575) */
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 576) static int get_size_class_index(int size)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 577) {
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 578) int idx = 0;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 579)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 580) if (likely(size > ZS_MIN_ALLOC_SIZE))
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 581) idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 582) ZS_SIZE_CLASS_DELTA);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 583)
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 584) return min_t(int, ZS_SIZE_CLASSES - 1, idx);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 585) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 586)
3eb95feac113d mm/zsmalloc.c (Matthias Kaehlcke 2017-09-08 16:13:02 -0700 587) /* type can be of enum type zs_stat_type or fullness_group */
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 588) static inline void zs_stat_inc(struct size_class *class,
3eb95feac113d mm/zsmalloc.c (Matthias Kaehlcke 2017-09-08 16:13:02 -0700 589) int type, unsigned long cnt)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 590) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 591) class->stats.objs[type] += cnt;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 592) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 593)
3eb95feac113d mm/zsmalloc.c (Matthias Kaehlcke 2017-09-08 16:13:02 -0700 594) /* type can be of enum type zs_stat_type or fullness_group */
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 595) static inline void zs_stat_dec(struct size_class *class,
3eb95feac113d mm/zsmalloc.c (Matthias Kaehlcke 2017-09-08 16:13:02 -0700 596) int type, unsigned long cnt)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 597) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 598) class->stats.objs[type] -= cnt;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 599) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 600)
3eb95feac113d mm/zsmalloc.c (Matthias Kaehlcke 2017-09-08 16:13:02 -0700 601) /* type can be of enum type zs_stat_type or fullness_group */
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 602) static inline unsigned long zs_stat_get(struct size_class *class,
3eb95feac113d mm/zsmalloc.c (Matthias Kaehlcke 2017-09-08 16:13:02 -0700 603) int type)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 604) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 605) return class->stats.objs[type];
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 606) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 607)
57244594195fe mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:27 -0700 608) #ifdef CONFIG_ZSMALLOC_STAT
57244594195fe mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:27 -0700 609)
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 610) static void __init zs_stat_init(void)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 611) {
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 612) if (!debugfs_initialized()) {
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 613) pr_warn("debugfs not available, stat dir not created\n");
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 614) return;
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 615) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 616)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 617) zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 618) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 619)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 620) static void __exit zs_stat_exit(void)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 621) {
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 622) debugfs_remove_recursive(zs_stat_root);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 623) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 624)
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 625) static unsigned long zs_can_compact(struct size_class *class);
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 626)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 627) static int zs_stats_size_show(struct seq_file *s, void *v)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 628) {
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 629) int i;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 630) struct zs_pool *pool = s->private;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 631) struct size_class *class;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 632) int objs_per_zspage;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 633) unsigned long class_almost_full, class_almost_empty;
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 634) unsigned long obj_allocated, obj_used, pages_used, freeable;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 635) unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 636) unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 637) unsigned long total_freeable = 0;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 638)
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 639) seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 640) "class", "size", "almost_full", "almost_empty",
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 641) "obj_allocated", "obj_used", "pages_used",
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 642) "pages_per_zspage", "freeable");
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 643)
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 644) for (i = 0; i < ZS_SIZE_CLASSES; i++) {
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 645) class = pool->size_class[i];
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 646)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 647) if (class->index != i)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 648) continue;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 649)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 650) spin_lock(&class->lock);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 651) class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 652) class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 653) obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 654) obj_used = zs_stat_get(class, OBJ_USED);
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 655) freeable = zs_can_compact(class);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 656) spin_unlock(&class->lock);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 657)
b4fd07a0864a0 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:49 -0700 658) objs_per_zspage = class->objs_per_zspage;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 659) pages_used = obj_allocated / objs_per_zspage *
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 660) class->pages_per_zspage;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 661)
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 662) seq_printf(s, " %5u %5u %11lu %12lu %13lu"
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 663) " %10lu %10lu %16d %8lu\n",
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 664) i, class->size, class_almost_full, class_almost_empty,
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 665) obj_allocated, obj_used, pages_used,
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 666) class->pages_per_zspage, freeable);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 667)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 668) total_class_almost_full += class_almost_full;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 669) total_class_almost_empty += class_almost_empty;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 670) total_objs += obj_allocated;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 671) total_used_objs += obj_used;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 672) total_pages += pages_used;
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 673) total_freeable += freeable;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 674) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 675)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 676) seq_puts(s, "\n");
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 677) seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 678) "Total", "", total_class_almost_full,
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 679) total_class_almost_empty, total_objs,
1120ed5483941 mm/zsmalloc.c (Sergey Senozhatsky 2016-03-17 14:20:42 -0700 680) total_used_objs, total_pages, "", total_freeable);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 681)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 682) return 0;
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 683) }
5ad3509364a86 mm/zsmalloc.c (Andy Shevchenko 2018-04-05 16:23:16 -0700 684) DEFINE_SHOW_ATTRIBUTE(zs_stats_size);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 685)
d34f615720d17 mm/zsmalloc.c (Dan Streetman 2016-05-20 16:59:56 -0700 686) static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 687) {
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 688) if (!zs_stat_root) {
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 689) pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
d34f615720d17 mm/zsmalloc.c (Dan Streetman 2016-05-20 16:59:56 -0700 690) return;
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 691) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 692)
4268509a36a79 mm/zsmalloc.c (Greg Kroah-Hartman 2019-01-22 16:21:09 +0100 693) pool->stat_dentry = debugfs_create_dir(name, zs_stat_root);
4268509a36a79 mm/zsmalloc.c (Greg Kroah-Hartman 2019-01-22 16:21:09 +0100 694)
4268509a36a79 mm/zsmalloc.c (Greg Kroah-Hartman 2019-01-22 16:21:09 +0100 695) debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool,
4268509a36a79 mm/zsmalloc.c (Greg Kroah-Hartman 2019-01-22 16:21:09 +0100 696) &zs_stats_size_fops);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 697) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 698)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 699) static void zs_pool_stat_destroy(struct zs_pool *pool)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 700) {
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 701) debugfs_remove_recursive(pool->stat_dentry);
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 702) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 703)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 704) #else /* CONFIG_ZSMALLOC_STAT */
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 705) static void __init zs_stat_init(void)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 706) {
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 707) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 708)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 709) static void __exit zs_stat_exit(void)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 710) {
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 711) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 712)
d34f615720d17 mm/zsmalloc.c (Dan Streetman 2016-05-20 16:59:56 -0700 713) static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 714) {
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 715) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 716)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 717) static inline void zs_pool_stat_destroy(struct zs_pool *pool)
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 718) {
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 719) }
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 720) #endif
248ca1b053c82 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:42 -0700 721)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 722)
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 723) /*
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 724) * For each size class, zspages are divided into different groups
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 725) * depending on how "full" they are. This was done so that we could
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 726) * easily find empty or nearly empty zspages when we try to shrink
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 727) * the pool (not yet implemented). This function returns fullness
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 728) * status of the given page.
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 729) */
1fc6e27d7b861 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:11 -0700 730) static enum fullness_group get_fullness_group(struct size_class *class,
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 731) struct zspage *zspage)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 732) {
1fc6e27d7b861 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:11 -0700 733) int inuse, objs_per_zspage;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 734) enum fullness_group fg;
830e4bc5baa9f mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:39 -0700 735)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 736) inuse = get_zspage_inuse(zspage);
1fc6e27d7b861 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:11 -0700 737) objs_per_zspage = class->objs_per_zspage;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 738)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 739) if (inuse == 0)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 740) fg = ZS_EMPTY;
1fc6e27d7b861 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:11 -0700 741) else if (inuse == objs_per_zspage)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 742) fg = ZS_FULL;
1fc6e27d7b861 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:11 -0700 743) else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 744) fg = ZS_ALMOST_EMPTY;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 745) else
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 746) fg = ZS_ALMOST_FULL;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 747)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 748) return fg;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 749) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 750)
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 751) /*
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 752) * Each size class maintains various freelists and zspages are assigned
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 753) * to one of these freelists based on the number of live objects they
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 754) * have. This functions inserts the given zspage into the freelist
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 755) * identified by <class, fullness_group>.
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 756) */
251cbb951b831 mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:42 -0700 757) static void insert_zspage(struct size_class *class,
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 758) struct zspage *zspage,
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 759) enum fullness_group fullness)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 760) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 761) struct zspage *head;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 762)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 763) zs_stat_inc(class, fullness, 1);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 764) head = list_first_entry_or_null(&class->fullness_list[fullness],
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 765) struct zspage, list);
58f1711746251 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:44 -0700 766) /*
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 767) * We want to see more ZS_FULL pages and less almost empty/full.
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 768) * Put pages with higher ->inuse first.
58f1711746251 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:44 -0700 769) */
110ceb8287fd0 mm/zsmalloc.c (Miaohe Lin 2020-12-14 19:14:22 -0800 770) if (head && get_zspage_inuse(zspage) < get_zspage_inuse(head))
110ceb8287fd0 mm/zsmalloc.c (Miaohe Lin 2020-12-14 19:14:22 -0800 771) list_add(&zspage->list, &head->list);
110ceb8287fd0 mm/zsmalloc.c (Miaohe Lin 2020-12-14 19:14:22 -0800 772) else
110ceb8287fd0 mm/zsmalloc.c (Miaohe Lin 2020-12-14 19:14:22 -0800 773) list_add(&zspage->list, &class->fullness_list[fullness]);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 774) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 775)
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 776) /*
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 777) * This function removes the given zspage from the freelist identified
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 778) * by <class, fullness_group>.
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 779) */
251cbb951b831 mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:42 -0700 780) static void remove_zspage(struct size_class *class,
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 781) struct zspage *zspage,
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 782) enum fullness_group fullness)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 783) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 784) VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 785) VM_BUG_ON(is_zspage_isolated(zspage));
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 786)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 787) list_del_init(&zspage->list);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 788) zs_stat_dec(class, fullness, 1);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 789) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 790)
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 791) /*
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 792) * Each size class maintains zspages in different fullness groups depending
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 793) * on the number of live objects they contain. When allocating or freeing
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 794) * objects, the fullness status of the page can change, say, from ALMOST_FULL
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 795) * to ALMOST_EMPTY when freeing an object. This function checks if such
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 796) * a status change has occurred for the given page and accordingly moves the
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 797) * page from the freelist of the old fullness group to that of the new
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 798) * fullness group.
c3e3e88adccb3 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Cupta 2013-12-11 11:04:37 +0900 799) */
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 800) static enum fullness_group fix_fullness_group(struct size_class *class,
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 801) struct zspage *zspage)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 802) {
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 803) int class_idx;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 804) enum fullness_group currfg, newfg;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 805)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 806) get_zspage_mapping(zspage, &class_idx, &currfg);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 807) newfg = get_fullness_group(class, zspage);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 808) if (newfg == currfg)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 809) goto out;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 810)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 811) if (!is_zspage_isolated(zspage)) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 812) remove_zspage(class, zspage, currfg);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 813) insert_zspage(class, zspage, newfg);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 814) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 815)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 816) set_zspage_mapping(zspage, class_idx, newfg);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 817)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 818) out:
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 819) return newfg;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 820) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 821)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 822) /*
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 823) * We have to decide on how many pages to link together
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 824) * to form a zspage for each size class. This is important
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 825) * to reduce wastage due to unusable space left at end of
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 826) * each zspage which is given as:
888fa374e625f mm/zsmalloc.c (Yinghao Xie 2015-04-15 16:15:49 -0700 827) * wastage = Zp % class_size
888fa374e625f mm/zsmalloc.c (Yinghao Xie 2015-04-15 16:15:49 -0700 828) * usage = Zp - wastage
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 829) * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 830) *
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 831) * For example, for size class of 3/8 * PAGE_SIZE, we should
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 832) * link together 3 PAGE_SIZE sized pages to form a zspage
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 833) * since then we can perfectly fit in 8 such objects.
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 834) */
2e3b61547191a drivers/staging/zsmalloc/zsmalloc-main.c (Minchan Kim 2012-05-03 15:40:39 +0900 835) static int get_pages_per_zspage(int class_size)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 836) {
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 837) int i, max_usedpc = 0;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 838) /* zspage order which gives maximum used size per KB */
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 839) int max_usedpc_order = 1;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 840)
84d4faaba2799 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-03-05 11:33:21 -0600 841) for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 842) int zspage_size;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 843) int waste, usedpc;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 844)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 845) zspage_size = i * PAGE_SIZE;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 846) waste = zspage_size % class_size;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 847) usedpc = (zspage_size - waste) * 100 / zspage_size;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 848)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 849) if (usedpc > max_usedpc) {
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 850) max_usedpc = usedpc;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 851) max_usedpc_order = i;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 852) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 853) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 854)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 855) return max_usedpc_order;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 856) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 857)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 858) static struct zspage *get_zspage(struct page *page)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 859) {
a6c5e0f75b3f7 mm/zsmalloc.c (Miaohe Lin 2021-02-25 17:18:34 -0800 860) struct zspage *zspage = (struct zspage *)page_private(page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 861)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 862) BUG_ON(zspage->magic != ZSPAGE_MAGIC);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 863) return zspage;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 864) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 865)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 866) static struct page *get_next_page(struct page *page)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 867) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 868) if (unlikely(PageHugeObject(page)))
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 869) return NULL;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 870)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 871) return page->freelist;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 872) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 873)
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 874) /**
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 875) * obj_to_location - get (<page>, <obj_idx>) from encoded object value
e8b098fc5747a mm/zsmalloc.c (Mike Rapoport 2018-04-05 16:24:57 -0700 876) * @obj: the encoded object value
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 877) * @page: page object resides in zspage
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 878) * @obj_idx: object index
67296874eb1cc drivers/staging/zsmalloc/zsmalloc-main.c (Olav Haugan 2013-11-22 09:30:41 -0800 879) */
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 880) static void obj_to_location(unsigned long obj, struct page **page,
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 881) unsigned int *obj_idx)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 882) {
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 883) obj >>= OBJ_TAG_BITS;
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 884) *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 885) *obj_idx = (obj & OBJ_INDEX_MASK);
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 886) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 887)
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 888) /**
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 889) * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 890) * @page: page object resides in zspage
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 891) * @obj_idx: object index
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 892) */
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 893) static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 894) {
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 895) unsigned long obj;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 896)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 897) obj = page_to_pfn(page) << OBJ_INDEX_BITS;
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 898) obj |= obj_idx & OBJ_INDEX_MASK;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 899) obj <<= OBJ_TAG_BITS;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 900)
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 901) return obj;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 902) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 903)
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 904) static unsigned long handle_to_obj(unsigned long handle)
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 905) {
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 906) #ifdef CONFIG_PREEMPT_RT
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 907) struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 908)
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 909) return zh->addr;
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 910) #else
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 911) return *(unsigned long *)handle;
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 912) #endif
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 913) }
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 914)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 915) static unsigned long obj_to_head(struct page *page, void *obj)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 916) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 917) if (unlikely(PageHugeObject(page))) {
830e4bc5baa9f mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:39 -0700 918) VM_BUG_ON_PAGE(!is_first_page(page), page);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 919) return page->index;
7b60a68529b0d mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:39 -0700 920) } else
7b60a68529b0d mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:39 -0700 921) return *(unsigned long *)obj;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 922) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 923)
/*
 * testpin_tag - report whether the handle's pin lock is currently held.
 *
 * On !PREEMPT_RT this tests HANDLE_PIN_BIT of the word the handle
 * points to; on PREEMPT_RT it queries the embedded spinlock.
 */
static inline int testpin_tag(unsigned long handle)
{
#ifdef CONFIG_PREEMPT_RT
	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);

	return spin_is_locked(&zh->lock);
#else
	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
#endif
}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 934)
/*
 * trypin_tag - try to take the handle's pin lock without spinning.
 * Returns nonzero on success, 0 if the handle is already pinned.
 */
static inline int trypin_tag(unsigned long handle)
{
#ifdef CONFIG_PREEMPT_RT
	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);

	return spin_trylock(&zh->lock);
#else
	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
#endif
}
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 945)
70c7ec95bece1 mm/zsmalloc.c (Jules Irenge 2020-04-06 20:08:27 -0700 946) static void pin_tag(unsigned long handle) __acquires(bitlock)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 947) {
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 948) #ifdef CONFIG_PREEMPT_RT
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 949) struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 950)
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 951) return spin_lock(&zh->lock);
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 952) #else
1b8320b620d6c mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:14 -0700 953) bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 954) #endif
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 955) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 956)
bc22b18b1f805 mm/zsmalloc.c (Jules Irenge 2020-04-06 20:08:30 -0700 957) static void unpin_tag(unsigned long handle) __releases(bitlock)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 958) {
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 959) #ifdef CONFIG_PREEMPT_RT
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 960) struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 961)
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 962) return spin_unlock(&zh->lock);
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 963) #else
1b8320b620d6c mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:14 -0700 964) bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
1686ad485ec0b mm/zsmalloc.c (Mike Galbraith 2021-09-28 09:38:47 +0200 965) #endif
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 966) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 967)
/*
 * reset_page - scrub zsmalloc metadata off a sub-page before giving it
 * back to the page allocator: movable state, PG_private, page->private
 * (which held the zspage pointer), the mapcount (abused as a class/
 * fullness store), the huge-object flag and the freelist link to the
 * next sub-page.
 */
static void reset_page(struct page *page)
{
	__ClearPageMovable(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_mapcount_reset(page);
	ClearPageHugeObject(page);
	page->freelist = NULL;
}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 977)
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 978) static int trylock_zspage(struct zspage *zspage)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 979) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 980) struct page *cursor, *fail;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 981)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 982) for (cursor = get_first_page(zspage); cursor != NULL; cursor =
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 983) get_next_page(cursor)) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 984) if (!trylock_page(cursor)) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 985) fail = cursor;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 986) goto unlock;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 987) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 988) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 989)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 990) return 1;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 991) unlock:
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 992) for (cursor = get_first_page(zspage); cursor != fail; cursor =
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 993) get_next_page(cursor))
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 994) unlock_page(cursor);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 995)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 996) return 0;
f4477e90b3ea4 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-04-02 09:13:56 -0500 997) }
f4477e90b3ea4 drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-04-02 09:13:56 -0500 998)
/*
 * __free_zspage - give every sub-page of an empty zspage back to the
 * system and free the zspage descriptor.
 *
 * Caller must hold class->lock, must have locked all sub-pages (see
 * trylock_zspage()), and the zspage must be empty (ZS_EMPTY).
 */
static void __free_zspage(struct zs_pool *pool, struct size_class *class,
				struct zspage *zspage)
{
	struct page *page, *next;
	enum fullness_group fg;
	unsigned int class_idx;

	get_zspage_mapping(zspage, &class_idx, &fg);

	assert_spin_locked(&class->lock);

	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(fg != ZS_EMPTY);

	next = page = get_first_page(zspage);
	do {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		/* fetch the chain link before reset_page() clears it */
		next = get_next_page(page);
		reset_page(page);
		unlock_page(page);
		dec_zone_page_state(page, NR_ZSPAGES);
		put_page(page);
		page = next;
	} while (page != NULL);

	cache_free_zspage(pool, zspage);

	/* account the freed objects and pages against the class/pool */
	zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
	atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
}
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1030)
/*
 * free_zspage - free an empty zspage, deferring to the free-work
 * mechanism when a sub-page is currently locked (e.g. under migration).
 */
static void free_zspage(struct zs_pool *pool, struct size_class *class,
			struct zspage *zspage)
{
	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(list_empty(&zspage->list));

	if (!trylock_zspage(zspage)) {
		/* couldn't lock every sub-page; free asynchronously */
		kick_deferred_free(pool);
		return;
	}

	remove_zspage(class, zspage, ZS_EMPTY);
	__free_zspage(pool, class, zspage);
}
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1045)
/*
 * Initialize a newly allocated zspage: carve each sub-page into
 * class->size objects and thread them all onto a single freelist
 * (each free object's first word stores the index of the next free
 * object, shifted left by OBJ_TAG_BITS).
 */
static void init_zspage(struct size_class *class, struct zspage *zspage)
{
	/* object 0 is the list head; the first link points at object 1 */
	unsigned int freeobj = 1;
	unsigned long off = 0;
	struct page *page = get_first_page(zspage);

	while (page) {
		struct page *next_page;
		struct link_free *link;
		void *vaddr;

		/* remember where the first object starts on this page */
		set_first_obj_offset(page, off);

		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		/* link every object that fits wholly within this page */
		while ((off += class->size) < PAGE_SIZE) {
			link->next = freeobj++ << OBJ_TAG_BITS;
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		if (next_page) {
			link->next = freeobj++ << OBJ_TAG_BITS;
		} else {
			/*
			 * Reset OBJ_TAG_BITS bit to last link to tell
			 * whether it's allocated object or not.
			 */
			link->next = -1UL << OBJ_TAG_BITS;
		}
		kunmap_atomic(vaddr);
		page = next_page;
		/* a partial object carries over into the next page */
		off %= PAGE_SIZE;
	}

	set_freeobj(zspage, 0);
}
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1090)
/*
 * create_page_chain - wire freshly allocated pages into a zspage.
 * @class: size class the zspage belongs to
 * @zspage: zspage descriptor the pages will back
 * @pages: array of class->pages_per_zspage pages to chain together
 */
static void create_page_chain(struct size_class *class, struct zspage *zspage,
				struct page *pages[])
{
	int i;
	struct page *page;
	struct page *prev_page = NULL;
	int nr_pages = class->pages_per_zspage;

	/*
	 * Allocate individual pages and link them together as:
	 * 1. all pages are linked together using page->freelist
	 * 2. each sub-page point to zspage using page->private
	 *
	 * we set PG_private to identify the first page (i.e. no other sub-page
	 * has this flag set).
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pages[i];
		set_page_private(page, (unsigned long)zspage);
		page->freelist = NULL;
		if (i == 0) {
			zspage->first_page = page;
			SetPagePrivate(page);
			/* one object filling one page => huge object */
			if (unlikely(class->objs_per_zspage == 1 &&
					class->pages_per_zspage == 1))
				SetPageHugeObject(page);
		} else {
			prev_page->freelist = page;
		}
		prev_page = page;
	}
}
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1123)
/*
 * Allocate a zspage for the given size class
 */
static struct zspage *alloc_zspage(struct zs_pool *pool,
					struct size_class *class,
					gfp_t gfp)
{
	int i;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
	struct zspage *zspage = cache_alloc_zspage(pool, gfp);

	if (!zspage)
		return NULL;

	/* magic distinguishes a real zspage from stray page->private */
	zspage->magic = ZSPAGE_MAGIC;
	migrate_lock_init(zspage);

	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(gfp);
		if (!page) {
			/* unwind: return the pages allocated so far */
			while (--i >= 0) {
				dec_zone_page_state(pages[i], NR_ZSPAGES);
				__free_page(pages[i]);
			}
			cache_free_zspage(pool, zspage);
			return NULL;
		}

		inc_zone_page_state(page, NR_ZSPAGES);
		pages[i] = page;
	}

	create_page_chain(class, zspage, pages);
	init_zspage(class, zspage);

	return zspage;
}
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1163)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1164) static struct zspage *find_get_zspage(struct size_class *class)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1165) {
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1166) int i;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1167) struct zspage *zspage;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1168)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1169) for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1170) zspage = list_first_entry_or_null(&class->fullness_list[i],
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1171) struct zspage, list);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1172) if (zspage)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1173) break;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1174) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1175)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1176) return zspage;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1177) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1178)
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1179) static inline int __zs_cpu_up(struct mapping_area *area)
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1180) {
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1181) /*
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1182) * Make sure we don't leak memory if a cpu UP notification
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1183) * and zs_init() race and both call zs_cpu_up() on the same cpu
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1184) */
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1185) if (area->vm_buf)
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1186) return 0;
40f9fb8cffc6a mm/zsmalloc.c (Mahendran Ganesh 2014-12-12 16:57:01 -0800 1187) area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1188) if (!area->vm_buf)
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1189) return -ENOMEM;
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1190) return 0;
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1191) }
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1192)
/* Free the per-cpu copy buffer allocated by __zs_cpu_up(). */
static inline void __zs_cpu_down(struct mapping_area *area)
{
	kfree(area->vm_buf);
	area->vm_buf = NULL;
}
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 1198)
/*
 * __zs_map_object - map an object that straddles two pages by copying
 * it into the per-cpu bounce buffer area->vm_buf.
 * @pages: the two pages the object spans
 * @off: offset of the object within pages[0]
 * @size: object size in bytes
 *
 * Returns area->vm_buf.  For ZS_MM_WO mappings the copy-in is skipped:
 * the caller is going to overwrite the whole object anyway.
 */
static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	/* split: sizes[0] bytes on the first page, the rest on the second */
	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}
5f601902c61e6 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-02 16:15:49 -0500 1226)
/*
 * Copy the staged per-cpu bounce buffer back into an object that spans
 * two pages.  Counterpart of __zs_map_object(); runs with page faults
 * disabled and re-enables them on exit to match the kunmap_atomic()
 * return conditions.
 */
static void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	buf = area->vm_buf;
	/* skip the handle stored at the start of the chunk so it is not overwritten */
	buf = buf + ZS_HANDLE_SIZE;
	size -= ZS_HANDLE_SIZE;
	off += ZS_HANDLE_SIZE;

	/* split the copy at the page boundary */
	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1258)
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 1259) static int zs_cpu_prepare(unsigned int cpu)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1260) {
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1261) struct mapping_area *area;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1262)
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 1263) area = &per_cpu(zs_map_area, cpu);
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 1264) return __zs_cpu_up(area);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1265) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1266)
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 1267) static int zs_cpu_dead(unsigned int cpu)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1268) {
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 1269) struct mapping_area *area;
40f9fb8cffc6a mm/zsmalloc.c (Mahendran Ganesh 2014-12-12 16:57:01 -0800 1270)
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 1271) area = &per_cpu(zs_map_area, cpu);
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 1272) __zs_cpu_down(area);
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 1273) return 0;
b1b00a5b8a6cf mm/zsmalloc.c (Sergey Senozhatsky 2014-12-12 16:56:56 -0800 1274) }
b1b00a5b8a6cf mm/zsmalloc.c (Sergey Senozhatsky 2014-12-12 16:56:56 -0800 1275)
64d90465f0132 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:51 -0700 1276) static bool can_merge(struct size_class *prev, int pages_per_zspage,
64d90465f0132 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:51 -0700 1277) int objs_per_zspage)
9eec4cd53f986 mm/zsmalloc.c (Joonsoo Kim 2014-12-12 16:56:44 -0800 1278) {
64d90465f0132 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:51 -0700 1279) if (prev->pages_per_zspage == pages_per_zspage &&
64d90465f0132 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:51 -0700 1280) prev->objs_per_zspage == objs_per_zspage)
64d90465f0132 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:51 -0700 1281) return true;
9eec4cd53f986 mm/zsmalloc.c (Joonsoo Kim 2014-12-12 16:56:44 -0800 1282)
64d90465f0132 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:51 -0700 1283) return false;
9eec4cd53f986 mm/zsmalloc.c (Joonsoo Kim 2014-12-12 16:56:44 -0800 1284) }
9eec4cd53f986 mm/zsmalloc.c (Joonsoo Kim 2014-12-12 16:56:44 -0800 1285)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1286) static bool zspage_full(struct size_class *class, struct zspage *zspage)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1287) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1288) return get_zspage_inuse(zspage) == class->objs_per_zspage;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1289) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1290)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 1291) unsigned long zs_get_total_pages(struct zs_pool *pool)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 1292) {
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 1293) return atomic_long_read(&pool->pages_allocated);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 1294) }
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 1295) EXPORT_SYMBOL_GPL(zs_get_total_pages);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 1296)
/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 * @mm: mapping mode to use
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, off;
	unsigned int obj_idx;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
	void *ret;

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another users mappings.
	 */
	BUG_ON(in_interrupt());

	/* From now on, migration cannot move the object */
	pin_tag(handle);

	/* decode the handle into the page and object index it refers to */
	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);

	/* migration cannot move any subpage in this zspage */
	migrate_read_lock(zspage);

	get_zspage_mapping(zspage, &class_idx, &fg);
	class = pool->size_class[class_idx];
	/* byte offset of the object within its (first) page */
	off = (class->size * obj_idx) & ~PAGE_MASK;

	/* claim this CPU's mapping area; released in zs_unmap_object() */
	local_lock(&zs_map_area.lock);
	area = this_cpu_ptr(&zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		ret = area->vm_addr + off;
		goto out;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	ret = __zs_map_object(area, pages, off, class->size);
out:
	/* non-huge chunks store the handle first; return the payload address */
	if (likely(!PageHugeObject(page)))
		ret += ZS_HANDLE_SIZE;

	return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1371)
/*
 * zs_unmap_object - undo a zs_map_object() mapping.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Releases everything zs_map_object() took, in reverse order: the
 * atomic kmap (or, for an object spanning two pages, the per-cpu
 * bounce buffer via __zs_unmap_object()), the per-cpu map-area lock,
 * the zspage migration read lock, and the handle pin.
 */
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, off;
	unsigned int obj_idx;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	/* decode the handle into the page and object index it refers to */
	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);
	get_zspage_mapping(zspage, &class_idx, &fg);
	class = pool->size_class[class_idx];
	/* byte offset of the object within its (first) page */
	off = (class->size * obj_idx) & ~PAGE_MASK;

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		/* flush the per-cpu buffer back into the two pages */
		__zs_unmap_object(area, pages, off, class->size);
	}
	local_unlock(&zs_map_area.lock);

	migrate_read_unlock(zspage);
	unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1409)
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1410) /**
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1411) * zs_huge_class_size() - Returns the size (in bytes) of the first huge
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1412) * zsmalloc &size_class.
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1413) * @pool: zsmalloc pool to use
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1414) *
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1415) * The function returns the size of the first huge class - any object of equal
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1416) * or bigger size will be stored in zspage consisting of a single physical
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1417) * page.
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1418) *
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1419) * Context: Any context.
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1420) *
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1421) * Return: the size (in bytes) of the first huge zsmalloc &size_class.
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1422) */
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1423) size_t zs_huge_class_size(struct zs_pool *pool)
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1424) {
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1425) return huge_class_size;
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1426) }
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1427) EXPORT_SYMBOL_GPL(zs_huge_class_size);
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 1428)
/*
 * Carve the next free object out of @zspage and mark it allocated.
 *
 * Pops the head of the zspage's freelist, records @handle (tagged with
 * OBJ_ALLOCATED_TAG) either in the chunk header (normal classes) or in
 * first_page->index (huge-object pages), updates the in-use counters
 * and returns the encoded object location.
 *
 * NOTE(review): callers appear to hold class->lock — zs_malloc() in
 * this file does; confirm for any new caller.
 */
static unsigned long obj_malloc(struct size_class *class,
				struct zspage *zspage, unsigned long handle)
{
	int i, nr_page, offset;
	unsigned long obj;
	struct link_free *link;

	struct page *m_page;
	unsigned long m_offset;
	void *vaddr;

	/* tag the handle so the chunk reads as allocated */
	handle |= OBJ_ALLOCATED_TAG;
	obj = get_freeobj(zspage);

	/* locate the page and in-page offset of free object number 'obj' */
	offset = obj * class->size;
	nr_page = offset >> PAGE_SHIFT;
	m_offset = offset & ~PAGE_MASK;
	m_page = get_first_page(zspage);

	for (i = 0; i < nr_page; i++)
		m_page = get_next_page(m_page);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	/* advance the freelist head to the next free object */
	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
	if (likely(!PageHugeObject(m_page)))
		/* record handle in the header of allocated chunk */
		link->handle = handle;
	else
		/* record handle to page->index */
		zspage->first_page->index = handle;

	kunmap_atomic(vaddr);
	mod_zspage_inuse(zspage, 1);
	zs_stat_inc(class, OBJ_USED, 1);

	/* encode (page, index) back into an obj value for the caller */
	obj = location_to_obj(m_page, obj);

	return obj;
}
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1469)
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1470)
/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @gfp: gfp flags when allocating object
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long handle, obj;
	struct size_class *class;
	enum fullness_group newfg;
	struct zspage *zspage;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	handle = cache_alloc_handle(pool, gfp);
	if (!handle)
		return 0;

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];

	/* fast path: an existing zspage in this class has a free slot */
	spin_lock(&class->lock);
	zspage = find_get_zspage(class);
	if (likely(zspage)) {
		obj = obj_malloc(class, zspage, handle);
		/* Now move the zspage to another fullness group, if required */
		fix_fullness_group(class, zspage);
		record_obj(handle, obj);
		spin_unlock(&class->lock);

		return handle;
	}

	/* slow path: allocate a fresh zspage outside the class lock */
	spin_unlock(&class->lock);

	zspage = alloc_zspage(pool, class, gfp);
	if (!zspage) {
		/* allocation failed; give the handle back */
		cache_free_handle(pool, handle);
		return 0;
	}

	/* retake the lock and carve the first object from the new zspage */
	spin_lock(&class->lock);
	obj = obj_malloc(class, zspage, handle);
	newfg = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, newfg);
	set_zspage_mapping(zspage, class->index, newfg);
	record_obj(handle, obj);
	atomic_long_add(class->pages_per_zspage,
				&pool->pages_allocated);
	zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);

	/* We completely set up zspage so mark them as movable */
	SetZsPageMovable(pool, zspage);
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1536)
1ee4716585ed8 mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:45 -0700 1537) static void obj_free(struct size_class *class, unsigned long obj)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1538) {
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1539) struct link_free *link;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1540) struct zspage *zspage;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1541) struct page *f_page;
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 1542) unsigned long f_offset;
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 1543) unsigned int f_objidx;
af4ee5e977acb mm/zsmalloc.c (Minchan Kim 2014-12-12 16:56:58 -0800 1544) void *vaddr;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1545)
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 1546) obj_to_location(obj, &f_page, &f_objidx);
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 1547) f_offset = (class->size * f_objidx) & ~PAGE_MASK;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1548) zspage = get_zspage(f_page);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1549)
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1550) vaddr = kmap_atomic(f_page);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1551)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1552) /* Insert this object in containing zspage's freelist */
af4ee5e977acb mm/zsmalloc.c (Minchan Kim 2014-12-12 16:56:58 -0800 1553) link = (struct link_free *)(vaddr + f_offset);
3b1d9ca65a80c mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:37 -0700 1554) link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
af4ee5e977acb mm/zsmalloc.c (Minchan Kim 2014-12-12 16:56:58 -0800 1555) kunmap_atomic(vaddr);
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 1556) set_freeobj(zspage, f_objidx);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1557) mod_zspage_inuse(zspage, -1);
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 1558) zs_stat_dec(class, OBJ_USED, 1);
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1559) }
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1560)
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1561) void zs_free(struct zs_pool *pool, unsigned long handle)
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1562) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1563) struct zspage *zspage;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1564) struct page *f_page;
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 1565) unsigned long obj;
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 1566) unsigned int f_objidx;
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1567) int class_idx;
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1568) struct size_class *class;
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1569) enum fullness_group fullness;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1570) bool isolated;
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1571)
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1572) if (unlikely(!handle))
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1573) return;
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1574)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1575) pin_tag(handle);
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1576) obj = handle_to_obj(handle);
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1577) obj_to_location(obj, &f_page, &f_objidx);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1578) zspage = get_zspage(f_page);
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1579)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1580) migrate_read_lock(zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1581)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1582) get_zspage_mapping(zspage, &class_idx, &fullness);
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1583) class = pool->size_class[class_idx];
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1584)
c78062612fb52 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:26 -0700 1585) spin_lock(&class->lock);
1ee4716585ed8 mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:45 -0700 1586) obj_free(class, obj);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1587) fullness = fix_fullness_group(class, zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1588) if (fullness != ZS_EMPTY) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1589) migrate_read_unlock(zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1590) goto out;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1591) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1592)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1593) isolated = is_zspage_isolated(zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1594) migrate_read_unlock(zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1595) /* If zspage is isolated, zs_page_putback will free the zspage */
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1596) if (likely(!isolated))
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1597) free_zspage(pool, class, zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1598) out:
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1599)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1600) spin_unlock(&class->lock);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1601) unpin_tag(handle);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1602) cache_free_handle(pool, handle);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1603) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1604) EXPORT_SYMBOL_GPL(zs_free);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1605)
251cbb951b831 mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:42 -0700 1606) static void zs_object_copy(struct size_class *class, unsigned long dst,
251cbb951b831 mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:42 -0700 1607) unsigned long src)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1608) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1609) struct page *s_page, *d_page;
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 1610) unsigned int s_objidx, d_objidx;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1611) unsigned long s_off, d_off;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1612) void *s_addr, *d_addr;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1613) int s_size, d_size, size;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1614) int written = 0;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1615)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1616) s_size = d_size = class->size;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1617)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1618) obj_to_location(src, &s_page, &s_objidx);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1619) obj_to_location(dst, &d_page, &d_objidx);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1620)
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 1621) s_off = (class->size * s_objidx) & ~PAGE_MASK;
bfd093f5e7f09 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:28 -0700 1622) d_off = (class->size * d_objidx) & ~PAGE_MASK;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1623)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1624) if (s_off + class->size > PAGE_SIZE)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1625) s_size = PAGE_SIZE - s_off;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1626)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1627) if (d_off + class->size > PAGE_SIZE)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1628) d_size = PAGE_SIZE - d_off;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1629)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1630) s_addr = kmap_atomic(s_page);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1631) d_addr = kmap_atomic(d_page);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1632)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1633) while (1) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1634) size = min(s_size, d_size);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1635) memcpy(d_addr + d_off, s_addr + s_off, size);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1636) written += size;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1637)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1638) if (written == class->size)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1639) break;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1640)
495819ead5ad0 mm/zsmalloc.c (Sergey Senozhatsky 2015-04-15 16:16:15 -0700 1641) s_off += size;
495819ead5ad0 mm/zsmalloc.c (Sergey Senozhatsky 2015-04-15 16:16:15 -0700 1642) s_size -= size;
495819ead5ad0 mm/zsmalloc.c (Sergey Senozhatsky 2015-04-15 16:16:15 -0700 1643) d_off += size;
495819ead5ad0 mm/zsmalloc.c (Sergey Senozhatsky 2015-04-15 16:16:15 -0700 1644) d_size -= size;
495819ead5ad0 mm/zsmalloc.c (Sergey Senozhatsky 2015-04-15 16:16:15 -0700 1645)
495819ead5ad0 mm/zsmalloc.c (Sergey Senozhatsky 2015-04-15 16:16:15 -0700 1646) if (s_off >= PAGE_SIZE) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1647) kunmap_atomic(d_addr);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1648) kunmap_atomic(s_addr);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1649) s_page = get_next_page(s_page);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1650) s_addr = kmap_atomic(s_page);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1651) d_addr = kmap_atomic(d_page);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1652) s_size = class->size - written;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1653) s_off = 0;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1654) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1655)
495819ead5ad0 mm/zsmalloc.c (Sergey Senozhatsky 2015-04-15 16:16:15 -0700 1656) if (d_off >= PAGE_SIZE) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1657) kunmap_atomic(d_addr);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1658) d_page = get_next_page(d_page);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1659) d_addr = kmap_atomic(d_page);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1660) d_size = class->size - written;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1661) d_off = 0;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1662) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1663) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1664)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1665) kunmap_atomic(d_addr);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1666) kunmap_atomic(s_addr);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1667) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1668)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1669) /*
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1670) * Find alloced object in zspage from index object and
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1671) * return handle.
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1672) */
251cbb951b831 mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:42 -0700 1673) static unsigned long find_alloced_obj(struct size_class *class,
cf675acb743f0 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:46 -0700 1674) struct page *page, int *obj_idx)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1675) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1676) unsigned long head;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1677) int offset = 0;
cf675acb743f0 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:46 -0700 1678) int index = *obj_idx;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1679) unsigned long handle = 0;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1680) void *addr = kmap_atomic(page);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1681)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1682) offset = get_first_obj_offset(page);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1683) offset += class->size * index;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1684)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1685) while (offset < PAGE_SIZE) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1686) head = obj_to_head(page, addr + offset);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1687) if (head & OBJ_ALLOCATED_TAG) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1688) handle = head & ~OBJ_ALLOCATED_TAG;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1689) if (trypin_tag(handle))
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1690) break;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1691) handle = 0;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1692) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1693)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1694) offset += class->size;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1695) index++;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1696) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1697)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1698) kunmap_atomic(addr);
cf675acb743f0 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:46 -0700 1699)
cf675acb743f0 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:46 -0700 1700) *obj_idx = index;
cf675acb743f0 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:46 -0700 1701)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1702) return handle;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1703) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1704)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1705) struct zs_compact_control {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1706) /* Source spage for migration which could be a subpage of zspage */
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1707) struct page *s_page;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1708) /* Destination page for migration which should be a first page
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1709) * of zspage. */
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1710) struct page *d_page;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1711) /* Starting object index within @s_page which used for live object
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1712) * in the subpage. */
41b88e14c1612 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:43 -0700 1713) int obj_idx;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1714) };
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1715)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1716) static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1717) struct zs_compact_control *cc)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1718) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1719) unsigned long used_obj, free_obj;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1720) unsigned long handle;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1721) struct page *s_page = cc->s_page;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1722) struct page *d_page = cc->d_page;
41b88e14c1612 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:43 -0700 1723) int obj_idx = cc->obj_idx;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1724) int ret = 0;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1725)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1726) while (1) {
cf675acb743f0 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:46 -0700 1727) handle = find_alloced_obj(class, s_page, &obj_idx);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1728) if (!handle) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1729) s_page = get_next_page(s_page);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1730) if (!s_page)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1731) break;
41b88e14c1612 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:43 -0700 1732) obj_idx = 0;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1733) continue;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1734) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1735)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1736) /* Stop if there is no more space */
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1737) if (zspage_full(class, get_zspage(d_page))) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1738) unpin_tag(handle);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1739) ret = -ENOMEM;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1740) break;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1741) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1742)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1743) used_obj = handle_to_obj(handle);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1744) free_obj = obj_malloc(class, get_zspage(d_page), handle);
251cbb951b831 mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:42 -0700 1745) zs_object_copy(class, free_obj, used_obj);
41b88e14c1612 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:43 -0700 1746) obj_idx++;
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 1747) /*
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 1748) * record_obj updates handle's value to free_obj and it will
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 1749) * invalidate lock bit(ie, HANDLE_PIN_BIT) of handle, which
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 1750) * breaks synchronization using pin_tag(e,g, zs_free) so
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 1751) * let's keep the lock bit.
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 1752) */
c102f07ca0b04 mm/zsmalloc.c (Junil Lee 2016-01-20 14:58:18 -0800 1753) free_obj |= BIT(HANDLE_PIN_BIT);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1754) record_obj(handle, free_obj);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1755) unpin_tag(handle);
1ee4716585ed8 mm/zsmalloc.c (Minchan Kim 2016-05-20 16:59:45 -0700 1756) obj_free(class, used_obj);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1757) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1758)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1759) /* Remember last position in this iteration */
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1760) cc->s_page = s_page;
41b88e14c1612 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:43 -0700 1761) cc->obj_idx = obj_idx;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1762)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1763) return ret;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1764) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1765)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1766) static struct zspage *isolate_zspage(struct size_class *class, bool source)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1767) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1768) int i;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1769) struct zspage *zspage;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1770) enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL};
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1771)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1772) if (!source) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1773) fg[0] = ZS_ALMOST_FULL;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1774) fg[1] = ZS_ALMOST_EMPTY;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1775) }
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1776)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1777) for (i = 0; i < 2; i++) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1778) zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1779) struct zspage, list);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1780) if (zspage) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1781) VM_BUG_ON(is_zspage_isolated(zspage));
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1782) remove_zspage(class, zspage, fg[i]);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1783) return zspage;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1784) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1785) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1786)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1787) return zspage;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1788) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1789)
860c707dca155 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:38 -0700 1790) /*
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1791) * putback_zspage - add @zspage into right class's fullness list
860c707dca155 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:38 -0700 1792) * @class: destination class
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1793) * @zspage: target page
860c707dca155 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:38 -0700 1794) *
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1795) * Return @zspage's fullness_group
860c707dca155 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:38 -0700 1796) */
4aa409cab7c39 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:26 -0700 1797) static enum fullness_group putback_zspage(struct size_class *class,
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1798) struct zspage *zspage)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1799) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1800) enum fullness_group fullness;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1801)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1802) VM_BUG_ON(is_zspage_isolated(zspage));
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1803)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1804) fullness = get_fullness_group(class, zspage);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1805) insert_zspage(class, zspage, fullness);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 1806) set_zspage_mapping(zspage, class->index, fullness);
839373e645d12 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:16:18 -0700 1807)
860c707dca155 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:38 -0700 1808) return fullness;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 1809) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 1810)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1811) #ifdef CONFIG_COMPACTION
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1812) /*
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1813) * To prevent zspage destroy during migration, zspage freeing should
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1814) * hold locks of all pages in the zspage.
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1815) */
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1816) static void lock_zspage(struct zspage *zspage)
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1817) {
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1818) struct page *page = get_first_page(zspage);
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1819)
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1820) do {
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1821) lock_page(page);
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1822) } while ((page = get_next_page(page)) != NULL);
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1823) }
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1824)
8e9231f819e32 mm/zsmalloc.c (David Howells 2019-03-25 16:38:23 +0000 1825) static int zs_init_fs_context(struct fs_context *fc)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1826) {
8e9231f819e32 mm/zsmalloc.c (David Howells 2019-03-25 16:38:23 +0000 1827) return init_pseudo(fc, ZSMALLOC_MAGIC) ? 0 : -ENOMEM;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1828) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1829)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1830) static struct file_system_type zsmalloc_fs = {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1831) .name = "zsmalloc",
8e9231f819e32 mm/zsmalloc.c (David Howells 2019-03-25 16:38:23 +0000 1832) .init_fs_context = zs_init_fs_context,
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1833) .kill_sb = kill_anon_super,
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1834) };
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1835)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1836) static int zsmalloc_mount(void)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1837) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1838) int ret = 0;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1839)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1840) zsmalloc_mnt = kern_mount(&zsmalloc_fs);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1841) if (IS_ERR(zsmalloc_mnt))
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1842) ret = PTR_ERR(zsmalloc_mnt);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1843)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1844) return ret;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1845) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1846)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1847) static void zsmalloc_unmount(void)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1848) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1849) kern_unmount(zsmalloc_mnt);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1850) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1851)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1852) static void migrate_lock_init(struct zspage *zspage)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1853) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1854) rwlock_init(&zspage->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1855) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1856)
cfc451cfdf1dd mm/zsmalloc.c (Jules Irenge 2020-04-06 20:08:21 -0700 1857) static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1858) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1859) read_lock(&zspage->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1860) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1861)
8a374cccee8cd mm/zsmalloc.c (Jules Irenge 2020-04-06 20:08:24 -0700 1862) static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1863) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1864) read_unlock(&zspage->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1865) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1866)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1867) static void migrate_write_lock(struct zspage *zspage)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1868) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1869) write_lock(&zspage->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1870) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1871)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1872) static void migrate_write_unlock(struct zspage *zspage)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1873) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1874) write_unlock(&zspage->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1875) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1876)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1877) /* Number of isolated subpage for *page migration* in this zspage */
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1878) static void inc_zspage_isolation(struct zspage *zspage)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1879) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1880) zspage->isolated++;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1881) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1882)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1883) static void dec_zspage_isolation(struct zspage *zspage)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1884) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1885) zspage->isolated--;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1886) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1887)
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 1888) static void putback_zspage_deferred(struct zs_pool *pool,
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 1889) struct size_class *class,
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 1890) struct zspage *zspage)
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 1891) {
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 1892) enum fullness_group fg;
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 1893)
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 1894) fg = putback_zspage(class, zspage);
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 1895) if (fg == ZS_EMPTY)
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 1896) schedule_work(&pool->free_work);
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 1897)
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 1898) }
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 1899)
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1900) static inline void zs_pool_dec_isolated(struct zs_pool *pool)
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1901) {
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1902) VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1903) atomic_long_dec(&pool->isolated_pages);
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1904) /*
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1905) * There's no possibility of racing, since wait_for_isolated_drain()
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1906) * checks the isolated count under &class->lock after enqueuing
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1907) * on migration_wait.
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1908) */
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1909) if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1910) wake_up_all(&pool->migration_wait);
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1911) }
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1912)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1913) static void replace_sub_page(struct size_class *class, struct zspage *zspage,
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1914) struct page *newpage, struct page *oldpage)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1915) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1916) struct page *page;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1917) struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1918) int idx = 0;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1919)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1920) page = get_first_page(zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1921) do {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1922) if (page == oldpage)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1923) pages[idx] = newpage;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1924) else
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1925) pages[idx] = page;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1926) idx++;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1927) } while ((page = get_next_page(page)) != NULL);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1928)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1929) create_page_chain(class, zspage, pages);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1930) set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1931) if (unlikely(PageHugeObject(oldpage)))
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1932) newpage->index = oldpage->index;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1933) __SetPageMovable(newpage, page_mapping(oldpage));
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1934) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1935)
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1936) static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1937) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1938) struct zs_pool *pool;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1939) struct size_class *class;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1940) int class_idx;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1941) enum fullness_group fullness;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1942) struct zspage *zspage;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1943) struct address_space *mapping;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1944)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1945) /*
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1946) * Page is locked so zspage couldn't be destroyed. For detail, look at
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1947) * lock_zspage in free_zspage.
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1948) */
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1949) VM_BUG_ON_PAGE(!PageMovable(page), page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1950) VM_BUG_ON_PAGE(PageIsolated(page), page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1951)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1952) zspage = get_zspage(page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1953)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1954) /*
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1955) * Without class lock, fullness could be stale while class_idx is okay
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1956) * because class_idx is constant unless page is freed so we should get
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1957) * fullness again under class lock.
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1958) */
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1959) get_zspage_mapping(zspage, &class_idx, &fullness);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1960) mapping = page_mapping(page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1961) pool = mapping->private_data;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1962) class = pool->size_class[class_idx];
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1963)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1964) spin_lock(&class->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1965) if (get_zspage_inuse(zspage) == 0) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1966) spin_unlock(&class->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1967) return false;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1968) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1969)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1970) /* zspage is isolated for object migration */
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1971) if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1972) spin_unlock(&class->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1973) return false;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1974) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1975)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1976) /*
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1977) * If this is first time isolation for the zspage, isolate zspage from
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1978) * size_class to prevent further object allocation from the zspage.
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1979) */
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1980) if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1981) get_zspage_mapping(zspage, &class_idx, &fullness);
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 1982) atomic_long_inc(&pool->isolated_pages);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1983) remove_zspage(class, zspage, fullness);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1984) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1985)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1986) inc_zspage_isolation(zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1987) spin_unlock(&class->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1988)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1989) return true;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1990) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1991)
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 1992) static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1993) struct page *page, enum migrate_mode mode)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1994) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1995) struct zs_pool *pool;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1996) struct size_class *class;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1997) int class_idx;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1998) enum fullness_group fullness;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 1999) struct zspage *zspage;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2000) struct page *dummy;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2001) void *s_addr, *d_addr, *addr;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2002) int offset, pos;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2003) unsigned long handle, head;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2004) unsigned long old_obj, new_obj;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2005) unsigned int obj_idx;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2006) int ret = -EAGAIN;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2007)
2916ecc0f9d43 mm/zsmalloc.c (Jérôme Glisse 2017-09-08 16:12:06 -0700 2008) /*
2916ecc0f9d43 mm/zsmalloc.c (Jérôme Glisse 2017-09-08 16:12:06 -0700 2009) * We cannot support the _NO_COPY case here, because copy needs to
2916ecc0f9d43 mm/zsmalloc.c (Jérôme Glisse 2017-09-08 16:12:06 -0700 2010) * happen under the zs lock, which does not work with
2916ecc0f9d43 mm/zsmalloc.c (Jérôme Glisse 2017-09-08 16:12:06 -0700 2011) * MIGRATE_SYNC_NO_COPY workflow.
2916ecc0f9d43 mm/zsmalloc.c (Jérôme Glisse 2017-09-08 16:12:06 -0700 2012) */
2916ecc0f9d43 mm/zsmalloc.c (Jérôme Glisse 2017-09-08 16:12:06 -0700 2013) if (mode == MIGRATE_SYNC_NO_COPY)
2916ecc0f9d43 mm/zsmalloc.c (Jérôme Glisse 2017-09-08 16:12:06 -0700 2014) return -EINVAL;
2916ecc0f9d43 mm/zsmalloc.c (Jérôme Glisse 2017-09-08 16:12:06 -0700 2015)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2016) VM_BUG_ON_PAGE(!PageMovable(page), page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2017) VM_BUG_ON_PAGE(!PageIsolated(page), page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2018)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2019) zspage = get_zspage(page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2020)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2021) /* Concurrent compactor cannot migrate any subpage in zspage */
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2022) migrate_write_lock(zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2023) get_zspage_mapping(zspage, &class_idx, &fullness);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2024) pool = mapping->private_data;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2025) class = pool->size_class[class_idx];
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2026) offset = get_first_obj_offset(page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2027)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2028) spin_lock(&class->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2029) if (!get_zspage_inuse(zspage)) {
77ff465799c60 mm/zsmalloc.c (Hui Zhu 2017-09-06 16:21:08 -0700 2030) /*
77ff465799c60 mm/zsmalloc.c (Hui Zhu 2017-09-06 16:21:08 -0700 2031) * Set "offset" to end of the page so that every loops
77ff465799c60 mm/zsmalloc.c (Hui Zhu 2017-09-06 16:21:08 -0700 2032) * skips unnecessary object scanning.
77ff465799c60 mm/zsmalloc.c (Hui Zhu 2017-09-06 16:21:08 -0700 2033) */
77ff465799c60 mm/zsmalloc.c (Hui Zhu 2017-09-06 16:21:08 -0700 2034) offset = PAGE_SIZE;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2035) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2036)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2037) pos = offset;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2038) s_addr = kmap_atomic(page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2039) while (pos < PAGE_SIZE) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2040) head = obj_to_head(page, s_addr + pos);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2041) if (head & OBJ_ALLOCATED_TAG) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2042) handle = head & ~OBJ_ALLOCATED_TAG;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2043) if (!trypin_tag(handle))
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2044) goto unpin_objects;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2045) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2046) pos += class->size;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2047) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2048)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2049) /*
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2050) * Here, any user cannot access all objects in the zspage so let's move.
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2051) */
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2052) d_addr = kmap_atomic(newpage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2053) memcpy(d_addr, s_addr, PAGE_SIZE);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2054) kunmap_atomic(d_addr);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2055)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2056) for (addr = s_addr + offset; addr < s_addr + pos;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2057) addr += class->size) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2058) head = obj_to_head(page, addr);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2059) if (head & OBJ_ALLOCATED_TAG) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2060) handle = head & ~OBJ_ALLOCATED_TAG;
ecfc2bda7aafc mm/zsmalloc.c (zhouchuangao 2021-05-04 18:40:00 -0700 2061) BUG_ON(!testpin_tag(handle));
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2062)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2063) old_obj = handle_to_obj(handle);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2064) obj_to_location(old_obj, &dummy, &obj_idx);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2065) new_obj = (unsigned long)location_to_obj(newpage,
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2066) obj_idx);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2067) new_obj |= BIT(HANDLE_PIN_BIT);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2068) record_obj(handle, new_obj);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2069) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2070) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2071)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2072) replace_sub_page(class, zspage, newpage, page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2073) get_page(newpage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2074)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2075) dec_zspage_isolation(zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2076)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2077) /*
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2078) * Page migration is done so let's putback isolated zspage to
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2079) * the list if @page is final isolated subpage in the zspage.
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2080) */
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2081) if (!is_zspage_isolated(zspage)) {
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2082) /*
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2083) * We cannot race with zs_destroy_pool() here because we wait
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2084) * for isolation to hit zero before we start destroying.
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2085) * Also, we ensure that everyone can see pool->destroying before
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2086) * we start waiting.
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2087) */
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 2088) putback_zspage_deferred(pool, class, zspage);
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2089) zs_pool_dec_isolated(pool);
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2090) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2091)
ac8f05da5174c mm/zsmalloc.c (Chanho Min 2020-01-04 12:59:36 -0800 2092) if (page_zone(newpage) != page_zone(page)) {
ac8f05da5174c mm/zsmalloc.c (Chanho Min 2020-01-04 12:59:36 -0800 2093) dec_zone_page_state(page, NR_ZSPAGES);
ac8f05da5174c mm/zsmalloc.c (Chanho Min 2020-01-04 12:59:36 -0800 2094) inc_zone_page_state(newpage, NR_ZSPAGES);
ac8f05da5174c mm/zsmalloc.c (Chanho Min 2020-01-04 12:59:36 -0800 2095) }
ac8f05da5174c mm/zsmalloc.c (Chanho Min 2020-01-04 12:59:36 -0800 2096)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2097) reset_page(page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2098) put_page(page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2099) page = newpage;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2100)
dd4123f324bba mm/zsmalloc.c (Minchan Kim 2016-07-26 15:26:50 -0700 2101) ret = MIGRATEPAGE_SUCCESS;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2102) unpin_objects:
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2103) for (addr = s_addr + offset; addr < s_addr + pos;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2104) addr += class->size) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2105) head = obj_to_head(page, addr);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2106) if (head & OBJ_ALLOCATED_TAG) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2107) handle = head & ~OBJ_ALLOCATED_TAG;
ecfc2bda7aafc mm/zsmalloc.c (zhouchuangao 2021-05-04 18:40:00 -0700 2108) BUG_ON(!testpin_tag(handle));
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2109) unpin_tag(handle);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2110) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2111) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2112) kunmap_atomic(s_addr);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2113) spin_unlock(&class->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2114) migrate_write_unlock(zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2115)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2116) return ret;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2117) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2118)
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 2119) static void zs_page_putback(struct page *page)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2120) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2121) struct zs_pool *pool;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2122) struct size_class *class;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2123) int class_idx;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2124) enum fullness_group fg;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2125) struct address_space *mapping;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2126) struct zspage *zspage;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2127)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2128) VM_BUG_ON_PAGE(!PageMovable(page), page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2129) VM_BUG_ON_PAGE(!PageIsolated(page), page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2130)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2131) zspage = get_zspage(page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2132) get_zspage_mapping(zspage, &class_idx, &fg);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2133) mapping = page_mapping(page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2134) pool = mapping->private_data;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2135) class = pool->size_class[class_idx];
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2136)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2137) spin_lock(&class->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2138) dec_zspage_isolation(zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2139) if (!is_zspage_isolated(zspage)) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2140) /*
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2141) * Due to page_lock, we cannot free zspage immediately
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2142) * so let's defer.
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2143) */
1a87aa03597ef mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:03 -0700 2144) putback_zspage_deferred(pool, class, zspage);
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2145) zs_pool_dec_isolated(pool);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2146) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2147) spin_unlock(&class->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2148) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2149)
4d0a5402f505e mm/zsmalloc.c (Colin Ian King 2018-08-17 15:46:50 -0700 2150) static const struct address_space_operations zsmalloc_aops = {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2151) .isolate_page = zs_page_isolate,
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2152) .migratepage = zs_page_migrate,
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2153) .putback_page = zs_page_putback,
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2154) };
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2155)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2156) static int zs_register_migration(struct zs_pool *pool)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2157) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2158) pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2159) if (IS_ERR(pool->inode)) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2160) pool->inode = NULL;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2161) return 1;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2162) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2163)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2164) pool->inode->i_mapping->private_data = pool;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2165) pool->inode->i_mapping->a_ops = &zsmalloc_aops;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2166) return 0;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2167) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2168)
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2169) static bool pool_isolated_are_drained(struct zs_pool *pool)
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2170) {
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2171) return atomic_long_read(&pool->isolated_pages) == 0;
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2172) }
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2173)
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2174) /* Function for resolving migration */
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2175) static void wait_for_isolated_drain(struct zs_pool *pool)
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2176) {
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2177)
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2178) /*
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2179) * We're in the process of destroying the pool, so there are no
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2180) * active allocations. zs_page_isolate() fails for completely free
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2181) * zspages, so we need only wait for the zs_pool's isolated
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2182) * count to hit zero.
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2183) */
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2184) wait_event(pool->migration_wait,
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2185) pool_isolated_are_drained(pool));
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2186) }
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2187)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2188) static void zs_unregister_migration(struct zs_pool *pool)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2189) {
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2190) pool->destroying = true;
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2191) /*
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2192) * We need a memory barrier here to ensure global visibility of
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2193) * pool->destroying. Thus pool->isolated pages will either be 0 in which
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2194) * case we don't care, or it will be > 0 and pool->destroying will
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2195) * ensure that we wake up once isolation hits 0.
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2196) */
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2197) smp_mb();
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2198) wait_for_isolated_drain(pool); /* This can block */
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2199) flush_work(&pool->free_work);
c3491eca37fe1 mm/zsmalloc.c (Markus Elfring 2016-07-28 15:48:59 -0700 2200) iput(pool->inode);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2201) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2202)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2203) /*
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2204) * Caller should hold page_lock of all pages in the zspage
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2205) * In here, we cannot use zspage meta data.
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2206) */
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2207) static void async_free_zspage(struct work_struct *work)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2208) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2209) int i;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2210) struct size_class *class;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2211) unsigned int class_idx;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2212) enum fullness_group fullness;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2213) struct zspage *zspage, *tmp;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2214) LIST_HEAD(free_pages);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2215) struct zs_pool *pool = container_of(work, struct zs_pool,
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2216) free_work);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2217)
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 2218) for (i = 0; i < ZS_SIZE_CLASSES; i++) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2219) class = pool->size_class[i];
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2220) if (class->index != i)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2221) continue;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2222)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2223) spin_lock(&class->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2224) list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2225) spin_unlock(&class->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2226) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2227)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2228)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2229) list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2230) list_del(&zspage->list);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2231) lock_zspage(zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2232)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2233) get_zspage_mapping(zspage, &class_idx, &fullness);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2234) VM_BUG_ON(fullness != ZS_EMPTY);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2235) class = pool->size_class[class_idx];
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2236) spin_lock(&class->lock);
338483372626f mm/zsmalloc.c (Miaohe Lin 2021-06-30 18:53:04 -0700 2237) __free_zspage(pool, class, zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2238) spin_unlock(&class->lock);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2239) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2240) };
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2241)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2242) static void kick_deferred_free(struct zs_pool *pool)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2243) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2244) schedule_work(&pool->free_work);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2245) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2246)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2247) static void init_deferred_free(struct zs_pool *pool)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2248) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2249) INIT_WORK(&pool->free_work, async_free_zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2250) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2251)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2252) static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2253) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2254) struct page *page = get_first_page(zspage);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2255)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2256) do {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2257) WARN_ON(!trylock_page(page));
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2258) __SetPageMovable(page, pool->inode->i_mapping);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2259) unlock_page(page);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2260) } while ((page = get_next_page(page)) != NULL);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2261) }
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2262) #endif
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2263)
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2264) /*
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2265) *
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2266) * Based on the number of unused allocated objects calculate
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2267) * and return the number of pages that we can free.
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2268) */
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2269) static unsigned long zs_can_compact(struct size_class *class)
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2270) {
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2271) unsigned long obj_wasted;
44f43e99fe708 mm/zsmalloc.c (Sergey Senozhatsky 2016-05-09 16:28:49 -0700 2272) unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
44f43e99fe708 mm/zsmalloc.c (Sergey Senozhatsky 2016-05-09 16:28:49 -0700 2273) unsigned long obj_used = zs_stat_get(class, OBJ_USED);
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2274)
44f43e99fe708 mm/zsmalloc.c (Sergey Senozhatsky 2016-05-09 16:28:49 -0700 2275) if (obj_allocated <= obj_used)
44f43e99fe708 mm/zsmalloc.c (Sergey Senozhatsky 2016-05-09 16:28:49 -0700 2276) return 0;
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2277)
44f43e99fe708 mm/zsmalloc.c (Sergey Senozhatsky 2016-05-09 16:28:49 -0700 2278) obj_wasted = obj_allocated - obj_used;
b4fd07a0864a0 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:49 -0700 2279) obj_wasted /= class->objs_per_zspage;
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2280)
6cbf16b3b66a6 mm/zsmalloc.c (Minchan Kim 2015-09-08 15:04:49 -0700 2281) return obj_wasted * class->pages_per_zspage;
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2282) }
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2283)
2395928158059 mm/zsmalloc.c (Rokudo Yan 2021-02-25 17:18:31 -0800 2284) static unsigned long __zs_compact(struct zs_pool *pool,
2395928158059 mm/zsmalloc.c (Rokudo Yan 2021-02-25 17:18:31 -0800 2285) struct size_class *class)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2286) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2287) struct zs_compact_control cc;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 2288) struct zspage *src_zspage;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 2289) struct zspage *dst_zspage = NULL;
2395928158059 mm/zsmalloc.c (Rokudo Yan 2021-02-25 17:18:31 -0800 2290) unsigned long pages_freed = 0;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2291)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2292) spin_lock(&class->lock);
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 2293) while ((src_zspage = isolate_zspage(class, true))) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2294)
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2295) if (!zs_can_compact(class))
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2296) break;
04f05909e0fde mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:30 -0700 2297)
41b88e14c1612 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:43 -0700 2298) cc.obj_idx = 0;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2299) cc.s_page = get_first_page(src_zspage);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2300)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 2301) while ((dst_zspage = isolate_zspage(class, false))) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2302) cc.d_page = get_first_page(dst_zspage);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2303) /*
0dc63d488a2a4 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:33 -0700 2304) * If there is no more space in dst_page, resched
0dc63d488a2a4 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:33 -0700 2305) * and see if anyone had allocated another zspage.
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2306) */
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2307) if (!migrate_zspage(pool, class, &cc))
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2308) break;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2309)
4aa409cab7c39 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:26 -0700 2310) putback_zspage(class, dst_zspage);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2311) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2312)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2313) /* Stop if we couldn't find slot */
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 2314) if (dst_zspage == NULL)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2315) break;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2316)
4aa409cab7c39 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:26 -0700 2317) putback_zspage(class, dst_zspage);
4aa409cab7c39 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:26 -0700 2318) if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2319) free_zspage(pool, class, src_zspage);
2395928158059 mm/zsmalloc.c (Rokudo Yan 2021-02-25 17:18:31 -0800 2320) pages_freed += class->pages_per_zspage;
4aa409cab7c39 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:26 -0700 2321) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2322) spin_unlock(&class->lock);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2323) cond_resched();
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2324) spin_lock(&class->lock);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2325) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2326)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 2327) if (src_zspage)
4aa409cab7c39 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:26 -0700 2328) putback_zspage(class, src_zspage);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2329)
7d3f3938236b4 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:35 -0700 2330) spin_unlock(&class->lock);
2395928158059 mm/zsmalloc.c (Rokudo Yan 2021-02-25 17:18:31 -0800 2331)
2395928158059 mm/zsmalloc.c (Rokudo Yan 2021-02-25 17:18:31 -0800 2332) return pages_freed;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2333) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2334)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2335) unsigned long zs_compact(struct zs_pool *pool)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2336) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2337) int i;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2338) struct size_class *class;
2395928158059 mm/zsmalloc.c (Rokudo Yan 2021-02-25 17:18:31 -0800 2339) unsigned long pages_freed = 0;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2340)
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 2341) for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2342) class = pool->size_class[i];
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2343) if (!class)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2344) continue;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2345) if (class->index != i)
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2346) continue;
2395928158059 mm/zsmalloc.c (Rokudo Yan 2021-02-25 17:18:31 -0800 2347) pages_freed += __zs_compact(pool, class);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2348) }
2395928158059 mm/zsmalloc.c (Rokudo Yan 2021-02-25 17:18:31 -0800 2349) atomic_long_add(pages_freed, &pool->stats.pages_compacted);
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2350)
2395928158059 mm/zsmalloc.c (Rokudo Yan 2021-02-25 17:18:31 -0800 2351) return pages_freed;
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2352) }
312fcae227037 mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:30 -0700 2353) EXPORT_SYMBOL_GPL(zs_compact);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2354)
7d3f3938236b4 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:35 -0700 2355) void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
7d3f3938236b4 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:35 -0700 2356) {
7d3f3938236b4 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:35 -0700 2357) memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
7d3f3938236b4 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:35 -0700 2358) }
7d3f3938236b4 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:35 -0700 2359) EXPORT_SYMBOL_GPL(zs_pool_stats);
7d3f3938236b4 mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:35 -0700 2360)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2361) static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2362) struct shrink_control *sc)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2363) {
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2364) unsigned long pages_freed;
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2365) struct zs_pool *pool = container_of(shrinker, struct zs_pool,
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2366) shrinker);
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2367)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2368) /*
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2369) * Compact classes and calculate compaction delta.
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2370) * Can run concurrently with a manually triggered
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2371) * (by user) compaction.
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2372) */
2395928158059 mm/zsmalloc.c (Rokudo Yan 2021-02-25 17:18:31 -0800 2373) pages_freed = zs_compact(pool);
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2374)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2375) return pages_freed ? pages_freed : SHRINK_STOP;
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2376) }
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2377)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2378) static unsigned long zs_shrinker_count(struct shrinker *shrinker,
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2379) struct shrink_control *sc)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2380) {
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2381) int i;
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2382) struct size_class *class;
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2383) unsigned long pages_to_free = 0;
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2384) struct zs_pool *pool = container_of(shrinker, struct zs_pool,
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2385) shrinker);
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2386)
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 2387) for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2388) class = pool->size_class[i];
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2389) if (!class)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2390) continue;
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2391) if (class->index != i)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2392) continue;
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2393)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2394) pages_to_free += zs_can_compact(class);
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2395) }
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2396)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2397) return pages_to_free;
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2398) }
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2399)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2400) static void zs_unregister_shrinker(struct zs_pool *pool)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2401) {
93144ca35041b mm/zsmalloc.c (Aliaksei Karaliou 2018-01-31 16:18:40 -0800 2402) unregister_shrinker(&pool->shrinker);
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2403) }
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2404)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2405) static int zs_register_shrinker(struct zs_pool *pool)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2406) {
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2407) pool->shrinker.scan_objects = zs_shrinker_scan;
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2408) pool->shrinker.count_objects = zs_shrinker_count;
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2409) pool->shrinker.batch = 0;
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2410) pool->shrinker.seeks = DEFAULT_SEEKS;
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2411)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2412) return register_shrinker(&pool->shrinker);
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2413) }
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2414)
00a61d8618bb7 drivers/staging/zsmalloc/zsmalloc-main.c (Minchan Kim 2012-05-03 15:40:40 +0900 2415) /**
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2416) * zs_create_pool - Creates an allocation pool to work from.
fd8544639e3fd mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:54 -0700 2417) * @name: pool name to be created
166cfda752ca2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-02 16:15:51 -0500 2418) *
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2419) * This function must be called before anything when using
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2420) * the zsmalloc allocator.
166cfda752ca2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-02 16:15:51 -0500 2421) *
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2422) * On success, a pointer to the newly created pool is returned,
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2423) * otherwise NULL.
396b7fd6f9668 drivers/staging/zsmalloc/zsmalloc-main.c (Sara Bird 2013-05-20 15:18:14 -0400 2424) */
d0d8da2dc49df mm/zsmalloc.c (Sergey Senozhatsky 2016-05-20 16:59:48 -0700 2425) struct zs_pool *zs_create_pool(const char *name)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2426) {
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2427) int i;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2428) struct zs_pool *pool;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2429) struct size_class *prev_class = NULL;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2430)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2431) pool = kzalloc(sizeof(*pool), GFP_KERNEL);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2432) if (!pool)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2433) return NULL;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2434)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2435) init_deferred_free(pool);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2436)
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 2437) pool->name = kstrdup(name, GFP_KERNEL);
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 2438) if (!pool->name)
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 2439) goto err;
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 2440)
441e254cd40dc mm/zsmalloc.c (Andrew Morton 2019-08-30 16:04:35 -0700 2441) #ifdef CONFIG_COMPACTION
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2442) init_waitqueue_head(&pool->migration_wait);
441e254cd40dc mm/zsmalloc.c (Andrew Morton 2019-08-30 16:04:35 -0700 2443) #endif
701d678599d0c mm/zsmalloc.c (Henry Burns 2019-08-24 17:55:06 -0700 2444)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 2445) if (create_cache(pool))
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 2446) goto err;
2e40e163a25af mm/zsmalloc.c (Minchan Kim 2015-04-15 16:15:23 -0700 2447)
c60369f011251 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:55 -0500 2448) /*
399d8eebe768f mm/zsmalloc.c (Xishi Qiu 2017-02-22 15:45:01 -0800 2449) * Iterate reversely, because, size of size_class that we want to use
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2450) * for merging should be larger or equal to current size.
c60369f011251 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:55 -0500 2451) */
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 2452) for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2453) int size;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2454) int pages_per_zspage;
64d90465f0132 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:51 -0700 2455) int objs_per_zspage;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2456) struct size_class *class;
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 2457) int fullness = 0;
c60369f011251 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:55 -0500 2458)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2459) size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2460) if (size > ZS_MAX_ALLOC_SIZE)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2461) size = ZS_MAX_ALLOC_SIZE;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2462) pages_per_zspage = get_pages_per_zspage(size);
64d90465f0132 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:51 -0700 2463) objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2464)
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2465) /*
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2466) * We iterate from biggest down to smallest classes,
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2467) * so huge_class_size holds the size of the first huge
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2468) * class. Any object bigger than or equal to that will
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2469) * endup in the huge class.
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2470) */
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2471) if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2472) !huge_class_size) {
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2473) huge_class_size = size;
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2474) /*
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2475) * The object uses ZS_HANDLE_SIZE bytes to store the
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2476) * handle. We need to subtract it, because zs_malloc()
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2477) * unconditionally adds handle size before it performs
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2478) * size class search - so object may be smaller than
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2479) * huge class size, yet it still can end up in the huge
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2480) * class because it grows by ZS_HANDLE_SIZE extra bytes
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2481) * right before class lookup.
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2482) */
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2483) huge_class_size -= (ZS_HANDLE_SIZE - 1);
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2484) }
010b495e2fa32 mm/zsmalloc.c (Sergey Senozhatsky 2018-04-05 16:24:43 -0700 2485)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2486) /*
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2487) * size_class is used for normal zsmalloc operation such
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2488) * as alloc/free for that size. Although it is natural that we
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2489) * have one size_class for each size, there is a chance that we
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2490) * can get more memory utilization if we use one size_class for
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2491) * many different sizes whose size_class have same
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2492) * characteristics. So, we makes size_class point to
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2493) * previous size_class if possible.
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2494) */
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2495) if (prev_class) {
64d90465f0132 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:51 -0700 2496) if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2497) pool->size_class[i] = prev_class;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2498) continue;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2499) }
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2500) }
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2501)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2502) class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2503) if (!class)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2504) goto err;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2505)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2506) class->size = size;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2507) class->index = i;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2508) class->pages_per_zspage = pages_per_zspage;
64d90465f0132 mm/zsmalloc.c (Ganesh Mahendran 2016-07-28 15:47:51 -0700 2509) class->objs_per_zspage = objs_per_zspage;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2510) spin_lock_init(&class->lock);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2511) pool->size_class[i] = class;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2512) for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2513) fullness++)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 2514) INIT_LIST_HEAD(&class->fullness_list[fullness]);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2515)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2516) prev_class = class;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2517) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2518)
d34f615720d17 mm/zsmalloc.c (Dan Streetman 2016-05-20 16:59:56 -0700 2519) /* debug only, don't abort if it fails */
d34f615720d17 mm/zsmalloc.c (Dan Streetman 2016-05-20 16:59:56 -0700 2520) zs_pool_stat_create(pool, name);
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 2521)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2522) if (zs_register_migration(pool))
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2523) goto err;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2524)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2525) /*
93144ca35041b mm/zsmalloc.c (Aliaksei Karaliou 2018-01-31 16:18:40 -0800 2526) * Not critical since shrinker is only used to trigger internal
93144ca35041b mm/zsmalloc.c (Aliaksei Karaliou 2018-01-31 16:18:40 -0800 2527) * defragmentation of the pool which is pretty optional thing. If
93144ca35041b mm/zsmalloc.c (Aliaksei Karaliou 2018-01-31 16:18:40 -0800 2528) * registration fails we still can use the pool normally and user can
93144ca35041b mm/zsmalloc.c (Aliaksei Karaliou 2018-01-31 16:18:40 -0800 2529) * trigger compaction manually. Thus, ignore return code.
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2530) */
93144ca35041b mm/zsmalloc.c (Aliaksei Karaliou 2018-01-31 16:18:40 -0800 2531) zs_register_shrinker(pool);
93144ca35041b mm/zsmalloc.c (Aliaksei Karaliou 2018-01-31 16:18:40 -0800 2532)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2533) return pool;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2534)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2535) err:
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2536) zs_destroy_pool(pool);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2537) return NULL;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2538) }
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2539) EXPORT_SYMBOL_GPL(zs_create_pool);
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2540)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2541) void zs_destroy_pool(struct zs_pool *pool)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2542) {
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2543) int i;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2544)
ab9d306d9c3bf mm/zsmalloc.c (Sergey Senozhatsky 2015-09-08 15:04:41 -0700 2545) zs_unregister_shrinker(pool);
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2546) zs_unregister_migration(pool);
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 2547) zs_pool_stat_destroy(pool);
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 2548)
cf8e0fedf0784 mm/zsmalloc.c (Jerome Marchand 2017-07-10 15:50:18 -0700 2549) for (i = 0; i < ZS_SIZE_CLASSES; i++) {
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2550) int fg;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2551) struct size_class *class = pool->size_class[i];
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2552)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2553) if (!class)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2554) continue;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2555)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2556) if (class->index != i)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2557) continue;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2558)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2559) for (fg = ZS_EMPTY; fg < NR_ZS_FULLNESS; fg++) {
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 2560) if (!list_empty(&class->fullness_list[fg])) {
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2561) pr_info("Freeing non-empty class with size %db, fullness group %d\n",
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2562) class->size, fg);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2563) }
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2564) }
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2565) kfree(class);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2566) }
f553646a67cb2 drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-18 11:55:56 -0500 2567)
3783689a1aa82 mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:23 -0700 2568) destroy_cache(pool);
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 2569) kfree(pool->name);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2570) kfree(pool);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2571) }
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2572) EXPORT_SYMBOL_GPL(zs_destroy_pool);
b74185108668e drivers/staging/zsmalloc/zsmalloc-main.c (Seth Jennings 2012-07-02 16:15:52 -0500 2573)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2574) static int __init zs_init(void)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2575) {
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2576) int ret;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2577)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2578) ret = zsmalloc_mount();
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2579) if (ret)
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2580) goto out;
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2581)
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 2582) ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 2583) zs_cpu_prepare, zs_cpu_dead);
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 2584) if (ret)
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 2585) goto hp_setup_fail;
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2586)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2587) #ifdef CONFIG_ZPOOL
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2588) zpool_register_driver(&zs_zpool_driver);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2589) #endif
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 2590)
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 2591) zs_stat_init();
4abaac9b733ea mm/zsmalloc.c (Dan Streetman 2016-05-26 15:16:27 -0700 2592)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2593) return 0;
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 2594)
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 2595) hp_setup_fail:
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2596) zsmalloc_unmount();
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2597) out:
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 2598) return ret;
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2599) }
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2600)
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2601) static void __exit zs_exit(void)
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2602) {
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2603) #ifdef CONFIG_ZPOOL
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2604) zpool_unregister_driver(&zs_zpool_driver);
66cdef663cd7a mm/zsmalloc.c (Ganesh Mahendran 2014-12-18 16:17:40 -0800 2605) #endif
48b4800a1c6af mm/zsmalloc.c (Minchan Kim 2016-07-26 15:23:31 -0700 2606) zsmalloc_unmount();
215c89d055e08 mm/zsmalloc.c (Sebastian Andrzej Siewior 2016-11-27 00:13:38 +0100 2607) cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 2608)
0f050d997e275 mm/zsmalloc.c (Ganesh Mahendran 2015-02-12 15:00:54 -0800 2609) zs_stat_exit();
61989a80fb3ac drivers/staging/zsmalloc/zsmalloc-main.c (Nitin Gupta 2012-01-09 16:51:56 -0600 2610) }
069f101fa4633 drivers/staging/zsmalloc/zsmalloc-main.c (Ben Hutchings 2012-06-20 02:31:11 +0100 2611)
069f101fa4633 drivers/staging/zsmalloc/zsmalloc-main.c (Ben Hutchings 2012-06-20 02:31:11 +0100 2612) module_init(zs_init);
069f101fa4633 drivers/staging/zsmalloc/zsmalloc-main.c (Ben Hutchings 2012-06-20 02:31:11 +0100 2613) module_exit(zs_exit);
069f101fa4633 drivers/staging/zsmalloc/zsmalloc-main.c (Ben Hutchings 2012-06-20 02:31:11 +0100 2614)
069f101fa4633 drivers/staging/zsmalloc/zsmalloc-main.c (Ben Hutchings 2012-06-20 02:31:11 +0100 2615) MODULE_LICENSE("Dual BSD/GPL");
069f101fa4633 drivers/staging/zsmalloc/zsmalloc-main.c (Ben Hutchings 2012-06-20 02:31:11 +0100 2616) MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");