b24413180f560 (Greg Kroah-Hartman 2017-11-01 15:07:57 +0100 1) // SPDX-License-Identifier: GPL-2.0
3cbc564024d8f (Ravikiran G Thirumalai 2006-06-23 02:05:40 -0700 2) /*
3cbc564024d8f (Ravikiran G Thirumalai 2006-06-23 02:05:40 -0700 3) * Fast batching percpu counters.
3cbc564024d8f (Ravikiran G Thirumalai 2006-06-23 02:05:40 -0700 4) */
3cbc564024d8f (Ravikiran G Thirumalai 2006-06-23 02:05:40 -0700 5)
3cbc564024d8f (Ravikiran G Thirumalai 2006-06-23 02:05:40 -0700 6) #include <linux/percpu_counter.h>
c67ad917cbf21 (Andrew Morton 2007-07-15 23:39:51 -0700 7) #include <linux/mutex.h>
c67ad917cbf21 (Andrew Morton 2007-07-15 23:39:51 -0700 8) #include <linux/init.h>
c67ad917cbf21 (Andrew Morton 2007-07-15 23:39:51 -0700 9) #include <linux/cpu.h>
3cbc564024d8f (Ravikiran G Thirumalai 2006-06-23 02:05:40 -0700 10) #include <linux/module.h>
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 11) #include <linux/debugobjects.h>
3cbc564024d8f (Ravikiran G Thirumalai 2006-06-23 02:05:40 -0700 12)
#ifdef CONFIG_HOTPLUG_CPU
/*
 * All live counters, chained via fbc->list, so percpu_counter_cpu_dead()
 * can fold a departed CPU's per-cpu deltas back into each shared count.
 */
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif
c67ad917cbf21 (Andrew Morton 2007-07-15 23:39:51 -0700 17)
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 18) #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 19)
f9e62f318fd70 (Stephen Boyd 2020-08-14 17:40:27 -0700 20) static const struct debug_obj_descr percpu_counter_debug_descr;
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 21)
d99b1d8912654 (Changbin Du 2016-05-19 17:09:35 -0700 22) static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 23) {
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 24) struct percpu_counter *fbc = addr;
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 25)
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 26) switch (state) {
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 27) case ODEBUG_STATE_ACTIVE:
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 28) percpu_counter_destroy(fbc);
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 29) debug_object_free(fbc, &percpu_counter_debug_descr);
d99b1d8912654 (Changbin Du 2016-05-19 17:09:35 -0700 30) return true;
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 31) default:
d99b1d8912654 (Changbin Du 2016-05-19 17:09:35 -0700 32) return false;
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 33) }
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 34) }
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 35)
/* debugobjects descriptor: only frees of still-active counters get fixed up. */
static const struct debug_obj_descr percpu_counter_debug_descr = {
	.name = "percpu_counter",
	.fixup_free = percpu_counter_fixup_free,
};
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 40)
/* Register @fbc with debugobjects and mark it live (init, then activate). */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 46)
/* Mark @fbc dead and drop its debugobjects tracking (deactivate, then free). */
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 52)
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 53) #else /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
/* No-op stubs when debugobjects tracking of percpu_counters is disabled. */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 58) #endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
e2852ae825dba (Tejun Heo 2010-10-26 14:23:05 -0700 59)
/*
 * Set the counter to @amount, zeroing every per-cpu delta.
 *
 * NOTE(review): the per-cpu slots are written here without any atomicity
 * against this_cpu_add() in percpu_counter_add_batch(), so an update
 * running concurrently on another CPU can be lost; presumably callers
 * guarantee there are no concurrent updaters — confirm at call sites.
 */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	/* Walk possible (not just online) CPUs so no stale delta survives. */
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
3a587f47b82f9 (Peter Zijlstra 2007-10-16 23:25:44 -0700 73) EXPORT_SYMBOL(percpu_counter_set);
3a587f47b82f9 (Peter Zijlstra 2007-10-16 23:25:44 -0700 74)
/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock whereas the fast path uses
 * this_cpu_add which is irq-safe by definition. Hence there is no need to
 * muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	/* Disabling preemption keeps us on one CPU across read/modify below. */
	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (abs(count) >= batch) {
		unsigned long flags;
		/*
		 * Slow path: the local delta would exceed @batch.  Fold the
		 * whole delta into the shared count under the irq-safe lock,
		 * and subtract what the slot held before @amount (an irq may
		 * have added to the slot since the read above, so subtract
		 * rather than store zero).
		 */
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		/* Fast path: lockless per-cpu add; this_cpu_add is irq-safe. */
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
104b4e5139fe3 (Nikolay Borisov 2017-06-20 21:01:20 +0300 99) EXPORT_SYMBOL(percpu_counter_add_batch);
3cbc564024d8f (Ravikiran G Thirumalai 2006-06-23 02:05:40 -0700 100)
/*
 * For a percpu_counter with a big batch, the deviation of its count could
 * be big, and there is a requirement to reduce the deviation, like when the
 * counter's batch could be runtime decreased to get a better accuracy,
 * which can be achieved by running this sync function on each CPU.
 */
/*
 * Fold the local CPU's per-cpu delta into fbc->count, leaving the local
 * slot at zero.  The irq-saving lock both protects fbc->count and keeps
 * us pinned to this CPU, making the __this_cpu ops safe.
 */
void percpu_counter_sync(struct percpu_counter *fbc)
{
	unsigned long flags;
	s64 count;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	count = __this_cpu_read(*fbc->counters);
	fbc->count += count;
	__this_cpu_sub(*fbc->counters, count);
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
0a4954a850b0c (Feng Tang 2020-08-06 23:23:11 -0700 118) EXPORT_SYMBOL(percpu_counter_sync);
0a4954a850b0c (Feng Tang 2020-08-06 23:23:11 -0700 119)
3cbc564024d8f (Ravikiran G Thirumalai 2006-06-23 02:05:40 -0700 120) /*
3cbc564024d8f (Ravikiran G Thirumalai 2006-06-23 02:05:40 -0700 121) * Add up all the per-cpu counts, return the result. This is a more accurate
3cbc564024d8f (Ravikiran G Thirumalai 2006-06-23 02:05:40 -0700 122) * but much slower version of percpu_counter_read_positive()
3cbc564024d8f (Ravikiran G Thirumalai 2006-06-23 02:05:40 -0700 123) */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	/*
	 * Only online CPUs are walked: deltas of offlined CPUs were folded
	 * into fbc->count by percpu_counter_cpu_dead().  NOTE(review): a CPU
	 * that is mid-offline may still hold an unfolded delta at this point
	 * — confirm callers tolerate that transient inaccuracy.
	 */
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
bf1d89c81352f (Peter Zijlstra 2007-10-16 23:25:45 -0700 139) EXPORT_SYMBOL(__percpu_counter_sum);
c67ad917cbf21 (Andrew Morton 2007-07-15 23:39:51 -0700 140)
/*
 * Initialise @fbc to @amount and allocate its per-cpu array with @gfp.
 * @key sets the lockdep class of fbc->lock so callers that nest counters
 * can avoid false lockdep reports.
 *
 * Returns 0 on success, -ENOMEM if the per-cpu allocation fails (in which
 * case fbc->counters is left NULL and percpu_counter_destroy() is a no-op).
 */
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	/* Only used under CONFIG_HOTPLUG_CPU, hence __maybe_unused. */
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	/* Register so percpu_counter_cpu_dead() folds deltas of dead CPUs. */
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
ea319518ba3de (Peter Zijlstra 2008-12-26 15:08:55 +0100 163) EXPORT_SYMBOL(__percpu_counter_init);
c67ad917cbf21 (Andrew Morton 2007-07-15 23:39:51 -0700 164)
/*
 * Tear down @fbc: unregister it from the hotplug list and free its per-cpu
 * array.  Safe to call when init failed (fbc->counters == NULL).  Any
 * remaining per-cpu deltas are discarded with the array.
 */
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	/* NULL the pointer so a repeated destroy is a harmless no-op. */
	fbc->counters = NULL;
}
c67ad917cbf21 (Andrew Morton 2007-07-15 23:39:51 -0700 182) EXPORT_SYMBOL(percpu_counter_destroy);
c67ad917cbf21 (Andrew Morton 2007-07-15 23:39:51 -0700 183)
/* Default slow-path threshold; raised to nr_online*2 by compute_batch_value(). */
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
179f7ebff6be4 (Eric Dumazet 2009-01-06 14:41:04 -0800 186)
5588f5afb4cfc (Sebastian Andrzej Siewior 2016-11-03 15:50:00 +0100 187) static int compute_batch_value(unsigned int cpu)
179f7ebff6be4 (Eric Dumazet 2009-01-06 14:41:04 -0800 188) {
179f7ebff6be4 (Eric Dumazet 2009-01-06 14:41:04 -0800 189) int nr = num_online_cpus();
179f7ebff6be4 (Eric Dumazet 2009-01-06 14:41:04 -0800 190)
179f7ebff6be4 (Eric Dumazet 2009-01-06 14:41:04 -0800 191) percpu_counter_batch = max(32, nr*2);
5588f5afb4cfc (Sebastian Andrzej Siewior 2016-11-03 15:50:00 +0100 192) return 0;
179f7ebff6be4 (Eric Dumazet 2009-01-06 14:41:04 -0800 193) }
179f7ebff6be4 (Eric Dumazet 2009-01-06 14:41:04 -0800 194)
/*
 * CPU hotplug "dead" callback: fold the departed CPU's per-cpu delta of
 * every registered counter into its shared count, then recompute the batch
 * for the reduced number of online CPUs.  Without the folding, deltas on
 * offlined CPUs would be invisible to __percpu_counter_sum(), which only
 * walks online CPUs.
 */
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	/* List lock disables irqs; each fbc->lock nests inside it. */
	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}
c67ad917cbf21 (Andrew Morton 2007-07-15 23:39:51 -0700 216)
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 217) /*
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 218) * Compare counter against given value.
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 219) * Return 1 if greater, 0 if equal and -1 if less
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 220) */
80188b0d77d74 (Dave Chinner 2015-05-29 07:39:34 +1000 221) int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 222) {
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 223) s64 count;
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 224)
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 225) count = percpu_counter_read(fbc);
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 226) /* Check to see if rough count will be sufficient for comparison */
80188b0d77d74 (Dave Chinner 2015-05-29 07:39:34 +1000 227) if (abs(count - rhs) > (batch * num_online_cpus())) {
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 228) if (count > rhs)
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 229) return 1;
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 230) else
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 231) return -1;
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 232) }
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 233) /* Need to use precise count */
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 234) count = percpu_counter_sum(fbc);
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 235) if (count > rhs)
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 236) return 1;
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 237) else if (count < rhs)
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 238) return -1;
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 239) else
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 240) return 0;
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 241) }
80188b0d77d74 (Dave Chinner 2015-05-29 07:39:34 +1000 242) EXPORT_SYMBOL(__percpu_counter_compare);
27f5e0f694fd0 (Tim Chen 2010-08-09 17:19:04 -0700 243)
/*
 * Register the CPU hotplug callbacks: recompute the global batch whenever
 * a CPU comes online, and fold a dead CPU's deltas when one goes away.
 */
static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	/* _nocalls: no folding needed for CPUs that are already up. */
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
c67ad917cbf21 (Andrew Morton 2007-07-15 23:39:51 -0700 257) module_init(percpu_counter_startup);