author     Linus Torvalds <torvalds@linux-foundation.org>  2010-03-03 10:34:18 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-03-03 10:34:18 -0500
commit     0a135ba14d71fb84c691a5386aff5049691fe6d7 (patch)
tree       adb1de887dd6839d69d2fc16ffa2a10ff63298fa /kernel
parent     4850f524b2c4c8a4e9f8ef4dd9c7c4afde2f2b2c (diff)
parent     a29d8b8e2d811a24bbe49215a0f0c536b72ebc18 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: add __percpu sparse annotations to what's left
  percpu: add __percpu sparse annotations to fs
  percpu: add __percpu sparse annotations to core kernel subsystems
  local_t: Remove leftover local.h
  this_cpu: Remove pageset_notifier
  this_cpu: Page allocator conversion
  percpu, x86: Generic inc / dec percpu instructions
  local_t: Move local.h include to ringbuffer.c and ring_buffer_benchmark.c
  module: Use this_cpu_xx to dynamically allocate counters
  local_t: Remove cpu_local_xx macros
  percpu: refactor the code in pcpu_[de]populate_chunk()
  percpu: remove compile warnings caused by __verify_pcpu_ptr()
  percpu: make accessors check for percpu pointer in sparse
  percpu: add __percpu for sparse.
  percpu: make access macros universal
  percpu: remove per_cpu__ prefix.
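
The thread running through this series is the new __percpu sparse annotation and percpu accessors that no longer depend on the per_cpu__ name prefix. A minimal sketch of the resulting dynamic per-CPU API, built around a hypothetical struct hits counter rather than anything in this merge:

#include <linux/percpu.h>

/* Hypothetical per-CPU counter, used only to illustrate the API. */
struct hits {
        unsigned long count;
};

/* Dynamic per-CPU data now carries the __percpu annotation, so sparse
 * rejects any access that bypasses the accessors below. */
static struct hits __percpu *hits;

static int hits_init(void)
{
        int cpu;

        hits = alloc_percpu(struct hits);
        if (!hits)
                return -ENOMEM;

        for_each_possible_cpu(cpu)
                per_cpu_ptr(hits, cpu)->count = 0;      /* a given CPU's copy */
        return 0;
}

static void hits_record(void)
{
        preempt_disable();
        __this_cpu_inc(hits->count);    /* this CPU's copy, offset resolved by the accessor */
        preempt_enable();
}

static void hits_exit(void)
{
        free_percpu(hits);
}
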
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kexec.c                          2
-rw-r--r--  kernel/module.c                        29
-rw-r--r--  kernel/rcutorture.c                     8
-rw-r--r--  kernel/sched.c                          4
-rw-r--r--  kernel/stop_machine.c                   2
-rw-r--r--  kernel/trace/ring_buffer.c              1
-rw-r--r--  kernel/trace/ring_buffer_benchmark.c    1
-rw-r--r--  kernel/trace/trace.c                    6
-rw-r--r--  kernel/trace/trace_functions_graph.c    4
9 files changed, 30 insertions(+), 27 deletions(-)
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ef077fb73155..87ebe8adc474 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -41,7 +41,7 @@
 #include <asm/sections.h>
 
 /* Per cpu memory for storing cpu states in case of system crash. */
-note_buf_t* crash_notes;
+note_buf_t __percpu *crash_notes;
 
 /* vmcoreinfo stuff */
 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
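
The kexec change is annotation only; behavior is identical. What __percpu buys is static checking: sparse puts such pointers in their own address space, so a plain dereference is flagged and access has to go through an accessor. A sketch of what the checker accepts and rejects, with a hypothetical counter (the warning text is approximate):

#include <linux/percpu.h>

static unsigned long __percpu *counter;

static void counter_bump(int cpu)
{
        unsigned long *p;

        /* OK: per_cpu_ptr() turns the annotated pointer into an
         * ordinary pointer to the given CPU's copy. */
        p = per_cpu_ptr(counter, cpu);
        (*p)++;

        /*
         * Rejected by sparse after this series, roughly:
         *   warning: dereference of noderef expression
         *
         * (*counter)++;
         */
}
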
diff --git a/kernel/module.c b/kernel/module.c
index f82386bd9ee9..e5538d5f00ad 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -474,9 +474,10 @@ static void module_unload_init(struct module *mod)
 
         INIT_LIST_HEAD(&mod->modules_which_use_me);
         for_each_possible_cpu(cpu)
-                local_set(__module_ref_addr(mod, cpu), 0);
+                per_cpu_ptr(mod->refptr, cpu)->count = 0;
+
         /* Hold reference count during initialization. */
-        local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
+        __this_cpu_write(mod->refptr->count, 1);
         /* Backwards compatibility macros put refcount during init. */
         mod->waiter = current;
 }
@@ -619,7 +620,7 @@ unsigned int module_refcount(struct module *mod)
         int cpu;
 
         for_each_possible_cpu(cpu)
-                total += local_read(__module_ref_addr(mod, cpu));
+                total += per_cpu_ptr(mod->refptr, cpu)->count;
         return total;
 }
 EXPORT_SYMBOL(module_refcount);
@@ -796,14 +797,15 @@ static struct module_attribute refcnt = {
 void module_put(struct module *module)
 {
         if (module) {
-                unsigned int cpu = get_cpu();
-                local_dec(__module_ref_addr(module, cpu));
+                preempt_disable();
+                __this_cpu_dec(module->refptr->count);
+
                 trace_module_put(module, _RET_IP_,
-                          local_read(__module_ref_addr(module, cpu)));
+                          __this_cpu_read(module->refptr->count));
                 /* Maybe they're waiting for us to drop reference? */
                 if (unlikely(!module_is_live(module)))
                         wake_up_process(module->waiter);
-                put_cpu();
+                preempt_enable();
         }
 }
 EXPORT_SYMBOL(module_put);
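
module_put() keeps the explicit preempt_disable()/preempt_enable() pair because the double-underscore accessors assume preemption is already off; the un-prefixed forms carry their own protection. A hedged sketch of the distinction, with a hypothetical counter:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, ref);

static void put_ref(void)
{
        /*
         * this_cpu_dec() is safe from preemptible context: after the
         * "Generic inc / dec percpu instructions" patch it compiles to a
         * single segment-relative decrement on x86, and elsewhere it
         * brackets the update with preempt_disable()/preempt_enable().
         */
        this_cpu_dec(ref);
}

static void put_ref_cheap(void)
{
        /*
         * __this_cpu_dec() skips that protection, so the caller pins the
         * CPU itself, exactly as module_put() does above.
         */
        preempt_disable();
        __this_cpu_dec(ref);
        preempt_enable();
}
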
@@ -1397,9 +1399,9 @@ static void free_module(struct module *mod)
         kfree(mod->args);
         if (mod->percpu)
                 percpu_modfree(mod->percpu);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+#if defined(CONFIG_MODULE_UNLOAD)
         if (mod->refptr)
-                percpu_modfree(mod->refptr);
+                free_percpu(mod->refptr);
 #endif
         /* Free lock-classes: */
         lockdep_free_key_range(mod->module_core, mod->core_size);
@@ -2162,9 +2164,8 @@ static noinline struct module *load_module(void __user *umod,
         mod = (void *)sechdrs[modindex].sh_addr;
         kmemleak_load_module(mod, hdr, sechdrs, secstrings);
 
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-        mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
-                                      mod->name);
+#if defined(CONFIG_MODULE_UNLOAD)
+        mod->refptr = alloc_percpu(struct module_ref);
         if (!mod->refptr) {
                 err = -ENOMEM;
                 goto free_init;
@@ -2396,8 +2397,8 @@ static noinline struct module *load_module(void __user *umod,
         kobject_put(&mod->mkobj.kobj);
  free_unload:
         module_unload_free(mod);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-        percpu_modfree(mod->refptr);
+#if defined(CONFIG_MODULE_UNLOAD)
+        free_percpu(mod->refptr);
  free_init:
 #endif
         module_free(mod, mod->module_init);
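
Dropping the CONFIG_SMP half of these guards works because alloc_percpu()/free_percpu() are defined for UP builds as well (there they reduce to an ordinary single-copy allocation), whereas percpu_modalloc() was an SMP-only module-loader helper. The allocation pattern in isolation, as a sketch with a hypothetical payload type:

#include <linux/percpu.h>

struct stats {
        int count;
};

static struct stats __percpu *stats;

static int stats_init(void)
{
        /*
         * Typed allocation: size and alignment are derived from the
         * type, replacing the explicit sizeof()/__alignof__() arguments
         * that percpu_modalloc() needed.
         */
        stats = alloc_percpu(struct stats);
        return stats ? 0 : -ENOMEM;
}

static void stats_exit(void)
{
        free_percpu(stats);     /* NULL-safe, like kfree() */
}
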
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 258cdf0a91eb..58df55bf83ed 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -818,13 +818,13 @@ static void rcu_torture_timer(unsigned long unused)
                 /* Should not happen, but... */
                 pipe_count = RCU_TORTURE_PIPE_LEN;
         }
-        __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+        __this_cpu_inc(rcu_torture_count[pipe_count]);
         completed = cur_ops->completed() - completed;
         if (completed > RCU_TORTURE_PIPE_LEN) {
                 /* Should not happen, but... */
                 completed = RCU_TORTURE_PIPE_LEN;
         }
-        __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+        __this_cpu_inc(rcu_torture_batch[completed]);
         preempt_enable();
         cur_ops->readunlock(idx);
 }
@@ -877,13 +877,13 @@ rcu_torture_reader(void *arg)
                 /* Should not happen, but... */
                 pipe_count = RCU_TORTURE_PIPE_LEN;
         }
-        __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+        __this_cpu_inc(rcu_torture_count[pipe_count]);
         completed = cur_ops->completed() - completed;
         if (completed > RCU_TORTURE_PIPE_LEN) {
                 /* Should not happen, but... */
                 completed = RCU_TORTURE_PIPE_LEN;
         }
-        __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+        __this_cpu_inc(rcu_torture_batch[completed]);
         preempt_enable();
         cur_ops->readunlock(idx);
         schedule();
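
Statically defined per-CPU variables used to carry a hidden per_cpu__ name prefix, so generic accessors needed the per_cpu_var() wrapper to mangle the name; with the prefix removed by "percpu: remove per_cpu__ prefix.", __this_cpu_inc() takes the variable (here, an element of a per-CPU array) directly. The pattern in isolation, with a hypothetical array:

#include <linux/percpu.h>

static DEFINE_PER_CPU(long [8], event_count);

static void count_event(int slot)
{
        preempt_disable();
        /* Old spelling: __this_cpu_inc(per_cpu_var(event_count)[slot]); */
        __this_cpu_inc(event_count[slot]);
        preempt_enable();
}
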
diff --git a/kernel/sched.c b/kernel/sched.c
index 6a212c97f523..abb36b16b93b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1521,7 +1521,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static __read_mostly unsigned long *update_shares_data;
+static __read_mostly unsigned long __percpu *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -8813,7 +8813,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 struct cpuacct {
         struct cgroup_subsys_state css;
         /* cpuusage holds pointer to a u64-type object on every cpu */
-        u64 *cpuusage;
+        u64 __percpu *cpuusage;
         struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
         struct cpuacct *parent;
 };
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 912823e2a11b..9bb9fb1bd79c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -45,7 +45,7 @@ static int refcount;
 static struct workqueue_struct *stop_machine_wq;
 static struct stop_machine_data active, idle;
 static const struct cpumask *active_cpus;
-static void *stop_machine_work;
+static void __percpu *stop_machine_work;
 
 static void set_state(enum stopmachine_state newstate)
 {
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8c1b2d290718..0287f9f52f5a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -20,6 +20,7 @@
 #include <linux/cpu.h>
 #include <linux/fs.h>
 
+#include <asm/local.h>
 #include "trace.h"
 
 /*
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index b2477caf09c2..df74c7982255 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -8,6 +8,7 @@
 #include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/time.h>
+#include <asm/local.h>
 
 struct rb_page {
         u64 ts;
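
These two files are the remaining local_t users, so once linux/module.h stops dragging <asm/local.h> in ("local_t: Move local.h include to ringbuffer.c and ring_buffer_benchmark.c"), they must include it themselves. A sketch of the local_t operations the ring buffer relies on, reduced to a standalone counter:

#include <asm/local.h>

/* A local_t is cheap to update from its own CPU and safe against
 * interrupts on that CPU (a single add instruction on x86). */
static local_t commits = LOCAL_INIT(0);

static void on_commit(void)
{
        local_inc(&commits);
}

static long commits_seen(void)
{
        return local_read(&commits);
}
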
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 032c57ca6502..ed01fdba4a55 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -92,12 +92,12 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 static inline void ftrace_disable_cpu(void)
 {
         preempt_disable();
-        __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
+        __this_cpu_inc(ftrace_cpu_disabled);
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-        __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
+        __this_cpu_dec(ftrace_cpu_disabled);
         preempt_enable();
 }
 
@@ -1166,7 +1166,7 @@ trace_function(struct trace_array *tr,
         struct ftrace_entry *entry;
 
         /* If we are reading the ring buffer, don't trace */
-        if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                 return;
 
         event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e998a824e9db..3fc2a575664f 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -188,7 +188,7 @@ static int __trace_graph_entry(struct trace_array *tr,
         struct ring_buffer *buffer = tr->buffer;
         struct ftrace_graph_ent_entry *entry;
 
-        if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                 return 0;
 
         event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -247,7 +247,7 @@ static void __trace_graph_return(struct trace_array *tr,
         struct ring_buffer *buffer = tr->buffer;
         struct ftrace_graph_ret_entry *entry;
 
-        if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                 return;
 
         event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
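
ftrace_cpu_disabled, used in both trace.c and trace_functions_graph.c above, is a per-CPU guard: a reader bumps it to fence off tracing on its own CPU only, and the trace hot path pays a single per-CPU load. The pattern in isolation, as a sketch:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, tracing_off);

/* Reader side: block tracing on this CPU while touching its buffer. */
static void reader_enter(void)
{
        preempt_disable();
        __this_cpu_inc(tracing_off);
}

static void reader_exit(void)
{
        __this_cpu_dec(tracing_off);
        preempt_enable();
}

/* Trace side: one per-CPU read decides whether to record the event. */
static void maybe_trace(void)
{
        if (unlikely(__this_cpu_read(tracing_off)))
                return;
        /* ... reserve and commit the event ... */
}
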