author		Christoph Lameter <cl@linux.com>	2014-04-07 18:39:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-07 19:36:14 -0400
commit		188a81409ff7de1c5aae947a96356ddd8ff4aaa3 (patch)
tree		7f2780e62f8a3b33c3cc6ef8b68ee05790909a8d
parent		293b6a4c875c3b49853bff7de99954f49f59aa75 (diff)

percpu: add preemption checks to __this_cpu ops

We define a check function in order to avoid trouble with the include
files.  Then the higher level __this_cpu macros are modified to invoke
the preemption check.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Tested-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 include/linux/percpu.h | 39
 lib/smp_processor_id.c | 18
 2 files changed, 43 insertions(+), 14 deletions(-)
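For illustration only (not part of the commit): a minimal sketch of what the new check catches. Assuming CONFIG_DEBUG_PREEMPT=y, a hypothetical test module like the one below would typically now get a "BUG: using __this_cpu_add() in preemptible" warning for the first increment, while the preempt_disable()-protected access and the this_cpu_* variant stay silent. All names here (demo_counter, percpu_check_demo_*) are made up for the example.

/* Hypothetical demo module; only the percpu/preempt API calls are real kernel interfaces. */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

static int __init percpu_check_demo_init(void)
{
	/*
	 * Preemption is enabled here, so with this patch applied and
	 * CONFIG_DEBUG_PREEMPT=y the raw __this_cpu op gets flagged,
	 * roughly: "BUG: using __this_cpu_add() in preemptible [...] code: ..."
	 */
	__this_cpu_add(demo_counter, 1);

	/* Correct: stay on one CPU for the duration of the raw op. */
	preempt_disable();
	__this_cpu_add(demo_counter, 1);
	preempt_enable();

	/* Also fine: this_cpu_add() is safe to call from preemptible context. */
	this_cpu_add(demo_counter, 1);

	return 0;
}

static void __exit percpu_check_demo_exit(void)
{
}

module_init(percpu_check_demo_init);
module_exit(percpu_check_demo_exit);
MODULE_LICENSE("GPL");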
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 4e4d2afcc0c7..e7a0b95ed527 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -173,6 +173,12 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 extern void __bad_size_call_parameter(void);
 
+#ifdef CONFIG_DEBUG_PREEMPT
+extern void __this_cpu_preempt_check(const char *op);
+#else
+static inline void __this_cpu_preempt_check(const char *op) { }
+#endif
+
 #define __pcpu_size_call_return(stem, variable)			\
 ({	typeof(variable) pscr_ret__;					\
 	__verify_pcpu_ptr(&(variable));					\
@@ -725,18 +731,24 @@ do { \
 
 /*
  * Generic percpu operations for context that are safe from preemption/interrupts.
- * Checks will be added here soon.
  */
 #ifndef __this_cpu_read
-# define __this_cpu_read(pcp)	__pcpu_size_call_return(raw_cpu_read_, (pcp))
+# define __this_cpu_read(pcp) \
+	(__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp)))
 #endif
 
 #ifndef __this_cpu_write
-# define __this_cpu_write(pcp, val)	__pcpu_size_call(raw_cpu_write_, (pcp), (val))
+# define __this_cpu_write(pcp, val)					\
+do { __this_cpu_preempt_check("write");				\
+     __pcpu_size_call(raw_cpu_write_, (pcp), (val));			\
+} while (0)
 #endif
 
 #ifndef __this_cpu_add
-# define __this_cpu_add(pcp, val)	__pcpu_size_call(raw_cpu_add_, (pcp), (val))
+# define __this_cpu_add(pcp, val)					\
+do { __this_cpu_preempt_check("add");					\
+     __pcpu_size_call(raw_cpu_add_, (pcp), (val));			\
+} while (0)
 #endif
 
 #ifndef __this_cpu_sub
@@ -752,16 +764,23 @@ do { \
 #endif
 
 #ifndef __this_cpu_and
-# define __this_cpu_and(pcp, val)	__pcpu_size_call(raw_cpu_and_, (pcp), (val))
+# define __this_cpu_and(pcp, val)					\
+do { __this_cpu_preempt_check("and");					\
+     __pcpu_size_call(raw_cpu_and_, (pcp), (val));			\
+} while (0)
+
 #endif
 
 #ifndef __this_cpu_or
-# define __this_cpu_or(pcp, val)	__pcpu_size_call(raw_cpu_or_, (pcp), (val))
+# define __this_cpu_or(pcp, val)					\
+do { __this_cpu_preempt_check("or");					\
+     __pcpu_size_call(raw_cpu_or_, (pcp), (val));			\
+} while (0)
 #endif
 
 #ifndef __this_cpu_add_return
 # define __this_cpu_add_return(pcp, val)	\
-	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
+	(__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))
 #endif
 
 #define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
@@ -770,17 +789,17 @@ do { \
 
 #ifndef __this_cpu_xchg
 # define __this_cpu_xchg(pcp, nval)	\
-	__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)
+	(__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))
 #endif
 
 #ifndef __this_cpu_cmpxchg
 # define __this_cpu_cmpxchg(pcp, oval, nval)	\
-	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
+	(__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))
 #endif
 
 #ifndef __this_cpu_cmpxchg_double
 # define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+	(__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
 #endif
 
 #endif /* __LINUX_PERCPU_H */
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 04abe53f12a1..1afec32de6f2 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -7,7 +7,8 @@
 #include <linux/kallsyms.h>
 #include <linux/sched.h>
 
-notrace unsigned int debug_smp_processor_id(void)
+notrace static unsigned int check_preemption_disabled(const char *what1,
+						      const char *what2)
 {
 	int this_cpu = raw_smp_processor_id();
 
@@ -38,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
 	if (!printk_ratelimit())
 		goto out_enable;
 
-	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
-			"code: %s/%d\n",
-			preempt_count() - 1, current->comm, current->pid);
+	printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
+		what1, what2, preempt_count() - 1, current->comm, current->pid);
+
 	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
 	dump_stack();
 
@@ -50,5 +51,14 @@ out:
 	return this_cpu;
 }
 
+notrace unsigned int debug_smp_processor_id(void)
+{
+	return check_preemption_disabled("smp_processor_id", "");
+}
 EXPORT_SYMBOL(debug_smp_processor_id);
 
+notrace void __this_cpu_preempt_check(const char *op)
+{
+	check_preemption_disabled("__this_cpu_", op);
+}
+EXPORT_SYMBOL(__this_cpu_preempt_check);
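A side note on the two macro shapes used in the percpu.h hunks above: value-returning ops (__this_cpu_read, __this_cpu_add_return, __this_cpu_xchg, __this_cpu_cmpxchg, __this_cpu_cmpxchg_double) gain the check via a comma expression so the macro still evaluates to the operation's result, while statement ops (__this_cpu_write/add/and/or) wrap the check and the call in do { } while (0) so they expand to a single safe statement. A standalone userspace sketch of the two idioms, with made-up names (check, counter, demo_read, demo_add):

#include <stdio.h>

static int counter;

static void check(const char *op)
{
	printf("check before %s\n", op);
}

/* Comma expression: runs check(), then yields the value of counter. */
#define demo_read()	(check("read"), counter)

/* do { } while (0): check + update behave as one statement, even after a braceless if. */
#define demo_add(val)				\
do {						\
	check("add");				\
	counter += (val);			\
} while (0)

int main(void)
{
	demo_add(2);
	printf("value = %d\n", demo_read());
	return 0;
}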