about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorRusty Russell <rusty@rustcorp.com.au>2009-10-29 09:34:15 -0400
committerTejun Heo <tj@kernel.org>2009-10-29 09:34:15 -0400
commite0fdb0e050eae331046385643618f12452aa7e73 (patch)
tree6156f577879764fd75cf8c46ca7980433de0e59d /include
parentf7b64fe806029e0a0454df132eec3c5ab576102c (diff)
percpu: add __percpu for sparse.
We have to make __kernel "__attribute__((address_space(0)))" so we can cast to it. tj: put_cpu_var() update; annotations added to the dynamic allocator interface. Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> Cc: Al Viro <viro@zeniv.linux.org.uk> Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'include')
-rw-r--r--include/asm-generic/percpu.h4
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/percpu-defs.h2
-rw-r--r--include/linux/percpu.h18
4 files changed, 18 insertions, 10 deletions
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index ca6f0491412b..fded453fd25c 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -41,7 +41,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
41 * Only S390 provides its own means of moving the pointer. 41 * Only S390 provides its own means of moving the pointer.
42 */ 42 */
43#ifndef SHIFT_PERCPU_PTR 43#ifndef SHIFT_PERCPU_PTR
44#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset)) 44/* Weird cast keeps both GCC and sparse happy. */
45#define SHIFT_PERCPU_PTR(__p, __offset) \
46 RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))
45#endif 47#endif
46 48
47/* 49/*
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 04fb5135b4e1..abba8045c6ef 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -5,7 +5,7 @@
5 5
6#ifdef __CHECKER__ 6#ifdef __CHECKER__
7# define __user __attribute__((noderef, address_space(1))) 7# define __user __attribute__((noderef, address_space(1)))
8# define __kernel /* default address space */ 8# define __kernel __attribute__((address_space(0)))
9# define __safe __attribute__((safe)) 9# define __safe __attribute__((safe))
10# define __force __attribute__((force)) 10# define __force __attribute__((force))
11# define __nocast __attribute__((nocast)) 11# define __nocast __attribute__((nocast))
@@ -15,6 +15,7 @@
15# define __acquire(x) __context__(x,1) 15# define __acquire(x) __context__(x,1)
16# define __release(x) __context__(x,-1) 16# define __release(x) __context__(x,-1)
17# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) 17# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
18# define __percpu __attribute__((noderef, address_space(3)))
18extern void __chk_user_ptr(const volatile void __user *); 19extern void __chk_user_ptr(const volatile void __user *);
19extern void __chk_io_ptr(const volatile void __iomem *); 20extern void __chk_io_ptr(const volatile void __iomem *);
20#else 21#else
@@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
32# define __acquire(x) (void)0 33# define __acquire(x) (void)0
33# define __release(x) (void)0 34# define __release(x) (void)0
34# define __cond_lock(x,c) (c) 35# define __cond_lock(x,c) (c)
36# define __percpu
35#endif 37#endif
36 38
37#ifdef __KERNEL__ 39#ifdef __KERNEL__
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index ee99f6c2cdcd..0fa0cb524250 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -12,7 +12,7 @@
12 * that section. 12 * that section.
13 */ 13 */
14#define __PCPU_ATTRS(sec) \ 14#define __PCPU_ATTRS(sec) \
15 __attribute__((section(PER_CPU_BASE_SECTION sec))) \ 15 __percpu __attribute__((section(PER_CPU_BASE_SECTION sec))) \
16 PER_CPU_ATTRIBUTES 16 PER_CPU_ATTRIBUTES
17 17
18#define __PCPU_DUMMY_ATTRS \ 18#define __PCPU_DUMMY_ATTRS \
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index f965f833a643..2c0d31a3f6b6 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -30,8 +30,12 @@
30 preempt_disable(); \ 30 preempt_disable(); \
31 &__get_cpu_var(var); })) 31 &__get_cpu_var(var); }))
32 32
33/*
34 * The weird & is necessary because sparse considers (void)(var) to be
35 * a direct dereference of percpu variable (var).
36 */
33#define put_cpu_var(var) do { \ 37#define put_cpu_var(var) do { \
34 (void)(var); \ 38 (void)&(var); \
35 preempt_enable(); \ 39 preempt_enable(); \
36} while (0) 40} while (0)
37 41
@@ -130,9 +134,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
130 */ 134 */
131#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) 135#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
132 136
133extern void *__alloc_reserved_percpu(size_t size, size_t align); 137extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
134extern void *__alloc_percpu(size_t size, size_t align); 138extern void __percpu *__alloc_percpu(size_t size, size_t align);
135extern void free_percpu(void *__pdata); 139extern void free_percpu(void __percpu *__pdata);
136 140
137#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 141#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
138extern void __init setup_per_cpu_areas(void); 142extern void __init setup_per_cpu_areas(void);
@@ -142,7 +146,7 @@ extern void __init setup_per_cpu_areas(void);
142 146
143#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) 147#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
144 148
145static inline void *__alloc_percpu(size_t size, size_t align) 149static inline void __percpu *__alloc_percpu(size_t size, size_t align)
146{ 150{
147 /* 151 /*
148 * Can't easily make larger alignment work with kmalloc. WARN 152 * Can't easily make larger alignment work with kmalloc. WARN
@@ -153,7 +157,7 @@ static inline void *__alloc_percpu(size_t size, size_t align)
153 return kzalloc(size, GFP_KERNEL); 157 return kzalloc(size, GFP_KERNEL);
154} 158}
155 159
156static inline void free_percpu(void *p) 160static inline void free_percpu(void __percpu *p)
157{ 161{
158 kfree(p); 162 kfree(p);
159} 163}
@@ -168,7 +172,7 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
168#endif /* CONFIG_SMP */ 172#endif /* CONFIG_SMP */
169 173
170#define alloc_percpu(type) \ 174#define alloc_percpu(type) \
171 (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type)) 175 (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
172 176
173/* 177/*
174 * Optional methods for optimized non-lvalue per-cpu variable access. 178 * Optional methods for optimized non-lvalue per-cpu variable access.