author		Linus Torvalds <torvalds@linux-foundation.org>	2009-12-14 12:58:24 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-14 12:58:24 -0500
commit		d0316554d3586cbea60592a41391b5def2553d6f (patch)
tree		5e7418f0bacbc68cec5dfd1541e03eb56870aa02 /include
parent		fb0bbb92d42d5bd0ab224605444efdfed06d6934 (diff)
parent		51e99be00ce2713cbb841cedc997cafa6e26c7f4 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (34 commits)
  m68k: rename global variable vmalloc_end to m68k_vmalloc_end
  percpu: add missing per_cpu_ptr_to_phys() definition for UP
  percpu: Fix kdump failure if booted with percpu_alloc=page
  percpu: make misc percpu symbols unique
  percpu: make percpu symbols in ia64 unique
  percpu: make percpu symbols in powerpc unique
  percpu: make percpu symbols in x86 unique
  percpu: make percpu symbols in xen unique
  percpu: make percpu symbols in cpufreq unique
  percpu: make percpu symbols in oprofile unique
  percpu: make percpu symbols in tracer unique
  percpu: make percpu symbols under kernel/ and mm/ unique
  percpu: remove some sparse warnings
  percpu: make alloc_percpu() handle array types
  vmalloc: fix use of non-existent percpu variable in put_cpu_var()
  this_cpu: Use this_cpu_xx in trace_functions_graph.c
  this_cpu: Use this_cpu_xx for ftrace
  this_cpu: Use this_cpu_xx in nmi handling
  this_cpu: Use this_cpu operations in RCU
  this_cpu: Use this_cpu ops for VM statistics
  ...

Fix up trivial (famous last words) global per-cpu naming conflicts in
	arch/x86/kvm/svm.c
	mm/slab.c
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/percpu.h		  5
-rw-r--r--	include/linux/percpu-defs.h		  1
-rw-r--r--	include/linux/percpu.h			434
-rw-r--r--	include/linux/vmstat.h			 10
-rw-r--r--	include/net/neighbour.h			  7
-rw-r--r--	include/net/netfilter/nf_conntrack.h	  4
-rw-r--r--	include/net/snmp.h			 50
7 files changed, 439 insertions, 72 deletions
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 90079c373f1c..8087b90d4673 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -56,6 +56,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define __raw_get_cpu_var(var) \
 	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
 
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+
 
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
@@ -66,6 +69,8 @@ extern void setup_per_cpu_areas(void);
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu_var(var)))
 #define __get_cpu_var(var)			per_cpu_var(var)
 #define __raw_get_cpu_var(var)			per_cpu_var(var)
+#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
 
 #endif	/* SMP */
 
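For orientation, the two additions above expose the same addressing idea at both ends of the config space: on SMP, this_cpu_ptr() resolves a pointer into the executing CPU's copy of the per-cpu area by shifting a base pointer by that CPU's offset (SHIFT_PERCPU_PTR with my_cpu_offset / __my_cpu_offset), while on UP the offset is zero and the macro collapses to per_cpu_ptr(ptr, 0). The following standalone C sketch models that offset-shift scheme outside the kernel; every name in it (NR_FAKE_CPUS, fake_this_cpu_ptr, and so on) is invented for the illustration and is not part of the kernel API.

/*
 * Userspace model (not kernel code) of the SHIFT_PERCPU_PTR idea: every
 * "CPU" owns a replicated copy of the per-cpu area, and a this_cpu_ptr-style
 * helper turns a pointer into the CPU0 copy into a pointer into the current
 * CPU's copy by adding a per-CPU byte offset.
 */
#include <stdio.h>
#include <string.h>

#define NR_FAKE_CPUS	4

struct pcpu_area {			/* one replicated "unit" per CPU */
	long nr_events;
	long nr_faults;
};

static struct pcpu_area areas[NR_FAKE_CPUS];	/* per-CPU copies */
static int current_cpu;				/* stand-in for smp_processor_id() */

/* byte offset from the CPU0 copy to the copy owned by "cpu" */
static long fake_per_cpu_offset(int cpu)
{
	return (char *)&areas[cpu] - (char *)&areas[0];
}

/* analogue of SHIFT_PERCPU_PTR()/this_cpu_ptr(): shift a CPU0 pointer */
#define fake_this_cpu_ptr(ptr) \
	((__typeof__(ptr))((char *)(ptr) + fake_per_cpu_offset(current_cpu)))

int main(void)
{
	memset(areas, 0, sizeof(areas));

	for (current_cpu = 0; current_cpu < NR_FAKE_CPUS; current_cpu++)
		(*fake_this_cpu_ptr(&areas[0].nr_events)) += current_cpu + 1;

	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		printf("cpu%d: nr_events=%ld\n", cpu, areas[cpu].nr_events);
	return 0;
}

Built with gcc (typeof is a GNU extension, as in the kernel), each iteration lands on a different copy of nr_events even though the source expression always names the CPU 0 copy.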
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 9bd03193ecd4..5a5d6ce4bd55 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -60,6 +60,7 @@
 
 #define DEFINE_PER_CPU_SECTION(type, name, sec)				\
 	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
+	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
 	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
 	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
 	__typeof__(type) per_cpu__##name
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 878836ca999c..cf5efbcf716c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,8 +34,6 @@
 
 #ifdef CONFIG_SMP
 
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)
 
@@ -130,30 +128,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 
 extern void *__alloc_reserved_percpu(size_t size, size_t align);
-
-#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-struct percpu_data {
-	void *ptrs[1];
-};
-
-/* pointer disguising messes up the kmemleak objects tracking */
-#ifndef CONFIG_DEBUG_KMEMLEAK
-#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-#else
-#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
-#endif
-
-#define per_cpu_ptr(ptr, cpu)						\
-({									\
-	struct percpu_data *__p = __percpu_disguise(ptr);		\
-	(__typeof__(ptr))__p->ptrs[(cpu)];				\
-})
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
 extern void *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
@@ -179,6 +156,11 @@ static inline void free_percpu(void *p)
 	kfree(p);
 }
 
+static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+	return __pa(addr);
+}
+
 static inline void __init setup_per_cpu_areas(void) { }
 
 static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -188,8 +170,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
 
 #endif /* CONFIG_SMP */
 
-#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
-						       __alignof__(type))
+#define alloc_percpu(type)	\
+	(typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
 
 /*
  * Optional methods for optimized non-lvalue per-cpu variable access.
@@ -243,4 +225,404 @@ do { \
 # define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
 #endif
 
+/*
+ * Branching function to split up a function into a set of functions that
+ * are called for different scalar sizes of the objects handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#define __pcpu_size_call_return(stem, variable)				\
+({	typeof(variable) pscr_ret__;					\
+	switch(sizeof(variable)) {					\
+	case 1: pscr_ret__ = stem##1(variable);break;			\
+	case 2: pscr_ret__ = stem##2(variable);break;			\
+	case 4: pscr_ret__ = stem##4(variable);break;			\
+	case 8: pscr_ret__ = stem##8(variable);break;			\
+	default:							\
+		__bad_size_call_parameter();break;			\
+	}								\
+	pscr_ret__;							\
+})
+
+#define __pcpu_size_call(stem, variable, ...)				\
+do {									\
+	switch(sizeof(variable)) {					\
+	case 1: stem##1(variable, __VA_ARGS__);break;			\
+	case 2: stem##2(variable, __VA_ARGS__);break;			\
+	case 4: stem##4(variable, __VA_ARGS__);break;			\
+	case 8: stem##8(variable, __VA_ARGS__);break;			\
+	default:							\
+		__bad_size_call_parameter();break;			\
+	}								\
+} while (0)
+
+/*
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables (can be determined
+ * using per_cpu_var(xx)).
+ *
+ * These operations guarantee exclusivity of access for other operations
+ * on the *same* processor. The assumption is that per cpu data is only
+ * accessed by a single processor instance (the current one).
+ *
+ * The first group is used for accesses that must be done in a
+ * preemption safe way since we know that the context is not preempt
+ * safe. Interrupts may occur. If the interrupt modifies the variable
+ * too then RMW actions will not be reliable.
+ *
+ * The arch code can provide optimized functions in two ways:
+ *
+ * 1. Override the function completely. F.e. define this_cpu_add().
+ *    The arch must then ensure that the various scalar formats passed
+ *    are handled correctly.
+ *
+ * 2. Provide functions for certain scalar sizes. F.e. provide
+ *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
+ *    sized RMW actions. If arch code does not provide operations for
+ *    a scalar size then the fallback in the generic code will be
+ *    used.
+ */
+
+#define _this_cpu_generic_read(pcp)					\
+({	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	ret__ = *this_cpu_ptr(&(pcp));					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_read
+# ifndef this_cpu_read_1
+# define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_2
+# define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_4
+# define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_8
+# define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
+# endif
+# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
+#endif
+
+#define _this_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	preempt_disable();						\
+	*__this_cpu_ptr(&pcp) op val;					\
+	preempt_enable();						\
+} while (0)
+
+#ifndef this_cpu_write
+# ifndef this_cpu_write_1
+# define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_2
+# define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_4
+# define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_8
+# define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_add
+# ifndef this_cpu_add_1
+# define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_2
+# define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_4
+# define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_8
+# define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_sub
+# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef this_cpu_inc
+# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
+#endif
+
+#ifndef this_cpu_dec
+# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef this_cpu_and
+# ifndef this_cpu_and_1
+# define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_2
+# define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_4
+# define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_8
+# define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_or
+# ifndef this_cpu_or_1
+# define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_2
+# define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_4
+# define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_8
+# define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_xor
+# ifndef this_cpu_xor_1
+# define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_2
+# define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_4
+# define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_8
+# define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * Generic percpu operations that do not require preemption handling.
+ * Either we do not care about races or the caller has the
+ * responsibility of handling preemption issues. Arch code can still
+ * override these instructions since the arch per cpu code may be more
+ * efficient and may actually get race freeness for free (that is the
+ * case for x86 for example).
+ *
+ * If there is no other protection through preempt disable and/or
+ * disabling interrupts then one of these RMW operations can show unexpected
+ * behavior because the execution thread was rescheduled on another processor
+ * or an interrupt occurred and the same percpu variable was modified from
+ * the interrupt context.
+ */
+#ifndef __this_cpu_read
+# ifndef __this_cpu_read_1
+# define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_2
+# define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_4
+# define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_8
+# define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
+# endif
+# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
+#endif
+
+#define __this_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	*__this_cpu_ptr(&(pcp)) op val;					\
+} while (0)
+
+#ifndef __this_cpu_write
+# ifndef __this_cpu_write_1
+# define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_2
+# define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_4
+# define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_8
+# define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_add
+# ifndef __this_cpu_add_1
+# define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_2
+# define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_4
+# define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_8
+# define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_sub
+# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef __this_cpu_inc
+# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
+#endif
+
+#ifndef __this_cpu_dec
+# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef __this_cpu_and
+# ifndef __this_cpu_and_1
+# define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_2
+# define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_4
+# define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_8
+# define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_or
+# ifndef __this_cpu_or_1
+# define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_2
+# define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_4
+# define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_8
+# define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_xor
+# ifndef __this_cpu_xor_1
+# define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_2
+# define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_4
+# define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_8
+# define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * IRQ safe versions of the per cpu RMW operations. Note that these operations
+ * are *not* safe against modification of the same variable from another
+ * processor (which one gets when using regular atomic operations).
+ * They are guaranteed to be atomic vs. local interrupts and
+ * preemption only.
+ */
+#define irqsafe_cpu_generic_to_op(pcp, val, op)			\
+do {									\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
+	*__this_cpu_ptr(&(pcp)) op val;					\
+	local_irq_restore(flags);					\
+} while (0)
+
+#ifndef irqsafe_cpu_add
+# ifndef irqsafe_cpu_add_1
+# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_2
+# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_4
+# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_8
+# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_sub
+# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val))
+#endif
+
+#ifndef irqsafe_cpu_inc
+# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_dec
+# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_and
+# ifndef irqsafe_cpu_and_1
+# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_2
+# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_4
+# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_8
+# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_or
+# ifndef irqsafe_cpu_or_1
+# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_2
+# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_4
+# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_8
+# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_xor
+# ifndef irqsafe_cpu_xor_1
+# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_2
+# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_4
+# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_8
+# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
+#endif
+
 #endif /* __LINUX_PERCPU_H */
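The dispatch machinery added to percpu.h above (__pcpu_size_call_return() and __pcpu_size_call()) selects a per-size helper (stem##1/2/4/8) with a switch on sizeof() and routes every other size to __bad_size_call_parameter(), which is declared but never defined, so an unsupported operand size surfaces as a link error once the dead branches are optimized away. The sketch below reproduces that pattern in plain, standalone C; the helper names (rd1..rd8, size_call_return, bad_size_call) are invented for the illustration, and bad_size_call() is given an abort()ing body here only so the demo links even without optimization, whereas the kernel deliberately leaves its counterpart undefined.

/*
 * Standalone sketch of the __pcpu_size_call_return() pattern: dispatch to a
 * per-size helper via switch (sizeof(...)).  Not kernel code; all names are
 * invented for the illustration.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static uint8_t  rd1(const void *p) { return *(const uint8_t *)p;  }
static uint16_t rd2(const void *p) { return *(const uint16_t *)p; }
static uint32_t rd4(const void *p) { return *(const uint32_t *)p; }
static uint64_t rd8(const void *p) { return *(const uint64_t *)p; }

/* the kernel leaves its __bad_size_call_parameter() undefined on purpose,
 * turning misuse into a link error; here it aborts so the demo always links */
static void bad_size_call(void) { abort(); }

#define size_call_return(var)						\
({	__typeof__(var) ret__ = 0;					\
	switch (sizeof(var)) {						\
	case 1: ret__ = rd1(&(var)); break;				\
	case 2: ret__ = rd2(&(var)); break;				\
	case 4: ret__ = rd4(&(var)); break;				\
	case 8: ret__ = rd8(&(var)); break;				\
	default: bad_size_call(); break;				\
	}								\
	ret__;								\
})

int main(void)
{
	uint16_t small = 0x1234;
	uint64_t big = 0xdeadbeefcafeULL;

	printf("%x\n", size_call_return(small));
	printf("%llx\n", (unsigned long long)size_call_return(big));
	return 0;
}

Because sizeof() is a compile-time constant, the switch costs nothing at runtime and each expansion keeps only the helper that matches the operand's width.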
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2d0f222388a8..d85889710f9b 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -76,24 +76,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
 
 static inline void __count_vm_event(enum vm_event_item item)
 {
-	__get_cpu_var(vm_event_states).event[item]++;
+	__this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
 }
 
 static inline void count_vm_event(enum vm_event_item item)
 {
-	get_cpu_var(vm_event_states).event[item]++;
-	put_cpu();
+	this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
 }
 
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
-	__get_cpu_var(vm_event_states).event[item] += delta;
+	__this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
 }
 
 static inline void count_vm_events(enum vm_event_item item, long delta)
 {
-	get_cpu_var(vm_event_states).event[item] += delta;
-	put_cpu();
+	this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
 }
 
 extern void all_vm_events(unsigned long *);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 0302f31a2fb7..b0173202cad9 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -88,12 +88,7 @@ struct neigh_statistics {
 	unsigned long unres_discards;	/* number of unresolved drops */
 };
 
-#define NEIGH_CACHE_STAT_INC(tbl, field)				\
-	do {								\
-		preempt_disable();					\
-		(per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \
-		preempt_enable();					\
-	} while (0)
+#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
 
 struct neighbour {
 	struct neighbour *next;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 5cf7270e3ffc..a0904adfb8f7 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -293,11 +293,11 @@ extern unsigned int nf_conntrack_htable_size;
 extern unsigned int nf_conntrack_max;
 
 #define NF_CT_STAT_INC(net, count)		\
-	(per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++)
+	__this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count)	\
 do {						\
 	local_bh_disable();			\
-	per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++;	\
+	__this_cpu_inc((net)->ct.stat->count);	\
 	local_bh_enable();			\
 } while (0)
 
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 8c842e06bec8..f0d756f2ac99 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -136,45 +136,31 @@ struct linux_xfrm_mib {
 #define SNMP_STAT_BHPTR(name)	(name[0])
 #define SNMP_STAT_USRPTR(name)	(name[1])
 
 #define SNMP_INC_STATS_BH(mib, field)	\
-	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
+			__this_cpu_inc(mib[0]->mibs[field])
 #define SNMP_INC_STATS_USER(mib, field)	\
-	do { \
-		per_cpu_ptr(mib[1], get_cpu())->mibs[field]++; \
-		put_cpu(); \
-	} while (0)
-#define SNMP_INC_STATS(mib, field)	\
-	do { \
-		per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]++; \
-		put_cpu(); \
-	} while (0)
-#define SNMP_DEC_STATS(mib, field)	\
-	do { \
-		per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]--; \
-		put_cpu(); \
-	} while (0)
-#define SNMP_ADD_STATS(mib, field, addend)	\
-	do { \
-		per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field] += addend; \
-		put_cpu(); \
-	} while (0)
-#define SNMP_ADD_STATS_BH(mib, field, addend)	\
-	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
-#define SNMP_ADD_STATS_USER(mib, field, addend)	\
-	do { \
-		per_cpu_ptr(mib[1], get_cpu())->mibs[field] += addend; \
-		put_cpu(); \
-	} while (0)
+			this_cpu_inc(mib[1]->mibs[field])
+#define SNMP_INC_STATS(mib, field)	\
+			this_cpu_inc(mib[!in_softirq()]->mibs[field])
+#define SNMP_DEC_STATS(mib, field)	\
+			this_cpu_dec(mib[!in_softirq()]->mibs[field])
+#define SNMP_ADD_STATS_BH(mib, field, addend)	\
+			__this_cpu_add(mib[0]->mibs[field], addend)
+#define SNMP_ADD_STATS_USER(mib, field, addend)	\
+			this_cpu_add(mib[1]->mibs[field], addend)
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)	\
 	do { \
-		__typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], get_cpu());\
+		__typeof__(mib[0]) ptr; \
+		preempt_disable(); \
+		ptr = this_cpu_ptr((mib)[!in_softirq()]); \
 		ptr->mibs[basefield##PKTS]++; \
 		ptr->mibs[basefield##OCTETS] += addend;\
-		put_cpu(); \
+		preempt_enable(); \
 	} while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)	\
 	do { \
-		__typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id());\
+		__typeof__(mib[0]) ptr = \
+			__this_cpu_ptr((mib)[!in_softirq()]); \
 		ptr->mibs[basefield##PKTS]++; \
 		ptr->mibs[basefield##OCTETS] += addend;\
 	} while (0)
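Every conversion in the networking and vmstat headers above follows the same shape: an open-coded sequence that pins the CPU (get_cpu(), preempt_disable(), or smp_processor_id()), indexes the per-cpu data, bumps a counter, and unpins again is collapsed into a single this_cpu_inc()/this_cpu_add()-style operation that combines the addressing and the protection. As a rough userspace analogy (not kernel code, with all names such as stat_inc and NR_THREADS invented for the illustration), the program below lets each thread update a private copy of a counter with one unsynchronized statement and has the reader fold the copies afterwards, which mirrors how per-cpu statistics such as the SNMP mibs are consumed.

/*
 * Userspace analogy (not kernel code) for the statistics conversions above:
 * each execution context updates its own copy of a counter with a single
 * operation, and readers sum the copies afterwards.  Threads stand in for
 * CPUs and __thread storage stands in for per-cpu data.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_THREADS	4
#define NR_UPDATES	100000

static long counters[NR_THREADS];	/* one private slot per "CPU" */
static __thread long *my_counter;	/* this context's slot */

/* analogue of this_cpu_inc(): one statement, no lock around the counter */
#define stat_inc()	((*my_counter)++)

static void *worker(void *slot)
{
	my_counter = slot;		/* bind this thread to its slot */
	for (int i = 0; i < NR_UPDATES; i++)
		stat_inc();
	return NULL;
}

int main(void)
{
	pthread_t tids[NR_THREADS];
	long total = 0;

	for (int i = 0; i < NR_THREADS; i++)
		pthread_create(&tids[i], NULL, worker, &counters[i]);
	for (int i = 0; i < NR_THREADS; i++)
		pthread_join(tids[i], NULL);

	for (int i = 0; i < NR_THREADS; i++)	/* fold the copies, reader side */
		total += counters[i];
	printf("total = %ld\n", total);		/* NR_THREADS * NR_UPDATES */
	return 0;
}

Build with "cc -pthread". The point of the per-cpu (here per-thread) split is that the hot update path touches only local data and needs no lock or atomic; the cost moves to the infrequent reader that sums the copies.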