author		Linus Torvalds <torvalds@linux-foundation.org>	2011-01-07 20:02:58 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-07 20:02:58 -0500
commit		72eb6a791459c87a0340318840bb3bd9252b627b (patch)
tree		3bfb8ad99f9c7e511f37f72d57b56a2cea06d753 /include
parent		23d69b09b78c4876e134f104a3814c30747c53f1 (diff)
parent		55ee4ef30241a62b700f79517e6d5ef2ddbefa67 (diff)
Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (30 commits)
gameport: use this_cpu_read instead of lookup
x86: udelay: Use this_cpu_read to avoid address calculation
x86: Use this_cpu_inc_return for nmi counter
x86: Replace uses of current_cpu_data with this_cpu ops
x86: Use this_cpu_ops to optimize code
vmstat: Use per cpu atomics to avoid interrupt disable / enable
irq_work: Use per cpu atomics instead of regular atomics
cpuops: Use cmpxchg for xchg to avoid lock semantics
x86: this_cpu_cmpxchg and this_cpu_xchg operations
percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
percpu,x86: relocate this_cpu_add_return() and friends
connector: Use this_cpu operations
xen: Use this_cpu_inc_return
taskstats: Use this_cpu_ops
random: Use this_cpu_inc_return
fs: Use this_cpu_inc_return in buffer.c
highmem: Use this_cpu_xx_return() operations
vmstat: Use this_cpu_inc_return for vm statistics
x86: Support for this_cpu_add, sub, dec, inc_return
percpu: Generic support for this_cpu_add, sub, dec, inc_return
...
Fixed up conflicts in arch/x86/kernel/{apic/nmi.c, apic/x2apic_uv_x.c, process.c}
as per Tejun.
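The common thread of the series is replacing the __get_cpu_var() read-modify-write
pattern, which requires explicit preemption protection around a computed per-cpu
address, with this_cpu operations that an architecture can fold into a single
instruction (a segment-prefixed instruction on x86). A minimal sketch of the
conversion pattern, using a hypothetical per-cpu counter rather than any symbol
from this merge:

	/* "hits" is a hypothetical per-cpu counter, for illustration only. */
	DEFINE_PER_CPU(unsigned long, hits);

	static void count_hit_old(void)
	{
		preempt_disable();		/* keep the task on this CPU */
		__get_cpu_var(hits)++;		/* compute address, then RMW */
		preempt_enable();
	}

	static void count_hit_new(void)
	{
		this_cpu_inc(hits);		/* preempt-safe RMW in one operation */
	}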
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/irq_regs.h |   8
-rw-r--r--  include/linux/elevator.h       |  12
-rw-r--r--  include/linux/highmem.h        |  13
-rw-r--r--  include/linux/kernel_stat.h    |   2
-rw-r--r--  include/linux/kprobes.h        |   4
-rw-r--r--  include/linux/percpu.h         | 205
6 files changed, 222 insertions(+), 22 deletions(-)
diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h
index 5ae1d07d4a12..6bf9355fa7eb 100644
--- a/include/asm-generic/irq_regs.h
+++ b/include/asm-generic/irq_regs.h
@@ -22,15 +22,15 @@ DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
 {
-	return __get_cpu_var(__irq_regs);
+	return __this_cpu_read(__irq_regs);
 }
 
 static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
 {
-	struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
+	struct pt_regs *old_regs;
 
-	old_regs = *pp_regs;
-	*pp_regs = new_regs;
+	old_regs = __this_cpu_read(__irq_regs);
+	__this_cpu_write(__irq_regs, new_regs);
 	return old_regs;
 }
 
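Note that the rewritten set_irq_regs() is an open-coded per-cpu exchange; with the
__this_cpu_xchg() operation that this same merge adds to percpu.h further down, it
could in principle be collapsed further (a sketch, not a change the series makes):

	static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
	{
		/* read-then-write as one exchange; atomicity vs. other CPUs
		 * is irrelevant here since __irq_regs is strictly local */
		return __this_cpu_xchg(__irq_regs, new_regs);
	}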
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4fd978e7eb83..4d857973d2c9 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -195,15 +195,9 @@ enum {
 /*
  * io context count accounting
  */
-#define elv_ioc_count_mod(name, __val)			\
-	do {						\
-		preempt_disable();			\
-		__get_cpu_var(name) += (__val);		\
-		preempt_enable();			\
-	} while (0)
-
-#define elv_ioc_count_inc(name)	elv_ioc_count_mod(name, 1)
-#define elv_ioc_count_dec(name)	elv_ioc_count_mod(name, -1)
+#define elv_ioc_count_mod(name, __val)	this_cpu_add(name, __val)
+#define elv_ioc_count_inc(name)	this_cpu_inc(name)
+#define elv_ioc_count_dec(name)	this_cpu_dec(name)
 
 #define elv_ioc_count_read(name)			\
 ({							\
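The dropped preempt_disable()/preempt_enable() pair is not lost: it is what the
generic this_cpu_add() fallback supplies when an architecture has no
single-instruction form. For reference, the generic to-op helper in percpu.h
looks roughly like this (paraphrased sketch of the existing fallback):

	#define _this_cpu_generic_to_op(pcp, val, op)			\
	do {								\
		preempt_disable();					\
		*__this_cpu_ptr(&(pcp)) op val;				\
		preempt_enable();					\
	} while (0)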
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index b676c585574e..3a93f73a8acc 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -81,7 +81,8 @@ DECLARE_PER_CPU(int, __kmap_atomic_idx);
 
 static inline int kmap_atomic_idx_push(void)
 {
-	int idx = __get_cpu_var(__kmap_atomic_idx)++;
+	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+
 #ifdef CONFIG_DEBUG_HIGHMEM
 	WARN_ON_ONCE(in_irq() && !irqs_disabled());
 	BUG_ON(idx > KM_TYPE_NR);
@@ -91,16 +92,18 @@ static inline int kmap_atomic_idx_push(void)
 
 static inline int kmap_atomic_idx(void)
 {
-	return __get_cpu_var(__kmap_atomic_idx) - 1;
+	return __this_cpu_read(__kmap_atomic_idx) - 1;
 }
 
-static inline int kmap_atomic_idx_pop(void)
+static inline void kmap_atomic_idx_pop(void)
 {
-	int idx = --__get_cpu_var(__kmap_atomic_idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
+	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+
 	BUG_ON(idx < 0);
+#else
+	__this_cpu_dec(__kmap_atomic_idx);
 #endif
-	return idx;
 }
 
 #endif
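The push conversion rests on a small identity: the old post-increment returned
the counter's value before the increment, while __this_cpu_inc_return() returns
it after, so subtracting one restores the original semantics while turning the
read-modify-write into a single preempt-safe operation. A sketch with a
hypothetical per-cpu variable:

	/* "depth" is a hypothetical per-cpu int, for illustration only. */
	DEFINE_PER_CPU(int, depth);

	static int push_old(void)
	{
		return __get_cpu_var(depth)++;		 /* read old value, then bump */
	}

	static int push_new(void)
	{
		return __this_cpu_inc_return(depth) - 1; /* bump, then adjust back */
	}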
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index ad54c846911b..44e83ba12b5b 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -47,7 +47,7 @@ extern unsigned long long nr_context_switches(void);
 
 #ifndef CONFIG_GENERIC_HARDIRQS
 #define kstat_irqs_this_cpu(irq) \
-	(kstat_this_cpu.irqs[irq])
+	(this_cpu_read(kstat.irqs[irq]))
 
 struct irq_desc;
 
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index b78edb58ee66..dd7c12e875bc 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -305,12 +305,12 @@ struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
 /* kprobe_running() will just return the current_kprobe on this CPU */
 static inline struct kprobe *kprobe_running(void)
 {
-	return (__get_cpu_var(current_kprobe));
+	return (__this_cpu_read(current_kprobe));
 }
 
 static inline void reset_current_kprobe(void)
 {
-	__get_cpu_var(current_kprobe) = NULL;
+	__this_cpu_write(current_kprobe, NULL);
 }
 
 static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5095b834a6fb..27c3c6fcfad3 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -240,6 +240,21 @@ extern void __bad_size_call_parameter(void);
 	pscr_ret__;							\
 })
 
+#define __pcpu_size_call_return2(stem, variable, ...)			\
+({									\
+	typeof(variable) pscr2_ret__;					\
+	__verify_pcpu_ptr(&(variable));					\
+	switch(sizeof(variable)) {					\
+	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
+	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
+	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
+	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	pscr2_ret__;							\
+})
+
 #define __pcpu_size_call(stem, variable, ...)				\
 do {									\
 	__verify_pcpu_ptr(&(variable));					\
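__pcpu_size_call_return2() is the value-returning counterpart of
__pcpu_size_call() for operations that take extra arguments; the token-pasted
stem selects a size-specific implementation at compile time, so an architecture
can override any width individually. Roughly how a call resolves for a 4-byte
per-cpu variable (illustrative expansion, not literal preprocessor output):

	this_cpu_add_return(v, 1)
	  => __pcpu_size_call_return2(this_cpu_add_return_, v, 1)
	  => this_cpu_add_return_4(v, 1)	   /* arch-specific if defined */
	  => _this_cpu_generic_add_return(v, 1)	   /* generic fallback otherwise */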
@@ -402,6 +417,89 @@ do {									\
 # define this_cpu_xor(pcp, val)	__pcpu_size_call(this_cpu_xor_, (pcp), (val))
 #endif
 
+#define _this_cpu_generic_add_return(pcp, val)				\
+({									\
+	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	__this_cpu_add(pcp, val);					\
+	ret__ = __this_cpu_read(pcp);					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+# define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+# define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+# define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+# define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
+
+#define _this_cpu_generic_xchg(pcp, nval)				\
+({	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	ret__ = __this_cpu_read(pcp);					\
+	__this_cpu_write(pcp, nval);					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_xchg
+# ifndef this_cpu_xchg_1
+# define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_2
+# define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_4
+# define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_8
+# define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# define this_cpu_xchg(pcp, nval)					\
+	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	ret__ = __this_cpu_read(pcp);					\
+	if (ret__ == (oval))						\
+		__this_cpu_write(pcp, nval);				\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_cmpxchg
+# ifndef this_cpu_cmpxchg_1
+# define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_2
+# define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_4
+# define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_8
+# define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define this_cpu_cmpxchg(pcp, oval, nval)				\
+	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
 /*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
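A point worth underlining about these fallbacks: preempt_disable() only makes
the operation atomic with respect to the local CPU, which is exactly the
contract, since another processor accesses a different instance of the variable
and no lock-prefixed instruction is needed (see the "cpuops: Use cmpxchg for
xchg to avoid lock semantics" commit above). A hypothetical use, claiming a
per-cpu slot cheaply:

	/* "claimed" is a made-up per-cpu flag, for illustration only. */
	DEFINE_PER_CPU(int, claimed);

	static bool try_claim_this_cpu(void)
	{
		/* atomic vs. preemption on this CPU, not vs. other CPUs */
		return this_cpu_cmpxchg(claimed, 0, 1) == 0;
	}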
@@ -529,11 +627,87 @@ do {									\
 # define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
 #endif
 
+#define __this_cpu_generic_add_return(pcp, val)			\
+({									\
+	__this_cpu_add(pcp, val);					\
+	__this_cpu_read(pcp);						\
+})
+
+#ifndef __this_cpu_add_return
+# ifndef __this_cpu_add_return_1
+# define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_2
+# define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_4
+# define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_8
+# define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
+#endif
+
+#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
+
+#define __this_cpu_generic_xchg(pcp, nval)				\
+({	typeof(pcp) ret__;						\
+	ret__ = __this_cpu_read(pcp);					\
+	__this_cpu_write(pcp, nval);					\
+	ret__;								\
+})
+
+#ifndef __this_cpu_xchg
+# ifndef __this_cpu_xchg_1
+# define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_2
+# define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_4
+# define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_8
+# define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# define __this_cpu_xchg(pcp, nval)					\
+	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) ret__;						\
+	ret__ = __this_cpu_read(pcp);					\
+	if (ret__ == (oval))						\
+		__this_cpu_write(pcp, nval);				\
+	ret__;								\
+})
+
+#ifndef __this_cpu_cmpxchg
+# ifndef __this_cpu_cmpxchg_1
+# define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_2
+# define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_4
+# define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_8
+# define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define __this_cpu_cmpxchg(pcp, oval, nval)				\
+	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
  * processors (which one gets when using regular atomic operations)
-. They are guaranteed to be atomic vs. local interrupts and
+ * They are guaranteed to be atomic vs. local interrupts and
  * preemption only.
  */
 #define irqsafe_cpu_generic_to_op(pcp, val, op)			\
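The __this_cpu_* family is identical to the this_cpu_* family above minus the
preemption guard, so it is only safe where the caller has already pinned itself
to a CPU. A sketch of correct usage, with a hypothetical per-cpu counter:

	/* "events" is a hypothetical per-cpu counter, for illustration only. */
	DEFINE_PER_CPU(unsigned long, events);

	static unsigned long bump_events(void)
	{
		unsigned long n;

		preempt_disable();			/* caller supplies protection */
		n = __this_cpu_inc_return(events);	/* raw, unguarded RMW */
		preempt_enable();
		return n;
	}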
@@ -620,4 +794,33 @@ do {									\
 # define irqsafe_cpu_xor(pcp, val)	__pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
 #endif
 
+#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) ret__;						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
+	ret__ = __this_cpu_read(pcp);					\
+	if (ret__ == (oval))						\
+		__this_cpu_write(pcp, nval);				\
+	local_irq_restore(flags);					\
+	ret__;								\
+})
+
+#ifndef irqsafe_cpu_cmpxchg
+# ifndef irqsafe_cpu_cmpxchg_1
+# define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_2
+# define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_4
+# define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_8
+# define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define irqsafe_cpu_cmpxchg(pcp, oval, nval)				\
+	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
+
 #endif /* __LINUX_PERCPU_H */
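Where the plain this_cpu_* fallbacks use preempt_disable(), the irqsafe variants
use local_irq_save(), extending the atomicity guarantee to local interrupt
handlers while still not synchronizing with other CPUs. A hypothetical use when
an irq handler may race with process context on the same CPU:

	/* "pending" is a made-up per-cpu flag shared between process context
	 * and a local interrupt handler; illustration only. */
	DEFINE_PER_CPU(int, pending);

	static bool mark_pending(void)
	{
		/* safe even if an irq on this CPU also touches "pending" */
		return irqsafe_cpu_cmpxchg(pending, 0, 1) == 0;
	}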