path: root/include
author	Pekka Enberg <penberg@kernel.org>	2011-03-11 10:42:05 -0500
committer	Pekka Enberg <penberg@kernel.org>	2011-03-11 10:42:05 -0500
commit	2a6c5176ecc87eaeb5b1e6d8e64deb9430f4e602 (patch)
tree	e1bbad4b3c3775bc69fc41bc52103b494aca105e /include
parent	a5abba989deceb731047425812d268daf7536575 (diff)
parent	b9ec40af0e18fb7d02106be148036c2ea490fdf9 (diff)
Merge branch 'for-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu into slub/lockless
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/vmlinux.lds.h	35
-rw-r--r--	include/linux/percpu.h	128
2 files changed, 150 insertions, 13 deletions
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index fe77e3395b4..22d3342d6af 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -15,7 +15,7 @@
  *	HEAD_TEXT_SECTION
  *	INIT_TEXT_SECTION(PAGE_SIZE)
  *	INIT_DATA_SECTION(...)
- *	PERCPU(PAGE_SIZE)
+ *	PERCPU(CACHELINE_SIZE, PAGE_SIZE)
  *	__init_end = .;
  *
  *	_stext = .;
@@ -683,13 +683,18 @@
 
 /**
  * PERCPU_VADDR - define output section for percpu area
+ * @cacheline: cacheline size
  * @vaddr: explicit base address (optional)
  * @phdr: destination PHDR (optional)
  *
- * Macro which expands to output section for percpu area. If @vaddr
- * is not blank, it specifies explicit base address and all percpu
- * symbols will be offset from the given address. If blank, @vaddr
- * always equals @laddr + LOAD_OFFSET.
+ * Macro which expands to output section for percpu area.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ *
+ * If @vaddr is not blank, it specifies explicit base address and all
+ * percpu symbols will be offset from the given address. If blank,
+ * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
@@ -700,7 +705,7 @@
  * If there is no need to put the percpu section at a predetermined
  * address, use PERCPU().
  */
-#define PERCPU_VADDR(vaddr, phdr) \
+#define PERCPU_VADDR(cacheline, vaddr, phdr) \
 	VMLINUX_SYMBOL(__per_cpu_load) = .; \
 	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
 				- LOAD_OFFSET) { \
@@ -708,7 +713,9 @@
 	*(.data..percpu..first) \
 	. = ALIGN(PAGE_SIZE); \
 	*(.data..percpu..page_aligned) \
+	. = ALIGN(cacheline); \
 	*(.data..percpu..readmostly) \
+	. = ALIGN(cacheline); \
 	*(.data..percpu) \
 	*(.data..percpu..shared_aligned) \
 	VMLINUX_SYMBOL(__per_cpu_end) = .; \
@@ -717,18 +724,18 @@
 
 /**
  * PERCPU - define output section for percpu area, simple version
+ * @cacheline: cacheline size
  * @align: required alignment
  *
- * Align to @align and outputs output section for percpu area. This
- * macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
+ * Align to @align and outputs output section for percpu area. This macro
+ * doesn't manipulate @vaddr or @phdr and __per_cpu_load and
  * __per_cpu_start will be identical.
  *
- * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
- * that __per_cpu_load is defined as a relative symbol against
- * .data..percpu which is required for relocatable x86_32
- * configuration.
+ * This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,)
+ * except that __per_cpu_load is defined as a relative symbol against
+ * .data..percpu which is required for relocatable x86_32 configuration.
  */
-#define PERCPU(align) \
+#define PERCPU(cacheline, align) \
 	. = ALIGN(align); \
 	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__per_cpu_load) = .; \
@@ -736,7 +743,9 @@
 	*(.data..percpu..first) \
 	. = ALIGN(PAGE_SIZE); \
 	*(.data..percpu..page_aligned) \
+	. = ALIGN(cacheline); \
 	*(.data..percpu..readmostly) \
+	. = ALIGN(cacheline); \
 	*(.data..percpu) \
 	*(.data..percpu..shared_aligned) \
 	VMLINUX_SYMBOL(__per_cpu_end) = .; \
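
The skeleton comment updated in the first hunk shows how callers change: the cacheline size becomes the first argument of PERCPU() (and of PERCPU_VADDR(), ahead of @vaddr and @phdr). A rough sketch of the relevant fragment of an architecture vmlinux.lds.S after this change, where CACHELINE_SIZE is only a stand-in for whatever constant the architecture passes (it is not a symbol defined by this header):

	/* Sketch only; mirrors the usage skeleton documented at the top
	 * of asm-generic/vmlinux.lds.h after this change. */
	HEAD_TEXT_SECTION
	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(...)
	PERCPU(CACHELINE_SIZE, PAGE_SIZE)
	__init_end = .;
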
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 27c3c6fcfad..3a5c4449fd3 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -255,6 +255,30 @@ extern void __bad_size_call_parameter(void);
 	pscr2_ret__; \
 })
 
+/*
+ * Special handling for cmpxchg_double. cmpxchg_double is passed two
+ * percpu variables. The first has to be aligned to a double word
+ * boundary and the second has to follow directly thereafter.
+ */
+#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
+({ \
+	bool pdcrb_ret__; \
+	__verify_pcpu_ptr(&pcp1); \
+	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \
+	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \
+	VM_BUG_ON((unsigned long)(&pcp2) != \
+		  (unsigned long)(&pcp1) + sizeof(pcp1)); \
+	switch(sizeof(pcp1)) { \
+	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
+	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
+	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
+	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
+	default: \
+		__bad_size_call_parameter(); break; \
+	} \
+	pdcrb_ret__; \
+})
+
 #define __pcpu_size_call(stem, variable, ...) \
 do { \
 	__verify_pcpu_ptr(&(variable)); \
@@ -501,6 +525,45 @@ do { \
 #endif
 
 /*
+ * cmpxchg_double replaces two adjacent scalars at once. The first
+ * two parameters are per cpu variables which have to be of the same
+ * size. A truth value is returned to indicate success or failure
+ * (since a double register result is difficult to handle). There is
+ * very limited hardware support for these operations, so only certain
+ * sizes may work.
+ */
+#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+	int ret__; \
+	preempt_disable(); \
+	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
+			oval1, oval2, nval1, nval2); \
+	preempt_enable(); \
+	ret__; \
+})
+
+#ifndef this_cpu_cmpxchg_double
+# ifndef this_cpu_cmpxchg_double_1
+# define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_2
+# define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_4
+# define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_8
+# define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
+/*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
  * responsibility of handling preemptions issues. Arch code can still
@@ -703,6 +766,39 @@ do { \
 	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
 #endif
 
+#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+	int __ret = 0; \
+	if (__this_cpu_read(pcp1) == (oval1) && \
+			__this_cpu_read(pcp2) == (oval2)) { \
+		__this_cpu_write(pcp1, (nval1)); \
+		__this_cpu_write(pcp2, (nval2)); \
+		__ret = 1; \
+	} \
+	(__ret); \
+})
+
+#ifndef __this_cpu_cmpxchg_double
+# ifndef __this_cpu_cmpxchg_double_1
+# define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_2
+# define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_4
+# define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_8
+# define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
@@ -823,4 +919,36 @@ do { \
 	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
 #endif
 
+#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+	int ret__; \
+	unsigned long flags; \
+	local_irq_save(flags); \
+	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
+			oval1, oval2, nval1, nval2); \
+	local_irq_restore(flags); \
+	ret__; \
+})
+
+#ifndef irqsafe_cpu_cmpxchg_double
+# ifndef irqsafe_cpu_cmpxchg_double_1
+# define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_2
+# define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_4
+# define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_8
+# define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	__pcpu_double_call_return_int(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
 #endif /* __LINUX_PERCPU_H */
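
For reference, a minimal, hypothetical caller of the new interface; the struct, variable, and function names below are illustrative only and not part of this patch. The layout satisfies the checks done by __pcpu_double_call_return_bool(): both members have the same size, the first is aligned to twice its size, and the second follows it directly.

#include <linux/percpu.h>

/* Hypothetical example, not part of this commit: two adjacent per-cpu
 * words replaced as a unit. */
struct pcpu_pair {
	unsigned long val;	/* pcp1: aligned to a double word boundary */
	unsigned long seq;	/* pcp2: directly after pcp1 */
} __aligned(2 * sizeof(unsigned long));

static DEFINE_PER_CPU(struct pcpu_pair, example_pair);

static int update_pair(unsigned long oval, unsigned long oseq,
		       unsigned long nval, unsigned long nseq)
{
	/* Truth value: 1 if both words still held the old values and
	 * were replaced with the new ones as a unit. */
	return this_cpu_cmpxchg_double(example_pair.val, example_pair.seq,
				       oval, oseq, nval, nseq);
}

All three flavours take the same six arguments; the __this_cpu_ variant leaves preemption handling to the caller, while the irqsafe_ variant's generic fallback uses local_irq_save()/local_irq_restore() instead of preempt_disable()/preempt_enable().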