Diffstat (limited to 'include/asm-generic')
-rw-r--r--	include/asm-generic/atomic.h	8
-rw-r--r--	include/asm-generic/bug.h	34
-rw-r--r--	include/asm-generic/dma-mapping-common.h	20
-rw-r--r--	include/asm-generic/gpio.h	11
-rw-r--r--	include/asm-generic/kmap_types.h	3
-rw-r--r--	include/asm-generic/percpu.h	10
-rw-r--r--	include/asm-generic/scatterlist.h	17
-rw-r--r--	include/asm-generic/topology.h	3
-rw-r--r--	include/asm-generic/vmlinux.lds.h	46
9 files changed, 83 insertions, 69 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index c33749f95b3..058129e9b04 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -30,8 +30,7 @@
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)	(*(volatile int *)&(v)->counter)
 
@@ -40,8 +39,7 @@
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v, i) (((v)->counter) = (i))
 
@@ -53,7 +51,6 @@
  * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
@@ -75,7 +72,6 @@ static inline int atomic_add_return(int i, atomic_t *v)
  * @v: pointer of type atomic_t
  *
  * Atomically subtracts @i from @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
  */
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
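
The dropped "24 bits" caveat was a holdover from ports that packed a lock into the counter word (old sparc being the classic case); the asm-generic implementation stores a plain int, so the full range is usable. A minimal sketch of what callers may now assume; the names (sample_counter, sample_bump) are invented, and the include reflects this era (later kernels use <linux/atomic.h>):

#include <asm/atomic.h>

static atomic_t sample_counter = ATOMIC_INIT(0);

static int sample_bump(void)
{
	/* a value well beyond 24 bits is fine with the generic atomic_t */
	atomic_set(&sample_counter, 1 << 28);
	return atomic_add_return(1, &sample_counter);
}
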
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 18c435d7c08..c2c9ba032d4 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -25,7 +25,10 @@ struct bug_entry {
 };
 #endif	/* __ASSEMBLY__ */
 
-#define BUGFLAG_WARNING	(1<<0)
+#define BUGFLAG_WARNING		(1 << 0)
+#define BUGFLAG_TAINT(taint)	(BUGFLAG_WARNING | ((taint) << 8))
+#define BUG_GET_TAINT(bug)	((bug)->flags >> 8)
+
 #endif	/* CONFIG_GENERIC_BUG */
 
 /*
@@ -56,17 +59,25 @@ struct bug_entry {
  * appear at runtime. Use the versions with printk format strings
  * to provide better diagnostics.
  */
-#ifndef __WARN
+#ifndef __WARN_TAINT
 #ifndef __ASSEMBLY__
 extern void warn_slowpath_fmt(const char *file, const int line,
 		const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+extern void warn_slowpath_fmt_taint(const char *file, const int line,
+				    unsigned taint, const char *fmt, ...)
+	__attribute__((format(printf, 4, 5)));
 extern void warn_slowpath_null(const char *file, const int line);
 #define WANT_WARN_ON_SLOWPATH
 #endif
 #define __WARN()		warn_slowpath_null(__FILE__, __LINE__)
 #define __WARN_printf(arg...)	warn_slowpath_fmt(__FILE__, __LINE__, arg)
+#define __WARN_printf_taint(taint, arg...)				\
+	warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
 #else
+#define __WARN()		__WARN_TAINT(TAINT_WARN)
 #define __WARN_printf(arg...)	do { printk(arg); __WARN(); } while (0)
+#define __WARN_printf_taint(taint, arg...)				\
+	do { printk(arg); __WARN_TAINT(taint); } while (0)
 #endif
 
 #ifndef WARN_ON
@@ -87,6 +98,13 @@ extern void warn_slowpath_null(const char *file, const int line);
 })
 #endif
 
+#define WARN_TAINT(condition, taint, format...) ({			\
+	int __ret_warn_on = !!(condition);				\
+	if (unlikely(__ret_warn_on))					\
+		__WARN_printf_taint(taint, format);			\
+	unlikely(__ret_warn_on);					\
+})
+
 #else /* !CONFIG_BUG */
 #ifndef HAVE_ARCH_BUG
 #define BUG() do {} while(0)
@@ -110,6 +128,8 @@ extern void warn_slowpath_null(const char *file, const int line);
 })
 #endif
 
+#define WARN_TAINT(condition, taint, format...) WARN_ON(condition)
+
 #endif
 
 #define WARN_ON_ONCE(condition)	({				\
@@ -132,6 +152,16 @@ extern void warn_slowpath_null(const char *file, const int line);
 	unlikely(__ret_warn_once);				\
 })
 
+#define WARN_TAINT_ONCE(condition, taint, format...)	({	\
+	static bool __warned;					\
+	int __ret_warn_once = !!(condition);			\
+								\
+	if (unlikely(__ret_warn_once))				\
+		if (WARN_TAINT(!__warned, taint, format))	\
+			__warned = true;			\
+	unlikely(__ret_warn_once);				\
+})
+
 #define WARN_ON_RATELIMIT(condition, state)			\
 		WARN_ON((condition) && __ratelimit(state))
 
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 69206957b72..0c80bb38773 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -123,15 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 					   size_t size,
 					   enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_range_for_cpu) {
-		ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
-		debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-
-	} else
-		dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+	dma_sync_single_for_cpu(dev, addr + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -140,15 +132,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 					      size_t size,
 					      enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_range_for_device) {
-		ops->sync_single_range_for_device(dev, addr, offset, size, dir);
-		debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
-
-	} else
-		dma_sync_single_for_device(dev, addr + offset, size, dir);
+	dma_sync_single_for_device(dev, addr + offset, size, dir);
 }
 
 static inline void
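
The open-coded dispatch to ops->sync_single_range_for_{cpu,device} is dropped: the range variants now simply fold the offset into the address and call the plain single-sync entry points, which carry the direction check and debug hooks themselves. A hypothetical driver fragment using the surviving API (all names invented):

#include <linux/dma-mapping.h>

static void inspect_rx_header(struct device *dev, dma_addr_t buf_dma,
			      void *buf, size_t hdr_len)
{
	/* give the CPU a coherent view of the first hdr_len bytes */
	dma_sync_single_range_for_cpu(dev, buf_dma, 0, hdr_len,
				      DMA_FROM_DEVICE);
	/* ... parse the header at buf ... */
	/* hand ownership of the range back to the device */
	dma_sync_single_range_for_device(dev, buf_dma, 0, hdr_len,
					 DMA_FROM_DEVICE);
}
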
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 979c6a57f2f..4f3d75e1ad3 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -60,7 +60,9 @@ struct module;
  * @names: if set, must be an array of strings to use as alternative
  *      names for the GPIOs in this chip. Any entry in the array
  *      may be NULL if there is no alias for the GPIO, however the
- *      array must be @ngpio entries long.
+ *      array must be @ngpio entries long. A name can include a single printk
+ *      format specifier for an unsigned int. It is substituted by the actual
+ *      number of the gpio.
  *
  * A gpio_chip can help platforms abstract various sources of GPIOs so
  * they can all be accessed through a common programing interface.
@@ -88,6 +90,9 @@ struct gpio_chip {
 						unsigned offset);
 	int			(*direction_output)(struct gpio_chip *chip,
 						unsigned offset, int value);
+	int			(*set_debounce)(struct gpio_chip *chip,
+						unsigned offset, unsigned debounce);
+
 	void			(*set)(struct gpio_chip *chip,
 						unsigned offset, int value);
 
@@ -98,7 +103,7 @@ struct gpio_chip {
 						struct gpio_chip *chip);
 	int			base;
 	u16			ngpio;
-	char			**names;
+	const char		*const *names;
 	unsigned		can_sleep:1;
 	unsigned		exported:1;
 };
@@ -121,6 +126,8 @@ extern void gpio_free(unsigned gpio);
 extern int gpio_direction_input(unsigned gpio);
 extern int gpio_direction_output(unsigned gpio, int value);
 
+extern int gpio_set_debounce(unsigned gpio, unsigned debounce);
+
 extern int gpio_get_value_cansleep(unsigned gpio);
 extern void gpio_set_value_cansleep(unsigned gpio, int value);
 
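
gpio_set_debounce() gives consumers a portable way to request hardware debouncing; chips that support it implement the new set_debounce hook, and gpiolib returns an error for those that do not. An illustrative consumer sketch (BUTTON_GPIO and button_setup are made up; the debounce argument is in microseconds in gpiolib's implementation):

#include <linux/kernel.h>
#include <linux/gpio.h>

#define BUTTON_GPIO	42	/* assumed board-specific line */

static int button_setup(void)
{
	int err = gpio_request(BUTTON_GPIO, "button");
	if (err)
		return err;
	err = gpio_direction_input(BUTTON_GPIO);
	if (err)
		return err;
	/* not every chip implements the hook; treat failure as soft */
	if (gpio_set_debounce(BUTTON_GPIO, 5000))	/* 5 ms */
		printk(KERN_WARNING "button: no hardware debounce\n");
	return 0;
}
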
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 97e807c8c81..0232ccb76f2 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -29,6 +29,9 @@ KMAP_D(16)	KM_IRQ_PTE,
 KMAP_D(17)	KM_NMI,
 KMAP_D(18)	KM_NMI_PTE,
 KMAP_D(19)	KM_KDB,
+/*
+ * Remember to update debug_kmap_atomic() when adding new kmap types!
+ */
 KMAP_D(20)	KM_TYPE_NR
 };
 
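
The reminder exists because debug_kmap_atomic() sanity-checks that each slot is used from the context it was designed for, so a new KM_* entry without a matching update produces false warnings. For reference, a slot-based atomic mapping of this era looks like the following illustrative helper (KM_USER0 chosen arbitrarily):

#include <linux/highmem.h>

static u32 read_first_word(struct page *page)
{
	u32 *vaddr = kmap_atomic(page, KM_USER0);	/* two-argument API */
	u32 val = *vaddr;

	kunmap_atomic(vaddr, KM_USER0);
	return val;
}
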
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 04f91c2d3f7..b5043a9890d 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,7 +80,7 @@ extern void setup_per_cpu_areas(void);
 
 #ifndef PER_CPU_BASE_SECTION
 #ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data.percpu"
+#define PER_CPU_BASE_SECTION ".data..percpu"
 #else
 #define PER_CPU_BASE_SECTION ".data"
 #endif
@@ -92,15 +92,15 @@ extern void setup_per_cpu_areas(void);
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
 #define PER_CPU_ALIGNED_SECTION ""
 #else
-#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
 #endif
-#define PER_CPU_FIRST_SECTION ".first"
+#define PER_CPU_FIRST_SECTION "..first"
 
 #else
 
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
 #define PER_CPU_FIRST_SECTION ""
 
 #endif
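
The extra dot is deliberate: kernel-generated section suffixes move from ".data.foo" to ".data..foo" so they can never collide with the ".data.<symbol>" sections gcc emits under -ffunction-sections/-fdata-sections. These section names are what DEFINE_PER_CPU() and friends expand to; a small usage sketch with invented names:

#include <linux/percpu.h>

DEFINE_PER_CPU(int, sample_hits);	/* lands in .data..percpu on SMP */

static void count_hit(void)
{
	get_cpu_var(sample_hits)++;	/* disables preemption around the access */
	put_cpu_var(sample_hits);
}
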
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h
index 8b9454496a7..5de07355fad 100644
--- a/include/asm-generic/scatterlist.h
+++ b/include/asm-generic/scatterlist.h
@@ -11,7 +11,9 @@ struct scatterlist {
 	unsigned int	offset;
 	unsigned int	length;
 	dma_addr_t	dma_address;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
 	unsigned int	dma_length;
+#endif
 };
 
 /*
@@ -22,22 +24,11 @@ struct scatterlist {
  * is 0.
  */
 #define sg_dma_address(sg)	((sg)->dma_address)
-#ifndef sg_dma_len
-/*
- * Normally, you have an iommu on 64 bit machines, but not on 32 bit
- * machines. Architectures that are differnt should override this.
- */
-#if __BITS_PER_LONG == 64
+
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
 #define sg_dma_len(sg)		((sg)->dma_length)
 #else
 #define sg_dma_len(sg)		((sg)->length)
-#endif /* 64 bit */
-#endif /* sg_dma_len */
-
-#ifndef ISA_DMA_THRESHOLD
-#define ISA_DMA_THRESHOLD	(~0UL)
-#endif
 #endif
 
-#define ARCH_HAS_SG_CHAIN
-
 #endif /* __ASM_GENERIC_SCATTERLIST_H */
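
Instead of guessing from __BITS_PER_LONG, architectures whose IOMMUs can merge scatterlist entries now select CONFIG_NEED_SG_DMA_LENGTH explicitly, and everyone else drops the dma_length field from struct scatterlist entirely. Either way callers must read segment lengths through sg_dma_len() after mapping, as in this invented consumer loop:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static void program_hw(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (!mapped)
		return;		/* mapping failed */

	/* the IOMMU may have merged entries: iterate the mapped count */
	for_each_sg(sgl, sg, mapped, i)
		printk(KERN_DEBUG "seg %d: addr 0x%llx len %u\n", i,
		       (unsigned long long)sg_dma_address(sg),
		       sg_dma_len(sg));
}
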
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 510df36dd5d..fd60700503c 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -34,6 +34,9 @@
 #ifndef cpu_to_node
 #define cpu_to_node(cpu)	((void)(cpu),0)
 #endif
+#ifndef cpu_to_mem
+#define cpu_to_mem(cpu)		((void)(cpu),0)
+#endif
 #ifndef parent_node
 #define parent_node(node)	((void)(node),0)
 #endif
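
cpu_to_mem() is the memory-aware sibling of cpu_to_node(): on platforms with memoryless NUMA nodes it resolves to the nearest node that actually has pages, while the generic fallback above is node 0, matching cpu_to_node(). A hedged allocation sketch (the helper name is invented):

#include <linux/topology.h>
#include <linux/slab.h>

static void *alloc_near(int cpu, size_t size)
{
	/* prefer the closest node with usable memory */
	return kmalloc_node(size, GFP_KERNEL, cpu_to_mem(cpu));
}
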
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e652068e0..48c5299cbf2 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -175,25 +175,25 @@
 #define NOSAVE_DATA							\
 	. = ALIGN(PAGE_SIZE);						\
 	VMLINUX_SYMBOL(__nosave_begin) = .;				\
-	*(.data.nosave)							\
+	*(.data..nosave)						\
 	. = ALIGN(PAGE_SIZE);						\
 	VMLINUX_SYMBOL(__nosave_end) = .;
 
 #define PAGE_ALIGNED_DATA(page_align)					\
 	. = ALIGN(page_align);						\
-	*(.data.page_aligned)
+	*(.data..page_aligned)
 
 #define READ_MOSTLY_DATA(align)						\
 	. = ALIGN(align);						\
-	*(.data.read_mostly)
+	*(.data..read_mostly)
 
 #define CACHELINE_ALIGNED_DATA(align)					\
 	. = ALIGN(align);						\
-	*(.data.cacheline_aligned)
+	*(.data..cacheline_aligned)
 
 #define INIT_TASK_DATA(align)						\
 	. = ALIGN(align);						\
-	*(.data.init_task)
+	*(.data..init_task)
 
 /*
  * Read only Data
@@ -247,10 +247,10 @@
 	}								\
 									\
 	/* RapidIO route ops */						\
-	.rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) {		\
-		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
-		*(.rio_route_ops)					\
-		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
+	.rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) {			\
+		VMLINUX_SYMBOL(__start_rio_switch_ops) = .;		\
+		*(.rio_switch_ops)					\
+		VMLINUX_SYMBOL(__end_rio_switch_ops) = .;		\
 	}								\
 									\
 	TRACEDATA
@@ -435,7 +435,7 @@
  */
 #define INIT_TASK_DATA_SECTION(align)					\
 	. = ALIGN(align);						\
-	.data.init_task : {						\
+	.data..init_task : {						\
 		INIT_TASK_DATA(align)					\
 	}
 
@@ -499,7 +499,7 @@
 #define BSS(bss_align)							\
 	. = ALIGN(bss_align);						\
 	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
-		*(.bss.page_aligned)					\
+		*(.bss..page_aligned)					\
 		*(.dynbss)						\
 		*(.bss)							\
 		*(COMMON)						\
@@ -666,16 +666,16 @@
  */
 #define PERCPU_VADDR(vaddr, phdr)					\
 	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
-	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
+	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)	\
 				- LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
-		*(.data.percpu.first)					\
-		*(.data.percpu.page_aligned)				\
-		*(.data.percpu)						\
-		*(.data.percpu.shared_aligned)				\
+		*(.data..percpu..first)					\
+		*(.data..percpu..page_aligned)				\
+		*(.data..percpu)					\
+		*(.data..percpu..shared_aligned)			\
 		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
 	} phdr								\
-	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
 
 /**
  * PERCPU - define output section for percpu area, simple version
@@ -687,18 +687,18 @@
  *
  * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
  * that __per_cpu_load is defined as a relative symbol against
- * .data.percpu which is required for relocatable x86_32
+ * .data..percpu which is required for relocatable x86_32
  * configuration.
  */
 #define PERCPU(align)							\
 	. = ALIGN(align);						\
-	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
+	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
 		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
-		*(.data.percpu.first)					\
-		*(.data.percpu.page_aligned)				\
-		*(.data.percpu)						\
-		*(.data.percpu.shared_aligned)				\
+		*(.data..percpu..first)					\
+		*(.data..percpu..page_aligned)				\
+		*(.data..percpu)					\
+		*(.data..percpu..shared_aligned)			\
 		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
 	}
 
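
This is the linker-script side of the ".data..foo" rename, kept in lockstep with the section names emitted from C; the input sections these macros collect originate from annotations like the ones below (illustrative variables; the attributes are the standard ones from <linux/cache.h>):

#include <linux/cache.h>

/* collected by READ_MOSTLY_DATA(align) from .data..read_mostly */
int sample_flag __read_mostly;

/* collected by CACHELINE_ALIGNED_DATA(align) from .data..cacheline_aligned */
struct sample_pair {
	long a, b;
} sample_counters __cacheline_aligned;

The .rio_route to .rio_ops change is unrelated to the rename: it appears to track RapidIO's move from route-only op tables to broader per-switch ops, with the section and its start/end symbols renamed to match.
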