Diffstat (limited to 'include/asm-generic')

 -rw-r--r--   include/asm-generic/dma-mapping.h   | 308
 -rw-r--r--   include/asm-generic/fcntl.h         |  12
 -rw-r--r--   include/asm-generic/gpio.h          |   5
 -rw-r--r--   include/asm-generic/percpu.h        |  52
 -rw-r--r--   include/asm-generic/sections.h      |   2
 -rw-r--r--   include/asm-generic/siginfo.h       |  14
 -rw-r--r--   include/asm-generic/statfs.h        |   5
 -rw-r--r--   include/asm-generic/topology.h      |  10
 -rw-r--r--   include/asm-generic/vmlinux.lds.h   |  99

 9 files changed, 159 insertions, 348 deletions
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
deleted file mode 100644
index 189486c3f92e..000000000000
--- a/include/asm-generic/dma-mapping.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
- *
- * Implements the generic device dma API via the existing pci_ one
- * for unconverted architectures
- */
-
-#ifndef _ASM_GENERIC_DMA_MAPPING_H
-#define _ASM_GENERIC_DMA_MAPPING_H
-
-
-#ifdef CONFIG_PCI
-
-/* we implement the API below in terms of the existing PCI one,
- * so include it */
-#include <linux/pci.h>
-/* need struct page definitions */
-#include <linux/mm.h>
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_dma_supported(to_pci_dev(dev), mask);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-}
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flag)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t dma_handle)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
-				    size, (int)direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
-				       size, (int)direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
-}
-
-
-#else
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	return 0;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG();
-	return 0;
-}
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flag)
-{
-	BUG();
-	return NULL;
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t dma_handle)
-{
-	BUG();
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG();
-	return 0;
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
-{
-	BUG();
-	return 0;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	BUG();
-	return 0;
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
-{
-	BUG();
-}
-
-static inline int
-dma_error(dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-#endif
-
-/* Now for the API extensions over the pci_ one */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
-
-static inline int
-dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-	/* just sync everything, that's all the pci API can do */
-	dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-	/* just sync everything, that's all the pci API can do */
-	dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
-}
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	/* could define this in terms of the dma_cache ... operations,
-	 * but if you get this on a platform, you should convert the platform
-	 * to using the generic device DMA API */
-	BUG();
-}
-
-#endif
-
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index b8477414c5c8..4d3e48373e74 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -117,9 +117,9 @@
 struct flock {
 	short	l_type;
 	short	l_whence;
-	off_t	l_start;
-	off_t	l_len;
-	pid_t	l_pid;
+	__kernel_off_t	l_start;
+	__kernel_off_t	l_len;
+	__kernel_pid_t	l_pid;
 	__ARCH_FLOCK_PAD
 };
 #endif
@@ -140,9 +140,9 @@ struct flock {
 struct flock64 {
 	short  l_type;
 	short  l_whence;
-	loff_t l_start;
-	loff_t l_len;
-	pid_t  l_pid;
+	__kernel_loff_t l_start;
+	__kernel_loff_t l_len;
+	__kernel_pid_t  l_pid;
 	__ARCH_FLOCK64_PAD
 };
 #endif
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 81797ec9ab29..d6c379dc64fa 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -55,6 +55,10 @@ struct module;
  * handled is (base + ngpio - 1).
  * @can_sleep: flag must be set iff get()/set() methods sleep, as they
  *	must while accessing GPIO expander chips over I2C or SPI
+ * @names: if set, must be an array of strings to use as alternative
+ *	names for the GPIOs in this chip. Any entry in the array
+ *	may be NULL if there is no alias for the GPIO, however the
+ *	array must be @ngpio entries long.
  *
  * A gpio_chip can help platforms abstract various sources of GPIOs so
  * they can all be accessed through a common programing interface.
@@ -92,6 +96,7 @@ struct gpio_chip {
 						struct gpio_chip *chip);
 	int			base;
 	u16			ngpio;
+	char			**names;
 	unsigned		can_sleep:1;
 	unsigned		exported:1;
 };
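
The new @names field is easiest to see in use. Below is a minimal, hypothetical sketch of a driver filling it in; the chip, its label and the alias strings are invented for illustration, and the callbacks omitted here would still be required before registering the chip. Only the gpio_chip fields themselves come from this patch.

#include <linux/gpio.h>

/* Hypothetical 4-line expander; every name below is made up. */
static char *demo_gpio_names[4] = {
	"led:green",		/* alias for GPIO base + 0 */
	NULL,			/* no alias for GPIO base + 1 */
	"button",		/* alias for GPIO base + 2 */
	NULL,
};

static struct gpio_chip demo_chip = {
	.label		= "demo-expander",
	.base		= -1,		/* let gpiolib assign the base */
	.ngpio		= 4,		/* names[] must be exactly ngpio entries */
	.names		= demo_gpio_names,
	.can_sleep	= 1,		/* e.g. an I2C expander */
	/* .direction_input/.direction_output/.get/.set omitted for brevity */
};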
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index b0e63c672ebd..00f45ff081a6 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,4 +80,56 @@ extern void setup_per_cpu_areas(void);
 #define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
 	__typeof__(type) per_cpu_var(name)
 
+/*
+ * Optional methods for optimized non-lvalue per-cpu variable access.
+ *
+ * @var can be a percpu variable or a field of it and its size should
+ * equal char, int or long.  percpu_read() evaluates to a lvalue and
+ * all others to void.
+ *
+ * These operations are guaranteed to be atomic w.r.t. preemption.
+ * The generic versions use plain get/put_cpu_var().  Archs are
+ * encouraged to implement single-instruction alternatives which don't
+ * require preemption protection.
+ */
+#ifndef percpu_read
+# define percpu_read(var)						\
+  ({									\
+	typeof(per_cpu_var(var)) __tmp_var__;				\
+	__tmp_var__ = get_cpu_var(var);					\
+	put_cpu_var(var);						\
+	__tmp_var__;							\
+  })
+#endif
+
+#define __percpu_generic_to_op(var, val, op)				\
+do {									\
+	get_cpu_var(var) op val;					\
+	put_cpu_var(var);						\
+} while (0)
+
+#ifndef percpu_write
+# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
+#endif
+
+#ifndef percpu_add
+# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
+#endif
+
+#ifndef percpu_sub
+# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
+#endif
+
+#ifndef percpu_and
+# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
+#endif
+
+#ifndef percpu_or
+# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
+#endif
+
+#ifndef percpu_xor
+# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
+#endif
+
 #endif /* _ASM_GENERIC_PERCPU_H_ */
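
A short usage sketch of the accessors added above: the per-cpu counter and the two helper functions are hypothetical and exist only to show the call pattern; only DEFINE_PER_CPU (from linux/percpu.h) and the new percpu_read()/percpu_add() come from the kernel.

#include <linux/percpu.h>

/* Hypothetical per-cpu statistic, named here only for illustration. */
static DEFINE_PER_CPU(unsigned long, demo_pkt_count);

static void demo_account_packet(void)
{
	/*
	 * Atomic w.r.t. preemption: the generic fallback expands to
	 * get_cpu_var()/put_cpu_var(); an arch may override this with
	 * a single instruction that needs no preemption protection.
	 */
	percpu_add(demo_pkt_count, 1);
}

static unsigned long demo_read_packets(void)
{
	/* Returns this CPU's copy of the counter. */
	return percpu_read(demo_pkt_count);
}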
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 79a7ff925bf8..4ce48e878530 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -9,7 +9,7 @@ extern char __bss_start[], __bss_stop[];
 extern char __init_begin[], __init_end[];
 extern char _sinittext[], _einittext[];
 extern char _end[];
-extern char __per_cpu_start[], __per_cpu_end[];
+extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __start_rodata[], __end_rodata[];
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 969570167e9e..35752dadd6df 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -23,7 +23,7 @@ typedef union sigval {
 #endif
 
 #ifndef __ARCH_SI_UID_T
-#define __ARCH_SI_UID_T	uid_t
+#define __ARCH_SI_UID_T	__kernel_uid32_t
 #endif
 
 /*
@@ -47,13 +47,13 @@ typedef struct siginfo {
 
 	/* kill() */
 	struct {
-		pid_t _pid;		/* sender's pid */
+		__kernel_pid_t _pid;	/* sender's pid */
 		__ARCH_SI_UID_T _uid;	/* sender's uid */
 	} _kill;
 
 	/* POSIX.1b timers */
 	struct {
-		timer_t _tid;		/* timer id */
+		__kernel_timer_t _tid;	/* timer id */
 		int _overrun;		/* overrun count */
 		char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
 		sigval_t _sigval;	/* same as below */
@@ -62,18 +62,18 @@ typedef struct siginfo {
 
 	/* POSIX.1b signals */
 	struct {
-		pid_t _pid;		/* sender's pid */
+		__kernel_pid_t _pid;	/* sender's pid */
 		__ARCH_SI_UID_T _uid;	/* sender's uid */
 		sigval_t _sigval;
 	} _rt;
 
 	/* SIGCHLD */
 	struct {
-		pid_t _pid;		/* which child */
+		__kernel_pid_t _pid;	/* which child */
 		__ARCH_SI_UID_T _uid;	/* sender's uid */
 		int _status;		/* exit code */
-		clock_t _utime;
-		clock_t _stime;
+		__kernel_clock_t _utime;
+		__kernel_clock_t _stime;
 	} _sigchld;
 
 	/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
index 6129d6802149..3b4fb3e52f0d 100644
--- a/include/asm-generic/statfs.h
+++ b/include/asm-generic/statfs.h
@@ -1,8 +1,9 @@
 #ifndef _GENERIC_STATFS_H
 #define _GENERIC_STATFS_H
 
-#ifndef __KERNEL_STRICT_NAMES
-# include <linux/types.h>
+#include <linux/types.h>
+
+#ifdef __KERNEL__
 typedef __kernel_fsid_t fsid_t;
 #endif
 
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 0e9e2bc0ee96..88bada2ebc4b 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -43,20 +43,10 @@
 #ifndef cpumask_of_node
 #define cpumask_of_node(node)	((void)node, cpu_online_mask)
 #endif
-#ifndef node_to_first_cpu
-#define node_to_first_cpu(node)	((void)(node),0)
-#endif
 #ifndef pcibus_to_node
 #define pcibus_to_node(bus)	((void)(bus), -1)
 #endif
 
-#ifndef pcibus_to_cpumask
-#define pcibus_to_cpumask(bus)	(pcibus_to_node(bus) == -1 ?		\
-					CPU_MASK_ALL :			\
-					node_to_cpumask(pcibus_to_node(bus)) \
-				)
-#endif
-
 #ifndef cpumask_of_pcibus
 #define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
 					cpu_all_mask :			\
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index c61fab1dd2f8..7fa660fd449c 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -61,6 +61,30 @@
 #define BRANCH_PROFILE()
 #endif
 
+#ifdef CONFIG_EVENT_TRACER
+#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
+			*(_ftrace_events)				\
+			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+#else
+#define FTRACE_EVENTS()
+#endif
+
+#ifdef CONFIG_TRACING
+#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
+			*(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+			VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
+#else
+#define TRACE_PRINTKS()
+#endif
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
+			 *(__syscalls_metadata)				\
+			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
+#else
+#define TRACE_SYSCALLS()
+#endif
+
 /* .data section */
 #define DATA_DATA							\
 	*(.data)							\
@@ -80,8 +104,16 @@
 	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
 	*(__tracepoints)						\
 	VMLINUX_SYMBOL(__stop___tracepoints) = .;			\
+	/* implement dynamic printk debug */				\
+	. = ALIGN(8);							\
+	VMLINUX_SYMBOL(__start___verbose) = .;				\
+	*(__verbose)							\
+	VMLINUX_SYMBOL(__stop___verbose) = .;				\
 	LIKELY_PROFILE()						\
-	BRANCH_PROFILE()
+	BRANCH_PROFILE()						\
+	TRACE_PRINTKS()							\
+	FTRACE_EVENTS()							\
+	TRACE_SYSCALLS()
 
 #define RO_DATA(align)							\
 	. = ALIGN((align));						\
@@ -309,15 +341,7 @@
 	CPU_DISCARD(init.data)						\
 	CPU_DISCARD(init.rodata)					\
 	MEM_DISCARD(init.data)						\
-	MEM_DISCARD(init.rodata)					\
-	/* implement dynamic printk debug */				\
-	VMLINUX_SYMBOL(__start___verbose_strings) = .;			\
-	*(__verbose_strings)						\
-	VMLINUX_SYMBOL(__stop___verbose_strings) = .;			\
-	. = ALIGN(8);							\
-	VMLINUX_SYMBOL(__start___verbose) = .;				\
-	*(__verbose)							\
-	VMLINUX_SYMBOL(__stop___verbose) = .;
+	MEM_DISCARD(init.rodata)
 
 #define INIT_TEXT							\
 	*(.init.text)							\
@@ -430,12 +454,59 @@
 	*(.initcall7.init)						\
 	*(.initcall7s.init)
 
+/**
+ * PERCPU_VADDR - define output section for percpu area
+ * @vaddr: explicit base address (optional)
+ * @phdr: destination PHDR (optional)
+ *
+ * Macro which expands to output section for percpu area.  If @vaddr
+ * is not blank, it specifies explicit base address and all percpu
+ * symbols will be offset from the given address.  If blank, @vaddr
+ * always equals @laddr + LOAD_OFFSET.
+ *
+ * @phdr defines the output PHDR to use if not blank.  Be warned that
+ * output PHDR is sticky.  If @phdr is specified, the next output
+ * section in the linker script will go there too.  @phdr should have
+ * a leading colon.
+ *
+ * Note that this macros defines __per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU().
+ */
+#define PERCPU_VADDR(vaddr, phdr)					\
+	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
+	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
+				- LOAD_OFFSET) {			\
+		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
+		*(.data.percpu.first)					\
+		*(.data.percpu.page_aligned)				\
+		*(.data.percpu)						\
+		*(.data.percpu.shared_aligned)				\
+		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
+	} phdr								\
+	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+
+/**
+ * PERCPU - define output section for percpu area, simple version
+ * @align: required alignment
+ *
+ * Align to @align and outputs output section for percpu area.  This
+ * macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
+ * __per_cpu_start will be identical.
+ *
+ * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
+ * that __per_cpu_load is defined as a relative symbol against
+ * .data.percpu which is required for relocatable x86_32
+ * configuration.
+ */
 #define PERCPU(align)							\
 	. = ALIGN(align);						\
-	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
 	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
+		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
+		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
+		*(.data.percpu.first)					\
 		*(.data.percpu.page_aligned)				\
 		*(.data.percpu)						\
 		*(.data.percpu.shared_aligned)				\
-	}								\
-	VMLINUX_SYMBOL(__per_cpu_end) = .;
+		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
+	}
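
For context, a hedged sketch of how an architecture's vmlinux.lds.S might invoke the two macros defined above. The PAGE_SIZE alignment, the zero base address and the ":percpu" program header name are illustrative assumptions, not taken from this patch; a real script must also declare any PHDR it references in its PHDRS command.

	/* Common case: percpu area at the current location counter, so
	 * __per_cpu_load and __per_cpu_start end up identical. */
	PERCPU(PAGE_SIZE)

	/* Explicit-address case: percpu symbols offset from 0, with the
	 * output section routed to a dedicated (hypothetical) ":percpu" PHDR. */
	PERCPU_VADDR(0, :percpu)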