diff options
author		Linus Torvalds <torvalds@linux-foundation.org>	2008-10-11 13:09:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-11 13:09:45 -0400
commit		7cc4e87f912bbefa440a51856b8d076e5d1f554a (patch)
tree		1b8df8683f3de37d2e8211ffa8d151f60d59af62 /arch/arm/include
parent		5ba2f67afb02c5302b2898949ed6fc3b3d37dcf1 (diff)
parent		69fc7eed5f56bce15b239e5110de2575a6970df4 (diff)
Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (236 commits)
[ARM] 5300/1: fixup spitz reset during boot
[ARM] 5295/1: make ZONE_DMA optional
[ARM] 5239/1: Palm Zire 72 power management support
[ARM] 5298/1: Drop desc_handle_irq()
[ARM] 5297/1: [KS8695] Fix two compile-time warnings
[ARM] 5296/1: [KS8695] Replace macro's with trailing underscores.
[ARM] pxa: allow multi-machine PCMCIA builds
[ARM] pxa: add preliminary CPUFREQ support for PXA3xx
[ARM] pxa: add missing ACCR bit definitions to pxa3xx-regs.h
[ARM] pxa: rename cpu-pxa.c to cpufreq-pxa2xx.c
[ARM] pxa/zylonite: add support for USB OHCI
[ARM] ohci-pxa27x: use ioremap() and offset for register access
[ARM] ohci-pxa27x: introduce pxa27x_clear_otgph()
[ARM] ohci-pxa27x: use platform_get_{irq,resource} for the resource
[ARM] ohci-pxa27x: move OHCI controller specific registers into the driver
[ARM] ohci-pxa27x: introduce flags to avoid direct access to OHCI registers
[ARM] pxa: move I2S register and bit definitions into pxa2xx-i2s.c
[ARM] pxa: simplify DMA register definitions
[ARM] pxa: make additional DCSR bits valid for PXA3xx
[ARM] pxa: move i2c register and bit definitions into i2c-pxa.c
...
Fixed up conflicts in
arch/arm/mach-versatile/core.c
sound/soc/pxa/pxa2xx-ac97.c
sound/soc/pxa/pxa2xx-i2s.c
manually.
Diffstat (limited to 'arch/arm/include')
24 files changed, 536 insertions, 522 deletions
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7b62351f097d..4d88425a4169 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -12,7 +12,7 @@ extern void __bug(const char *file, int line) __attribute__((noreturn));
 #else
 
 /* this just causes an oops */
-#define BUG()		(*(int *)0 = 0)
+#define BUG()		do { *(int *)0 = 0; } while (1)
 
 #endif
 
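The change above matters because BUG() is expected never to return. The old definition was a bare expression: if the faulting store were ever skipped or survived, execution would fall straight out of the macro. The do { ... } while (1) form still forces the null-pointer oops but also guarantees control cannot continue past it. A user-space sketch of the same idiom (hypothetical MY_BUG(), not the kernel macro):

    #include <stdio.h>

    /* same shape as the new definition: force a fault, then never return */
    #define MY_BUG()	do { *(volatile int *)0 = 0; } while (1)

    int main(void)
    {
    	printf("hit an impossible case\n");
    	MY_BUG();	/* faults here; the loop guarantees no fall-through */
    }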
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 9073d9c6567e..de6c59f814a1 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -444,94 +444,4 @@ static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
 	dmac_inv_range(start, start + size);
 }
 
-#define __cacheid_present(val)			(val != read_cpuid(CPUID_ID))
-#define __cacheid_type_v7(val)			((val & (7 << 29)) == (4 << 29))
-
-#define __cacheid_vivt_prev7(val)		((val & (15 << 25)) != (14 << 25))
-#define __cacheid_vipt_prev7(val)		((val & (15 << 25)) == (14 << 25))
-#define __cacheid_vipt_nonaliasing_prev7(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25))
-#define __cacheid_vipt_aliasing_prev7(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
-
-#define __cacheid_vivt(val)			(__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val))
-#define __cacheid_vipt(val)			(__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val))
-#define __cacheid_vipt_nonaliasing(val)		(__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val))
-#define __cacheid_vipt_aliasing(val)		(__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val))
-#define __cacheid_vivt_asid_tagged_instr(val)	(__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0)
-
-#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
-/*
- * VIVT caches only
- */
-#define cache_is_vivt()			1
-#define cache_is_vipt()			0
-#define cache_is_vipt_nonaliasing()	0
-#define cache_is_vipt_aliasing()	0
-#define icache_is_vivt_asid_tagged()	0
-
-#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
-/*
- * VIPT caches only
- */
-#define cache_is_vivt()			0
-#define cache_is_vipt()			1
-#define cache_is_vipt_nonaliasing()				\
-({								\
-	unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-	__cacheid_vipt_nonaliasing(__val);			\
-})
-
-#define cache_is_vipt_aliasing()				\
-({								\
-	unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-	__cacheid_vipt_aliasing(__val);				\
-})
-
-#define icache_is_vivt_asid_tagged()				\
-({								\
-	unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-	__cacheid_vivt_asid_tagged_instr(__val);		\
-})
-
-#else
-/*
- * VIVT or VIPT caches.  Note that this is unreliable since ARM926
- * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test.
- * There's no way to tell from the CacheType register what type (!)
- * the cache is.
- */
-#define cache_is_vivt()						\
-({								\
-	unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-	(!__cacheid_present(__val)) || __cacheid_vivt(__val);	\
-})
-
-#define cache_is_vipt()						\
-({								\
-	unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-	__cacheid_present(__val) && __cacheid_vipt(__val);	\
-})
-
-#define cache_is_vipt_nonaliasing()				\
-({								\
-	unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-	__cacheid_present(__val) &&				\
-	 __cacheid_vipt_nonaliasing(__val);			\
-})
-
-#define cache_is_vipt_aliasing()				\
-({								\
-	unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-	__cacheid_present(__val) &&				\
-	 __cacheid_vipt_aliasing(__val);			\
-})
-
-#define icache_is_vivt_asid_tagged()				\
-({								\
-	unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-	__cacheid_present(__val) &&				\
-	 __cacheid_vivt_asid_tagged_instr(__val);		\
-})
-
-#endif
-
 #endif
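All of the removed runtime tests were built on GCC's statement-expression extension: a ({ ... }) block evaluates to its last expression, which lets a macro declare a local __val and still be used like a function call. They are replaced by the precomputed cacheid bitmask in the new asm/cachetype.h below. A standalone sketch of the idiom, with a stand-in read_id() since the real read_cpuid() is a coprocessor access:

    /* hypothetical stand-in for read_cpuid(CPUID_CACHETYPE); GCC-only idiom */
    static unsigned int read_id(void) { return 14 << 25; }

    #define cache_is_vipt_demo()				\
    ({							\
    	unsigned int __val = read_id();			\
    	(__val & (15 << 25)) == (14 << 25);		\
    })

    int main(void) { return cache_is_vipt_demo() ? 0 : 1; }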
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
new file mode 100644
index 000000000000..d3a4c2cb9f2f
--- /dev/null
+++ b/arch/arm/include/asm/cachetype.h
@@ -0,0 +1,52 @@
+#ifndef __ASM_ARM_CACHETYPE_H
+#define __ASM_ARM_CACHETYPE_H
+
+#define CACHEID_VIVT			(1 << 0)
+#define CACHEID_VIPT_NONALIASING	(1 << 1)
+#define CACHEID_VIPT_ALIASING		(1 << 2)
+#define CACHEID_VIPT			(CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
+#define CACHEID_ASID_TAGGED		(1 << 3)
+
+extern unsigned int cacheid;
+
+#define cache_is_vivt()			cacheid_is(CACHEID_VIVT)
+#define cache_is_vipt()			cacheid_is(CACHEID_VIPT)
+#define cache_is_vipt_nonaliasing()	cacheid_is(CACHEID_VIPT_NONALIASING)
+#define cache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_ALIASING)
+#define icache_is_vivt_asid_tagged()	cacheid_is(CACHEID_ASID_TAGGED)
+
+/*
+ * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
+ * Mask out support which will never be present on newer CPUs.
+ * - v6+ is never VIVT
+ * - v7+ VIPT never aliases
+ */
+#if __LINUX_ARM_ARCH__ >= 7
+#define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED)
+#elif __LINUX_ARM_ARCH__ >= 6
+#define __CACHEID_ARCH_MIN	(~CACHEID_VIVT)
+#else
+#define __CACHEID_ARCH_MIN	(~0)
+#endif
+
+/*
+ * Mask out support which isn't configured
+ */
+#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
+#define __CACHEID_ALWAYS	(CACHEID_VIVT)
+#define __CACHEID_NEVER		(~CACHEID_VIVT)
+#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
+#define __CACHEID_ALWAYS	(0)
+#define __CACHEID_NEVER		(CACHEID_VIVT)
+#else
+#define __CACHEID_ALWAYS	(0)
+#define __CACHEID_NEVER		(0)
+#endif
+
+static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
+{
+	return (__CACHEID_ALWAYS & mask) |
+	       (~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
+}
+
+#endif
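cacheid_is() above folds to a compile-time constant whenever the configuration or architecture masks already decide the answer: __CACHEID_ALWAYS, __CACHEID_NEVER and __CACHEID_ARCH_MIN are all constants, so a VIVT-only build gets cache_is_vivt() == 1 with no load of cacheid at all. A reduced sketch with the masks hard-coded for an assumed v6, VIPT-only configuration (demo names, not the kernel's):

    #define CACHEID_VIVT			(1 << 0)
    #define CACHEID_VIPT_NONALIASING	(1 << 1)

    /* assumed masks for a v6, CONFIG_CPU_CACHE_VIPT-only build */
    #define DEMO_ARCH_MIN	(~CACHEID_VIVT)
    #define DEMO_NEVER	(CACHEID_VIVT)

    unsigned int cacheid;	/* set once at boot in the real kernel */

    static inline unsigned int cacheid_is_demo(unsigned int mask)
    {
    	return (~DEMO_NEVER & DEMO_ARCH_MIN & mask & cacheid);
    }

    /* cacheid_is_demo(CACHEID_VIVT) is always 0: ~DEMO_NEVER clears the
     * bit before cacheid is even consulted, so the test compiles away. */
    int main(void) { return cacheid_is_demo(CACHEID_VIVT); }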
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
new file mode 100644
index 000000000000..7b9d27e749b8
--- /dev/null
+++ b/arch/arm/include/asm/cputype.h
@@ -0,0 +1,64 @@
+#ifndef __ASM_ARM_CPUTYPE_H
+#define __ASM_ARM_CPUTYPE_H
+
+#include <linux/stringify.h>
+
+#define CPUID_ID	0
+#define CPUID_CACHETYPE	1
+#define CPUID_TCM	2
+#define CPUID_TLBTYPE	3
+
+#ifdef CONFIG_CPU_CP15
+#define read_cpuid(reg)							\
+	({								\
+		unsigned int __val;					\
+		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
+		    : "=r" (__val)					\
+		    :							\
+		    : "cc");						\
+		__val;							\
+	})
+#else
+extern unsigned int processor_id;
+#define read_cpuid(reg) (processor_id)
+#endif
+
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant.  Use this function to read the CPU ID
+ * rather than directly reading processor_id or read_cpuid() directly.
+ */
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+	return read_cpuid(CPUID_ID);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+{
+	return read_cpuid(CPUID_CACHETYPE);
+}
+
+/*
+ * Intel's XScale3 core supports some v6 features (supersections, L2)
+ * but advertises itself as v5 as it does not support the v6 ISA.  For
+ * this reason, we need a way to explicitly test for this type of CPU.
+ */
+#ifndef CONFIG_CPU_XSC3
+#define cpu_is_xsc3()	0
+#else
+static inline int cpu_is_xsc3(void)
+{
+	if ((read_cpuid_id() & 0xffffe000) == 0x69056000)
+		return 1;
+
+	return 0;
+}
+#endif
+
+#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
+#define	cpu_is_xscale()	0
+#else
+#define	cpu_is_xscale()	1
+#endif
+
+#endif
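read_cpuid() works because the CP15 register index must appear as literal text inside the mrc instruction; __stringify() from linux/stringify.h pastes the macro argument into the asm template at compile time, so each CPUID_* constant selects a different opcode. The two-level expansion it relies on, shown standalone:

    #include <stdio.h>

    /* the same two-level expansion linux/stringify.h performs */
    #define __stringify_1(x)	#x
    #define __stringify(x)	__stringify_1(x)

    #define CPUID_CACHETYPE 1

    int main(void)
    {
    	/* in the kernel this text lands inside the asm template */
    	printf("mrc p15, 0, r0, c0, c0, %s\n", __stringify(CPUID_CACHETYPE));
    }

The extra level matters: #x alone would produce "CPUID_CACHETYPE", not "1".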
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7b95d2058395..1cb8602dd9d5 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -104,15 +104,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
  * function so drivers using this API are highlighted with build warnings.
  */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+		dma_addr_t *handle, gfp_t gfp)
 {
 	return NULL;
 }
 
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t handle)
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t handle)
 {
 }
 
@@ -127,8 +126,7 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
  * return the CPU-viewed address, and sets @handle to be the
  * device-viewed address.
  */
-extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
 
 /**
  * dma_free_coherent - free memory allocated by dma_alloc_coherent
@@ -143,9 +141,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
  * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
  */
-extern void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t handle);
+extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
 
 /**
  * dma_mmap_coherent - map a coherent DMA allocation into user space
@@ -159,8 +155,8 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
  * into user space.  The coherent DMA buffer must not be freed by the
  * driver until the user space mapping has been released.
  */
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+		void *, dma_addr_t, size_t);
 
 
 /**
@@ -174,14 +170,94 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
  * return the CPU-viewed address, and sets @handle to be the
  * device-viewed address.
  */
-extern void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
+		gfp_t);
 
 #define dma_free_writecombine(dev,size,cpu_addr,handle) \
 	dma_free_coherent(dev,size,cpu_addr,handle)
 
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
+		void *, dma_addr_t, size_t);
+
+
+#ifdef CONFIG_DMABOUNCE
+/*
+ * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+ *
+ * On the SA-1111, a bug limits DMA to only certain regions of RAM.
+ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
+ * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
+ *
+ * The following are helper functions used by the dmabounce subystem
+ *
+ */
+
+/**
+ * dmabounce_register_dev
+ *
+ * @dev: valid struct device pointer
+ * @small_buf_size: size of buffers to use with small buffer pool
+ * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ *
+ * This function should be called by low-level platform code to register
+ * a device as requireing DMA buffer bouncing. The function will allocate
+ * appropriate DMA pools for the device.
+ *
+ */
+extern int dmabounce_register_dev(struct device *, unsigned long,
+		unsigned long);
+
+/**
+ * dmabounce_unregister_dev
+ *
+ * @dev: valid struct device pointer
+ *
+ * This function should be called by low-level platform code when device
+ * that was previously registered with dmabounce_register_dev is removed
+ * from the system.
+ *
+ */
+extern void dmabounce_unregister_dev(struct device *);
+
+/**
+ * dma_needs_bounce
+ *
+ * @dev: valid struct device pointer
+ * @dma_handle: dma_handle of unbounced buffer
+ * @size: size of region being mapped
+ *
+ * Platforms that utilize the dmabounce mechanism must implement
+ * this function.
+ *
+ * The dmabounce routines call this function whenever a dma-mapping
+ * is requested to determine whether a given buffer needs to be bounced
+ * or not. The function must return 0 if the buffer is OK for
+ * DMA access and 1 if the buffer needs to be bounced.
+ *
+ */
+extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
+
+/*
+ * The DMA API, implemented by dmabounce.c.  See below for descriptions.
+ */
+extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+		enum dma_data_direction);
+extern dma_addr_t dma_map_page(struct device *, struct page *,
+		unsigned long, size_t, enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+		enum dma_data_direction);
+
+/*
+ * Private functions
+ */
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
+		size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
+		size_t, enum dma_data_direction);
+#else
+#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
+#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
 
 
 /**
@@ -198,19 +274,16 @@ int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
  * can regain ownership by calling dma_unmap_single() or
  * dma_sync_single_for_cpu().
  */
-#ifndef CONFIG_DMABOUNCE
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-		enum dma_data_direction dir)
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!arch_is_coherent())
 		dma_cache_maint(cpu_addr, size, dir);
 
 	return virt_to_dma(dev, cpu_addr);
 }
-#else
-extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
-#endif
 
 /**
  * dma_map_page - map a portion of a page for streaming DMA
@@ -224,23 +297,25 @@ extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_d
  * or written back.
  *
  * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_page() or
- * dma_sync_single_for_cpu().
+ * can regain ownership by calling dma_unmap_page().
  */
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction dir)
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir)
 {
-	return dma_map_single(dev, page_address(page) + offset, size, dir);
+	BUG_ON(!valid_dma_direction(dir));
+
+	if (!arch_is_coherent())
+		dma_cache_maint(page_address(page) + offset, size, dir);
+
+	return page_to_dma(dev, page) + offset;
 }
 
 /**
  * dma_unmap_single - unmap a single buffer previously mapped
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
+ * @size: size of buffer (same as passed to dma_map_single)
+ * @dir: DMA transfer direction (same as passed to dma_map_single)
  *
  * Unmap a single streaming mode DMA translation.  The handle and size
  * must match what was provided in the previous dma_map_single() call.
@@ -249,108 +324,34 @@ dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
 {
 	/* nothing to do */
 }
-#else
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#endif
+#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
  *
- * Unmap a single streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_single() call.
+ * Unmap a page streaming mode DMA translation.  The handle and size
+ * must match what was provided in the previous dma_map_page() call.
  * All other usages are undefined.
  *
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
 {
 	dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scatter-gather version of the
- * above dma_map_single interface.  Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for dma_map_single are
- * the same here.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt;
-
-		sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
-		virt = sg_virt(sg);
-
-		if (!arch_is_coherent())
-			dma_cache_maint(virt, sg->length, dir);
-	}
-
-	return nents;
-}
-#else
-extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-#endif
-
-/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Unmap a set of streaming mode DMA translations.
- * Again, CPU read rules concerning calls here are the same as for
- * dma_unmap_single() above.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-	     enum dma_data_direction dir)
-{
-
-	/* nothing to do */
-}
-#else
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-#endif
-
-
-/**
  * dma_sync_single_range_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @handle: DMA address of buffer
@@ -368,145 +369,52 @@ extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_da
  * must first the perform a dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction dir)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t handle, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	BUG_ON(!valid_dma_direction(dir));
+
+	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
 }
 
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction dir)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t handle, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
+	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
+		return;
+
 	if (!arch_is_coherent())
 		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
-#else
-extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-#endif
 
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
-			enum dma_data_direction dir)
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
 }
 
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
-			   enum dma_data_direction dir)
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
 }
 
-
-/**
- * dma_sync_sg_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as dma_sync_single_for_* but for a scatter-gather list,
- * same rules and usage.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-		    enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt = sg_virt(sg);
-		if (!arch_is_coherent())
-			dma_cache_maint(virt, sg->length, dir);
-	}
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
-		       enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt = sg_virt(sg);
-		if (!arch_is_coherent())
-			dma_cache_maint(virt, sg->length, dir);
-	}
-}
-#else
-extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
-#endif
-
-#ifdef CONFIG_DMABOUNCE
 /*
- * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- *
- * This function should be called by low-level platform code to register
- * a device as requireing DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- *
- */
-extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
-
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
+ * The scatter list versions of the above methods.
  */
-extern void dmabounce_unregister_dev(struct device *);
+extern int dma_map_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
 
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#endif /* CONFIG_DMABOUNCE */
 
 #endif /* __KERNEL__ */
 #endif
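The net effect of this diff: there are no longer two parallel definitions of each streaming call. dma_map_single(), dma_map_page() and dma_unmap_single() are declared extern for CONFIG_DMABOUNCE builds and inline otherwise; the sync helpers are always inline and call dmabounce hooks that collapse to (1) when bouncing is off; and the scatterlist operations are always out of line. The driver-visible calling convention is unchanged. A sketch of a transmit path using these declarations (demo_tx and its arguments are illustrative, not from this patch):

    /* illustrative driver fragment, not part of this commit */
    static int demo_tx(struct device *dev, void *buf, size_t len)
    {
    	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

    	if (dma_mapping_error(dev, handle))
    		return -ENOMEM;
    	/* ... hand 'handle' to the device and wait for completion ... */
    	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);	/* same size/dir */
    	return 0;
    }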
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 4ca751627489..5be016980c19 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -3,7 +3,6 @@
 
 #include <asm/hwcap.h>
 
-#ifndef __ASSEMBLY__
 /*
  * ELF register definitions..
  */
@@ -17,12 +16,34 @@ typedef unsigned long elf_freg_t[3];
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
 typedef struct user_fp elf_fpregset_t;
-#endif
 
 #define EM_ARM	40
-#define EF_ARM_APCS26 0x08
-#define EF_ARM_SOFT_FLOAT 0x200
-#define EF_ARM_EABI_MASK 0xFF000000
+
+#define EF_ARM_EABI_MASK	0xff000000
+#define EF_ARM_EABI_UNKNOWN	0x00000000
+#define EF_ARM_EABI_VER1	0x01000000
+#define EF_ARM_EABI_VER2	0x02000000
+#define EF_ARM_EABI_VER3	0x03000000
+#define EF_ARM_EABI_VER4	0x04000000
+#define EF_ARM_EABI_VER5	0x05000000
+
+#define EF_ARM_BE8		0x00800000	/* ABI 4,5 */
+#define EF_ARM_LE8		0x00400000	/* ABI 4,5 */
+#define EF_ARM_MAVERICK_FLOAT	0x00000800	/* ABI 0 */
+#define EF_ARM_VFP_FLOAT	0x00000400	/* ABI 0 */
+#define EF_ARM_SOFT_FLOAT	0x00000200	/* ABI 0 */
+#define EF_ARM_OLD_ABI		0x00000100	/* ABI 0 */
+#define EF_ARM_NEW_ABI		0x00000080	/* ABI 0 */
+#define EF_ARM_ALIGN8		0x00000040	/* ABI 0 */
+#define EF_ARM_PIC		0x00000020	/* ABI 0 */
+#define EF_ARM_MAPSYMSFIRST	0x00000010	/* ABI 2 */
+#define EF_ARM_APCS_FLOAT	0x00000010	/* ABI 0, floats in fp regs */
+#define EF_ARM_DYNSYMSUSESEGIDX	0x00000008	/* ABI 2 */
+#define EF_ARM_APCS_26		0x00000008	/* ABI 0 */
+#define EF_ARM_SYMSARESORTED	0x00000004	/* ABI 1,2 */
+#define EF_ARM_INTERWORK	0x00000004	/* ABI 0 */
+#define EF_ARM_HASENTRY		0x00000002	/* All */
+#define EF_ARM_RELEXEC		0x00000001	/* All */
 
 #define R_ARM_NONE	0
 #define R_ARM_PC24	1
@@ -41,7 +62,6 @@ typedef struct user_fp elf_fpregset_t;
 #endif
 #define ELF_ARCH	EM_ARM
 
-#ifndef __ASSEMBLY__
 /*
  * This yields a string that ld.so will use to load implementation
  * specific libraries for optimization.  This is more specific in
@@ -59,25 +79,17 @@ typedef struct user_fp elf_fpregset_t;
 #define ELF_PLATFORM	(elf_platform)
 
 extern char elf_platform[];
-#endif
 
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) ((x)->e_machine == EM_ARM && ELF_PROC_OK(x))
+struct elf32_hdr;
 
 /*
- * 32-bit code is always OK.  Some cpus can do 26-bit, some can't.
+ * This is used to ensure we don't load something for the wrong architecture.
  */
-#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x))
-
-#define ELF_THUMB_OK(x) \
-	((elf_hwcap & HWCAP_THUMB && ((x)->e_entry & 1) == 1) || \
-	 ((x)->e_entry & 3) == 0)
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
 
-#define ELF_26BIT_OK(x) \
-	((elf_hwcap & HWCAP_26BIT && (x)->e_flags & EF_ARM_APCS26) || \
-	 ((x)->e_flags & EF_ARM_APCS26) == 0)
+extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
+#define elf_read_implies_exec(ex,stk)	arm_elf_read_implies_exec(&(ex), stk)
 
 #define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE	4096
@@ -94,23 +106,7 @@ extern char elf_platform[];
    have no such handler. */
 #define ELF_PLAT_INIT(_r, load_addr)	(_r)->ARM_r0 = 0
 
-/*
- * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
- * and CP1, we only enable access to the iWMMXt coprocessor if the
- * binary is EABI or softfloat (and thus, guaranteed not to use
- * FPA instructions.)
- */
-#define SET_PERSONALITY(ex, ibcs2) \
-	do { \
-		if ((ex).e_flags & EF_ARM_APCS26) { \
-			set_personality(PER_LINUX); \
-		} else { \
-			set_personality(PER_LINUX_32BIT); \
-			if (elf_hwcap & HWCAP_IWMMXT && (ex).e_flags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) \
-				set_thread_flag(TIF_USING_IWMMXT); \
-			else \
-				clear_thread_flag(TIF_USING_IWMMXT); \
-		} \
-	} while (0)
+extern void elf_set_personality(const struct elf32_hdr *);
+#define SET_PERSONALITY(ex, ibcs2)	elf_set_personality(&(ex))
 
 #endif
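Everything this header used to compute inline (ELF_PROC_OK, ELF_THUMB_OK, ELF_26BIT_OK, the SET_PERSONALITY block) is now done out of line in arch/arm/kernel/elf.c, which is why the __ASSEMBLY__ guards could go: only declarations remain. A hedged sketch of what the out-of-line check has to cover (illustrative logic, not the exact function body):

    /* sketch: reject the wrong machine, then require an ARM (4-byte
     * aligned) entry point or, with HWCAP_THUMB, a Thumb (odd) one;
     * fragment assumes asm/hwcap.h for elf_hwcap/HWCAP_THUMB */
    int elf_check_arch(const struct elf32_hdr *x)
    {
    	if (x->e_machine != EM_ARM)
    		return 0;
    	if ((x->e_entry & 3) == 0)
    		return 1;
    	return (elf_hwcap & HWCAP_THUMB) && (x->e_entry & 1);
    }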
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 6a332a9f099c..9ee743b95de8 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -1,6 +1,124 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
+#ifndef _ASM_ARM_FUTEX_H
+#define _ASM_ARM_FUTEX_H
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_SMP
 
 #include <asm-generic/futex.h>
 
-#endif
+#else /* !SMP, we can work around lack of atomic ops by disabling preemption */
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
+	__asm__ __volatile__(					\
+	"1:	ldrt	%1, [%2]\n"				\
+	"	" insn "\n"					\
+	"2:	strt	%0, [%2]\n"				\
+	"	mov	%0, #0\n"				\
+	"3:\n"							\
+	"	.section __ex_table,\"a\"\n"			\
+	"	.align	3\n"					\
+	"	.long	1b, 4f, 2b, 4f\n"			\
+	"	.previous\n"					\
+	"	.section .fixup,\"ax\"\n"			\
+	"4:	mov	%0, %4\n"				\
+	"	b	3b\n"					\
+	"	.previous"					\
+	: "=&r" (ret), "=&r" (oldval)				\
+	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
+	: "cc", "memory")
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+	int op = (encoded_op >> 28) & 7;
+	int cmp = (encoded_op >> 24) & 15;
+	int oparg = (encoded_op << 8) >> 20;
+	int cmparg = (encoded_op << 20) >> 20;
+	int oldval = 0, ret;
+
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+		oparg = 1 << oparg;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	pagefault_disable();	/* implies preempt_disable() */
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("mov	%0, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("add	%0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("orr	%0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("and	%0, %1, %3", ret, oldval, uaddr, ~oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("eor	%0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	pagefault_enable();	/* subsumes preempt_enable() */
+
+	if (!ret) {
+		switch (cmp) {
+		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+		default: ret = -ENOSYS;
+		}
+	}
+	return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+	int val;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	pagefault_disable();	/* implies preempt_disable() */
+
+	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+	"1:	ldrt	%0, [%3]\n"
+	"	teq	%0, %1\n"
+	"2:	streqt	%2, [%3]\n"
+	"3:\n"
+	"	.section __ex_table,\"a\"\n"
+	"	.align	3\n"
+	"	.long	1b, 4f, 2b, 4f\n"
+	"	.previous\n"
+	"	.section .fixup,\"ax\"\n"
+	"4:	mov	%0, %4\n"
+	"	b	3b\n"
+	"	.previous"
+	: "=&r" (val)
+	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+	: "cc", "memory");
+
+	pagefault_enable();	/* subsumes preempt_enable() */
+
+	return val;
+}
+
+#endif /* !SMP */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_ARM_FUTEX_H */
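The !SMP implementation works because on a uniprocessor the only way another task can touch the futex word mid-operation is via preemption, and pagefault_disable() (which implies preempt_disable() here) rules that out. ldrt/strt are the "translated" access variants that honour user-mode permissions, and the __ex_table entries send any fault at labels 1 or 2 to the fixup at 4, which returns -EFAULT. The bit-packing of encoded_op that futex_atomic_op_inuser() undoes can be checked in plain C:

    #include <stdio.h>

    /* same unpacking as futex_atomic_op_inuser() above */
    int main(void)
    {
    	int encoded_op = (1 << 28) | (2 << 24) | (5 << 12) | 7;
    	int op     = (encoded_op >> 28) & 7;
    	int cmp    = (encoded_op >> 24) & 15;
    	int oparg  = (encoded_op << 8) >> 20;	/* 12 bits, sign-extended */
    	int cmparg = (encoded_op << 20) >> 20;	/* 12 bits, sign-extended */

    	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
    	return 0;	/* prints: op=1 cmp=2 oparg=5 cmparg=7 */
    }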
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 71934856fc22..a8094451be57 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -60,10 +60,9 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 #define MT_DEVICE		0
 #define MT_DEVICE_NONSHARED	1
 #define MT_DEVICE_CACHED	2
-#define MT_DEVICE_IXP2000	3
-#define MT_DEVICE_WC		4
+#define MT_DEVICE_WC		3
 /*
- * types 5 onwards can be found in asm/mach/map.h and are undefined
+ * types 4 onwards can be found in asm/mach/map.h and are undefined
  * for ioremap
  */
 
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index d6786090d02c..a0009aa5d157 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -22,6 +22,10 @@
 #ifndef __ASSEMBLY__
 struct irqaction;
 extern void migrate_irqs(void);
+
+extern void asm_do_IRQ(unsigned int, struct pt_regs *);
+void init_IRQ(void);
+
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index a5d0d99ad387..bb8a19bd5822 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -61,7 +61,6 @@ struct kprobe_ctlblk {
 void arch_remove_kprobe(struct kprobe *);
 void kretprobe_trampoline(void);
 
-int kprobe_trap_handler(struct pt_regs *regs, unsigned int instr);
 int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
 int kprobe_exceptions_notify(struct notifier_block *self,
 			     unsigned long val, void *data);
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 9eb936e49cc3..cb1139ac1943 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -18,16 +18,13 @@ struct map_desc {
 	unsigned int type;
 };
 
-/* types 0-4 are defined in asm/io.h */
-#define MT_CACHECLEAN		5
-#define MT_MINICLEAN		6
-#define MT_LOW_VECTORS		7
-#define MT_HIGH_VECTORS		8
-#define MT_MEMORY		9
-#define MT_ROM			10
-
-#define MT_NONSHARED_DEVICE	MT_DEVICE_NONSHARED
-#define MT_IXP2000_DEVICE	MT_DEVICE_IXP2000
+/* types 0-3 are defined in asm/io.h */
+#define MT_CACHECLEAN		4
+#define MT_MINICLEAN		5
+#define MT_LOW_VECTORS		6
+#define MT_HIGH_VECTORS		7
+#define MT_MEMORY		8
+#define MT_ROM			9
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h
index 270902c353fd..f3eabf1ecec3 100644
--- a/arch/arm/include/asm/mach/udc_pxa2xx.h
+++ b/arch/arm/include/asm/mach/udc_pxa2xx.h
@@ -18,8 +18,7 @@ struct pxa2xx_udc_mach_info {
 	/* Boards following the design guidelines in the developer's manual,
 	 * with on-chip GPIOs not Lubbock's weird hardware, can have a sane
 	 * VBUS IRQ and omit the methods above.  Store the GPIO number
-	 * here; for GPIO 0, also mask in one of the pxa_gpio_mode() bits.
-	 * Note that sometimes the signals go through inverters...
+	 * here.  Note that sometimes the signals go through inverters...
 	 */
 	bool	gpio_vbus_inverted;
 	u16	gpio_vbus;			/* high == vbus present */
diff --git a/arch/arm/include/asm/mc146818rtc.h b/arch/arm/include/asm/mc146818rtc.h
index e1ca48a9e973..6b884d2b0b69 100644
--- a/arch/arm/include/asm/mc146818rtc.h
+++ b/arch/arm/include/asm/mc146818rtc.h
@@ -4,8 +4,8 @@
 #ifndef _ASM_MC146818RTC_H
 #define _ASM_MC146818RTC_H
 
+#include <linux/io.h>
 #include <mach/irqs.h>
-#include <asm/io.h>
 
 #ifndef RTC_PORT
 #define RTC_PORT(x)	(0x70 + (x))
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index bf7c737c9226..809ff9ab853a 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -13,30 +13,27 @@
 #ifndef __ASM_ARM_MEMORY_H
 #define __ASM_ARM_MEMORY_H
 
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <mach/memory.h>
+#include <asm/sizes.h>
+
 /*
  * Allow for constants defined here to be used from assembly code
  * by prepending the UL suffix only with actual C code compilation.
  */
-#ifndef __ASSEMBLY__
-#define UL(x) (x##UL)
-#else
-#define UL(x) (x)
-#endif
-
-#include <linux/compiler.h>
-#include <mach/memory.h>
-#include <asm/sizes.h>
+#define UL(x) _AC(x, UL)
 
 #ifdef CONFIG_MMU
 
-#ifndef TASK_SIZE
 /*
+ * PAGE_OFFSET - the virtual address of the start of the kernel image
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
-#define TASK_SIZE		UL(0xbf000000)
-#define TASK_UNMAPPED_BASE	UL(0x40000000)
-#endif
+#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
+#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
+#define TASK_UNMAPPED_BASE	(UL(CONFIG_PAGE_OFFSET) / 3)
 
 /*
  * The maximum size of a 26-bit user space task.
@@ -44,13 +41,6 @@
 #define TASK_SIZE_26		UL(0x04000000)
 
 /*
- * Page offset: 3GB
- */
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET		UL(0xc0000000)
-#endif
-
-/*
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
@@ -147,17 +137,11 @@
 
 #ifndef arch_adjust_zones
 #define arch_adjust_zones(node,size,holes) do { } while (0)
+#elif !defined(CONFIG_ZONE_DMA)
+#error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
 #endif
 
 /*
- * Amount of memory reserved for the vmalloc() area, and minimum
- * address for vmalloc mappings.
- */
-extern unsigned long vmalloc_reserve;
-
-#define VMALLOC_MIN		(void *)(VMALLOC_END - vmalloc_reserve)
-
-/*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
  *
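UL() can now be a one-liner because linux/const.h's _AC() already handles the C-versus-assembler distinction this header used to open-code. The mechanism, spelled out for reference:

    /* what linux/const.h provides */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)	X		/* assembler: bare constant */
    #else
    #define __AC(X, Y)	(X##Y)
    #define _AC(X, Y)	__AC(X, Y)	/* C: paste the UL suffix on */
    #endif

    #define UL(x)	_AC(x, UL)
    /* UL(0xc0000000) -> (0xc0000000UL) in C, 0xc0000000 in .S files */

This also lets PAGE_OFFSET, TASK_SIZE and TASK_UNMAPPED_BASE be derived from CONFIG_PAGE_OFFSET in one place instead of the old per-platform #ifndef overrides.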
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a301e446007f..0559f37c2a27 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
 
 #include <linux/compiler.h>
 #include <asm/cacheflush.h>
+#include <asm/cachetype.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index cf2e2680daaa..bed1c0a00368 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -184,8 +184,9 @@ typedef struct page *pgtable_t;
 
 #endif /* !__ASSEMBLY__ */
 
-#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS \
+	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 /*
  * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
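The new definition ties a data mapping's default protection to the process personality: VM_EXEC is granted only when READ_IMPLIES_EXEC is set, which the ELF loader now decides through arm_elf_read_implies_exec() (see the elf.h change above). The conditional in isolation:

    /* sketch: the decision the new VM_DATA_DEFAULT_FLAGS encodes */
    unsigned long data_vm_flags(unsigned long personality)
    {
    	unsigned long f = VM_READ | VM_WRITE |
    			  VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

    	if (personality & READ_IMPLIES_EXEC)
    		f |= VM_EXEC;	/* the old definition always set this */
    	return f;
    }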
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 8e21ef15bd74..110295c5461d 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h | |||
@@ -164,14 +164,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val); | |||
164 | #define L_PTE_PRESENT (1 << 0) | 164 | #define L_PTE_PRESENT (1 << 0) |
165 | #define L_PTE_FILE (1 << 1) /* only when !PRESENT */ | 165 | #define L_PTE_FILE (1 << 1) /* only when !PRESENT */ |
166 | #define L_PTE_YOUNG (1 << 1) | 166 | #define L_PTE_YOUNG (1 << 1) |
167 | #define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */ | 167 | #define L_PTE_BUFFERABLE (1 << 2) /* obsolete, matches PTE */ |
168 | #define L_PTE_CACHEABLE (1 << 3) /* matches PTE */ | 168 | #define L_PTE_CACHEABLE (1 << 3) /* obsolete, matches PTE */ |
169 | #define L_PTE_USER (1 << 4) | 169 | #define L_PTE_DIRTY (1 << 6) |
170 | #define L_PTE_WRITE (1 << 5) | 170 | #define L_PTE_WRITE (1 << 7) |
171 | #define L_PTE_EXEC (1 << 6) | 171 | #define L_PTE_USER (1 << 8) |
172 | #define L_PTE_DIRTY (1 << 7) | 172 | #define L_PTE_EXEC (1 << 9) |
173 | #define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */ | 173 | #define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */ |
174 | 174 | ||
175 | /* | ||
176 | * These are the memory types, defined to be compatible with | ||
177 | * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB | ||
178 | */ | ||
179 | #define L_PTE_MT_UNCACHED (0x00 << 2) /* 0000 */ | ||
180 | #define L_PTE_MT_BUFFERABLE (0x01 << 2) /* 0001 */ | ||
181 | #define L_PTE_MT_WRITETHROUGH (0x02 << 2) /* 0010 */ | ||
182 | #define L_PTE_MT_WRITEBACK (0x03 << 2) /* 0011 */ | ||
183 | #define L_PTE_MT_MINICACHE (0x06 << 2) /* 0110 (sa1100, xscale) */ | ||
184 | #define L_PTE_MT_WRITEALLOC (0x07 << 2) /* 0111 */ | ||
185 | #define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 */ | ||
186 | #define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */ | ||
187 | #define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 */ | ||
188 | #define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */ | ||
189 | #define L_PTE_MT_MASK (0x0f << 2) | ||
190 | |||
175 | #ifndef __ASSEMBLY__ | 191 | #ifndef __ASSEMBLY__ |
176 | 192 | ||
177 | /* | 193 | /* |
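
[Annotation] The new L_PTE_MT_* values pack the memory type into a 4-bit field at bits 5:2 of the Linux PTE; the low two of those bits line up with the classic pre-ARMv6 C (cacheable) and B (bufferable) bits, which is what the XXCB comment refers to. A small decoding sketch (illustrative only, covering a subset of the types):

	/* illustration: pull the memory type back out of a Linux pte value */
	static const char *l_pte_mt_name(unsigned long pte)
	{
		switch (pte & L_PTE_MT_MASK) {
		case L_PTE_MT_UNCACHED:     return "strongly ordered";
		case L_PTE_MT_BUFFERABLE:   return "normal, uncached";
		case L_PTE_MT_WRITETHROUGH: return "writethrough";
		case L_PTE_MT_WRITEBACK:    return "writeback";
		case L_PTE_MT_WRITEALLOC:   return "writealloc";
		case L_PTE_MT_DEV_SHARED:   return "shared device";
		default:                    return "other device type";
		}
	}
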
@@ -180,23 +196,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val); | |||
180 | * as well as any architecture dependent bits like global/ASID and SMP | 196 | * as well as any architecture dependent bits like global/ASID and SMP |
181 | * shared mapping bits. | 197 | * shared mapping bits. |
182 | */ | 198 | */ |
183 | #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | 199 | #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG |
184 | #define _L_PTE_READ L_PTE_USER | L_PTE_EXEC | ||
185 | 200 | ||
186 | extern pgprot_t pgprot_user; | 201 | extern pgprot_t pgprot_user; |
187 | extern pgprot_t pgprot_kernel; | 202 | extern pgprot_t pgprot_kernel; |
188 | 203 | ||
189 | #define PAGE_NONE pgprot_user | 204 | #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) |
190 | #define PAGE_COPY __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ) | 205 | |
191 | #define PAGE_SHARED __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ | \ | 206 | #define PAGE_NONE pgprot_user |
192 | L_PTE_WRITE) | 207 | #define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE) |
193 | #define PAGE_READONLY __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ) | 208 | #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) |
194 | #define PAGE_KERNEL pgprot_kernel | 209 | #define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER) |
195 | 210 | #define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) | |
196 | #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT) | 211 | #define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER) |
197 | #define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ) | 212 | #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) |
198 | #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE) | 213 | #define PAGE_KERNEL pgprot_kernel |
199 | #define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ) | 214 | #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_kernel, L_PTE_EXEC) |
215 | |||
216 | #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT) | ||
217 | #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE) | ||
218 | #define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) | ||
219 | #define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) | ||
220 | #define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) | ||
221 | #define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) | ||
222 | #define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) | ||
200 | 223 | ||
201 | #endif /* __ASSEMBLY__ */ | 224 | #endif /* __ASSEMBLY__ */ |
202 | 225 | ||
@@ -212,19 +235,19 @@ extern pgprot_t pgprot_kernel; | |||
212 | #define __P001 __PAGE_READONLY | 235 | #define __P001 __PAGE_READONLY |
213 | #define __P010 __PAGE_COPY | 236 | #define __P010 __PAGE_COPY |
214 | #define __P011 __PAGE_COPY | 237 | #define __P011 __PAGE_COPY |
215 | #define __P100 __PAGE_READONLY | 238 | #define __P100 __PAGE_READONLY_EXEC |
216 | #define __P101 __PAGE_READONLY | 239 | #define __P101 __PAGE_READONLY_EXEC |
217 | #define __P110 __PAGE_COPY | 240 | #define __P110 __PAGE_COPY_EXEC |
218 | #define __P111 __PAGE_COPY | 241 | #define __P111 __PAGE_COPY_EXEC |
219 | 242 | ||
220 | #define __S000 __PAGE_NONE | 243 | #define __S000 __PAGE_NONE |
221 | #define __S001 __PAGE_READONLY | 244 | #define __S001 __PAGE_READONLY |
222 | #define __S010 __PAGE_SHARED | 245 | #define __S010 __PAGE_SHARED |
223 | #define __S011 __PAGE_SHARED | 246 | #define __S011 __PAGE_SHARED |
224 | #define __S100 __PAGE_READONLY | 247 | #define __S100 __PAGE_READONLY_EXEC |
225 | #define __S101 __PAGE_READONLY | 248 | #define __S101 __PAGE_READONLY_EXEC |
226 | #define __S110 __PAGE_SHARED | 249 | #define __S110 __PAGE_SHARED_EXEC |
227 | #define __S111 __PAGE_SHARED | 250 | #define __S111 __PAGE_SHARED_EXEC |
228 | 251 | ||
229 | #ifndef __ASSEMBLY__ | 252 | #ifndef __ASSEMBLY__ |
230 | /* | 253 | /* |
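
[Annotation] For context (outside this patch): the __Pxxx/__Sxxx entries populate the generic protection_map[] table in mm/mmap.c, indexed by the read/write/execute bits of a mapping request, with __P (private, copy-on-write) versus __S (MAP_SHARED) selected by the shared bit. The change above makes the execute bit select the new *_EXEC variants instead of folding execute into plain read. A sketch of the lookup, under the assumption VM_READ/VM_WRITE/VM_EXEC/VM_SHARED occupy bits 0..3 as in the generic code:

	/* sketch of how the generic mm code consumes these tables */
	static pgprot_t prot_for(unsigned long vm_flags)
	{
		pgprot_t table[16] = {
			__P000, __P001, __P010, __P011,
			__P100, __P101, __P110, __P111,
			__S000, __S001, __S010, __S011,
			__S100, __S101, __S110, __S111,
		};
		return table[vm_flags & 0x0f];
	}
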
@@ -286,8 +309,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } | |||
286 | /* | 309 | /* |
287 | * Mark the prot value as uncacheable and unbufferable. | 310 | * Mark the prot value as uncacheable and unbufferable. |
288 | */ | 311 | */ |
289 | #define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE)) | 312 | #define pgprot_noncached(prot) \ |
290 | #define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE) | 313 | __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED) |
314 | #define pgprot_writecombine(prot) \ | ||
315 | __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE) | ||
291 | 316 | ||
292 | #define pmd_none(pmd) (!pmd_val(pmd)) | 317 | #define pmd_none(pmd) (!pmd_val(pmd)) |
293 | #define pmd_present(pmd) (pmd_val(pmd)) | 318 | #define pmd_present(pmd) (pmd_val(pmd)) |
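
[Annotation] With the memory type now a multi-bit field rather than independent cacheable/bufferable bits, pgprot modifiers must replace the whole field instead of clearing individual bits, hence the mask-then-set form above. The same read-modify-write pattern spelled out as a helper (name invented for illustration):

	/* the mask-then-set pattern used by the new pgprot modifiers */
	static inline pgprot_t pgprot_set_mt(pgprot_t prot, unsigned long mt)
	{
		return __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | mt);
	}

	/* pgprot_writecombine(p) == pgprot_set_mt(p, L_PTE_MT_BUFFERABLE) */
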
@@ -320,11 +345,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) | |||
320 | #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) | 345 | #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) |
321 | 346 | ||
322 | /* | 347 | /* |
323 | * Permanent address of a page. We never have highmem, so this is trivial. | ||
324 | */ | ||
325 | #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) | ||
326 | |||
327 | /* | ||
328 | * Conversion functions: convert a page and protection to a page entry, | 348 | * Conversion functions: convert a page and protection to a page entry, |
329 | * and a page entry and page directory to the page they refer to. | 349 | * and a page entry and page directory to the page they refer to. |
330 | */ | 350 | */ |
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index b415c0e85458..73192618f1c2 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h | |||
@@ -54,7 +54,6 @@ | |||
54 | #define PSR_C_BIT 0x20000000 | 54 | #define PSR_C_BIT 0x20000000 |
55 | #define PSR_Z_BIT 0x40000000 | 55 | #define PSR_Z_BIT 0x40000000 |
56 | #define PSR_N_BIT 0x80000000 | 56 | #define PSR_N_BIT 0x80000000 |
57 | #define PCMASK 0 | ||
58 | 57 | ||
59 | /* | 58 | /* |
60 | * Groups of PSR bits | 59 | * Groups of PSR bits |
@@ -139,11 +138,7 @@ static inline int valid_user_regs(struct pt_regs *regs) | |||
139 | return 0; | 138 | return 0; |
140 | } | 139 | } |
141 | 140 | ||
142 | #define pc_pointer(v) \ | 141 | #define instruction_pointer(regs) (regs)->ARM_pc |
143 | ((v) & ~PCMASK) | ||
144 | |||
145 | #define instruction_pointer(regs) \ | ||
146 | (pc_pointer((regs)->ARM_pc)) | ||
147 | 142 | ||
148 | #ifdef CONFIG_SMP | 143 | #ifdef CONFIG_SMP |
149 | extern unsigned long profile_pc(struct pt_regs *regs); | 144 | extern unsigned long profile_pc(struct pt_regs *regs); |
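
[Annotation] PCMASK dates from 26-bit ARM, where the program counter shared its register with the condition flags and had to be masked before use. On a 32-bit-only kernel the mask is 0, so pc_pointer() was a no-op and instruction_pointer() collapses to reading ARM_pc directly. Caller-side usage is unchanged; for orientation:

	/* unchanged usage, e.g. in a fault-reporting path */
	void report_pc(struct pt_regs *regs)
	{
		printk(KERN_INFO "faulting PC: %08lx\n",
		       instruction_pointer(regs));
	}
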
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index 7bbf105463f1..a65413ba121d 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h | |||
@@ -209,6 +209,17 @@ struct meminfo { | |||
209 | struct membank bank[NR_BANKS]; | 209 | struct membank bank[NR_BANKS]; |
210 | }; | 210 | }; |
211 | 211 | ||
212 | #define for_each_nodebank(iter,mi,no) \ | ||
213 | for (iter = 0; iter < mi->nr_banks; iter++) \ | ||
214 | if (mi->bank[iter].node == no) | ||
215 | |||
216 | #define bank_pfn_start(bank) __phys_to_pfn((bank)->start) | ||
217 | #define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size) | ||
218 | #define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT) | ||
219 | #define bank_phys_start(bank) (bank)->start | ||
220 | #define bank_phys_end(bank) ((bank)->start + (bank)->size) | ||
221 | #define bank_phys_size(bank) (bank)->size | ||
222 | |||
212 | /* | 223 | /* |
213 | * Early command line parameters. | 224 | * Early command line parameters. |
214 | */ | 225 | */ |
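
[Annotation] The new for_each_nodebank() iterator and bank_* accessors centralise membank arithmetic that the mm init code previously open-coded from the start/size fields. A usage sketch (the function name is invented for illustration):

	/* illustration: count the pages a node contributes, bank by bank */
	static unsigned long node_pages(struct meminfo *mi, int node)
	{
		unsigned long pages = 0;
		int i;

		for_each_nodebank(i, mi, node)
			pages += bank_pfn_size(&mi->bank[i]);

		return pages;
	}
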
diff --git a/arch/arm/include/asm/sparsemem.h b/arch/arm/include/asm/sparsemem.h index 277158191a0d..00098615c6f0 100644 --- a/arch/arm/include/asm/sparsemem.h +++ b/arch/arm/include/asm/sparsemem.h | |||
@@ -3,8 +3,22 @@ | |||
3 | 3 | ||
4 | #include <asm/memory.h> | 4 | #include <asm/memory.h> |
5 | 5 | ||
6 | #define MAX_PHYSADDR_BITS 32 | 6 | /* |
7 | #define MAX_PHYSMEM_BITS 32 | 7 | * Two definitions are required for sparsemem: |
8 | #define SECTION_SIZE_BITS NODE_MEM_SIZE_BITS | 8 | * |
9 | * MAX_PHYSMEM_BITS: The number of physical address bits required | ||
10 | * to address the last byte of memory. | ||
11 | * | ||
12 | * SECTION_SIZE_BITS: The number of physical address bits to cover | ||
13 | * the maximum amount of memory in a section. | ||
14 | * | ||
15 | * Eg, if you have 2 banks of up to 64MB at 0x80000000, 0x84000000, | ||
16 | * then MAX_PHYSMEM_BITS is 32, SECTION_SIZE_BITS is 26. | ||
17 | * | ||
18 | * Define these in your mach/memory.h. | ||
19 | */ | ||
20 | #if !defined(SECTION_SIZE_BITS) || !defined(MAX_PHYSMEM_BITS) | ||
21 | #error Sparsemem is not supported on this platform | ||
22 | #endif | ||
9 | 23 | ||
10 | #endif | 24 | #endif |
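
[Annotation] A worked example matching the comment in the hunk above: two banks of up to 64MB at 0x80000000 and 0x84000000 need 32 address bits to reach the last byte of memory, and a 64MB section spans 26 bits, so such a platform's mach/memory.h would carry:

	/* hypothetical mach/memory.h for the layout described above */
	#define MAX_PHYSMEM_BITS	32	/* last byte below 1 << 32 */
	#define SECTION_SIZE_BITS	26	/* 64MB = 1 << 26 per section */
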
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 514af792a598..7aad78420f18 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h | |||
@@ -43,11 +43,6 @@ | |||
43 | #define CR_XP (1 << 23) /* Extended page tables */ | 43 | #define CR_XP (1 << 23) /* Extended page tables */ |
44 | #define CR_VE (1 << 24) /* Vectored interrupts */ | 44 | #define CR_VE (1 << 24) /* Vectored interrupts */ |
45 | 45 | ||
46 | #define CPUID_ID 0 | ||
47 | #define CPUID_CACHETYPE 1 | ||
48 | #define CPUID_TCM 2 | ||
49 | #define CPUID_TLBTYPE 3 | ||
50 | |||
51 | /* | 46 | /* |
52 | * This is used to ensure the compiler did actually allocate the register we | 47 | * This is used to ensure the compiler did actually allocate the register we |
53 | * asked it for some inline assembly sequences. Apparently we can't trust | 48 | * asked it for some inline assembly sequences. Apparently we can't trust |
@@ -61,36 +56,8 @@ | |||
61 | #ifndef __ASSEMBLY__ | 56 | #ifndef __ASSEMBLY__ |
62 | 57 | ||
63 | #include <linux/linkage.h> | 58 | #include <linux/linkage.h> |
64 | #include <linux/stringify.h> | ||
65 | #include <linux/irqflags.h> | 59 | #include <linux/irqflags.h> |
66 | 60 | ||
67 | #ifdef CONFIG_CPU_CP15 | ||
68 | #define read_cpuid(reg) \ | ||
69 | ({ \ | ||
70 | unsigned int __val; \ | ||
71 | asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \ | ||
72 | : "=r" (__val) \ | ||
73 | : \ | ||
74 | : "cc"); \ | ||
75 | __val; \ | ||
76 | }) | ||
77 | #else | ||
78 | extern unsigned int processor_id; | ||
79 | #define read_cpuid(reg) (processor_id) | ||
80 | #endif | ||
81 | |||
82 | /* | ||
83 | * The CPU ID never changes at run time, so we might as well tell the | ||
84 | * compiler that it's constant. Use this function to read the CPU ID | ||
85 | * rather than directly reading processor_id or read_cpuid() directly. | ||
86 | */ | ||
87 | static inline unsigned int read_cpuid_id(void) __attribute_const__; | ||
88 | |||
89 | static inline unsigned int read_cpuid_id(void) | ||
90 | { | ||
91 | return read_cpuid(CPUID_ID); | ||
92 | } | ||
93 | |||
94 | #define __exception __attribute__((section(".exception.text"))) | 61 | #define __exception __attribute__((section(".exception.text"))) |
95 | 62 | ||
96 | struct thread_info; | 63 | struct thread_info; |
@@ -131,31 +98,6 @@ extern void cpu_init(void); | |||
131 | void arm_machine_restart(char mode); | 98 | void arm_machine_restart(char mode); |
132 | extern void (*arm_pm_restart)(char str); | 99 | extern void (*arm_pm_restart)(char str); |
133 | 100 | ||
134 | /* | ||
135 | * Intel's XScale3 core supports some v6 features (supersections, L2) | ||
136 | * but advertises itself as v5 as it does not support the v6 ISA. For | ||
137 | * this reason, we need a way to explicitly test for this type of CPU. | ||
138 | */ | ||
139 | #ifndef CONFIG_CPU_XSC3 | ||
140 | #define cpu_is_xsc3() 0 | ||
141 | #else | ||
142 | static inline int cpu_is_xsc3(void) | ||
143 | { | ||
144 | extern unsigned int processor_id; | ||
145 | |||
146 | if ((processor_id & 0xffffe000) == 0x69056000) | ||
147 | return 1; | ||
148 | |||
149 | return 0; | ||
150 | } | ||
151 | #endif | ||
152 | |||
153 | #if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3) | ||
154 | #define cpu_is_xscale() 0 | ||
155 | #else | ||
156 | #define cpu_is_xscale() 1 | ||
157 | #endif | ||
158 | |||
159 | #define UDBG_UNDEFINED (1 << 0) | 101 | #define UDBG_UNDEFINED (1 << 0) |
160 | #define UDBG_SYSCALL (1 << 1) | 102 | #define UDBG_SYSCALL (1 << 1) |
161 | #define UDBG_BADABORT (1 << 2) | 103 | #define UDBG_BADABORT (1 << 2) |
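
[Annotation] The CPUID_* constants, read_cpuid()/read_cpuid_id() and the cpu_is_xscale()/cpu_is_xsc3() tests are deleted here rather than changed; they presumably moved into a dedicated header (asm/cputype.h, mirroring the asm/cachetype.h split visible in the mmu_context.h hunk above), so callers keep the same interface. Assumed caller-side usage after the split:

	/* assumption: header name per the cputype/cachetype split,
	 * which is not itself shown in this diff */
	#include <asm/cputype.h>

	static int is_xscale_family(void)
	{
		return cpu_is_xscale() || cpu_is_xsc3();
	}
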
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index e56fa48e4ae7..68b9ec82a37f 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h | |||
@@ -98,7 +98,7 @@ static inline struct thread_info *current_thread_info(void) | |||
98 | } | 98 | } |
99 | 99 | ||
100 | #define thread_saved_pc(tsk) \ | 100 | #define thread_saved_pc(tsk) \ |
101 | ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc))) | 101 | ((unsigned long)(task_thread_info(tsk)->cpu_context.pc)) |
102 | #define thread_saved_fp(tsk) \ | 102 | #define thread_saved_fp(tsk) \ |
103 | ((unsigned long)(task_thread_info(tsk)->cpu_context.fp)) | 103 | ((unsigned long)(task_thread_info(tsk)->cpu_context.fp)) |
104 | 104 | ||
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index d0f51ff900b5..e98ec60b3400 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h | |||
@@ -225,7 +225,7 @@ do { \ | |||
225 | 225 | ||
226 | #define __get_user_asm_byte(x,addr,err) \ | 226 | #define __get_user_asm_byte(x,addr,err) \ |
227 | __asm__ __volatile__( \ | 227 | __asm__ __volatile__( \ |
228 | "1: ldrbt %1,[%2],#0\n" \ | 228 | "1: ldrbt %1,[%2]\n" \ |
229 | "2:\n" \ | 229 | "2:\n" \ |
230 | " .section .fixup,\"ax\"\n" \ | 230 | " .section .fixup,\"ax\"\n" \ |
231 | " .align 2\n" \ | 231 | " .align 2\n" \ |
@@ -261,7 +261,7 @@ do { \ | |||
261 | 261 | ||
262 | #define __get_user_asm_word(x,addr,err) \ | 262 | #define __get_user_asm_word(x,addr,err) \ |
263 | __asm__ __volatile__( \ | 263 | __asm__ __volatile__( \ |
264 | "1: ldrt %1,[%2],#0\n" \ | 264 | "1: ldrt %1,[%2]\n" \ |
265 | "2:\n" \ | 265 | "2:\n" \ |
266 | " .section .fixup,\"ax\"\n" \ | 266 | " .section .fixup,\"ax\"\n" \ |
267 | " .align 2\n" \ | 267 | " .align 2\n" \ |
@@ -306,7 +306,7 @@ do { \ | |||
306 | 306 | ||
307 | #define __put_user_asm_byte(x,__pu_addr,err) \ | 307 | #define __put_user_asm_byte(x,__pu_addr,err) \ |
308 | __asm__ __volatile__( \ | 308 | __asm__ __volatile__( \ |
309 | "1: strbt %1,[%2],#0\n" \ | 309 | "1: strbt %1,[%2]\n" \ |
310 | "2:\n" \ | 310 | "2:\n" \ |
311 | " .section .fixup,\"ax\"\n" \ | 311 | " .section .fixup,\"ax\"\n" \ |
312 | " .align 2\n" \ | 312 | " .align 2\n" \ |
@@ -339,7 +339,7 @@ do { \ | |||
339 | 339 | ||
340 | #define __put_user_asm_word(x,__pu_addr,err) \ | 340 | #define __put_user_asm_word(x,__pu_addr,err) \ |
341 | __asm__ __volatile__( \ | 341 | __asm__ __volatile__( \ |
342 | "1: strt %1,[%2],#0\n" \ | 342 | "1: strt %1,[%2]\n" \ |
343 | "2:\n" \ | 343 | "2:\n" \ |
344 | " .section .fixup,\"ax\"\n" \ | 344 | " .section .fixup,\"ax\"\n" \ |
345 | " .align 2\n" \ | 345 | " .align 2\n" \ |
@@ -365,7 +365,7 @@ do { \ | |||
365 | #define __put_user_asm_dword(x,__pu_addr,err) \ | 365 | #define __put_user_asm_dword(x,__pu_addr,err) \ |
366 | __asm__ __volatile__( \ | 366 | __asm__ __volatile__( \ |
367 | "1: strt " __reg_oper1 ", [%1], #4\n" \ | 367 | "1: strt " __reg_oper1 ", [%1], #4\n" \ |
368 | "2: strt " __reg_oper0 ", [%1], #0\n" \ | 368 | "2: strt " __reg_oper0 ", [%1]\n" \ |
369 | "3:\n" \ | 369 | "3:\n" \ |
370 | " .section .fixup,\"ax\"\n" \ | 370 | " .section .fixup,\"ax\"\n" \ |
371 | " .align 2\n" \ | 371 | " .align 2\n" \ |
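
[Annotation] The ldrbt/ldrt/strbt/strt changes drop a redundant ",#0": the T-suffixed (unprivileged) instructions always use post-indexed addressing, and the bare "[reg]" form assembles to the same zero post-index, so behaviour is identical. The surrounding fixup machinery is untouched; a stripped-down sketch of the pattern, for orientation (not the kernel macro itself):

	#include <linux/errno.h>	/* for -EFAULT */

	/* minimal sketch of the unprivileged-load-with-fixup pattern */
	#define sketch_get_user_byte(x, addr, err)			\
		__asm__ __volatile__(					\
		"1:	ldrbt	%1, [%2]\n"	/* unprivileged load */	\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"3:	mov	%0, %3\n"	/* err = -EFAULT */	\
		"	mov	%1, #0\n"				\
		"	b	2b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 3b\n"	/* fault at 1b -> 3b */	\
		"	.previous"					\
		: "+r" (err), "=&r" (x)					\
		: "r" (addr), "i" (-EFAULT)				\
		: "cc")
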
diff --git a/arch/arm/include/asm/vga.h b/arch/arm/include/asm/vga.h index 6a3cd2a2f670..250a4dd00630 100644 --- a/arch/arm/include/asm/vga.h +++ b/arch/arm/include/asm/vga.h | |||
@@ -1,8 +1,8 @@ | |||
1 | #ifndef ASMARM_VGA_H | 1 | #ifndef ASMARM_VGA_H |
2 | #define ASMARM_VGA_H | 2 | #define ASMARM_VGA_H |
3 | 3 | ||
4 | #include <linux/io.h> | ||
4 | #include <mach/hardware.h> | 5 | #include <mach/hardware.h> |
5 | #include <asm/io.h> | ||
6 | 6 | ||
7 | #define VGA_MAP_MEM(x,s) (PCIMEM_BASE + (x)) | 7 | #define VGA_MAP_MEM(x,s) (PCIMEM_BASE + (x)) |
8 | 8 | ||