Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/bug.h          |   2
-rw-r--r--  arch/arm/include/asm/cacheflush.h   |  90
-rw-r--r--  arch/arm/include/asm/cachetype.h    |  52
-rw-r--r--  arch/arm/include/asm/cputype.h      |  64
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  | 378
-rw-r--r--  arch/arm/include/asm/futex.h        | 124
-rw-r--r--  arch/arm/include/asm/irq.h          |   4
-rw-r--r--  arch/arm/include/asm/kprobes.h      |   1
-rw-r--r--  arch/arm/include/asm/mc146818rtc.h  |   2
-rw-r--r--  arch/arm/include/asm/memory.h       |  40
-rw-r--r--  arch/arm/include/asm/mmu_context.h  |   1
-rw-r--r--  arch/arm/include/asm/pgtable.h      |   5
-rw-r--r--  arch/arm/include/asm/ptrace.h       |   7
-rw-r--r--  arch/arm/include/asm/setup.h        |  11
-rw-r--r--  arch/arm/include/asm/sparsemem.h    |  20
-rw-r--r--  arch/arm/include/asm/system.h       |  58
-rw-r--r--  arch/arm/include/asm/thread_info.h  |   2
-rw-r--r--  arch/arm/include/asm/uaccess.h      |  10
-rw-r--r--  arch/arm/include/asm/vga.h          |   2
19 files changed, 435 insertions(+), 438 deletions(-)
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7b62351f097..4d88425a416 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -12,7 +12,7 @@ extern void __bug(const char *file, int line) __attribute__((noreturn));
 #else
 
 /* this just causes an oops */
-#define BUG()		(*(int *)0 = 0)
+#define BUG()		do { *(int *)0 = 0; } while (1)
 
 #endif
 
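A note on the BUG() change above: the do { ... } while (1) form turns the faulting store into a statement the compiler knows never returns, so everything after BUG() is treated as unreachable. A minimal sketch of the effect (not part of the patch, and assuming the BUG() definition it introduces; must_be_positive() is a made-up caller):

	static int must_be_positive(int x)
	{
		if (x > 0)
			return x;
		BUG();	/* the endless loop means no return is needed here */
	}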
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 9073d9c6567..de6c59f814a 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -444,94 +444,4 @@ static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
 	dmac_inv_range(start, start + size);
 }
 
-#define __cacheid_present(val)			(val != read_cpuid(CPUID_ID))
-#define __cacheid_type_v7(val)			((val & (7 << 29)) == (4 << 29))
-
-#define __cacheid_vivt_prev7(val)		((val & (15 << 25)) != (14 << 25))
-#define __cacheid_vipt_prev7(val)		((val & (15 << 25)) == (14 << 25))
-#define __cacheid_vipt_nonaliasing_prev7(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25))
-#define __cacheid_vipt_aliasing_prev7(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
-
-#define __cacheid_vivt(val)			(__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val))
-#define __cacheid_vipt(val)			(__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val))
-#define __cacheid_vipt_nonaliasing(val)		(__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val))
-#define __cacheid_vipt_aliasing(val)		(__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val))
-#define __cacheid_vivt_asid_tagged_instr(val)	(__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0)
-
-#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
-/*
- * VIVT caches only
- */
-#define cache_is_vivt()			1
-#define cache_is_vipt()			0
-#define cache_is_vipt_nonaliasing()	0
-#define cache_is_vipt_aliasing()	0
-#define icache_is_vivt_asid_tagged()	0
-
-#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
-/*
- * VIPT caches only
- */
-#define cache_is_vivt()			0
-#define cache_is_vipt()			1
-#define cache_is_vipt_nonaliasing()					\
-	({								\
-		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-		__cacheid_vipt_nonaliasing(__val);			\
-	})
-
-#define cache_is_vipt_aliasing()					\
-	({								\
-		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-		__cacheid_vipt_aliasing(__val);				\
-	})
-
-#define icache_is_vivt_asid_tagged()					\
-	({								\
-		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-		__cacheid_vivt_asid_tagged_instr(__val);		\
-	})
-
-#else
-/*
- * VIVT or VIPT caches.  Note that this is unreliable since ARM926
- * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test.
- * There's no way to tell from the CacheType register what type (!)
- * the cache is.
- */
-#define cache_is_vivt()							\
-	({								\
-		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-		(!__cacheid_present(__val)) || __cacheid_vivt(__val);	\
-	})
-
-#define cache_is_vipt()							\
-	({								\
-		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-		__cacheid_present(__val) && __cacheid_vipt(__val);	\
-	})
-
-#define cache_is_vipt_nonaliasing()					\
-	({								\
-		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-		__cacheid_present(__val) &&				\
-			__cacheid_vipt_nonaliasing(__val);		\
-	})
-
-#define cache_is_vipt_aliasing()					\
-	({								\
-		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-		__cacheid_present(__val) &&				\
-			__cacheid_vipt_aliasing(__val);			\
-	})
-
-#define icache_is_vivt_asid_tagged()					\
-	({								\
-		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
-		__cacheid_present(__val) &&				\
-			__cacheid_vivt_asid_tagged_instr(__val);	\
-	})
-
-#endif
-
 #endif
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
new file mode 100644
index 00000000000..d3a4c2cb9f2
--- /dev/null
+++ b/arch/arm/include/asm/cachetype.h
@@ -0,0 +1,52 @@
+#ifndef __ASM_ARM_CACHETYPE_H
+#define __ASM_ARM_CACHETYPE_H
+
+#define CACHEID_VIVT			(1 << 0)
+#define CACHEID_VIPT_NONALIASING	(1 << 1)
+#define CACHEID_VIPT_ALIASING		(1 << 2)
+#define CACHEID_VIPT			(CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
+#define CACHEID_ASID_TAGGED		(1 << 3)
+
+extern unsigned int cacheid;
+
+#define cache_is_vivt()			cacheid_is(CACHEID_VIVT)
+#define cache_is_vipt()			cacheid_is(CACHEID_VIPT)
+#define cache_is_vipt_nonaliasing()	cacheid_is(CACHEID_VIPT_NONALIASING)
+#define cache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_ALIASING)
+#define icache_is_vivt_asid_tagged()	cacheid_is(CACHEID_ASID_TAGGED)
+
+/*
+ * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture.
+ * Mask out support which will never be present on newer CPUs.
+ * - v6+ is never VIVT
+ * - v7+ VIPT never aliases
+ */
+#if __LINUX_ARM_ARCH__ >= 7
+#define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED)
+#elif __LINUX_ARM_ARCH__ >= 6
+#define __CACHEID_ARCH_MIN	(~CACHEID_VIVT)
+#else
+#define __CACHEID_ARCH_MIN	(~0)
+#endif
+
+/*
+ * Mask out support which isn't configured
+ */
+#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
+#define __CACHEID_ALWAYS	(CACHEID_VIVT)
+#define __CACHEID_NEVER		(~CACHEID_VIVT)
+#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
+#define __CACHEID_ALWAYS	(0)
+#define __CACHEID_NEVER		(CACHEID_VIVT)
+#else
+#define __CACHEID_ALWAYS	(0)
+#define __CACHEID_NEVER		(0)
+#endif
+
+static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
+{
+	return (__CACHEID_ALWAYS & mask) |
+	       (~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
+}
+
+#endif
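Because __CACHEID_ALWAYS, __CACHEID_NEVER and __CACHEID_ARCH_MIN are compile-time constants, cacheid_is() folds to a constant whenever the config and architecture already decide the answer, and only consults the runtime cacheid variable otherwise. A standalone sketch of the folding for a VIVT-only build (the config choice here is an assumption for illustration):

	#define CACHEID_VIVT		(1 << 0)
	#define CACHEID_VIPT_ALIASING	(1 << 2)

	/* as if CONFIG_CPU_CACHE_VIVT were the only cache type configured */
	#define __CACHEID_ALWAYS	(CACHEID_VIVT)
	#define __CACHEID_NEVER		(~CACHEID_VIVT)
	#define __CACHEID_ARCH_MIN	(~0)

	unsigned int cacheid;	/* set once at boot, as in the patch */

	static inline unsigned int cacheid_is(unsigned int mask)
	{
		return (__CACHEID_ALWAYS & mask) |
		       (~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
	}

	/* cacheid_is(CACHEID_VIVT) is always 1 here, and
	 * cacheid_is(CACHEID_VIPT_ALIASING) is provably 0, so the compiler
	 * can delete both branches without ever reading cacheid. */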
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
new file mode 100644
index 00000000000..7b9d27e749b
--- /dev/null
+++ b/arch/arm/include/asm/cputype.h
@@ -0,0 +1,64 @@
+#ifndef __ASM_ARM_CPUTYPE_H
+#define __ASM_ARM_CPUTYPE_H
+
+#include <linux/stringify.h>
+
+#define CPUID_ID	0
+#define CPUID_CACHETYPE	1
+#define CPUID_TCM	2
+#define CPUID_TLBTYPE	3
+
+#ifdef CONFIG_CPU_CP15
+#define read_cpuid(reg)						\
+	({							\
+		unsigned int __val;				\
+		asm("mrc p15, 0, %0, c0, c0, " __stringify(reg)	\
+		    : "=r" (__val)				\
+		    :						\
+		    : "cc");					\
+		__val;						\
+	})
+#else
+extern unsigned int processor_id;
+#define read_cpuid(reg) (processor_id)
+#endif
+
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant.  Use this function rather than reading
+ * processor_id or calling read_cpuid() directly.
+ */
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+	return read_cpuid(CPUID_ID);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+{
+	return read_cpuid(CPUID_CACHETYPE);
+}
+
+/*
+ * Intel's XScale3 core supports some v6 features (supersections, L2)
+ * but advertises itself as v5 as it does not support the v6 ISA.  For
+ * this reason, we need a way to explicitly test for this type of CPU.
+ */
+#ifndef CONFIG_CPU_XSC3
+#define cpu_is_xsc3()	0
+#else
+static inline int cpu_is_xsc3(void)
+{
+	if ((read_cpuid_id() & 0xffffe000) == 0x69056000)
+		return 1;
+
+	return 0;
+}
+#endif
+
+#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
+#define cpu_is_xscale()	0
+#else
+#define cpu_is_xscale()	1
+#endif
+
+#endif
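read_cpuid_id() returns the architected main ID register (MIDR), which packs fixed fields: implementer in bits 31:24, part number in 15:4 and revision in 3:0. The cpu_is_xsc3() mask above is effectively matching the implementer and part bits. A rough sketch of decoding those fields (not from the patch; the function name is made up):

	static int cpu_is_intel_part(void)
	{
		unsigned int midr = read_cpuid_id();
		unsigned int implementer = (midr >> 24) & 0xff;	/* 0x41 = ARM, 0x69 = Intel */

		return implementer == 0x69;
	}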
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7b95d205839..1cb8602dd9d 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -104,15 +104,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  * Dummy noncoherent implementation. We don't provide a dma_cache_sync
  * function so drivers using this API are highlighted with build warnings.
  */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+		dma_addr_t *handle, gfp_t gfp)
 {
 	return NULL;
 }
 
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t handle)
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t handle)
 {
 }
 
@@ -127,8 +126,7 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
  * return the CPU-viewed address, and sets @handle to be the
  * device-viewed address.
  */
-extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
 
 /**
  * dma_free_coherent - free memory allocated by dma_alloc_coherent
@@ -143,9 +141,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
  * References to memory and mappings associated with cpu_addr/handle
  * during and after this call executing are illegal.
  */
-extern void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t handle);
+extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
 
 /**
  * dma_mmap_coherent - map a coherent DMA allocation into user space
@@ -159,8 +155,8 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
  * into user space. The coherent DMA buffer must not be freed by the
  * driver until the user space mapping has been released.
  */
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+		void *, dma_addr_t, size_t);
 
 
 /**
@@ -174,14 +170,94 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
  * return the CPU-viewed address, and sets @handle to be the
  * device-viewed address.
  */
-extern void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
+		gfp_t);
 
 #define dma_free_writecombine(dev,size,cpu_addr,handle) \
 	dma_free_coherent(dev,size,cpu_addr,handle)
 
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
+		void *, dma_addr_t, size_t);
+
+
+#ifdef CONFIG_DMABOUNCE
+/*
+ * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+ *
+ * On the SA-1111, a bug limits DMA to only certain regions of RAM.
+ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
+ * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
+ *
+ * The following are helper functions used by the dmabounce subsystem
+ *
+ */
+
+/**
+ * dmabounce_register_dev
+ *
+ * @dev: valid struct device pointer
+ * @small_buf_size: size of buffers to use with small buffer pool
+ * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ *
+ * This function should be called by low-level platform code to register
+ * a device as requiring DMA buffer bouncing. The function will allocate
+ * appropriate DMA pools for the device.
+ *
+ */
+extern int dmabounce_register_dev(struct device *, unsigned long,
+		unsigned long);
+
+/**
+ * dmabounce_unregister_dev
+ *
+ * @dev: valid struct device pointer
+ *
+ * This function should be called by low-level platform code when a device
+ * that was previously registered with dmabounce_register_dev is removed
+ * from the system.
+ *
+ */
+extern void dmabounce_unregister_dev(struct device *);
+
+/**
+ * dma_needs_bounce
+ *
+ * @dev: valid struct device pointer
+ * @dma_handle: dma_handle of unbounced buffer
+ * @size: size of region being mapped
+ *
+ * Platforms that utilize the dmabounce mechanism must implement
+ * this function.
+ *
+ * The dmabounce routines call this function whenever a dma-mapping
+ * is requested to determine whether a given buffer needs to be bounced
+ * or not. The function must return 0 if the buffer is OK for
+ * DMA access and 1 if the buffer needs to be bounced.
+ *
+ */
+extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
+
+/*
+ * The DMA API, implemented by dmabounce.c. See below for descriptions.
+ */
+extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+		enum dma_data_direction);
+extern dma_addr_t dma_map_page(struct device *, struct page *,
+		unsigned long, size_t, enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+		enum dma_data_direction);
+
+/*
+ * Private functions
+ */
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
+		size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
+		size_t, enum dma_data_direction);
+#else
+#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
+#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
 
 
 /**
@@ -198,19 +274,16 @@ int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
  * can regain ownership by calling dma_unmap_single() or
  * dma_sync_single_for_cpu().
  */
-#ifndef CONFIG_DMABOUNCE
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-		enum dma_data_direction dir)
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!arch_is_coherent())
 		dma_cache_maint(cpu_addr, size, dir);
 
 	return virt_to_dma(dev, cpu_addr);
 }
-#else
-extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
-#endif
 
 /**
  * dma_map_page - map a portion of a page for streaming DMA
@@ -224,23 +297,25 @@ extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_d
  * or written back.
  *
  * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_page() or
- * dma_sync_single_for_cpu().
+ * can regain ownership by calling dma_unmap_page().
  */
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size,
-	enum dma_data_direction dir)
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+	unsigned long offset, size_t size, enum dma_data_direction dir)
 {
-	return dma_map_single(dev, page_address(page) + offset, size, dir);
+	BUG_ON(!valid_dma_direction(dir));
+
+	if (!arch_is_coherent())
+		dma_cache_maint(page_address(page) + offset, size, dir);
+
+	return page_to_dma(dev, page) + offset;
 }
 
 /**
  * dma_unmap_single - unmap a single buffer previously mapped
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
+ * @size: size of buffer (same as passed to dma_map_single)
+ * @dir: DMA transfer direction (same as passed to dma_map_single)
  *
  * Unmap a single streaming mode DMA translation. The handle and size
  * must match what was provided in the previous dma_map_single() call.
@@ -249,108 +324,34 @@ dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
 {
 	/* nothing to do */
 }
-#else
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#endif
+#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
  *
- * Unmap a single streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_single() call.
+ * Unmap a page streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_page() call.
  * All other usages are undefined.
  *
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
-	enum dma_data_direction dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
 {
 	dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above dma_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for dma_map_single are
- * the same here.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt;
-
-		sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
-		virt = sg_virt(sg);
-
-		if (!arch_is_coherent())
-			dma_cache_maint(virt, sg->length, dir);
-	}
-
-	return nents;
-}
-#else
-extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-#endif
-
-/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Unmap a set of streaming mode DMA translations.
- * Again, CPU read rules concerning calls here are the same as for
- * dma_unmap_single() above.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction dir)
-{
-
-	/* nothing to do */
-}
-#else
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-#endif
-
-
-/**
  * dma_sync_single_range_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @handle: DMA address of buffer
@@ -368,145 +369,52 @@ extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_da
  * must first the perform a dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t handle, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	BUG_ON(!valid_dma_direction(dir));
+
+	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
 }
 
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t handle, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
+	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
+		return;
+
 	if (!arch_is_coherent())
 		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
-#else
-extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-#endif
 
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
 }
 
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
 }
 
-
-/**
- * dma_sync_sg_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as dma_sync_single_for_* but for a scatter-gather list,
- * same rules and usage.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt = sg_virt(sg);
-		if (!arch_is_coherent())
-			dma_cache_maint(virt, sg->length, dir);
-	}
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt = sg_virt(sg);
-		if (!arch_is_coherent())
-			dma_cache_maint(virt, sg->length, dir);
-	}
-}
-#else
-extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
-#endif
-
-#ifdef CONFIG_DMABOUNCE
-/*
- * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- *
- * This function should be called by low-level platform code to register
- * a device as requireing DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- *
- */
-extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
-
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
+/*
+ * The scatter list versions of the above methods.
  */
-extern void dmabounce_unregister_dev(struct device *);
+extern int dma_map_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
 
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#endif /* CONFIG_DMABOUNCE */
 
 #endif /* __KERNEL__ */
 #endif
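With the reshuffle above, the non-bounced inline paths and the dmabounce externs share one set of declarations, and typical streaming use of the API from a driver is unchanged. A hedged sketch of that usage (not from the patch; dev, buf and len are assumed to come from the surrounding driver):

	static void send_buffer(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		/* ... program the device with "handle", run the transfer ... */
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	}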
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 6a332a9f099..9ee743b95de 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -1,6 +1,124 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
+#ifndef _ASM_ARM_FUTEX_H
+#define _ASM_ARM_FUTEX_H
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_SMP
 
 #include <asm-generic/futex.h>
 
-#endif
+#else /* !SMP, we can work around lack of atomic ops by disabling preemption */
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
+	__asm__ __volatile__(					\
+	"1:	ldrt	%1, [%2]\n"				\
+	"	" insn "\n"					\
+	"2:	strt	%0, [%2]\n"				\
+	"	mov	%0, #0\n"				\
+	"3:\n"							\
+	"	.section __ex_table,\"a\"\n"			\
+	"	.align	3\n"					\
+	"	.long	1b, 4f, 2b, 4f\n"			\
+	"	.previous\n"					\
+	"	.section .fixup,\"ax\"\n"			\
+	"4:	mov	%0, %4\n"				\
+	"	b	3b\n"					\
+	"	.previous"					\
+	: "=&r" (ret), "=&r" (oldval)				\
+	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
+	: "cc", "memory")
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+	int op = (encoded_op >> 28) & 7;
+	int cmp = (encoded_op >> 24) & 15;
+	int oparg = (encoded_op << 8) >> 20;
+	int cmparg = (encoded_op << 20) >> 20;
+	int oldval = 0, ret;
+
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+		oparg = 1 << oparg;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	pagefault_disable();	/* implies preempt_disable() */
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("mov	%0, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("add	%0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("orr	%0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("and	%0, %1, %3", ret, oldval, uaddr, ~oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("eor	%0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	pagefault_enable();	/* subsumes preempt_enable() */
+
+	if (!ret) {
+		switch (cmp) {
+		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+		default: ret = -ENOSYS;
+		}
+	}
+	return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+	int val;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	pagefault_disable();	/* implies preempt_disable() */
+
+	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+	"1:	ldrt	%0, [%3]\n"
+	"	teq	%0, %1\n"
+	"2:	streqt	%2, [%3]\n"
+	"3:\n"
+	"	.section __ex_table,\"a\"\n"
+	"	.align	3\n"
+	"	.long	1b, 4f, 2b, 4f\n"
+	"	.previous\n"
+	"	.section .fixup,\"ax\"\n"
+	"4:	mov	%0, %4\n"
+	"	b	3b\n"
+	"	.previous"
+	: "=&r" (val)
+	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+	: "cc", "memory");
+
+	pagefault_enable();	/* subsumes preempt_enable() */
+
+	return val;
+}
+
+#endif /* !SMP */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_ARM_FUTEX_H */
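For reference, the encoded_op that the new futex_atomic_op_inuser() decodes packs four fields: op in bits 31:28, cmp in 27:24, oparg in 23:12 and cmparg in 11:0, the latter two sign-extended by the shift pairs in the code above. A sketch of building one (this mirrors the FUTEX_OP() packing in linux/futex.h; uaddr and the chosen values are illustrative assumptions):

	/* "atomically add 1 to *uaddr; report whether the old value was > 0" */
	int encoded_op = (FUTEX_OP_ADD << 28) | (FUTEX_OP_CMP_GT << 24)
		       | ((1 & 0xfff) << 12) | (0 & 0xfff);
	int ret = futex_atomic_op_inuser(encoded_op, uaddr);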
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index d6786090d02..a0009aa5d15 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -22,6 +22,10 @@
 #ifndef __ASSEMBLY__
 struct irqaction;
 extern void migrate_irqs(void);
+
+extern void asm_do_IRQ(unsigned int, struct pt_regs *);
+void init_IRQ(void);
+
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index a5d0d99ad38..bb8a19bd582 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -61,7 +61,6 @@ struct kprobe_ctlblk {
 void arch_remove_kprobe(struct kprobe *);
 void kretprobe_trampoline(void);
 
-int kprobe_trap_handler(struct pt_regs *regs, unsigned int instr);
 int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
 int kprobe_exceptions_notify(struct notifier_block *self,
 		unsigned long val, void *data);
diff --git a/arch/arm/include/asm/mc146818rtc.h b/arch/arm/include/asm/mc146818rtc.h
index e1ca48a9e97..6b884d2b0b6 100644
--- a/arch/arm/include/asm/mc146818rtc.h
+++ b/arch/arm/include/asm/mc146818rtc.h
@@ -4,8 +4,8 @@
 #ifndef _ASM_MC146818RTC_H
 #define _ASM_MC146818RTC_H
 
+#include <linux/io.h>
 #include <mach/irqs.h>
-#include <asm/io.h>
 
 #ifndef RTC_PORT
 #define RTC_PORT(x)	(0x70 + (x))
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index bf7c737c922..809ff9ab853 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -13,30 +13,27 @@
 #ifndef __ASM_ARM_MEMORY_H
 #define __ASM_ARM_MEMORY_H
 
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <mach/memory.h>
+#include <asm/sizes.h>
+
 /*
  * Allow for constants defined here to be used from assembly code
  * by prepending the UL suffix only with actual C code compilation.
  */
-#ifndef __ASSEMBLY__
-#define UL(x) (x##UL)
-#else
-#define UL(x) (x)
-#endif
-
-#include <linux/compiler.h>
-#include <mach/memory.h>
-#include <asm/sizes.h>
+#define UL(x) _AC(x, UL)
 
 #ifdef CONFIG_MMU
 
-#ifndef TASK_SIZE
 /*
+ * PAGE_OFFSET - the virtual address of the start of the kernel image
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
-#define TASK_SIZE		UL(0xbf000000)
-#define TASK_UNMAPPED_BASE	UL(0x40000000)
-#endif
+#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
+#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
+#define TASK_UNMAPPED_BASE	(UL(CONFIG_PAGE_OFFSET) / 3)
 
 /*
  * The maximum size of a 26-bit user space task.
@@ -44,13 +41,6 @@
 #define TASK_SIZE_26		UL(0x04000000)
 
 /*
- * Page offset: 3GB
- */
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET		UL(0xc0000000)
-#endif
-
-/*
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
@@ -147,17 +137,11 @@
 
 #ifndef arch_adjust_zones
 #define arch_adjust_zones(node,size,holes) do { } while (0)
+#elif !defined(CONFIG_ZONE_DMA)
+#error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
 #endif
 
 /*
- * Amount of memory reserved for the vmalloc() area, and minimum
- * address for vmalloc mappings.
- */
-extern unsigned long vmalloc_reserve;
-
-#define VMALLOC_MIN		(void *)(VMALLOC_END - vmalloc_reserve)
-
-/*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
  *
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a301e446007..0559f37c2a2 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
 
 #include <linux/compiler.h>
 #include <asm/cacheflush.h>
+#include <asm/cachetype.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 8e21ef15bd7..ec630109a8c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -320,11 +320,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
 
 /*
- * Permanent address of a page. We never have highmem, so this is trivial.
- */
-#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))
-
-/*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index b415c0e8545..73192618f1c 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -54,7 +54,6 @@
 #define PSR_C_BIT	0x20000000
 #define PSR_Z_BIT	0x40000000
 #define PSR_N_BIT	0x80000000
-#define PCMASK		0
 
 /*
  * Groups of PSR bits
@@ -139,11 +138,7 @@ static inline int valid_user_regs(struct pt_regs *regs)
 	return 0;
 }
 
-#define pc_pointer(v) \
-	((v) & ~PCMASK)
-
-#define instruction_pointer(regs) \
-	(pc_pointer((regs)->ARM_pc))
+#define instruction_pointer(regs)	(regs)->ARM_pc
 
 #ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 7bbf105463f..a65413ba121 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -209,6 +209,17 @@ struct meminfo {
 	struct membank bank[NR_BANKS];
 };
 
+#define for_each_nodebank(iter,mi,no)			\
+	for (iter = 0; iter < mi->nr_banks; iter++)	\
+		if (mi->bank[iter].node == no)
+
+#define bank_pfn_start(bank)	__phys_to_pfn((bank)->start)
+#define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size)
+#define bank_pfn_size(bank)	((bank)->size >> PAGE_SHIFT)
+#define bank_phys_start(bank)	(bank)->start
+#define bank_phys_end(bank)	((bank)->start + (bank)->size)
+#define bank_phys_size(bank)	(bank)->size
+
 /*
  * Early command line parameters.
  */
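The new bank helpers give arch code a uniform way to walk the boot-time memory banks. A small sketch (not from the patch) counting the pages a node contributes, where mi is assumed to point at the boot meminfo:

	static unsigned long node_pages(struct meminfo *mi, int node)
	{
		unsigned long pages = 0;
		int i;

		for_each_nodebank(i, mi, node)
			pages += bank_pfn_size(&mi->bank[i]);

		return pages;
	}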
diff --git a/arch/arm/include/asm/sparsemem.h b/arch/arm/include/asm/sparsemem.h
index 277158191a0..00098615c6f 100644
--- a/arch/arm/include/asm/sparsemem.h
+++ b/arch/arm/include/asm/sparsemem.h
@@ -3,8 +3,22 @@
 
 #include <asm/memory.h>
 
-#define MAX_PHYSADDR_BITS	32
-#define MAX_PHYSMEM_BITS	32
-#define SECTION_SIZE_BITS	NODE_MEM_SIZE_BITS
+/*
+ * Two definitions are required for sparsemem:
+ *
+ * MAX_PHYSMEM_BITS: The number of physical address bits required
+ * to address the last byte of memory.
+ *
+ * SECTION_SIZE_BITS: The number of physical address bits to cover
+ * the maximum amount of memory in a section.
+ *
+ * Eg, if you have 2 banks of up to 64MB at 0x80000000, 0x84000000,
+ * then MAX_PHYSMEM_BITS is 32, SECTION_SIZE_BITS is 26.
+ *
+ * Define these in your mach/memory.h.
+ */
+#if !defined(SECTION_SIZE_BITS) || !defined(MAX_PHYSMEM_BITS)
+#error Sparsemem is not supported on this platform
+#endif
 
 #endif
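Following the comment's own example, a platform with two 64MB banks at 0x80000000 and 0x84000000 would now carry something like this in its mach/memory.h (values taken straight from the example above):

	#define MAX_PHYSMEM_BITS	32
	#define SECTION_SIZE_BITS	26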
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 514af792a59..7aad78420f1 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -43,11 +43,6 @@
 #define CR_XP	(1 << 23)	/* Extended page tables		*/
 #define CR_VE	(1 << 24)	/* Vectored interrupts		*/
 
-#define CPUID_ID	0
-#define CPUID_CACHETYPE	1
-#define CPUID_TCM	2
-#define CPUID_TLBTYPE	3
-
 /*
  * This is used to ensure the compiler did actually allocate the register we
  * asked it for some inline assembly sequences. Apparently we can't trust
@@ -61,36 +56,8 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/linkage.h>
-#include <linux/stringify.h>
 #include <linux/irqflags.h>
 
-#ifdef CONFIG_CPU_CP15
-#define read_cpuid(reg)						\
-	({							\
-		unsigned int __val;				\
-		asm("mrc p15, 0, %0, c0, c0, " __stringify(reg)	\
-		    : "=r" (__val)				\
-		    :						\
-		    : "cc");					\
-		__val;						\
-	})
-#else
-extern unsigned int processor_id;
-#define read_cpuid(reg) (processor_id)
-#endif
-
-/*
- * The CPU ID never changes at run time, so we might as well tell the
- * compiler that it's constant. Use this function to read the CPU ID
- * rather than directly reading processor_id or read_cpuid() directly.
- */
-static inline unsigned int read_cpuid_id(void) __attribute_const__;
-
-static inline unsigned int read_cpuid_id(void)
-{
-	return read_cpuid(CPUID_ID);
-}
-
 #define __exception	__attribute__((section(".exception.text")))
 
 struct thread_info;
@@ -131,31 +98,6 @@ extern void cpu_init(void);
 void arm_machine_restart(char mode);
 extern void (*arm_pm_restart)(char str);
 
-/*
- * Intel's XScale3 core supports some v6 features (supersections, L2)
- * but advertises itself as v5 as it does not support the v6 ISA. For
- * this reason, we need a way to explicitly test for this type of CPU.
- */
-#ifndef CONFIG_CPU_XSC3
-#define cpu_is_xsc3()	0
-#else
-static inline int cpu_is_xsc3(void)
-{
-	extern unsigned int processor_id;
-
-	if ((processor_id & 0xffffe000) == 0x69056000)
-		return 1;
-
-	return 0;
-}
-#endif
-
-#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
-#define cpu_is_xscale()	0
-#else
-#define cpu_is_xscale()	1
-#endif
-
 #define UDBG_UNDEFINED	(1 << 0)
 #define UDBG_SYSCALL	(1 << 1)
 #define UDBG_BADABORT	(1 << 2)
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index e56fa48e4ae..68b9ec82a37 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -98,7 +98,7 @@ static inline struct thread_info *current_thread_info(void)
 }
 
 #define thread_saved_pc(tsk)	\
-	((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
+	((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
 #define thread_saved_fp(tsk)	\
 	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
 
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index d0f51ff900b..e98ec60b340 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -225,7 +225,7 @@ do { \
 
 #define __get_user_asm_byte(x,addr,err)			\
 	__asm__ __volatile__(				\
-	"1:	ldrbt	%1,[%2],#0\n"			\
+	"1:	ldrbt	%1,[%2]\n"			\
 	"2:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
@@ -261,7 +261,7 @@ do { \
 
 #define __get_user_asm_word(x,addr,err)			\
 	__asm__ __volatile__(				\
-	"1:	ldrt	%1,[%2],#0\n"			\
+	"1:	ldrt	%1,[%2]\n"			\
 	"2:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
@@ -306,7 +306,7 @@ do { \
 
 #define __put_user_asm_byte(x,__pu_addr,err)		\
 	__asm__ __volatile__(				\
-	"1:	strbt	%1,[%2],#0\n"			\
+	"1:	strbt	%1,[%2]\n"			\
 	"2:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
@@ -339,7 +339,7 @@ do { \
 
 #define __put_user_asm_word(x,__pu_addr,err)		\
 	__asm__ __volatile__(				\
-	"1:	strt	%1,[%2],#0\n"			\
+	"1:	strt	%1,[%2]\n"			\
 	"2:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
@@ -365,7 +365,7 @@ do { \
 #define __put_user_asm_dword(x,__pu_addr,err)		\
 	__asm__ __volatile__(				\
 	"1:	strt	" __reg_oper1 ", [%1], #4\n"	\
-	"2:	strt	" __reg_oper0 ", [%1], #0\n"	\
+	"2:	strt	" __reg_oper0 ", [%1]\n"	\
 	"3:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
diff --git a/arch/arm/include/asm/vga.h b/arch/arm/include/asm/vga.h
index 6a3cd2a2f67..250a4dd0063 100644
--- a/arch/arm/include/asm/vga.h
+++ b/arch/arm/include/asm/vga.h
@@ -1,8 +1,8 @@
 #ifndef ASMARM_VGA_H
 #define ASMARM_VGA_H
 
+#include <linux/io.h>
 #include <mach/hardware.h>
-#include <asm/io.h>
 
 #define VGA_MAP_MEM(x,s)	(PCIMEM_BASE + (x))
 