Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/assembler.h    | 133
-rw-r--r--  arch/arm/include/asm/elf.h          |   3
-rw-r--r--  arch/arm/include/asm/ftrace.h       |  35
-rw-r--r--  arch/arm/include/asm/futex.h        |   1
-rw-r--r--  arch/arm/include/asm/mach/mmc.h     |   2
-rw-r--r--  arch/arm/include/asm/memory.h       |  23
-rw-r--r--  arch/arm/include/asm/mmu_context.h  |   2
-rw-r--r--  arch/arm/include/asm/page-nommu.h   |   3
-rw-r--r--  arch/arm/include/asm/page.h         |   4
-rw-r--r--  arch/arm/include/asm/pgalloc.h      |  16
-rw-r--r--  arch/arm/include/asm/pgtable.h      |  37
-rw-r--r--  arch/arm/include/asm/ptrace.h       |   8
-rw-r--r--  arch/arm/include/asm/thread_info.h  |   2
-rw-r--r--  arch/arm/include/asm/uaccess.h      |   7
-rw-r--r--  arch/arm/include/asm/unified.h      | 126
-rw-r--r--  arch/arm/include/asm/unistd.h       |   7
16 files changed, 360 insertions(+), 49 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 15f8a092b700..00f46d9ce299 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -74,23 +74,56 @@
  * Enable and disable interrupts
  */
 #if __LINUX_ARM_ARCH__ >= 6
-	.macro	disable_irq
+	.macro	disable_irq_notrace
 	cpsid	i
 	.endm
 
-	.macro	enable_irq
+	.macro	enable_irq_notrace
 	cpsie	i
 	.endm
 #else
-	.macro	disable_irq
+	.macro	disable_irq_notrace
 	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
 	.endm
 
-	.macro	enable_irq
+	.macro	enable_irq_notrace
 	msr	cpsr_c, #SVC_MODE
 	.endm
 #endif
 
+	.macro asm_trace_hardirqs_off
+#if defined(CONFIG_TRACE_IRQFLAGS)
+	stmdb   sp!, {r0-r3, ip, lr}
+	bl	trace_hardirqs_off
+	ldmia	sp!, {r0-r3, ip, lr}
+#endif
+	.endm
+
+	.macro asm_trace_hardirqs_on_cond, cond
+#if defined(CONFIG_TRACE_IRQFLAGS)
+	/*
+	 * actually the registers should be pushed and pop'd conditionally, but
+	 * after bl the flags are certainly clobbered
+	 */
+	stmdb   sp!, {r0-r3, ip, lr}
+	bl\cond	trace_hardirqs_on
+	ldmia	sp!, {r0-r3, ip, lr}
+#endif
+	.endm
+
+	.macro asm_trace_hardirqs_on
+	asm_trace_hardirqs_on_cond al
+	.endm
+
+	.macro disable_irq
+	disable_irq_notrace
+	asm_trace_hardirqs_off
+	.endm
+
+	.macro enable_irq
+	asm_trace_hardirqs_on
+	enable_irq_notrace
+	.endm
 /*
  * Save the current IRQ state and disable IRQs.  Note that this macro
  * assumes FIQs are enabled, and that the processor is in SVC mode.
@@ -104,10 +137,16 @@
  * Restore interrupt state previously stored in a register.  We don't
  * guarantee that this will preserve the flags.
  */
-	.macro	restore_irqs, oldcpsr
+	.macro	restore_irqs_notrace, oldcpsr
 	msr	cpsr_c, \oldcpsr
 	.endm
 
+	.macro restore_irqs, oldcpsr
+	tst	\oldcpsr, #PSR_I_BIT
+	asm_trace_hardirqs_on_cond eq
+	restore_irqs_notrace \oldcpsr
+	.endm
+
 #define USER(x...)				\
 9999:	x;					\
 	.section __ex_table,"a";		\
@@ -127,3 +166,87 @@
 #endif
 #endif
 	.endm
+
+#ifdef CONFIG_THUMB2_KERNEL
+	.macro	setmode, mode, reg
+	mov	\reg, #\mode
+	msr	cpsr_c, \reg
+	.endm
+#else
+	.macro	setmode, mode, reg
+	msr	cpsr_c, #\mode
+	.endm
+#endif
+
+/*
+ * STRT/LDRT access macros with ARM and Thumb-2 variants
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+
+	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort
+9999:
+	.if	\inc == 1
+	\instr\cond\()bt \reg, [\ptr, #\off]
+	.elseif	\inc == 4
+	\instr\cond\()t \reg, [\ptr, #\off]
+	.else
+	.error	"Unsupported inc macro argument"
+	.endif
+
+	.section __ex_table,"a"
+	.align	3
+	.long	9999b, \abort
+	.previous
+	.endm
+
+	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
+	@ explicit IT instruction needed because of the label
+	@ introduced by the USER macro
+	.ifnc	\cond,al
+	.if	\rept == 1
+	itt	\cond
+	.elseif	\rept == 2
+	ittt	\cond
+	.else
+	.error	"Unsupported rept macro argument"
+	.endif
+	.endif
+
+	@ Slightly optimised to avoid incrementing the pointer twice
+	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
+	.if	\rept == 2
+	usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
+	.endif
+
+	add\cond \ptr, #\rept * \inc
+	.endm
+
+#else	/* !CONFIG_THUMB2_KERNEL */
+
+	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
+	.rept	\rept
+9999:
+	.if	\inc == 1
+	\instr\cond\()bt \reg, [\ptr], #\inc
+	.elseif	\inc == 4
+	\instr\cond\()t \reg, [\ptr], #\inc
+	.else
+	.error	"Unsupported inc macro argument"
+	.endif
+
+	.section __ex_table,"a"
+	.align	3
+	.long	9999b, \abort
+	.previous
+	.endr
+	.endm
+
+#endif	/* CONFIG_THUMB2_KERNEL */
+
+	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
+	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
+	.endm
+
+	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
+	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
+	.endm
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index c207504de84d..c3b911ee9151 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -55,6 +55,9 @@ typedef struct user_fp elf_fpregset_t;
 #define R_ARM_MOVW_ABS_NC	43
 #define R_ARM_MOVT_ABS		44
 
+#define R_ARM_THM_CALL		10
+#define R_ARM_THM_JUMP24	30
+
 /*
  * These are used to set parameters in the core dumps.
  */
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 39c8bc1a006a..103f7ee97313 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -7,8 +7,43 @@
 
 #ifndef __ASSEMBLY__
 extern void mcount(void);
+extern void __gnu_mcount_nc(void);
 #endif
 
 #endif
 
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
+/*
+ * return_address uses walk_stackframe to do it's work.  If both
+ * CONFIG_FRAME_POINTER=y and CONFIG_ARM_UNWIND=y walk_stackframe uses unwind
+ * information.  For this to work in the function tracer many functions would
+ * have to be marked with __notrace.  So for now just depend on
+ * !CONFIG_ARM_UNWIND.
+ */
+
+void *return_address(unsigned int);
+
+#else
+
+extern inline void *return_address(unsigned int level)
+{
+	return NULL;
+}
+
+#endif
+
+#define HAVE_ARCH_CALLER_ADDR
+
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#define CALLER_ADDR1 ((unsigned long)return_address(1))
+#define CALLER_ADDR2 ((unsigned long)return_address(2))
+#define CALLER_ADDR3 ((unsigned long)return_address(3))
+#define CALLER_ADDR4 ((unsigned long)return_address(4))
+#define CALLER_ADDR5 ((unsigned long)return_address(5))
+#define CALLER_ADDR6 ((unsigned long)return_address(6))
+
+#endif /* ifndef __ASSEMBLY__ */
+
 #endif /* _ASM_ARM_FTRACE */
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 9ee743b95de8..bfcc15929a7f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -99,6 +99,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrt	%0, [%3]\n"
 	"	teq	%0, %1\n"
+	"	it	eq	@ explicit IT needed for the 2b label\n"
 	"2:	streqt	%2, [%3]\n"
 	"3:\n"
 	"	.section __ex_table,\"a\"\n"
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
index 4da332b03144..b490ecc79def 100644
--- a/arch/arm/include/asm/mach/mmc.h
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -10,6 +10,8 @@ struct mmc_platform_data {
 	unsigned int ocr_mask;			/* available voltages */
 	u32 (*translate_vdd)(struct device *, unsigned int);
 	unsigned int (*status)(struct device *);
+	int	gpio_wp;
+	int	gpio_cd;
 };
 
 #endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 85763db87449..cefedf062138 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -44,7 +44,13 @@
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
+#ifndef CONFIG_THUMB2_KERNEL
 #define MODULES_VADDR		(PAGE_OFFSET - 16*1024*1024)
+#else
+/* smaller range for Thumb-2 symbols relocation (2^24)*/
+#define MODULES_VADDR		(PAGE_OFFSET - 8*1024*1024)
+#endif
+
 #if TASK_SIZE > MODULES_VADDR
 #error Top of user space clashes with start of module space
 #endif
@@ -212,7 +218,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
  *
  *  page_to_pfn(page)	convert a struct page * to a PFN number
  *  pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
- *  pfn_valid(pfn)	indicates whether a PFN number is valid
  *
  *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
  *  virt_addr_valid(k)	indicates whether a virtual address is valid
@@ -221,10 +226,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
-#ifndef CONFIG_SPARSEMEM
-#define pfn_valid(pfn)		((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
-#endif
-
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
 
@@ -241,18 +242,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define arch_pfn_to_nid(pfn)	PFN_TO_NID(pfn)
 #define arch_local_page_offset(pfn, nid) LOCAL_MAP_NR((pfn) << PAGE_SHIFT)
 
-#define pfn_valid(pfn)						\
-	({							\
-		unsigned int nid = PFN_TO_NID(pfn);		\
-		int valid = nid < MAX_NUMNODES;			\
-		if (valid) {					\
-			pg_data_t *node = NODE_DATA(nid);	\
-			valid = (pfn - node->node_start_pfn) <	\
-				node->node_spanned_pages;	\
-		}						\
-		valid;						\
-	})
-
 #define virt_to_page(kaddr)					\
 	(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
 
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 263fed05ea33..bcdb9291ef0c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -62,8 +62,10 @@ static inline void check_context(struct mm_struct *mm)
 
 static inline void check_context(struct mm_struct *mm)
 {
+#ifdef CONFIG_MMU
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+#endif
 }
 
 #define init_new_context(tsk,mm)	0
diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h
index 3574c0deb37f..d1b162a18dcb 100644
--- a/arch/arm/include/asm/page-nommu.h
+++ b/arch/arm/include/asm/page-nommu.h
@@ -43,7 +43,4 @@ typedef unsigned long pgprot_t;
 #define __pmd(x)	(x)
 #define __pgprot(x)	(x)
 
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
 #endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 9c746af1bf6e..3a32af4cce30 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -194,6 +194,10 @@ typedef unsigned long pgprot_t;
 
 typedef struct page *pgtable_t;
 
+#ifndef CONFIG_SPARSEMEM
+extern int pfn_valid(unsigned long);
+#endif
+
 #include <asm/memory.h>
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 3dcd64bf1824..b12cc98bbe04 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -36,6 +36,8 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 #define pgd_alloc(mm)		get_pgd_slow(mm)
 #define pgd_free(mm, pgd)	free_pgd_slow(mm, pgd)
 
+#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+
 /*
  * Allocate one PTE table.
  *
@@ -57,7 +59,7 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
 	pte_t *pte;
 
-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte = (pte_t *)__get_free_page(PGALLOC_GFP);
 	if (pte) {
 		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
 		pte += PTRS_PER_PTE;
@@ -71,10 +73,16 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *pte;
 
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+#ifdef CONFIG_HIGHPTE
+	pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
+#else
+	pte = alloc_pages(PGALLOC_GFP, 0);
+#endif
 	if (pte) {
-		void *page = page_address(pte);
-		clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
+		if (!PageHighMem(pte)) {
+			void *page = page_address(pte);
+			clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
+		}
 		pgtable_page_ctor(pte);
 	}
 
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index c433c6c73112..201ccaa11f61 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -162,10 +162,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  * entries are stored 1024 bytes below.
  */
 #define L_PTE_PRESENT		(1 << 0)
-#define L_PTE_FILE		(1 << 1)	/* only when !PRESENT */
 #define L_PTE_YOUNG		(1 << 1)
-#define L_PTE_BUFFERABLE	(1 << 2)	/* obsolete, matches PTE */
-#define L_PTE_CACHEABLE		(1 << 3)	/* obsolete, matches PTE */
+#define L_PTE_FILE		(1 << 2)	/* only when !PRESENT */
 #define L_PTE_DIRTY		(1 << 6)
 #define L_PTE_WRITE		(1 << 7)
 #define L_PTE_USER		(1 << 8)
@@ -264,10 +262,19 @@ extern struct page *empty_zero_page;
 #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
 #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
 #define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
-#define pte_offset_map(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
-#define pte_offset_map_nested(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
-#define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
+
+#define pte_offset_map(dir,addr)	(__pte_map(dir, KM_PTE0) + __pte_index(addr))
+#define pte_offset_map_nested(dir,addr)	(__pte_map(dir, KM_PTE1) + __pte_index(addr))
+#define pte_unmap(pte)			__pte_unmap(pte, KM_PTE0)
+#define pte_unmap_nested(pte)		__pte_unmap(pte, KM_PTE1)
+
+#ifndef CONFIG_HIGHPTE
+#define __pte_map(dir,km)	pmd_page_vaddr(*(dir))
+#define __pte_unmap(pte,km)	do { } while (0)
+#else
+#define __pte_map(dir,km)	((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
+#define __pte_unmap(pte,km)	kunmap_atomic((pte - PTRS_PER_PTE), km)
+#endif
 
 #define set_pte_ext(ptep,pte,ext)	cpu_set_pte_ext(ptep,pte,ext)
 
@@ -381,13 +388,13 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  *
  *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
  *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- *   <--------------- offset --------------------> <--- type --> 0 0
+ *   <--------------- offset --------------------> <- type --> 0 0 0
  *
- * This gives us up to 127 swap files and 32GB per swap file.  Note that
+ * This gives us up to 63 swap files and 32GB per swap file.  Note that
  * the offset field is always non-zero.
  */
-#define __SWP_TYPE_SHIFT	2
-#define __SWP_TYPE_BITS		7
+#define __SWP_TYPE_SHIFT	3
+#define __SWP_TYPE_BITS		6
 #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
 
@@ -411,13 +418,13 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  *
  *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
  *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- *   <------------------------ offset -------------------------> 1 0
+ *   <----------------------- offset ------------------------> 1 0 0
  */
 #define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
-#define pte_to_pgoff(x)		(pte_val(x) >> 2)
-#define pgoff_to_pte(x)		__pte(((x) << 2) | L_PTE_FILE)
+#define pte_to_pgoff(x)		(pte_val(x) >> 3)
+#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)
 
-#define PTE_FILE_MAX_BITS	30
+#define PTE_FILE_MAX_BITS	29
 
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 /* FIXME: this is not correct */
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 67b833c9b6b9..bbecccda76d0 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -82,6 +82,14 @@
 #define PSR_ENDSTATE	0
 #endif
 
+/*
+ * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
+ * process is located in memory.
+ */
+#define PT_TEXT_ADDR		0x10000
+#define PT_DATA_ADDR		0x10004
+#define PT_TEXT_END_ADDR	0x10008
+
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d3a39b1e6c0f..2dfb7d7a66e9 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -142,6 +142,7 @@ extern void vfp_sync_state(struct thread_info *thread);
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18
 #define TIF_FREEZE		19
+#define TIF_RESTORE_SIGMASK	20
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
@@ -150,6 +151,7 @@ extern void vfp_sync_state(struct thread_info *thread);
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
+#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 
 /*
  * Change these and you break ASM code in entry-common.S
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 0da9bc9b3b1d..1d6bd40a4322 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -17,6 +17,7 @@
 #include <asm/memory.h>
 #include <asm/domain.h>
 #include <asm/system.h>
+#include <asm/unified.h>
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
@@ -365,8 +366,10 @@ do { \
 
 #define __put_user_asm_dword(x,__pu_addr,err)		\
 	__asm__ __volatile__(				\
-	"1:	strt	" __reg_oper1 ", [%1], #4\n"	\
-	"2:	strt	" __reg_oper0 ", [%1]\n"	\
+ ARM(	"1:	strt	" __reg_oper1 ", [%1], #4\n"	)	\
+ ARM(	"2:	strt	" __reg_oper0 ", [%1]\n"	)	\
+ THUMB(	"1:	strt	" __reg_oper1 ", [%1]\n"	)	\
+ THUMB(	"2:	strt	" __reg_oper0 ", [%1, #4]\n"	)	\
 	"3:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
new file mode 100644
index 000000000000..073e85b9b961
--- /dev/null
+++ b/arch/arm/include/asm/unified.h
@@ -0,0 +1,126 @@
+/*
+ * include/asm-arm/unified.h - Unified Assembler Syntax helper macros
+ *
+ * Copyright (C) 2008 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_UNIFIED_H
+#define __ASM_UNIFIED_H
+
+#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED)
+	.syntax unified
+#endif
+
+#ifdef CONFIG_THUMB2_KERNEL
+
+#if __GNUC__ < 4
+#error Thumb-2 kernel requires gcc >= 4
+#endif
+
+/* The CPSR bit describing the instruction set (Thumb) */
+#define PSR_ISETSTATE	PSR_T_BIT
+
+#define ARM(x...)
+#define THUMB(x...)	x
+#define W(instr)	instr.w
+#define BSYM(sym)	sym + 1
+
+#else	/* !CONFIG_THUMB2_KERNEL */
+
+/* The CPSR bit describing the instruction set (ARM) */
+#define PSR_ISETSTATE	0
+
+#define ARM(x...)	x
+#define THUMB(x...)
+#define W(instr)	instr
+#define BSYM(sym)	sym
+
+#endif	/* CONFIG_THUMB2_KERNEL */
+
+#ifndef CONFIG_ARM_ASM_UNIFIED
+
+/*
+ * If the unified assembly syntax isn't used (in ARM mode), these
+ * macros expand to an empty string
+ */
+#ifdef __ASSEMBLY__
+	.macro	it, cond
+	.endm
+	.macro	itt, cond
+	.endm
+	.macro	ite, cond
+	.endm
+	.macro	ittt, cond
+	.endm
+	.macro	itte, cond
+	.endm
+	.macro	itet, cond
+	.endm
+	.macro	itee, cond
+	.endm
+	.macro	itttt, cond
+	.endm
+	.macro	ittte, cond
+	.endm
+	.macro	ittet, cond
+	.endm
+	.macro	ittee, cond
+	.endm
+	.macro	itett, cond
+	.endm
+	.macro	itete, cond
+	.endm
+	.macro	iteet, cond
+	.endm
+	.macro	iteee, cond
+	.endm
+#else	/* !__ASSEMBLY__ */
+__asm__(
+"	.macro	it, cond\n"
+"	.endm\n"
+"	.macro	itt, cond\n"
+"	.endm\n"
+"	.macro	ite, cond\n"
+"	.endm\n"
+"	.macro	ittt, cond\n"
+"	.endm\n"
+"	.macro	itte, cond\n"
+"	.endm\n"
+"	.macro	itet, cond\n"
+"	.endm\n"
+"	.macro	itee, cond\n"
+"	.endm\n"
+"	.macro	itttt, cond\n"
+"	.endm\n"
+"	.macro	ittte, cond\n"
+"	.endm\n"
+"	.macro	ittet, cond\n"
+"	.endm\n"
+"	.macro	ittee, cond\n"
+"	.endm\n"
+"	.macro	itett, cond\n"
+"	.endm\n"
+"	.macro	itete, cond\n"
+"	.endm\n"
+"	.macro	iteet, cond\n"
+"	.endm\n"
+"	.macro	iteee, cond\n"
+"	.endm\n");
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* CONFIG_ARM_ASM_UNIFIED */
+
+#endif	/* !__ASM_UNIFIED_H */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 0e97b8cb77d5..9122c9ee18fb 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -360,8 +360,8 @@
 #define __NR_readlinkat			(__NR_SYSCALL_BASE+332)
 #define __NR_fchmodat			(__NR_SYSCALL_BASE+333)
 #define __NR_faccessat			(__NR_SYSCALL_BASE+334)
-					/* 335 for pselect6 */
-					/* 336 for ppoll */
+#define __NR_pselect6			(__NR_SYSCALL_BASE+335)
+#define __NR_ppoll			(__NR_SYSCALL_BASE+336)
 #define __NR_unshare			(__NR_SYSCALL_BASE+337)
 #define __NR_set_robust_list		(__NR_SYSCALL_BASE+338)
 #define __NR_get_robust_list		(__NR_SYSCALL_BASE+339)
@@ -372,7 +372,7 @@
 #define __NR_vmsplice			(__NR_SYSCALL_BASE+343)
 #define __NR_move_pages			(__NR_SYSCALL_BASE+344)
 #define __NR_getcpu			(__NR_SYSCALL_BASE+345)
-					/* 346 for epoll_pwait */
+#define __NR_epoll_pwait		(__NR_SYSCALL_BASE+346)
 #define __NR_kexec_load			(__NR_SYSCALL_BASE+347)
 #define __NR_utimensat			(__NR_SYSCALL_BASE+348)
 #define __NR_signalfd			(__NR_SYSCALL_BASE+349)
@@ -432,6 +432,7 @@
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
 #define __ARCH_WANT_SYS_TIME