Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/kernel/signal.c       |  96
-rw-r--r--  arch/arm/kernel/traps.c        |  14
-rw-r--r--  arch/arm/mm/consistent.c       |   6
-rw-r--r--  arch/arm/mm/fault-armv.c       |   7
-rw-r--r--  arch/arm/mm/ioremap.c          |   4
-rw-r--r--  arch/arm/mm/mm-armv.c          |  15
-rw-r--r--  arch/arm/oprofile/backtrace.c  |  46
7 files changed, 45 insertions, 143 deletions
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index a94d75fef598..a917e3dd3666 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -139,93 +139,33 @@ struct iwmmxt_sigframe {
 	unsigned long	storage[0x98/4];
 };
 
-static int page_present(struct mm_struct *mm, void __user *uptr, int wr)
-{
-	unsigned long addr = (unsigned long)uptr;
-	pgd_t *pgd = pgd_offset(mm, addr);
-	if (pgd_present(*pgd)) {
-		pmd_t *pmd = pmd_offset(pgd, addr);
-		if (pmd_present(*pmd)) {
-			pte_t *pte = pte_offset_map(pmd, addr);
-			return (pte_present(*pte) && (!wr || pte_write(*pte)));
-		}
-	}
-	return 0;
-}
-
-static int copy_locked(void __user *uptr, void *kptr, size_t size, int write,
-		       void (*copyfn)(void *, void __user *))
-{
-	unsigned char v, __user *userptr = uptr;
-	int err = 0;
-
-	do {
-		struct mm_struct *mm;
-
-		if (write) {
-			__put_user_error(0, userptr, err);
-			__put_user_error(0, userptr + size - 1, err);
-		} else {
-			__get_user_error(v, userptr, err);
-			__get_user_error(v, userptr + size - 1, err);
-		}
-
-		if (err)
-			break;
-
-		mm = current->mm;
-		spin_lock(&mm->page_table_lock);
-		if (page_present(mm, userptr, write) &&
-		    page_present(mm, userptr + size - 1, write)) {
-			copyfn(kptr, uptr);
-		} else
-			err = 1;
-		spin_unlock(&mm->page_table_lock);
-	} while (err);
-
-	return err;
-}
-
 static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
 {
-	int err = 0;
+	char kbuf[sizeof(*frame) + 8];
+	struct iwmmxt_sigframe *kframe;
 
 	/* the iWMMXt context must be 64 bit aligned */
-	WARN_ON((unsigned long)frame & 7);
-
-	__put_user_error(IWMMXT_MAGIC0, &frame->magic0, err);
-	__put_user_error(IWMMXT_MAGIC1, &frame->magic1, err);
-
-	/*
-	 * iwmmxt_task_copy() doesn't check user permissions.
-	 * Let's do a dummy write on the upper boundary to ensure
-	 * access to user mem is OK all way up.
-	 */
-	err |= copy_locked(&frame->storage, current_thread_info(),
-			   sizeof(frame->storage), 1, iwmmxt_task_copy);
-	return err;
+	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	kframe->magic0 = IWMMXT_MAGIC0;
+	kframe->magic1 = IWMMXT_MAGIC1;
+	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
+	return __copy_to_user(frame, kframe, sizeof(*frame));
 }
 
 static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 {
-	unsigned long magic0, magic1;
-	int err = 0;
+	char kbuf[sizeof(*frame) + 8];
+	struct iwmmxt_sigframe *kframe;
 
-	/* the iWMMXt context is 64 bit aligned */
-	WARN_ON((unsigned long)frame & 7);
-
-	/*
-	 * Validate iWMMXt context signature.
-	 * Also, iwmmxt_task_restore() doesn't check user permissions.
-	 * Let's do a dummy write on the upper boundary to ensure
-	 * access to user mem is OK all way up.
-	 */
-	__get_user_error(magic0, &frame->magic0, err);
-	__get_user_error(magic1, &frame->magic1, err);
-	if (!err && magic0 == IWMMXT_MAGIC0 && magic1 == IWMMXT_MAGIC1)
-		err = copy_locked(&frame->storage, current_thread_info(),
-				  sizeof(frame->storage), 0, iwmmxt_task_restore);
-	return err;
+	/* the iWMMXt context must be 64 bit aligned */
+	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	if (__copy_from_user(kframe, frame, sizeof(*frame)))
+		return -1;
+	if (kframe->magic0 != IWMMXT_MAGIC0 ||
+	    kframe->magic1 != IWMMXT_MAGIC1)
+		return -1;
+	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
+	return 0;
 }
 
 #endif
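
The rewritten preserve/restore path above no longer touches the user page tables at all: it stages the whole frame in an on-stack kernel buffer, aligns that buffer to 8 bytes, lets iwmmxt_task_copy()/iwmmxt_task_restore() work on kernel memory, and then does a single __copy_to_user()/__copy_from_user(), which already handles faults and permissions. A minimal user-space sketch of the alignment trick, with struct sigctx standing in for struct iwmmxt_sigframe (the names and the magic value are illustrative, not from the patch):

/* Standalone illustration: carve an 8-byte-aligned staging frame out of
 * a slightly oversized char buffer, as the patched code does with
 * kbuf/kframe.  Builds with any C compiler; nothing here is kernel API. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sigctx {				/* stand-in for struct iwmmxt_sigframe */
	unsigned long magic0;
	unsigned long magic1;
	unsigned long storage[0x98 / 4];
};

int main(void)
{
	char kbuf[sizeof(struct sigctx) + 8];
	struct sigctx *kframe;

	/* kbuf + 8 is at least 8 bytes into the buffer, so rounding it
	 * down to a multiple of 8 still points inside kbuf and leaves
	 * sizeof(*kframe) bytes of room after it. */
	kframe = (struct sigctx *)((uintptr_t)(kbuf + 8) & ~(uintptr_t)7);

	assert(((uintptr_t)kframe & 7) == 0);
	assert((char *)kframe >= kbuf);
	assert((char *)(kframe + 1) <= kbuf + sizeof(kbuf));

	memset(kframe, 0, sizeof(*kframe));
	kframe->magic0 = 0x12345678;	/* placeholder, not IWMMXT_MAGIC0 */
	printf("kbuf=%p kframe=%p (64-bit aligned)\n",
	       (void *)kbuf, (void *)kframe);
	return 0;
}

In the kernel version the staged frame is then moved in one go, so a fault anywhere in the user frame is reported by the copy itself rather than by a manual page-table walk under page_table_lock.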
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index baa09601a64e..66e5a0516f23 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -483,29 +483,33 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		unsigned long addr = regs->ARM_r2;
 		struct mm_struct *mm = current->mm;
 		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
+		spinlock_t *ptl;
 
 		regs->ARM_cpsr &= ~PSR_C_BIT;
-		spin_lock(&mm->page_table_lock);
+		down_read(&mm->mmap_sem);
 		pgd = pgd_offset(mm, addr);
 		if (!pgd_present(*pgd))
 			goto bad_access;
 		pmd = pmd_offset(pgd, addr);
 		if (!pmd_present(*pmd))
 			goto bad_access;
-		pte = pte_offset_map(pmd, addr);
-		if (!pte_present(*pte) || !pte_write(*pte))
+		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+		if (!pte_present(*pte) || !pte_write(*pte)) {
+			pte_unmap_unlock(pte, ptl);
 			goto bad_access;
+		}
 		val = *(unsigned long *)addr;
 		val -= regs->ARM_r0;
 		if (val == 0) {
 			*(unsigned long *)addr = regs->ARM_r1;
 			regs->ARM_cpsr |= PSR_C_BIT;
 		}
-		spin_unlock(&mm->page_table_lock);
+		pte_unmap_unlock(pte, ptl);
+		up_read(&mm->mmap_sem);
 		return val;
 
 	bad_access:
-		spin_unlock(&mm->page_table_lock);
+		up_read(&mm->mmap_sem);
 		/* simulate a write access fault */
 		do_DataAbort(addr, 15 + (1 << 11), regs);
 		return -1;
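
For reference, the pattern the cmpxchg emulation switches to above is the split-ptlock idiom: hold mmap_sem for read so the page tables cannot be torn down, then let pte_offset_map_lock() take whichever spinlock guards that particular page table. A rough sketch of the same walk wrapped in a helper, assuming the 2.6.14-era ARM calls used in the hunk above (the helper itself is hypothetical, not part of the patch):

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hypothetical helper: report whether addr is mapped writable in mm,
 * using the per-page-table pte lock instead of the old mm-wide
 * page_table_lock. */
static int addr_writable(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0;

	down_read(&mm->mmap_sem);	/* keeps the tables from being freed */
	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;
	pmd = pmd_offset(pgd, addr);
	if (!pmd_present(*pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	ret = pte_present(*pte) && pte_write(*pte);
	pte_unmap_unlock(pte, ptl);	/* drops the pte lock and the kmap */
out:
	up_read(&mm->mmap_sem);
	return ret;
}

When split ptlock is in effect, ptl points at a lock embedded in the page-table page's struct page; on smaller configurations it falls back to &mm->page_table_lock, so callers read the same either way.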
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 82f4d5e27c54..47b0b767f080 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -397,8 +397,6 @@ static int __init consistent_init(void)
 	pte_t *pte;
 	int ret = 0;
 
-	spin_lock(&init_mm.page_table_lock);
-
 	do {
 		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
 		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
@@ -409,7 +407,7 @@ static int __init consistent_init(void)
 		}
 		WARN_ON(!pmd_none(*pmd));
 
-		pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
+		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
 		if (!pte) {
 			printk(KERN_ERR "%s: no pte tables\n", __func__);
 			ret = -ENOMEM;
@@ -419,8 +417,6 @@ static int __init consistent_init(void)
 		consistent_pte = pte;
 	} while (0);
 
-	spin_unlock(&init_mm.page_table_lock);
-
 	return ret;
 }
 
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index be4ab3d73c91..7fc1b35a6746 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -26,6 +26,11 @@ static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
 /*
  * We take the easy way out of this problem - we make the
  * PTE uncacheable. However, we leave the write buffer on.
+ *
+ * Note that the pte lock held when calling update_mmu_cache must also
+ * guard the pte (somewhere else in the same mm) that we modify here.
+ * Therefore those configurations which might call adjust_pte (those
+ * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 {
@@ -127,7 +132,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page);
  * 2. If we have multiple shared mappings of the same space in
  *    an object, we need to deal with the cache aliasing issues.
  *
- * Note that the page_table_lock will be held.
+ * Note that the pte lock will be held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 6fb1258df1b5..0f128c28fee4 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -75,7 +75,7 @@ remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
 
 	pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
@@ -97,7 +97,6 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
 	phys_addr -= address;
 	dir = pgd_offset(&init_mm, address);
 	BUG_ON(address >= end);
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
 		if (!pmd) {
@@ -114,7 +113,6 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
 		dir++;
 	} while (address && (address < end));
 
-	spin_unlock(&init_mm.page_table_lock);
 	flush_cache_vmap(start, end);
 	return err;
 }
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 61bc2fa0511e..1221fdde1769 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -180,11 +180,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
 	if (!vectors_high()) {
 		/*
-		 * This lock is here just to satisfy pmd_alloc and pte_lock
-		 */
-		spin_lock(&mm->page_table_lock);
-
-		/*
 		 * On ARM, first page must always be allocated since it
 		 * contains the machine vectors.
 		 */
@@ -201,23 +196,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 		set_pte(new_pte, *init_pte);
 		pte_unmap_nested(init_pte);
 		pte_unmap(new_pte);
-
-		spin_unlock(&mm->page_table_lock);
 	}
 
 	return new_pgd;
 
 no_pte:
-	spin_unlock(&mm->page_table_lock);
 	pmd_free(new_pmd);
-	free_pages((unsigned long)new_pgd, 2);
-	return NULL;
-
 no_pmd:
-	spin_unlock(&mm->page_table_lock);
 	free_pages((unsigned long)new_pgd, 2);
-	return NULL;
-
 no_pgd:
 	return NULL;
 }
@@ -243,6 +229,7 @@ void free_pgd_slow(pgd_t *pgd)
 	pte = pmd_page(*pmd);
 	pmd_clear(pmd);
 	dec_page_state(nr_page_table_pages);
+	pte_lock_deinit(pte);
 	pte_free(pte);
 	pmd_free(pmd);
 free:
diff --git a/arch/arm/oprofile/backtrace.c b/arch/arm/oprofile/backtrace.c
index df35c452a8bf..7c22c12618cc 100644
--- a/arch/arm/oprofile/backtrace.c
+++ b/arch/arm/oprofile/backtrace.c
@@ -49,42 +49,22 @@ static struct frame_tail* kernel_backtrace(struct frame_tail *tail)
 
 static struct frame_tail* user_backtrace(struct frame_tail *tail)
 {
-	struct frame_tail buftail;
+	struct frame_tail buftail[2];
 
-	/* hardware pte might not be valid due to dirty/accessed bit emulation
-	 * so we use copy_from_user and benefit from exception fixups */
-	if (copy_from_user(&buftail, tail, sizeof(struct frame_tail)))
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+		return NULL;
+	if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
 		return NULL;
 
-	oprofile_add_trace(buftail.lr);
+	oprofile_add_trace(buftail[0].lr);
 
 	/* frame pointers should strictly progress back up the stack
 	 * (towards higher addresses) */
-	if (tail >= buftail.fp)
+	if (tail >= buftail[0].fp)
 		return NULL;
 
-	return buftail.fp-1;
-}
-
-/* Compare two addresses and see if they're on the same page */
-#define CMP_ADDR_EQUAL(x,y,offset) ((((unsigned long) x) >> PAGE_SHIFT) \
-	== ((((unsigned long) y) + offset) >> PAGE_SHIFT))
-
-/* check that the page(s) containing the frame tail are present */
-static int pages_present(struct frame_tail *tail)
-{
-	struct mm_struct * mm = current->mm;
-
-	if (!check_user_page_readable(mm, (unsigned long)tail))
-		return 0;
-
-	if (CMP_ADDR_EQUAL(tail, tail, 8))
-		return 1;
-
-	if (!check_user_page_readable(mm, ((unsigned long)tail) + 8))
-		return 0;
-
-	return 1;
+	return buftail[0].fp-1;
 }
 
 /*
@@ -118,7 +98,6 @@ static int valid_kernel_stack(struct frame_tail *tail, struct pt_regs *regs)
 void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
 	struct frame_tail *tail;
-	unsigned long last_address = 0;
 
 	tail = ((struct frame_tail *) regs->ARM_fp) - 1;
 
@@ -132,13 +111,6 @@ void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
 		return;
 	}
 
-	while (depth-- && tail && !((unsigned long) tail & 3)) {
-		if ((!CMP_ADDR_EQUAL(last_address, tail, 0)
-					|| !CMP_ADDR_EQUAL(last_address, tail, 8))
-				&& !pages_present(tail))
-			return;
-		last_address = (unsigned long) tail;
+	while (depth-- && tail && !((unsigned long) tail & 3))
 		tail = user_backtrace(tail);
-	}
 }
-
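
The oprofile change drops the page-table probing (check_user_page_readable() goes away with split ptlock) and instead relies on access_ok() plus an in-atomic copy of two frame_tail records, keeping only the cheap structural checks on the frame chain. A small user-space sketch of those remaining checks, walking a fake in-memory stack (struct frame_tail and the frame layout here are simplified for illustration):

#include <stdint.h>
#include <stdio.h>

struct frame_tail {			/* simplified APCS frame record */
	struct frame_tail *fp;		/* saved frame pointer */
	unsigned long sp;
	unsigned long lr;		/* saved return address */
};

/* Walk at most 'depth' frames, with the same two guards the patched
 * user_backtrace() keeps: the pointer must be word aligned, and each
 * saved fp must point strictly higher up the stack. */
static void walk(struct frame_tail *tail, unsigned int depth)
{
	while (depth-- && tail && !((uintptr_t)tail & 3)) {
		struct frame_tail cur = *tail;	/* in the kernel: copied from user space */

		printf("lr = %#lx\n", cur.lr);
		if (tail >= cur.fp)		/* not progressing up the stack: stop */
			break;
		tail = cur.fp - 1;		/* fp points just past the record */
	}
}

int main(void)
{
	struct frame_tail frames[3];

	frames[0] = (struct frame_tail){ .fp = &frames[1] + 1, .lr = 0x1000 };
	frames[1] = (struct frame_tail){ .fp = &frames[2] + 1, .lr = 0x2000 };
	frames[2] = (struct frame_tail){ .fp = &frames[0], .lr = 0x3000 };	/* bogus: points backwards */

	walk(&frames[0], 10);		/* prints three lr values, then stops at the bogus fp */
	return 0;
}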