Diffstat (limited to 'arch/sh/mm')

-rw-r--r--  arch/sh/mm/copy_page.S |  1
-rw-r--r--  arch/sh/mm/fault.c     | 39
-rw-r--r--  arch/sh/mm/init.c      |  3
-rw-r--r--  arch/sh/mm/pmb.c       | 79

4 files changed, 42 insertions(+), 80 deletions(-)
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
index 397c94c97315..ae039f2da162 100644
--- a/arch/sh/mm/copy_page.S
+++ b/arch/sh/mm/copy_page.S
@@ -129,6 +129,7 @@ ENTRY(__copy_user_page)
 	rts
 	nop
 #endif
+	.align 2
 .Lpsz:	.long	PAGE_SIZE
 /*
  * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
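
Note on the one-line addition above: on SH targets gas treats .align as a power of two, so .align 2 pads the literal pool to a 4-byte boundary. This presumably matters because SH's PC-relative mov.l loads, such as the one that fetches .Lpsz, can only address 4-byte-aligned words, and since SH instructions are two bytes each, the code under the preceding #ifdef can otherwise leave .Lpsz misaligned.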
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 9207da67ff8a..c878faa4ae46 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -15,43 +15,11 @@
 #include <linux/mm.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
-#include <linux/kdebug.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 #include <asm/kgdb.h>
 
-#ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-/* Hook to register for page fault notifications */
-int register_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-
-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
-}
-
-static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
-				    int trap, int sig)
-{
-	struct die_args args = {
-		.regs = regs,
-		.trapnr = trap,
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
-}
-#else
-static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
-				    int trap, int sig)
-{
-	return NOTIFY_DONE;
-}
-#endif
-
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
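
The block removed above is the arch-private page-fault notifier chain, presumably dropped as part of the tree-wide removal of register_page_fault_notifier()/unregister_page_fault_notifier(). For reference, a minimal sketch of the atomic-notifier-chain pattern the deleted code instantiated; the chain name and callback here are hypothetical and not part of this patch:

#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(example_chain);

static int example_cb(struct notifier_block *nb, unsigned long val,
		      void *data)
{
	/* Returning NOTIFY_STOP here would short-circuit later
	 * callbacks; that is how the deleted code let a registered
	 * handler swallow a fault before do_page_fault() ran. */
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_cb,
};

static void example_usage(void)
{
	atomic_notifier_chain_register(&example_chain, &example_nb);
	atomic_notifier_call_chain(&example_chain, 0, NULL);
	atomic_notifier_chain_unregister(&example_chain, &example_nb);
}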
@@ -69,11 +37,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	siginfo_t info;
 
 	trace_hardirqs_on();
-
-	if (notify_page_fault(DIE_PAGE_FAULT, regs,
-			      writeaccess, SIGSEGV) == NOTIFY_STOP)
-		return;
-
 	local_irq_enable();
 
 #ifdef CONFIG_SH_KGDB
@@ -285,7 +248,7 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 	pte_t *pte;
 	pte_t entry;
 	struct mm_struct *mm = current->mm;
-	spinlock_t *ptl;
+	spinlock_t *ptl = NULL;
 	int ret = 1;
 
 #ifdef CONFIG_SH_KGDB
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8fe223a890ed..e0e644ff3204 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/proc_fs.h>
+#include <linux/pagemap.h>
 #include <linux/percpu.h>
 #include <linux/io.h>
 #include <asm/mmu_context.h>
@@ -112,7 +113,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
  * As a performance optimization, other platforms preserve the fixmap mapping
  * across a context switch, we don't presently do this, but this could be done
  * in a similar fashion as to the wired TLB interface that sh64 uses (by way
- * of the memorry mapped UTLB configuration) -- this unfortunately forces us to
+ * of the memory mapped UTLB configuration) -- this unfortunately forces us to
  * give up a TLB entry for each mapping we want to preserve. While this may be
  * viable for a small number of fixmaps, it's not particularly useful for
  * everything and needs to be carefully evaluated. (ie, we may want this for
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 02aae06527dc..b6a5a338145b 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,7 +3,7 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
- * Copyright (C) 2005, 2006 Paul Mundt
+ * Copyright (C) 2005, 2006, 2007 Paul Mundt
  *
  * P1/P2 Section mapping definitions from map32.h, which was:
  *
@@ -68,6 +68,32 @@ static inline unsigned long mk_pmb_data(unsigned int entry)
 	return mk_pmb_entry(entry) | PMB_DATA;
 }
 
+static DEFINE_SPINLOCK(pmb_list_lock);
+static struct pmb_entry *pmb_list;
+
+static inline void pmb_list_add(struct pmb_entry *pmbe)
+{
+	struct pmb_entry **p, *tmp;
+
+	p = &pmb_list;
+	while ((tmp = *p) != NULL)
+		p = &tmp->next;
+
+	pmbe->next = tmp;
+	*p = pmbe;
+}
+
+static inline void pmb_list_del(struct pmb_entry *pmbe)
+{
+	struct pmb_entry **p, *tmp;
+
+	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
+		if (tmp == pmbe) {
+			*p = tmp->next;
+			return;
+		}
+}
+
 struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 			    unsigned long flags)
 {
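
pmb_list_add() and pmb_list_del(), moved here ahead of their first users, both rely on the pointer-to-pointer idiom: p always addresses the slot holding the current link (the list head or some node's next field), so deletion needs no special case for the head. A standalone sketch of the same idiom, with a hypothetical struct node standing in for struct pmb_entry; not part of this patch:

#include <stddef.h>

struct node {
	struct node *next;
};

static struct node *list;

/* Walk the chain of "next" slots until the terminating NULL
 * slot is found, then store the new node through it. */
static void list_add_tail(struct node *n)
{
	struct node **p = &list;

	while (*p != NULL)
		p = &(*p)->next;

	n->next = NULL;
	*p = n;
}

/* Unlink by overwriting whichever slot points at n; the head
 * pointer and a node's next field are handled identically. */
static void list_del(struct node *n)
{
	struct node **p;

	for (p = &list; *p != NULL; p = &(*p)->next)
		if (*p == n) {
			*p = n->next;
			return;
		}
}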
@@ -81,11 +107,19 @@ struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 	pmbe->ppn	= ppn;
 	pmbe->flags	= flags;
 
+	spin_lock_irq(&pmb_list_lock);
+	pmb_list_add(pmbe);
+	spin_unlock_irq(&pmb_list_lock);
+
 	return pmbe;
 }
 
 void pmb_free(struct pmb_entry *pmbe)
 {
+	spin_lock_irq(&pmb_list_lock);
+	pmb_list_del(pmbe);
+	spin_unlock_irq(&pmb_list_lock);
+
 	kmem_cache_free(pmb_cache, pmbe);
 }
 
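
Note that pmb_alloc() and pmb_free() now take pmb_list_lock with spin_lock_irq(), which unconditionally re-enables interrupts on unlock; that is only safe when these paths always run with interrupts enabled. A caller that might already hold interrupts disabled would want the state-preserving variant, sketched here with a hypothetical lock, not part of this patch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_locked_op(void)
{
	unsigned long flags;

	/* Save the current interrupt state and restore it on
	 * unlock, instead of forcing interrupts back on. */
	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}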
@@ -167,31 +201,6 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 	clear_bit(entry, &pmb_map);
 }
 
-static DEFINE_SPINLOCK(pmb_list_lock);
-static struct pmb_entry *pmb_list;
-
-static inline void pmb_list_add(struct pmb_entry *pmbe)
-{
-	struct pmb_entry **p, *tmp;
-
-	p = &pmb_list;
-	while ((tmp = *p) != NULL)
-		p = &tmp->next;
-
-	pmbe->next = tmp;
-	*p = pmbe;
-}
-
-static inline void pmb_list_del(struct pmb_entry *pmbe)
-{
-	struct pmb_entry **p, *tmp;
-
-	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
-		if (tmp == pmbe) {
-			*p = tmp->next;
-			return;
-		}
-}
 
 static struct {
 	unsigned long size;
@@ -283,25 +292,14 @@ void pmb_unmap(unsigned long addr)
 	} while (pmbe);
 }
 
-static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep,
+			   unsigned long flags)
 {
 	struct pmb_entry *pmbe = pmb;
 
 	memset(pmb, 0, sizeof(struct pmb_entry));
 
-	spin_lock_irq(&pmb_list_lock);
-
 	pmbe->entry = PMB_NO_ENTRY;
-	pmb_list_add(pmbe);
-
-	spin_unlock_irq(&pmb_list_lock);
-}
-
-static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
-{
-	spin_lock_irq(&pmb_list_lock);
-	pmb_list_del(pmb);
-	spin_unlock_irq(&pmb_list_lock);
 }
 
 static int __init pmb_init(void)
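
Moving the list bookkeeping out of the slab callbacks and into pmb_alloc()/pmb_free() also changes what pmb_list means: a slab constructor runs when a slab page is first populated with objects, not on every kmem_cache_alloc(), and the destructor only ran when cached objects were reclaimed, so the old scheme tracked cached objects rather than live ones. Tying the add/del to allocation and free makes list membership follow the entry's actual lifetime, and, presumably the motivation here, drops the cache's dependence on destructor support, which was on its way out of the slab allocator at the time.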
@@ -312,8 +310,7 @@ static int __init pmb_init(void)
 	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
 
 	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
-				      SLAB_PANIC, pmb_cache_ctor,
-				      pmb_cache_dtor);
+				      SLAB_PANIC, pmb_cache_ctor, NULL);
 
 	jump_to_P2();
 
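
With the destructor gone, the cache is now created with a NULL dtor argument. For reference, a minimal sketch of kmem_cache_create() as used here, assuming the 2.6.21-era signature (name, size, align, flags, ctor, dtor); the object type and names are hypothetical, not part of this patch:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>

struct obj {
	int val;
};

static struct kmem_cache *obj_cache;

/* Constructor: runs once per object when a new slab page is
 * populated, not on every allocation. */
static void obj_ctor(void *p, struct kmem_cache *cachep,
		     unsigned long flags)
{
	memset(p, 0, sizeof(struct obj));
}

static int __init obj_cache_init(void)
{
	/* SLAB_PANIC panics on failure, so no NULL check is needed;
	 * the NULL dtor means nothing runs when slabs are reclaimed. */
	obj_cache = kmem_cache_create("obj", sizeof(struct obj), 0,
				      SLAB_PANIC, obj_ctor, NULL);
	return 0;
}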