 arch/score/include/asm/cacheflush.h   |  18
 arch/score/include/asm/delay.h        |  11
 arch/score/include/asm/errno.h        |   1
 arch/score/include/asm/pgtable-bits.h |   2
 arch/score/include/asm/pgtable.h      |  59
 arch/score/include/asm/ptrace.h       |  18
 arch/score/include/asm/unistd.h       |   3
 arch/score/kernel/entry.S             |   6
 arch/score/kernel/process.c           |   2
 arch/score/kernel/ptrace.c            | 144
 arch/score/kernel/signal.c            |   6
 arch/score/kernel/sys_score.c         |   7
 arch/score/kernel/traps.c             |   2
 arch/score/mm/cache.c                 | 125
 14 files changed, 154 insertions(+), 250 deletions(-)
diff --git a/arch/score/include/asm/cacheflush.h b/arch/score/include/asm/cacheflush.h
index 1c74628caf71..07cc8fc457cd 100644
--- a/arch/score/include/asm/cacheflush.h
+++ b/arch/score/include/asm/cacheflush.h
@@ -4,18 +4,16 @@
 /* Keep includes the same across arches. */
 #include <linux/mm.h>
 
-extern void (*flush_cache_all)(void);
-extern void (*flush_cache_mm)(struct mm_struct *mm);
-extern void (*flush_cache_range)(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end);
-extern void (*flush_cache_page)(struct vm_area_struct *vma,
-	unsigned long page, unsigned long pfn);
-extern void (*flush_cache_sigtramp)(unsigned long addr);
-extern void (*flush_icache_all)(void);
-extern void (*flush_icache_range)(unsigned long start, unsigned long end);
-extern void (*flush_data_cache_page)(unsigned long addr);
-
-extern void s7_flush_cache_all(void);
+extern void flush_cache_all(void);
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_range(struct vm_area_struct *vma,
+	unsigned long start, unsigned long end);
+extern void flush_cache_page(struct vm_area_struct *vma,
+	unsigned long page, unsigned long pfn);
+extern void flush_cache_sigtramp(unsigned long addr);
+extern void flush_icache_all(void);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_dcache_range(unsigned long start, unsigned long end);
 
 #define flush_cache_dup_mm(mm)		do {} while (0)
 #define flush_dcache_page(page)		do {} while (0)
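This hunk stops declaring the cache-flush entry points as writable function pointers and makes them plain functions; the indirection (and the exported s7_flush_cache_all()) existed only so cpu_cache_init() could patch in the SCORE7 routines at boot, which the cache.c changes below make unnecessary. A minimal sketch of what changes for a caller (example_flush() is a hypothetical illustration, not part of the patch):

    /* before: an extra pointer load per call, assigned in cpu_cache_init() */
    extern void (*flush_icache_range)(unsigned long start, unsigned long end);

    /* after: an ordinary symbol, resolved at link time */
    extern void flush_icache_range(unsigned long start, unsigned long end);

    static void example_flush(unsigned long start, unsigned long end)
    {
            flush_icache_range(start, end);  /* the call site is unchanged */
    }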
diff --git a/arch/score/include/asm/delay.h b/arch/score/include/asm/delay.h
index ad716f6d922d..6726ec199dc0 100644
--- a/arch/score/include/asm/delay.h
+++ b/arch/score/include/asm/delay.h
@@ -3,17 +3,22 @@
 
 static inline void __delay(unsigned long loops)
 {
+	/* 3 cycles per loop. */
 	__asm__ __volatile__ (
-		"1:\tsubi\t%0,1\n\t"
+		"1:\tsubi\t%0, 3\n\t"
 		"cmpz.c\t%0\n\t"
-		"bne\t1b\n\t"
+		"ble\t1b\n\t"
 		: "=r" (loops)
 		: "0" (loops));
 }
 
 static inline void __udelay(unsigned long usecs)
 {
-	__delay(usecs);
+	unsigned long loops_per_usec;
+
+	loops_per_usec = (loops_per_jiffy * HZ) / 1000000;
+
+	__delay(usecs * loops_per_usec);
 }
 
 #define udelay(usecs) __udelay(usecs)
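The old __udelay() treated a microsecond count as a raw loop count, so the delay depended entirely on clock speed; the new version scales by the bogomips calibration (loops_per_jiffy * HZ loops per second), and __delay() now retires three cycles per iteration, hence the subi of 3. A rough user-space model of the scaling, with an invented loops_per_jiffy value:

    #include <stdio.h>

    #define HZ 100UL  /* assumed tick rate, for illustration only */

    int main(void)
    {
            unsigned long loops_per_jiffy = 1205000UL; /* hypothetical calibration */
            unsigned long usecs = 50;
            unsigned long loops_per_usec = (loops_per_jiffy * HZ) / 1000000UL;

            /* __udelay(usecs) then becomes __delay(usecs * loops_per_usec) */
            printf("udelay(%lu) -> __delay(%lu)\n", usecs, usecs * loops_per_usec);
            return 0;
    }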
diff --git a/arch/score/include/asm/errno.h b/arch/score/include/asm/errno.h
index 7cd3e1f07c0b..29ff39d5ab47 100644
--- a/arch/score/include/asm/errno.h
+++ b/arch/score/include/asm/errno.h
@@ -2,6 +2,5 @@
 #define _ASM_SCORE_ERRNO_H
 
 #include <asm-generic/errno.h>
-#define EMAXERRNO 1024
 
 #endif /* _ASM_SCORE_ERRNO_H */
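The private EMAXERRNO (1024) ceiling disappears here; the entry.S hunks below switch the syscall error test to the kernel-wide MAX_ERRNO from <linux/err.h>, so the architecture agrees with the rest of the tree about which negative return values are errno codes. Roughly, from that header:

    #define MAX_ERRNO	4095
    /* a value is an error pointer/code when it falls in the top window */
    #define IS_ERR_VALUE(x)	((x) >= (unsigned long)-MAX_ERRNO)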
diff --git a/arch/score/include/asm/pgtable-bits.h b/arch/score/include/asm/pgtable-bits.h
index ca16d357a644..7d65a96a82e5 100644
--- a/arch/score/include/asm/pgtable-bits.h
+++ b/arch/score/include/asm/pgtable-bits.h
@@ -17,6 +17,8 @@
 #define _CACHE_MASK		(1<<3)
 #define _PAGE_BUFFERABLE	(1<<4) /*Fallow Spec. */
 
+#define __READABLE	(_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
+#define __WRITEABLE	(_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
 #define _PAGE_CHG_MASK \
 	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_CACHE)
 
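__READABLE and __WRITEABLE bundle each hardware permission bit with its matching "silent" TLB bit and the accessed/dirty software bit, and the new PAGE_KERNEL definitions in pgtable.h below are built from them. For instance, PAGE_KERNEL there expands (modulo operator precedence) to roughly:

    _PAGE_PRESENT | _PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED |
    _PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED |
    _PAGE_GLOBAL | _PAGE_CACHE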
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index 0f7177a42205..5e913e57c671 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -106,24 +106,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 		((swp_entry_t) { pte_val(pte)})
 #define __swp_entry_to_pte(x)	((pte_t) {(x).val})
 
-#define __P000	__pgprot(0)
-#define __P001	__pgprot(0)
-#define __P010	__pgprot(0)
-#define __P011	__pgprot(0)
-#define __P100	__pgprot(0)
-#define __P101	__pgprot(0)
-#define __P110	__pgprot(0)
-#define __P111	__pgprot(0)
-
-#define __S000	__pgprot(0)
-#define __S001	__pgprot(0)
-#define __S010	__pgprot(0)
-#define __S011	__pgprot(0)
-#define __S100	__pgprot(0)
-#define __S101	__pgprot(0)
-#define __S110	__pgprot(0)
-#define __S111	__pgprot(0)
-
 #define pmd_page(pmd)		virt_to_page(__va(pmd_val(pmd)))
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
 static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
@@ -136,10 +118,15 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define pgd_present(pgd)	(1)	/* pages are always present on non MMU */
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+#define pgd_present(pgd)	(1)
 #define pgd_none(pgd)		(0)
 #define pgd_bad(pgd)		(0)
-#define pgd_clear(pgdp)
+#define pgd_clear(pgdp)		do { } while (0)
 
 #define kern_addr_valid(addr)	(1)
 #define pmd_offset(a, b)	((void *) 0)
@@ -150,11 +137,33 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 #define pud_offset(pgd, address)	((pud_t *) pgd)
 
-#define PAGE_NONE	__pgprot(0)	/* these mean nothing to non MMU */
-#define PAGE_SHARED	__pgprot(0)	/* these mean nothing to non MMU */
-#define PAGE_COPY	__pgprot(0)	/* these mean nothing to non MMU */
-#define PAGE_READONLY	__pgprot(0)	/* these mean nothing to non MMU */
-#define PAGE_KERNEL	__pgprot(0)	/* these mean nothing to non MMU */
+#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_CACHE)
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+				 _PAGE_CACHE)
+#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE)
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE)
+#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+				 _PAGE_GLOBAL | _PAGE_CACHE)
+#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
+				 __WRITEABLE | _PAGE_GLOBAL & ~_PAGE_CACHE)
+
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY
+#define __P100	PAGE_READONLY
+#define __P101	PAGE_READONLY
+#define __P110	PAGE_COPY
+#define __P111	PAGE_COPY
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED
+#define __S100	PAGE_READONLY
+#define __S101	PAGE_READONLY
+#define __S110	PAGE_SHARED
+#define __S111	PAGE_SHARED
 
 #define pgprot_noncached(x)	(x)
 
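With this, the __Pxxx/__Sxxx entries stop being __pgprot(0) placeholders (the earlier hunk deletes those) and become real protections. Generic mm code copies them into protection_map[], indexed by the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED flag bits, so a private PROT_READ|PROT_WRITE mapping resolves to __P011 == PAGE_COPY and write faults can do copy-on-write. A simplified sketch of the lookup as mm/mmap.c performs it:

    /* simplified: the kernel fills protection_map[] from __P000..__S111 */
    pgprot_t example_vm_page_prot(unsigned long vm_flags)
    {
            return protection_map[vm_flags &
                    (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }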
diff --git a/arch/score/include/asm/ptrace.h b/arch/score/include/asm/ptrace.h
index 1a4900ac49f3..66b14c8891cf 100644
--- a/arch/score/include/asm/ptrace.h
+++ b/arch/score/include/asm/ptrace.h
@@ -1,6 +1,9 @@
 #ifndef _ASM_SCORE_PTRACE_H
 #define _ASM_SCORE_PTRACE_H
 
+#define PTRACE_GETREGS		12
+#define PTRACE_SETREGS		13
+
 #define PC		32
 #define CONDITION	33
 #define ECR		34
@@ -76,12 +79,17 @@ struct pt_regs {
  */
 #define user_mode(regs)	((regs->cp0_psr & 8) == 8)
 
-#define instruction_pointer(regs)	(0)
+#define instruction_pointer(regs)	((unsigned long)(regs)->cp0_epc)
 #define profile_pc(regs)		instruction_pointer(regs)
 
-extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit);
+extern void do_syscall_trace(struct pt_regs *regs, int entryexit);
 extern int read_tsk_long(struct task_struct *, unsigned long, unsigned long *);
-extern void clear_single_step(struct task_struct *);
-#endif
+extern int read_tsk_short(struct task_struct *, unsigned long,
+			 unsigned short *);
+
+#define arch_has_single_step()	(1)
+extern void user_enable_single_step(struct task_struct *);
+extern void user_disable_single_step(struct task_struct *);
+#endif /* __KERNEL__ */
 
 #endif /* _ASM_SCORE_PTRACE_H */
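Defining arch_has_single_step() to 1 opts the architecture into the generic single-step machinery in kernel/ptrace.c, which is why the PTRACE_SINGLESTEP case can disappear from arch_ptrace() below: the core code calls the user_{enable,disable}_single_step() hooks itself. A simplified sketch of the generic resume logic (not score code, and condensed from the real ptrace_resume()):

    static int resume_sketch(struct task_struct *child, long request, long data)
    {
            if (request == PTRACE_SINGLESTEP)
                    user_enable_single_step(child);
            else
                    user_disable_single_step(child);

            child->exit_code = data;
            wake_up_process(child);
            return 0;
    }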
diff --git a/arch/score/include/asm/unistd.h b/arch/score/include/asm/unistd.h
index 9aa3a159bbf6..f0f84deeb564 100644
--- a/arch/score/include/asm/unistd.h
+++ b/arch/score/include/asm/unistd.h
@@ -1,7 +1,8 @@
-#ifndef _ASM_SCORE_UNISTD_H
+#if !defined(_ASM_SCORE_UNISTD_H) || defined(__SYSCALL)
 #define _ASM_SCORE_UNISTD_H
 
 #define __ARCH_HAVE_MMU
+#define __ARCH_WANT_IPC_PARSE_VERSION
 
 #include <asm-generic/unistd.h>
 
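The loosened guard admits re-inclusion whenever __SYSCALL is defined, which is how the asm-generic unistd scheme builds syscall tables: the table file defines __SYSCALL to emit one initializer per entry and then includes the header a second time. A simplified illustration of the pattern (not the actual score table file):

    #define __SYSCALL(nr, entry)	[nr] = (unsigned long)entry,
    unsigned long sys_call_table[] = {
    #include <asm/unistd.h>	/* each __SYSCALL() row becomes a table slot */
    };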
diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
index 6c6b7ea58afa..0af89b2f16b0 100644
--- a/arch/score/kernel/entry.S
+++ b/arch/score/kernel/entry.S
@@ -23,7 +23,7 @@
  * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include <linux/errno.h>
+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/linkage.h>
 
@@ -434,7 +434,7 @@ stack_done:
 	sw	r8, [r0, PT_R7]
 	b	2f
 1:
-	cmpi.c	r4, -EMAXERRNO-1	# -EMAXERRNO - 1=-1134
+	cmpi.c	r4, -MAX_ERRNO - 1
 	ble	2f
 	ldi	r8, 0x1;
 	sw	r8, [r0, PT_R7]
@@ -466,7 +466,7 @@ syscall_trace_entry:
 	lw	r7, [r0, PT_R7]
 	brl	r8
 
-	li	r8, -EMAXERRNO - 1	# error?
+	li	r8, -MAX_ERRNO - 1
 	sw	r8, [r0, PT_R7]		# set error flag
 
 	neg	r4, r4			# error
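Both hunks replace the home-grown EMAXERRNO bound (1024) with MAX_ERRNO (4095) from <linux/err.h>, matching how every other architecture classifies a syscall return value as an -errno. The test "cmpi.c r4, -MAX_ERRNO - 1; ble 2f" skips the error path when r4 is at or below the errno window; in C terms, a sketch (the r4 < 0 half is established by the branch that reaches label 1):

    #define MAX_ERRNO 4095

    /* true when r4 holds -errno rather than a plain return value */
    static int in_errno_window(long r4)
    {
            return r4 > -(long)MAX_ERRNO - 1 && r4 < 0;
    }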
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index aaa3085251fa..d93966f7ac83 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -56,7 +56,7 @@ void __noreturn cpu_idle(void)
 	}
 }
 
-asmlinkage void ret_from_fork(void);
+void ret_from_fork(void);
 
 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
 {
diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c
index 8fe7209355aa..19911e3187be 100644
--- a/arch/score/kernel/ptrace.c
+++ b/arch/score/kernel/ptrace.c
@@ -80,7 +80,35 @@ write_tsk_long(struct task_struct *child,
 	return copied != sizeof(val) ? -EIO : 0;
 }
 
-void set_single_step(struct task_struct *child)
+/*
+ * Get all user integer registers.
+ */
+static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
+{
+	struct pt_regs *regs = task_pt_regs(tsk);
+
+	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
+}
+
+/*
+ * Set all user integer registers.
+ */
+static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
+{
+	struct pt_regs newregs;
+	int ret;
+
+	ret = -EFAULT;
+	if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
+		struct pt_regs *regs = task_pt_regs(tsk);
+		*regs = newregs;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+void user_enable_single_step(struct task_struct *child)
 {
 	/* far_epc is the target of branch */
 	unsigned int epc, far_epc = 0;
@@ -201,7 +229,7 @@ void set_single_step(struct task_struct *child)
 	}
 }
 
-void clear_single_step(struct task_struct *child)
+void user_disable_single_step(struct task_struct *child)
 {
 	if (child->thread.insn1_type == 0)
 		write_tsk_short(child, child->thread.addr1,
@@ -230,54 +258,17 @@ void clear_single_step(struct task_struct *child)
 	child->thread.ss_nextcnt = 0;
 }
 
-
-void ptrace_disable(struct task_struct *child) {}
+void ptrace_disable(struct task_struct *child)
+{
+	user_disable_single_step(child);
+}
 
 long
 arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	int ret;
 
-	if (request == PTRACE_TRACEME) {
-		/* are we already being traced? */
-		if (current->ptrace & PT_PTRACED)
-			return -EPERM;
-
-		/* set the ptrace bit in the process flags. */
-		current->ptrace |= PT_PTRACED;
-		return 0;
-	}
-
-	ret = -ESRCH;
-	if (!child)
-		return ret;
-
-	ret = -EPERM;
-
-	if (request == PTRACE_ATTACH) {
-		ret = ptrace_attach(child);
-		return ret;
-	}
-
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-	if (ret < 0)
-		return ret;
-
 	switch (request) {
-	case PTRACE_PEEKTEXT: /* read word at location addr. */
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-
-		ret = put_user(tmp, (unsigned long *) data);
-		return ret;
-	}
-
 	/* Read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
 		struct pt_regs *regs;
@@ -329,15 +320,6 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		return ret;
 	}
 
-	case PTRACE_POKETEXT: /* write the word at location addr. */
-	case PTRACE_POKEDATA:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1)
-				== sizeof(data))
-			break;
-		ret = -EIO;
-		return ret;
-
 	case PTRACE_POKEUSR: {
 		struct pt_regs *regs;
 		ret = 0;
@@ -372,64 +354,16 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		break;
 	}
 
-	case PTRACE_SYSCALL:	/* continue and stop at next
-				(return from) syscall. */
-	case PTRACE_CONT: {	/* restart after signal. */
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		if (request == PTRACE_SYSCALL)
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		else
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-
-		child->exit_code = data;
-		wake_up_process(child);
-		ret = 0;
-		break;
-	}
-
-	/*
-	 * make the child exit. Best I can do is send it a sigkill.
-	 * perhaps it should be put in the status that it wants to
-	 * exit.
-	 */
-	case PTRACE_KILL:
-		ret = 0;
-		if (child->state == EXIT_ZOMBIE)	/* already dead. */
-			break;
-		child->exit_code = SIGKILL;
-		clear_single_step(child);
-		wake_up_process(child);
+	case PTRACE_GETREGS:
+		ret = ptrace_getregs(child, (void __user *)data);
 		break;
 
-	case PTRACE_SINGLESTEP: {	/* set the trap flag. */
-		ret = -EIO;
-		if ((unsigned long) data > _NSIG)
-			break;
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		set_single_step(child);
-		child->exit_code = data;
-		/* give it a chance to run. */
-		wake_up_process(child);
-		ret = 0;
-		break;
-	}
-
-	case PTRACE_DETACH: /* detach a process that was attached. */
-		ret = ptrace_detach(child, data);
-		break;
-
-	case PTRACE_SETOPTIONS:
-		if (data & PTRACE_O_TRACESYSGOOD)
-			child->ptrace |= PT_TRACESYSGOOD;
-		else
-			child->ptrace &= ~PT_TRACESYSGOOD;
-		ret = 0;
+	case PTRACE_SETREGS:
+		ret = ptrace_setregs(child, (void __user *)data);
 		break;
 
 	default:
-		ret = -EIO;
+		ret = ptrace_request(child, request, addr, data);
 		break;
 	}
 
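Everything generic code can already do — TRACEME/ATTACH plumbing, PEEKTEXT/POKETEXT, PTRACE_CONT, PTRACE_KILL, PTRACE_DETACH, PTRACE_SETOPTIONS — is delegated to ptrace_request(), leaving only the genuinely arch-specific cases, and GETREGS/SETREGS move a whole pt_regs block in one call instead of per-word PEEKUSR/POKEUSR loops. From a debugger the new request might be used like this (a sketch assuming the ptrace(2) wrapper; the request number 12 comes from the asm/ptrace.h hunk above, and the local macro name is mine):

    #include <sys/ptrace.h>
    #include <sys/types.h>

    #define PTRACE_GETREGS_NR 12	/* PTRACE_GETREGS from asm/ptrace.h */

    long read_all_regs(pid_t pid, void *regs_buf /* pt_regs-sized buffer */)
    {
            /* one syscall for the whole register file, instead of ~34
             * PTRACE_PEEKUSR round trips */
            return ptrace(PTRACE_GETREGS_NR, pid, (void *)0, regs_buf);
    }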
diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c
index b4ed1b3f8072..5c004084d17d 100644
--- a/arch/score/kernel/signal.c
+++ b/arch/score/kernel/signal.c
@@ -132,7 +132,7 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
 	return (void *)((sp - frame_size) & ~7);
 }
 
-asmlinkage int score_sigaltstack(struct pt_regs *regs)
+int score_sigaltstack(struct pt_regs *regs)
 {
 	const stack_t *uss = (const stack_t *) regs->regs[4];
 	stack_t *uoss = (stack_t *) regs->regs[5];
@@ -141,7 +141,7 @@ asmlinkage int score_sigaltstack(struct pt_regs *regs)
 	return do_sigaltstack(uss, uoss, usp);
 }
 
-asmlinkage void score_rt_sigreturn(struct pt_regs *regs)
+void score_rt_sigreturn(struct pt_regs *regs)
 {
 	struct rt_sigframe __user *frame;
 	sigset_t set;
@@ -276,7 +276,7 @@ int handle_signal(unsigned long sig, siginfo_t *info,
 	return ret;
 }
 
-asmlinkage void do_signal(struct pt_regs *regs)
+void do_signal(struct pt_regs *regs)
 {
 	struct k_sigaction ka;
 	sigset_t *oldset;
diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c
index 6a60d1ee5330..68655f4cbce9 100644
--- a/arch/score/kernel/sys_score.c
+++ b/arch/score/kernel/sys_score.c
@@ -64,8 +64,7 @@ sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
  * Clone a task - this clones the calling program thread.
  * This is called indirectly via a small wrapper
  */
-asmlinkage int
-score_clone(struct pt_regs *regs)
+int score_clone(struct pt_regs *regs)
 {
 	unsigned long clone_flags;
 	unsigned long newsp;
@@ -93,7 +92,7 @@ score_clone(struct pt_regs *regs)
  * sys_execve() executes a new program.
  * This is called indirectly via a small wrapper
  */
-asmlinkage int score_execve(struct pt_regs *regs)
+int score_execve(struct pt_regs *regs)
 {
 	int error;
 	char *filename;
@@ -114,7 +113,7 @@ asmlinkage int score_execve(struct pt_regs *regs)
  * If we ever come here the user sp is bad. Zap the process right away.
  * Due to the bad stack signaling wouldn't work.
  */
-asmlinkage void bad_stack(void)
+void bad_stack(void)
 {
 	do_exit(SIGSEGV);
 }
diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c
index 957ae9eb3567..0e46fb19a848 100644
--- a/arch/score/kernel/traps.c
+++ b/arch/score/kernel/traps.c
@@ -252,7 +252,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
 	if (current->thread.single_step == 1) {
 		if ((epc == current->thread.addr1) ||
 			(epc == current->thread.addr2)) {
-			clear_single_step(current);
+			user_disable_single_step(current);
 			force_sig(SIGTRAP, current);
 			return;
 		} else
diff --git a/arch/score/mm/cache.c b/arch/score/mm/cache.c
index 1ebc67f18c6d..dbac9d9dfddd 100644
--- a/arch/score/mm/cache.c
+++ b/arch/score/mm/cache.c
@@ -32,34 +32,26 @@
 
 #include <asm/mmu_context.h>
 
-/* Cache operations. */
-void (*flush_cache_all)(void);
-void (*__flush_cache_all)(void);
-void (*flush_cache_mm)(struct mm_struct *mm);
-void (*flush_cache_range)(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end);
-void (*flush_cache_page)(struct vm_area_struct *vma,
-	unsigned long page, unsigned long pfn);
-void (*flush_icache_range)(unsigned long start, unsigned long end);
-void (*__flush_cache_vmap)(void);
-void (*__flush_cache_vunmap)(void);
-void (*flush_cache_sigtramp)(unsigned long addr);
-void (*flush_data_cache_page)(unsigned long addr);
-EXPORT_SYMBOL(flush_data_cache_page);
-void (*flush_icache_all)(void);
-
-/*Score 7 cache operations*/
-static inline void s7___flush_cache_all(void);
-static void s7_flush_cache_mm(struct mm_struct *mm);
-static void s7_flush_cache_range(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end);
-static void s7_flush_cache_page(struct vm_area_struct *vma,
-	unsigned long page, unsigned long pfn);
-static void s7_flush_icache_range(unsigned long start, unsigned long end);
-static void s7_flush_cache_sigtramp(unsigned long addr);
-static void s7_flush_data_cache_page(unsigned long addr);
-static void s7_flush_dcache_range(unsigned long start, unsigned long end);
+/*
+Just flush entire Dcache!!
+You must ensure the page doesn't include instructions, because
+the function will not flush the Icache.
+The addr must be cache aligned.
+*/
+static void flush_data_cache_page(unsigned long addr)
+{
+	unsigned int i;
+	for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) {
+		__asm__ __volatile__(
+			"cache 0x0e, [%0, 0]\n"
+			"cache 0x1a, [%0, 0]\n"
+			"nop\n"
+			: : "r" (addr));
+		addr += L1_CACHE_BYTES;
+	}
+}
 
+/* called by update_mmu_cache. */
 void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	pte_t pte)
 {
@@ -74,7 +66,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
 		addr = (unsigned long) page_address(page);
 		if (exec)
-			s7_flush_data_cache_page(addr);
+			flush_data_cache_page(addr);
 		clear_bit(PG_arch_1, &page->flags);
 	}
 }
@@ -101,44 +93,22 @@ static inline void setup_protection_map(void)
 
 void __devinit cpu_cache_init(void)
 {
-	flush_cache_all = s7_flush_cache_all;
-	__flush_cache_all = s7___flush_cache_all;
-	flush_cache_mm = s7_flush_cache_mm;
-	flush_cache_range = s7_flush_cache_range;
-	flush_cache_page = s7_flush_cache_page;
-	flush_icache_range = s7_flush_icache_range;
-	flush_cache_sigtramp = s7_flush_cache_sigtramp;
-	flush_data_cache_page = s7_flush_data_cache_page;
-
 	setup_protection_map();
 }
 
-void s7_flush_icache_all(void)
+void flush_icache_all(void)
 {
 	__asm__ __volatile__(
-		"la r8, s7_flush_icache_all\n"
+		"la r8, flush_icache_all\n"
 		"cache 0x10, [r8, 0]\n"
 		"nop\nnop\nnop\nnop\nnop\nnop\n"
 		: : : "r8");
 }
 
-void s7_flush_dcache_all(void)
-{
-	__asm__ __volatile__(
-		"la r8, s7_flush_dcache_all\n"
-		"cache 0x1f, [r8, 0]\n"
-		"nop\nnop\nnop\nnop\nnop\nnop\n"
-		"cache 0x1a, [r8, 0]\n"
-		"nop\nnop\nnop\nnop\nnop\nnop\n"
-		: : : "r8");
-}
-
-void s7_flush_cache_all(void)
+void flush_dcache_all(void)
 {
 	__asm__ __volatile__(
-		"la r8, s7_flush_cache_all\n"
-		"cache 0x10, [r8, 0]\n"
-		"nop\nnop\nnop\nnop\nnop\nnop\n"
+		"la r8, flush_dcache_all\n"
 		"cache 0x1f, [r8, 0]\n"
 		"nop\nnop\nnop\nnop\nnop\nnop\n"
 		"cache 0x1a, [r8, 0]\n"
@@ -146,10 +116,10 @@ void s7_flush_cache_all(void)
 		: : : "r8");
 }
 
-void s7___flush_cache_all(void)
+void flush_cache_all(void)
 {
 	__asm__ __volatile__(
-		"la r8, s7_flush_cache_all\n"
+		"la r8, flush_cache_all\n"
 		"cache 0x10, [r8, 0]\n"
 		"nop\nnop\nnop\nnop\nnop\nnop\n"
 		"cache 0x1f, [r8, 0]\n"
@@ -159,11 +129,11 @@ void s7___flush_cache_all(void)
 		: : : "r8");
 }
 
-static void s7_flush_cache_mm(struct mm_struct *mm)
+void flush_cache_mm(struct mm_struct *mm)
 {
 	if (!(mm->context))
 		return;
-	s7_flush_cache_all();
+	flush_cache_all();
 }
 
 /*if we flush a range precisely , the processing may be very long.
@@ -176,8 +146,7 @@ The interface is provided in hopes that the port can find
 a suitably efficient method for removing multiple page
 sized regions from the cache.
 */
-static void
-s7_flush_cache_range(struct vm_area_struct *vma,
+void flush_cache_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -209,27 +178,26 @@ s7_flush_cache_range(struct vm_area_struct *vma,
 		tmpend = (start | (PAGE_SIZE-1)) > end ?
 				 end : (start | (PAGE_SIZE-1));
 
-		s7_flush_dcache_range(start, tmpend);
+		flush_dcache_range(start, tmpend);
 		if (exec)
-			s7_flush_icache_range(start, tmpend);
+			flush_icache_range(start, tmpend);
 		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
 	}
 }
 
-static void
-s7_flush_cache_page(struct vm_area_struct *vma,
+void flush_cache_page(struct vm_area_struct *vma,
 	unsigned long addr, unsigned long pfn)
 {
 	int exec = vma->vm_flags & VM_EXEC;
 	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);
 
-	s7_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
+	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
 
 	if (exec)
-		s7_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
+		flush_icache_range(kaddr, kaddr + PAGE_SIZE);
 }
 
-static void s7_flush_cache_sigtramp(unsigned long addr)
+void flush_cache_sigtramp(unsigned long addr)
 {
 	__asm__ __volatile__(
 		"cache 0x02, [%0, 0]\n"
@@ -248,30 +216,11 @@ static void s7_flush_cache_sigtramp(unsigned long addr)
 }
 
 /*
-Just flush entire Dcache!!
-You must ensure the page doesn't include instructions, because
-the function will not flush the Icache.
-The addr must be cache aligned.
-*/
-static void s7_flush_data_cache_page(unsigned long addr)
-{
-	unsigned int i;
-	for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) {
-		__asm__ __volatile__(
-			"cache 0x0e, [%0, 0]\n"
-			"cache 0x1a, [%0, 0]\n"
-			"nop\n"
-			: : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-/*
 1. WB and invalid a cache line of Dcache
 2. Drain Write Buffer
 the range must be smaller than PAGE_SIZE
 */
-static void s7_flush_dcache_range(unsigned long start, unsigned long end)
+void flush_dcache_range(unsigned long start, unsigned long end)
 {
 	int size, i;
 
@@ -290,7 +239,7 @@ static void s7_flush_dcache_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void s7_flush_icache_range(unsigned long start, unsigned long end)
+void flush_icache_range(unsigned long start, unsigned long end)
 {
 	int size, i;
 	start = start & ~(L1_CACHE_BYTES - 1);
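One aside on the flush_data_cache_page() body moved to the top of cache.c: its loop compares i against PAGE_SIZE / L1_CACHE_BYTES (an iteration count) while stepping i by L1_CACHE_BYTES (a byte stride), so as committed it walks only the first PAGE_SIZE / L1_CACHE_BYTES bytes of the page rather than all of it. A sketch of the presumably intended per-line walk, under the assumption that the whole page should be written back and invalidated (the fixed-up name is mine):

    static void flush_data_cache_page_sketch(unsigned long addr)
    {
            unsigned long end = addr + PAGE_SIZE;

            for (; addr < end; addr += L1_CACHE_BYTES)
                    __asm__ __volatile__(
                            "cache 0x0e, [%0, 0]\n"	/* writeback + invalidate D-line */
                            "cache 0x1a, [%0, 0]\n"	/* drain write buffer */
                            "nop\n"
                            : : "r" (addr));
    }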