-rw-r--r--  arch/parisc/include/asm/cacheflush.h   16
-rw-r--r--  arch/parisc/kernel/asm-offsets.c       15
-rw-r--r--  arch/parisc/kernel/entry.S             52
-rw-r--r--  arch/parisc/kernel/syscall.S           32
-rw-r--r--  arch/parisc/math-emu/decode_exc.c       1
-rw-r--r--  arch/parisc/mm/fault.c                  7
6 files changed, 47 insertions(+), 76 deletions(-)
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 477277739da5..4556d820128a 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -2,6 +2,7 @@
 #define _PARISC_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#include <linux/uaccess.h>
 
 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
  * Unfortunately, that doesn't apply to PA-RISC. */
@@ -125,11 +126,20 @@ static inline void *kmap(struct page *page)
 
 #define kunmap(page)			kunmap_parisc(page_address(page))
 
-#define kmap_atomic(page, idx)		page_address(page)
+static inline void *kmap_atomic(struct page *page, enum km_type idx)
+{
+	pagefault_disable();
+	return page_address(page);
+}
 
-#define kunmap_atomic(addr, idx)	kunmap_parisc(addr)
+static inline void kunmap_atomic(void *addr, enum km_type idx)
+{
+	kunmap_parisc(addr);
+	pagefault_enable();
+}
 
-#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
+#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
+#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
 #endif
 
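Note: the kmap_atomic()/kunmap_atomic() pair above now bracket the mapping with pagefault_disable()/pagefault_enable(), so a fault inside the window can no longer schedule. A minimal usage sketch for a 2.6.3x-era caller follows; the helper name zero_page_atomic() is hypothetical, only kmap_atomic()/kunmap_atomic() and the KM_USER0 slot come from the kernel of that era.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper, for illustration only. */
static void zero_page_atomic(struct page *page)
{
	/* Safe in non-sleeping context: kmap_atomic() disables pagefaults
	 * for the duration of the mapping. */
	void *addr = kmap_atomic(page, KM_USER0);

	memset(addr, 0, PAGE_SIZE);
	kunmap_atomic(addr, KM_USER0);
}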
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index ec787b411e9a..dcd55103a4bb 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -45,8 +45,12 @@
 #else
 #define FRAME_SIZE	64
 #endif
+#define FRAME_ALIGN	64
 
-#define align(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
+/* Add FRAME_SIZE to the size x and align it to y. All definitions
+ * that use align_frame will include space for a frame.
+ */
+#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
 
 int main(void)
 {
@@ -146,7 +150,8 @@ int main(void)
 	DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
 	BLANK();
 	DEFINE(TASK_SZ, sizeof(struct task_struct));
-	DEFINE(TASK_SZ_ALGN, align(sizeof(struct task_struct), 64));
+	/* TASK_SZ_ALGN includes space for a stack frame. */
+	DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
 	BLANK();
 	DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
 	DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
@@ -233,7 +238,8 @@ int main(void)
 	DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
 	DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
 	DEFINE(PT_SIZE, sizeof(struct pt_regs));
-	DEFINE(PT_SZ_ALGN, align(sizeof(struct pt_regs), 64));
+	/* PT_SZ_ALGN includes space for a stack frame. */
+	DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
 	BLANK();
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 	DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
@@ -242,7 +248,8 @@ int main(void)
 	DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
 	DEFINE(THREAD_SZ, sizeof(struct thread_info));
-	DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
+	/* THREAD_SZ_ALGN includes space for a stack frame. */
+	DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
 	BLANK();
 	DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
 	DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
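Note: align_frame(x, y) is simply round_up(x, y) + FRAME_SIZE, i.e. the aligned object size plus one spare stack frame. A quick user-space check of that identity; the 488-byte size is illustrative, and FRAME_SIZE/FRAME_ALIGN mirror the 32-bit values from the hunk above.

#include <assert.h>
#include <stdio.h>

#define FRAME_SIZE	64
#define FRAME_ALIGN	64
#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))

int main(void)
{
	unsigned long sz = 488;			/* illustrative struct size */
	unsigned long a = align_frame(sz, FRAME_ALIGN);

	assert(a == 512 + FRAME_SIZE);		/* round_up(488, 64) + frame */
	assert(a % FRAME_ALIGN == 0);		/* result stays 64-byte aligned */
	printf("align_frame(%lu, %d) = %lu\n", sz, FRAME_ALIGN, a);
	return 0;
}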
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 3a44f7f704fa..6337adef30f6 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -364,32 +364,6 @@
 	.align	32
 	.endm
 
-	/* The following are simple 32 vs 64 bit instruction
-	 * abstractions for the macros */
-	.macro	EXTR	reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-	extrd,u	\reg1,32+(\start),\length,\reg2
-#else
-	extrw,u	\reg1,\start,\length,\reg2
-#endif
-	.endm
-
-	.macro	DEP	reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-	depd	\reg1,32+(\start),\length,\reg2
-#else
-	depw	\reg1,\start,\length,\reg2
-#endif
-	.endm
-
-	.macro	DEPI	val,start,length,reg
-#ifdef CONFIG_64BIT
-	depdi	\val,32+(\start),\length,\reg
-#else
-	depwi	\val,\start,\length,\reg
-#endif
-	.endm
-
 	/* In LP64, the space contains part of the upper 32 bits of the
 	 * fault. We have to extract this and place it in the va,
 	 * zeroing the corresponding bits in the space register */
@@ -442,19 +416,19 @@
 	 */
 	.macro	L2_ptep	pmd,pte,index,va,fault
 #if PT_NLEVELS == 3
-	EXTR	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+	extru	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
 #else
-	EXTR	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+	extru	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 #endif
-	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	copy	%r0,\pte
 	ldw,s	\index(\pmd),\pmd
 	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
-	DEP	%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
+	dep	%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
 	copy	\pmd,%r9
 	SHLREG	%r9,PxD_VALUE_SHIFT,\pmd
-	EXTR	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
-	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+	extru	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
 	LDREG	%r0(\pmd),\pte		/* pmd is now pte */
 	bb,>=,n	\pte,_PAGE_PRESENT_BIT,\fault
@@ -605,7 +579,7 @@
 	depdi	0,31,32,\tmp
 #endif
 	copy	\va,\tmp1
-	DEPI	0,31,23,\tmp1
+	depi	0,31,23,\tmp1
 	cmpb,COND(<>),n	\tmp,\tmp1,\fault
 	ldi	(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
 	depd,z	\prot,8,7,\prot
@@ -997,13 +971,6 @@ intr_restore:
 
 	rfi
 	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
 
 #ifndef CONFIG_PREEMPT
 # define intr_do_preempt	intr_restore
@@ -2076,9 +2043,10 @@ syscall_restore:
 	LDREG	TASK_PT_GR31(%r1),%r31	/* restore syscall rp */
 
 	/* NOTE: We use rsm/ssm pair to make this operation atomic */
+	LDREG	TASK_PT_GR30(%r1),%r1	/* Get user sp */
 	rsm	PSW_SM_I, %r0
-	LDREG	TASK_PT_GR30(%r1),%r30	/* restore user sp */
-	mfsp	%sr3,%r1		/* Get users space id */
+	copy	%r1,%r30		/* Restore user sp */
+	mfsp	%sr3,%r1		/* Get user space id */
 	mtsp	%r1,%sr7		/* Restore sr7 */
 	ssm	PSW_SM_I, %r0
 
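Note: the TLB-miss macros above drop the EXTR/DEP/DEPI wrappers and use the 32-bit extru/dep/depi forms directly at their only call sites. For readers less familiar with PA-RISC bitfield instructions, a small C model of the two forms is sketched below; the function names merely mirror the mnemonics and the model assumes field positions 0..31 (MSB-first) with len < 32, as used in these macros.

#include <stdint.h>

/* extru r,p,len,t: extract the 'len'-bit field of r whose rightmost bit
 * sits at big-endian bit position p (0 = MSB, 31 = LSB), zero-extended. */
static inline uint32_t extru(uint32_t r, unsigned int p, unsigned int len)
{
	return (r >> (31 - p)) & ((1u << len) - 1);
}

/* dep r,p,len,t: deposit the low 'len' bits of r into t so that the
 * field's rightmost bit lands at big-endian position p. */
static inline uint32_t dep(uint32_t r, unsigned int p, unsigned int len,
			   uint32_t t)
{
	uint32_t mask = ((1u << len) - 1) << (31 - p);

	return (t & ~mask) | ((r << (31 - p)) & mask);
}

/* e.g. "dep %r0,31,PAGE_SHIFT,\pmd" clears the low PAGE_SHIFT offset bits:
 * dep(0, 31, 12, pmd) == (pmd & ~0xfffu) */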
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index f5f96021caa0..68e75ce838d6 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -47,18 +47,17 @@ ENTRY(linux_gateway_page)
 	KILL_INSN
 	.endr
 
-	/* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */
+	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
 	/* Light-weight-syscall entry must always be located at 0xb0 */
 	/* WARNING: Keep this number updated with table size changes */
 #define __NR_lws_entries (2)
 
 lws_entry:
-	/* Unconditional branch to lws_start, located on the
-	   same gateway page */
-	b,n	lws_start
+	gate	lws_start, %r0		/* increase privilege */
+	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
 
-	/* Fill from 0xb4 to 0xe0 */
-	.rept 11
+	/* Fill from 0xb8 to 0xe0 */
+	.rept 10
 	KILL_INSN
 	.endr
 
@@ -423,9 +422,6 @@ tracesys_sigexit:
 
 	*********************************************************/
 lws_start:
-	/* Gate and ensure we return to userspace */
-	gate	.+8, %r0
-	depi	3, 31, 2, %r31	/* Ensure we return to userspace */
 
 #ifdef CONFIG_64BIT
 	/* FIXME: If we are a 64-bit kernel just
@@ -442,7 +438,7 @@ lws_start:
 #endif
 
 	/* Is the lws entry number valid? */
-	comiclr,>>=	__NR_lws_entries, %r20, %r0
+	comiclr,>>	__NR_lws_entries, %r20, %r0
 	b,n	lws_exit_nosys
 
 	/* WARNING: Trashing sr2 and sr3 */
@@ -473,7 +469,7 @@ lws_exit:
 	/* now reset the lowest bit of sp if it was set */
 	xor	%r30,%r1,%r30
 #endif
-	be,n	0(%sr3, %r31)
+	be,n	0(%sr7, %r31)
 
 
 
@@ -529,7 +525,6 @@ lws_compare_and_swap32:
 #endif
 
 lws_compare_and_swap:
-#ifdef CONFIG_SMP
 	/* Load start of lock table */
 	ldil	L%lws_lock_start, %r20
 	ldo	R%lws_lock_start(%r20), %r28
@@ -572,8 +567,6 @@ cas_wouldblock:
 	ldo	2(%r0), %r28			/* 2nd case */
 	b	lws_exit			/* Contended... */
 	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
-#endif
-/* CONFIG_SMP */
 
 	/*
 		prev = *addr;
@@ -601,13 +594,11 @@ cas_action:
 1:	ldw	0(%sr3,%r26), %r28
 	sub,<>	%r28, %r25, %r0
 2:	stw	%r24, 0(%sr3,%r26)
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
 	/* Return to userspace, set no error */
 	b	lws_exit
@@ -615,12 +606,10 @@ cas_action:
 
 3:
 	/* Error occured on load or store */
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
 	b	lws_exit
 	ldo	-EFAULT(%r0),%r21	/* set errno */
@@ -672,7 +661,6 @@ ENTRY(sys_call_table64)
 END(sys_call_table64)
 #endif
 
-#ifdef CONFIG_SMP
 	/*
 		All light-weight-syscall atomic operations
 		will use this set of locks
@@ -694,8 +682,6 @@ ENTRY(lws_lock_start)
 	.endr
 END(lws_lock_start)
 	.previous
-#endif
-/* CONFIG_SMP for lws_lock_start */
 
 .end
 
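Note: with the CONFIG_SMP guards removed, the LWS compare-and-swap path always hashes the user address into the lws_lock_start table, on UP as well as SMP. A rough user-space C analogue of that hashing step is sketched below; the 16-entry table, 16-byte stride, and the bits-4..7 hash are assumptions based on the surrounding code in this file (not visible in these hunks), and lws_lock_for() is a hypothetical name.

#include <stdint.h>

#define LWS_LOCK_COUNT	16	/* assumed: 16 locks in lws_lock_start */
#define LWS_LOCK_STRIDE	16	/* assumed: each lock padded to 16 bytes */

static uint8_t lws_locks[LWS_LOCK_COUNT * LWS_LOCK_STRIDE];

/* Assumed analogue of the address hashing done in lws_start: bits 4-7 of
 * the user address select one of the per-hash locks. */
static void *lws_lock_for(const void *uaddr)
{
	uintptr_t hash = ((uintptr_t)uaddr >> 4) & (LWS_LOCK_COUNT - 1);

	return &lws_locks[hash * LWS_LOCK_STRIDE];
}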
diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
index 3ca1c6149218..27a7492ddb0d 100644
--- a/arch/parisc/math-emu/decode_exc.c
+++ b/arch/parisc/math-emu/decode_exc.c
@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
 		return SIGNALCODE(SIGFPE, FPE_FLTINV);
 	  case DIVISIONBYZEROEXCEPTION:
 		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+		Clear_excp_register(exception_index);
 		return SIGNALCODE(SIGFPE, FPE_FLTDIV);
 	  case INEXACTEXCEPTION:
 		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
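Note: the added Clear_excp_register() call clears the pending divide-by-zero exception before SIGFPE/FPE_FLTDIV is delivered. A small user-space program to exercise that path is sketched below; it uses the GNU feenableexcept() extension (link with -lm) and is illustrative only, the expected signal is a reasonable assumption rather than something shown in this hunk.

#define _GNU_SOURCE
#include <fenv.h>
#include <stdio.h>

int main(void)
{
	volatile double num = 1.0, den = 0.0;

	feenableexcept(FE_DIVBYZERO);	/* unmask divide-by-zero traps */
	printf("%f\n", num / den);	/* expect SIGFPE with si_code FPE_FLTDIV */
	return 0;
}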
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index c6afbfc95770..18162ce4261e 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -264,8 +264,7 @@ no_context:
 
   out_of_memory:
 	up_read(&mm->mmap_sem);
-	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
-	if (user_mode(regs))
-		do_group_exit(SIGKILL);
-	goto no_context;
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
 }