aboutsummaryrefslogtreecommitdiffstats
path: root/arch/parisc/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-02-23 00:13:26 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-23 00:13:26 -0500
commit5f32ed140dac726e880d292988ba20d16f545bda (patch)
treef08589f9489a05eb7a0b7f855ab96c57b0561cde /arch/parisc/kernel
parentc68fea3464cbe4f3e1382f9f74a7c04cdbfb92ad (diff)
parent1dda59b4f3d03fa28d86f3ea235655f0f96aab3e (diff)
Merge branch 'parisc-3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull parisc updates from Helge Deller. The bulk of this is optimized page copying/clearing and cache flushing (virtual caches are lovely) by John David Anglin. * 'parisc-3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux: (31 commits) arch/parisc/include/asm: use ARRAY_SIZE macro in mmzone.h parisc: remove empty lines and unnecessary #ifdef coding in include/asm/signal.h parisc: sendfile and sendfile64 syscall cleanups parisc: switch to available compat_sched_rr_get_interval implementation parisc: fix fallocate syscall parisc: fix error return codes for rt_sigaction and rt_sigprocmask parisc: convert msgrcv and msgsnd syscalls to use compat layer parisc: correctly wire up mq_* functions for CONFIG_COMPAT case parisc: fix personality on 32bit kernel parisc: wire up process_vm_readv, process_vm_writev, kcmp and finit_module syscalls parisc: led driver requires CONFIG_VM_EVENT_COUNTERS parisc: remove unused compat_rt_sigframe.h header parisc/mm/fault.c: Port OOM changes to do_page_fault parisc: space register variables need to be in native length (unsigned long) parisc: fix ptrace breakage parisc: always detect multiple physical ranges parisc: ensure that mmapped shared pages are aligned at SHMLBA addresses parisc: disable preemption while flushing D- or I-caches through TMPALIAS region parisc: remove IRQF_DISABLED parisc: fixes and cleanups in page cache flushing (4/4) ...
Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--arch/parisc/kernel/cache.c221
-rw-r--r--arch/parisc/kernel/entry.S4
-rw-r--r--arch/parisc/kernel/inventory.c2
-rw-r--r--arch/parisc/kernel/irq.c4
-rw-r--r--arch/parisc/kernel/pacache.S335
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c5
-rw-r--r--arch/parisc/kernel/signal.c2
-rw-r--r--arch/parisc/kernel/signal32.c15
-rw-r--r--arch/parisc/kernel/sys_parisc.c16
-rw-r--r--arch/parisc/kernel/sys_parisc32.c122
-rw-r--r--arch/parisc/kernel/syscall.S5
-rw-r--r--arch/parisc/kernel/syscall_table.S27
12 files changed, 540 insertions, 218 deletions
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 48e16dc20102..4b12890642eb 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -267,9 +267,11 @@ static inline void
267__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, 267__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
268 unsigned long physaddr) 268 unsigned long physaddr)
269{ 269{
270 preempt_disable();
270 flush_dcache_page_asm(physaddr, vmaddr); 271 flush_dcache_page_asm(physaddr, vmaddr);
271 if (vma->vm_flags & VM_EXEC) 272 if (vma->vm_flags & VM_EXEC)
272 flush_icache_page_asm(physaddr, vmaddr); 273 flush_icache_page_asm(physaddr, vmaddr);
274 preempt_enable();
273} 275}
274 276
275void flush_dcache_page(struct page *page) 277void flush_dcache_page(struct page *page)
@@ -329,17 +331,6 @@ EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
329EXPORT_SYMBOL(flush_data_cache_local); 331EXPORT_SYMBOL(flush_data_cache_local);
330EXPORT_SYMBOL(flush_kernel_icache_range_asm); 332EXPORT_SYMBOL(flush_kernel_icache_range_asm);
331 333
332void clear_user_page_asm(void *page, unsigned long vaddr)
333{
334 unsigned long flags;
335 /* This function is implemented in assembly in pacache.S */
336 extern void __clear_user_page_asm(void *page, unsigned long vaddr);
337
338 purge_tlb_start(flags);
339 __clear_user_page_asm(page, vaddr);
340 purge_tlb_end(flags);
341}
342
343#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ 334#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
344int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; 335int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
345 336
@@ -373,20 +364,9 @@ void __init parisc_setup_cache_timing(void)
373 printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus()); 364 printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
374} 365}
375 366
376extern void purge_kernel_dcache_page(unsigned long); 367extern void purge_kernel_dcache_page_asm(unsigned long);
377extern void clear_user_page_asm(void *page, unsigned long vaddr); 368extern void clear_user_page_asm(void *, unsigned long);
378 369extern void copy_user_page_asm(void *, void *, unsigned long);
379void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
380{
381 unsigned long flags;
382
383 purge_kernel_dcache_page((unsigned long)page);
384 purge_tlb_start(flags);
385 pdtlb_kernel(page);
386 purge_tlb_end(flags);
387 clear_user_page_asm(page, vaddr);
388}
389EXPORT_SYMBOL(clear_user_page);
390 370
391void flush_kernel_dcache_page_addr(void *addr) 371void flush_kernel_dcache_page_addr(void *addr)
392{ 372{
@@ -399,11 +379,26 @@ void flush_kernel_dcache_page_addr(void *addr)
399} 379}
400EXPORT_SYMBOL(flush_kernel_dcache_page_addr); 380EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
401 381
382void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
383{
384 clear_page_asm(vto);
385 if (!parisc_requires_coherency())
386 flush_kernel_dcache_page_asm(vto);
387}
388EXPORT_SYMBOL(clear_user_page);
389
402void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 390void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
403 struct page *pg) 391 struct page *pg)
404{ 392{
405 /* no coherency needed (all in kmap/kunmap) */ 393 /* Copy using kernel mapping. No coherency is needed
406 copy_user_page_asm(vto, vfrom); 394 (all in kmap/kunmap) on machines that don't support
395 non-equivalent aliasing. However, the `from' page
396 needs to be flushed before it can be accessed through
397 the kernel mapping. */
398 preempt_disable();
399 flush_dcache_page_asm(__pa(vfrom), vaddr);
400 preempt_enable();
401 copy_page_asm(vto, vfrom);
407 if (!parisc_requires_coherency()) 402 if (!parisc_requires_coherency())
408 flush_kernel_dcache_page_asm(vto); 403 flush_kernel_dcache_page_asm(vto);
409} 404}
@@ -419,6 +414,24 @@ void kunmap_parisc(void *addr)
419EXPORT_SYMBOL(kunmap_parisc); 414EXPORT_SYMBOL(kunmap_parisc);
420#endif 415#endif
421 416
417void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
418{
419 unsigned long flags;
420
421 /* Note: purge_tlb_entries can be called at startup with
422 no context. */
423
424 /* Disable preemption while we play with %sr1. */
425 preempt_disable();
426 mtsp(mm->context, 1);
427 purge_tlb_start(flags);
428 pdtlb(addr);
429 pitlb(addr);
430 purge_tlb_end(flags);
431 preempt_enable();
432}
433EXPORT_SYMBOL(purge_tlb_entries);
434
422void __flush_tlb_range(unsigned long sid, unsigned long start, 435void __flush_tlb_range(unsigned long sid, unsigned long start,
423 unsigned long end) 436 unsigned long end)
424{ 437{
@@ -458,8 +471,66 @@ void flush_cache_all(void)
458 on_each_cpu(cacheflush_h_tmp_function, NULL, 1); 471 on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
459} 472}
460 473
474static inline unsigned long mm_total_size(struct mm_struct *mm)
475{
476 struct vm_area_struct *vma;
477 unsigned long usize = 0;
478
479 for (vma = mm->mmap; vma; vma = vma->vm_next)
480 usize += vma->vm_end - vma->vm_start;
481 return usize;
482}
483
484static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
485{
486 pte_t *ptep = NULL;
487
488 if (!pgd_none(*pgd)) {
489 pud_t *pud = pud_offset(pgd, addr);
490 if (!pud_none(*pud)) {
491 pmd_t *pmd = pmd_offset(pud, addr);
492 if (!pmd_none(*pmd))
493 ptep = pte_offset_map(pmd, addr);
494 }
495 }
496 return ptep;
497}
498
461void flush_cache_mm(struct mm_struct *mm) 499void flush_cache_mm(struct mm_struct *mm)
462{ 500{
501 /* Flushing the whole cache on each cpu takes forever on
502 rp3440, etc. So, avoid it if the mm isn't too big. */
503 if (mm_total_size(mm) < parisc_cache_flush_threshold) {
504 struct vm_area_struct *vma;
505
506 if (mm->context == mfsp(3)) {
507 for (vma = mm->mmap; vma; vma = vma->vm_next) {
508 flush_user_dcache_range_asm(vma->vm_start,
509 vma->vm_end);
510 if (vma->vm_flags & VM_EXEC)
511 flush_user_icache_range_asm(
512 vma->vm_start, vma->vm_end);
513 }
514 } else {
515 pgd_t *pgd = mm->pgd;
516
517 for (vma = mm->mmap; vma; vma = vma->vm_next) {
518 unsigned long addr;
519
520 for (addr = vma->vm_start; addr < vma->vm_end;
521 addr += PAGE_SIZE) {
522 pte_t *ptep = get_ptep(pgd, addr);
523 if (ptep != NULL) {
524 pte_t pte = *ptep;
525 __flush_cache_page(vma, addr,
526 page_to_phys(pte_page(pte)));
527 }
528 }
529 }
530 }
531 return;
532 }
533
463#ifdef CONFIG_SMP 534#ifdef CONFIG_SMP
464 flush_cache_all(); 535 flush_cache_all();
465#else 536#else
@@ -485,20 +556,36 @@ flush_user_icache_range(unsigned long start, unsigned long end)
485 flush_instruction_cache(); 556 flush_instruction_cache();
486} 557}
487 558
488
489void flush_cache_range(struct vm_area_struct *vma, 559void flush_cache_range(struct vm_area_struct *vma,
490 unsigned long start, unsigned long end) 560 unsigned long start, unsigned long end)
491{ 561{
492 int sr3;
493
494 BUG_ON(!vma->vm_mm->context); 562 BUG_ON(!vma->vm_mm->context);
495 563
496 sr3 = mfsp(3); 564 if ((end - start) < parisc_cache_flush_threshold) {
497 if (vma->vm_mm->context == sr3) { 565 if (vma->vm_mm->context == mfsp(3)) {
498 flush_user_dcache_range(start,end); 566 flush_user_dcache_range_asm(start, end);
499 flush_user_icache_range(start,end); 567 if (vma->vm_flags & VM_EXEC)
568 flush_user_icache_range_asm(start, end);
569 } else {
570 unsigned long addr;
571 pgd_t *pgd = vma->vm_mm->pgd;
572
573 for (addr = start & PAGE_MASK; addr < end;
574 addr += PAGE_SIZE) {
575 pte_t *ptep = get_ptep(pgd, addr);
576 if (ptep != NULL) {
577 pte_t pte = *ptep;
578 flush_cache_page(vma,
579 addr, pte_pfn(pte));
580 }
581 }
582 }
500 } else { 583 } else {
584#ifdef CONFIG_SMP
501 flush_cache_all(); 585 flush_cache_all();
586#else
587 flush_cache_all_local();
588#endif
502 } 589 }
503} 590}
504 591
@@ -511,3 +598,67 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
511 __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn))); 598 __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
512 599
513} 600}
601
602#ifdef CONFIG_PARISC_TMPALIAS
603
604void clear_user_highpage(struct page *page, unsigned long vaddr)
605{
606 void *vto;
607 unsigned long flags;
608
609 /* Clear using TMPALIAS region. The page doesn't need to
610 be flushed but the kernel mapping needs to be purged. */
611
612 vto = kmap_atomic(page, KM_USER0);
613
614 /* The PA-RISC 2.0 Architecture book states on page F-6:
615 "Before a write-capable translation is enabled, *all*
616 non-equivalently-aliased translations must be removed
617 from the page table and purged from the TLB. (Note
618 that the caches are not required to be flushed at this
619 time.) Before any non-equivalent aliased translation
620 is re-enabled, the virtual address range for the writeable
621 page (the entire page) must be flushed from the cache,
622 and the write-capable translation removed from the page
623 table and purged from the TLB." */
624
625 purge_kernel_dcache_page_asm((unsigned long)vto);
626 purge_tlb_start(flags);
627 pdtlb_kernel(vto);
628 purge_tlb_end(flags);
629 preempt_disable();
630 clear_user_page_asm(vto, vaddr);
631 preempt_enable();
632
633 pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
634}
635
636void copy_user_highpage(struct page *to, struct page *from,
637 unsigned long vaddr, struct vm_area_struct *vma)
638{
639 void *vfrom, *vto;
640 unsigned long flags;
641
642 /* Copy using TMPALIAS region. This has the advantage
643 that the `from' page doesn't need to be flushed. However,
644 the `to' page must be flushed in copy_user_page_asm since
645 it can be used to bring in executable code. */
646
647 vfrom = kmap_atomic(from, KM_USER0);
648 vto = kmap_atomic(to, KM_USER1);
649
650 purge_kernel_dcache_page_asm((unsigned long)vto);
651 purge_tlb_start(flags);
652 pdtlb_kernel(vto);
653 pdtlb_kernel(vfrom);
654 purge_tlb_end(flags);
655 preempt_disable();
656 copy_user_page_asm(vto, vfrom, vaddr);
657 flush_dcache_page_asm(__pa(vto), vaddr);
658 preempt_enable();
659
660 pagefault_enable(); /* kunmap_atomic(addr, KM_USER1); */
661 pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
662}
663
664#endif /* CONFIG_PARISC_TMPALIAS */
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index eb7850b46c25..7c9648919c91 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -483,7 +483,7 @@
483 * B <-> _PAGE_DMB (memory break) 483 * B <-> _PAGE_DMB (memory break)
484 * 484 *
485 * Then incredible subtlety: The access rights are 485 * Then incredible subtlety: The access rights are
486 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ 486 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
487 * See 3-14 of the parisc 2.0 manual 487 * See 3-14 of the parisc 2.0 manual
488 * 488 *
489 * Finally, _PAGE_READ goes in the top bit of PL1 (so we 489 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
@@ -493,7 +493,7 @@
493 493
494 /* PAGE_USER indicates the page can be read with user privileges, 494 /* PAGE_USER indicates the page can be read with user privileges,
495 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1 495 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
496 * contains _PAGE_READ */ 496 * contains _PAGE_READ) */
497 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0 497 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
498 depdi 7,11,3,\prot 498 depdi 7,11,3,\prot
499 /* If we're a gateway page, drop PL2 back to zero for promotion 499 /* If we're a gateway page, drop PL2 back to zero for promotion
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 08324aac3544..3295ef4a185d 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -186,12 +186,14 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
186 186
187 if (status != PDC_OK) { 187 if (status != PDC_OK) {
188 /* no more cell modules or error */ 188 /* no more cell modules or error */
189 kfree(pa_pdc_cell);
189 return status; 190 return status;
190 } 191 }
191 192
192 temp = pa_pdc_cell->cba; 193 temp = pa_pdc_cell->cba;
193 dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path)); 194 dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
194 if (!dev) { 195 if (!dev) {
196 kfree(pa_pdc_cell);
195 return PDC_OK; 197 return PDC_OK;
196 } 198 }
197 199
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 0299d63cd112..8094d3ed3b64 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -379,14 +379,14 @@ void do_cpu_irq_mask(struct pt_regs *regs)
379static struct irqaction timer_action = { 379static struct irqaction timer_action = {
380 .handler = timer_interrupt, 380 .handler = timer_interrupt,
381 .name = "timer", 381 .name = "timer",
382 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL, 382 .flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
383}; 383};
384 384
385#ifdef CONFIG_SMP 385#ifdef CONFIG_SMP
386static struct irqaction ipi_action = { 386static struct irqaction ipi_action = {
387 .handler = ipi_interrupt, 387 .handler = ipi_interrupt,
388 .name = "IPI", 388 .name = "IPI",
389 .flags = IRQF_DISABLED | IRQF_PERCPU, 389 .flags = IRQF_PERCPU,
390}; 390};
391#endif 391#endif
392 392
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 5d7218ad885c..312b48422a56 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -199,7 +199,6 @@ ENTRY(flush_instruction_cache_local)
199 .callinfo NO_CALLS 199 .callinfo NO_CALLS
200 .entry 200 .entry
201 201
202 mtsp %r0, %sr1
203 load32 cache_info, %r1 202 load32 cache_info, %r1
204 203
205 /* Flush Instruction Cache */ 204 /* Flush Instruction Cache */
@@ -208,7 +207,8 @@ ENTRY(flush_instruction_cache_local)
208 LDREG ICACHE_STRIDE(%r1), %arg1 207 LDREG ICACHE_STRIDE(%r1), %arg1
209 LDREG ICACHE_COUNT(%r1), %arg2 208 LDREG ICACHE_COUNT(%r1), %arg2
210 LDREG ICACHE_LOOP(%r1), %arg3 209 LDREG ICACHE_LOOP(%r1), %arg3
211 rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/ 210 rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/
211 mtsp %r0, %sr1
212 addib,COND(=) -1, %arg3, fioneloop /* Preadjust and test */ 212 addib,COND(=) -1, %arg3, fioneloop /* Preadjust and test */
213 movb,<,n %arg3, %r31, fisync /* If loop < 0, do sync */ 213 movb,<,n %arg3, %r31, fisync /* If loop < 0, do sync */
214 214
@@ -220,7 +220,33 @@ fimanyloop: /* Loop if LOOP >= 2 */
220 addib,COND(<=),n -1, %arg2, fisync /* Outer loop decr */ 220 addib,COND(<=),n -1, %arg2, fisync /* Outer loop decr */
221 221
222fioneloop: /* Loop if LOOP = 1 */ 222fioneloop: /* Loop if LOOP = 1 */
223 addib,COND(>) -1, %arg2, fioneloop /* Outer loop count decr */ 223 /* Some implementations may flush with a single fice instruction */
224 cmpib,COND(>>=),n 15, %arg2, fioneloop2
225
226fioneloop1:
227 fice,m %arg1(%sr1, %arg0)
228 fice,m %arg1(%sr1, %arg0)
229 fice,m %arg1(%sr1, %arg0)
230 fice,m %arg1(%sr1, %arg0)
231 fice,m %arg1(%sr1, %arg0)
232 fice,m %arg1(%sr1, %arg0)
233 fice,m %arg1(%sr1, %arg0)
234 fice,m %arg1(%sr1, %arg0)
235 fice,m %arg1(%sr1, %arg0)
236 fice,m %arg1(%sr1, %arg0)
237 fice,m %arg1(%sr1, %arg0)
238 fice,m %arg1(%sr1, %arg0)
239 fice,m %arg1(%sr1, %arg0)
240 fice,m %arg1(%sr1, %arg0)
241 fice,m %arg1(%sr1, %arg0)
242 addib,COND(>) -16, %arg2, fioneloop1
243 fice,m %arg1(%sr1, %arg0)
244
245 /* Check if done */
246 cmpb,COND(=),n %arg2, %r0, fisync /* Predict branch taken */
247
248fioneloop2:
249 addib,COND(>) -1, %arg2, fioneloop2 /* Outer loop count decr */
224 fice,m %arg1(%sr1, %arg0) /* Fice for one loop */ 250 fice,m %arg1(%sr1, %arg0) /* Fice for one loop */
225 251
226fisync: 252fisync:
@@ -240,8 +266,7 @@ ENTRY(flush_data_cache_local)
240 .callinfo NO_CALLS 266 .callinfo NO_CALLS
241 .entry 267 .entry
242 268
243 mtsp %r0, %sr1 269 load32 cache_info, %r1
244 load32 cache_info, %r1
245 270
246 /* Flush Data Cache */ 271 /* Flush Data Cache */
247 272
@@ -249,7 +274,8 @@ ENTRY(flush_data_cache_local)
249 LDREG DCACHE_STRIDE(%r1), %arg1 274 LDREG DCACHE_STRIDE(%r1), %arg1
250 LDREG DCACHE_COUNT(%r1), %arg2 275 LDREG DCACHE_COUNT(%r1), %arg2
251 LDREG DCACHE_LOOP(%r1), %arg3 276 LDREG DCACHE_LOOP(%r1), %arg3
252 rsm PSW_SM_I, %r22 277 rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/
278 mtsp %r0, %sr1
253 addib,COND(=) -1, %arg3, fdoneloop /* Preadjust and test */ 279 addib,COND(=) -1, %arg3, fdoneloop /* Preadjust and test */
254 movb,<,n %arg3, %r31, fdsync /* If loop < 0, do sync */ 280 movb,<,n %arg3, %r31, fdsync /* If loop < 0, do sync */
255 281
@@ -261,7 +287,33 @@ fdmanyloop: /* Loop if LOOP >= 2 */
261 addib,COND(<=),n -1, %arg2, fdsync /* Outer loop decr */ 287 addib,COND(<=),n -1, %arg2, fdsync /* Outer loop decr */
262 288
263fdoneloop: /* Loop if LOOP = 1 */ 289fdoneloop: /* Loop if LOOP = 1 */
264 addib,COND(>) -1, %arg2, fdoneloop /* Outer loop count decr */ 290 /* Some implementations may flush with a single fdce instruction */
291 cmpib,COND(>>=),n 15, %arg2, fdoneloop2
292
293fdoneloop1:
294 fdce,m %arg1(%sr1, %arg0)
295 fdce,m %arg1(%sr1, %arg0)
296 fdce,m %arg1(%sr1, %arg0)
297 fdce,m %arg1(%sr1, %arg0)
298 fdce,m %arg1(%sr1, %arg0)
299 fdce,m %arg1(%sr1, %arg0)
300 fdce,m %arg1(%sr1, %arg0)
301 fdce,m %arg1(%sr1, %arg0)
302 fdce,m %arg1(%sr1, %arg0)
303 fdce,m %arg1(%sr1, %arg0)
304 fdce,m %arg1(%sr1, %arg0)
305 fdce,m %arg1(%sr1, %arg0)
306 fdce,m %arg1(%sr1, %arg0)
307 fdce,m %arg1(%sr1, %arg0)
308 fdce,m %arg1(%sr1, %arg0)
309 addib,COND(>) -16, %arg2, fdoneloop1
310 fdce,m %arg1(%sr1, %arg0)
311
312 /* Check if done */
313 cmpb,COND(=),n %arg2, %r0, fdsync /* Predict branch taken */
314
315fdoneloop2:
316 addib,COND(>) -1, %arg2, fdoneloop2 /* Outer loop count decr */
265 fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */ 317 fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */
266 318
267fdsync: 319fdsync:
@@ -277,7 +329,104 @@ ENDPROC(flush_data_cache_local)
277 329
278 .align 16 330 .align 16
279 331
280ENTRY(copy_user_page_asm) 332/* Macros to serialize TLB purge operations on SMP. */
333
334 .macro tlb_lock la,flags,tmp
335#ifdef CONFIG_SMP
336 ldil L%pa_tlb_lock,%r1
337 ldo R%pa_tlb_lock(%r1),\la
338 rsm PSW_SM_I,\flags
3391: LDCW 0(\la),\tmp
340 cmpib,<>,n 0,\tmp,3f
3412: ldw 0(\la),\tmp
342 cmpb,<> %r0,\tmp,1b
343 nop
344 b,n 2b
3453:
346#endif
347 .endm
348
349 .macro tlb_unlock la,flags,tmp
350#ifdef CONFIG_SMP
351 ldi 1,\tmp
352 stw \tmp,0(\la)
353 mtsm \flags
354#endif
355 .endm
356
357/* Clear page using kernel mapping. */
358
359ENTRY(clear_page_asm)
360 .proc
361 .callinfo NO_CALLS
362 .entry
363
364#ifdef CONFIG_64BIT
365
366 /* Unroll the loop. */
367 ldi (PAGE_SIZE / 128), %r1
368
3691:
370 std %r0, 0(%r26)
371 std %r0, 8(%r26)
372 std %r0, 16(%r26)
373 std %r0, 24(%r26)
374 std %r0, 32(%r26)
375 std %r0, 40(%r26)
376 std %r0, 48(%r26)
377 std %r0, 56(%r26)
378 std %r0, 64(%r26)
379 std %r0, 72(%r26)
380 std %r0, 80(%r26)
381 std %r0, 88(%r26)
382 std %r0, 96(%r26)
383 std %r0, 104(%r26)
384 std %r0, 112(%r26)
385 std %r0, 120(%r26)
386
387 /* Note reverse branch hint for addib is taken. */
388 addib,COND(>),n -1, %r1, 1b
389 ldo 128(%r26), %r26
390
391#else
392
393 /*
394 * Note that until (if) we start saving the full 64-bit register
395 * values on interrupt, we can't use std on a 32 bit kernel.
396 */
397 ldi (PAGE_SIZE / 64), %r1
398
3991:
400 stw %r0, 0(%r26)
401 stw %r0, 4(%r26)
402 stw %r0, 8(%r26)
403 stw %r0, 12(%r26)
404 stw %r0, 16(%r26)
405 stw %r0, 20(%r26)
406 stw %r0, 24(%r26)
407 stw %r0, 28(%r26)
408 stw %r0, 32(%r26)
409 stw %r0, 36(%r26)
410 stw %r0, 40(%r26)
411 stw %r0, 44(%r26)
412 stw %r0, 48(%r26)
413 stw %r0, 52(%r26)
414 stw %r0, 56(%r26)
415 stw %r0, 60(%r26)
416
417 addib,COND(>),n -1, %r1, 1b
418 ldo 64(%r26), %r26
419#endif
420 bv %r0(%r2)
421 nop
422 .exit
423
424 .procend
425ENDPROC(clear_page_asm)
426
427/* Copy page using kernel mapping. */
428
429ENTRY(copy_page_asm)
281 .proc 430 .proc
282 .callinfo NO_CALLS 431 .callinfo NO_CALLS
283 .entry 432 .entry
@@ -285,18 +434,14 @@ ENTRY(copy_user_page_asm)
285#ifdef CONFIG_64BIT 434#ifdef CONFIG_64BIT
286 /* PA8x00 CPUs can consume 2 loads or 1 store per cycle. 435 /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
287 * Unroll the loop by hand and arrange insn appropriately. 436 * Unroll the loop by hand and arrange insn appropriately.
288 * GCC probably can do this just as well. 437 * Prefetch doesn't improve performance on rp3440.
438 * GCC probably can do this just as well...
289 */ 439 */
290 440
291 ldd 0(%r25), %r19
292 ldi (PAGE_SIZE / 128), %r1 441 ldi (PAGE_SIZE / 128), %r1
293 442
294 ldw 64(%r25), %r0 /* prefetch 1 cacheline ahead */ 4431: ldd 0(%r25), %r19
295 ldw 128(%r25), %r0 /* prefetch 2 */ 444 ldd 8(%r25), %r20
296
2971: ldd 8(%r25), %r20
298 ldw 192(%r25), %r0 /* prefetch 3 */
299 ldw 256(%r25), %r0 /* prefetch 4 */
300 445
301 ldd 16(%r25), %r21 446 ldd 16(%r25), %r21
302 ldd 24(%r25), %r22 447 ldd 24(%r25), %r22
@@ -330,20 +475,16 @@ ENTRY(copy_user_page_asm)
330 475
331 ldd 112(%r25), %r21 476 ldd 112(%r25), %r21
332 ldd 120(%r25), %r22 477 ldd 120(%r25), %r22
478 ldo 128(%r25), %r25
333 std %r19, 96(%r26) 479 std %r19, 96(%r26)
334 std %r20, 104(%r26) 480 std %r20, 104(%r26)
335 481
336 ldo 128(%r25), %r25
337 std %r21, 112(%r26) 482 std %r21, 112(%r26)
338 std %r22, 120(%r26) 483 std %r22, 120(%r26)
339 ldo 128(%r26), %r26
340 484
341 /* conditional branches nullify on forward taken branch, and on 485 /* Note reverse branch hint for addib is taken. */
342 * non-taken backward branch. Note that .+4 is a backwards branch. 486 addib,COND(>),n -1, %r1, 1b
343 * The ldd should only get executed if the branch is taken. 487 ldo 128(%r26), %r26
344 */
345 addib,COND(>),n -1, %r1, 1b /* bundle 10 */
346 ldd 0(%r25), %r19 /* start next loads */
347 488
348#else 489#else
349 490
@@ -399,7 +540,7 @@ ENTRY(copy_user_page_asm)
399 .exit 540 .exit
400 541
401 .procend 542 .procend
402ENDPROC(copy_user_page_asm) 543ENDPROC(copy_page_asm)
403 544
404/* 545/*
405 * NOTE: Code in clear_user_page has a hard coded dependency on the 546 * NOTE: Code in clear_user_page has a hard coded dependency on the
@@ -422,8 +563,6 @@ ENDPROC(copy_user_page_asm)
422 * %r23 physical page (shifted for tlb insert) of "from" translation 563 * %r23 physical page (shifted for tlb insert) of "from" translation
423 */ 564 */
424 565
425#if 0
426
427 /* 566 /*
428 * We can't do this since copy_user_page is used to bring in 567 * We can't do this since copy_user_page is used to bring in
429 * file data that might have instructions. Since the data would 568 * file data that might have instructions. Since the data would
@@ -435,6 +574,7 @@ ENDPROC(copy_user_page_asm)
435 * use it if more information is passed into copy_user_page(). 574 * use it if more information is passed into copy_user_page().
436 * Have to do some measurements to see if it is worthwhile to 575 * Have to do some measurements to see if it is worthwhile to
437 * lobby for such a change. 576 * lobby for such a change.
577 *
438 */ 578 */
439 579
440ENTRY(copy_user_page_asm) 580ENTRY(copy_user_page_asm)
@@ -442,16 +582,21 @@ ENTRY(copy_user_page_asm)
442 .callinfo NO_CALLS 582 .callinfo NO_CALLS
443 .entry 583 .entry
444 584
585 /* Convert virtual `to' and `from' addresses to physical addresses.
586 Move `from' physical address to non shadowed register. */
445 ldil L%(__PAGE_OFFSET), %r1 587 ldil L%(__PAGE_OFFSET), %r1
446 sub %r26, %r1, %r26 588 sub %r26, %r1, %r26
447 sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */ 589 sub %r25, %r1, %r23
448 590
449 ldil L%(TMPALIAS_MAP_START), %r28 591 ldil L%(TMPALIAS_MAP_START), %r28
450 /* FIXME for different page sizes != 4k */ 592 /* FIXME for different page sizes != 4k */
451#ifdef CONFIG_64BIT 593#ifdef CONFIG_64BIT
452 extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */ 594#if (TMPALIAS_MAP_START >= 0x80000000)
453 extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */ 595 depdi 0, 31,32, %r28 /* clear any sign extension */
454 depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */ 596#endif
597 extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */
598 extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */
599 depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
455 depdi 0, 63,12, %r28 /* Clear any offset bits */ 600 depdi 0, 63,12, %r28 /* Clear any offset bits */
456 copy %r28, %r29 601 copy %r28, %r29
457 depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */ 602 depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */
@@ -466,10 +611,76 @@ ENTRY(copy_user_page_asm)
466 611
467 /* Purge any old translations */ 612 /* Purge any old translations */
468 613
614#ifdef CONFIG_PA20
615 pdtlb,l 0(%r28)
616 pdtlb,l 0(%r29)
617#else
618 tlb_lock %r20,%r21,%r22
469 pdtlb 0(%r28) 619 pdtlb 0(%r28)
470 pdtlb 0(%r29) 620 pdtlb 0(%r29)
621 tlb_unlock %r20,%r21,%r22
622#endif
623
624#ifdef CONFIG_64BIT
625 /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
626 * Unroll the loop by hand and arrange insn appropriately.
627 * GCC probably can do this just as well.
628 */
471 629
472 ldi 64, %r1 630 ldd 0(%r29), %r19
631 ldi (PAGE_SIZE / 128), %r1
632
6331: ldd 8(%r29), %r20
634
635 ldd 16(%r29), %r21
636 ldd 24(%r29), %r22
637 std %r19, 0(%r28)
638 std %r20, 8(%r28)
639
640 ldd 32(%r29), %r19
641 ldd 40(%r29), %r20
642 std %r21, 16(%r28)
643 std %r22, 24(%r28)
644
645 ldd 48(%r29), %r21
646 ldd 56(%r29), %r22
647 std %r19, 32(%r28)
648 std %r20, 40(%r28)
649
650 ldd 64(%r29), %r19
651 ldd 72(%r29), %r20
652 std %r21, 48(%r28)
653 std %r22, 56(%r28)
654
655 ldd 80(%r29), %r21
656 ldd 88(%r29), %r22
657 std %r19, 64(%r28)
658 std %r20, 72(%r28)
659
660 ldd 96(%r29), %r19
661 ldd 104(%r29), %r20
662 std %r21, 80(%r28)
663 std %r22, 88(%r28)
664
665 ldd 112(%r29), %r21
666 ldd 120(%r29), %r22
667 std %r19, 96(%r28)
668 std %r20, 104(%r28)
669
670 ldo 128(%r29), %r29
671 std %r21, 112(%r28)
672 std %r22, 120(%r28)
673 ldo 128(%r28), %r28
674
675 /* conditional branches nullify on forward taken branch, and on
676 * non-taken backward branch. Note that .+4 is a backwards branch.
677 * The ldd should only get executed if the branch is taken.
678 */
679 addib,COND(>),n -1, %r1, 1b /* bundle 10 */
680 ldd 0(%r29), %r19 /* start next loads */
681
682#else
683 ldi (PAGE_SIZE / 64), %r1
473 684
474 /* 685 /*
475 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw 686 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
@@ -480,9 +691,7 @@ ENTRY(copy_user_page_asm)
480 * use ldd/std on a 32 bit kernel. 691 * use ldd/std on a 32 bit kernel.
481 */ 692 */
482 693
483 6941: ldw 0(%r29), %r19
4841:
485 ldw 0(%r29), %r19
486 ldw 4(%r29), %r20 695 ldw 4(%r29), %r20
487 ldw 8(%r29), %r21 696 ldw 8(%r29), %r21
488 ldw 12(%r29), %r22 697 ldw 12(%r29), %r22
@@ -515,8 +724,10 @@ ENTRY(copy_user_page_asm)
515 stw %r21, 56(%r28) 724 stw %r21, 56(%r28)
516 stw %r22, 60(%r28) 725 stw %r22, 60(%r28)
517 ldo 64(%r28), %r28 726 ldo 64(%r28), %r28
727
518 addib,COND(>) -1, %r1,1b 728 addib,COND(>) -1, %r1,1b
519 ldo 64(%r29), %r29 729 ldo 64(%r29), %r29
730#endif
520 731
521 bv %r0(%r2) 732 bv %r0(%r2)
522 nop 733 nop
@@ -524,9 +735,8 @@ ENTRY(copy_user_page_asm)
524 735
525 .procend 736 .procend
526ENDPROC(copy_user_page_asm) 737ENDPROC(copy_user_page_asm)
527#endif
528 738
529ENTRY(__clear_user_page_asm) 739ENTRY(clear_user_page_asm)
530 .proc 740 .proc
531 .callinfo NO_CALLS 741 .callinfo NO_CALLS
532 .entry 742 .entry
@@ -550,7 +760,13 @@ ENTRY(__clear_user_page_asm)
550 760
551 /* Purge any old translation */ 761 /* Purge any old translation */
552 762
763#ifdef CONFIG_PA20
764 pdtlb,l 0(%r28)
765#else
766 tlb_lock %r20,%r21,%r22
553 pdtlb 0(%r28) 767 pdtlb 0(%r28)
768 tlb_unlock %r20,%r21,%r22
769#endif
554 770
555#ifdef CONFIG_64BIT 771#ifdef CONFIG_64BIT
556 ldi (PAGE_SIZE / 128), %r1 772 ldi (PAGE_SIZE / 128), %r1
@@ -580,8 +796,7 @@ ENTRY(__clear_user_page_asm)
580#else /* ! CONFIG_64BIT */ 796#else /* ! CONFIG_64BIT */
581 ldi (PAGE_SIZE / 64), %r1 797 ldi (PAGE_SIZE / 64), %r1
582 798
5831: 7991: stw %r0, 0(%r28)
584 stw %r0, 0(%r28)
585 stw %r0, 4(%r28) 800 stw %r0, 4(%r28)
586 stw %r0, 8(%r28) 801 stw %r0, 8(%r28)
587 stw %r0, 12(%r28) 802 stw %r0, 12(%r28)
@@ -606,7 +821,7 @@ ENTRY(__clear_user_page_asm)
606 .exit 821 .exit
607 822
608 .procend 823 .procend
609ENDPROC(__clear_user_page_asm) 824ENDPROC(clear_user_page_asm)
610 825
611ENTRY(flush_dcache_page_asm) 826ENTRY(flush_dcache_page_asm)
612 .proc 827 .proc
@@ -630,7 +845,13 @@ ENTRY(flush_dcache_page_asm)
630 845
631 /* Purge any old translation */ 846 /* Purge any old translation */
632 847
848#ifdef CONFIG_PA20
849 pdtlb,l 0(%r28)
850#else
851 tlb_lock %r20,%r21,%r22
633 pdtlb 0(%r28) 852 pdtlb 0(%r28)
853 tlb_unlock %r20,%r21,%r22
854#endif
634 855
635 ldil L%dcache_stride, %r1 856 ldil L%dcache_stride, %r1
636 ldw R%dcache_stride(%r1), %r1 857 ldw R%dcache_stride(%r1), %r1
@@ -663,8 +884,17 @@ ENTRY(flush_dcache_page_asm)
663 fdc,m %r1(%r28) 884 fdc,m %r1(%r28)
664 885
665 sync 886 sync
887
888#ifdef CONFIG_PA20
889 pdtlb,l 0(%r25)
890#else
891 tlb_lock %r20,%r21,%r22
892 pdtlb 0(%r25)
893 tlb_unlock %r20,%r21,%r22
894#endif
895
666 bv %r0(%r2) 896 bv %r0(%r2)
667 pdtlb (%r25) 897 nop
668 .exit 898 .exit
669 899
670 .procend 900 .procend
@@ -692,7 +922,13 @@ ENTRY(flush_icache_page_asm)
692 922
693 /* Purge any old translation */ 923 /* Purge any old translation */
694 924
695 pitlb (%sr4,%r28) 925#ifdef CONFIG_PA20
926 pitlb,l %r0(%sr4,%r28)
927#else
928 tlb_lock %r20,%r21,%r22
929 pitlb (%sr4,%r28)
930 tlb_unlock %r20,%r21,%r22
931#endif
696 932
697 ldil L%icache_stride, %r1 933 ldil L%icache_stride, %r1
698 ldw R%icache_stride(%r1), %r1 934 ldw R%icache_stride(%r1), %r1
@@ -727,8 +963,17 @@ ENTRY(flush_icache_page_asm)
727 fic,m %r1(%sr4,%r28) 963 fic,m %r1(%sr4,%r28)
728 964
729 sync 965 sync
966
967#ifdef CONFIG_PA20
968 pitlb,l %r0(%sr4,%r25)
969#else
970 tlb_lock %r20,%r21,%r22
971 pitlb (%sr4,%r25)
972 tlb_unlock %r20,%r21,%r22
973#endif
974
730 bv %r0(%r2) 975 bv %r0(%r2)
731 pitlb (%sr4,%r25) 976 nop
732 .exit 977 .exit
733 978
734 .procend 979 .procend
@@ -777,7 +1022,7 @@ ENTRY(flush_kernel_dcache_page_asm)
777 .procend 1022 .procend
778ENDPROC(flush_kernel_dcache_page_asm) 1023ENDPROC(flush_kernel_dcache_page_asm)
779 1024
780ENTRY(purge_kernel_dcache_page) 1025ENTRY(purge_kernel_dcache_page_asm)
781 .proc 1026 .proc
782 .callinfo NO_CALLS 1027 .callinfo NO_CALLS
783 .entry 1028 .entry
@@ -817,7 +1062,7 @@ ENTRY(purge_kernel_dcache_page)
817 .exit 1062 .exit
818 1063
819 .procend 1064 .procend
820ENDPROC(purge_kernel_dcache_page) 1065ENDPROC(purge_kernel_dcache_page_asm)
821 1066
822ENTRY(flush_user_dcache_range_asm) 1067ENTRY(flush_user_dcache_range_asm)
823 .proc 1068 .proc
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index ceec85de6290..6795dc6c995f 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -157,5 +157,6 @@ extern void _mcount(void);
157EXPORT_SYMBOL(_mcount); 157EXPORT_SYMBOL(_mcount);
158#endif 158#endif
159 159
160/* from pacache.S -- needed for copy_page */ 160/* from pacache.S -- needed for clear/copy_page */
161EXPORT_SYMBOL(copy_user_page_asm); 161EXPORT_SYMBOL(clear_page_asm);
162EXPORT_SYMBOL(copy_page_asm);
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index fd051705a407..52c85b2f502e 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -312,7 +312,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
312#if DEBUG_SIG 312#if DEBUG_SIG
313 /* Assert that we're flushing in the correct space... */ 313 /* Assert that we're flushing in the correct space... */
314 { 314 {
315 int sid; 315 unsigned long sid;
316 asm ("mfsp %%sr3,%0" : "=r" (sid)); 316 asm ("mfsp %%sr3,%0" : "=r" (sid));
317 DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n", 317 DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n",
318 sid, frame->tramp); 318 sid, frame->tramp);
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 5dede04f2f3e..2ddcabb616ce 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -65,7 +65,7 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
65{ 65{
66 compat_sigset_t s; 66 compat_sigset_t s;
67 67
68 if (sz != sizeof *set) 68 if (sz != sizeof(compat_sigset_t))
69 return -EINVAL; 69 return -EINVAL;
70 sigset_64to32(&s, set); 70 sigset_64to32(&s, set);
71 71
@@ -78,7 +78,7 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
78 compat_sigset_t s; 78 compat_sigset_t s;
79 int r; 79 int r;
80 80
81 if (sz != sizeof *set) 81 if (sz != sizeof(compat_sigset_t))
82 return -EINVAL; 82 return -EINVAL;
83 83
84 if ((r = copy_from_user(&s, up, sz)) == 0) { 84 if ((r = copy_from_user(&s, up, sz)) == 0) {
@@ -94,8 +94,11 @@ int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, compat_sigset_t _
94 sigset_t old_set, new_set; 94 sigset_t old_set, new_set;
95 int ret; 95 int ret;
96 96
97 if (set && get_sigset32(set, &new_set, sigsetsize)) 97 if (set) {
98 return -EFAULT; 98 ret = get_sigset32(set, &new_set, sigsetsize);
99 if (ret)
100 return ret;
101 }
99 102
100 KERNEL_SYSCALL(ret, sys_rt_sigprocmask, how, set ? (sigset_t __user *)&new_set : NULL, 103 KERNEL_SYSCALL(ret, sys_rt_sigprocmask, how, set ? (sigset_t __user *)&new_set : NULL,
101 oset ? (sigset_t __user *)&old_set : NULL, sigsetsize); 104 oset ? (sigset_t __user *)&old_set : NULL, sigsetsize);
@@ -128,6 +131,10 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, struct sigacti
128 struct k_sigaction new_sa, old_sa; 131 struct k_sigaction new_sa, old_sa;
129 int ret = -EINVAL; 132 int ret = -EINVAL;
130 133
134 /* XXX: Don't preclude handling different sized sigset_t's. */
135 if (sigsetsize != sizeof(compat_sigset_t))
136 return -EINVAL;
137
131 if (act) { 138 if (act) {
132 if (copy_from_user(&new_sa32.sa, act, sizeof new_sa32.sa)) 139 if (copy_from_user(&new_sa32.sa, act, sizeof new_sa32.sa))
133 return -EFAULT; 140 return -EFAULT;
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index f76c10863c62..54d619d4cac6 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -94,11 +94,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
94{ 94{
95 if (len > TASK_SIZE) 95 if (len > TASK_SIZE)
96 return -ENOMEM; 96 return -ENOMEM;
97 /* Might want to check for cache aliasing issues for MAP_FIXED case 97 if (flags & MAP_FIXED) {
98 * like ARM or MIPS ??? --BenH. 98 if ((flags & MAP_SHARED) &&
99 */ 99 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
100 if (flags & MAP_FIXED) 100 return -EINVAL;
101 return addr; 101 return addr;
102 }
102 if (!addr) 103 if (!addr)
103 addr = TASK_UNMAPPED_BASE; 104 addr = TASK_UNMAPPED_BASE;
104 105
@@ -212,6 +213,13 @@ asmlinkage long parisc_sync_file_range(int fd,
212 (loff_t)hi_nbytes << 32 | lo_nbytes, flags); 213 (loff_t)hi_nbytes << 32 | lo_nbytes, flags);
213} 214}
214 215
216asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
217 u32 lenhi, u32 lenlo)
218{
219 return sys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
220 ((u64)lenhi << 32) | lenlo);
221}
222
215asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag) 223asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
216{ 224{
217 return -ENOMEM; 225 return -ENOMEM;
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 9cfdaa19ab63..eca69bb8ef5f 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -21,7 +21,6 @@
21#include <linux/time.h> 21#include <linux/time.h>
22#include <linux/smp.h> 22#include <linux/smp.h>
23#include <linux/sem.h> 23#include <linux/sem.h>
24#include <linux/msg.h>
25#include <linux/shm.h> 24#include <linux/shm.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/uio.h> 26#include <linux/uio.h>
@@ -61,111 +60,23 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
61 return -ENOSYS; 60 return -ENOSYS;
62} 61}
63 62
64asmlinkage long sys32_sched_rr_get_interval(pid_t pid, 63/* Note: it is necessary to treat out_fd and in_fd as unsigned ints, with the
65 struct compat_timespec __user *interval) 64 * corresponding cast to a signed int to insure that the proper conversion
66{ 65 * (sign extension) between the register representation of a signed int (msr in
67 struct timespec t; 66 * 32-bit mode) and the register representation of a signed int (msr in 64-bit
68 int ret; 67 * mode) is performed.
69 68 */
70 KERNEL_SYSCALL(ret, sys_sched_rr_get_interval, pid, (struct timespec __user *)&t); 69asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd,
71 if (put_compat_timespec(&t, interval)) 70 compat_off_t __user *offset, compat_size_t count)
72 return -EFAULT;
73 return ret;
74}
75
76struct msgbuf32 {
77 int mtype;
78 char mtext[1];
79};
80
81asmlinkage long sys32_msgsnd(int msqid,
82 struct msgbuf32 __user *umsgp32,
83 size_t msgsz, int msgflg)
84{
85 struct msgbuf *mb;
86 struct msgbuf32 mb32;
87 int err;
88
89 if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL)
90 return -ENOMEM;
91
92 err = get_user(mb32.mtype, &umsgp32->mtype);
93 mb->mtype = mb32.mtype;
94 err |= copy_from_user(mb->mtext, &umsgp32->mtext, msgsz);
95
96 if (err)
97 err = -EFAULT;
98 else
99 KERNEL_SYSCALL(err, sys_msgsnd, msqid, (struct msgbuf __user *)mb, msgsz, msgflg);
100
101 kfree(mb);
102 return err;
103}
104
105asmlinkage long sys32_msgrcv(int msqid,
106 struct msgbuf32 __user *umsgp32,
107 size_t msgsz, long msgtyp, int msgflg)
108{
109 struct msgbuf *mb;
110 struct msgbuf32 mb32;
111 int err, len;
112
113 if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL)
114 return -ENOMEM;
115
116 KERNEL_SYSCALL(err, sys_msgrcv, msqid, (struct msgbuf __user *)mb, msgsz, msgtyp, msgflg);
117
118 if (err >= 0) {
119 len = err;
120 mb32.mtype = mb->mtype;
121 err = put_user(mb32.mtype, &umsgp32->mtype);
122 err |= copy_to_user(&umsgp32->mtext, mb->mtext, len);
123 if (err)
124 err = -EFAULT;
125 else
126 err = len;
127 }
128
129 kfree(mb);
130 return err;
131}
132
133asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count)
134{ 71{
135 mm_segment_t old_fs = get_fs(); 72 return compat_sys_sendfile((int)out_fd, (int)in_fd, offset, count);
136 int ret;
137 off_t of;
138
139 if (offset && get_user(of, offset))
140 return -EFAULT;
141
142 set_fs(KERNEL_DS);
143 ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, count);
144 set_fs(old_fs);
145
146 if (offset && put_user(of, offset))
147 return -EFAULT;
148
149 return ret;
150} 73}
151 74
152asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count) 75asmlinkage long sys32_sendfile64(u32 out_fd, u32 in_fd,
76 compat_loff_t __user *offset, compat_size_t count)
153{ 77{
154 mm_segment_t old_fs = get_fs(); 78 return sys_sendfile64((int)out_fd, (int)in_fd,
155 int ret; 79 (loff_t __user *)offset, count);
156 loff_t lof;
157
158 if (offset && get_user(lof, offset))
159 return -EFAULT;
160
161 set_fs(KERNEL_DS);
162 ret = sys_sendfile64(out_fd, in_fd, offset ? (loff_t __user *)&lof : NULL, count);
163 set_fs(old_fs);
164
165 if (offset && put_user(lof, offset))
166 return -EFAULT;
167
168 return ret;
169} 80}
170 81
171 82
@@ -200,13 +111,6 @@ long sys32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
200 buf, len); 111 buf, len);
201} 112}
202 113
203asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
204 u32 lenhi, u32 lenlo)
205{
206 return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
207 ((loff_t)lenhi << 32) | lenlo);
208}
209
210asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi, 114asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
211 u32 mask_lo, int fd, 115 u32 mask_lo, int fd,
212 const char __user *pathname) 116 const char __user *pathname)
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 86742df0b194..5e055240f00b 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -309,10 +309,13 @@ tracesys_next:
309 LDREG TASK_PT_GR25(%r1), %r25 309 LDREG TASK_PT_GR25(%r1), %r25
310 LDREG TASK_PT_GR24(%r1), %r24 310 LDREG TASK_PT_GR24(%r1), %r24
311 LDREG TASK_PT_GR23(%r1), %r23 311 LDREG TASK_PT_GR23(%r1), %r23
312#ifdef CONFIG_64BIT
313 LDREG TASK_PT_GR22(%r1), %r22 312 LDREG TASK_PT_GR22(%r1), %r22
314 LDREG TASK_PT_GR21(%r1), %r21 313 LDREG TASK_PT_GR21(%r1), %r21
314#ifdef CONFIG_64BIT
315 ldo -16(%r30),%r29 /* Reference param save area */ 315 ldo -16(%r30),%r29 /* Reference param save area */
316#else
317 stw %r22, -52(%r30) /* 5th argument */
318 stw %r21, -56(%r30) /* 6th argument */
316#endif 319#endif
317 320
318 comiclr,>>= __NR_Linux_syscalls, %r20, %r0 321 comiclr,>>= __NR_Linux_syscalls, %r20, %r0
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 54d950b067b7..129fd472c471 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -247,10 +247,7 @@
247 ENTRY_SAME(sched_yield) 247 ENTRY_SAME(sched_yield)
248 ENTRY_SAME(sched_get_priority_max) 248 ENTRY_SAME(sched_get_priority_max)
249 ENTRY_SAME(sched_get_priority_min) /* 160 */ 249 ENTRY_SAME(sched_get_priority_min) /* 160 */
250 /* These 2 would've worked if someone had defined struct timespec 250 ENTRY_COMP(sched_rr_get_interval)
251 * carefully, like timeval for example (which is about the same).
252 * Unfortunately it contains a long :-( */
253 ENTRY_DIFF(sched_rr_get_interval)
254 ENTRY_COMP(nanosleep) 251 ENTRY_COMP(nanosleep)
255 ENTRY_SAME(mremap) 252 ENTRY_SAME(mremap)
256 ENTRY_SAME(setresuid) 253 ENTRY_SAME(setresuid)
@@ -286,8 +283,8 @@
286 ENTRY_SAME(semop) /* 185 */ 283 ENTRY_SAME(semop) /* 185 */
287 ENTRY_SAME(semget) 284 ENTRY_SAME(semget)
288 ENTRY_DIFF(semctl) 285 ENTRY_DIFF(semctl)
289 ENTRY_DIFF(msgsnd) 286 ENTRY_COMP(msgsnd)
290 ENTRY_DIFF(msgrcv) 287 ENTRY_COMP(msgrcv)
291 ENTRY_SAME(msgget) /* 190 */ 288 ENTRY_SAME(msgget) /* 190 */
292 ENTRY_SAME(msgctl) 289 ENTRY_SAME(msgctl)
293 ENTRY_SAME(shmat) 290 ENTRY_SAME(shmat)
@@ -307,7 +304,7 @@
307 ENTRY_SAME(gettid) 304 ENTRY_SAME(gettid)
308 ENTRY_OURS(readahead) 305 ENTRY_OURS(readahead)
309 ENTRY_SAME(tkill) 306 ENTRY_SAME(tkill)
310 ENTRY_SAME(sendfile64) 307 ENTRY_DIFF(sendfile64)
311 ENTRY_COMP(futex) /* 210 */ 308 ENTRY_COMP(futex) /* 210 */
312 ENTRY_COMP(sched_setaffinity) 309 ENTRY_COMP(sched_setaffinity)
313 ENTRY_COMP(sched_getaffinity) 310 ENTRY_COMP(sched_getaffinity)
@@ -327,12 +324,12 @@
327 ENTRY_SAME(epoll_wait) 324 ENTRY_SAME(epoll_wait)
328 ENTRY_SAME(remap_file_pages) 325 ENTRY_SAME(remap_file_pages)
329 ENTRY_SAME(semtimedop) 326 ENTRY_SAME(semtimedop)
330 ENTRY_SAME(mq_open) 327 ENTRY_COMP(mq_open)
331 ENTRY_SAME(mq_unlink) /* 230 */ 328 ENTRY_SAME(mq_unlink) /* 230 */
332 ENTRY_SAME(mq_timedsend) 329 ENTRY_COMP(mq_timedsend)
333 ENTRY_SAME(mq_timedreceive) 330 ENTRY_COMP(mq_timedreceive)
334 ENTRY_SAME(mq_notify) 331 ENTRY_COMP(mq_notify)
335 ENTRY_SAME(mq_getsetattr) 332 ENTRY_COMP(mq_getsetattr)
336 ENTRY_COMP(waitid) /* 235 */ 333 ENTRY_COMP(waitid) /* 235 */
337 ENTRY_OURS(fadvise64_64) 334 ENTRY_OURS(fadvise64_64)
338 ENTRY_SAME(set_tid_address) 335 ENTRY_SAME(set_tid_address)
@@ -403,7 +400,7 @@
403 ENTRY_COMP(signalfd) 400 ENTRY_COMP(signalfd)
404 ENTRY_SAME(ni_syscall) /* was timerfd */ 401 ENTRY_SAME(ni_syscall) /* was timerfd */
405 ENTRY_SAME(eventfd) 402 ENTRY_SAME(eventfd)
406 ENTRY_COMP(fallocate) /* 305 */ 403 ENTRY_OURS(fallocate) /* 305 */
407 ENTRY_SAME(timerfd_create) 404 ENTRY_SAME(timerfd_create)
408 ENTRY_COMP(timerfd_settime) 405 ENTRY_COMP(timerfd_settime)
409 ENTRY_COMP(timerfd_gettime) 406 ENTRY_COMP(timerfd_gettime)
@@ -428,6 +425,10 @@
428 ENTRY_SAME(syncfs) 425 ENTRY_SAME(syncfs)
429 ENTRY_SAME(setns) 426 ENTRY_SAME(setns)
430 ENTRY_COMP(sendmmsg) 427 ENTRY_COMP(sendmmsg)
428 ENTRY_COMP(process_vm_readv) /* 330 */
429 ENTRY_COMP(process_vm_writev)
430 ENTRY_SAME(kcmp)
431 ENTRY_SAME(finit_module)
431 432
432 /* Nothing yet */ 433 /* Nothing yet */
433 434