author     Jeff Garzik <jgarzik@pobox.com>   2005-11-20 00:41:18 -0500
committer  Jeff Garzik <jgarzik@pobox.com>   2005-11-20 00:41:18 -0500
commit     98a5d024eb589a863c1c79f3c3f8ecb666c0eec9 (patch)
tree       456799ad9fce41cdb2186ba3872c7a077ec3bd05 /arch
parent     d67e7ebb2a15bf0429c55f2fc1c4b02808367874 (diff)
parent     1b15688b31d8c88881102426279e8cc03953860b (diff)
Merge branch 'upstream'
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/armksyms.c            |    1
-rw-r--r--  arch/arm/kernel/entry-common.S        |    3
-rw-r--r--  arch/arm/kernel/signal.c              |   25
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S         |    6
-rw-r--r--  arch/arm/lib/getuser.S                |   11
-rw-r--r--  arch/arm/mm/Makefile                  |    2
-rw-r--r--  arch/arm/mm/blockops.c                |  185
-rw-r--r--  arch/arm/mm/init.c                    |   24
-rw-r--r--  arch/arm/mm/ioremap.c                 |    3
-rw-r--r--  arch/ia64/kernel/ivt.S                |  133
-rw-r--r--  arch/parisc/kernel/drivers.c          |    8
-rw-r--r--  arch/parisc/kernel/entry.S            |    1
-rw-r--r--  arch/parisc/kernel/inventory.c        |    2
-rw-r--r--  arch/parisc/kernel/ioctl32.c          |  546
-rw-r--r--  arch/parisc/kernel/irq.c              |  110
-rw-r--r--  arch/parisc/kernel/perf.c             |   33
-rw-r--r--  arch/parisc/kernel/ptrace.c           |    5
-rw-r--r--  arch/parisc/kernel/signal.c           |    1
-rw-r--r--  arch/parisc/kernel/smp.c              |   24
-rw-r--r--  arch/parisc/kernel/syscall.S          |    3
-rw-r--r--  arch/powerpc/kernel/pci_64.c          |   18
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c       |    3
-rw-r--r--  arch/powerpc/kernel/ptrace-common.h   |  164
-rw-r--r--  arch/powerpc/kernel/ptrace.c          |    3
-rw-r--r--  arch/powerpc/kernel/ptrace32.c        |    3
-rw-r--r--  arch/powerpc/mm/imalloc.c             |    3
-rw-r--r--  arch/powerpc/mm/init_64.c             |    3
-rw-r--r--  arch/powerpc/mm/mmu_decl.h            |   14
-rw-r--r--  arch/powerpc/mm/pgtable_64.c          |    3
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c |    6
-rw-r--r--  arch/powerpc/sysdev/mpic.c            |   13
31 files changed, 447 insertions(+), 912 deletions(-)
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 7a3261f0bf79..9997098009a9 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -120,7 +120,6 @@ EXPORT_SYMBOL(__arch_strncpy_from_user);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
 
 EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 066597f4345a..f7f183075237 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,8 +48,7 @@ work_pending:
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
 	bl	do_notify_resume
-	disable_irq				@ disable interrupts
-	b	no_work_pending
+	b	ret_slow_syscall		@ Check work again
 
 work_resched:
 	bl	schedule
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index a917e3dd3666..765922bcf9e7 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -595,23 +595,22 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 	 */
 	ret |= !valid_user_regs(regs);
 
-	/*
-	 * Block the signal if we were unsuccessful.
-	 */
 	if (ret != 0) {
-		spin_lock_irq(&tsk->sighand->siglock);
-		sigorsets(&tsk->blocked, &tsk->blocked,
-			  &ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&tsk->blocked, sig);
-		recalc_sigpending();
-		spin_unlock_irq(&tsk->sighand->siglock);
+		force_sigsegv(sig, tsk);
+		return;
 	}
 
-	if (ret == 0)
-		return;
+	/*
+	 * Block the signal if we were successful.
+	 */
+	spin_lock_irq(&tsk->sighand->siglock);
+	sigorsets(&tsk->blocked, &tsk->blocked,
+		  &ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NODEFER))
+		sigaddset(&tsk->blocked, sig);
+	recalc_sigpending();
+	spin_unlock_irq(&tsk->sighand->siglock);
 
-	force_sigsegv(sig, tsk);
 }
 
 /*
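
The net effect of the signal.c hunk above, assembled from its '+' lines for
readability (this is the resulting tail of handle_signal(), not new code):

	if (ret != 0) {
		force_sigsegv(sig, tsk);
		return;
	}

	/*
	 * Block the signal if we were successful.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	sigorsets(&tsk->blocked, &tsk->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&tsk->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&tsk->sighand->siglock);

Failure now takes the early exit and never touches the blocked mask; the old
code blocked the signal on failure and only then raised SIGSEGV.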
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 80c8e4c8cefa..9a47770114d4 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -172,6 +172,10 @@ SECTIONS
 	.comment 0 : { *(.comment) }
 }
 
-/* those must never be empty */
+/*
+ * These must never be empty
+ * If you have to comment these two assert statements out, your
+ * binutils is too old (for other reasons as well)
+ */
 ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
 ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index d204018070a4..c03ea8e666ba 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -54,15 +54,6 @@ __get_user_4:
 	mov	r0, #0
 	mov	pc, lr
 
-	.global	__get_user_8
-__get_user_8:
-5:	ldrt	r2, [r0], #4
-6:	ldrt	r3, [r0]
-	mov	r0, #0
-	mov	pc, lr
-
-__get_user_bad_8:
-	mov	r3, #0
 __get_user_bad:
 	mov	r2, #0
 	mov	r0, #-EFAULT
@@ -73,6 +64,4 @@ __get_user_bad:
 	.long	2b, __get_user_bad
 	.long	3b, __get_user_bad
 	.long	4b, __get_user_bad
-	.long	5b, __get_user_bad_8
-	.long	6b, __get_user_bad_8
 .previous
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 59f47d4c2dfe..ffe73ba2bf17 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -51,4 +51,4 @@ obj-$(CONFIG_CPU_ARM1026) += proc-arm1026.o
 obj-$(CONFIG_CPU_SA110)		+= proc-sa110.o
 obj-$(CONFIG_CPU_SA1100)	+= proc-sa1100.o
 obj-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o
-obj-$(CONFIG_CPU_V6)		+= proc-v6.o blockops.o
+obj-$(CONFIG_CPU_V6)		+= proc-v6.o
diff --git a/arch/arm/mm/blockops.c b/arch/arm/mm/blockops.c
deleted file mode 100644
index 4f5ee2d08996..000000000000
--- a/arch/arm/mm/blockops.c
+++ /dev/null
@@ -1,185 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-
-#include <asm/memory.h>
-#include <asm/ptrace.h>
-#include <asm/cacheflush.h>
-#include <asm/traps.h>
-
-extern struct cpu_cache_fns blk_cache_fns;
-
-#define HARVARD_CACHE
-
-/*
- * blk_flush_kern_dcache_page(kaddr)
- *
- *	Ensure that the data held in the page kaddr is written back
- *	to the page in question.
- *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
- */
-static void __attribute__((naked))
-blk_flush_kern_dcache_page(void *kaddr)
-{
-	asm(
-	"add	r1, r0, %0							\n\
-	sub	r1, r1, %1							\n\
-1:	.word	0xec401f0e	@ mcrr	p15, 0, r0, r1, c14, 0	@ blocking	\n\
-	mov	r0, #0								\n\
-	mcr	p15, 0, r0, c7, c5, 0						\n\
-	mcr	p15, 0, r0, c7, c10, 4						\n\
-	mov	pc, lr"
-	:
-	: "I" (PAGE_SIZE), "I" (L1_CACHE_BYTES));
-}
-
-/*
- * blk_dma_inv_range(start,end)
- *
- *	Invalidate the data cache within the specified region; we will
- *	be performing a DMA operation in this region and we want to
- *	purge old data in the cache.
- *
- *	- start   - virtual start address of region
- *	- end     - virtual end address of region
- */
-static void __attribute__((naked))
-blk_dma_inv_range_unified(unsigned long start, unsigned long end)
-{
-	asm(
-	"tst	r0, %0								\n\
-	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line		\n\
-	tst	r1, %0								\n\
-	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line\n\
-	.word	0xec401f06	@ mcrr	p15, 0, r1, r0, c6, 0	@ blocking	\n\
-	mov	r0, #0								\n\
-	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer		\n\
-	mov	pc, lr"
-	:
-	: "I" (L1_CACHE_BYTES - 1));
-}
-
-static void __attribute__((naked))
-blk_dma_inv_range_harvard(unsigned long start, unsigned long end)
-{
-	asm(
-	"tst	r0, %0								\n\
-	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line			\n\
-	tst	r1, %0								\n\
-	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line	\n\
-	.word	0xec401f06	@ mcrr	p15, 0, r1, r0, c6, 0	@ blocking	\n\
-	mov	r0, #0								\n\
-	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer		\n\
-	mov	pc, lr"
-	:
-	: "I" (L1_CACHE_BYTES - 1));
-}
-
-/*
- * blk_dma_clean_range(start,end)
- *	- start   - virtual start address of region
- *	- end     - virtual end address of region
- */
-static void __attribute__((naked))
-blk_dma_clean_range(unsigned long start, unsigned long end)
-{
-	asm(
-	".word	0xec401f0c	@ mcrr	p15, 0, r1, r0, c12, 0	@ blocking	\n\
-	mov	r0, #0								\n\
-	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer		\n\
-	mov	pc, lr");
-}
-
-/*
- * blk_dma_flush_range(start,end)
- *	- start   - virtual start address of region
- *	- end     - virtual end address of region
- */
-static void __attribute__((naked))
-blk_dma_flush_range(unsigned long start, unsigned long end)
-{
-	asm(
-	".word	0xec401f0e	@ mcrr	p15, 0, r1, r0, c14, 0	@ blocking	\n\
-	mov	pc, lr");
-}
-
-static int blockops_trap(struct pt_regs *regs, unsigned int instr)
-{
-	regs->ARM_r4 |= regs->ARM_r2;
-	regs->ARM_pc += 4;
-	return 0;
-}
-
-static char *func[] = {
-	"Prefetch data range",
-	"Clean+Invalidate data range",
-	"Clean data range",
-	"Invalidate data range",
-	"Invalidate instr range"
-};
-
-static struct undef_hook blockops_hook __initdata = {
-	.instr_mask	= 0x0fffffd0,
-	.instr_val	= 0x0c401f00,
-	.cpsr_mask	= PSR_T_BIT,
-	.cpsr_val	= 0,
-	.fn		= blockops_trap,
-};
-
-static int __init blockops_check(void)
-{
-	register unsigned int err asm("r4") = 0;
-	unsigned int err_pos = 1;
-	unsigned int cache_type;
-	int i;
-
-	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (cache_type));
-
-	printk("Checking V6 block cache operations:\n");
-	register_undef_hook(&blockops_hook);
-
-	__asm__ ("mov	r0, %0\n\t"
-		"mov	r1, %1\n\t"
-		"mov	r2, #1\n\t"
-		".word	0xec401f2c @ mcrr p15, 0, r1, r0, c12, 2\n\t"
-		"mov	r2, #2\n\t"
-		".word	0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0\n\t"
-		"mov	r2, #4\n\t"
-		".word	0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0\n\t"
-		"mov	r2, #8\n\t"
-		".word	0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0\n\t"
-		"mov	r2, #16\n\t"
-		".word	0xec401f05 @ mcrr p15, 0, r1, r0, c5, 0\n\t"
-		:
-		: "r" (PAGE_OFFSET), "r" (PAGE_OFFSET + 128)
-		: "r0", "r1", "r2");
-
-	unregister_undef_hook(&blockops_hook);
-
-	for (i = 0; i < ARRAY_SIZE(func); i++, err_pos <<= 1)
-		printk("%30s: %ssupported\n", func[i], err & err_pos ? "not " : "");
-
-	if ((err & 8) == 0) {
-		printk(" --> Using %s block cache invalidate\n",
-		       cache_type & (1 << 24) ? "harvard" : "unified");
-		if (cache_type & (1 << 24))
-			cpu_cache.dma_inv_range = blk_dma_inv_range_harvard;
-		else
-			cpu_cache.dma_inv_range = blk_dma_inv_range_unified;
-	}
-	if ((err & 4) == 0) {
-		printk(" --> Using block cache clean\n");
-		cpu_cache.dma_clean_range = blk_dma_clean_range;
-	}
-	if ((err & 2) == 0) {
-		printk(" --> Using block cache clean+invalidate\n");
-		cpu_cache.dma_flush_range = blk_dma_flush_range;
-		cpu_cache.flush_kern_dcache_page = blk_flush_kern_dcache_page;
-	}
-
-	return 0;
-}
-
-__initcall(blockops_check);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c168f322ef8c..8b276ee38acf 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -420,7 +420,8 @@ static void __init bootmem_init(struct meminfo *mi)
  * Set up device the mappings.  Since we clear out the page tables for all
  * mappings above VMALLOC_END, we will remove any debug device mappings.
  * This means you have to be careful how you debug this function, or any
- * called function.  (Do it by code inspection!)
+ * called function.  This means you can't use any function or debugging
+ * method which may touch any device, otherwise the kernel _will_ crash.
  */
 static void __init devicemaps_init(struct machine_desc *mdesc)
 {
@@ -428,6 +429,12 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	unsigned long addr;
 	void *vectors;
 
+	/*
+	 * Allocate the vector page early.
+	 */
+	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
+	BUG_ON(!vectors);
+
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
@@ -461,12 +468,6 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	create_mapping(&map);
 #endif
 
-	flush_cache_all();
-	local_flush_tlb_all();
-
-	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
-	BUG_ON(!vectors);
-
 	/*
 	 * Create a mapping for the machine vectors at the high-vectors
 	 * location (0xffff0000).  If we aren't using high-vectors, also
@@ -491,12 +492,13 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	mdesc->map_io();
 
 	/*
-	 * Finally flush the tlb again - this ensures that we're in a
-	 * consistent state wrt the writebuffer if the writebuffer needs
-	 * draining.  After this point, we can start to touch devices
-	 * again.
+	 * Finally flush the caches and tlb to ensure that we're in a
+	 * consistent state wrt the writebuffer.  This also ensures that
+	 * any write-allocated cache lines in the vector page are written
+	 * back.  After this point, we can start to touch devices again.
 	 */
 	local_flush_tlb_all();
+	flush_cache_all();
 }
 
 /*
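
Taken together, the init.c hunks above reorder devicemaps_init(): the vector
page is now allocated before the kernel mappings above VMALLOC_END are torn
down, and the cache/TLB flushes move to the very end. A condensed sketch of
the resulting sequence (bodies elided; only calls that appear in the diff are
used):

static void __init devicemaps_init(struct machine_desc *mdesc)
{
	void *vectors;
	unsigned long addr;

	/* allocate the vector page while bootmem is still safe to use */
	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
	BUG_ON(!vectors);

	/* clear out all mappings above VMALLOC_END */
	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/* ... create vector and machine-specific device mappings ... */
	mdesc->map_io();

	/* one flush at the end covers the writebuffer and the vector page */
	local_flush_tlb_all();
	flush_cache_all();
}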
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0f128c28fee4..10901398e4a2 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -130,8 +130,7 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
  * mapping.  See include/asm-arm/proc-armv/pgtable.h for more information.
  */
 void __iomem *
-__ioremap(unsigned long phys_addr, size_t size, unsigned long flags,
-	  unsigned long align)
+__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
 {
 	void * addr;
 	struct vm_struct * area;
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index e06f21f60dc5..301f2e9d262e 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -91,16 +91,17 @@ ENTRY(vhpt_miss)
  * (the "original") TLB miss, which may either be caused by an instruction
  * fetch or a data access (or non-access).
  *
- * What we do here is normal TLB miss handing for the _original_ miss, followed
- * by inserting the TLB entry for the virtual page table page that the VHPT
- * walker was attempting to access.  The latter gets inserted as long
- * as both L1 and L2 have valid mappings for the faulting address.
- * The TLB entry for the original miss gets inserted only if
- * the L3 entry indicates that the page is present.
+ * What we do here is normal TLB miss handing for the _original_ miss,
+ * followed by inserting the TLB entry for the virtual page table page
+ * that the VHPT walker was attempting to access.  The latter gets
+ * inserted as long as page table entry above pte level have valid
+ * mappings for the faulting address.  The TLB entry for the original
+ * miss gets inserted only if the pte entry indicates that the page is
+ * present.
  *
  * do_page_fault gets invoked in the following cases:
  *	- the faulting virtual address uses unimplemented address bits
- *	- the faulting virtual address has no L1, L2, or L3 mapping
+ *	- the faulting virtual address has no valid page table mapping
  */
 	mov r16=cr.ifa				// get address that caused the TLB miss
 #ifdef CONFIG_HUGETLB_PAGE
@@ -126,7 +127,7 @@ ENTRY(vhpt_miss)
 #endif
 	;;
 	cmp.eq p6,p7=5,r17			// is IFA pointing into to region 5?
-	shr.u r18=r22,PGDIR_SHIFT		// get bits 33-63 of the faulting address
+	shr.u r18=r22,PGDIR_SHIFT		// get bottom portion of pgd index bit
 	;;
 (p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
 
@@ -137,38 +138,38 @@ ENTRY(vhpt_miss)
 (p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
 (p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
 	;;
-(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
-(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
+(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
+(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
 	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
 #ifdef CONFIG_PGTABLE_4
-	shr.u r28=r22,PUD_SHIFT			// shift L2 index into position
+	shr.u r28=r22,PUD_SHIFT			// shift pud index into position
 #else
-	shr.u r18=r22,PMD_SHIFT			// shift L3 index into position
+	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
 #endif
 	;;
-	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
+	ld8 r17=[r17]				// get *pgd (may be 0)
 	;;
-(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
+(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
 #ifdef CONFIG_PGTABLE_4
-	dep r28=r28,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
+	dep r28=r28,r17,3,(PAGE_SHIFT-3)	// r28=pud_offset(pgd,addr)
 	;;
-	shr.u r18=r22,PMD_SHIFT			// shift L3 index into position
-(p7)	ld8 r29=[r28]				// fetch the L2 entry (may be 0)
+	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
+(p7)	ld8 r29=[r28]				// get *pud (may be 0)
 	;;
-(p7)	cmp.eq.or.andcm p6,p7=r29,r0		// was L2 entry NULL?
-	dep r17=r18,r29,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
+(p7)	cmp.eq.or.andcm p6,p7=r29,r0		// was pud_present(*pud) == NULL?
+	dep r17=r18,r29,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
 #else
-	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
+	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pgd,addr)
 #endif
 	;;
-(p7)	ld8 r20=[r17]				// fetch the L3 entry (may be 0)
-	shr.u r19=r22,PAGE_SHIFT		// shift L4 index into position
+(p7)	ld8 r20=[r17]				// get *pmd (may be 0)
+	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
 	;;
-(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was L3 entry NULL?
-	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// compute address of L4 page table entry
+(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was pmd_present(*pmd) == NULL?
+	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// r21=pte_offset(pmd,addr)
 	;;
-(p7)	ld8 r18=[r21]				// read the L4 PTE
-	mov r19=cr.isr				// cr.isr bit 0 tells us if this is an insn miss
+(p7)	ld8 r18=[r21]				// read *pte
+	mov r19=cr.isr				// cr.isr bit 32 tells us if this is an insn miss
 	;;
 (p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
 	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
@@ -202,25 +203,33 @@ ENTRY(vhpt_miss)
 	dv_serialize_data
 
 	/*
-	 * Re-check L2 and L3 pagetable.  If they changed, we may have received a ptc.g
+	 * Re-check pagetable entry.  If they changed, we may have received a ptc.g
 	 * between reading the pagetable and the "itc".  If so, flush the entry we
-	 * inserted and retry.
+	 * inserted and retry.  At this point, we have:
+	 *
+	 * r28 = equivalent of pud_offset(pgd, ifa)
+	 * r17 = equivalent of pmd_offset(pud, ifa)
+	 * r21 = equivalent of pte_offset(pmd, ifa)
+	 *
+	 * r29 = *pud
+	 * r20 = *pmd
+	 * r18 = *pte
 	 */
-	ld8 r25=[r21]				// read L4 entry again
-	ld8 r26=[r17]				// read L3 PTE again
+	ld8 r25=[r21]				// read *pte again
+	ld8 r26=[r17]				// read *pmd again
 #ifdef CONFIG_PGTABLE_4
-	ld8 r18=[r28]				// read L2 entry again
+	ld8 r19=[r28]				// read *pud again
 #endif
 	cmp.ne p6,p7=r0,r0
 	;;
-	cmp.ne.or.andcm p6,p7=r26,r20		// did L3 entry change
+	cmp.ne.or.andcm p6,p7=r26,r20		// did *pmd change
 #ifdef CONFIG_PGTABLE_4
-	cmp.ne.or.andcm p6,p7=r29,r18		// did L4 PTE change
+	cmp.ne.or.andcm p6,p7=r19,r29		// did *pud change
 #endif
 	mov r27=PAGE_SHIFT<<2
 	;;
 (p6)	ptc.l r22,r27				// purge PTE page translation
-(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did L4 PTE change
+(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did *pte change
 	;;
 (p6)	ptc.l r16,r27				// purge translation
 #endif
@@ -235,19 +244,19 @@ END(vhpt_miss)
 ENTRY(itlb_miss)
 	DBG_FAULT(1)
 	/*
-	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
+	 * The ITLB handler accesses the PTE via the virtually mapped linear
 	 * page table.  If a nested TLB miss occurs, we switch into physical
-	 * mode, walk the page table, and then re-execute the L3 PTE read
-	 * and go on normally after that.
+	 * mode, walk the page table, and then re-execute the PTE read and
+	 * go on normally after that.
 	 */
 	mov r16=cr.ifa				// get virtual address
 	mov r29=b0				// save b0
 	mov r31=pr				// save predicates
 .itlb_fault:
-	mov r17=cr.iha				// get virtual address of L3 PTE
+	mov r17=cr.iha				// get virtual address of PTE
 	movl r30=1f				// load nested fault continuation point
 	;;
-1:	ld8 r18=[r17]				// read L3 PTE
+1:	ld8 r18=[r17]				// read *pte
 	;;
 	mov b0=r29
 	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
@@ -262,7 +271,7 @@ ENTRY(itlb_miss)
 	 */
 	dv_serialize_data
 
-	ld8 r19=[r17]				// read L3 PTE again and see if same
+	ld8 r19=[r17]				// read *pte again and see if same
 	mov r20=PAGE_SHIFT<<2			// setup page size for purge
 	;;
 	cmp.ne p7,p0=r18,r19
@@ -279,19 +288,19 @@ END(itlb_miss)
 ENTRY(dtlb_miss)
 	DBG_FAULT(2)
 	/*
-	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
+	 * The DTLB handler accesses the PTE via the virtually mapped linear
 	 * page table.  If a nested TLB miss occurs, we switch into physical
-	 * mode, walk the page table, and then re-execute the L3 PTE read
-	 * and go on normally after that.
+	 * mode, walk the page table, and then re-execute the PTE read and
+	 * go on normally after that.
 	 */
 	mov r16=cr.ifa				// get virtual address
 	mov r29=b0				// save b0
 	mov r31=pr				// save predicates
 dtlb_fault:
-	mov r17=cr.iha				// get virtual address of L3 PTE
+	mov r17=cr.iha				// get virtual address of PTE
 	movl r30=1f				// load nested fault continuation point
 	;;
-1:	ld8 r18=[r17]				// read L3 PTE
+1:	ld8 r18=[r17]				// read *pte
 	;;
 	mov b0=r29
 	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
@@ -306,7 +315,7 @@ dtlb_fault:
 	 */
 	dv_serialize_data
 
-	ld8 r19=[r17]				// read L3 PTE again and see if same
+	ld8 r19=[r17]				// read *pte again and see if same
 	mov r20=PAGE_SHIFT<<2			// setup page size for purge
 	;;
 	cmp.ne p7,p0=r18,r19
@@ -420,7 +429,7 @@ ENTRY(nested_dtlb_miss)
 	 * r30:	continuation address
 	 * r31:	saved pr
 	 *
-	 * Output:	r17: physical address of L3 PTE of faulting address
+	 * Output:	r17: physical address of PTE of faulting address
 	 *		r29: saved b0
 	 *		r30: continuation address
 	 *		r31: saved pr
@@ -450,33 +459,33 @@ ENTRY(nested_dtlb_miss)
 (p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
 (p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
 	;;
-(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
-(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
+(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
+(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
 	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
 #ifdef CONFIG_PGTABLE_4
-	shr.u r18=r22,PUD_SHIFT			// shift L2 index into position
+	shr.u r18=r22,PUD_SHIFT			// shift pud index into position
 #else
-	shr.u r18=r22,PMD_SHIFT			// shift L3 index into position
+	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
 #endif
 	;;
-	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
+	ld8 r17=[r17]				// get *pgd (may be 0)
 	;;
-(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
-	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
+(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
+	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=p[u|m]d_offset(pgd,addr)
 	;;
 #ifdef CONFIG_PGTABLE_4
-(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
-	shr.u r18=r22,PMD_SHIFT			// shift L3 index into position
+(p7)	ld8 r17=[r17]				// get *pud (may be 0)
+	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
 	;;
-(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
-	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
+(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pud_present(*pud) == NULL?
+	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
 	;;
 #endif
-(p7)	ld8 r17=[r17]				// fetch the L3 entry (may be 0)
-	shr.u r19=r22,PAGE_SHIFT		// shift L4 index into position
+(p7)	ld8 r17=[r17]				// get *pmd (may be 0)
+	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
 	;;
-(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L3 entry NULL?
-	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L4 page table entry
+(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pmd_present(*pmd) == NULL?
+	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// r17=pte_offset(pmd,addr);
 (p6)	br.cond.spnt page_fault
 	mov b0=r30
 	br.sptk.many b0				// return to continuation point
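
The renamed comments in ivt.S map the hand-coded VHPT walk onto the generic
Linux pgd/pud/pmd/pte terminology. For reference, a rough C equivalent of the
lookup the handler performs -- a sketch only, using the generic page-table
accessors of this era; the real code is assembler, works on physical
addresses, and must re-validate against concurrent ptc.g purges:

static pte_t *vhpt_walk_sketch(struct mm_struct *mm, unsigned long ifa)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, ifa);	/* r17: pgd_offset for the region */
	if (pgd_none(*pgd))
		return NULL;		/* -> page_fault */
	pud = pud_offset(pgd, ifa);	/* r28; folded away unless CONFIG_PGTABLE_4 */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, ifa);	/* r17 */
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, ifa);	/* r21; *pte is read into r18 */
}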
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 988844a169e6..d016d672ec2b 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -499,8 +499,12 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
 
 	dev = create_parisc_device(mod_path);
 	if (dev->id.hw_type != HPHW_FAULTY) {
-		printk("Two devices have hardware path %s.  Please file a bug with HP.\n"
-			"In the meantime, you could try rearranging your cards.\n", parisc_pathname(dev));
+		printk(KERN_ERR "Two devices have hardware path [%s].  "
+				"IODC data for second device: "
+				"%02x%02x%02x%02x%02x%02x\n"
+				"Rearranging GSC cards sometimes helps\n",
+			parisc_pathname(dev), iodc_data[0], iodc_data[1],
+			iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
 		return NULL;
 	}
 
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index c7e66ee5b083..9af4b22a6d77 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1846,6 +1846,7 @@ sys_clone_wrapper:
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 
+	/* WARNING - Clobbers r19 and r21, userspace must save these! */
 	STREG	%r2,PT_GR19(%r1)	/* save for child */
 	STREG	%r30,PT_GR21(%r1)
 	BL	sys_clone,%r2
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 1a1c66422736..8f563871e83c 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -188,7 +188,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
 	temp = pa_pdc_cell.cba;
 	dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
 	if (!dev) {
-		return PDC_NE_MOD;
+		return PDC_OK;
 	}
 
 	/* alloc_pa_dev sets dev->hpa */
diff --git a/arch/parisc/kernel/ioctl32.c b/arch/parisc/kernel/ioctl32.c
index 0a331104ad56..4eada1bb27f0 100644
--- a/arch/parisc/kernel/ioctl32.c
+++ b/arch/parisc/kernel/ioctl32.c
@@ -19,536 +19,6 @@
 #define CODE
 #include "compat_ioctl.c"
 
-/* Use this to get at 32-bit user passed pointers.
-   See sys_sparc32.c for description about these. */
-#define A(__x) ((unsigned long)(__x))
-/* The same for use with copy_from_user() and copy_to_user(). */
-#define B(__x) ((void *)(unsigned long)(__x))
-
-#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
-/* This really belongs in include/linux/drm.h -DaveM */
-#include "../../../drivers/char/drm/drm.h"
-
-typedef struct drm32_version {
-	int    version_major;	  /* Major version	      */
-	int    version_minor;	  /* Minor version	      */
-	int    version_patchlevel;/* Patch level	      */
-	int    name_len;	  /* Length of name buffer    */
-	u32    name;		  /* Name of driver	      */
-	int    date_len;	  /* Length of date buffer    */
-	u32    date;		  /* User-space buffer to hold date */
-	int    desc_len;	  /* Length of desc buffer    */
-	u32    desc;		  /* User-space buffer to hold desc */
-} drm32_version_t;
-#define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t)
-
-static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_version_t *uversion = (drm32_version_t *)arg;
-	char *name_ptr, *date_ptr, *desc_ptr;
-	u32 tmp1, tmp2, tmp3;
-	drm_version_t kversion;
-	mm_segment_t old_fs;
-	int ret;
-
-	memset(&kversion, 0, sizeof(kversion));
-	if (get_user(kversion.name_len, &uversion->name_len) ||
-	    get_user(kversion.date_len, &uversion->date_len) ||
-	    get_user(kversion.desc_len, &uversion->desc_len) ||
-	    get_user(tmp1, &uversion->name) ||
-	    get_user(tmp2, &uversion->date) ||
-	    get_user(tmp3, &uversion->desc))
-		return -EFAULT;
-
-	name_ptr = (char *) A(tmp1);
-	date_ptr = (char *) A(tmp2);
-	desc_ptr = (char *) A(tmp3);
-
-	ret = -ENOMEM;
-	if (kversion.name_len && name_ptr) {
-		kversion.name = kmalloc(kversion.name_len, GFP_KERNEL);
-		if (!kversion.name)
-			goto out;
-	}
-	if (kversion.date_len && date_ptr) {
-		kversion.date = kmalloc(kversion.date_len, GFP_KERNEL);
-		if (!kversion.date)
-			goto out;
-	}
-	if (kversion.desc_len && desc_ptr) {
-		kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL);
-		if (!kversion.desc)
-			goto out;
-	}
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion);
-	set_fs(old_fs);
-
-	if (!ret) {
-		if ((kversion.name &&
-		     copy_to_user(name_ptr, kversion.name, kversion.name_len)) ||
-		    (kversion.date &&
-		     copy_to_user(date_ptr, kversion.date, kversion.date_len)) ||
-		    (kversion.desc &&
-		     copy_to_user(desc_ptr, kversion.desc, kversion.desc_len)))
-			ret = -EFAULT;
-		if (put_user(kversion.version_major, &uversion->version_major) ||
-		    put_user(kversion.version_minor, &uversion->version_minor) ||
-		    put_user(kversion.version_patchlevel, &uversion->version_patchlevel) ||
-		    put_user(kversion.name_len, &uversion->name_len) ||
-		    put_user(kversion.date_len, &uversion->date_len) ||
-		    put_user(kversion.desc_len, &uversion->desc_len))
-			ret = -EFAULT;
-	}
-
-out:
-	kfree(kversion.name);
-	kfree(kversion.date);
-	kfree(kversion.desc);
-	return ret;
-}
-
-typedef struct drm32_unique {
-	int	unique_len;	  /* Length of unique	       */
-	u32	unique;		  /* Unique name for driver instantiation */
-} drm32_unique_t;
-#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
-#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)
-
-static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_unique_t *uarg = (drm32_unique_t *)arg;
-	drm_unique_t karg;
-	mm_segment_t old_fs;
-	char *uptr;
-	u32 tmp;
-	int ret;
-
-	if (get_user(karg.unique_len, &uarg->unique_len))
-		return -EFAULT;
-	karg.unique = NULL;
-
-	if (get_user(tmp, &uarg->unique))
-		return -EFAULT;
-
-	uptr = (char *) A(tmp);
-
-	if (uptr) {
-		karg.unique = kmalloc(karg.unique_len, GFP_KERNEL);
-		if (!karg.unique)
-			return -ENOMEM;
-		if (cmd == DRM32_IOCTL_SET_UNIQUE &&
-		    copy_from_user(karg.unique, uptr, karg.unique_len)) {
-			kfree(karg.unique);
-			return -EFAULT;
-		}
-	}
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	if (cmd == DRM32_IOCTL_GET_UNIQUE)
-		ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg);
-	else
-		ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		if (cmd == DRM32_IOCTL_GET_UNIQUE &&
-		    uptr != NULL &&
-		    copy_to_user(uptr, karg.unique, karg.unique_len))
-			ret = -EFAULT;
-		if (put_user(karg.unique_len, &uarg->unique_len))
-			ret = -EFAULT;
-	}
-
-	kfree(karg.unique);
-	return ret;
-}
-
-typedef struct drm32_map {
-	u32		offset;	 /* Requested physical address (0 for SAREA) */
-	u32		size;	 /* Requested physical size (bytes)	     */
-	drm_map_type_t	type;	 /* Type of memory to map		     */
-	drm_map_flags_t flags;	 /* Flags				     */
-	u32		handle;	 /* User-space: "Handle" to pass to mmap     */
-				 /* Kernel-space: kernel-virtual address     */
-	int		mtrr;	 /* MTRR slot used			     */
-				 /* Private data			     */
-} drm32_map_t;
-#define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t)
-
-static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_map_t *uarg = (drm32_map_t *) arg;
-	drm_map_t karg;
-	mm_segment_t old_fs;
-	u32 tmp;
-	int ret;
-
-	ret  = get_user(karg.offset, &uarg->offset);
-	ret |= get_user(karg.size, &uarg->size);
-	ret |= get_user(karg.type, &uarg->type);
-	ret |= get_user(karg.flags, &uarg->flags);
-	ret |= get_user(tmp, &uarg->handle);
-	ret |= get_user(karg.mtrr, &uarg->mtrr);
-	if (ret)
-		return -EFAULT;
-
-	karg.handle = (void *) A(tmp);
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		ret  = put_user(karg.offset, &uarg->offset);
-		ret |= put_user(karg.size, &uarg->size);
-		ret |= put_user(karg.type, &uarg->type);
-		ret |= put_user(karg.flags, &uarg->flags);
-		tmp = (u32) (long)karg.handle;
-		ret |= put_user(tmp, &uarg->handle);
-		ret |= put_user(karg.mtrr, &uarg->mtrr);
-		if (ret)
-			ret = -EFAULT;
-	}
-
-	return ret;
-}
-
-typedef struct drm32_buf_info {
-	int	count;	/* Entries in list */
-	u32	list;	/* (drm_buf_desc_t *) */
-} drm32_buf_info_t;
-#define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t)
-
-static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_buf_info_t *uarg = (drm32_buf_info_t *)arg;
-	drm_buf_desc_t *ulist;
-	drm_buf_info_t karg;
-	mm_segment_t old_fs;
-	int orig_count, ret;
-	u32 tmp;
-
-	if (get_user(karg.count, &uarg->count) ||
-	    get_user(tmp, &uarg->list))
-		return -EFAULT;
-
-	ulist = (drm_buf_desc_t *) A(tmp);
-
-	orig_count = karg.count;
-
-	karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL);
-	if (!karg.list)
-		return -EFAULT;
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		if (karg.count <= orig_count &&
-		    (copy_to_user(ulist, karg.list,
-				  karg.count * sizeof(drm_buf_desc_t))))
-			ret = -EFAULT;
-		if (put_user(karg.count, &uarg->count))
-			ret = -EFAULT;
-	}
-
-	kfree(karg.list);
-	return ret;
-}
-
-typedef struct drm32_buf_free {
-	int	count;
-	u32	list;	/* (int *) */
-} drm32_buf_free_t;
-#define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t)
-
-static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_buf_free_t *uarg = (drm32_buf_free_t *)arg;
-	drm_buf_free_t karg;
-	mm_segment_t old_fs;
-	int *ulist;
-	int ret;
-	u32 tmp;
-
-	if (get_user(karg.count, &uarg->count) ||
-	    get_user(tmp, &uarg->list))
-		return -EFAULT;
-
-	ulist = (int *) A(tmp);
-
-	karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL);
-	if (!karg.list)
-		return -ENOMEM;
-
-	ret = -EFAULT;
-	if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int))))
-		goto out;
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg);
-	set_fs(old_fs);
-
-out:
-	kfree(karg.list);
-	return ret;
-}
-
-typedef struct drm32_buf_pub {
-	int		  idx;	   /* Index into master buflist		   */
-	int		  total;   /* Buffer size			   */
-	int		  used;	   /* Amount of buffer in use (for DMA)	   */
-	u32		  address; /* Address of buffer (void *)	   */
-} drm32_buf_pub_t;
-
-typedef struct drm32_buf_map {
-	int	count;	 /* Length of buflist				   */
-	u32	virtual; /* Mmaped area in user-virtual (void *)	   */
-	u32	list;	 /* Buffer information (drm_buf_pub_t *)	   */
-} drm32_buf_map_t;
-#define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t)
-
-static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_buf_map_t *uarg = (drm32_buf_map_t *)arg;
-	drm32_buf_pub_t *ulist;
-	drm_buf_map_t karg;
-	mm_segment_t old_fs;
-	int orig_count, ret, i;
-	u32 tmp1, tmp2;
-
-	if (get_user(karg.count, &uarg->count) ||
-	    get_user(tmp1, &uarg->virtual) ||
-	    get_user(tmp2, &uarg->list))
-		return -EFAULT;
-
-	karg.virtual = (void *) A(tmp1);
-	ulist = (drm32_buf_pub_t *) A(tmp2);
-
-	orig_count = karg.count;
-
-	karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL);
-	if (!karg.list)
-		return -ENOMEM;
-
-	ret = -EFAULT;
-	for (i = 0; i < karg.count; i++) {
-		if (get_user(karg.list[i].idx, &ulist[i].idx) ||
-		    get_user(karg.list[i].total, &ulist[i].total) ||
-		    get_user(karg.list[i].used, &ulist[i].used) ||
-		    get_user(tmp1, &ulist[i].address))
-			goto out;
-
-		karg.list[i].address = (void *) A(tmp1);
-	}
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		for (i = 0; i < orig_count; i++) {
-			tmp1 = (u32) (long) karg.list[i].address;
-			if (put_user(karg.list[i].idx, &ulist[i].idx) ||
-			    put_user(karg.list[i].total, &ulist[i].total) ||
-			    put_user(karg.list[i].used, &ulist[i].used) ||
-			    put_user(tmp1, &ulist[i].address)) {
-				ret = -EFAULT;
-				goto out;
-			}
-		}
-		if (put_user(karg.count, &uarg->count))
-			ret = -EFAULT;
-	}
-
-out:
-	kfree(karg.list);
-	return ret;
-}
-
-typedef struct drm32_dma {
-	/* Indices here refer to the offset into
-	   buflist in drm_buf_get_t.  */
-	int		context;	  /* Context handle		    */
-	int		send_count;	  /* Number of buffers to send	    */
-	u32		send_indices;	  /* List of handles to buffers (int *) */
-	u32		send_sizes;	  /* Lengths of data to send (int *) */
-	drm_dma_flags_t flags;		  /* Flags			    */
-	int		request_count;	  /* Number of buffers requested    */
-	int		request_size;	  /* Desired size for buffers	    */
-	u32		request_indices;  /* Buffer information (int *)	    */
-	u32		request_sizes;	  /* (int *)			    */
-	int		granted_count;	  /* Number of buffers granted	    */
-} drm32_dma_t;
-#define DRM32_IOCTL_DMA DRM_IOWR(0x29, drm32_dma_t)
-
-/* RED PEN	The DRM layer blindly dereferences the send/request
- *		indice/size arrays even though they are userland
- *		pointers.  -DaveM
- */
-static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_dma_t *uarg = (drm32_dma_t *) arg;
-	int *u_si, *u_ss, *u_ri, *u_rs;
-	drm_dma_t karg;
-	mm_segment_t old_fs;
-	int ret;
-	u32 tmp1, tmp2, tmp3, tmp4;
-
-	karg.send_indices = karg.send_sizes = NULL;
-	karg.request_indices = karg.request_sizes = NULL;
-
-	if (get_user(karg.context, &uarg->context) ||
-	    get_user(karg.send_count, &uarg->send_count) ||
-	    get_user(tmp1, &uarg->send_indices) ||
-	    get_user(tmp2, &uarg->send_sizes) ||
-	    get_user(karg.flags, &uarg->flags) ||
-	    get_user(karg.request_count, &uarg->request_count) ||
-	    get_user(karg.request_size, &uarg->request_size) ||
-	    get_user(tmp3, &uarg->request_indices) ||
-	    get_user(tmp4, &uarg->request_sizes) ||
-	    get_user(karg.granted_count, &uarg->granted_count))
-		return -EFAULT;
-
-	u_si = (int *) A(tmp1);
-	u_ss = (int *) A(tmp2);
-	u_ri = (int *) A(tmp3);
-	u_rs = (int *) A(tmp4);
-
-	if (karg.send_count) {
-		karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
-		karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
-
-		ret = -ENOMEM;
-		if (!karg.send_indices || !karg.send_sizes)
-			goto out;
-
-		ret = -EFAULT;
-		if (copy_from_user(karg.send_indices, u_si,
-				   (karg.send_count * sizeof(int))) ||
-		    copy_from_user(karg.send_sizes, u_ss,
-				   (karg.send_count * sizeof(int))))
-			goto out;
-	}
-
-	if (karg.request_count) {
-		karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
-		karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
-
-		ret = -ENOMEM;
-		if (!karg.request_indices || !karg.request_sizes)
-			goto out;
-
-		ret = -EFAULT;
-		if (copy_from_user(karg.request_indices, u_ri,
-				   (karg.request_count * sizeof(int))) ||
-		    copy_from_user(karg.request_sizes, u_rs,
-				   (karg.request_count * sizeof(int))))
-			goto out;
-	}
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		if (put_user(karg.context, &uarg->context) ||
-		    put_user(karg.send_count, &uarg->send_count) ||
-		    put_user(karg.flags, &uarg->flags) ||
-		    put_user(karg.request_count, &uarg->request_count) ||
-		    put_user(karg.request_size, &uarg->request_size) ||
-		    put_user(karg.granted_count, &uarg->granted_count))
-			ret = -EFAULT;
-
-		if (karg.send_count) {
-			if (copy_to_user(u_si, karg.send_indices,
-					 (karg.send_count * sizeof(int))) ||
-			    copy_to_user(u_ss, karg.send_sizes,
-					 (karg.send_count * sizeof(int))))
-				ret = -EFAULT;
-		}
-		if (karg.request_count) {
-			if (copy_to_user(u_ri, karg.request_indices,
-					 (karg.request_count * sizeof(int))) ||
-			    copy_to_user(u_rs, karg.request_sizes,
-					 (karg.request_count * sizeof(int))))
-				ret = -EFAULT;
-		}
-	}
-
-out:
-	kfree(karg.send_indices);
-	kfree(karg.send_sizes);
-	kfree(karg.request_indices);
-	kfree(karg.request_sizes);
-	return ret;
-}
-
-typedef struct drm32_ctx_res {
-	int	count;
-	u32	contexts; /* (drm_ctx_t *) */
-} drm32_ctx_res_t;
-#define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t)
-
-static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_ctx_res_t *uarg = (drm32_ctx_res_t *) arg;
-	drm_ctx_t *ulist;
-	drm_ctx_res_t karg;
-	mm_segment_t old_fs;
-	int orig_count, ret;
-	u32 tmp;
-
-	karg.contexts = NULL;
-	if (get_user(karg.count, &uarg->count) ||
-	    get_user(tmp, &uarg->contexts))
-		return -EFAULT;
-
-	ulist = (drm_ctx_t *) A(tmp);
-
-	orig_count = karg.count;
-	if (karg.count && ulist) {
-		karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL);
-		if (!karg.contexts)
-			return -ENOMEM;
-		if (copy_from_user(karg.contexts, ulist,
-				   (karg.count * sizeof(drm_ctx_t)))) {
-			kfree(karg.contexts);
-			return -EFAULT;
-		}
-	}
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		if (orig_count) {
-			if (copy_to_user(ulist, karg.contexts,
-					 (orig_count * sizeof(drm_ctx_t))))
-				ret = -EFAULT;
-		}
-		if (put_user(karg.count, &uarg->count))
-			ret = -EFAULT;
-	}
-
-	kfree(karg.contexts);
-	return ret;
-}
-
-#endif
-
 #define HANDLE_IOCTL(cmd, handler) { cmd, (ioctl_trans_handler_t)handler, NULL },
 #define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd, sys_ioctl)
 
@@ -561,11 +31,6 @@ IOCTL_TABLE_START
 #define DECLARES
 #include "compat_ioctl.c"
 
-/* PA-specific ioctls */
-COMPATIBLE_IOCTL(PA_PERF_ON)
-COMPATIBLE_IOCTL(PA_PERF_OFF)
-COMPATIBLE_IOCTL(PA_PERF_VERSION)
-
 /* And these ioctls need translation */
 HANDLE_IOCTL(SIOCGPPPSTATS, dev_ifsioc)
 HANDLE_IOCTL(SIOCGPPPCSTATS, dev_ifsioc)
@@ -590,17 +55,6 @@ HANDLE_IOCTL(RTC_EPOCH_READ, w_long)
 COMPATIBLE_IOCTL(RTC_EPOCH_SET)
 #endif
 
-#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
-HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version);
-HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique);
-HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique);
-HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap);
-HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs);
-HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs);
-HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs);
-HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma);
-HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx);
-#endif /* DRM */
 IOCTL_TABLE_END
 
 int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 006385dbee66..197936d9359a 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -30,6 +30,9 @@
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include <asm/io.h>
+
+#include <asm/smp.h>
 
 #undef PARISC_IRQ_CR16_COUNTS
 
@@ -43,26 +46,34 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
 */
 static volatile unsigned long cpu_eiem = 0;
 
-static void cpu_set_eiem(void *info)
-{
-	set_eiem((unsigned long) info);
-}
-
-static inline void cpu_disable_irq(unsigned int irq)
+static void cpu_disable_irq(unsigned int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
 
 	cpu_eiem &= ~eirr_bit;
-	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+	/* Do nothing on the other CPUs.  If they get this interrupt,
+	 * The & cpu_eiem in the do_cpu_irq_mask() ensures they won't
+	 * handle it, and the set_eiem() at the bottom will ensure it
+	 * then gets disabled */
 }
 
 static void cpu_enable_irq(unsigned int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
 
-	mtctl(eirr_bit, 23);	/* clear EIRR bit before unmasking */
 	cpu_eiem |= eirr_bit;
-	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+
+	/* FIXME: while our interrupts aren't nested, we cannot reset
+	 * the eiem mask if we're already in an interrupt.  Once we
+	 * implement nested interrupts, this can go away
+	 */
+	if (!in_interrupt())
+		set_eiem(cpu_eiem);
+
+	/* This is just a simple NOP IPI.  But what it does is cause
+	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
+	 * of the interrupt handler */
+	smp_send_all_nop();
 }
 
 static unsigned int cpu_startup_irq(unsigned int irq)
@@ -74,6 +85,35 @@ static unsigned int cpu_startup_irq(unsigned int irq)
 void no_ack_irq(unsigned int irq) { }
 void no_end_irq(unsigned int irq) { }
 
+#ifdef CONFIG_SMP
+int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
+{
+	int cpu_dest;
+
+	/* timer and ipi have to always be received on all CPUs */
+	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
+		/* Bad linux design decision.  The mask has already
+		 * been set; we must reset it */
+		irq_affinity[irq] = CPU_MASK_ALL;
+		return -EINVAL;
+	}
+
+	/* whatever mask they set, we just allow one CPU */
+	cpu_dest = first_cpu(*dest);
+	*dest = cpumask_of_cpu(cpu_dest);
+
+	return 0;
+}
+
+static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+{
+	if (cpu_check_affinity(irq, &dest))
+		return;
+
+	irq_affinity[irq] = dest;
+}
+#endif
+
 static struct hw_interrupt_type cpu_interrupt_type = {
 	.typename	= "CPU",
 	.startup	= cpu_startup_irq,
@@ -82,7 +122,9 @@ static struct hw_interrupt_type cpu_interrupt_type = {
 	.disable	= cpu_disable_irq,
 	.ack		= no_ack_irq,
 	.end		= no_end_irq,
-//	.set_affinity	= cpu_set_affinity_irq,
+#ifdef CONFIG_SMP
+	.set_affinity	= cpu_set_affinity_irq,
+#endif
 };
 
 int show_interrupts(struct seq_file *p, void *v)
@@ -219,6 +261,17 @@ int txn_alloc_irq(unsigned int bits_wide)
 	return -1;
 }
 
+
+unsigned long txn_affinity_addr(unsigned int irq, int cpu)
+{
+#ifdef CONFIG_SMP
+	irq_affinity[irq] = cpumask_of_cpu(cpu);
+#endif
+
+	return cpu_data[cpu].txn_addr;
+}
+
+
 unsigned long txn_alloc_addr(unsigned int virt_irq)
 {
 	static int next_cpu = -1;
@@ -233,7 +286,7 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
 	if (next_cpu >= NR_CPUS)
 		next_cpu = 0;	/* nothing else, assign monarch */
 
-	return cpu_data[next_cpu].txn_addr;
+	return txn_affinity_addr(virt_irq, next_cpu);
 }
 
 
@@ -250,10 +303,11 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq_enter();
 
 	/*
-	 * Only allow interrupt processing to be interrupted by the
-	 * timer tick
+	 * Don't allow TIMER or IPI nested interrupts.
+	 * Allowing any single interrupt to nest can lead to that CPU
+	 * handling interrupts with all enabled interrupts unmasked.
 	 */
-	set_eiem(EIEM_MASK(TIMER_IRQ));
+	set_eiem(0UL);
 
 	/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
 	 * 2) We loop here on EIRR contents in order to avoid
@@ -267,23 +321,41 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 		if (!eirr_val)
 			break;
 
-		if (eirr_val & EIEM_MASK(TIMER_IRQ))
-			set_eiem(0);
-
 		mtctl(eirr_val, 23); /* reset bits we are going to process */
 
 		/* Work our way from MSb to LSb...same order we alloc EIRs */
 		for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
+#ifdef CONFIG_SMP
+			cpumask_t dest = irq_affinity[irq];
+#endif
 			if (!(bit & eirr_val))
 				continue;
 
 			/* clear bit in mask - can exit loop sooner */
 			eirr_val &= ~bit;
 
+#ifdef CONFIG_SMP
+			/* FIXME: because generic set affinity mucks
+			 * with the affinity before sending it to us
+			 * we can get the situation where the affinity is
+			 * wrong for our CPU type interrupts */
+			if (irq != TIMER_IRQ && irq != IPI_IRQ &&
+			    !cpu_isset(smp_processor_id(), dest)) {
+				int cpu = first_cpu(dest);
+
+				printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
+				       irq, smp_processor_id(), cpu);
+				gsc_writel(irq + CPU_IRQ_BASE,
+					   cpu_data[cpu].hpa);
+				continue;
+			}
+#endif
+
 			__do_IRQ(irq, regs);
 		}
 	}
-	set_eiem(cpu_eiem);
+
+	set_eiem(cpu_eiem);	/* restore original mask */
 	irq_exit();
 }
 
@@ -291,12 +363,14 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 static struct irqaction timer_action = {
 	.handler = timer_interrupt,
 	.name = "timer",
+	.flags = SA_INTERRUPT,
 };
 
 #ifdef CONFIG_SMP
 static struct irqaction ipi_action = {
 	.handler = ipi_interrupt,
 	.name = "IPI",
+	.flags = SA_INTERRUPT,
 };
 #endif
 
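
The old parisc code broadcast every mask change with on_each_cpu(); the new
scheme only updates the shared cpu_eiem and lets each CPU reload its EIEM
register in its own interrupt-exit path, using a NOP IPI purely as a kick.
A sketch of the enable side (smp_send_all_nop() is added elsewhere in this
merge; its exact definition is assumed here):

static void example_enable_irq(unsigned int irq)
{
	cpu_eiem |= EIEM_MASK(irq);	/* publish the new mask */
	if (!in_interrupt())
		set_eiem(cpu_eiem);	/* load it locally while that is safe */

	/*
	 * The NOP IPI carries no work of its own; it just forces every
	 * other CPU through do_cpu_irq_mask(), whose final
	 * set_eiem(cpu_eiem) picks up the published mask.
	 */
	smp_send_all_nop();
}

Disable is even lazier: cpu_disable_irq() only clears the bit, and the
"& cpu_eiem" filtering in do_cpu_irq_mask() (see the comment in the hunk
above) keeps any stray delivery from being handled before the mask is
reloaded.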
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index 44670d6e06f4..f6fec62b6a2f 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -196,8 +196,7 @@ static int perf_open(struct inode *inode, struct file *file);
 static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
 static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
 	loff_t *ppos);
-static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
-	unsigned long arg);
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 static void perf_start_counters(void);
 static int perf_stop_counters(uint32_t *raddr);
 static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
@@ -438,48 +437,56 @@ static void perf_patch_images(void)
438 * must be running on the processor that you wish to change. 437 * must be running on the processor that you wish to change.
439 */ 438 */
440 439
441static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 440static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
442 unsigned long arg)
443{ 441{
444 long error_start; 442 long error_start;
445 uint32_t raddr[4]; 443 uint32_t raddr[4];
444 int error = 0;
446 445
446 lock_kernel();
447 switch (cmd) { 447 switch (cmd) {
448 448
449 case PA_PERF_ON: 449 case PA_PERF_ON:
450 /* Start the counters */ 450 /* Start the counters */
451 perf_start_counters(); 451 perf_start_counters();
452 return 0; 452 break;
453 453
454 case PA_PERF_OFF: 454 case PA_PERF_OFF:
455 error_start = perf_stop_counters(raddr); 455 error_start = perf_stop_counters(raddr);
456 if (error_start != 0) { 456 if (error_start != 0) {
457 printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start); 457 printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
458 return -EFAULT; 458 error = -EFAULT;
459 break;
459 } 460 }
460 461
461 /* copy out the Counters */ 462 /* copy out the Counters */
462 if (copy_to_user((void __user *)arg, raddr, 463 if (copy_to_user((void __user *)arg, raddr,
463 sizeof (raddr)) != 0) { 464 sizeof (raddr)) != 0) {
464 return -EFAULT; 465 error = -EFAULT;
466 break;
465 } 467 }
466 return 0; 468 break;
467 469
468 case PA_PERF_VERSION: 470 case PA_PERF_VERSION:
469 /* Return the version # */ 471 /* Return the version # */
470 return put_user(PERF_VERSION, (int *)arg); 472 error = put_user(PERF_VERSION, (int *)arg);
473 break;
471 474
472 default: 475 default:
473 break; 476 error = -ENOTTY;
474 } 477 }
475 return -ENOTTY; 478
479 unlock_kernel();
480
481 return error;
476} 482}
477 483
478static struct file_operations perf_fops = { 484static struct file_operations perf_fops = {
479 .llseek = no_llseek, 485 .llseek = no_llseek,
480 .read = perf_read, 486 .read = perf_read,
481 .write = perf_write, 487 .write = perf_write,
482 .ioctl = perf_ioctl, 488 .unlocked_ioctl = perf_ioctl,
489 .compat_ioctl = perf_ioctl,
483 .open = perf_open, 490 .open = perf_open,
484 .release = perf_release 491 .release = perf_release
485}; 492};
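
For context on the perf.c hunk: converting .ioctl to .unlocked_ioctl drops the inode argument and the implicit Big Kernel Lock, so the handler takes lock_kernel() itself and funnels every case through a single exit path so the lock is always dropped. A sketch of the pattern under those assumptions; example_ioctl, example_fops and EXAMPLE_CMD are hypothetical names, not part of this diff:

    #include <linux/fs.h>
    #include <linux/smp_lock.h>             /* lock_kernel()/unlock_kernel() */

    #define EXAMPLE_CMD 1                   /* hypothetical ioctl command */

    static long example_ioctl(struct file *file, unsigned int cmd,
                              unsigned long arg)
    {
            long error = 0;

            lock_kernel();                  /* old ->ioctl ran with BKL held */
            switch (cmd) {
            case EXAMPLE_CMD:
                    /* ... do the work, setting error on failure ... */
                    break;
            default:
                    error = -ENOTTY;        /* unknown command */
            }
            unlock_kernel();                /* single exit: lock always dropped */

            return error;
    }

    static struct file_operations example_fops = {
            .unlocked_ioctl = example_ioctl,
            .compat_ioctl   = example_ioctl, /* argument layout is 32/64 clean */
    };
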
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index b6fe202a620d..27160e8bf15b 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -264,6 +264,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
264 * sigkill. perhaps it should be put in the status 264 * sigkill. perhaps it should be put in the status
265 * that it wants to exit. 265 * that it wants to exit.
266 */ 266 */
267 ret = 0;
267 DBG("sys_ptrace(KILL)\n"); 268 DBG("sys_ptrace(KILL)\n");
268 if (child->exit_state == EXIT_ZOMBIE) /* already dead */ 269 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
269 goto out_tsk; 270 goto out_tsk;
@@ -344,11 +345,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
344 345
345 case PTRACE_GETEVENTMSG: 346 case PTRACE_GETEVENTMSG:
346 ret = put_user(child->ptrace_message, (unsigned int __user *) data); 347 ret = put_user(child->ptrace_message, (unsigned int __user *) data);
347 goto out; 348 goto out_tsk;
348 349
349 default: 350 default:
350 ret = ptrace_request(child, request, addr, data); 351 ret = ptrace_request(child, request, addr, data);
351 goto out; 352 goto out_tsk;
352 } 353 }
353 354
354out_wake_notrap: 355out_wake_notrap:
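
The seemingly trivial "ret = 0;" added above is the substance of this hunk: PTRACE_KILL on an already-dead (zombie) child jumps straight to the common exit label, which returns whatever happens to be in ret. A sketch of the control flow, with bodies elided:

    /* Sketch of the exit-label pattern the fix relies on; not verbatim. */
    static long arch_ptrace_sketch(struct task_struct *child, long request)
    {
            long ret = -EIO;                /* value seen if nothing sets it */

            switch (request) {
            case PTRACE_KILL:
                    ret = 0;                /* the fix: set before early exit */
                    if (child->exit_state == EXIT_ZOMBIE)
                            goto out_tsk;   /* already dead: report success */
                    /* ... deliver SIGKILL ... */
                    break;
            }
    out_tsk:
            return ret;
    }
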
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 82c24e62ab63..3a25a7bd673e 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -296,7 +296,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
296 struct rt_sigframe __user *frame; 296 struct rt_sigframe __user *frame;
297 unsigned long rp, usp; 297 unsigned long rp, usp;
298 unsigned long haddr, sigframe_size; 298 unsigned long haddr, sigframe_size;
299 struct siginfo si;
300 int err = 0; 299 int err = 0;
301#ifdef __LP64__ 300#ifdef __LP64__
302 compat_int_t compat_val; 301 compat_int_t compat_val;
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index a9ecf6465784..ce89da0f654d 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -181,12 +181,19 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
181 while (ops) { 181 while (ops) {
182 unsigned long which = ffz(~ops); 182 unsigned long which = ffz(~ops);
183 183
184 ops &= ~(1 << which);
185
184 switch (which) { 186 switch (which) {
187 case IPI_NOP:
188#if (kDEBUG>=100)
189 printk(KERN_DEBUG "CPU%d IPI_NOP\n",this_cpu);
190#endif /* kDEBUG */
191 break;
192
185 case IPI_RESCHEDULE: 193 case IPI_RESCHEDULE:
186#if (kDEBUG>=100) 194#if (kDEBUG>=100)
187 printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu); 195 printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
188#endif /* kDEBUG */ 196#endif /* kDEBUG */
189 ops &= ~(1 << IPI_RESCHEDULE);
190 /* 197 /*
191 * Reschedule callback. Everything to be 198 * Reschedule callback. Everything to be
192 * done is done by the interrupt return path. 199 * done is done by the interrupt return path.
@@ -197,7 +204,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
197#if (kDEBUG>=100) 204#if (kDEBUG>=100)
198 printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu); 205 printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
199#endif /* kDEBUG */ 206#endif /* kDEBUG */
200 ops &= ~(1 << IPI_CALL_FUNC);
201 { 207 {
202 volatile struct smp_call_struct *data; 208 volatile struct smp_call_struct *data;
203 void (*func)(void *info); 209 void (*func)(void *info);
@@ -231,7 +237,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
231#if (kDEBUG>=100) 237#if (kDEBUG>=100)
232 printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu); 238 printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
233#endif /* kDEBUG */ 239#endif /* kDEBUG */
234 ops &= ~(1 << IPI_CPU_START);
235#ifdef ENTRY_SYS_CPUS 240#ifdef ENTRY_SYS_CPUS
236 p->state = STATE_RUNNING; 241 p->state = STATE_RUNNING;
237#endif 242#endif
@@ -241,7 +246,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
241#if (kDEBUG>=100) 246#if (kDEBUG>=100)
242 printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu); 247 printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
243#endif /* kDEBUG */ 248#endif /* kDEBUG */
244 ops &= ~(1 << IPI_CPU_STOP);
245#ifdef ENTRY_SYS_CPUS 249#ifdef ENTRY_SYS_CPUS
246#else 250#else
247 halt_processor(); 251 halt_processor();
@@ -252,13 +256,11 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
252#if (kDEBUG>=100) 256#if (kDEBUG>=100)
253 printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu); 257 printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
254#endif /* kDEBUG */ 258#endif /* kDEBUG */
255 ops &= ~(1 << IPI_CPU_TEST);
256 break; 259 break;
257 260
258 default: 261 default:
259 printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n", 262 printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
260 this_cpu, which); 263 this_cpu, which);
261 ops &= ~(1 << which);
262 return IRQ_NONE; 264 return IRQ_NONE;
263 } /* Switch */ 265 } /* Switch */
264 } /* while (ops) */ 266 } /* while (ops) */
@@ -312,6 +314,12 @@ smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); }
312void 314void
313smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); } 315smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
314 316
317void
318smp_send_all_nop(void)
319{
320 send_IPI_allbutself(IPI_NOP);
321}
322
315 323
316/** 324/**
317 * Run a function on all other CPUs. 325 * Run a function on all other CPUs.
@@ -338,6 +346,10 @@ smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
338 346
339 /* Can deadlock when called with interrupts disabled */ 347 /* Can deadlock when called with interrupts disabled */
340 WARN_ON(irqs_disabled()); 348 WARN_ON(irqs_disabled());
349
350 /* can also deadlock if IPIs are disabled */
351 WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
352
341 353
342 data.func = func; 354 data.func = func;
343 data.info = info; 355 data.info = info;
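
Two things to note in the smp.c hunk: the per-case "ops &= ~(1 << IPI_*)" lines are hoisted into a single "ops &= ~(1 << which)" right after ffz(), so a case that forgets to clear its bit (or a newly added type like IPI_NOP) can no longer wedge the while loop, and smp_send_all_nop() gives other CPUs a way to be poked awake with no work attached. A sketch of the hoisted loop, handler bodies elided:

    /* Sketch of the post-patch IPI dispatch; not verbatim. */
    static void dispatch_ipis_sketch(unsigned long ops)
    {
            while (ops) {
                    unsigned long which = ffz(~ops);  /* lowest set bit */

                    ops &= ~(1UL << which);           /* cleared exactly once */

                    switch (which) {
                    case IPI_NOP:           /* wake-up only, nothing to do */
                            break;
                    case IPI_RESCHEDULE:    /* work done on IRQ return path */
                            break;
                    default:                /* unknown IPI number */
                            break;
                    }
            }
    }
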
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index b29b76b42bb7..d66163492890 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -164,7 +164,7 @@ linux_gateway_entry:
164#endif 164#endif
165 STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */ 165 STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */
166 166
167 STREG %r20, TASK_PT_GR20(%r1) 167 STREG %r20, TASK_PT_GR20(%r1) /* Syscall number */
168 STREG %r21, TASK_PT_GR21(%r1) 168 STREG %r21, TASK_PT_GR21(%r1)
169 STREG %r22, TASK_PT_GR22(%r1) 169 STREG %r22, TASK_PT_GR22(%r1)
170 STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */ 170 STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */
@@ -527,6 +527,7 @@ lws_compare_and_swap:
527 We *must* giveup this call and fail. 527 We *must* giveup this call and fail.
528 */ 528 */
529 ldw 4(%sr2,%r20), %r28 /* Load thread register */ 529 ldw 4(%sr2,%r20), %r28 /* Load thread register */
530 /* WARNING: If cr27 cycles to the same value we have problems */
530 mfctl %cr27, %r21 /* Get current thread register */ 531 mfctl %cr27, %r21 /* Get current thread register */
531 cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */ 532 cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
532 b lws_exit /* Return error! */ 533 b lws_exit /* Return error! */
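
The new WARNING comment in syscall.S refers to the guard just below it: the LWS lock word stores the cr27 (thread register) value of its holder, and a thread re-entering the compare-and-swap path while already holding the lock is caught by one compare. A C-level restatement, purely illustrative (the struct and names are hypothetical; cr27 cycling back to the same value is exactly the hazard the comment warns about):

    /* Illustrative only; the real logic is the cmpb sequence above. */
    struct lws_lock_sketch {
            unsigned long owner_cr27;       /* thread register of the holder */
    };

    static int lws_cas_guard(struct lws_lock_sketch *lw, unsigned long my_cr27)
    {
            if (lw->owner_cr27 == my_cr27)
                    return -1;              /* recursive entry: fail the call */
            return 0;                       /* different thread: contend for it */
    }
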
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 5a5b24685081..8b6008ab217d 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -40,7 +40,7 @@
40#endif 40#endif
41 41
42unsigned long pci_probe_only = 1; 42unsigned long pci_probe_only = 1;
43unsigned long pci_assign_all_buses = 0; 43int pci_assign_all_buses = 0;
44 44
45/* 45/*
46 * legal IO pages under MAX_ISA_PORT. This is to ensure we don't touch 46 * legal IO pages under MAX_ISA_PORT. This is to ensure we don't touch
@@ -55,11 +55,6 @@ static void fixup_resource(struct resource *res, struct pci_dev *dev);
55static void do_bus_setup(struct pci_bus *bus); 55static void do_bus_setup(struct pci_bus *bus);
56#endif 56#endif
57 57
58unsigned int pcibios_assign_all_busses(void)
59{
60 return pci_assign_all_buses;
61}
62
63/* pci_io_base -- the base address from which io bars are offsets. 58/* pci_io_base -- the base address from which io bars are offsets.
64 * This is the lowest I/O base address (so bar values are always positive), 59 * This is the lowest I/O base address (so bar values are always positive),
65 * and it *must* be the start of ISA space if an ISA bus exists because 60 * and it *must* be the start of ISA space if an ISA bus exists because
@@ -1186,17 +1181,6 @@ void phbs_remap_io(void)
1186 remap_bus_range(hose->bus); 1181 remap_bus_range(hose->bus);
1187} 1182}
1188 1183
1189/*
1190 * ppc64 can have multifunction devices that do not respond to function 0.
1191 * In this case we must scan all functions.
1192 * XXX this can go now, we use the OF device tree in all the
1193 * cases that caused problems. -- paulus
1194 */
1195int pcibios_scan_all_fns(struct pci_bus *bus, int devfn)
1196{
1197 return 0;
1198}
1199
1200static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev) 1184static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
1201{ 1185{
1202 struct pci_controller *hose = pci_bus_to_host(dev->bus); 1186 struct pci_controller *hose = pci_bus_to_host(dev->bus);
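
With the out-of-line pcibios_assign_all_busses() gone and pci_assign_all_buses narrowed to int, the accessor presumably survives as a trivial macro in the arch header; that header is not part of this hunk, so the following is an assumption about its shape:

    /* Assumed header counterpart (file not shown in this diff). */
    extern int pci_assign_all_buses;
    #define pcibios_assign_all_busses()     (pci_assign_all_buses)
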
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 59846b40d521..af4d1bc9a2eb 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -146,9 +146,6 @@ EXPORT_SYMBOL(pci_bus_io_base);
146EXPORT_SYMBOL(pci_bus_io_base_phys); 146EXPORT_SYMBOL(pci_bus_io_base_phys);
147EXPORT_SYMBOL(pci_bus_mem_base_phys); 147EXPORT_SYMBOL(pci_bus_mem_base_phys);
148EXPORT_SYMBOL(pci_bus_to_hose); 148EXPORT_SYMBOL(pci_bus_to_hose);
149EXPORT_SYMBOL(pci_resource_to_bus);
150EXPORT_SYMBOL(pci_phys_to_bus);
151EXPORT_SYMBOL(pci_bus_to_phys);
152#endif /* CONFIG_PCI */ 149#endif /* CONFIG_PCI */
153 150
154#ifdef CONFIG_NOT_COHERENT_CACHE 151#ifdef CONFIG_NOT_COHERENT_CACHE
diff --git a/arch/powerpc/kernel/ptrace-common.h b/arch/powerpc/kernel/ptrace-common.h
new file mode 100644
index 000000000000..b1babb729673
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace-common.h
@@ -0,0 +1,164 @@
1/*
2 * linux/arch/ppc64/kernel/ptrace-common.h
3 *
 4 * Copyright (c) 2002 Stephen Rothwell, IBM Corporation
5 * Extracted from ptrace.c and ptrace32.c
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file README.legal in the main directory of
9 * this archive for more details.
10 */
11
12#ifndef _PPC64_PTRACE_COMMON_H
13#define _PPC64_PTRACE_COMMON_H
14
15#include <linux/config.h>
16#include <asm/system.h>
17
18/*
19 * Set of msr bits that gdb can change on behalf of a process.
20 */
21#define MSR_DEBUGCHANGE (MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1)
22
23/*
24 * Get contents of register REGNO in task TASK.
25 */
26static inline unsigned long get_reg(struct task_struct *task, int regno)
27{
28 unsigned long tmp = 0;
29
30 /*
31 * Put the correct FP bits in, they might be wrong as a result
32 * of our lazy FP restore.
33 */
34 if (regno == PT_MSR) {
35 tmp = ((unsigned long *)task->thread.regs)[PT_MSR];
36 tmp |= task->thread.fpexc_mode;
37 } else if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
38 tmp = ((unsigned long *)task->thread.regs)[regno];
39 }
40
41 return tmp;
42}
43
44/*
45 * Write contents of register REGNO in task TASK.
46 */
47static inline int put_reg(struct task_struct *task, int regno,
48 unsigned long data)
49{
50 if (regno < PT_SOFTE) {
51 if (regno == PT_MSR)
52 data = (data & MSR_DEBUGCHANGE)
53 | (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
54 ((unsigned long *)task->thread.regs)[regno] = data;
55 return 0;
56 }
57 return -EIO;
58}
59
60static inline void set_single_step(struct task_struct *task)
61{
62 struct pt_regs *regs = task->thread.regs;
63 if (regs != NULL)
64 regs->msr |= MSR_SE;
65 set_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
66}
67
68static inline void clear_single_step(struct task_struct *task)
69{
70 struct pt_regs *regs = task->thread.regs;
71 if (regs != NULL)
72 regs->msr &= ~MSR_SE;
73 clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
74}
75
76#ifdef CONFIG_ALTIVEC
77/*
78 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 79 * The transfer totals 34 quadwords. Quadwords 0-31 contain the
80 * corresponding vector registers. Quadword 32 contains the vscr as the
81 * last word (offset 12) within that quadword. Quadword 33 contains the
82 * vrsave as the first word (offset 0) within the quadword.
83 *
84 * This definition of the VMX state is compatible with the current PPC32
85 * ptrace interface. This allows signal handling and ptrace to use the
86 * same structures. This also simplifies the implementation of a bi-arch
 87 * (combined 32- and 64-bit) gdb.
88 */
89
90/*
91 * Get contents of AltiVec register state in task TASK
92 */
93static inline int get_vrregs(unsigned long __user *data,
94 struct task_struct *task)
95{
96 unsigned long regsize;
97
98 /* copy AltiVec registers VR[0] .. VR[31] */
99 regsize = 32 * sizeof(vector128);
100 if (copy_to_user(data, task->thread.vr, regsize))
101 return -EFAULT;
102 data += (regsize / sizeof(unsigned long));
103
104 /* copy VSCR */
105 regsize = 1 * sizeof(vector128);
106 if (copy_to_user(data, &task->thread.vscr, regsize))
107 return -EFAULT;
108 data += (regsize / sizeof(unsigned long));
109
110 /* copy VRSAVE */
111 if (put_user(task->thread.vrsave, (u32 __user *)data))
112 return -EFAULT;
113
114 return 0;
115}
116
117/*
118 * Write contents of AltiVec register state into task TASK.
119 */
120static inline int set_vrregs(struct task_struct *task,
121 unsigned long __user *data)
122{
123 unsigned long regsize;
124
125 /* copy AltiVec registers VR[0] .. VR[31] */
126 regsize = 32 * sizeof(vector128);
127 if (copy_from_user(task->thread.vr, data, regsize))
128 return -EFAULT;
129 data += (regsize / sizeof(unsigned long));
130
131 /* copy VSCR */
132 regsize = 1 * sizeof(vector128);
133 if (copy_from_user(&task->thread.vscr, data, regsize))
134 return -EFAULT;
135 data += (regsize / sizeof(unsigned long));
136
137 /* copy VRSAVE */
138 if (get_user(task->thread.vrsave, (u32 __user *)data))
139 return -EFAULT;
140
141 return 0;
142}
143#endif
144
145static inline int ptrace_set_debugreg(struct task_struct *task,
146 unsigned long addr, unsigned long data)
147{
 148 /* We only support one DABR and no IABRs at the moment */
149 if (addr > 0)
150 return -EINVAL;
151
152 /* The bottom 3 bits are flags */
153 if ((data & ~0x7UL) >= TASK_SIZE)
154 return -EIO;
155
156 /* Ensure translation is on */
157 if (data && !(data & DABR_TRANSLATION))
158 return -EIO;
159
160 task->thread.dabr = data;
161 return 0;
162}
163
164#endif /* _PPC64_PTRACE_COMMON_H */
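
A typical consumer of the helpers in this new header is the PEEKUSR/POKEUSR handling in arch_ptrace(). A minimal sketch of how get_reg()/put_reg() slot in, with the bounds checking of the real code simplified away:

    /* Sketch only; the real arch_ptrace() validates addr more strictly. */
    static long peek_poke_sketch(struct task_struct *child, long request,
                                 long addr, long data)
    {
            unsigned long index = addr / sizeof(unsigned long);

            switch (request) {
            case PTRACE_PEEKUSR:    /* read one register, MSR bits fixed up */
                    return put_user(get_reg(child, index),
                                    (unsigned long __user *)data);
            case PTRACE_POKEUSR:    /* put_reg() rejects regno >= PT_SOFTE */
                    return put_reg(child, index, data);
            }
            return -EIO;
    }
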
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 3d2abd95c7ae..400793c71304 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -36,8 +36,9 @@
36#include <asm/page.h> 36#include <asm/page.h>
37#include <asm/pgtable.h> 37#include <asm/pgtable.h>
38#include <asm/system.h> 38#include <asm/system.h>
39
39#ifdef CONFIG_PPC64 40#ifdef CONFIG_PPC64
40#include <asm/ptrace-common.h> 41#include "ptrace-common.h"
41#endif 42#endif
42 43
43#ifdef CONFIG_PPC32 44#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 91eb952e0293..61762640b877 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -33,7 +33,8 @@
33#include <asm/page.h> 33#include <asm/page.h>
34#include <asm/pgtable.h> 34#include <asm/pgtable.h>
35#include <asm/system.h> 35#include <asm/system.h>
36#include <asm/ptrace-common.h> 36
37#include "ptrace-common.h"
37 38
38/* 39/*
39 * does not yet catch signals sent when the child dies. 40 * does not yet catch signals sent when the child dies.
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index f4ca29cf5364..f9587bcc6a48 100644
--- a/arch/powerpc/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
@@ -14,9 +14,10 @@
14#include <asm/pgalloc.h> 14#include <asm/pgalloc.h>
15#include <asm/pgtable.h> 15#include <asm/pgtable.h>
16#include <asm/semaphore.h> 16#include <asm/semaphore.h>
17#include <asm/imalloc.h>
18#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
19 18
19#include "mmu_decl.h"
20
20static DECLARE_MUTEX(imlist_sem); 21static DECLARE_MUTEX(imlist_sem);
21struct vm_struct * imlist = NULL; 22struct vm_struct * imlist = NULL;
22 23
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 1134f70f231d..81cfb0c2ec58 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -64,7 +64,8 @@
64#include <asm/iommu.h> 64#include <asm/iommu.h>
65#include <asm/abs_addr.h> 65#include <asm/abs_addr.h>
66#include <asm/vdso.h> 66#include <asm/vdso.h>
67#include <asm/imalloc.h> 67
68#include "mmu_decl.h"
68 69
69#ifdef DEBUG 70#ifdef DEBUG
70#define DBG(fmt...) printk(fmt) 71#define DBG(fmt...) printk(fmt)
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index a4d7a327c0e5..bea2d21ac6f7 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -33,7 +33,6 @@ extern void invalidate_tlbcam_entry(int index);
33 33
34extern int __map_without_bats; 34extern int __map_without_bats;
35extern unsigned long ioremap_base; 35extern unsigned long ioremap_base;
36extern unsigned long ioremap_bot;
37extern unsigned int rtas_data, rtas_size; 36extern unsigned int rtas_data, rtas_size;
38 37
39extern PTE *Hash, *Hash_end; 38extern PTE *Hash, *Hash_end;
@@ -42,6 +41,7 @@ extern unsigned long Hash_size, Hash_mask;
42extern unsigned int num_tlbcam_entries; 41extern unsigned int num_tlbcam_entries;
43#endif 42#endif
44 43
44extern unsigned long ioremap_bot;
45extern unsigned long __max_low_memory; 45extern unsigned long __max_low_memory;
46extern unsigned long __initial_memory_limit; 46extern unsigned long __initial_memory_limit;
47extern unsigned long total_memory; 47extern unsigned long total_memory;
@@ -84,4 +84,16 @@ static inline void flush_HPTE(unsigned context, unsigned long va,
84 else 84 else
85 _tlbie(va); 85 _tlbie(va);
86} 86}
87#else /* CONFIG_PPC64 */
88/* imalloc region types */
89#define IM_REGION_UNUSED 0x1
90#define IM_REGION_SUBSET 0x2
91#define IM_REGION_EXISTS 0x4
92#define IM_REGION_OVERLAP 0x8
93#define IM_REGION_SUPERSET 0x10
94
95extern struct vm_struct * im_get_free_area(unsigned long size);
96extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
97 int region_type);
98extern void im_free(void *addr);
87#endif 99#endif
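
The IM_REGION_* flags that moved into mmu_decl.h tell im_get_area() which kinds of collision with an existing imalloc region are acceptable for a requested range. A hedged sketch of a caller in the ioremap style; the flag combination is illustrative, not lifted from this diff:

    /* Illustrative caller: accept an unused range, or one that is a
     * subset of / identical to an existing region; reject overlaps. */
    static int claim_region_sketch(unsigned long v_addr, unsigned long size)
    {
            struct vm_struct *area = im_get_area(v_addr, size,
                            IM_REGION_UNUSED | IM_REGION_SUBSET |
                            IM_REGION_EXISTS);

            if (area == NULL)
                    return -ENOMEM;         /* incompatible collision */
            /* ... install mappings covering area ... */
            return 0;
    }
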
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index c7f7bb6f30b3..2ffca63602c5 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -64,7 +64,8 @@
64#include <asm/iommu.h> 64#include <asm/iommu.h>
65#include <asm/abs_addr.h> 65#include <asm/abs_addr.h>
66#include <asm/vdso.h> 66#include <asm/vdso.h>
67#include <asm/imalloc.h> 67
68#include "mmu_decl.h"
68 69
69unsigned long ioremap_bot = IMALLOC_BASE; 70unsigned long ioremap_bot = IMALLOC_BASE;
70static unsigned long phbs_io_bot = PHBS_IO_BASE; 71static unsigned long phbs_io_bot = PHBS_IO_BASE;
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 957b09103422..fb2a7c798e82 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -34,6 +34,7 @@
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/hardirq.h> 35#include <linux/hardirq.h>
36#include <linux/cpu.h> 36#include <linux/cpu.h>
37#include <linux/compiler.h>
37 38
38#include <asm/ptrace.h> 39#include <asm/ptrace.h>
39#include <asm/atomic.h> 40#include <asm/atomic.h>
@@ -631,8 +632,9 @@ void smp_core99_give_timebase(void)
631 mb(); 632 mb();
632 633
633 /* wait for the secondary to have taken it */ 634 /* wait for the secondary to have taken it */
634 for (t = 100000; t > 0 && sec_tb_reset; --t) 635 /* note: can't use udelay here, since it needs the timebase running */
635 udelay(10); 636 for (t = 10000000; t > 0 && sec_tb_reset; --t)
637 barrier();
636 if (sec_tb_reset) 638 if (sec_tb_reset)
637 /* XXX BUG_ON here? */ 639 /* XXX BUG_ON here? */
638 printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n"); 640 printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
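
The powermac change works around the chicken-and-egg problem its new comment states: udelay() on this platform spins on the timebase, which is exactly what is frozen here, so the wait is bounded by iteration count instead, with barrier() (hence the new linux/compiler.h include) forcing the flag to be re-read each pass. The generic shape, as a sketch:

    #include <linux/compiler.h>             /* barrier() */

    /* Poll a flag with no usable time source: bound the wait by a loop
     * count and use barrier() so the compiler re-reads the flag. */
    static int wait_flag_clear(volatile int *flag, long spins)
    {
            long t;

            for (t = spins; t > 0 && *flag; --t)
                    barrier();              /* compiler barrier only */

            return *flag ? -1 : 0;          /* nonzero: timed out */
    }
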
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 105f05341a41..58d1cc2023c8 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -361,7 +361,8 @@ static void mpic_enable_irq(unsigned int irq)
361 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); 361 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
362 362
363 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, 363 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
364 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & ~MPIC_VECPRI_MASK); 364 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) &
365 ~MPIC_VECPRI_MASK);
365 366
366 /* make sure mask gets to controller before we return to user */ 367 /* make sure mask gets to controller before we return to user */
367 do { 368 do {
@@ -381,7 +382,8 @@ static void mpic_disable_irq(unsigned int irq)
381 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); 382 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
382 383
383 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, 384 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
384 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) | MPIC_VECPRI_MASK); 385 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
386 MPIC_VECPRI_MASK);
385 387
386 /* make sure mask gets to controller before we return to user */ 388 /* make sure mask gets to controller before we return to user */
387 do { 389 do {
@@ -735,12 +737,13 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
735 737
736 spin_lock_irqsave(&mpic_lock, flags); 738 spin_lock_irqsave(&mpic_lock, flags);
737 if (is_ipi) { 739 if (is_ipi) {
738 reg = mpic_ipi_read(irq - mpic->ipi_offset) & MPIC_VECPRI_PRIORITY_MASK; 740 reg = mpic_ipi_read(irq - mpic->ipi_offset) &
741 ~MPIC_VECPRI_PRIORITY_MASK;
739 mpic_ipi_write(irq - mpic->ipi_offset, 742 mpic_ipi_write(irq - mpic->ipi_offset,
740 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 743 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
741 } else { 744 } else {
742 reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI) 745 reg = mpic_irq_read(irq - mpic->irq_offset,MPIC_IRQ_VECTOR_PRI)
743 & MPIC_VECPRI_PRIORITY_MASK; 746 & ~MPIC_VECPRI_PRIORITY_MASK;
744 mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI, 747 mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI,
745 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 748 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
746 } 749 }
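
The last mpic.c hunk is a behavior fix hiding among line reflows: the old code ANDed the vector/priority word with MPIC_VECPRI_PRIORITY_MASK, which keeps only the stale priority bits and wipes the vector, while a read-modify-write must clear the field with the inverted mask before OR-ing in the new value. The corrected pattern, sketched:

    /* Correct read-modify-write of a packed priority field. */
    static unsigned int set_priority_bits(unsigned int reg, unsigned int pri)
    {
            reg &= ~MPIC_VECPRI_PRIORITY_MASK;        /* clear old priority */
            reg |= pri << MPIC_VECPRI_PRIORITY_SHIFT; /* insert the new one */
            return reg;
    }
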