author	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-04 11:18:34 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-04 11:18:34 -0400
commit	5a96c5d0c58ead9a0ece03ffe1c116dea6dafe9c (patch)
tree	17199c2c536f25a2b34e37045e9f7619a2dcbb3d /arch
parent	13bbd8d90647132fc295d73b122567eb8987d298 (diff)
parent	5f024a251f0b3b179bbc8fc62f3a650e49359db5 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/willy/parisc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/willy/parisc-2.6: (41 commits)
  [PARISC] Kill wall_jiffies use
  [PARISC] Honour "panic_on_oops" sysctl
  [PARISC] Fix fs/binfmt_som.c
  [PARISC] Export clear_user_page to modules
  [PARISC] Make DMA routines more stubby
  [PARISC] Define pci_get_legacy_ide_irq
  [PARISC] Fix CONFIG_DEBUG_SPINLOCK
  [PARISC] Fix HPUX compat compile with current GCC
  [PARISC] Fix iounmap compile warning
  [PARISC] Add support for Quicksilver AGPGART
  [PARISC] Move LBA and SBA register defines to the common ropes.h
  [PARISC] Create shared <asm/ropes.h> header
  [PARISC] Stash the lba_device in its struct device drvdata
  [PARISC] Generalize IS_ASTRO et al to take a parisc_device like
  [PARISC] Pretty print the name of the lba type on kernel boot
  [PARISC] Remove some obsolete comments and I checked that Reo is similar to Ike
  [PARISC] Add hardware found in the rp8400
  [PARISC] Allow nested interrupts
  [PARISC] Further updates to timer_interrupt()
  [PARISC] remove halftick and copy clocktick to local var (gcc can optimize usage)
  ...
Diffstat (limited to 'arch')
-rw-r--r--	arch/parisc/Kconfig	2
-rw-r--r--	arch/parisc/hpux/fs.c	2
-rw-r--r--	arch/parisc/kernel/binfmt_elf32.c	24
-rw-r--r--	arch/parisc/kernel/cache.c	48
-rw-r--r--	arch/parisc/kernel/entry.S	21
-rw-r--r--	arch/parisc/kernel/hardware.c	3
-rw-r--r--	arch/parisc/kernel/irq.c	151
-rw-r--r--	arch/parisc/kernel/processor.c	5
-rw-r--r--	arch/parisc/kernel/signal.c	5
-rw-r--r--	arch/parisc/kernel/smp.c	7
-rw-r--r--	arch/parisc/kernel/sys_parisc.c	45
-rw-r--r--	arch/parisc/kernel/syscall_table.S	4
-rw-r--r--	arch/parisc/kernel/time.c	208
-rw-r--r--	arch/parisc/kernel/traps.c	10
-rw-r--r--	arch/parisc/mm/init.c	23
-rw-r--r--	arch/parisc/mm/ioremap.c	2
16 files changed, 360 insertions(+), 200 deletions(-)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 6dd0ea8f88e0..d2101237442e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -127,7 +127,7 @@ config PA11
 
 config PREFETCH
 	def_bool y
-	depends on PA8X00
+	depends on PA8X00 || PA7200
 
 config 64BIT
 	bool "64-bit kernel"
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 6e79dbf3f6bd..2d58b92b57e3 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -96,7 +96,7 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
 	put_user(namlen, &dirent->d_namlen);
 	copy_to_user(dirent->d_name, name, namlen);
 	put_user(0, dirent->d_name + namlen);
-	((char *) dirent) += reclen;
+	dirent = (void __user *)dirent + reclen;
 	buf->current_dir = dirent;
 	buf->count -= reclen;
 	return 0;
diff --git a/arch/parisc/kernel/binfmt_elf32.c b/arch/parisc/kernel/binfmt_elf32.c
index d1833f164bbe..1e64e7b88110 100644
--- a/arch/parisc/kernel/binfmt_elf32.c
+++ b/arch/parisc/kernel/binfmt_elf32.c
@@ -87,7 +87,7 @@ struct elf_prpsinfo32
  */
 
 #define SET_PERSONALITY(ex, ibcs2) \
-	current->personality = PER_LINUX32; \
+	set_thread_flag(TIF_32BIT); \
 	current->thread.map_base = DEFAULT_MAP_BASE32; \
 	current->thread.task_size = DEFAULT_TASK_SIZE32 \
 
@@ -102,25 +102,3 @@ cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
 }
 
 #include "../../../fs/binfmt_elf.c"
-
-/* Set up a separate execution domain for ELF32 binaries running
- * on an ELF64 kernel */
-
-static struct exec_domain parisc32_exec_domain = {
-	.name = "Linux/ELF32",
-	.pers_low = PER_LINUX32,
-	.pers_high = PER_LINUX32,
-};
-
-static int __init parisc32_exec_init(void)
-{
-	/* steal the identity signal mappings from the default domain */
-	parisc32_exec_domain.signal_map = default_exec_domain.signal_map;
-	parisc32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
-
-	register_exec_domain(&parisc32_exec_domain);
-
-	return 0;
-}
-
-__initcall(parisc32_exec_init);
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index bc7c4a4e26a1..0be51e92a2fc 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -35,15 +35,12 @@ int icache_stride __read_mostly;
 EXPORT_SYMBOL(dcache_stride);
 
 
-#if defined(CONFIG_SMP)
 /* On some machines (e.g. ones with the Merced bus), there can be
  * only a single PxTLB broadcast at a time; this must be guaranteed
  * by software. We put a spinlock around all TLB flushes to
  * ensure this.
  */
 DEFINE_SPINLOCK(pa_tlb_lock);
-EXPORT_SYMBOL(pa_tlb_lock);
-#endif
 
 struct pdc_cache_info cache_info __read_mostly;
 #ifndef CONFIG_PA20
@@ -91,7 +88,8 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 
 		flush_kernel_dcache_page(page);
 		clear_bit(PG_dcache_dirty, &page->flags);
-	}
+	} else if (parisc_requires_coherency())
+		flush_kernel_dcache_page(page);
 }
 
 void
@@ -370,3 +368,45 @@ void parisc_setup_cache_timing(void)
 
 	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
 }
+
+extern void purge_kernel_dcache_page(unsigned long);
+extern void clear_user_page_asm(void *page, unsigned long vaddr);
+
+void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
+{
+	purge_kernel_dcache_page((unsigned long)page);
+	purge_tlb_start();
+	pdtlb_kernel(page);
+	purge_tlb_end();
+	clear_user_page_asm(page, vaddr);
+}
+EXPORT_SYMBOL(clear_user_page);
+
+void flush_kernel_dcache_page_addr(void *addr)
+{
+	flush_kernel_dcache_page_asm(addr);
+	purge_tlb_start();
+	pdtlb_kernel(addr);
+	purge_tlb_end();
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+	struct page *pg)
+{
+	/* no coherency needed (all in kmap/kunmap) */
+	copy_user_page_asm(vto, vfrom);
+	if (!parisc_requires_coherency())
+		flush_kernel_dcache_page_asm(vto);
+}
+EXPORT_SYMBOL(copy_user_page);
+
+#ifdef CONFIG_PA8X00
+
+void kunmap_parisc(void *addr)
+{
+	if (parisc_requires_coherency())
+		flush_kernel_dcache_page_addr(addr);
+}
+EXPORT_SYMBOL(kunmap_parisc);
+#endif
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 192357a3b9fe..340b5e8d67ba 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -30,6 +30,7 @@
 
 
 #include <asm/psw.h>
+#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
 #include <asm/assembly.h>	/* for LDREG/STREG defines */
 #include <asm/pgtable.h>
 #include <asm/signal.h>
@@ -478,11 +479,7 @@
 	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
 	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
 	copy		\pmd,%r9
-#ifdef CONFIG_64BIT
-	shld		%r9,PxD_VALUE_SHIFT,\pmd
-#else
-	shlw		%r9,PxD_VALUE_SHIFT,\pmd
-#endif
+	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
 	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
 	DEP		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
@@ -970,11 +967,7 @@ intr_return:
 	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
 	** irq_stat[] is defined using ____cacheline_aligned.
 	*/
-#ifdef CONFIG_64BIT
-	shld	%r1, 6, %r20
-#else
-	shlw	%r1, 5, %r20
-#endif
+	SHLREG	%r1,L1_CACHE_SHIFT,%r20
 	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
 #endif /* CONFIG_SMP */
 
@@ -1076,7 +1069,7 @@ intr_do_preempt:
 	BL	preempt_schedule_irq, %r2
 	nop
 
-	b	intr_restore		/* ssm PSW_SM_I done by intr_restore */
+	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
 #endif /* CONFIG_PREEMPT */
 
 	.import do_signal,code
@@ -2115,11 +2108,7 @@ syscall_check_bh:
 	ldw	TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
 
 	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
-#ifdef CONFIG_64BIT
-	shld	%r26, 6, %r20
-#else
-	shlw	%r26, 5, %r20
-#endif
+	SHLREG	%r26,L1_CACHE_SHIFT,%r20
 	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
 #endif /* CONFIG_SMP */
 
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 3058bffd8a2c..18ba4cb9159b 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -231,6 +231,7 @@ static struct hp_hardware hp_hardware_list[] __initdata = {
 	{HPHW_NPROC,0x5E6,0x4,0x91,"Keystone/Matterhorn W2 650"},
 	{HPHW_NPROC,0x5E7,0x4,0x91,"Caribe W2 800"},
 	{HPHW_NPROC,0x5E8,0x4,0x91,"Pikes Peak W2"},
+	{HPHW_NPROC,0x5EB,0x4,0x91,"Perf/Leone 875 W2+"},
 	{HPHW_NPROC,0x5FF,0x4,0x91,"Hitachi W"},
 	{HPHW_NPROC,0x600,0x4,0x81,"Gecko (712/60)"},
 	{HPHW_NPROC,0x601,0x4,0x81,"Gecko 80 (712/80)"},
@@ -584,8 +585,10 @@ static struct hp_hardware hp_hardware_list[] __initdata = {
 	{HPHW_CONSOLE, 0x01A, 0x0001F, 0x00, "Jason/Anole 64 Null Console"},
 	{HPHW_CONSOLE, 0x01B, 0x0001F, 0x00, "Jason/Anole 100 Null Console"},
 	{HPHW_FABRIC, 0x004, 0x000AA, 0x80, "Halfdome DNA Central Agent"},
+	{HPHW_FABRIC, 0x005, 0x000AA, 0x80, "Keystone DNA Central Agent"},
 	{HPHW_FABRIC, 0x007, 0x000AA, 0x80, "Caribe DNA Central Agent"},
 	{HPHW_FABRIC, 0x004, 0x000AB, 0x00, "Halfdome TOGO Fabric Crossbar"},
+	{HPHW_FABRIC, 0x005, 0x000AB, 0x00, "Keystone TOGO Fabric Crossbar"},
 	{HPHW_FABRIC, 0x004, 0x000AC, 0x00, "Halfdome Sakura Fabric Router"},
 	{HPHW_FIO, 0x025, 0x0002E, 0x80, "Armyknife Optional X.25"},
 	{HPHW_FIO, 0x004, 0x0004F, 0x0, "8-Port X.25 EISA-ACC (AMSO)"},
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 5b8803cc3d69..9bdd0197ceb7 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -45,6 +45,17 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
 */
 static volatile unsigned long cpu_eiem = 0;
 
+/*
+** ack bitmap ... habitually set to 1, but reset to zero
+** between ->ack() and ->end() of the interrupt to prevent
+** re-interruption of a processing interrupt.
+*/
+static volatile unsigned long global_ack_eiem = ~0UL;
+/*
+** Local bitmap, same as above but for per-cpu interrupts
+*/
+static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
+
 static void cpu_disable_irq(unsigned int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
@@ -62,13 +73,6 @@ static void cpu_enable_irq(unsigned int irq)
 
 	cpu_eiem |= eirr_bit;
 
-	/* FIXME: while our interrupts aren't nested, we cannot reset
-	 * the eiem mask if we're already in an interrupt. Once we
-	 * implement nested interrupts, this can go away
-	 */
-	if (!in_interrupt())
-		set_eiem(cpu_eiem);
-
 	/* This is just a simple NOP IPI. But what it does is cause
 	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
 	 * of the interrupt handler */
@@ -84,13 +88,45 @@ static unsigned int cpu_startup_irq(unsigned int irq)
 void no_ack_irq(unsigned int irq) { }
 void no_end_irq(unsigned int irq) { }
 
+void cpu_ack_irq(unsigned int irq)
+{
+	unsigned long mask = EIEM_MASK(irq);
+	int cpu = smp_processor_id();
+
+	/* Clear in EIEM so we can no longer process */
+	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status))
+		per_cpu(local_ack_eiem, cpu) &= ~mask;
+	else
+		global_ack_eiem &= ~mask;
+
+	/* disable the interrupt */
+	set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+	/* and now ack it */
+	mtctl(mask, 23);
+}
+
+void cpu_end_irq(unsigned int irq)
+{
+	unsigned long mask = EIEM_MASK(irq);
+	int cpu = smp_processor_id();
+
+	/* set it in the eiems---it's no longer in process */
+	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status))
+		per_cpu(local_ack_eiem, cpu) |= mask;
+	else
+		global_ack_eiem |= mask;
+
+	/* enable the interrupt */
+	set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+}
+
 #ifdef CONFIG_SMP
 int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 {
 	int cpu_dest;
 
 	/* timer and ipi have to always be received on all CPUs */
-	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
+	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision. The mask has already
 		 * been set; we must reset it */
 		irq_desc[irq].affinity = CPU_MASK_ALL;
@@ -119,8 +155,8 @@ static struct hw_interrupt_type cpu_interrupt_type = {
 	.shutdown	= cpu_disable_irq,
 	.enable		= cpu_enable_irq,
 	.disable	= cpu_disable_irq,
-	.ack		= no_ack_irq,
-	.end		= no_end_irq,
+	.ack		= cpu_ack_irq,
+	.end		= cpu_end_irq,
 #ifdef CONFIG_SMP
 	.set_affinity	= cpu_set_affinity_irq,
 #endif
@@ -209,7 +245,7 @@ int show_interrupts(struct seq_file *p, void *v)
 ** Then use that to get the Transaction address and data.
 */
 
-int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *type, void *data)
+int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
 {
 	if (irq_desc[irq].action)
 		return -EBUSY;
@@ -298,82 +334,69 @@ unsigned int txn_alloc_data(unsigned int virt_irq)
 	return virt_irq - CPU_IRQ_BASE;
 }
 
+static inline int eirr_to_irq(unsigned long eirr)
+{
+#ifdef CONFIG_64BIT
+	int bit = fls64(eirr);
+#else
+	int bit = fls(eirr);
+#endif
+	return (BITS_PER_LONG - bit) + TIMER_IRQ;
+}
+
 /* ONLY called from entry.S:intr_extint() */
 void do_cpu_irq_mask(struct pt_regs *regs)
 {
 	unsigned long eirr_val;
-
-	irq_enter();
-
-	/*
-	 * Don't allow TIMER or IPI nested interrupts.
-	 * Allowing any single interrupt to nest can lead to that CPU
-	 * handling interrupts with all enabled interrupts unmasked.
-	 */
-	set_eiem(0UL);
-
-	/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
-	 * 2) We loop here on EIRR contents in order to avoid
-	 *    nested interrupts or having to take another interrupt
-	 *    when we could have just handled it right away.
-	 */
-	for (;;) {
-		unsigned long bit = (1UL << (BITS_PER_LONG - 1));
-		unsigned int irq;
-		eirr_val = mfctl(23) & cpu_eiem;
-		if (!eirr_val)
-			break;
-
-		mtctl(eirr_val, 23); /* reset bits we are going to process */
-
-		/* Work our way from MSb to LSb...same order we alloc EIRs */
-		for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
+	int irq, cpu = smp_processor_id();
 #ifdef CONFIG_SMP
-			cpumask_t dest = irq_desc[irq].affinity;
+	cpumask_t dest;
 #endif
-			if (!(bit & eirr_val))
-				continue;
 
-			/* clear bit in mask - can exit loop sooner */
-			eirr_val &= ~bit;
+	local_irq_disable();
+	irq_enter();
 
-#ifdef CONFIG_SMP
-			/* FIXME: because generic set affinity mucks
-			 * with the affinity before sending it to us
-			 * we can get the situation where the affinity is
-			 * wrong for our CPU type interrupts */
-			if (irq != TIMER_IRQ && irq != IPI_IRQ &&
-			    !cpu_isset(smp_processor_id(), dest)) {
-				int cpu = first_cpu(dest);
-
-				printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
-					irq, smp_processor_id(), cpu);
-				gsc_writel(irq + CPU_IRQ_BASE,
-					cpu_data[cpu].hpa);
-				continue;
-			}
-#endif
+	eirr_val = mfctl(23) & cpu_eiem & global_ack_eiem &
+		per_cpu(local_ack_eiem, cpu);
+	if (!eirr_val)
+		goto set_out;
+	irq = eirr_to_irq(eirr_val);
 
-			__do_IRQ(irq, regs);
-		}
+#ifdef CONFIG_SMP
+	dest = irq_desc[irq].affinity;
+	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
+	    !cpu_isset(smp_processor_id(), dest)) {
+		int cpu = first_cpu(dest);
+
+		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
+			irq, smp_processor_id(), cpu);
+		gsc_writel(irq + CPU_IRQ_BASE,
+			cpu_data[cpu].hpa);
+		goto set_out;
 	}
+#endif
+	__do_IRQ(irq, regs);
 
-	set_eiem(cpu_eiem); /* restore original mask */
+ out:
 	irq_exit();
-}
+	return;
 
+ set_out:
+	set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+	goto out;
+}
 
 static struct irqaction timer_action = {
 	.handler = timer_interrupt,
 	.name = "timer",
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU,
 };
 
 #ifdef CONFIG_SMP
 static struct irqaction ipi_action = {
 	.handler = ipi_interrupt,
 	.name = "IPI",
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_PERCPU,
 };
 #endif
 
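Note on the hunk above: do_cpu_irq_mask() now dispatches exactly one interrupt per external-interrupt trap, and eirr_to_irq() selects the most significant pending EIRR bit (EIRR sources are allocated MSB-first, so the MSB is the timer). A standalone sketch of that mapping follows; it is not kernel code, and BITS_PER_LONG, TIMER_IRQ and the portable fls below are illustrative stand-ins for the kernel definitions:

#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG ((int)(sizeof(unsigned long) * CHAR_BIT))
#define TIMER_IRQ 0	/* hypothetical stand-in for the kernel constant */

/* portable fls: 1-based index of the highest set bit, 0 if none set */
static int fls_ulong(unsigned long x)
{
	int bit = 0;

	while (x) {
		x >>= 1;
		bit++;
	}
	return bit;
}

static int eirr_to_irq(unsigned long eirr)
{
	return (BITS_PER_LONG - fls_ulong(eirr)) + TIMER_IRQ;
}

int main(void)
{
	/* the MSB is the highest-priority source: it maps to TIMER_IRQ */
	assert(eirr_to_irq(1UL << (BITS_PER_LONG - 1)) == TIMER_IRQ);
	/* the next bit down is the next IRQ number */
	assert(eirr_to_irq(1UL << (BITS_PER_LONG - 2)) == TIMER_IRQ + 1);
	/* with several bits pending, the most significant one wins */
	assert(eirr_to_irq((1UL << (BITS_PER_LONG - 1)) | 1UL) == TIMER_IRQ);
	return 0;
}

Because fls() returns the 1-based index of the highest set bit, more significant (higher-priority) pending bits always map to lower IRQ numbers.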
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 99d7fca93104..fb81e5687e7c 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -143,8 +143,9 @@ static int __init processor_probe(struct parisc_device *dev)
 	p = &cpu_data[cpuid];
 	boot_cpu_data.cpu_count++;
 
-	/* initialize counters */
-	memset(p, 0, sizeof(struct cpuinfo_parisc));
+	/* initialize counters - CPU 0 gets it_value set in time_init() */
+	if (cpuid)
+		memset(p, 0, sizeof(struct cpuinfo_parisc));
 
 	p->loops_per_jiffy = loops_per_jiffy;
 	p->dev = dev;		/* Save IODC data in case we need it */
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index bb83880c5ee3..ee6653edeb7a 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -26,7 +26,6 @@
 #include <linux/stddef.h>
 #include <linux/compat.h>
 #include <linux/elf.h>
-#include <linux/personality.h>
 #include <asm/ucontext.h>
 #include <asm/rt_sigframe.h>
 #include <asm/uaccess.h>
@@ -433,13 +432,13 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	if (in_syscall) {
 		regs->gr[31] = haddr;
 #ifdef __LP64__
-		if (personality(current->personality) == PER_LINUX)
+		if (!test_thread_flag(TIF_32BIT))
 			sigframe_size |= 1;
 #endif
 	} else {
 		unsigned long psw = USER_PSW;
 #ifdef __LP64__
-		if (personality(current->personality) == PER_LINUX)
+		if (!test_thread_flag(TIF_32BIT))
 			psw |= PSW_W;
 #endif
 
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 98e40959a564..faad338f310e 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -262,6 +262,9 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 					this_cpu, which);
 				return IRQ_NONE;
 			} /* Switch */
+			/* let in any pending interrupts */
+			local_irq_enable();
+			local_irq_disable();
 		} /* while (ops) */
 	}
 	return IRQ_HANDLED;
@@ -430,8 +433,9 @@ smp_do_timer(struct pt_regs *regs)
 static void __init
 smp_cpu_init(int cpunum)
 {
-	extern int init_per_cpu(int);  /* arch/parisc/kernel/setup.c */
+	extern int init_per_cpu(int);  /* arch/parisc/kernel/processor.c */
 	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */
+	extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
 
 	/* Set modes and Enable floating point coprocessor */
 	(void) init_per_cpu(cpunum);
@@ -457,6 +461,7 @@ smp_cpu_init(int cpunum)
 	enter_lazy_tlb(&init_mm, current);
 
 	init_IRQ();   /* make sure no IRQ's are enabled or pending */
+	start_cpu_itimer();
 }
 
 
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 8b5df98e2b31..1db5588ceacf 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -31,6 +31,8 @@
 #include <linux/shm.h>
 #include <linux/smp_lock.h>
 #include <linux/syscalls.h>
+#include <linux/utsname.h>
+#include <linux/personality.h>
 
 int sys_pipe(int __user *fildes)
 {
@@ -248,3 +250,46 @@ asmlinkage int sys_free_hugepages(unsigned long addr)
 {
 	return -EINVAL;
 }
+
+long parisc_personality(unsigned long personality)
+{
+	long err;
+
+	if (personality(current->personality) == PER_LINUX32
+	    && personality == PER_LINUX)
+		personality = PER_LINUX32;
+
+	err = sys_personality(personality);
+	if (err == PER_LINUX32)
+		err = PER_LINUX;
+
+	return err;
+}
+
+static inline int override_machine(char __user *mach) {
+#ifdef CONFIG_COMPAT
+	if (personality(current->personality) == PER_LINUX32) {
+		if (__put_user(0, mach + 6) ||
+		    __put_user(0, mach + 7))
+			return -EFAULT;
+	}
+
+	return 0;
+#else /*!CONFIG_COMPAT*/
+	return 0;
+#endif /*CONFIG_COMPAT*/
+}
+
+long parisc_newuname(struct new_utsname __user *utsname)
+{
+	int err = 0;
+
+	down_read(&uts_sem);
+	if (copy_to_user(utsname, &system_utsname, sizeof(*utsname)))
+		err = -EFAULT;
+	up_read(&uts_sem);
+
+	err = override_machine(utsname->machine);
+
+	return (long)err;
+}
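Note on the hunk above: the two __put_user(0, ...) stores in override_machine() simply truncate the utsname machine field at byte offset 6. Assuming the 64-bit kernel reports the 8-character string "parisc64" (an assumption; the string itself is not part of this diff), a PER_LINUX32 task then sees "parisc". A trivial userspace sketch:

#include <assert.h>
#include <string.h>

int main(void)
{
	/* 9 bytes: "parisc64" plus its terminating NUL */
	char machine[9] = "parisc64";

	/* what override_machine() does via __put_user() for PER_LINUX32 */
	machine[6] = 0;
	machine[7] = 0;

	assert(strcmp(machine, "parisc") == 0);
	return 0;
}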
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index e27b432f90a8..701d66a596e8 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -132,7 +132,7 @@
 	ENTRY_SAME(socketpair)
 	ENTRY_SAME(setpgid)
 	ENTRY_SAME(send)
-	ENTRY_SAME(newuname)
+	ENTRY_OURS(newuname)
 	ENTRY_SAME(umask)		/* 60 */
 	ENTRY_SAME(chroot)
 	ENTRY_SAME(ustat)
@@ -221,7 +221,7 @@
 	ENTRY_SAME(fchdir)
 	ENTRY_SAME(bdflush)
 	ENTRY_SAME(sysfs)		/* 135 */
-	ENTRY_SAME(personality)
+	ENTRY_OURS(personality)
 	ENTRY_SAME(ni_syscall)	/* for afs_syscall */
 	ENTRY_SAME(setfsuid)
 	ENTRY_SAME(setfsgid)
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index ab641d67f551..b3496b592a2d 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -32,8 +32,7 @@
 
 #include <linux/timex.h>
 
-static long clocktick __read_mostly;	/* timer cycles per tick */
-static long halftick __read_mostly;
+static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
 
 #ifdef CONFIG_SMP
 extern void smp_do_timer(struct pt_regs *regs);
@@ -41,46 +40,106 @@ extern void smp_do_timer(struct pt_regs *regs);
 
 irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
-	long now;
-	long next_tick;
-	int nticks;
-	int cpu = smp_processor_id();
+	unsigned long now;
+	unsigned long next_tick;
+	unsigned long cycles_elapsed;
+	unsigned long cycles_remainder;
+	unsigned int cpu = smp_processor_id();
+
+	/* gcc can optimize for "read-only" case with a local clocktick */
+	unsigned long cpt = clocktick;
 
 	profile_tick(CPU_PROFILING, regs);
 
-	now = mfctl(16);
-	/* initialize next_tick to time at last clocktick */
+	/* Initialize next_tick to the expected tick time. */
 	next_tick = cpu_data[cpu].it_value;
 
-	/* since time passes between the interrupt and the mfctl()
-	 * above, it is never true that last_tick + clocktick == now.  If we
-	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
-	 * but maybe we'll miss ticks, hence the loop.
-	 *
-	 * Variables are *signed*.
+	/* Get current interval timer.
+	 * CR16 reads as 64 bits in CPU wide mode.
+	 * CR16 reads as 32 bits in CPU narrow mode.
 	 */
+	now = mfctl(16);
+
+	cycles_elapsed = now - next_tick;
 
-	nticks = 0;
-	while((next_tick - now) < halftick) {
-		next_tick += clocktick;
-		nticks++;
+	if ((cycles_elapsed >> 5) < cpt) {
+		/* use "cheap" math (add/subtract) instead
+		 * of the more expensive div/mul method
+		 */
+		cycles_remainder = cycles_elapsed;
+		while (cycles_remainder > cpt) {
+			cycles_remainder -= cpt;
+		}
+	} else {
+		cycles_remainder = cycles_elapsed % cpt;
 	}
-	mtctl(next_tick, 16);
+
+	/* Can we differentiate between "early CR16" (aka Scenario 1) and
+	 * "long delay" (aka Scenario 3)? I don't think so.
+	 *
+	 * We expected timer_interrupt to be delivered at least a few hundred
+	 * cycles after the IT fires. But it's arbitrary how much time passes
+	 * before we call it "late". I've picked one second.
+	 */
+/* aproximate HZ with shifts. Intended math is "(elapsed/clocktick) > HZ" */
+#if HZ == 1000
+	if (cycles_elapsed > (cpt << 10) )
+#elif HZ == 250
+	if (cycles_elapsed > (cpt << 8) )
+#elif HZ == 100
+	if (cycles_elapsed > (cpt << 7) )
+#else
+#warn WTF is HZ set to anyway?
+	if (cycles_elapsed > (HZ * cpt) )
+#endif
+	{
+		/* Scenario 3: very long delay?  bad in any case */
+		printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
+			" cycles %lX rem %lX "
+			" next/now %lX/%lX\n",
+			cpu,
+			cycles_elapsed, cycles_remainder,
+			next_tick, now );
+	}
+
+	/* convert from "division remainder" to "remainder of clock tick" */
+	cycles_remainder = cpt - cycles_remainder;
+
+	/* Determine when (in CR16 cycles) next IT interrupt will fire.
+	 * We want IT to fire modulo clocktick even if we miss/skip some.
+	 * But those interrupts don't in fact get delivered that regularly.
+	 */
+	next_tick = now + cycles_remainder;
 
 	cpu_data[cpu].it_value = next_tick;
 
-	while (nticks--) {
+	/* Skip one clocktick on purpose if we are likely to miss next_tick.
+	 * We want to avoid the new next_tick being less than CR16.
+	 * If that happened, itimer wouldn't fire until CR16 wrapped.
+	 * We'll catch the tick we missed on the tick after that.
+	 */
+	if (!(cycles_remainder >> 13))
+		next_tick += cpt;
+
+	/* Program the IT when to deliver the next interrupt. */
+	/* Only bottom 32-bits of next_tick are written to cr16.  */
+	mtctl(next_tick, 16);
+
+
+	/* Done mucking with unreliable delivery of interrupts.
+	 * Go do system house keeping.
+	 */
 #ifdef CONFIG_SMP
 	smp_do_timer(regs);
 #else
 	update_process_times(user_mode(regs));
 #endif
 	if (cpu == 0) {
 		write_seqlock(&xtime_lock);
-		do_timer(1);
+		do_timer(regs);
 		write_sequnlock(&xtime_lock);
-	}
 	}
 
 	/* check soft power switch status */
 	if (cpu == 0 && !atomic_read(&power_tasklet.count))
 		tasklet_schedule(&power_tasklet);
@@ -106,14 +165,12 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 
 
-/*** converted from ia64 ***/
 /*
  * Return the number of micro-seconds that elapsed since the last
  * update to wall time (aka xtime). The xtime_lock
  * must be at least read-locked when calling this routine.
  */
-static inline unsigned long
-gettimeoffset (void)
+static inline unsigned long gettimeoffset (void)
 {
 #ifndef CONFIG_SMP
 	/*
@@ -121,21 +178,44 @@ gettimeoffset (void)
 	 * Once parisc-linux learns the cr16 difference between processors,
 	 * this could be made to work.
 	 */
-	long last_tick;
-	long elapsed_cycles;
-
-	/* it_value is the intended time of the next tick */
-	last_tick = cpu_data[smp_processor_id()].it_value;
-
-	/* Subtract one tick and account for possible difference between
-	 * when we expected the tick and when it actually arrived.
-	 * (aka wall vs real)
-	 */
-	last_tick -= clocktick * (jiffies - wall_jiffies + 1);
-	elapsed_cycles = mfctl(16) - last_tick;
+	unsigned long now;
+	unsigned long prev_tick;
+	unsigned long next_tick;
+	unsigned long elapsed_cycles;
+	unsigned long usec;
+	unsigned long cpuid = smp_processor_id();
+	unsigned long cpt = clocktick;
+
+	next_tick = cpu_data[cpuid].it_value;
+	now = mfctl(16);	/* Read the hardware interval timer.  */
+
+	prev_tick = next_tick - cpt;
+
+	/* Assume Scenario 1: "now" is later than prev_tick. */
+	elapsed_cycles = now - prev_tick;
+
+/* aproximate HZ with shifts. Intended math is "(elapsed/clocktick) > HZ" */
+#if HZ == 1000
+	if (elapsed_cycles > (cpt << 10) )
+#elif HZ == 250
+	if (elapsed_cycles > (cpt << 8) )
+#elif HZ == 100
+	if (elapsed_cycles > (cpt << 7) )
+#else
+#warn WTF is HZ set to anyway?
+	if (elapsed_cycles > (HZ * cpt) )
+#endif
+	{
+		/* Scenario 3: clock ticks are missing. */
+		printk (KERN_CRIT "gettimeoffset(CPU %ld): missing %ld ticks!"
+			" cycles %lX prev/now/next %lX/%lX/%lX  clock %lX\n",
+			cpuid, elapsed_cycles / cpt,
+			elapsed_cycles, prev_tick, now, next_tick, cpt);
+	}
 
-	/* the precision of this math could be improved */
-	return elapsed_cycles / (PAGE0->mem_10msec / 10000);
+	/* FIXME: Can we improve the precision? Not with PAGE0. */
+	usec = (elapsed_cycles * 10000) / PAGE0->mem_10msec;
+	return usec;
 #else
 	return 0;
 #endif
@@ -146,6 +226,7 @@ do_gettimeofday (struct timeval *tv)
 {
 	unsigned long flags, seq, usec, sec;
 
+	/* Hold xtime_lock and adjust timeval.  */
 	do {
 		seq = read_seqbegin_irqsave(&xtime_lock, flags);
 		usec = gettimeoffset();
@@ -153,25 +234,13 @@ do_gettimeofday (struct timeval *tv)
 		usec += (xtime.tv_nsec / 1000);
 	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 
-	if (unlikely(usec > LONG_MAX)) {
-		/* This can happen if the gettimeoffset adjustment is
-		 * negative and xtime.tv_nsec is smaller than the
-		 * adjustment */
-		printk(KERN_ERR "do_gettimeofday() spurious xtime.tv_nsec of %ld\n", usec);
-		usec += USEC_PER_SEC;
-		--sec;
-		/* This should never happen, it means the negative
-		 * time adjustment was more than a second, so there's
-		 * something seriously wrong */
-		BUG_ON(usec > LONG_MAX);
-	}
-
-
+	/* Move adjusted usec's into sec's.  */
 	while (usec >= USEC_PER_SEC) {
 		usec -= USEC_PER_SEC;
 		++sec;
 	}
 
+	/* Return adjusted result.  */
 	tv->tv_sec = sec;
 	tv->tv_usec = usec;
 }
@@ -223,22 +292,23 @@ unsigned long long sched_clock(void)
 }
 
 
+void __init start_cpu_itimer(void)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long next_tick = mfctl(16) + clocktick;
+
+	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */
+
+	cpu_data[cpu].it_value = next_tick;
+}
+
 void __init time_init(void)
 {
-	unsigned long next_tick;
 	static struct pdc_tod tod_data;
 
 	clocktick = (100 * PAGE0->mem_10msec) / HZ;
-	halftick = clocktick / 2;
 
-	/* Setup clock interrupt timing */
-
-	next_tick = mfctl(16);
-	next_tick += clocktick;
-	cpu_data[smp_processor_id()].it_value = next_tick;
-
-	/* kick off Itimer (CR16) */
-	mtctl(next_tick, 16);
+	start_cpu_itimer();	/* get CPU 0 started */
 
 	if(pdc_tod_read(&tod_data) == 0) {
 		write_seqlock_irq(&xtime_lock);
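Note on timer_interrupt() above: the subtract loop is only entered while (cycles_elapsed >> 5) < cpt, i.e. fewer than 32 ticks' worth of cycles have elapsed, so the "cheap math" costs at most about 32 subtractions instead of a divide. A quick userspace check of the equivalence, with an assumed cycles-per-tick value chosen purely for illustration; note that the kernel's > test (rather than >=) leaves cpt, not 0, when the elapsed time is an exact multiple:

#include <assert.h>

/* mirrors the subtract loop in the new timer_interrupt() */
static unsigned long remainder_cheap(unsigned long elapsed, unsigned long cpt)
{
	unsigned long rem = elapsed;

	while (rem > cpt)
		rem -= cpt;
	return rem;
}

int main(void)
{
	/* assumed value: e.g. a 250 MHz CR16 with HZ=100 */
	unsigned long cpt = 2500000;
	unsigned long elapsed;

	/* same guard the kernel uses before taking the cheap path */
	for (elapsed = 1; (elapsed >> 5) < cpt; elapsed += 999983) {
		unsigned long cheap = remainder_cheap(elapsed, cpt);
		unsigned long exact = elapsed % cpt;

		/* the two methods agree except at exact multiples,
		 * where the loop stops at cpt instead of 0 */
		assert(cheap == exact || (exact == 0 && cheap == cpt));
	}
	return 0;
}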
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 77b28cb8aca6..65cd6ca32fed 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/timer.h>
+#include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/smp.h>
@@ -245,6 +246,15 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
 		current->comm, current->pid, str, err);
 	show_regs(regs);
 
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+
+	if (panic_on_oops) {
+		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+		ssleep(5);
+		panic("Fatal exception");
+	}
+
 	/* Wot's wrong wif bein' racy? */
 	if (current->thread.flags & PARISC_KERNEL_DEATH) {
 		printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 25ad28d63e88..0667f2b4f977 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -31,10 +31,7 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-extern char _text;	/* start of kernel code, defined by linker */
 extern int  data_start;
-extern char _end;	/* end of BSS, defined by linker */
-extern char __init_begin, __init_end;
 
 #ifdef CONFIG_DISCONTIGMEM
 struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
@@ -319,8 +316,8 @@ static void __init setup_bootmem(void)
 
 	reserve_bootmem_node(NODE_DATA(0), 0UL,
 			(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
-	reserve_bootmem_node(NODE_DATA(0),__pa((unsigned long)&_text),
-			(unsigned long)(&_end - &_text));
+	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
+			(unsigned long)(_end - _text));
 	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
 			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));
 
@@ -355,8 +352,8 @@ static void __init setup_bootmem(void)
 #endif
 
 	data_resource.start =  virt_to_phys(&data_start);
-	data_resource.end = virt_to_phys(&_end)-1;
-	code_resource.start = virt_to_phys(&_text);
+	data_resource.end = virt_to_phys(_end) - 1;
+	code_resource.start = virt_to_phys(_text);
 	code_resource.end = virt_to_phys(&data_start)-1;
 
 	/* We don't know which region the kernel will be in, so try
@@ -385,12 +382,12 @@ void free_initmem(void)
 	 */
 	local_irq_disable();
 
-	memset(&__init_begin, 0x00,
-		(unsigned long)&__init_end - (unsigned long)&__init_begin);
+	memset(__init_begin, 0x00,
+		(unsigned long)__init_end - (unsigned long)__init_begin);
 
 	flush_data_cache();
 	asm volatile("sync" : : );
-	flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
+	flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end);
 	asm volatile("sync" : : );
 
 	local_irq_enable();
@@ -398,8 +395,8 @@ void free_initmem(void)
 
 	/* align __init_begin and __init_end to page size,
 	   ignoring linker script where we might have tried to save RAM */
-	init_begin = PAGE_ALIGN((unsigned long)(&__init_begin));
-	init_end   = PAGE_ALIGN((unsigned long)(&__init_end));
+	init_begin = PAGE_ALIGN((unsigned long)(__init_begin));
+	init_end   = PAGE_ALIGN((unsigned long)(__init_end));
 	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
@@ -578,7 +575,7 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
 	extern const unsigned long fault_vector_20;
 	extern void * const linux_gateway_page;
 
-	ro_start = __pa((unsigned long)&_text);
+	ro_start = __pa((unsigned long)_text);
 	ro_end   = __pa((unsigned long)&data_start);
 	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
 	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
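Note on the mm/init.c hunks above: the recurring '&_text' to '_text' conversions go hand in hand with dropping the local 'extern char _text;' declarations, presumably in favour of array-typed linker-symbol declarations along the lines of 'extern char _text[];' (an assumption; the replacement header is outside this diff). An array name already denotes the symbol's address, which is why the '&' disappears and symbols can be subtracted directly:

#include <assert.h>

char region_start[16];	/* stand-in for a linker-provided symbol */

int main(void)
{
	/* with 'extern char c;' the address must be written '&c'; with
	 * the array-style declaration the name itself already denotes
	 * the address, so '&' can be dropped in the expressions above */
	assert(region_start == &region_start[0]);
	assert((void *)region_start == (void *)&region_start);
	return 0;
}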
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index 27384567a1d0..47a1d2ac9419 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -188,7 +188,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 }
 EXPORT_SYMBOL(__ioremap);
 
-void iounmap(void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
 {
 	if (addr > high_memory)
 		return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));