-rw-r--r--  arch/parisc/Kconfig                    |   9
-rw-r--r--  arch/parisc/Kconfig.debug              |  11
-rw-r--r--  arch/parisc/Makefile                   |   4
-rw-r--r--  arch/parisc/include/asm/atomic.h       |  23
-rw-r--r--  arch/parisc/include/asm/dma-mapping.h  |   3
-rw-r--r--  arch/parisc/include/asm/hardirq.h      |  32
-rw-r--r--  arch/parisc/include/asm/processor.h    |  20
-rw-r--r--  arch/parisc/include/asm/thread_info.h  |   2
-rw-r--r--  arch/parisc/include/asm/tlbflush.h     |   2
-rw-r--r--  arch/parisc/kernel/cache.c             |   6
-rw-r--r--  arch/parisc/kernel/entry.S             |  68
-rw-r--r--  arch/parisc/kernel/hpmc.S              |   4
-rw-r--r--  arch/parisc/kernel/irq.c               | 104
-rw-r--r--  arch/parisc/kernel/pacache.S           |  33
-rw-r--r--  arch/parisc/kernel/setup.c             |   2
-rw-r--r--  arch/parisc/kernel/smp.c               |  14
-rw-r--r--  arch/parisc/kernel/syscall.S           |  34
-rw-r--r--  arch/parisc/kernel/traps.c             |  24
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S       |   2
-rw-r--r--  arch/parisc/mm/init.c                  |   2
-rw-r--r--  drivers/parisc/sba_iommu.c             |  19
21 files changed, 348 insertions, 70 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 433e75a2ee9a..cad060f288cf 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -13,6 +13,7 @@ config PARISC
13 select BUG 13 select BUG
14 select HAVE_PERF_EVENTS 14 select HAVE_PERF_EVENTS
15 select GENERIC_ATOMIC64 if !64BIT 15 select GENERIC_ATOMIC64 if !64BIT
16 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
16 select HAVE_GENERIC_HARDIRQS 17 select HAVE_GENERIC_HARDIRQS
17 select BROKEN_RODATA 18 select BROKEN_RODATA
18 select GENERIC_IRQ_PROBE 19 select GENERIC_IRQ_PROBE
@@ -242,6 +243,14 @@ config SMP
242 243
243 If you don't know what to do here, say N. 244 If you don't know what to do here, say N.
244 245
246config IRQSTACKS
247 bool "Use separate kernel stacks when processing interrupts"
248 default n
249 help
250 If you say Y here the kernel will use separate kernel stacks
251 for handling hard and soft interrupts. This can help avoid
252 overflowing the process kernel stacks.
253
245config HOTPLUG_CPU 254config HOTPLUG_CPU
246 bool 255 bool
247 default y if SMP 256 default y if SMP
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index bc989e522a04..08a332f6ee87 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -13,3 +13,14 @@ config DEBUG_RODATA
13 If in doubt, say "N". 13 If in doubt, say "N".
14 14
15endmenu 15endmenu
16
17config DEBUG_STACKOVERFLOW
18 bool "Check for stack overflows"
19 default y
20 depends on DEBUG_KERNEL
21 ---help---
22 Say Y here if you want to check the overflows of kernel, IRQ
23 and exception stacks. This option will cause messages of the
24 stacks in detail when free stack space drops below a certain
25 limit.
26 If in doubt, say "N".
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 113e28206503..2f967cc6649e 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -24,9 +24,7 @@ CHECKFLAGS += -D__hppa__=1
24LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) 24LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
25 25
26MACHINE := $(shell uname -m) 26MACHINE := $(shell uname -m)
27ifeq ($(MACHINE),parisc*) 27NATIVE := $(if $(filter parisc%,$(MACHINE)),1,0)
28NATIVE := 1
29endif
30 28
31ifdef CONFIG_64BIT 29ifdef CONFIG_64BIT
32UTS_MACHINE := parisc64 30UTS_MACHINE := parisc64
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index f38e1984b242..472886ceab1d 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -229,6 +229,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
229 229
230#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 230#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
231 231
232/*
233 * atomic64_dec_if_positive - decrement by 1 if old value positive
234 * @v: pointer of type atomic_t
235 *
236 * The function returns the old value of *v minus 1, even if
237 * the atomic variable, v, was not decremented.
238 */
239static inline long atomic64_dec_if_positive(atomic64_t *v)
240{
241 long c, old, dec;
242 c = atomic64_read(v);
243 for (;;) {
244 dec = c - 1;
245 if (unlikely(dec < 0))
246 break;
247 old = atomic64_cmpxchg((v), c, dec);
248 if (likely(old == c))
249 break;
250 c = old;
251 }
252 return dec;
253}
254
232#endif /* !CONFIG_64BIT */ 255#endif /* !CONFIG_64BIT */
233 256
234 257
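
The new atomic64_dec_if_positive() is the classic cmpxchg retry loop: read the counter, refuse to go below zero, otherwise try to swap in the decremented value and retry if another CPU changed it in the meantime. A minimal userspace sketch of the same technique using C11 atomics, with atomic_compare_exchange_weak standing in for atomic64_cmpxchg (the function name and the demo are illustrative, not part of the patch):

#include <stdatomic.h>
#include <stdio.h>

static long long dec_if_positive(_Atomic long long *v)
{
	long long c = atomic_load(v);
	long long dec;

	for (;;) {
		dec = c - 1;
		if (dec < 0)
			break;		/* would go negative: leave *v untouched */
		if (atomic_compare_exchange_weak(v, &c, dec))
			break;		/* swap succeeded */
		/* failed: c was refreshed with the current value, retry */
	}
	return dec;			/* old value minus 1, even when not stored */
}

int main(void)
{
	_Atomic long long v = 1;

	printf("%lld\n", dec_if_positive(&v));	/* 0  (v is now 0) */
	printf("%lld\n", dec_if_positive(&v));	/* -1 (v stays 0)  */
	return 0;
}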
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 106b395688e1..d0eae5f2bd87 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -46,6 +46,9 @@ extern struct hppa_dma_ops pcx_dma_ops;
46 46
47extern struct hppa_dma_ops *hppa_dma_ops; 47extern struct hppa_dma_ops *hppa_dma_ops;
48 48
49#define dma_alloc_attrs(d, s, h, f, a) dma_alloc_coherent(d, s, h, f)
50#define dma_free_attrs(d, s, h, f, a) dma_free_coherent(d, s, h, f)
51
49static inline void * 52static inline void *
50dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 53dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
51 gfp_t flag) 54 gfp_t flag)
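
The two forwarding macros simply drop the attrs argument, so callers of the generic attrs-based DMA API compile on parisc. A hypothetical driver fragment, only to show which argument lands where (dev, handle and the NULL attrs pointer are placeholders, not code from this patch):

	void *buf;
	dma_addr_t handle;

	buf = dma_alloc_attrs(dev, PAGE_SIZE, &handle, GFP_KERNEL, NULL);
	if (!buf)
		return -ENOMEM;
	/* ... use buf (CPU address) and handle (bus address) ... */
	dma_free_attrs(dev, PAGE_SIZE, buf, handle, NULL);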
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index 0d68184a76cb..12373c4dabab 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -1,11 +1,41 @@
1/* hardirq.h: PA-RISC hard IRQ support. 1/* hardirq.h: PA-RISC hard IRQ support.
2 * 2 *
3 * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx> 3 * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
4 * Copyright (C) 2013 Helge Deller <deller@gmx.de>
4 */ 5 */
5 6
6#ifndef _PARISC_HARDIRQ_H 7#ifndef _PARISC_HARDIRQ_H
7#define _PARISC_HARDIRQ_H 8#define _PARISC_HARDIRQ_H
8 9
9#include <asm-generic/hardirq.h> 10#include <linux/cache.h>
11#include <linux/threads.h>
12#include <linux/irq.h>
13
14typedef struct {
15 unsigned int __softirq_pending;
16#ifdef CONFIG_DEBUG_STACKOVERFLOW
17 unsigned int kernel_stack_usage;
18#endif
19#ifdef CONFIG_SMP
20 unsigned int irq_resched_count;
21 unsigned int irq_call_count;
22#endif
23 unsigned int irq_tlb_count;
24} ____cacheline_aligned irq_cpustat_t;
25
26DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
27
28#define __ARCH_IRQ_STAT
29#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
30#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
31#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending)
32
33#define __ARCH_SET_SOFTIRQ_PENDING
34
35#define set_softirq_pending(x) \
36 this_cpu_write(irq_stat.__softirq_pending, (x))
37#define or_softirq_pending(x) this_cpu_or(irq_stat.__softirq_pending, (x))
38
39#define ack_bad_irq(irq) WARN(1, "unexpected IRQ trap at vector %02x\n", irq)
10 40
11#endif /* _PARISC_HARDIRQ_H */ 41#endif /* _PARISC_HARDIRQ_H */
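
The private irq_cpustat_t replaces the asm-generic one so parisc can keep architecture-specific counters. Elsewhere in this series the counters are bumped with inc_irq_stat() and read back per CPU for /proc/interrupts; a small illustrative fragment (the surrounding functions are hypothetical, the field and macro names come from the header above):

static void account_tlb_shootdown(void)
{
	inc_irq_stat(irq_tlb_count);	/* expands to this_cpu_inc(irq_stat.irq_tlb_count) */
}

#ifdef CONFIG_DEBUG_STACKOVERFLOW
static unsigned int stack_usage_of(int cpu)
{
	return per_cpu(irq_stat, cpu).kernel_stack_usage;	/* the "STK" row in /proc/interrupts */
}
#endif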
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 09b54a57a48d..064015547d1e 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -20,8 +20,6 @@
20 20
21#endif /* __ASSEMBLY__ */ 21#endif /* __ASSEMBLY__ */
22 22
23#define KERNEL_STACK_SIZE (4*PAGE_SIZE)
24
25/* 23/*
26 * Default implementation of macro that returns current 24 * Default implementation of macro that returns current
27 * instruction pointer ("program counter"). 25 * instruction pointer ("program counter").
@@ -61,6 +59,23 @@
61#ifndef __ASSEMBLY__ 59#ifndef __ASSEMBLY__
62 60
63/* 61/*
62 * IRQ STACK - used for irq handler
63 */
64#ifdef __KERNEL__
65
66#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
67
68union irq_stack_union {
69 unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
70};
71
72DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
73
74void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
75
76#endif /* __KERNEL__ */
77
78/*
64 * Data detected about CPUs at boot time which is the same for all CPU's. 79 * Data detected about CPUs at boot time which is the same for all CPU's.
65 * HP boxes are SMP - ie identical processors. 80 * HP boxes are SMP - ie identical processors.
66 * 81 *
@@ -97,7 +112,6 @@ struct cpuinfo_parisc {
97 unsigned long txn_addr; /* MMIO addr of EIR or id_eid */ 112 unsigned long txn_addr; /* MMIO addr of EIR or id_eid */
98#ifdef CONFIG_SMP 113#ifdef CONFIG_SMP
99 unsigned long pending_ipi; /* bitmap of type ipi_message_type */ 114 unsigned long pending_ipi; /* bitmap of type ipi_message_type */
100 unsigned long ipi_count; /* number ipi Interrupts */
101#endif 115#endif
102 unsigned long bh_count; /* number of times bh was invoked */ 116 unsigned long bh_count; /* number of times bh was invoked */
103 unsigned long prof_counter; /* per CPU profiling support */ 117 unsigned long prof_counter; /* per CPU profiling support */
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 6182832e5b6c..540c88fa8f86 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -40,7 +40,7 @@ struct thread_info {
40 40
41/* thread information allocation */ 41/* thread information allocation */
42 42
43#define THREAD_SIZE_ORDER 2 43#define THREAD_SIZE_ORDER 2 /* PA-RISC requires at least 16k stack */
44/* Be sure to hunt all references to this down when you change the size of 44/* Be sure to hunt all references to this down when you change the size of
45 * the kernel stack */ 45 * the kernel stack */
46#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 46#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 8f1a8100bf2d..5273da991e06 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -22,6 +22,8 @@ extern spinlock_t pa_tlb_lock;
22extern void flush_tlb_all(void); 22extern void flush_tlb_all(void);
23extern void flush_tlb_all_local(void *); 23extern void flush_tlb_all_local(void *);
24 24
25#define smp_flush_tlb_all() flush_tlb_all()
26
25/* 27/*
26 * flush_tlb_mm() 28 * flush_tlb_mm()
27 * 29 *
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 83ded26cad06..65fb4cbc3a0f 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -606,7 +606,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
606 /* Clear using TMPALIAS region. The page doesn't need to 606 /* Clear using TMPALIAS region. The page doesn't need to
607 be flushed but the kernel mapping needs to be purged. */ 607 be flushed but the kernel mapping needs to be purged. */
608 608
609 vto = kmap_atomic(page, KM_USER0); 609 vto = kmap_atomic(page);
610 610
611 /* The PA-RISC 2.0 Architecture book states on page F-6: 611 /* The PA-RISC 2.0 Architecture book states on page F-6:
612 "Before a write-capable translation is enabled, *all* 612 "Before a write-capable translation is enabled, *all*
@@ -641,8 +641,8 @@ void copy_user_highpage(struct page *to, struct page *from,
641 the `to' page must be flushed in copy_user_page_asm since 641 the `to' page must be flushed in copy_user_page_asm since
642 it can be used to bring in executable code. */ 642 it can be used to bring in executable code. */
643 643
644 vfrom = kmap_atomic(from, KM_USER0); 644 vfrom = kmap_atomic(from);
645 vto = kmap_atomic(to, KM_USER1); 645 vto = kmap_atomic(to);
646 646
647 purge_kernel_dcache_page_asm((unsigned long)vto); 647 purge_kernel_dcache_page_asm((unsigned long)vto);
648 purge_tlb_start(flags); 648 purge_tlb_start(flags);
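
This hunk adapts to the kernel-wide removal of the KM_USER0/KM_USER1 slot arguments from kmap_atomic(). A short sketch of the current idiom, assuming a generic page-copy helper rather than the parisc TMPALIAS code above:

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_one_page(struct page *dst, struct page *src)
{
	void *vto = kmap_atomic(dst);
	void *vfrom = kmap_atomic(src);

	memcpy(vto, vfrom, PAGE_SIZE);

	kunmap_atomic(vfrom);		/* unmap in reverse order of mapping */
	kunmap_atomic(vto);
}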
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index f33201bf8977..4bb96ad9b0b1 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -400,7 +400,15 @@
400#if PT_NLEVELS == 3 400#if PT_NLEVELS == 3
401 extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index 401 extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
402#else 402#else
403# if defined(CONFIG_64BIT)
404 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
405 #else
406 # if PAGE_SIZE > 4096
407 extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
408 # else
403 extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index 409 extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
410 # endif
411# endif
404#endif 412#endif
405 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 413 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
406 copy %r0,\pte 414 copy %r0,\pte
@@ -615,7 +623,7 @@
615 623
616 .text 624 .text
617 625
618 .align PAGE_SIZE 626 .align 4096
619 627
620ENTRY(fault_vector_20) 628ENTRY(fault_vector_20)
621 /* First vector is invalid (0) */ 629 /* First vector is invalid (0) */
@@ -825,11 +833,6 @@ ENTRY(syscall_exit_rfi)
825 STREG %r19,PT_SR7(%r16) 833 STREG %r19,PT_SR7(%r16)
826 834
827intr_return: 835intr_return:
828 /* NOTE: Need to enable interrupts incase we schedule. */
829 ssm PSW_SM_I, %r0
830
831intr_check_resched:
832
833 /* check for reschedule */ 836 /* check for reschedule */
834 mfctl %cr30,%r1 837 mfctl %cr30,%r1
835 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ 838 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
@@ -856,6 +859,11 @@ intr_check_sig:
856 LDREG PT_IASQ1(%r16), %r20 859 LDREG PT_IASQ1(%r16), %r20
857 cmpib,COND(=),n 0,%r20,intr_restore /* backward */ 860 cmpib,COND(=),n 0,%r20,intr_restore /* backward */
858 861
862 /* NOTE: We need to enable interrupts if we have to deliver
863 * signals. We used to do this earlier but it caused kernel
864 * stack overflows. */
865 ssm PSW_SM_I, %r0
866
859 copy %r0, %r25 /* long in_syscall = 0 */ 867 copy %r0, %r25 /* long in_syscall = 0 */
860#ifdef CONFIG_64BIT 868#ifdef CONFIG_64BIT
861 ldo -16(%r30),%r29 /* Reference param save area */ 869 ldo -16(%r30),%r29 /* Reference param save area */
@@ -907,6 +915,10 @@ intr_do_resched:
907 cmpib,COND(=) 0, %r20, intr_do_preempt 915 cmpib,COND(=) 0, %r20, intr_do_preempt
908 nop 916 nop
909 917
918 /* NOTE: We need to enable interrupts if we schedule. We used
919 * to do this earlier but it caused kernel stack overflows. */
920 ssm PSW_SM_I, %r0
921
910#ifdef CONFIG_64BIT 922#ifdef CONFIG_64BIT
911 ldo -16(%r30),%r29 /* Reference param save area */ 923 ldo -16(%r30),%r29 /* Reference param save area */
912#endif 924#endif
@@ -1694,7 +1706,8 @@ ENTRY(sys_\name\()_wrapper)
1694 ldo TASK_REGS(%r1),%r1 1706 ldo TASK_REGS(%r1),%r1
1695 reg_save %r1 1707 reg_save %r1
1696 mfctl %cr27, %r28 1708 mfctl %cr27, %r28
1697 b sys_\name 1709 ldil L%sys_\name, %r31
1710 be R%sys_\name(%sr4,%r31)
1698 STREG %r28, PT_CR27(%r1) 1711 STREG %r28, PT_CR27(%r1)
1699ENDPROC(sys_\name\()_wrapper) 1712ENDPROC(sys_\name\()_wrapper)
1700 .endm 1713 .endm
@@ -1997,6 +2010,47 @@ ftrace_stub:
1997ENDPROC(return_to_handler) 2010ENDPROC(return_to_handler)
1998#endif /* CONFIG_FUNCTION_TRACER */ 2011#endif /* CONFIG_FUNCTION_TRACER */
1999 2012
2013#ifdef CONFIG_IRQSTACKS
2014/* void call_on_stack(unsigned long param1, void *func,
2015 unsigned long new_stack) */
2016ENTRY(call_on_stack)
2017 copy %sp, %r1
2018
2019 /* Regarding the HPPA calling conventions for function pointers,
2020 we assume the PIC register is not changed across call. For
2021 CONFIG_64BIT, the argument pointer is left to point at the
2022 argument region allocated for the call to call_on_stack. */
2023# ifdef CONFIG_64BIT
2024 /* Switch to new stack. We allocate two 128 byte frames. */
2025 ldo 256(%arg2), %sp
2026 /* Save previous stack pointer and return pointer in frame marker */
2027 STREG %rp, -144(%sp)
2028 /* Calls always use function descriptor */
2029 LDREG 16(%arg1), %arg1
2030 bve,l (%arg1), %rp
2031 STREG %r1, -136(%sp)
2032 LDREG -144(%sp), %rp
2033 bve (%rp)
2034 LDREG -136(%sp), %sp
2035# else
2036 /* Switch to new stack. We allocate two 64 byte frames. */
2037 ldo 128(%arg2), %sp
2038 /* Save previous stack pointer and return pointer in frame marker */
2039 STREG %r1, -68(%sp)
2040 STREG %rp, -84(%sp)
2041 /* Calls use function descriptor if PLABEL bit is set */
2042 bb,>=,n %arg1, 30, 1f
2043 depwi 0,31,2, %arg1
2044 LDREG 0(%arg1), %arg1
20451:
2046 be,l 0(%sr4,%arg1), %sr0, %r31
2047 copy %r31, %rp
2048 LDREG -84(%sp), %rp
2049 bv (%rp)
2050 LDREG -68(%sp), %sp
2051# endif /* CONFIG_64BIT */
2052ENDPROC(call_on_stack)
2053#endif /* CONFIG_IRQSTACKS */
2000 2054
2001get_register: 2055get_register:
2002 /* 2056 /*
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 5595a2f31181..e158b6fbf1b4 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -55,13 +55,13 @@
55 * IODC requires 7K byte stack. That leaves 1K byte for os_hpmc. 55 * IODC requires 7K byte stack. That leaves 1K byte for os_hpmc.
56 */ 56 */
57 57
58 .align PAGE_SIZE 58 .align 4096
59hpmc_stack: 59hpmc_stack:
60 .block 16384 60 .block 16384
61 61
62#define HPMC_IODC_BUF_SIZE 0x8000 62#define HPMC_IODC_BUF_SIZE 0x8000
63 63
64 .align PAGE_SIZE 64 .align 4096
65hpmc_iodc_buf: 65hpmc_iodc_buf:
66 .block HPMC_IODC_BUF_SIZE 66 .block HPMC_IODC_BUF_SIZE
67 67
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 8094d3ed3b64..e255db0bb761 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -152,6 +152,39 @@ static struct irq_chip cpu_interrupt_type = {
152 .irq_retrigger = NULL, 152 .irq_retrigger = NULL,
153}; 153};
154 154
155DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
156#define irq_stats(x) (&per_cpu(irq_stat, x))
157
158/*
159 * /proc/interrupts printing for arch specific interrupts
160 */
161int arch_show_interrupts(struct seq_file *p, int prec)
162{
163 int j;
164
165#ifdef CONFIG_DEBUG_STACKOVERFLOW
166 seq_printf(p, "%*s: ", prec, "STK");
167 for_each_online_cpu(j)
168 seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
169 seq_printf(p, " Kernel stack usage\n");
170#endif
171#ifdef CONFIG_SMP
172 seq_printf(p, "%*s: ", prec, "RES");
173 for_each_online_cpu(j)
174 seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
175 seq_printf(p, " Rescheduling interrupts\n");
176 seq_printf(p, "%*s: ", prec, "CAL");
177 for_each_online_cpu(j)
178 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
179 seq_printf(p, " Function call interrupts\n");
180#endif
181 seq_printf(p, "%*s: ", prec, "TLB");
182 for_each_online_cpu(j)
183 seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
184 seq_printf(p, " TLB shootdowns\n");
185 return 0;
186}
187
155int show_interrupts(struct seq_file *p, void *v) 188int show_interrupts(struct seq_file *p, void *v)
156{ 189{
157 int i = *(loff_t *) v, j; 190 int i = *(loff_t *) v, j;
@@ -219,6 +252,9 @@ int show_interrupts(struct seq_file *p, void *v)
219 raw_spin_unlock_irqrestore(&desc->lock, flags); 252 raw_spin_unlock_irqrestore(&desc->lock, flags);
220 } 253 }
221 254
255 if (i == NR_IRQS)
256 arch_show_interrupts(p, 3);
257
222 return 0; 258 return 0;
223} 259}
224 260
@@ -330,6 +366,66 @@ static inline int eirr_to_irq(unsigned long eirr)
330 return (BITS_PER_LONG - bit) + TIMER_IRQ; 366 return (BITS_PER_LONG - bit) + TIMER_IRQ;
331} 367}
332 368
369int sysctl_panic_on_stackoverflow = 1;
370
371static inline void stack_overflow_check(struct pt_regs *regs)
372{
373#ifdef CONFIG_DEBUG_STACKOVERFLOW
374 #define STACK_MARGIN (256*6)
375
376 /* Our stack starts directly behind the thread_info struct. */
377 unsigned long stack_start = (unsigned long) current_thread_info();
378 unsigned long sp = regs->gr[30];
379 unsigned long stack_usage;
380 unsigned int *last_usage;
381
382 /* if sr7 != 0, we interrupted a userspace process which we do not want
383 * to check for stack overflow. We will only check the kernel stack. */
384 if (regs->sr[7])
385 return;
386
387 /* calculate kernel stack usage */
388 stack_usage = sp - stack_start;
389 last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
390
391 if (unlikely(stack_usage > *last_usage))
392 *last_usage = stack_usage;
393
394 if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
395 return;
396
397 pr_emerg("stackcheck: %s will most likely overflow kernel stack "
398 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
399 current->comm, sp, stack_start, stack_start + THREAD_SIZE);
400
401 if (sysctl_panic_on_stackoverflow)
402 panic("low stack detected by irq handler - check messages\n");
403#endif
404}
405
406#ifdef CONFIG_IRQSTACKS
407DEFINE_PER_CPU(union irq_stack_union, irq_stack_union);
408
409static void execute_on_irq_stack(void *func, unsigned long param1)
410{
411 unsigned long *irq_stack_start;
412 unsigned long irq_stack;
413 int cpu = smp_processor_id();
414
415 irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0];
416 irq_stack = (unsigned long) irq_stack_start;
417 irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */
418
419 BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */
420 *irq_stack_start = 1;
421
422 /* This is where we switch to the IRQ stack. */
423 call_on_stack(param1, func, irq_stack);
424
425 *irq_stack_start = 0;
426}
427#endif /* CONFIG_IRQSTACKS */
428
333/* ONLY called from entry.S:intr_extint() */ 429/* ONLY called from entry.S:intr_extint() */
334void do_cpu_irq_mask(struct pt_regs *regs) 430void do_cpu_irq_mask(struct pt_regs *regs)
335{ 431{
@@ -364,7 +460,13 @@ void do_cpu_irq_mask(struct pt_regs *regs)
364 goto set_out; 460 goto set_out;
365 } 461 }
366#endif 462#endif
463 stack_overflow_check(regs);
464
465#ifdef CONFIG_IRQSTACKS
466 execute_on_irq_stack(&generic_handle_irq, irq);
467#else
367 generic_handle_irq(irq); 468 generic_handle_irq(irq);
469#endif /* CONFIG_IRQSTACKS */
368 470
369 out: 471 out:
370 irq_exit(); 472 irq_exit();
@@ -420,6 +522,4 @@ void __init init_IRQ(void)
420 cpu_eiem = EIEM_MASK(TIMER_IRQ); 522 cpu_eiem = EIEM_MASK(TIMER_IRQ);
421#endif 523#endif
422 set_eiem(cpu_eiem); /* EIEM : enable all external intr */ 524 set_eiem(cpu_eiem); /* EIEM : enable all external intr */
423
424} 525}
425
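
execute_on_irq_stack() is the C side of the stack switch: it picks the per-cpu IRQ stack, marks it in use to catch recursion, and hands control to call_on_stack from entry.S. The same "run this function on its own dedicated stack" idea can be sketched in userspace with the portable ucontext API; this is purely an analogy, not kernel code:

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define IRQ_STACK_SIZE (4096 << 2)	/* 16 kB, matching processor.h above */

static ucontext_t main_ctx, irq_ctx;

static void handler(void)
{
	int marker;

	printf("handler running near %p on the dedicated stack\n", (void *)&marker);
}

int main(void)
{
	void *stack = malloc(IRQ_STACK_SIZE);

	if (!stack)
		return 1;
	getcontext(&irq_ctx);
	irq_ctx.uc_stack.ss_sp = stack;
	irq_ctx.uc_stack.ss_size = IRQ_STACK_SIZE;
	irq_ctx.uc_link = &main_ctx;		/* resume here when handler() returns */
	makecontext(&irq_ctx, handler, 0);

	swapcontext(&main_ctx, &irq_ctx);	/* roughly what call_on_stack() does */
	printf("back on the original stack\n");
	free(stack);
	return 0;
}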
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 312b48422a56..5e1de6072be5 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -563,6 +563,15 @@ ENDPROC(copy_page_asm)
563 * %r23 physical page (shifted for tlb insert) of "from" translation 563 * %r23 physical page (shifted for tlb insert) of "from" translation
564 */ 564 */
565 565
566 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
567 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
568 .macro convert_phys_for_tlb_insert20 phys
569 extrd,u \phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
570#if _PAGE_SIZE_ENCODING_DEFAULT
571 depdi _PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
572#endif
573 .endm
574
566 /* 575 /*
567 * We can't do this since copy_user_page is used to bring in 576 * We can't do this since copy_user_page is used to bring in
568 * file data that might have instructions. Since the data would 577 * file data that might have instructions. Since the data would
@@ -589,15 +598,14 @@ ENTRY(copy_user_page_asm)
589 sub %r25, %r1, %r23 598 sub %r25, %r1, %r23
590 599
591 ldil L%(TMPALIAS_MAP_START), %r28 600 ldil L%(TMPALIAS_MAP_START), %r28
592 /* FIXME for different page sizes != 4k */
593#ifdef CONFIG_64BIT 601#ifdef CONFIG_64BIT
594#if (TMPALIAS_MAP_START >= 0x80000000) 602#if (TMPALIAS_MAP_START >= 0x80000000)
595 depdi 0, 31,32, %r28 /* clear any sign extension */ 603 depdi 0, 31,32, %r28 /* clear any sign extension */
596#endif 604#endif
597 extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */ 605 convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
598 extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */ 606 convert_phys_for_tlb_insert20 %r23 /* convert phys addr to tlb insert format */
599 depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */ 607 depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
600 depdi 0, 63,12, %r28 /* Clear any offset bits */ 608 depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
601 copy %r28, %r29 609 copy %r28, %r29
602 depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */ 610 depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */
603#else 611#else
@@ -747,11 +755,10 @@ ENTRY(clear_user_page_asm)
747#ifdef CONFIG_64BIT 755#ifdef CONFIG_64BIT
748#if (TMPALIAS_MAP_START >= 0x80000000) 756#if (TMPALIAS_MAP_START >= 0x80000000)
749 depdi 0, 31,32, %r28 /* clear any sign extension */ 757 depdi 0, 31,32, %r28 /* clear any sign extension */
750 /* FIXME: page size dependend */
751#endif 758#endif
752 extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */ 759 convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
753 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ 760 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
754 depdi 0, 63,12, %r28 /* Clear any offset bits */ 761 depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
755#else 762#else
756 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 763 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
757 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ 764 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
@@ -832,11 +839,10 @@ ENTRY(flush_dcache_page_asm)
832#ifdef CONFIG_64BIT 839#ifdef CONFIG_64BIT
833#if (TMPALIAS_MAP_START >= 0x80000000) 840#if (TMPALIAS_MAP_START >= 0x80000000)
834 depdi 0, 31,32, %r28 /* clear any sign extension */ 841 depdi 0, 31,32, %r28 /* clear any sign extension */
835 /* FIXME: page size dependend */
836#endif 842#endif
837 extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */ 843 convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
838 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ 844 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
839 depdi 0, 63,12, %r28 /* Clear any offset bits */ 845 depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
840#else 846#else
841 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 847 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
842 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ 848 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
@@ -909,11 +915,10 @@ ENTRY(flush_icache_page_asm)
909#ifdef CONFIG_64BIT 915#ifdef CONFIG_64BIT
910#if (TMPALIAS_MAP_START >= 0x80000000) 916#if (TMPALIAS_MAP_START >= 0x80000000)
911 depdi 0, 31,32, %r28 /* clear any sign extension */ 917 depdi 0, 31,32, %r28 /* clear any sign extension */
912 /* FIXME: page size dependend */
913#endif 918#endif
914 extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */ 919 convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
915 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ 920 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
916 depdi 0, 63,12, %r28 /* Clear any offset bits */ 921 depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
917#else 922#else
918 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 923 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
919 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ 924 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
@@ -959,7 +964,7 @@ ENTRY(flush_icache_page_asm)
959 fic,m %r1(%sr4,%r28) 964 fic,m %r1(%sr4,%r28)
960 fic,m %r1(%sr4,%r28) 965 fic,m %r1(%sr4,%r28)
961 fic,m %r1(%sr4,%r28) 966 fic,m %r1(%sr4,%r28)
962 cmpb,COND(<<) %r28, %r25,1b 967 cmpb,COND(<<) %r28, %r25,1b
963 fic,m %r1(%sr4,%r28) 968 fic,m %r1(%sr4,%r28)
964 969
965 sync 970 sync
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index a3328c2616b0..76b63e726a53 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -129,6 +129,8 @@ void __init setup_arch(char **cmdline_p)
129 printk(KERN_INFO "The 32-bit Kernel has started...\n"); 129 printk(KERN_INFO "The 32-bit Kernel has started...\n");
130#endif 130#endif
131 131
132 printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024));
133
132 pdc_console_init(); 134 pdc_console_init();
133 135
134#ifdef CONFIG_64BIT 136#ifdef CONFIG_64BIT
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index fd1bb1519c2b..e3614fb343e5 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -127,7 +127,7 @@ ipi_interrupt(int irq, void *dev_id)
127 unsigned long flags; 127 unsigned long flags;
128 128
129 /* Count this now; we may make a call that never returns. */ 129 /* Count this now; we may make a call that never returns. */
130 p->ipi_count++; 130 inc_irq_stat(irq_call_count);
131 131
132 mb(); /* Order interrupt and bit testing. */ 132 mb(); /* Order interrupt and bit testing. */
133 133
@@ -155,6 +155,7 @@ ipi_interrupt(int irq, void *dev_id)
155 155
156 case IPI_RESCHEDULE: 156 case IPI_RESCHEDULE:
157 smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu); 157 smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
158 inc_irq_stat(irq_resched_count);
158 scheduler_ipi(); 159 scheduler_ipi();
159 break; 160 break;
160 161
@@ -263,17 +264,6 @@ void arch_send_call_function_single_ipi(int cpu)
263} 264}
264 265
265/* 266/*
266 * Flush all other CPU's tlb and then mine. Do this with on_each_cpu()
267 * as we want to ensure all TLB's flushed before proceeding.
268 */
269
270void
271smp_flush_tlb_all(void)
272{
273 on_each_cpu(flush_tlb_all_local, NULL, 1);
274}
275
276/*
277 * Called by secondaries to update state and initialize CPU registers. 267 * Called by secondaries to update state and initialize CPU registers.
278 */ 268 */
279static void __init 269static void __init
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 5e055240f00b..e767ab733e32 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -1,12 +1,35 @@
1/* 1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/) 2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 * 3 *
4 * System call entry code Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai> 4 * System call entry code / Linux gateway page
5 * Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
5 * Licensed under the GNU GPL. 6 * Licensed under the GNU GPL.
6 * thanks to Philipp Rumpf, Mike Shaver and various others 7 * thanks to Philipp Rumpf, Mike Shaver and various others
7 * sorry about the wall, puffin.. 8 * sorry about the wall, puffin..
8 */ 9 */
9 10
11/*
12How does the Linux gateway page on PA-RISC work?
13------------------------------------------------
14The Linux gateway page on PA-RISC is "special".
15It actually has PAGE_GATEWAY bits set (this is linux terminology; in parisc
16terminology it's Execute, promote to PL0) in the page map. So anything
17executing on this page executes with kernel level privilege (there's more to it
18than that: to have this happen, you also have to use a branch with a ,gate
19completer to activate the privilege promotion). The upshot is that everything
20that runs on the gateway page runs at kernel privilege but with the current
21user process address space (although you have access to kernel space via %sr2).
22For the 0x100 syscall entry, we redo the space registers to point to the kernel
23address space (preserving the user address space in %sr3), move to wide mode if
24required, save the user registers and branch into the kernel syscall entry
25point. For all the other functions, we execute at kernel privilege but don't
26flip address spaces. The basic upshot of this is that these code snippets are
27executed atomically (because the kernel can't be pre-empted) and they may
28perform architecturally forbidden (to PL3) operations (like setting control
29registers).
30*/
31
32
10#include <asm/asm-offsets.h> 33#include <asm/asm-offsets.h>
11#include <asm/unistd.h> 34#include <asm/unistd.h>
12#include <asm/errno.h> 35#include <asm/errno.h>
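
A trivial way to exercise the path described in the comment above on a parisc box: every system call goes through the gateway page, since the libc stub loads the syscall number and takes the ,gate-completed branch to the 0x100 entry point. Plain portable C is enough to demonstrate it; nothing in the source is parisc-specific:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long pid = syscall(SYS_getpid);		/* enters the kernel via the gateway page */

	printf("getpid() returned %ld\n", pid);
	return 0;
}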
@@ -15,6 +38,7 @@
15#include <asm/thread_info.h> 38#include <asm/thread_info.h>
16#include <asm/assembly.h> 39#include <asm/assembly.h>
17#include <asm/processor.h> 40#include <asm/processor.h>
41#include <asm/cache.h>
18 42
19#include <linux/linkage.h> 43#include <linux/linkage.h>
20 44
@@ -643,7 +667,7 @@ ENTRY(end_linux_gateway_page)
643 667
644 .section .rodata,"a" 668 .section .rodata,"a"
645 669
646 .align PAGE_SIZE 670 .align 8
647 /* Light-weight-syscall table */ 671 /* Light-weight-syscall table */
648 /* Start of lws table. */ 672 /* Start of lws table. */
649ENTRY(lws_table) 673ENTRY(lws_table)
@@ -652,13 +676,13 @@ ENTRY(lws_table)
652END(lws_table) 676END(lws_table)
653 /* End of lws table */ 677 /* End of lws table */
654 678
655 .align PAGE_SIZE 679 .align 8
656ENTRY(sys_call_table) 680ENTRY(sys_call_table)
657#include "syscall_table.S" 681#include "syscall_table.S"
658END(sys_call_table) 682END(sys_call_table)
659 683
660#ifdef CONFIG_64BIT 684#ifdef CONFIG_64BIT
661 .align PAGE_SIZE 685 .align 8
662ENTRY(sys_call_table64) 686ENTRY(sys_call_table64)
663#define SYSCALL_TABLE_64BIT 687#define SYSCALL_TABLE_64BIT
664#include "syscall_table.S" 688#include "syscall_table.S"
@@ -674,7 +698,7 @@ END(sys_call_table64)
674 with ldcw. 698 with ldcw.
675 */ 699 */
676 .section .data 700 .section .data
677 .align PAGE_SIZE 701 .align L1_CACHE_BYTES
678ENTRY(lws_lock_start) 702ENTRY(lws_lock_start)
679 /* lws locks */ 703 /* lws locks */
680 .rept 16 704 .rept 16
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index f702bff0bed9..fe41a98043bb 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -522,10 +522,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
522 */ 522 */
523 if (((unsigned long)regs->iaoq[0] & 3) && 523 if (((unsigned long)regs->iaoq[0] & 3) &&
524 ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { 524 ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
525 /* Kill the user process later */ 525 /* Kill the user process later */
526 regs->iaoq[0] = 0 | 3; 526 regs->iaoq[0] = 0 | 3;
527 regs->iaoq[1] = regs->iaoq[0] + 4; 527 regs->iaoq[1] = regs->iaoq[0] + 4;
528 regs->iasq[0] = regs->iasq[1] = regs->sr[7]; 528 regs->iasq[0] = regs->iasq[1] = regs->sr[7];
529 regs->gr[0] &= ~PSW_B; 529 regs->gr[0] &= ~PSW_B;
530 return; 530 return;
531 } 531 }
@@ -541,8 +541,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
541 541
542 /* set up a new led state on systems shipped with a LED State panel */ 542 /* set up a new led state on systems shipped with a LED State panel */
543 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC); 543 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
544 544
545 parisc_terminate("High Priority Machine Check (HPMC)", 545 parisc_terminate("High Priority Machine Check (HPMC)",
546 regs, code, 0); 546 regs, code, 0);
547 /* NOT REACHED */ 547 /* NOT REACHED */
548 548
@@ -584,13 +584,13 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
584 /* Break instruction trap */ 584 /* Break instruction trap */
585 handle_break(regs); 585 handle_break(regs);
586 return; 586 return;
587 587
588 case 10: 588 case 10:
589 /* Privileged operation trap */ 589 /* Privileged operation trap */
590 die_if_kernel("Privileged operation", regs, code); 590 die_if_kernel("Privileged operation", regs, code);
591 si.si_code = ILL_PRVOPC; 591 si.si_code = ILL_PRVOPC;
592 goto give_sigill; 592 goto give_sigill;
593 593
594 case 11: 594 case 11:
595 /* Privileged register trap */ 595 /* Privileged register trap */
596 if ((regs->iir & 0xffdfffe0) == 0x034008a0) { 596 if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
@@ -634,7 +634,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
634 if(user_mode(regs)){ 634 if(user_mode(regs)){
635 si.si_signo = SIGFPE; 635 si.si_signo = SIGFPE;
636 /* Set to zero, and let the userspace app figure it out from 636 /* Set to zero, and let the userspace app figure it out from
637 the insn pointed to by si_addr */ 637 the insn pointed to by si_addr */
638 si.si_code = 0; 638 si.si_code = 0;
639 si.si_addr = (void __user *) regs->iaoq[0]; 639 si.si_addr = (void __user *) regs->iaoq[0];
640 force_sig_info(SIGFPE, &si, current); 640 force_sig_info(SIGFPE, &si, current);
@@ -648,7 +648,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
648 die_if_kernel("Floating point exception", regs, 0); /* quiet */ 648 die_if_kernel("Floating point exception", regs, 0); /* quiet */
649 handle_fpe(regs); 649 handle_fpe(regs);
650 return; 650 return;
651 651
652 case 15: 652 case 15:
653 /* Data TLB miss fault/Data page fault */ 653 /* Data TLB miss fault/Data page fault */
654 /* Fall through */ 654 /* Fall through */
@@ -660,15 +660,15 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
660 case 17: 660 case 17:
661 /* Non-access data TLB miss fault/Non-access data page fault */ 661 /* Non-access data TLB miss fault/Non-access data page fault */
662 /* FIXME: 662 /* FIXME:
663 Still need to add slow path emulation code here! 663 Still need to add slow path emulation code here!
664 If the insn used a non-shadow register, then the tlb 664 If the insn used a non-shadow register, then the tlb
665 handlers could not have their side-effect (e.g. probe 665 handlers could not have their side-effect (e.g. probe
666 writing to a target register) emulated since rfir would 666 writing to a target register) emulated since rfir would
667 erase the changes to said register. Instead we have to 667 erase the changes to said register. Instead we have to
668 setup everything, call this function we are in, and emulate 668 setup everything, call this function we are in, and emulate
669 by hand. Technically we need to emulate: 669 by hand. Technically we need to emulate:
670 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw 670 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
671 */ 671 */
672 fault_address = regs->ior; 672 fault_address = regs->ior;
673 fault_space = regs->isr; 673 fault_space = regs->isr;
674 break; 674 break;
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 64a999882e4f..4bb095a2f6fc 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -95,7 +95,7 @@ SECTIONS
95 NOTES 95 NOTES
96 96
97 /* Data */ 97 /* Data */
98 RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) 98 RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE)
99 99
100 /* PA-RISC locks requires 16-byte alignment */ 100 /* PA-RISC locks requires 16-byte alignment */
101 . = ALIGN(16); 101 . = ALIGN(16);
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 157b931e7b09..ce939ac8622b 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -1069,6 +1069,7 @@ void flush_tlb_all(void)
1069{ 1069{
1070 int do_recycle; 1070 int do_recycle;
1071 1071
1072 inc_irq_stat(irq_tlb_count);
1072 do_recycle = 0; 1073 do_recycle = 0;
1073 spin_lock(&sid_lock); 1074 spin_lock(&sid_lock);
1074 if (dirty_space_ids > RECYCLE_THRESHOLD) { 1075 if (dirty_space_ids > RECYCLE_THRESHOLD) {
@@ -1089,6 +1090,7 @@ void flush_tlb_all(void)
1089#else 1090#else
1090void flush_tlb_all(void) 1091void flush_tlb_all(void)
1091{ 1092{
1093 inc_irq_stat(irq_tlb_count);
1092 spin_lock(&sid_lock); 1094 spin_lock(&sid_lock);
1093 flush_tlb_all_local(NULL); 1095 flush_tlb_all_local(NULL);
1094 recycle_sids(); 1096 recycle_sids();
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 42cfcd9eb9aa..1ff1b67e8b27 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -575,7 +575,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
575 575
576 mtsp(sid,1); 576 mtsp(sid,1);
577 asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba)); 577 asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
578 pa |= (ci >> 12) & 0xff; /* move CI (8 bits) into lowest byte */ 578 pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
579 579
580 pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */ 580 pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
581 *pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */ 581 *pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
@@ -1376,7 +1376,7 @@ static void
1376sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num) 1376sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1377{ 1377{
1378 u32 iova_space_size, iova_space_mask; 1378 u32 iova_space_size, iova_space_mask;
1379 unsigned int pdir_size, iov_order; 1379 unsigned int pdir_size, iov_order, tcnfg;
1380 1380
1381 /* 1381 /*
1382 ** Determine IOVA Space size from memory size. 1382 ** Determine IOVA Space size from memory size.
@@ -1468,8 +1468,19 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1468 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE); 1468 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
1469 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK); 1469 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
1470 1470
1471 /* Set I/O PDIR Page size to 4K */ 1471 /* Set I/O PDIR Page size to system page size */
1472 WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG); 1472 switch (PAGE_SHIFT) {
1473 case 12: tcnfg = 0; break; /* 4K */
1474 case 13: tcnfg = 1; break; /* 8K */
1475 case 14: tcnfg = 2; break; /* 16K */
1476 case 16: tcnfg = 3; break; /* 64K */
1477 default:
1478 panic(__FILE__ "Unsupported system page size %d",
1479 1 << PAGE_SHIFT);
1480 break;
1481 }
1482 /* Set I/O PDIR Page size to PAGE_SIZE (4k/16k/...) */
1483 WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);
1473 1484
1474 /* 1485 /*
1475 ** Clear I/O TLB of any possible entries. 1486 ** Clear I/O TLB of any possible entries.
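
For reference, the TCNFG encoding selected by the switch above, restated as a tiny standalone helper (same values: 4K maps to 0, 8K to 1, 16K to 2, 64K to 3; any other page size is rejected, which is what the panic() covers in the driver):

/* PAGE_SHIFT -> IOC_TCNFG encoding used by the SBA IOMMU */
static int page_shift_to_tcnfg(int page_shift)
{
	switch (page_shift) {
	case 12: return 0;	/* 4 KB  */
	case 13: return 1;	/* 8 KB  */
	case 14: return 2;	/* 16 KB */
	case 16: return 3;	/* 64 KB */
	default: return -1;	/* unsupported page size */
	}
}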