Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/include/asm/sections.h       |    3
-rw-r--r--  arch/ia64/kernel/head.S                |    9
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S         |    8
-rw-r--r--  arch/ia64/mm/contig.c                  |    2
-rw-r--r--  arch/ia64/mm/discontig.c               |    2
-rw-r--r--  arch/mips/Kconfig                      |   53
-rw-r--r--  arch/mips/kernel/Makefile              |    1
-rw-r--r--  arch/mips/kernel/cevt-r4k.c            |  173
-rw-r--r--  arch/mips/kernel/cevt-smtc.c           |  321
-rw-r--r--  arch/mips/kernel/cpu-probe.c           |   10
-rw-r--r--  arch/mips/kernel/entry.S               |   10
-rw-r--r--  arch/mips/kernel/genex.S               |    4
-rw-r--r--  arch/mips/kernel/head.S                |    1
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c       |    2
-rw-r--r--  arch/mips/kernel/process.c             |   19
-rw-r--r--  arch/mips/kernel/ptrace.c              |    2
-rw-r--r--  arch/mips/kernel/smtc.c                |  260
-rw-r--r--  arch/mips/kernel/traps.c               |    6
-rw-r--r--  arch/mips/mti-malta/Makefile           |    2
-rw-r--r--  arch/mips/mti-malta/malta-smtc.c       |    9
-rw-r--r--  arch/mips/sibyte/swarm/Makefile        |    3
-rw-r--r--  arch/mips/sibyte/swarm/platform.c      |   85
-rw-r--r--  arch/mn10300/kernel/irq.c              |   71
-rw-r--r--  arch/mn10300/unit-asb2303/unit-init.c  |    2
-rw-r--r--  arch/mn10300/unit-asb2305/unit-init.c  |    2
-rw-r--r--  arch/powerpc/boot/dts/holly.dts        |  106
-rw-r--r--  arch/powerpc/kernel/idle.c             |    6
-rw-r--r--  arch/powerpc/platforms/fsl_uli1575.c   |   12
-rw-r--r--  arch/s390/kernel/time.c                |    2
-rw-r--r--  arch/s390/lib/delay.c                  |   88
-rw-r--r--  arch/x86/boot/compressed/relocs.c      |    2
-rw-r--r--  arch/x86/kernel/acpi/boot.c            |   16
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c        |    2
-rw-r--r--  arch/x86/kernel/kgdb.c                 |    7
-rw-r--r--  arch/x86/kernel/pci-gart_64.c          |   20
-rw-r--r--  arch/x86/kernel/vmi_32.c               |    2
36 files changed, 866 insertions(+), 457 deletions(-)
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
index f66799891036..1a873b36a4a1 100644
--- a/arch/ia64/include/asm/sections.h
+++ b/arch/ia64/include/asm/sections.h
@@ -11,6 +11,9 @@
 #include <asm-generic/sections.h>
 
 extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
+#ifdef CONFIG_SMP
+extern char __cpu0_per_cpu[];
+#endif
 extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
 extern char __start___rse_patchlist[], __end___rse_patchlist[];
 extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 8bdea8eb62e3..66e491d8baac 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -367,16 +367,17 @@ start_ap:
         ;;
 #else
 (isAP)  br.few 2f
-        mov r20=r19
-        sub r19=r19,r18
+        movl r20=__cpu0_per_cpu
         ;;
         shr.u r18=r18,3
 1:
-        ld8 r21=[r20],8;;
-        st8 [r19]=r21,8
+        ld8 r21=[r19],8;;
+        st8 [r20]=r21,8
         adds r18=-1,r18;;
         cmp4.lt p7,p6=0,r18
 (p7)    br.cond.dptk.few 1b
+        mov r19=r20
+        ;;
 2:
 #endif
         tpa r19=r19
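Reviewer note: the rewritten start_ap sequence is easier to check against a C rendering. A minimal sketch of what the ld8/st8 loop now does, assuming r19 enters pointing at the linked per-cpu prototype area; the helper name is hypothetical:

    /* Copy the per-cpu prototype into the __cpu0_per_cpu area reserved
     * in .data (see the vmlinux.lds.S hunk below); cpu0 then uses the
     * copy as its per-cpu base, which is what "mov r19=r20" expresses. */
    static void copy_cpu0_per_cpu_area(void)
    {
            extern char __per_cpu_start[], __per_cpu_end[], __cpu0_per_cpu[];
            unsigned long *src = (unsigned long *)__per_cpu_start;  /* r19 */
            unsigned long *dst = (unsigned long *)__cpu0_per_cpu;   /* r20 */
            long words = (__per_cpu_end - __per_cpu_start) / 8;     /* shr.u r18,3 */

            while (words-- > 0)
                    *dst++ = *src++;                 /* ld8 / st8 pair */
    }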
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index de71da811cd6..10a7d47e8510 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -215,9 +215,6 @@ SECTIONS
   /* Per-cpu data: */
   percpu : { } :percpu
   . = ALIGN(PERCPU_PAGE_SIZE);
-#ifdef CONFIG_SMP
-  . = . + PERCPU_PAGE_SIZE;     /* cpu0 per-cpu space */
-#endif
   __phys_per_cpu_start = .;
   .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
         {
@@ -233,6 +230,11 @@ SECTIONS
   data : { } :data
   .data : AT(ADDR(.data) - LOAD_OFFSET)
         {
+#ifdef CONFIG_SMP
+        . = ALIGN(PERCPU_PAGE_SIZE);
+        __cpu0_per_cpu = .;
+        . = . + PERCPU_PAGE_SIZE;       /* cpu0 per-cpu space */
+#endif
         DATA_DATA
         *(.data1)
         *(.gnu.linkonce.d*)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index e566ff43884a..0ee085efbe29 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -163,7 +163,7 @@ per_cpu_init (void)
          * get_zeroed_page().
          */
         if (first_time) {
-                void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+                void *cpu0_data = __cpu0_per_cpu;
 
                 first_time=0;
 
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 78026aabaa7f..d8c5fcd89e5b 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -144,7 +144,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 
         for_each_possible_early_cpu(cpu) {
                 if (cpu == 0) {
-                        void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+                        void *cpu0_data = __cpu0_per_cpu;
                         __per_cpu_offset[cpu] = (char*)cpu0_data -
                                 __per_cpu_start;
                 } else if (node == node_cpuid[cpu].nid) {
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 49896a2a1d72..1e06d233fa83 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -211,6 +211,7 @@ config MIPS_MALTA
         select SYS_SUPPORTS_64BIT_KERNEL
         select SYS_SUPPORTS_BIG_ENDIAN
         select SYS_SUPPORTS_LITTLE_ENDIAN
+        select SYS_SUPPORTS_MIPS_CMP if BROKEN  # because SYNC_R4K is broken
         select SYS_SUPPORTS_MULTITHREADING
         select SYS_SUPPORTS_SMARTMIPS
         help
@@ -1403,7 +1404,6 @@ config MIPS_MT_SMTC
         depends on CPU_MIPS32_R2
         #depends on CPU_MIPS64_R2              # once there is hardware ...
         depends on SYS_SUPPORTS_MULTITHREADING
-        select GENERIC_CLOCKEVENTS_BROADCAST
         select CPU_MIPSR2_IRQ_VI
         select CPU_MIPSR2_IRQ_EI
         select MIPS_MT
@@ -1451,32 +1451,17 @@ config MIPS_VPE_LOADER
           Includes a loader for loading an elf relocatable object
           onto another VPE and running it.
 
-config MIPS_MT_SMTC_INSTANT_REPLAY
-        bool "Low-latency Dispatch of Deferred SMTC IPIs"
-        depends on MIPS_MT_SMTC && !PREEMPT
-        default y
-        help
-          SMTC pseudo-interrupts between TCs are deferred and queued
-          if the target TC is interrupt-inhibited (IXMT). In the first
-          SMTC prototypes, these queued IPIs were serviced on return
-          to user mode, or on entry into the kernel idle loop. The
-          INSTANT_REPLAY option dispatches them as part of local_irq_restore()
-          processing, which adds runtime overhead (hence the option to turn
-          it off), but ensures that IPIs are handled promptly even under
-          heavy I/O interrupt load.
-
 config MIPS_MT_SMTC_IM_BACKSTOP
         bool "Use per-TC register bits as backstop for inhibited IM bits"
         depends on MIPS_MT_SMTC
-        default y
+        default n
         help
           To support multiple TC microthreads acting as "CPUs" within
           a VPE, VPE-wide interrupt mask bits must be specially manipulated
           during interrupt handling. To support legacy drivers and interrupt
           controller management code, SMTC has a "backstop" to track and
           if necessary restore the interrupt mask. This has some performance
-          impact on interrupt service overhead. Disable it only if you know
-          what you are doing.
+          impact on interrupt service overhead.
 
@@ -1486,10 +1471,8 @@ config MIPS_MT_SMTC_IRQAFF
           Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
           for SMTC Linux kernel. Requires platform support, of which
           an example can be found in the MIPS kernel i8259 and Malta
-          platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
-          be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to
-          interrupt dispatch, and should be used only if you know what
-          you are doing.
+          platform code. Adds some overhead to interrupt dispatch, and
+          should be used only if you know what you are doing.
 
 config MIPS_VPE_LOADER_TOM
         bool "Load VPE program into memory hidden from linux"
@@ -1517,6 +1500,18 @@ config MIPS_APSP_KSPD
1517 "exit" syscall notifying other kernel modules the SP program is 1500 "exit" syscall notifying other kernel modules the SP program is
1518 exiting. You probably want to say yes here. 1501 exiting. You probably want to say yes here.
1519 1502
1503config MIPS_CMP
1504 bool "MIPS CMP framework support"
1505 depends on SYS_SUPPORTS_MIPS_CMP
1506 select SYNC_R4K if BROKEN
1507 select SYS_SUPPORTS_SMP
1508 select SYS_SUPPORTS_SCHED_SMT if SMP
1509 select WEAK_ORDERING
1510 default n
1511 help
1512 This is a placeholder option for the GCMP work. It will need to
1513 be handled differently...
1514
1520config SB1_PASS_1_WORKAROUNDS 1515config SB1_PASS_1_WORKAROUNDS
1521 bool 1516 bool
1522 depends on CPU_SB1_PASS_1 1517 depends on CPU_SB1_PASS_1
@@ -1693,6 +1688,9 @@ config SMP
 config SMP_UP
         bool
 
+config SYS_SUPPORTS_MIPS_CMP
+        bool
+
 config SYS_SUPPORTS_SMP
         bool
 
@@ -1740,17 +1738,6 @@ config NR_CPUS
           performance should round up your number of processors to the next
           power of two.
 
-config MIPS_CMP
-        bool "MIPS CMP framework support"
-        depends on SMP
-        select SYNC_R4K
-        select SYS_SUPPORTS_SCHED_SMT
-        select WEAK_ORDERING
-        default n
-        help
-          This is a placeholder option for the GCMP work. It will need to
-          be handled differently...
-
 source "kernel/time/Kconfig"
 
 #
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 706f93974797..25775cb54000 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 
 obj-$(CONFIG_CEVT_BCM1480)      += cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)          += cevt-r4k.o
+obj-$(CONFIG_MIPS_MT_SMTC)      += cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)       += cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)      += cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)       += cevt-sb1250.o
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0d..4a4c59f2737a 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,6 +12,14 @@
 
 #include <asm/smtc_ipi.h>
 #include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * The SMTC Kernel for the 34K, 1004K, et al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+
+#ifndef CONFIG_MIPS_MT_SMTC
 
 static int mips_next_event(unsigned long delta,
                            struct clock_event_device *evt)
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
         unsigned int cnt;
         int res;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-        {
-        unsigned long flags, vpflags;
-        local_irq_save(flags);
-        vpflags = dvpe();
-#endif
         cnt = read_c0_count();
         cnt += delta;
         write_c0_compare(cnt);
         res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
-        evpe(vpflags);
-        local_irq_restore(flags);
-        }
-#endif
         return res;
 }
 
-static void mips_set_mode(enum clock_event_mode mode,
-                          struct clock_event_device *evt)
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+                         struct clock_event_device *evt)
 {
         /* Nothing to do ...  */
 }
 
-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
 
-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
-        write_c0_compare(read_c0_compare());
-}
+#ifndef CONFIG_MIPS_MT_SMTC
 
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
-        /*
-         * The performance counter overflow interrupt may be shared with the
-         * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-         * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-         * and we can't reliably determine if a counter interrupt has also
-         * happened (!r2) then don't check for a timer interrupt.
-         */
-        return (cp0_perfcount_irq < 0) &&
-                perf_irq() == IRQ_HANDLED &&
-                !r2;
-}
-
-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
         const int r2 = cpu_has_mips_r2;
         struct clock_event_device *cd;
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
          * interrupt. Being the paranoiacs we are we check anyway.
          */
         if (!r2 || (read_c0_cause() & (1 << 30))) {
-                c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
-                if (cpu_data[cpu].vpe_id)
-                        goto out;
-                cpu = 0;
-#endif
+                /* Clear Count/Compare Interrupt */
+                write_c0_compare(read_c0_compare());
                 cd = &per_cpu(mips_clockevent_device, cpu);
                 cd->event_handler(cd);
         }
@@ -107,65 +78,16 @@ out:
         return IRQ_HANDLED;
 }
 
-static struct irqaction c0_compare_irqaction = {
+#endif /* Not CONFIG_MIPS_MT_SMTC */
+
+struct irqaction c0_compare_irqaction = {
         .handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
-        .flags = IRQF_DISABLED,
-#else
         .flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
         .name = "timer",
 };
 
-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
-                          struct clock_event_device *evt)
-{
-}
-
-static void mips_broadcast(cpumask_t mask)
-{
-        unsigned int cpu;
-
-        for_each_cpu_mask(cpu, mask)
-                smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
-        //uint64_t mips_freq = mips_hpt_frequency;
-        unsigned int cpu = smp_processor_id();
-        struct clock_event_device *cd;
 
-        cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
-        cd->name                = "SMTC";
-        cd->features            = CLOCK_EVT_FEAT_DUMMY;
-
-        /* Calculate the min / max delta */
-        cd->mult                = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
-        cd->shift               = 0; //32;
-        cd->max_delta_ns        = 0; //clockevent_delta2ns(0x7fffffff, cd);
-        cd->min_delta_ns        = 0; //clockevent_delta2ns(0x30, cd);
-
-        cd->rating              = 200;
-        cd->irq                 = 17; //-1;
-//      if (cpu)
-//              cd->cpumask     = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-//      else
-                cd->cpumask     = cpumask_of_cpu(cpu);
-
-        cd->set_mode            = smtc_set_mode;
-
-        cd->broadcast           = mips_broadcast;
-
-        clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
+void mips_event_handler(struct clock_event_device *dev)
 {
 }
 
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
         return (read_c0_cause() >> cp0_compare_irq) & 0x100;
 }
 
-static int c0_compare_int_usable(void)
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register. 4 times the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
+ */
+
+#define compare_change_hazard() \
+        do { \
+                irq_disable_hazard(); \
+                irq_disable_hazard(); \
+                irq_disable_hazard(); \
+                irq_disable_hazard(); \
+        } while (0)
+
+int c0_compare_int_usable(void)
 {
         unsigned int delta;
         unsigned int cnt;
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
          */
         if (c0_compare_int_pending()) {
                 write_c0_compare(read_c0_count());
-                irq_disable_hazard();
+                compare_change_hazard();
                 if (c0_compare_int_pending())
                         return 0;
         }
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
                 cnt = read_c0_count();
                 cnt += delta;
                 write_c0_compare(cnt);
-                irq_disable_hazard();
+                compare_change_hazard();
                 if ((int)(read_c0_count() - cnt) < 0)
                         break;
                 /* increase delta if the timer was already expired */
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
         while ((int)(read_c0_count() - cnt) <= 0)
                 ;       /* Wait for expiry  */
 
+        compare_change_hazard();
         if (!c0_compare_int_pending())
                 return 0;
 
         write_c0_compare(read_c0_count());
-        irq_disable_hazard();
+        compare_change_hazard();
         if (c0_compare_int_pending())
                 return 0;
 
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
         return 1;
 }
 
+#ifndef CONFIG_MIPS_MT_SMTC
+
 int __cpuinit mips_clockevent_init(void)
 {
         uint64_t mips_freq = mips_hpt_frequency;
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
         if (!cpu_has_counter || !mips_hpt_frequency)
                 return -ENXIO;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-        setup_smtc_dummy_clockevent_device();
-
-        /*
-         * On SMTC we only register VPE0's compare interrupt as clockevent
-         * device.
-         */
-        if (cpu)
-                return 0;
-#endif
-
         if (!c0_compare_int_usable())
                 return -ENXIO;
 
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
 
         cd->rating              = 300;
         cd->irq                 = irq;
-#ifdef CONFIG_MIPS_MT_SMTC
-        cd->cpumask             = CPU_MASK_ALL;
-#else
         cd->cpumask             = cpumask_of_cpu(cpu);
-#endif
         cd->set_next_event      = mips_next_event;
-        cd->set_mode            = mips_set_mode;
+        cd->set_mode            = mips_set_clock_mode;
         cd->event_handler       = mips_event_handler;
 
         clockevents_register_device(cd);
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)
 
         cp0_timer_irq_installed = 1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
-        setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
         setup_irq(irq, &c0_compare_irqaction);
-#endif
 
         return 0;
 }
+
+#endif /* Not CONFIG_MIPS_MT_SMTC */
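Reviewer note: a recurring idiom in cevt-r4k.c is `(int)(read_c0_count() - cnt) > 0`. Count is a free-running 32-bit counter, so expiry must be judged on the signed difference, never on the raw values. A standalone illustration in plain C, with made-up values (not from the patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t now = 0xfffffff0u;   /* counter just below wraparound */
            uint32_t cnt = now + 0x20;    /* target 0x20 ticks later, wraps to 0x10 */

            /* A raw comparison gets the ordering wrong across the wrap... */
            printf("raw:    %d\n", now > cnt);                  /* 1: "expired" */
            /* ...the signed difference, as used above, is wrap-safe. */
            printf("signed: %d\n", (int32_t)(now - cnt) > 0);   /* 0: still pending */
            return 0;
    }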
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
new file mode 100644
index 000000000000..5162fe4b5952
--- /dev/null
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -0,0 +1,321 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+
+#include <asm/smtc_ipi.h>
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * Variant clock event timer support for SMTC on MIPS 34K, 1004K
+ * or other MIPS MT cores.
+ *
+ * Notes on SMTC Support:
+ *
+ * SMTC has multiple microthread TCs pretending to be Linux CPUs.
+ * But there's only one Count/Compare pair per VPE, and Compare
+ * interrupts are taken opportunistically by available TCs
+ * bound to the VPE with the Count register.  The new timer
+ * framework provides for global broadcasts, but we really
+ * want VPE-level multicasts for best behavior.  So instead
+ * of invoking the high-level clock-event broadcast code,
+ * this version of SMTC support uses the historical SMTC
+ * multicast mechanisms "under the hood", appearing to the
+ * generic clock layer as if the interrupts are per-CPU.
+ *
+ * The approach taken here is to maintain a set of NR_CPUS
+ * virtual timers, and track which "CPU" needs to be alerted
+ * at each event.
+ *
+ * It's unlikely that we'll see a MIPS MT core with more than
+ * 2 VPEs, but we *know* that we won't need to handle more
+ * VPEs than we have "CPUs".  So NCPUs arrays of NCPUs elements
+ * are always going to be overkill, but always going to be enough.
+ */
+
+unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
+static int smtc_nextinvpe[NR_CPUS];
+
+/*
+ * Timestamps stored are absolute values to be programmed
+ * into Count register.  Valid timestamps will never be zero.
+ * If a Zero Count value is actually calculated, it is converted
+ * to be a 1, which will introduce one or two CPU cycles of error
+ * roughly once every four billion events, which at 1000 HZ means
+ * about once every 50 days.  If that's actually a problem, one
+ * could alternate squashing 0 to 1 and to -1.
+ */
+
+#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
+#define ISVALID(x) ((x) != 0L)
+
+/*
+ * Time comparison is subtle, as it's really truncated
+ * modular arithmetic.
+ */
+
+#define IS_SOONER(a, b, reference) \
+    (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
+
+/*
+ * CATCHUP_INCREMENT, used when the function falls behind the counter.
+ * Could be an increasing function instead of a constant;
+ */
+
+#define CATCHUP_INCREMENT 64
+
+static int mips_next_event(unsigned long delta,
+                           struct clock_event_device *evt)
+{
+        unsigned long flags;
+        unsigned int mtflags;
+        unsigned long timestamp, reference, previous;
+        unsigned long nextcomp = 0L;
+        int vpe = current_cpu_data.vpe_id;
+        int cpu = smp_processor_id();
+        local_irq_save(flags);
+        mtflags = dmt();
+
+        /*
+         * Maintain the per-TC virtual timer
+         * and program the per-VPE shared Count register
+         * as appropriate here...
+         */
+        reference = (unsigned long)read_c0_count();
+        timestamp = MAKEVALID(reference + delta);
+        /*
+         * To really model the clock, we have to catch the case
+         * where the current next-in-VPE timestamp is the old
+         * timestamp for the calling CPU, but the new value is
+         * in fact later.  In that case, we have to do a full
+         * scan and discover the new next-in-VPE CPU id and
+         * timestamp.
+         */
+        previous = smtc_nexttime[vpe][cpu];
+        if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
+            && IS_SOONER(previous, timestamp, reference)) {
+                int i;
+                int soonest = cpu;
+
+                /*
+                 * Update timestamp array here, so that new
+                 * value gets considered along with those of
+                 * other virtual CPUs on the VPE.
+                 */
+                smtc_nexttime[vpe][cpu] = timestamp;
+                for_each_online_cpu(i) {
+                        if (ISVALID(smtc_nexttime[vpe][i])
+                            && IS_SOONER(smtc_nexttime[vpe][i],
+                                smtc_nexttime[vpe][soonest], reference)) {
+                                soonest = i;
+                        }
+                }
+                smtc_nextinvpe[vpe] = soonest;
+                nextcomp = smtc_nexttime[vpe][soonest];
+        /*
+         * Otherwise, we don't have to process the whole array rank,
+         * we just have to see if the event horizon has gotten closer.
+         */
+        } else {
+                if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
+                    IS_SOONER(timestamp,
+                        smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
+                        smtc_nextinvpe[vpe] = cpu;
+                        nextcomp = timestamp;
+                }
+                /*
+                 * Since next-in-VPE may be the same as the executing
+                 * virtual CPU, we update the array *after* checking
+                 * its value.
+                 */
+                smtc_nexttime[vpe][cpu] = timestamp;
+        }
+
+        /*
+         * It may be that, in fact, we don't need to update Compare,
+         * but if we do, we want to make sure we didn't fall into
+         * a crack just behind Count.
+         */
+        if (ISVALID(nextcomp)) {
+                write_c0_compare(nextcomp);
+                ehb();
+                /*
+                 * We never return an error, we just make sure
+                 * that we trigger the handlers as quickly as
+                 * we can if we fell behind.
+                 */
+                while ((nextcomp - (unsigned long)read_c0_count())
+                        > (unsigned long)LONG_MAX) {
+                        nextcomp += CATCHUP_INCREMENT;
+                        write_c0_compare(nextcomp);
+                        ehb();
+                }
+        }
+        emt(mtflags);
+        local_irq_restore(flags);
+        return 0;
+}
+
+
+void smtc_distribute_timer(int vpe)
+{
+        unsigned long flags;
+        unsigned int mtflags;
+        int cpu;
+        struct clock_event_device *cd;
+        unsigned long nextstamp = 0L;
+        unsigned long reference;
+
+
+repeat:
+        for_each_online_cpu(cpu) {
+                /*
+                 * Find virtual CPUs within the current VPE who have
+                 * unserviced timer requests whose time is now past.
+                 */
+                local_irq_save(flags);
+                mtflags = dmt();
+                if (cpu_data[cpu].vpe_id == vpe &&
+                    ISVALID(smtc_nexttime[vpe][cpu])) {
+                        reference = (unsigned long)read_c0_count();
+                        if ((smtc_nexttime[vpe][cpu] - reference)
+                                 > (unsigned long)LONG_MAX) {
+                                smtc_nexttime[vpe][cpu] = 0L;
+                                emt(mtflags);
+                                local_irq_restore(flags);
+                                /*
+                                 * We don't send IPIs to ourself.
+                                 */
+                                if (cpu != smp_processor_id()) {
+                                        smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+                                } else {
+                                        cd = &per_cpu(mips_clockevent_device, cpu);
+                                        cd->event_handler(cd);
+                                }
+                        } else {
+                                /* Local to VPE but Valid Time not yet reached. */
+                                if (!ISVALID(nextstamp) ||
+                                    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
+                                    reference)) {
+                                        smtc_nextinvpe[vpe] = cpu;
+                                        nextstamp = smtc_nexttime[vpe][cpu];
+                                }
+                                emt(mtflags);
+                                local_irq_restore(flags);
+                        }
+                } else {
+                        emt(mtflags);
+                        local_irq_restore(flags);
+
+                }
+        }
+        /* Reprogram for interrupt at next soonest timestamp for VPE */
+        if (ISVALID(nextstamp)) {
+                write_c0_compare(nextstamp);
+                ehb();
+                if ((nextstamp - (unsigned long)read_c0_count())
+                        > (unsigned long)LONG_MAX)
+                        goto repeat;
+        }
+}
+
+
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+        int cpu = smp_processor_id();
+
+        /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
+        handle_perf_irq(1);
+
+        if (read_c0_cause() & (1 << 30)) {
+                /* Clear Count/Compare Interrupt */
+                write_c0_compare(read_c0_compare());
+                smtc_distribute_timer(cpu_data[cpu].vpe_id);
+        }
+        return IRQ_HANDLED;
+}
+
+
+int __cpuinit mips_clockevent_init(void)
+{
+        uint64_t mips_freq = mips_hpt_frequency;
+        unsigned int cpu = smp_processor_id();
+        struct clock_event_device *cd;
+        unsigned int irq;
+        int i;
+        int j;
+
+        if (!cpu_has_counter || !mips_hpt_frequency)
+                return -ENXIO;
+        if (cpu == 0) {
+                for (i = 0; i < num_possible_cpus(); i++) {
+                        smtc_nextinvpe[i] = 0;
+                        for (j = 0; j < num_possible_cpus(); j++)
+                                smtc_nexttime[i][j] = 0L;
+                }
+                /*
+                 * SMTC also can't have the usability test
+                 * run by secondary TCs once Compare is in use.
+                 */
+                if (!c0_compare_int_usable())
+                        return -ENXIO;
+        }
+
+        /*
+         * With vectored interrupts things are getting platform specific.
+         * get_c0_compare_int is a hook to allow a platform to return the
+         * interrupt number of its liking.
+         */
+        irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+        if (get_c0_compare_int)
+                irq = get_c0_compare_int();
+
+        cd = &per_cpu(mips_clockevent_device, cpu);
+
+        cd->name                = "MIPS";
+        cd->features            = CLOCK_EVT_FEAT_ONESHOT;
+
+        /* Calculate the min / max delta */
+        cd->mult                = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+        cd->shift               = 32;
+        cd->max_delta_ns        = clockevent_delta2ns(0x7fffffff, cd);
+        cd->min_delta_ns        = clockevent_delta2ns(0x300, cd);
+
+        cd->rating              = 300;
+        cd->irq                 = irq;
+        cd->cpumask             = cpumask_of_cpu(cpu);
+        cd->set_next_event      = mips_next_event;
+        cd->set_mode            = mips_set_clock_mode;
+        cd->event_handler       = mips_event_handler;
+
+        clockevents_register_device(cd);
+
+        /*
+         * On SMTC we only want to do the data structure
+         * initialization and IRQ setup once.
+         */
+        if (cpu)
+                return 0;
+        /*
+         * And we need the hwmask associated with the c0_compare
+         * vector to be initialized.
+         */
+        irq_hwmask[irq] = (0x100 << cp0_compare_irq);
+        if (cp0_timer_irq_installed)
+                return 0;
+
+        cp0_timer_irq_installed = 1;
+
+        setup_irq(irq, &c0_compare_irqaction);
+
+        return 0;
+}
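Reviewer note: IS_SOONER() above is the same truncated modular arithmetic, generalized to two timestamps measured from a moving reference (the current Count value). A small self-contained check of the property the scheduler relies on, using made-up values rather than anything from the patch:

    #include <assert.h>

    /* Same definition as in cevt-smtc.c above. */
    #define IS_SOONER(a, b, reference) \
        (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))

    int main(void)
    {
            unsigned long ref = 0xfffffff0UL;  /* Count about to wrap */
            unsigned long a   = 0x00000010UL;  /* 0x20 ticks past ref, post-wrap */
            unsigned long b   = 0x00000100UL;  /* later still */

            assert(IS_SOONER(a, b, ref));      /* a really is the sooner event */
            assert(!IS_SOONER(b, a, ref));
            return 0;
    }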
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 11c92dc53791..e621fda8ab37 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -54,14 +54,18 @@ extern void r4k_wait(void);
  * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
  * using this version a gamble.
  */
-static void r4k_wait_irqoff(void)
+void r4k_wait_irqoff(void)
 {
         local_irq_disable();
         if (!need_resched())
-                __asm__("       .set    mips3           \n"
+                __asm__("       .set    push            \n"
+                        "       .set    mips3           \n"
                         "       wait                    \n"
-                        "       .set    mips0           \n");
+                        "       .set    pop             \n");
         local_irq_enable();
+        __asm__("       .globl __pastwait       \n"
+                "__pastwait:                    \n");
+        return;
 }
 
 /*
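Reviewer note: the __pastwait label is consumed by smtc_send_ipi() in the smtc.c hunk further down. A halted TC counts as parked in the wait exactly when its restart PC lies between the two symbols, in which case the sender bumps it to __pastwait and posts the IPI directly. A sketch of that test, with a hypothetical helper name:

    extern void r4k_wait_irqoff(void), __pastwait(void);

    /* Hypothetical helper; the patch open-codes this inside smtc_send_ipi(). */
    static int tc_parked_in_wait(unsigned long tcrestart)
    {
            return tcrestart >= (unsigned long)r4k_wait_irqoff &&
                   tcrestart <  (unsigned long)__pastwait;
    }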
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e29598ae939d..ffa331029e08 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -79,11 +79,6 @@ FEXPORT(syscall_exit)
 
 FEXPORT(restore_all)                    # restore full frame
 #ifdef CONFIG_MIPS_MT_SMTC
-/* Detect and execute deferred IPI "interrupts" */
-        LONG_L  s0, TI_REGS($28)
-        LONG_S  sp, TI_REGS($28)
-        jal     deferred_smtc_ipi
-        LONG_S  s0, TI_REGS($28)
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 /* Re-arm any temporarily masked interrupts not explicitly "acked" */
         mfc0    v0, CP0_TCSTATUS
@@ -112,6 +107,11 @@ FEXPORT(restore_all) # restore full frame
         xor     t0, t0, t3
         mtc0    t0, CP0_TCCONTEXT
 #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
+/* Detect and execute deferred IPI "interrupts" */
+        LONG_L  s0, TI_REGS($28)
+        LONG_S  sp, TI_REGS($28)
+        jal     deferred_smtc_ipi
+        LONG_S  s0, TI_REGS($28)
 #endif /* CONFIG_MIPS_MT_SMTC */
         .set    noat
         RESTORE_TEMP
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index f886dd7f708e..01dcbe38fa01 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -282,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp)
         and     t0, a0, t1
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
         mfc0    t2, CP0_TCCONTEXT
-        or      t0, t0, t2
-        mtc0    t0, CP0_TCCONTEXT
+        or      t2, t0, t2
+        mtc0    t2, CP0_TCCONTEXT
 #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
         xor     t1, t1, t0
         mtc0    t1, CP0_STATUS
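Reviewer note: the genex.S change is a scratch-register fix. The backstop code OR-ed the pending IM bits into t0 and stored that, clobbering the value that the following `xor t1, t1, t0` still needs. In C terms (illustrative variables and stand-in accessors, not a real kernel API):

    static unsigned int tccontext;               /* models CP0_TCCONTEXT */

    static void im_backstop(unsigned int *status, unsigned int t0)
    {
            unsigned int t2 = tccontext;         /* mfc0  t2, CP0_TCCONTEXT */

            t2 |= t0;                            /* fixed: or t2, t0, t2    */
            tccontext = t2;                      /* mtc0  t2, CP0_TCCONTEXT */
            /* the old "or t0, t0, t2" folded stale TCContext bits into t0
             * here, corrupting the Status update below */
            *status ^= t0;                       /* xor t1, t1, t0 */
    }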
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 361364501d34..492a0a8d70fb 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -22,6 +22,7 @@
 #include <asm/irqflags.h>
 #include <asm/regdef.h>
 #include <asm/page.h>
+#include <asm/pgtable-bits.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
 
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index df4d3f2f740c..dc9eb72ed9de 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh);
 /*
  * FPU Use Factor empirically derived from experiments on 34K
  */
-#define FPUSEFACTOR 333
+#define FPUSEFACTOR 2000
 
 static __init int mt_fp_affinity_init(void)
 {
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ce7684335a41..22fc19bbe87f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -55,7 +55,7 @@ void __noreturn cpu_idle(void)
         while (1) {
                 tick_nohz_stop_sched_tick(1);
                 while (!need_resched()) {
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_MIPS_MT_SMTC
                         extern void smtc_idle_loop_hook(void);
 
                         smtc_idle_loop_hook();
@@ -145,19 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
          */
         p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
         childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
+
+#ifdef CONFIG_MIPS_MT_SMTC
+        /*
+         * SMTC restores TCStatus after Status, and the CU bits
+         * are aliased there.
+         */
+        childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
+#endif
         clear_tsk_thread_flag(p, TIF_USEDFPU);
 
 #ifdef CONFIG_MIPS_MT_FPAFF
         clear_tsk_thread_flag(p, TIF_FPUBOUND);
-
-        /*
-         * FPU affinity support is cleaner if we track the
-         * user-visible CPU affinity from the very beginning.
-         * The generic cpus_allowed mask will already have
-         * been copied from the parent before copy_thread
-         * is invoked.
-         */
-        p->thread.user_cpus_allowed = p->cpus_allowed;
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
         if (clone_flags & CLONE_SETTLS)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 35234b92b9a5..96ffc9c6d194 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                 case FPC_EIR: { /* implementation / version register */
                         unsigned int flags;
 #ifdef CONFIG_MIPS_MT_SMTC
-                        unsigned int irqflags;
+                        unsigned long irqflags;
                         unsigned int mtflags;
 #endif /* CONFIG_MIPS_MT_SMTC */
 
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a516286532ab..897fb2b4751c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,4 +1,21 @@
-/* Copyright (C) 2004 Mips Technologies, Inc */
+/*
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * Copyright (C) 2004 Mips Technologies, Inc
+ * Copyright (C) 2008 Kevin D. Kissell
+ */
 
 #include <linux/clockchips.h>
 #include <linux/kernel.h>
3#include <linux/clockchips.h> 20#include <linux/clockchips.h>
4#include <linux/kernel.h> 21#include <linux/kernel.h>
@@ -21,7 +38,6 @@
 #include <asm/time.h>
 #include <asm/addrspace.h>
 #include <asm/smtc.h>
-#include <asm/smtc_ipi.h>
 #include <asm/smtc_proc.h>
 
 /*
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS];
 
 asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
 
-/*
- * Clock interrupt "latch" buffers, per "CPU"
- */
-
-static atomic_t ipi_timer_latch[NR_CPUS];
 
 /*
  * Number of InterProcessor Interrupt (IPI) message buffers to allocate
@@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS];
 
 #define IPIBUF_PER_CPU 4
 
-static struct smtc_ipi_q IPIQ[NR_CPUS];
+struct smtc_ipi_q IPIQ[NR_CPUS];
 static struct smtc_ipi_q freeIPIq;
 
 
@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void)
  * phys_cpu_present_map and the logical/physical mappings.
  */
 
-int __init mipsmt_build_cpu_map(int start_cpu_slot)
+int __init smtc_build_cpu_map(int start_cpu_slot)
 {
         int i, ntcs;
 
@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
         write_tc_c0_tcstatus((read_tc_c0_tcstatus()
                         & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
                         | TCSTATUS_A);
-        write_tc_c0_tccontext(0);
+        /*
+         * TCContext gets an offset from the base of the IPIQ array
+         * to be used in low-level code to detect the presence of
+         * an active IPI queue
+         */
+        write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
         /* Bind tc to vpe */
         write_tc_c0_tcbind(vpe);
         /* In general, all TCs should have the same cpu_data indications */
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
                 cpu_data[cpu].options &= ~MIPS_CPU_FPU;
         cpu_data[cpu].vpe_id = vpe;
         cpu_data[cpu].tc_id = tc;
+        /* Multi-core SMTC hasn't been tested, but be prepared */
+        cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
 }
 
+/*
+ * Tweak to get Count registers in as close a sync as possible.
+ * Value seems good for 34K-class cores.
+ */
+
+#define CP0_SKEW 8
 
-void mipsmt_prepare_cpus(void)
+void smtc_prepare_cpus(int cpus)
 {
         int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
         unsigned long flags;
@@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void)
                 IPIQ[i].head = IPIQ[i].tail = NULL;
                 spin_lock_init(&IPIQ[i].lock);
                 IPIQ[i].depth = 0;
-                atomic_set(&ipi_timer_latch[i], 0);
         }
 
         /* cpu_data index starts at zero */
         cpu = 0;
         cpu_data[cpu].vpe_id = 0;
         cpu_data[cpu].tc_id = 0;
+        cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
         cpu++;
 
         /* Report on boot-time options */
@@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void)
                         write_vpe_c0_compare(0);
                         /* Propagate Config7 */
                         write_vpe_c0_config7(read_c0_config7());
-                        write_vpe_c0_count(read_c0_count());
+                        write_vpe_c0_count(read_c0_count() + CP0_SKEW);
+                        ehb();
                 }
                 /* enable multi-threading within VPE */
                 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
@@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void)
 void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 {
         extern u32 kernelsp[NR_CPUS];
-        long flags;
+        unsigned long flags;
         int mtflags;
 
         LOCK_MT_PRA();
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 
 void smtc_init_secondary(void)
 {
-        /*
-         * Start timer on secondary VPEs if necessary.
-         * plat_timer_setup has already been invoked by init/main
-         * on "boot" TC.  Like per_cpu_trap_init() hack, this assumes that
-         * SMTC init code assigns TCs consecutively and in ascending order
-         * across available VPEs.
-         */
-        if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
-            ((read_c0_tcbind() & TCBIND_CURVPE)
-            != cpu_data[smp_processor_id() - 1].vpe_id)){
-                write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
-        }
-
         local_irq_enable();
 }
 
 void smtc_smp_finish(void)
 {
+        int cpu = smp_processor_id();
+
+        /*
+         * Lowest-numbered CPU per VPE starts a clock tick.
+         * Like per_cpu_trap_init() hack, this assumes that
+         * SMTC init code assigns TCs consecutively and
+         * in ascending order across available VPEs.
+         */
+        if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
+                write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
+
         printk("TC %d going on-line as CPU %d\n",
                 cpu_data[smp_processor_id()].tc_id, smp_processor_id());
 }
@@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 {
         int tcstatus;
         struct smtc_ipi *pipi;
-        long flags;
+        unsigned long flags;
         int mtflags;
+        unsigned long tcrestart;
+        extern void r4k_wait_irqoff(void), __pastwait(void);
 
         if (cpu == smp_processor_id()) {
                 printk("Cannot Send IPI to self!\n");
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
         pipi->arg = (void *)action;
         pipi->dest = cpu;
         if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
-                if (type == SMTC_CLOCK_TICK)
-                        atomic_inc(&ipi_timer_latch[cpu]);
                 /* If not on same VPE, enqueue and send cross-VPE interrupt */
                 smtc_ipi_nq(&IPIQ[cpu], pipi);
                 LOCK_CORE_PRA();
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 
         if ((tcstatus & TCSTATUS_IXMT) != 0) {
                 /*
-                 * Spin-waiting here can deadlock,
-                 * so we queue the message for the target TC.
+                 * If we're in the irq-off version of the wait
+                 * loop, we need to force exit from the wait and
+                 * do a direct post of the IPI.
+                 */
+                if (cpu_wait == r4k_wait_irqoff) {
+                        tcrestart = read_tc_c0_tcrestart();
+                        if (tcrestart >= (unsigned long)r4k_wait_irqoff
+                            && tcrestart < (unsigned long)__pastwait) {
+                                write_tc_c0_tcrestart(__pastwait);
+                                tcstatus &= ~TCSTATUS_IXMT;
+                                write_tc_c0_tcstatus(tcstatus);
+                                goto postdirect;
+                        }
+                }
+                /*
+                 * Otherwise we queue the message for the target TC
+                 * to pick up when he does a local_irq_restore()
                  */
                 write_tc_c0_tchalt(0);
                 UNLOCK_CORE_PRA();
-                /* Try to reduce redundant timer interrupt messages */
-                if (type == SMTC_CLOCK_TICK) {
-                        if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
-                                smtc_ipi_nq(&freeIPIq, pipi);
-                                return;
-                        }
-                }
                 smtc_ipi_nq(&IPIQ[cpu], pipi);
         } else {
-                if (type == SMTC_CLOCK_TICK)
-                        atomic_inc(&ipi_timer_latch[cpu]);
+postdirect:
                 post_direct_ipi(cpu, pipi);
                 write_tc_c0_tchalt(0);
                 UNLOCK_CORE_PRA();
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void)
         smp_call_function_interrupt();
 }
 
-DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 
 void ipi_decode(struct smtc_ipi *pipi)
 {
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
         struct clock_event_device *cd;
         void *arg_copy = pipi->arg;
         int type_copy = pipi->type;
-        int ticks;
-
         smtc_ipi_nq(&freeIPIq, pipi);
         switch (type_copy) {
         case SMTC_CLOCK_TICK:
                 irq_enter();
                 kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
-                cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-                ticks = atomic_read(&ipi_timer_latch[cpu]);
-                atomic_sub(ticks, &ipi_timer_latch[cpu]);
-                while (ticks) {
-                        cd->event_handler(cd);
-                        ticks--;
-                }
+                cd = &per_cpu(mips_clockevent_device, cpu);
+                cd->event_handler(cd);
                 irq_exit();
                 break;
 
@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi)
         }
 }
 
+/*
+ * Similar to smtc_ipi_replay(), but invoked from context restore,
+ * so it reuses the current exception frame rather than set up a
+ * new one with self_ipi.
+ */
+
 void deferred_smtc_ipi(void)
 {
-        struct smtc_ipi *pipi;
-        unsigned long flags;
-/* DEBUG */
-        int q = smp_processor_id();
+        int cpu = smp_processor_id();
 
         /*
          * Test is not atomic, but much faster than a dequeue,
          * and the vast majority of invocations will have a null queue.
+         * If irq_disabled when this was called, then any IPIs queued
+         * after we test last will be taken on the next irq_enable/restore.
+         * If interrupts were enabled, then any IPIs added after the
+         * last test will be taken directly.
          */
-        if (IPIQ[q].head != NULL) {
-                while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
-                        /* ipi_decode() should be called with interrupts off */
-                        local_irq_save(flags);
+
+        while (IPIQ[cpu].head != NULL) {
+                struct smtc_ipi_q *q = &IPIQ[cpu];
+                struct smtc_ipi *pipi;
+                unsigned long flags;
+
+                /*
+                 * It may be possible we'll come in with interrupts
+                 * already enabled.
+                 */
+                local_irq_save(flags);
+
+                spin_lock(&q->lock);
+                pipi = __smtc_ipi_dq(q);
+                spin_unlock(&q->lock);
+                if (pipi != NULL)
                         ipi_decode(pipi);
-                        local_irq_restore(flags);
-                }
-        }
+                /*
+                 * The use of the __raw_local restore isn't
+                 * as obviously necessary here as in smtc_ipi_replay(),
+                 * but it's more efficient, given that we're already
+                 * running down the IPI queue.
+                 */
+                __raw_local_irq_restore(flags);
+        }
 }
@@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
         struct smtc_ipi *pipi;
         unsigned long tcstatus;
         int sent;
-        long flags;
+        unsigned long flags;
         unsigned int mtflags;
         unsigned int vpflags;
 
@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
 
 /*
  * SMTC-specific hacks invoked from elsewhere in the kernel.
- *
- * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
- * called with interrupts disabled.  We do rely on interrupts being disabled
- * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
- * result in a recursive call to raw_local_irq_restore().
  */
 
-static void __smtc_ipi_replay(void)
+/*
+ * smtc_ipi_replay is called from raw_local_irq_restore
+ */
+
+void smtc_ipi_replay(void)
 {
         unsigned int cpu = smp_processor_id();
 
         /*
          * To the extent that we've ever turned interrupts off,
          * we may have accumulated deferred IPIs.  This is subtle.
-         * If we use the smtc_ipi_qdepth() macro, we'll get an
-         * exact number - but we'll also disable interrupts
-         * and create a window of failure where a new IPI gets
-         * queued after we test the depth but before we re-enable
-         * interrupts. So long as IXMT never gets set, however,
          * we should be OK:  If we pick up something and dispatch
          * it here, that's great. If we see nothing, but concurrent
          * with this operation, another TC sends us an IPI, IXMT
          * is clear, and we'll handle it as a real pseudo-interrupt
-         * and not a pseudo-pseudo interrupt.
+         * and not a pseudo-pseudo interrupt.  The important thing
+         * is to do the last check for queued message *after* the
+         * re-enabling of interrupts.
          */
-        if (IPIQ[cpu].depth > 0) {
-                while (1) {
-                        struct smtc_ipi_q *q = &IPIQ[cpu];
-                        struct smtc_ipi *pipi;
-                        extern void self_ipi(struct smtc_ipi *);
-
-                        spin_lock(&q->lock);
-                        pipi = __smtc_ipi_dq(q);
-                        spin_unlock(&q->lock);
-                        if (!pipi)
-                                break;
+        while (IPIQ[cpu].head != NULL) {
+                struct smtc_ipi_q *q = &IPIQ[cpu];
+                struct smtc_ipi *pipi;
+                unsigned long flags;
+
+                /*
+                 * It's just possible we'll come in with interrupts
+                 * already enabled.
+                 */
+                local_irq_save(flags);
+
+                spin_lock(&q->lock);
+                pipi = __smtc_ipi_dq(q);
+                spin_unlock(&q->lock);
+                /*
+                 * But use a raw restore here to avoid recursion.
+                 */
+                __raw_local_irq_restore(flags);
 
+                if (pipi) {
                         self_ipi(pipi);
                         smtc_cpu_stats[cpu].selfipis++;
                 }
         }
 }
 
-void smtc_ipi_replay(void)
-{
-        raw_local_irq_disable();
-        __smtc_ipi_replay();
-}
-
 EXPORT_SYMBOL(smtc_ipi_replay);
@@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void)
                 }
         }
 
-        /*
-         * Now that we limit outstanding timer IPIs, check for hung TC
-         */
-        for (tc = 0; tc < NR_CPUS; tc++) {
-                /* Don't check ourself - we'll dequeue IPIs just below */
-                if ((tc != smp_processor_id()) &&
-                    atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
-                        if (clock_hang_reported[tc] == 0) {
-                                pdb_msg += sprintf(pdb_msg,
-                                        "TC %d looks hung with timer latch at %d\n",
-                                        tc, atomic_read(&ipi_timer_latch[tc]));
-                                clock_hang_reported[tc]++;
-                        }
-                }
-        }
         emt(mtflags);
         local_irq_restore(flags);
         if (pdb_msg != &id_ho_db_msg[0])
                 printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
 
-        /*
-         * Replay any accumulated deferred IPIs. If "Instant Replay"
-         * is in use, there should never be any.
-         */
-#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-        {
-                unsigned long flags;
-
-                local_irq_save(flags);
-                __smtc_ipi_replay();
-                local_irq_restore(flags);
-        }
-#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
+        smtc_ipi_replay();
 }
@@ -1242,10 +1260,6 @@ void smtc_soft_dump(void)
1242 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); 1260 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1243 } 1261 }
1244 smtc_ipi_qdump(); 1262 smtc_ipi_qdump();
1245 printk("Timer IPI Backlogs:\n");
1246 for (i=0; i < NR_CPUS; i++) {
1247 printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
1248 }
1249 printk("%d Recoveries of \"stolen\" FPU\n", 1263 printk("%d Recoveries of \"stolen\" FPU\n",
1250 atomic_read(&smtc_fpu_recoveries)); 1264 atomic_read(&smtc_fpu_recoveries));
1251} 1265}
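
For readability, here is the new replay loop from the smtc.c hunk above, restated as linear C; every identifier is taken from the file itself. The crucial ordering is that each iteration re-opens a short interrupt window before the queue head is rechecked, and that the window is closed with the raw flags restore so a deferred IPI delivered inside it cannot recurse back through the replay path:

while (IPIQ[cpu].head != NULL) {
        struct smtc_ipi_q *q = &IPIQ[cpu];
        struct smtc_ipi *pipi;
        unsigned long flags;

        /* We may be entered with interrupts already enabled. */
        local_irq_save(flags);

        spin_lock(&q->lock);
        pipi = __smtc_ipi_dq(q);        /* pop one deferred IPI */
        spin_unlock(&q->lock);

        /* Raw restore: avoid recursing through the irq-enable hooks. */
        __raw_local_irq_restore(flags);

        if (pipi) {
                self_ipi(pipi);         /* dispatch it locally */
                smtc_cpu_stats[cpu].selfipis++;
        }
}

Because the idle hook now calls smtc_ipi_replay() unconditionally, the CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY special case and the ipi_timer_latch hang check (and its smtc_soft_dump() report) could all be dropped, as the later hunks show.
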
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 5fd0cd020af5..b602ac6eb47d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -825,8 +825,10 @@ static void mt_ase_fp_affinity(void)
825 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { 825 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
826 cpumask_t tmask; 826 cpumask_t tmask;
827 827
828 cpus_and(tmask, current->thread.user_cpus_allowed, 828 current->thread.user_cpus_allowed
829 mt_fpu_cpumask); 829 = current->cpus_allowed;
830 cpus_and(tmask, current->cpus_allowed,
831 mt_fpu_cpumask);
830 set_cpus_allowed(current, tmask); 832 set_cpus_allowed(current, tmask);
831 set_thread_flag(TIF_FPUBOUND); 833 set_thread_flag(TIF_FPUBOUND);
832 } 834 }
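
The traps.c change reorders the affinity fixup: the thread's saved user mask is refreshed from the live cpus_allowed before that mask is narrowed to FPU-capable CPUs, so the intersection is computed against the current affinity rather than a possibly stale saved copy. Restated from the hunk:

cpumask_t tmask;

/* Snapshot the full affinity first, so it can be restored later... */
current->thread.user_cpus_allowed = current->cpus_allowed;
/* ...then restrict the live mask to CPUs that have an FPU. */
cpus_and(tmask, current->cpus_allowed, mt_fpu_cpumask);
set_cpus_allowed(current, tmask);
set_thread_flag(TIF_FPUBOUND);
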
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 3b7dd722c32a..cef2db8d2225 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += malta-console.o
15obj-$(CONFIG_PCI) += malta-pci.o 15obj-$(CONFIG_PCI) += malta-pci.o
16 16
17# FIXME FIXME FIXME 17# FIXME FIXME FIXME
18obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o 18obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o
19 19
20EXTRA_CFLAGS += -Werror 20EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 5ea705e49454..f84a46a8ae6e 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -84,12 +84,17 @@ static void msmtc_cpus_done(void)
84 84
85static void __init msmtc_smp_setup(void) 85static void __init msmtc_smp_setup(void)
86{ 86{
87 mipsmt_build_cpu_map(0); 87 /*
88 * we won't get the definitive value until
89 * we've run smtc_prepare_cpus later, but
90 * we would appear to need an upper bound now.
91 */
92 smp_num_siblings = smtc_build_cpu_map(0);
88} 93}
89 94
90static void __init msmtc_prepare_cpus(unsigned int max_cpus) 95static void __init msmtc_prepare_cpus(unsigned int max_cpus)
91{ 96{
92 mipsmt_prepare_cpus(); 97 smtc_prepare_cpus(max_cpus);
93} 98}
94 99
95struct plat_smp_ops msmtc_smp_ops = { 100struct plat_smp_ops msmtc_smp_ops = {
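
In malta-smtc.c the mipsmt_* entry points give way to their smtc_* successors, and the return value of the CPU-map builder now seeds smp_num_siblings, which per the new comment is only an upper bound until smtc_prepare_cpus() computes the definitive count:

static void __init msmtc_smp_setup(void)
{
        /* Upper bound only; smtc_prepare_cpus() settles the real value. */
        smp_num_siblings = smtc_build_cpu_map(0);
}
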
diff --git a/arch/mips/sibyte/swarm/Makefile b/arch/mips/sibyte/swarm/Makefile
index f18ba9201bbc..7b45f199d92a 100644
--- a/arch/mips/sibyte/swarm/Makefile
+++ b/arch/mips/sibyte/swarm/Makefile
@@ -1,3 +1,4 @@
1obj-y := setup.o rtc_xicor1241.o rtc_m41t81.o 1obj-y := platform.o setup.o rtc_xicor1241.o \
2 rtc_m41t81.o
2 3
3obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o 4obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
new file mode 100644
index 000000000000..54847fe1e564
--- /dev/null
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -0,0 +1,85 @@
1#include <linux/err.h>
2#include <linux/kernel.h>
3#include <linux/init.h>
4#include <linux/io.h>
5#include <linux/platform_device.h>
6#include <linux/ata_platform.h>
7
8#include <asm/sibyte/board.h>
9#include <asm/sibyte/sb1250_genbus.h>
10#include <asm/sibyte/sb1250_regs.h>
11
12#if defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR)
13
14#define DRV_NAME "pata-swarm"
15
16#define SWARM_IDE_SHIFT 5
17#define SWARM_IDE_BASE 0x1f0
18#define SWARM_IDE_CTRL 0x3f6
19
20static struct resource swarm_pata_resource[] = {
21 {
22 .name = "Swarm GenBus IDE",
23 .flags = IORESOURCE_MEM,
24 }, {
25 .name = "Swarm GenBus IDE",
26 .flags = IORESOURCE_MEM,
27 }, {
28 .name = "Swarm GenBus IDE",
29 .flags = IORESOURCE_IRQ,
30 .start = K_INT_GB_IDE,
31 .end = K_INT_GB_IDE,
32 },
33};
34
35static struct pata_platform_info pata_platform_data = {
36 .ioport_shift = SWARM_IDE_SHIFT,
37};
38
39static struct platform_device swarm_pata_device = {
40 .name = "pata_platform",
41 .id = -1,
42 .resource = swarm_pata_resource,
43 .num_resources = ARRAY_SIZE(swarm_pata_resource),
44 .dev = {
45 .platform_data = &pata_platform_data,
46 .coherent_dma_mask = ~0, /* grumble */
47 },
48};
49
50static int __init swarm_pata_init(void)
51{
52 u8 __iomem *base;
53 phys_t offset, size;
54 struct resource *r;
55
56 if (!SIBYTE_HAVE_IDE)
57 return -ENODEV;
58
59 base = ioremap(A_IO_EXT_BASE, 0x800);
60 offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS));
61 size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS));
62 iounmap(base);
63
64 offset = G_IO_START_ADDR(offset) << S_IO_ADDRBASE;
65 size = (G_IO_MULT_SIZE(size) + 1) << S_IO_REGSIZE;
66 if (offset < A_PHYS_GENBUS || offset >= A_PHYS_GENBUS_END) {
67 pr_info(DRV_NAME ": PATA interface at GenBus disabled\n");
68
69 return -EBUSY;
70 }
71
72 pr_info(DRV_NAME ": PATA interface at GenBus slot %i\n", IDE_CS);
73
74 r = swarm_pata_resource;
75 r[0].start = offset + (SWARM_IDE_BASE << SWARM_IDE_SHIFT);
76 r[0].end = offset + ((SWARM_IDE_BASE + 8) << SWARM_IDE_SHIFT) - 1;
77 r[1].start = offset + (SWARM_IDE_CTRL << SWARM_IDE_SHIFT);
78 r[1].end = offset + ((SWARM_IDE_CTRL + 1) << SWARM_IDE_SHIFT) - 1;
79
80 return platform_device_register(&swarm_pata_device);
81}
82
83device_initcall(swarm_pata_init);
84
85#endif /* defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR) */
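
The new platform.c follows the usual pattern for devices whose bus windows are only known at run time: probe the chip-select registers, patch the MEM resources in place, then register the device so the generic pata_platform driver can bind. A stripped-down sketch of that pattern, with made-up names, addresses and IRQ number (the real probe reads the GenBus IDE chip-select window, as above):

#include <linux/ata_platform.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct pata_platform_info my_pdata = {
        .ioport_shift   = 5,            /* register spacing, as on Swarm */
};

static struct resource my_res[] = {
        { .name = "cmd", .flags = IORESOURCE_MEM },
        { .name = "ctl", .flags = IORESOURCE_MEM },
        { .name = "irq", .flags = IORESOURCE_IRQ, .start = 42, .end = 42 },
};

static struct platform_device my_dev = {
        .name           = "pata_platform",
        .id             = -1,
        .resource       = my_res,
        .num_resources  = ARRAY_SIZE(my_res),
        .dev            = { .platform_data = &my_pdata },
};

static int __init my_pata_init(void)
{
        /* Hypothetical base; the real code derives it from the bus. */
        unsigned long base = 0x100a0000;

        /* Patch the windows in place, then hand the device to the core. */
        my_res[0].start = base + (0x1f0 << 5);
        my_res[0].end   = base + ((0x1f0 + 8) << 5) - 1;
        my_res[1].start = base + (0x3f6 << 5);
        my_res[1].end   = base + ((0x3f6 + 1) << 5) - 1;
        return platform_device_register(&my_dev);
}
device_initcall(my_pata_init);
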
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 761c434a2488..56c64ccc9c21 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -20,22 +20,8 @@ EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
20atomic_t irq_err_count; 20atomic_t irq_err_count;
21 21
22/* 22/*
23 * MN10300 INTC controller operations 23 * MN10300 interrupt controller operations
24 */ 24 */
25static void mn10300_cpupic_disable(unsigned int irq)
26{
27 u16 tmp = GxICR(irq);
28 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
29 tmp = GxICR(irq);
30}
31
32static void mn10300_cpupic_enable(unsigned int irq)
33{
34 u16 tmp = GxICR(irq);
35 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
36 tmp = GxICR(irq);
37}
38
39static void mn10300_cpupic_ack(unsigned int irq) 25static void mn10300_cpupic_ack(unsigned int irq)
40{ 26{
41 u16 tmp; 27 u16 tmp;
@@ -60,26 +46,54 @@ static void mn10300_cpupic_mask_ack(unsigned int irq)
60static void mn10300_cpupic_unmask(unsigned int irq) 46static void mn10300_cpupic_unmask(unsigned int irq)
61{ 47{
62 u16 tmp = GxICR(irq); 48 u16 tmp = GxICR(irq);
63 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; 49 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
64 tmp = GxICR(irq); 50 tmp = GxICR(irq);
65} 51}
66 52
67static void mn10300_cpupic_end(unsigned int irq) 53static void mn10300_cpupic_unmask_clear(unsigned int irq)
68{ 54{
55 /* the MN10300 PIC latches its interrupt request bit, even after the
56 * device has ceased to assert its interrupt line and the interrupt
57 * channel has been disabled in the PIC, so for level-triggered
58 * interrupts we need to clear the request bit when we re-enable */
69 u16 tmp = GxICR(irq); 59 u16 tmp = GxICR(irq);
70 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE; 60 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
71 tmp = GxICR(irq); 61 tmp = GxICR(irq);
72} 62}
73 63
74static struct irq_chip mn10300_cpu_pic = { 64/*
75 .name = "cpu", 65 * MN10300 PIC level-triggered IRQ handling.
76 .disable = mn10300_cpupic_disable, 66 *
77 .enable = mn10300_cpupic_enable, 67 * The PIC has no 'ACK' function per se. It is possible to clear individual
68 * channel latches, but each latch relatches whether or not the channel is
69 * masked, so we need to clear the latch when we unmask the channel.
70 *
71 * Also for this reason, we don't supply an ack() op (it's unused anyway if
72 * mask_ack() is provided), and mask_ack() just masks.
73 */
74static struct irq_chip mn10300_cpu_pic_level = {
75 .name = "cpu_l",
76 .disable = mn10300_cpupic_mask,
77 .enable = mn10300_cpupic_unmask_clear,
78 .ack = NULL,
79 .mask = mn10300_cpupic_mask,
80 .mask_ack = mn10300_cpupic_mask,
81 .unmask = mn10300_cpupic_unmask_clear,
82};
83
84/*
85 * MN10300 PIC edge-triggered IRQ handling.
86 *
87 * We use the latch clearing function of the PIC as the 'ACK' function.
88 */
89static struct irq_chip mn10300_cpu_pic_edge = {
90 .name = "cpu_e",
91 .disable = mn10300_cpupic_mask,
92 .enable = mn10300_cpupic_unmask,
78 .ack = mn10300_cpupic_ack, 93 .ack = mn10300_cpupic_ack,
79 .mask = mn10300_cpupic_mask, 94 .mask = mn10300_cpupic_mask,
80 .mask_ack = mn10300_cpupic_mask_ack, 95 .mask_ack = mn10300_cpupic_mask_ack,
81 .unmask = mn10300_cpupic_unmask, 96 .unmask = mn10300_cpupic_unmask,
82 .end = mn10300_cpupic_end,
83}; 97};
84 98
85/* 99/*
@@ -114,7 +128,8 @@ void set_intr_level(int irq, u16 level)
114 */ 128 */
115void set_intr_postackable(int irq) 129void set_intr_postackable(int irq)
116{ 130{
117 set_irq_handler(irq, handle_level_irq); 131 set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
132 handle_level_irq);
118} 133}
119 134
120/* 135/*
@@ -126,8 +141,12 @@ void __init init_IRQ(void)
126 141
127 for (irq = 0; irq < NR_IRQS; irq++) 142 for (irq = 0; irq < NR_IRQS; irq++)
128 if (irq_desc[irq].chip == &no_irq_type) 143 if (irq_desc[irq].chip == &no_irq_type)
129 set_irq_chip_and_handler(irq, &mn10300_cpu_pic, 144 /* due to the PIC latching interrupt requests, even
130 handle_edge_irq); 145 * when the IRQ is disabled, IRQ_PENDING is superfluous
146 * and we can use handle_level_irq() for edge-triggered
147 * interrupts */
148 set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
149 handle_level_irq);
131 unit_init_IRQ(); 150 unit_init_IRQ();
132} 151}
133 152
diff --git a/arch/mn10300/unit-asb2303/unit-init.c b/arch/mn10300/unit-asb2303/unit-init.c
index 14b2c817cff8..70e8cb4ea266 100644
--- a/arch/mn10300/unit-asb2303/unit-init.c
+++ b/arch/mn10300/unit-asb2303/unit-init.c
@@ -51,7 +51,7 @@ void __init unit_init_IRQ(void)
51 switch (GET_XIRQ_TRIGGER(extnum)) { 51 switch (GET_XIRQ_TRIGGER(extnum)) {
52 case XIRQ_TRIGGER_HILEVEL: 52 case XIRQ_TRIGGER_HILEVEL:
53 case XIRQ_TRIGGER_LOWLEVEL: 53 case XIRQ_TRIGGER_LOWLEVEL:
54 set_irq_handler(XIRQ2IRQ(extnum), handle_level_irq); 54 set_intr_postackable(XIRQ2IRQ(extnum));
55 break; 55 break;
56 default: 56 default:
57 break; 57 break;
diff --git a/arch/mn10300/unit-asb2305/unit-init.c b/arch/mn10300/unit-asb2305/unit-init.c
index 6a352414a358..72812a9439ac 100644
--- a/arch/mn10300/unit-asb2305/unit-init.c
+++ b/arch/mn10300/unit-asb2305/unit-init.c
@@ -52,7 +52,7 @@ void __init unit_init_IRQ(void)
52 switch (GET_XIRQ_TRIGGER(extnum)) { 52 switch (GET_XIRQ_TRIGGER(extnum)) {
53 case XIRQ_TRIGGER_HILEVEL: 53 case XIRQ_TRIGGER_HILEVEL:
54 case XIRQ_TRIGGER_LOWLEVEL: 54 case XIRQ_TRIGGER_LOWLEVEL:
55 set_irq_handler(XIRQ2IRQ(extnum), handle_level_irq); 55 set_intr_postackable(XIRQ2IRQ(extnum));
56 break; 56 break;
57 default: 57 default:
58 break; 58 break;
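
The mn10300 rework above hinges on one hardware quirk: the PIC relatches a request even while the channel is masked. Edge-triggered channels can therefore use the latch clear as their ack() and keep a plain unmask, while level-triggered channels get no ack at all and instead clear the stale latch at unmask time; set_intr_postackable() now switches a channel onto the level-flavoured chip, which is what the two unit-init.c hunks rely on. The distinguishing op, restated from the hunk:

/* Level-triggered unmask: also write GxICR_DETECT to drop the stale
 * latch that may have been captured while the channel was masked. */
static void mn10300_cpupic_unmask_clear(unsigned int irq)
{
        u16 tmp = GxICR(irq);

        GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
        tmp = GxICR(irq);       /* read back to flush the write */
}
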
diff --git a/arch/powerpc/boot/dts/holly.dts b/arch/powerpc/boot/dts/holly.dts
index f87fe7b9ced9..c6e11ebecebb 100644
--- a/arch/powerpc/boot/dts/holly.dts
+++ b/arch/powerpc/boot/dts/holly.dts
@@ -133,61 +133,61 @@
133 reg = <0x00007400 0x00000400>; 133 reg = <0x00007400 0x00000400>;
134 big-endian; 134 big-endian;
135 }; 135 };
136 };
136 137
137 pci@1000 { 138 pci@c0001000 {
138 device_type = "pci"; 139 device_type = "pci";
139 compatible = "tsi109-pci", "tsi108-pci"; 140 compatible = "tsi109-pci", "tsi108-pci";
140 #interrupt-cells = <1>; 141 #interrupt-cells = <1>;
141 #size-cells = <2>; 142 #size-cells = <2>;
142 #address-cells = <3>; 143 #address-cells = <3>;
143 reg = <0x00001000 0x00001000>; 144 reg = <0xc0001000 0x00001000>;
144 bus-range = <0x0 0x0>; 145 bus-range = <0x0 0x0>;
145 /*----------------------------------------------------+ 146 /*----------------------------------------------------+
146 | PCI memory range. 147 | PCI memory range.
147 | 01 denotes I/O space 148 | 01 denotes I/O space
148 | 02 denotes 32-bit memory space 149 | 02 denotes 32-bit memory space
149 +----------------------------------------------------*/ 150 +----------------------------------------------------*/
150 ranges = <0x02000000 0x00000000 0x40000000 0x40000000 0x00000000 0x10000000 151 ranges = <0x02000000 0x00000000 0x40000000 0x40000000 0x00000000 0x10000000
151 0x01000000 0x00000000 0x00000000 0x7e000000 0x00000000 0x00010000>; 152 0x01000000 0x00000000 0x00000000 0x7e000000 0x00000000 0x00010000>;
152 clock-frequency = <133333332>; 153 clock-frequency = <133333332>;
153 interrupt-parent = <&MPIC>; 154 interrupt-parent = <&MPIC>;
155 interrupts = <0x17 0x2>;
156 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
157 /*----------------------------------------------------+
158 | The INTA, INTB, INTC, INTD are shared.
159 +----------------------------------------------------*/
160 interrupt-map = <
161 0x800 0x0 0x0 0x1 &RT0 0x24 0x0
162 0x800 0x0 0x0 0x2 &RT0 0x25 0x0
163 0x800 0x0 0x0 0x3 &RT0 0x26 0x0
164 0x800 0x0 0x0 0x4 &RT0 0x27 0x0
165
166 0x1000 0x0 0x0 0x1 &RT0 0x25 0x0
167 0x1000 0x0 0x0 0x2 &RT0 0x26 0x0
168 0x1000 0x0 0x0 0x3 &RT0 0x27 0x0
169 0x1000 0x0 0x0 0x4 &RT0 0x24 0x0
170
171 0x1800 0x0 0x0 0x1 &RT0 0x26 0x0
172 0x1800 0x0 0x0 0x2 &RT0 0x27 0x0
173 0x1800 0x0 0x0 0x3 &RT0 0x24 0x0
174 0x1800 0x0 0x0 0x4 &RT0 0x25 0x0
175
176 0x2000 0x0 0x0 0x1 &RT0 0x27 0x0
177 0x2000 0x0 0x0 0x2 &RT0 0x24 0x0
178 0x2000 0x0 0x0 0x3 &RT0 0x25 0x0
179 0x2000 0x0 0x0 0x4 &RT0 0x26 0x0
180 >;
181
182 RT0: router@1180 {
183 device_type = "pic-router";
184 interrupt-controller;
185 big-endian;
186 clock-frequency = <0>;
187 #address-cells = <0>;
188 #interrupt-cells = <2>;
154 interrupts = <0x17 0x2>; 189 interrupts = <0x17 0x2>;
155 interrupt-map-mask = <0xf800 0x0 0x0 0x7>; 190 interrupt-parent = <&MPIC>;
156 /*----------------------------------------------------+
157 | The INTA, INTB, INTC, INTD are shared.
158 +----------------------------------------------------*/
159 interrupt-map = <
160 0x800 0x0 0x0 0x1 &RT0 0x24 0x0
161 0x800 0x0 0x0 0x2 &RT0 0x25 0x0
162 0x800 0x0 0x0 0x3 &RT0 0x26 0x0
163 0x800 0x0 0x0 0x4 &RT0 0x27 0x0
164
165 0x1000 0x0 0x0 0x1 &RT0 0x25 0x0
166 0x1000 0x0 0x0 0x2 &RT0 0x26 0x0
167 0x1000 0x0 0x0 0x3 &RT0 0x27 0x0
168 0x1000 0x0 0x0 0x4 &RT0 0x24 0x0
169
170 0x1800 0x0 0x0 0x1 &RT0 0x26 0x0
171 0x1800 0x0 0x0 0x2 &RT0 0x27 0x0
172 0x1800 0x0 0x0 0x3 &RT0 0x24 0x0
173 0x1800 0x0 0x0 0x4 &RT0 0x25 0x0
174
175 0x2000 0x0 0x0 0x1 &RT0 0x27 0x0
176 0x2000 0x0 0x0 0x2 &RT0 0x24 0x0
177 0x2000 0x0 0x0 0x3 &RT0 0x25 0x0
178 0x2000 0x0 0x0 0x4 &RT0 0x26 0x0
179 >;
180
181 RT0: router@1180 {
182 device_type = "pic-router";
183 interrupt-controller;
184 big-endian;
185 clock-frequency = <0>;
186 #address-cells = <0>;
187 #interrupt-cells = <2>;
188 interrupts = <0x17 0x2>;
189 interrupt-parent = <&MPIC>;
190 };
191 }; 191 };
192 }; 192 };
193 193
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index d308a9f70f1b..31982d05d81a 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -34,11 +34,7 @@
34#include <asm/smp.h> 34#include <asm/smp.h>
35 35
36#ifdef CONFIG_HOTPLUG_CPU 36#ifdef CONFIG_HOTPLUG_CPU
37/* this is used for software suspend, and that shuts down 37#define cpu_should_die() cpu_is_offline(smp_processor_id())
38 * CPUs even while the system is still booting... */
39#define cpu_should_die() (cpu_is_offline(smp_processor_id()) && \
40 (system_state == SYSTEM_RUNNING \
41 || system_state == SYSTEM_BOOTING))
42#else 38#else
43#define cpu_should_die() 0 39#define cpu_should_die() 0
44#endif 40#endif
diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c
index ef74a0763ec1..8c619963becc 100644
--- a/arch/powerpc/platforms/fsl_uli1575.c
+++ b/arch/powerpc/platforms/fsl_uli1575.c
@@ -219,11 +219,21 @@ static void __devinit quirk_final_uli5249(struct pci_dev *dev)
219 int i; 219 int i;
220 u8 *dummy; 220 u8 *dummy;
221 struct pci_bus *bus = dev->bus; 221 struct pci_bus *bus = dev->bus;
222 resource_size_t end = 0;
223
224 for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCES+3; i++) {
225 unsigned long flags = pci_resource_flags(dev, i);
226 if ((flags & (IORESOURCE_MEM|IORESOURCE_PREFETCH)) == IORESOURCE_MEM)
227 end = pci_resource_end(dev, i);
228 }
222 229
223 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 230 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
224 if ((bus->resource[i]) && 231 if ((bus->resource[i]) &&
225 (bus->resource[i]->flags & IORESOURCE_MEM)) { 232 (bus->resource[i]->flags & IORESOURCE_MEM)) {
226 dummy = ioremap(bus->resource[i]->end - 3, 0x4); 233 if (bus->resource[i]->end == end)
234 dummy = ioremap(bus->resource[i]->start, 0x4);
235 else
236 dummy = ioremap(bus->resource[i]->end - 3, 0x4);
227 if (dummy) { 237 if (dummy) {
228 in_8(dummy); 238 in_8(dummy);
229 iounmap(dummy); 239 iounmap(dummy);
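
quirk_final_uli5249() previously issued its dummy read from the last dword of every memory window on the bus. The fix first records where the bridge's highest non-prefetchable memory window ends, and for the bus resource that ends there it reads from the start of the window instead. The two added pieces, restated from the hunk:

resource_size_t end = 0;

/* Highest non-prefetchable MEM window behind this bridge. */
for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCES + 3; i++) {
        unsigned long flags = pci_resource_flags(dev, i);

        if ((flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) == IORESOURCE_MEM)
                end = pci_resource_end(dev, i);
}

/* ...and in the read loop: */
if (bus->resource[i]->end == end)
        dummy = ioremap(bus->resource[i]->start, 0x4);
else
        dummy = ioremap(bus->resource[i]->end - 3, 0x4);
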
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index ca114fe46ffb..06acb1a18bbc 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -169,6 +169,8 @@ void init_cpu_timer(void)
169 169
170static void clock_comparator_interrupt(__u16 code) 170static void clock_comparator_interrupt(__u16 code)
171{ 171{
172 if (S390_lowcore.clock_comparator == -1ULL)
173 set_clock_comparator(S390_lowcore.clock_comparator);
172} 174}
173 175
174static void etr_timing_alert(struct etr_irq_parm *); 176static void etr_timing_alert(struct etr_irq_parm *);
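
The two added lines in clock_comparator_interrupt() handle a comparator that has been logically parked: -1ULL in the lowcore marks it disabled, and writing that far-future value back into the hardware quiesces a straggling comparator interrupt instead of leaving it to fire again. This pairs with the delay.c rework below, which saves and restores S390_lowcore.clock_comparator around its private wakeups:

static void clock_comparator_interrupt(__u16 code)
{
        /* -1ULL marks the comparator as disabled; pushing that value
         * back into the hardware silences a straggling interrupt. */
        if (S390_lowcore.clock_comparator == -1ULL)
                set_clock_comparator(S390_lowcore.clock_comparator);
}
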
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index fc6ab6094df8..0953cee05efc 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -1,14 +1,9 @@
1/* 1/*
2 * arch/s390/lib/delay.c
3 * Precise Delay Loops for S390 2 * Precise Delay Loops for S390
4 * 3 *
5 * S390 version 4 * Copyright IBM Corp. 1999,2008
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 *
9 * Derived from "arch/i386/lib/delay.c"
10 * Copyright (C) 1993 Linus Torvalds
11 * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
12 */ 7 */
13 8
14#include <linux/sched.h> 9#include <linux/sched.h>
@@ -29,30 +24,31 @@ void __delay(unsigned long loops)
29 asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); 24 asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
30} 25}
31 26
32/* 27static void __udelay_disabled(unsigned long usecs)
33 * Waits for 'usecs' microseconds using the TOD clock comparator.
34 */
35void __udelay(unsigned long usecs)
36{ 28{
37 u64 end, time, old_cc = 0; 29 unsigned long mask, cr0, cr0_saved;
38 unsigned long flags, cr0, mask, dummy; 30 u64 clock_saved;
39 int irq_context;
40 31
41 irq_context = in_interrupt(); 32 clock_saved = local_tick_disable();
42 if (!irq_context) 33 set_clock_comparator(get_clock() + ((u64) usecs << 12));
43 local_bh_disable(); 34 __ctl_store(cr0_saved, 0, 0);
44 local_irq_save(flags); 35 cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
45 if (raw_irqs_disabled_flags(flags)) { 36 __ctl_load(cr0 , 0, 0);
46 old_cc = local_tick_disable(); 37 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
47 S390_lowcore.clock_comparator = -1ULL; 38 trace_hardirqs_on();
48 __ctl_store(cr0, 0, 0); 39 __load_psw_mask(mask);
49 dummy = (cr0 & 0xffff00e0) | 0x00000800; 40 local_irq_disable();
50 __ctl_load(dummy , 0, 0); 41 __ctl_load(cr0_saved, 0, 0);
51 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; 42 local_tick_enable(clock_saved);
52 } else 43 set_clock_comparator(S390_lowcore.clock_comparator);
53 mask = psw_kernel_bits | PSW_MASK_WAIT | 44}
54 PSW_MASK_EXT | PSW_MASK_IO;
55 45
46static void __udelay_enabled(unsigned long usecs)
47{
48 unsigned long mask;
49 u64 end, time;
50
51 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO;
56 end = get_clock() + ((u64) usecs << 12); 52 end = get_clock() + ((u64) usecs << 12);
57 do { 53 do {
58 time = end < S390_lowcore.clock_comparator ? 54 time = end < S390_lowcore.clock_comparator ?
@@ -62,13 +58,37 @@ void __udelay(unsigned long usecs)
62 __load_psw_mask(mask); 58 __load_psw_mask(mask);
63 local_irq_disable(); 59 local_irq_disable();
64 } while (get_clock() < end); 60 } while (get_clock() < end);
61 set_clock_comparator(S390_lowcore.clock_comparator);
62}
65 63
66 if (raw_irqs_disabled_flags(flags)) { 64/*
67 __ctl_load(cr0, 0, 0); 65 * Waits for 'usecs' microseconds using the TOD clock comparator.
68 local_tick_enable(old_cc); 66 */
67void __udelay(unsigned long usecs)
68{
69 unsigned long flags;
70
71 preempt_disable();
72 local_irq_save(flags);
73 if (in_irq()) {
74 __udelay_disabled(usecs);
75 goto out;
76 }
77 if (in_softirq()) {
78 if (raw_irqs_disabled_flags(flags))
79 __udelay_disabled(usecs);
80 else
81 __udelay_enabled(usecs);
82 goto out;
69 } 83 }
70 if (!irq_context) 84 if (raw_irqs_disabled_flags(flags)) {
85 local_bh_disable();
86 __udelay_disabled(usecs);
71 _local_bh_enable(); 87 _local_bh_enable();
72 set_clock_comparator(S390_lowcore.clock_comparator); 88 goto out;
89 }
90 __udelay_enabled(usecs);
91out:
73 local_irq_restore(flags); 92 local_irq_restore(flags);
93 preempt_enable();
74} 94}
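
The __udelay() rewrite splits the wait into two helpers: __udelay_disabled() suspends the normal tick, arms a private clock-comparator wakeup and sleeps in an enabled-wait PSW with only external interrupts open, while __udelay_enabled() keeps I/O interrupts open too and clamps the comparator to whichever comes first, the delay end or the already-programmed event. The new entry point just picks the variant from the calling context; restated with the contexts spelled out:

void __udelay(unsigned long usecs)
{
        unsigned long flags;

        preempt_disable();
        local_irq_save(flags);
        if (in_irq()) {                         /* hard-irq context */
                __udelay_disabled(usecs);
                goto out;
        }
        if (in_softirq()) {                     /* softirq context */
                if (raw_irqs_disabled_flags(flags))
                        __udelay_disabled(usecs);
                else
                        __udelay_enabled(usecs);
                goto out;
        }
        if (raw_irqs_disabled_flags(flags)) {   /* task ctx, irqs off */
                local_bh_disable();
                __udelay_disabled(usecs);
                _local_bh_enable();
                goto out;
        }
        __udelay_enabled(usecs);                /* task ctx, irqs on */
out:
        local_irq_restore(flags);
        preempt_enable();
}
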
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index a1310c52fc0c..857e492c571e 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -492,7 +492,7 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
492 continue; 492 continue;
493 } 493 }
494 sh_symtab = sec_symtab->symtab; 494 sh_symtab = sec_symtab->symtab;
495 sym_strtab = sec->link->strtab; 495 sym_strtab = sec_symtab->link->strtab;
496 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { 496 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
497 Elf32_Rel *rel; 497 Elf32_Rel *rel;
498 Elf32_Sym *sym; 498 Elf32_Sym *sym;
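
The relocs.c one-liner fixes which string table the symbol names are resolved against: in ELF, a symbol table section's sh_link field names its companion string table, so the strtab must be reached through the symtab section (sec_symtab), not through the relocation section currently being walked. A minimal illustration of that linkage, using the standard <elf.h> types (the helper name is made up):

#include <elf.h>

/* A symtab's companion strtab is the section named by its sh_link. */
static const char *sym_name(const char *image, const Elf32_Shdr *shdrs,
                            const Elf32_Shdr *symtab, const Elf32_Sym *sym)
{
        const Elf32_Shdr *strtab = &shdrs[symtab->sh_link];

        return image + strtab->sh_offset + sym->st_name;
}
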
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index bfd10fd211cd..c102af85df9c 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1605,6 +1605,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
1605 */ 1605 */
1606 { 1606 {
1607 .callback = dmi_ignore_irq0_timer_override, 1607 .callback = dmi_ignore_irq0_timer_override,
1608 .ident = "HP nx6115 laptop",
1609 .matches = {
1610 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1611 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"),
1612 },
1613 },
1614 {
1615 .callback = dmi_ignore_irq0_timer_override,
1608 .ident = "HP NX6125 laptop", 1616 .ident = "HP NX6125 laptop",
1609 .matches = { 1617 .matches = {
1610 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 1618 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -1619,6 +1627,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
1619 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"), 1627 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
1620 }, 1628 },
1621 }, 1629 },
1630 {
1631 .callback = dmi_ignore_irq0_timer_override,
1632 .ident = "HP 6715b laptop",
1633 .matches = {
1634 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1635 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
1636 },
1637 },
1622 {} 1638 {}
1623}; 1639};
1624 1640
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index b117d7f8a564..885c8265e6b5 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -834,7 +834,7 @@ static int __init enable_mtrr_cleanup_setup(char *str)
834 enable_mtrr_cleanup = 1; 834 enable_mtrr_cleanup = 1;
835 return 0; 835 return 0;
836} 836}
837early_param("enble_mtrr_cleanup", enable_mtrr_cleanup_setup); 837early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
838 838
839struct var_mtrr_state { 839struct var_mtrr_state {
840 unsigned long range_startk; 840 unsigned long range_startk;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 8282a2139681..10435a120d22 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -455,12 +455,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
455 return NOTIFY_DONE; 455 return NOTIFY_DONE;
456 456
457 case DIE_NMI_IPI: 457 case DIE_NMI_IPI:
458 if (atomic_read(&kgdb_active) != -1) { 458 /* Just ignore, we will handle the roundup on DIE_NMI. */
459 /* KGDB CPU roundup */
460 kgdb_nmicallback(raw_smp_processor_id(), regs);
461 was_in_debug_nmi[raw_smp_processor_id()] = 1;
462 touch_nmi_watchdog();
463 }
464 return NOTIFY_DONE; 459 return NOTIFY_DONE;
465 460
466 case DIE_NMIUNKNOWN: 461 case DIE_NMIUNKNOWN:
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 49285f8fd4d5..be33a5442d82 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -626,7 +626,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
626 struct pci_dev *dev; 626 struct pci_dev *dev;
627 void *gatt; 627 void *gatt;
628 int i, error; 628 int i, error;
629 unsigned long start_pfn, end_pfn;
630 629
631 printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); 630 printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
632 aper_size = aper_base = info->aper_size = 0; 631 aper_size = aper_base = info->aper_size = 0;
@@ -672,12 +671,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
672 printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n", 671 printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
673 aper_base, aper_size>>10); 672 aper_base, aper_size>>10);
674 673
675 /* need to map that range */
676 end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
677 if (end_pfn > max_low_pfn_mapped) {
678 start_pfn = (aper_base>>PAGE_SHIFT);
679 init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
680 }
681 return 0; 674 return 0;
682 675
683 nommu: 676 nommu:
@@ -727,7 +720,8 @@ void __init gart_iommu_init(void)
727{ 720{
728 struct agp_kern_info info; 721 struct agp_kern_info info;
729 unsigned long iommu_start; 722 unsigned long iommu_start;
730 unsigned long aper_size; 723 unsigned long aper_base, aper_size;
724 unsigned long start_pfn, end_pfn;
731 unsigned long scratch; 725 unsigned long scratch;
732 long i; 726 long i;
733 727
@@ -765,8 +759,16 @@ void __init gart_iommu_init(void)
765 return; 759 return;
766 } 760 }
767 761
762 /* need to map that range */
763 aper_size = info.aper_size << 20;
764 aper_base = info.aper_base;
765 end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
766 if (end_pfn > max_low_pfn_mapped) {
767 start_pfn = (aper_base>>PAGE_SHIFT);
768 init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
769 }
770
768 printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); 771 printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
769 aper_size = info.aper_size * 1024 * 1024;
770 iommu_size = check_iommu_size(info.aper_base, aper_size); 772 iommu_size = check_iommu_size(info.aper_base, aper_size);
771 iommu_pages = iommu_size >> PAGE_SHIFT; 773 iommu_pages = iommu_size >> PAGE_SHIFT;
772 774
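
In pci-gart_64.c the direct-map fixup for the aperture moves from init_k8_gatt() into gart_iommu_init(), so the range is mapped on both setup paths, not only when init_k8_gatt() builds the table itself. The relocated block, restated (aper_size now arrives in MB and is converted first, which also lets the old duplicate conversion line be dropped):

/* Map the aperture if it lies above the already-mapped low memory. */
aper_size = info.aper_size << 20;
aper_base = info.aper_base;
end_pfn   = (aper_base >> PAGE_SHIFT) + (aper_size >> PAGE_SHIFT);
if (end_pfn > max_low_pfn_mapped) {
        start_pfn = aper_base >> PAGE_SHIFT;
        init_memory_mapping(start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}
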
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 6ca515d6db54..edfb09f30479 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -235,7 +235,7 @@ static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
235 const void *desc) 235 const void *desc)
236{ 236{
237 u32 *ldt_entry = (u32 *)desc; 237 u32 *ldt_entry = (u32 *)desc;
238 vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[1]); 238 vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
239} 239}
240 240
241static void vmi_load_sp0(struct tss_struct *tss, 241static void vmi_load_sp0(struct tss_struct *tss,
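
The closing vmi_32.c hunk is a classic copy-paste fix: the LDT write helper was dispatching through the paravirt write_idt_entry hook. Because the hooks share a signature, the compiler cannot catch the slip; a sketch of the failure mode (made-up struct, not the real vmi_ops layout):

#include <linux/types.h>

/* Two same-signature hooks: picking the wrong member compiles fine. */
struct table_ops {
        void (*write_idt_entry)(void *dt, int entry, u32 lo, u32 hi);
        void (*write_ldt_entry)(void *dt, int entry, u32 lo, u32 hi);
};

static void write_ldt(struct table_ops *ops, void *dt, int entry,
                      u32 lo, u32 hi)
{
        ops->write_ldt_entry(dt, entry, lo, hi);  /* was: write_idt_entry */
}
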