Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig | 3
-rw-r--r--  arch/alpha/include/asm/perf_event.h | 6
-rw-r--r--  arch/alpha/kernel/irq_alpha.c | 2
-rw-r--r--  arch/alpha/kernel/perf_event.c | 11
-rw-r--r--  arch/arm/kernel/perf_event.c | 4
-rw-r--r--  arch/arm/mach-msm/Kconfig | 8
-rw-r--r--  arch/arm/mach-msm/Makefile | 5
-rw-r--r--  arch/arm/mach-msm/board-msm7x30.c | 20
-rw-r--r--  arch/arm/mach-msm/board-qsd8x50.c | 19
-rw-r--r--  arch/arm/mach-msm/board-trout-gpio.c | 8
-rw-r--r--  arch/arm/mach-msm/board-trout-panel.c | 297
-rw-r--r--  arch/arm/mach-msm/clock.c | 15
-rw-r--r--  arch/arm/mach-msm/devices-msm7x00.c | 69
-rw-r--r--  arch/arm/mach-msm/devices-msm7x30.c | 72
-rw-r--r--  arch/arm/mach-msm/devices-msm8x60-iommu.c | 243
-rw-r--r--  arch/arm/mach-msm/devices-qsd8x50.c | 71
-rw-r--r--  arch/arm/mach-msm/devices.h | 6
-rw-r--r--  arch/arm/mach-msm/gpio-v2.c | 426
-rw-r--r--  arch/arm/mach-msm/include/mach/iommu.h | 15
-rw-r--r--  arch/arm/mach-msm/include/mach/iommu_hw-8xxx.h | 22
-rw-r--r--  arch/arm/mach-msm/include/mach/irqs-8x60.h | 7
-rw-r--r--  arch/arm/mach-msm/include/mach/msm_iomap-7x30.h | 3
-rw-r--r--  arch/arm/mach-msm/include/mach/msm_iomap-8x60.h | 3
-rw-r--r--  arch/arm/mach-msm/io.c | 1
-rw-r--r--  arch/arm/mach-msm/iommu.c | 146
-rw-r--r--  arch/arm/mach-msm/iommu_dev.c | 4
-rw-r--r--  arch/arm/mach-msm/sirc.c | 3
-rw-r--r--  arch/arm/mach-msm/smd.c | 17
-rw-r--r--  arch/arm/mach-msm/smd_debug.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3pandora.c | 32
-rw-r--r--  arch/arm/mach-shmobile/board-ap4evb.c | 70
-rw-r--r--  arch/arm/mach-shmobile/clock-sh7372.c | 13
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c | 2
-rw-r--r--  arch/powerpc/kernel/e500-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/mpc7450-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/perf_event.c | 2
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c | 2
-rw-r--r--  arch/powerpc/kernel/power4-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/power5+-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/power5-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/power6-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/power7-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/ppc970-pmu.c | 2
-rw-r--r--  arch/s390/Kconfig | 1
-rw-r--r--  arch/s390/include/asm/mutex.h | 2
-rw-r--r--  arch/s390/include/asm/qeth.h | 51
-rw-r--r--  arch/sh/Kconfig | 20
-rw-r--r--  arch/sh/boards/board-secureedge5410.c | 2
-rw-r--r--  arch/sh/boards/mach-rsk/devices-rsk7203.c | 37
-rw-r--r--  arch/sh/boards/mach-sdk7786/Makefile | 2
-rw-r--r--  arch/sh/boards/mach-sdk7786/nmi.c | 83
-rw-r--r--  arch/sh/boards/mach-sdk7786/setup.c | 1
-rw-r--r--  arch/sh/boards/mach-se/7206/setup.c | 6
-rw-r--r--  arch/sh/boards/mach-se/board-se7619.c | 6
-rw-r--r--  arch/sh/configs/migor_defconfig | 2
-rw-r--r--  arch/sh/drivers/pci/pci.c | 3
-rw-r--r--  arch/sh/include/asm/io.h | 345
-rw-r--r--  arch/sh/include/asm/io_generic.h | 25
-rw-r--r--  arch/sh/include/asm/machvec.h | 21
-rw-r--r--  arch/sh/include/asm/ptrace.h | 4
-rw-r--r--  arch/sh/include/asm/ptrace_32.h | 2
-rw-r--r--  arch/sh/include/asm/ptrace_64.h | 2
-rw-r--r--  arch/sh/include/asm/unaligned-sh4a.h | 164
-rw-r--r--  arch/sh/include/mach-sdk7786/mach/fpga.h | 8
-rw-r--r--  arch/sh/kernel/Makefile | 6
-rw-r--r--  arch/sh/kernel/cpu/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/proc.c | 148
-rw-r--r--  arch/sh/kernel/cpu/sh2/clock-sh7619.c | 22
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7201.c | 20
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7203.c | 21
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7206.c | 20
-rw-r--r--  arch/sh/kernel/cpu/sh4/perf_event.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/perf_event.c | 2
-rw-r--r--  arch/sh/kernel/io_generic.c | 180
-rw-r--r--  arch/sh/kernel/iomap.c | 165
-rw-r--r--  arch/sh/kernel/ioport.c | 43
-rw-r--r--  arch/sh/kernel/machvec.c | 22
-rw-r--r--  arch/sh/kernel/perf_event.c | 2
-rw-r--r--  arch/sh/kernel/setup.c | 144
-rw-r--r--  arch/sparc/boot/Makefile | 28
-rw-r--r--  arch/sparc/boot/piggyback.c | 272
-rw-r--r--  arch/sparc/boot/piggyback_32.c | 137
-rw-r--r--  arch/sparc/boot/piggyback_64.c | 110
-rw-r--r--  arch/sparc/include/asm/leon.h | 12
-rw-r--r--  arch/sparc/include/asm/leon_amba.h | 6
-rw-r--r--  arch/sparc/include/asm/oplib_32.h | 48
-rw-r--r--  arch/sparc/include/asm/oplib_64.h | 4
-rw-r--r--  arch/sparc/include/asm/perf_event.h | 4
-rw-r--r--  arch/sparc/kernel/head_32.S | 3
-rw-r--r--  arch/sparc/kernel/leon_kernel.c | 114
-rw-r--r--  arch/sparc/kernel/nmi.c | 2
-rw-r--r--  arch/sparc/kernel/perf_event.c | 9
-rw-r--r--  arch/sparc/kernel/prom_32.c | 27
-rw-r--r--  arch/sparc/kernel/setup_32.c | 3
-rw-r--r--  arch/sparc/mm/sun4c.c | 8
-rw-r--r--  arch/sparc/prom/Makefile | 2
-rw-r--r--  arch/sparc/prom/bootstr_32.c | 3
-rw-r--r--  arch/sparc/prom/console_32.c | 7
-rw-r--r--  arch/sparc/prom/console_64.c | 2
-rw-r--r--  arch/sparc/prom/devmap.c | 53
-rw-r--r--  arch/sparc/prom/init_64.c | 3
-rw-r--r--  arch/sparc/prom/misc_32.c | 2
-rw-r--r--  arch/sparc/prom/mp.c | 78
-rw-r--r--  arch/sparc/prom/palloc.c | 43
-rw-r--r--  arch/sparc/prom/ranges.c | 6
-rw-r--r--  arch/sparc/prom/tree_32.c | 50
-rw-r--r--  arch/x86/Kconfig | 41
-rw-r--r--  arch/x86/Kconfig.debug | 11
-rw-r--r--  arch/x86/boot/compressed/head_64.S | 2
-rw-r--r--  arch/x86/include/asm/alternative.h | 8
-rw-r--r--  arch/x86/include/asm/amd_nb.h | 49
-rw-r--r--  arch/x86/include/asm/apic.h | 1
-rw-r--r--  arch/x86/include/asm/apicdef.h | 1
-rw-r--r--  arch/x86/include/asm/bootparam.h | 1
-rw-r--r--  arch/x86/include/asm/fixmap.h | 4
-rw-r--r--  arch/x86/include/asm/i387.h | 24
-rw-r--r--  arch/x86/include/asm/io_apic.h | 8
-rw-r--r--  arch/x86/include/asm/irq.h | 4
-rw-r--r--  arch/x86/include/asm/kdebug.h | 2
-rw-r--r--  arch/x86/include/asm/mce.h | 3
-rw-r--r--  arch/x86/include/asm/microcode.h | 6
-rw-r--r--  arch/x86/include/asm/mpspec.h | 31
-rw-r--r--  arch/x86/include/asm/mpspec_def.h | 7
-rw-r--r--  arch/x86/include/asm/mrst-vrtc.h | 9
-rw-r--r--  arch/x86/include/asm/mrst.h | 14
-rw-r--r--  arch/x86/include/asm/msr-index.h | 16
-rw-r--r--  arch/x86/include/asm/nmi.h | 53
-rw-r--r--  arch/x86/include/asm/paravirt.h | 2
-rw-r--r--  arch/x86/include/asm/pci.h | 1
-rw-r--r--  arch/x86/include/asm/perf_event.h | 2
-rw-r--r--  arch/x86/include/asm/perf_event_p4.h | 63
-rw-r--r--  arch/x86/include/asm/setup.h | 6
-rw-r--r--  arch/x86/include/asm/smpboot_hooks.h | 1
-rw-r--r--  arch/x86/include/asm/stacktrace.h | 33
-rw-r--r--  arch/x86/include/asm/timer.h | 6
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 9
-rw-r--r--  arch/x86/kernel/Makefile | 1
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 11
-rw-r--r--  arch/x86/kernel/alternative.c | 52
-rw-r--r--  arch/x86/kernel/amd_nb.c | 135
-rw-r--r--  arch/x86/kernel/apb_timer.c | 1
-rw-r--r--  arch/x86/kernel/aperture_64.c | 10
-rw-r--r--  arch/x86/kernel/apic/Makefile | 5
-rw-r--r--  arch/x86/kernel/apic/apic.c | 119
-rw-r--r--  arch/x86/kernel/apic/hw_nmi.c | 36
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 83
-rw-r--r--  arch/x86/kernel/apic/nmi.c | 567
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 61
-rw-r--r--  arch/x86/kernel/cpu/common.c | 1
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 147
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 135
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 40
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 74
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 16
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 26
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 644
-rw-r--r--  arch/x86/kernel/dumpstack.c | 12
-rw-r--r--  arch/x86/kernel/dumpstack_32.c | 25
-rw-r--r--  arch/x86/kernel/dumpstack_64.c | 24
-rw-r--r--  arch/x86/kernel/early_printk.c | 3
-rw-r--r--  arch/x86/kernel/ftrace.c | 3
-rw-r--r--  arch/x86/kernel/head32.c | 3
-rw-r--r--  arch/x86/kernel/head_32.S | 83
-rw-r--r--  arch/x86/kernel/kprobes.c | 113
-rw-r--r--  arch/x86/kernel/microcode_amd.c | 34
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 34
-rw-r--r--  arch/x86/kernel/process.c | 10
-rw-r--r--  arch/x86/kernel/process_32.c | 2
-rw-r--r--  arch/x86/kernel/process_64.c | 2
-rw-r--r--  arch/x86/kernel/reboot_fixups_32.c | 16
-rw-r--r--  arch/x86/kernel/setup.c | 13
-rw-r--r--  arch/x86/kernel/smpboot.c | 36
-rw-r--r--  arch/x86/kernel/stacktrace.c | 8
-rw-r--r--  arch/x86/kernel/time.c | 18
-rw-r--r--  arch/x86/kernel/trampoline_64.S | 2
-rw-r--r--  arch/x86/kernel/traps.c | 35
-rw-r--r--  arch/x86/kernel/tsc.c | 96
-rw-r--r--  arch/x86/kernel/verify_cpu.S (renamed from arch/x86/kernel/verify_cpu_64.S) | 49
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 8
-rw-r--r--  arch/x86/lguest/i386_head.S | 105
-rw-r--r--  arch/x86/mm/Makefile | 2
-rw-r--r--  arch/x86/mm/amdtopology_64.c (renamed from arch/x86/mm/k8topology_64.c) | 12
-rw-r--r--  arch/x86/mm/init.c | 3
-rw-r--r--  arch/x86/mm/init_32.c | 20
-rw-r--r--  arch/x86/mm/kmemcheck/error.c | 2
-rw-r--r--  arch/x86/mm/numa_64.c | 22
-rw-r--r--  arch/x86/mm/pageattr.c | 33
-rw-r--r--  arch/x86/mm/setup_nx.c | 2
-rw-r--r--  arch/x86/mm/srat_32.c | 1
-rw-r--r--  arch/x86/mm/srat_64.c | 10
-rw-r--r--  arch/x86/oprofile/backtrace.c | 2
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 3
-rw-r--r--  arch/x86/oprofile/nmi_timer_int.c | 3
-rw-r--r--  arch/x86/oprofile/op_model_amd.c | 55
-rw-r--r--  arch/x86/oprofile/op_model_p4.c | 2
-rw-r--r--  arch/x86/pci/Makefile | 1
-rw-r--r--  arch/x86/pci/ce4100.c | 315
-rw-r--r--  arch/x86/pci/pcbios.c | 23
-rw-r--r--  arch/x86/platform/Makefile | 2
-rw-r--r--  arch/x86/platform/ce4100/Makefile | 1
-rw-r--r--  arch/x86/platform/ce4100/ce4100.c | 132
-rw-r--r--  arch/x86/platform/iris/Makefile | 1
-rw-r--r--  arch/x86/platform/iris/iris.c | 91
-rw-r--r--  arch/x86/platform/mrst/Makefile | 2
-rw-r--r--  arch/x86/platform/mrst/early_printk_mrst.c (renamed from arch/x86/kernel/early_printk_mrst.c) | 0
-rw-r--r--  arch/x86/platform/mrst/mrst.c | 546
-rw-r--r--  arch/x86/platform/mrst/vrtc.c | 165
-rw-r--r--  arch/x86/platform/sfi/sfi.c | 4
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 22
-rw-r--r--  arch/x86/platform/visws/visws_quirks.c | 2
210 files changed, 5316 insertions, 3746 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 8bf0fa652eb6..f78c2be4242b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -175,4 +175,7 @@ config HAVE_PERF_EVENTS_NMI
 config HAVE_ARCH_JUMP_LABEL
     bool
 
+config HAVE_ARCH_MUTEX_CPU_RELAX
+    bool
+
 source "kernel/gcov/Kconfig"
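[Illustration — not part of this commit.] The new HAVE_ARCH_MUTEX_CPU_RELAX symbol lets an architecture substitute its own relax primitive in the mutex spinning code; the generic side falls back to cpu_relax() when the symbol is not selected. A hedged C sketch of the expected wiring — the barrier()-only override body is illustrative, though the diffstat above does touch arch/s390/include/asm/mutex.h:

    /* Generic side (kernel/mutex.h of this era): default to cpu_relax()
     * unless the architecture selected HAVE_ARCH_MUTEX_CPU_RELAX. */
    #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
    #define arch_mutex_cpu_relax()  cpu_relax()
    #endif

    /* Arch side (illustrative override, e.g. in asm/mutex.h): a cheaper
     * compiler-barrier-only variant for CPUs where cpu_relax() is costly. */
    #define arch_mutex_cpu_relax()  barrier()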
diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h
index fe792ca818f6..5996e7a6757e 100644
--- a/arch/alpha/include/asm/perf_event.h
+++ b/arch/alpha/include/asm/perf_event.h
@@ -1,10 +1,4 @@
 #ifndef __ASM_ALPHA_PERF_EVENT_H
 #define __ASM_ALPHA_PERF_EVENT_H
 
-#ifdef CONFIG_PERF_EVENTS
-extern void init_hw_perf_events(void);
-#else
-static inline void init_hw_perf_events(void) { }
-#endif
-
 #endif /* __ASM_ALPHA_PERF_EVENT_H */
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 5f77afb88e89..4c8bb374eb0a 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -112,8 +112,6 @@ init_IRQ(void)
     wrent(entInt, 0);
 
     alpha_mv.init_irq();
-
-    init_hw_perf_events();
 }
 
 /*
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 1cc49683fb69..90561c45e7d8 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/kdebug.h>
 #include <linux/mutex.h>
+#include <linux/init.h>
 
 #include <asm/hwrpb.h>
 #include <asm/atomic.h>
@@ -863,13 +864,13 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 /*
  * Init call to initialise performance events at kernel startup.
  */
-void __init init_hw_perf_events(void)
+int __init init_hw_perf_events(void)
 {
     pr_info("Performance events: ");
 
     if (!supported_cpu()) {
         pr_cont("No support for your CPU.\n");
-        return;
+        return 0;
     }
 
     pr_cont("Supported CPU type!\n");
@@ -881,6 +882,8 @@ void __init init_hw_perf_events(void)
     /* And set up PMU specification */
     alpha_pmu = &ev67_pmu;
 
-    perf_pmu_register(&pmu);
-}
+    perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 
+    return 0;
+}
+early_initcall(init_hw_perf_events);
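[Illustration — not part of this commit.] The three-argument perf_pmu_register() seen here names the PMU and binds it to a type index; registering the CPU PMU as PERF_TYPE_RAW keeps existing raw-event users working. A minimal userspace sketch using the standard perf_event_open(2) interface — the 0x42 event code is a placeholder, not a real Alpha event:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <unistd.h>

    int open_raw_counter(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_RAW;   /* routed to the PMU registered above */
        attr.size = sizeof(attr);
        attr.config = 0x42;          /* placeholder CPU-specific event code */

        /* count on the calling thread, on any CPU */
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }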
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 624e2a5de2b3..5efa2647a2fb 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -654,11 +654,11 @@ init_hw_perf_events(void)
         pr_info("no hardware support available\n");
     }
 
-    perf_pmu_register(&pmu);
+    perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 
     return 0;
 }
-arch_initcall(init_hw_perf_events);
+early_initcall(init_hw_perf_events);
 
 /*
  * Callchain handling code.
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 31e5fd63ec9a..fae931ac2e56 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -44,6 +44,7 @@ config ARCH_MSM8X60
     select CPU_V7
     select MSM_V2_TLMM
     select MSM_GPIOMUX
+    select IOMMU_API
 
 endchoice
 
@@ -124,6 +125,10 @@ config MACH_MSM8X60_FFA
 
 endmenu
 
+config IOMMU_PGTABLES_L2
+    def_bool y
+    depends on ARCH_MSM8X60 && MMU && SMP && CPU_DCACHE_DISABLE=n
+
 config MSM_DEBUG_UART
     int
     default 1 if MSM_DEBUG_UART1
@@ -164,4 +169,7 @@ config MSM_GPIOMUX
 
 config MSM_V2_TLMM
     bool
+
+config IOMMU_API
+    bool
 endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index b5a7b07a44f5..59646bbd6195 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o
 obj-$(CONFIG_MSM_SMD) += last_radio_log.o
 
 obj-$(CONFIG_MACH_TROUT) += board-trout.o board-trout-gpio.o board-trout-mmc.o devices-msm7x00.o
+obj-$(CONFIG_MACH_TROUT) += board-trout.o board-trout-gpio.o board-trout-mmc.o board-trout-panel.o devices-msm7x00.o
 obj-$(CONFIG_MACH_HALIBUT) += board-halibut.o devices-msm7x00.o
 obj-$(CONFIG_ARCH_MSM7X30) += board-msm7x30.o devices-msm7x30.o
 obj-$(CONFIG_ARCH_QSD8X50) += board-qsd8x50.o devices-qsd8x50.o
@@ -28,6 +29,8 @@ obj-$(CONFIG_ARCH_MSM8X60) += board-msm8x60.o
 obj-$(CONFIG_ARCH_MSM7X30) += gpiomux-7x30.o gpiomux-v1.o gpiomux.o
 obj-$(CONFIG_ARCH_QSD8X50) += gpiomux-8x50.o gpiomux-v1.o gpiomux.o
 obj-$(CONFIG_ARCH_MSM8X60) += gpiomux-8x60.o gpiomux-v2.o gpiomux.o
-ifndef CONFIG_MSM_V2_TLMM
+ifdef CONFIG_MSM_V2_TLMM
+obj-y += gpio-v2.o
+else
 obj-y += gpio.o
 endif
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 05241df3f9b6..6f3b9735e970 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -22,6 +22,7 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/smsc911x.h>
+#include <linux/usb/msm_hsusb.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -39,11 +40,26 @@
 
 extern struct sys_timer msm_timer;
 
+static int hsusb_phy_init_seq[] = {
+    0x30, 0x32,    /* Enable and set Pre-Emphasis Depth to 20% */
+    0x02, 0x36,    /* Disable CDR Auto Reset feature */
+    -1
+};
+
+static struct msm_otg_platform_data msm_otg_pdata = {
+    .phy_init_seq = hsusb_phy_init_seq,
+    .mode         = USB_PERIPHERAL,
+    .otg_control  = OTG_PHY_CONTROL,
+};
+
 static struct platform_device *devices[] __initdata = {
 #if defined(CONFIG_SERIAL_MSM) || defined(CONFIG_MSM_SERIAL_DEBUGGER)
     &msm_device_uart2,
 #endif
     &msm_device_smd,
+    &msm_device_otg,
+    &msm_device_hsusb,
+    &msm_device_hsusb_host,
 };
 
 static void __init msm7x30_init_irq(void)
@@ -53,6 +69,10 @@ static void __init msm7x30_init_irq(void)
 
 static void __init msm7x30_init(void)
 {
+    msm_device_otg.dev.platform_data = &msm_otg_pdata;
+    msm_device_hsusb.dev.parent = &msm_device_otg.dev;
+    msm_device_hsusb_host.dev.parent = &msm_device_otg.dev;
+
     platform_add_devices(devices, ARRAY_SIZE(devices));
 }
 
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index ed2af4ad97ed..2e8391307f55 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -20,6 +20,7 @@
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
+#include <linux/usb/msm_hsusb.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -74,9 +75,24 @@ static int __init msm_init_smc91x(void)
 }
 module_init(msm_init_smc91x);
 
+static int hsusb_phy_init_seq[] = {
+    0x08, 0x31,    /* Increase HS Driver Amplitude */
+    0x20, 0x32,    /* Enable and set Pre-Emphasis Depth to 10% */
+    -1
+};
+
+static struct msm_otg_platform_data msm_otg_pdata = {
+    .phy_init_seq = hsusb_phy_init_seq,
+    .mode         = USB_PERIPHERAL,
+    .otg_control  = OTG_PHY_CONTROL,
+};
+
 static struct platform_device *devices[] __initdata = {
     &msm_device_uart3,
     &msm_device_smd,
+    &msm_device_otg,
+    &msm_device_hsusb,
+    &msm_device_hsusb_host,
 };
 
 static void __init qsd8x50_map_io(void)
@@ -93,6 +109,9 @@ static void __init qsd8x50_init_irq(void)
 
 static void __init qsd8x50_init(void)
 {
+    msm_device_otg.dev.platform_data = &msm_otg_pdata;
+    msm_device_hsusb.dev.parent = &msm_device_otg.dev;
+    msm_device_hsusb_host.dev.parent = &msm_device_otg.dev;
     platform_add_devices(devices, ARRAY_SIZE(devices));
 }
 
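[Illustration — not part of this commit.] The phy_init_seq arrays above appear to be (value, register) pairs terminated by -1, as their inline comments suggest. A hedged sketch of how an OTG driver could apply such a sequence; phy_write() is a hypothetical accessor standing in for whatever ULPI write helper the driver actually uses:

    /* Hedged illustration: walk a -1 terminated (value, register) list.
     * phy_write() is hypothetical; substitute the driver's PHY accessor. */
    static void apply_phy_init_seq(const int *seq)
    {
        while (seq[0] >= 0) {
            phy_write(seq[0], seq[1]);    /* value first, then register */
            seq += 2;
        }
    }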
diff --git a/arch/arm/mach-msm/board-trout-gpio.c b/arch/arm/mach-msm/board-trout-gpio.c
index c50f3afc3134..f8c09ef6666f 100644
--- a/arch/arm/mach-msm/board-trout-gpio.c
+++ b/arch/arm/mach-msm/board-trout-gpio.c
@@ -72,6 +72,13 @@ static int msm_gpiolib_direction_output(struct gpio_chip *chip,
     return 0;
 }
 
+static int trout_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+    struct msm_gpio_chip *msm_gpio = to_msm_gpio_chip(chip);
+
+    return TROUT_GPIO_TO_INT(offset + chip->base);
+}
+
 #define TROUT_GPIO_BANK(name, reg_num, base_gpio, shadow_val)    \
     {                                                            \
         .chip = {                                                \
@@ -80,6 +87,7 @@ static int msm_gpiolib_direction_output(struct gpio_chip *chip,
             .direction_output = msm_gpiolib_direction_output,    \
             .get = msm_gpiolib_get,                              \
             .set = msm_gpiolib_set,                              \
+            .to_irq = trout_gpio_to_irq,                         \
             .base = base_gpio,                                   \
             .ngpio = 8,                                          \
         },                                                       \
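[Illustration — not part of this commit.] With .to_irq wired up, drivers on trout can resolve a GPIO's interrupt line through the generic gpiolib call instead of the mach-specific macro. A short sketch using the standard gpiolib/IRQ API; the GPIO number and handler are placeholders:

    /* Placeholder GPIO 27 and handler; error handling trimmed. */
    int irq = gpio_to_irq(27);    /* now routed via trout_gpio_to_irq() */
    if (irq < 0)
        return irq;
    ret = request_irq(irq, my_handler, IRQF_TRIGGER_RISING,
                      "trout-gpio", NULL);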
diff --git a/arch/arm/mach-msm/board-trout-panel.c b/arch/arm/mach-msm/board-trout-panel.c
new file mode 100644
index 000000000000..729bb49a44ca
--- /dev/null
+++ b/arch/arm/mach-msm/board-trout-panel.c
@@ -0,0 +1,297 @@
+/* linux/arch/arm/mach-msm/board-trout-mddi.c
+** Author: Brian Swetland <swetland@google.com>
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/leds.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <asm/io.h>
+#include <asm/gpio.h>
+#include <asm/mach-types.h>
+
+#include <mach/msm_fb.h>
+#include <mach/vreg.h>
+
+#include "board-trout.h"
+#include "proc_comm.h"
+#include "devices.h"
+
+#define TROUT_DEFAULT_BACKLIGHT_BRIGHTNESS 255
+
+#define MDDI_CLIENT_CORE_BASE  0x108000
+#define LCD_CONTROL_BLOCK_BASE 0x110000
+#define SPI_BLOCK_BASE         0x120000
+#define I2C_BLOCK_BASE         0x130000
+#define PWM_BLOCK_BASE         0x140000
+#define GPIO_BLOCK_BASE        0x150000
+#define SYSTEM_BLOCK1_BASE     0x160000
+#define SYSTEM_BLOCK2_BASE     0x170000
+
+
+#define DPSUS       (MDDI_CLIENT_CORE_BASE|0x24)
+#define SYSCLKENA   (MDDI_CLIENT_CORE_BASE|0x2C)
+#define PWM0OFF     (PWM_BLOCK_BASE|0x1C)
+
+#define V_VDDE2E_VDD2_GPIO 0
+#define MDDI_RST_N 82
+
+#define MDDICAP0    (MDDI_CLIENT_CORE_BASE|0x00)
+#define MDDICAP1    (MDDI_CLIENT_CORE_BASE|0x04)
+#define MDDICAP2    (MDDI_CLIENT_CORE_BASE|0x08)
+#define MDDICAP3    (MDDI_CLIENT_CORE_BASE|0x0C)
+#define MDCAPCHG    (MDDI_CLIENT_CORE_BASE|0x10)
+#define MDCRCERC    (MDDI_CLIENT_CORE_BASE|0x14)
+#define TTBUSSEL    (MDDI_CLIENT_CORE_BASE|0x18)
+#define DPSET0      (MDDI_CLIENT_CORE_BASE|0x1C)
+#define DPSET1      (MDDI_CLIENT_CORE_BASE|0x20)
+#define DPSUS       (MDDI_CLIENT_CORE_BASE|0x24)
+#define DPRUN       (MDDI_CLIENT_CORE_BASE|0x28)
+#define SYSCKENA    (MDDI_CLIENT_CORE_BASE|0x2C)
+#define TESTMODE    (MDDI_CLIENT_CORE_BASE|0x30)
+#define FIFOMONI    (MDDI_CLIENT_CORE_BASE|0x34)
+#define INTMONI     (MDDI_CLIENT_CORE_BASE|0x38)
+#define MDIOBIST    (MDDI_CLIENT_CORE_BASE|0x3C)
+#define MDIOPSET    (MDDI_CLIENT_CORE_BASE|0x40)
+#define BITMAP0     (MDDI_CLIENT_CORE_BASE|0x44)
+#define BITMAP1     (MDDI_CLIENT_CORE_BASE|0x48)
+#define BITMAP2     (MDDI_CLIENT_CORE_BASE|0x4C)
+#define BITMAP3     (MDDI_CLIENT_CORE_BASE|0x50)
+#define BITMAP4     (MDDI_CLIENT_CORE_BASE|0x54)
+
+#define SRST        (LCD_CONTROL_BLOCK_BASE|0x00)
+#define PORT_ENB    (LCD_CONTROL_BLOCK_BASE|0x04)
+#define START       (LCD_CONTROL_BLOCK_BASE|0x08)
+#define PORT        (LCD_CONTROL_BLOCK_BASE|0x0C)
+#define CMN         (LCD_CONTROL_BLOCK_BASE|0x10)
+#define GAMMA       (LCD_CONTROL_BLOCK_BASE|0x14)
+#define INTFLG      (LCD_CONTROL_BLOCK_BASE|0x18)
+#define INTMSK      (LCD_CONTROL_BLOCK_BASE|0x1C)
+#define MPLFBUF     (LCD_CONTROL_BLOCK_BASE|0x20)
+#define HDE_LEFT    (LCD_CONTROL_BLOCK_BASE|0x24)
+#define VDE_TOP     (LCD_CONTROL_BLOCK_BASE|0x28)
+#define PXL         (LCD_CONTROL_BLOCK_BASE|0x30)
+#define HCYCLE      (LCD_CONTROL_BLOCK_BASE|0x34)
+#define HSW         (LCD_CONTROL_BLOCK_BASE|0x38)
+#define HDE_START   (LCD_CONTROL_BLOCK_BASE|0x3C)
+#define HDE_SIZE    (LCD_CONTROL_BLOCK_BASE|0x40)
+#define VCYCLE      (LCD_CONTROL_BLOCK_BASE|0x44)
+#define VSW         (LCD_CONTROL_BLOCK_BASE|0x48)
+#define VDE_START   (LCD_CONTROL_BLOCK_BASE|0x4C)
+#define VDE_SIZE    (LCD_CONTROL_BLOCK_BASE|0x50)
+#define WAKEUP      (LCD_CONTROL_BLOCK_BASE|0x54)
+#define WSYN_DLY    (LCD_CONTROL_BLOCK_BASE|0x58)
+#define REGENB      (LCD_CONTROL_BLOCK_BASE|0x5C)
+#define VSYNIF      (LCD_CONTROL_BLOCK_BASE|0x60)
+#define WRSTB       (LCD_CONTROL_BLOCK_BASE|0x64)
+#define RDSTB       (LCD_CONTROL_BLOCK_BASE|0x68)
+#define ASY_DATA    (LCD_CONTROL_BLOCK_BASE|0x6C)
+#define ASY_DATB    (LCD_CONTROL_BLOCK_BASE|0x70)
+#define ASY_DATC    (LCD_CONTROL_BLOCK_BASE|0x74)
+#define ASY_DATD    (LCD_CONTROL_BLOCK_BASE|0x78)
+#define ASY_DATE    (LCD_CONTROL_BLOCK_BASE|0x7C)
+#define ASY_DATF    (LCD_CONTROL_BLOCK_BASE|0x80)
+#define ASY_DATG    (LCD_CONTROL_BLOCK_BASE|0x84)
+#define ASY_DATH    (LCD_CONTROL_BLOCK_BASE|0x88)
+#define ASY_CMDSET  (LCD_CONTROL_BLOCK_BASE|0x8C)
+
+#define SSICTL      (SPI_BLOCK_BASE|0x00)
+#define SSITIME     (SPI_BLOCK_BASE|0x04)
+#define SSITX       (SPI_BLOCK_BASE|0x08)
+#define SSIRX       (SPI_BLOCK_BASE|0x0C)
+#define SSIINTC     (SPI_BLOCK_BASE|0x10)
+#define SSIINTS     (SPI_BLOCK_BASE|0x14)
+#define SSIDBG1     (SPI_BLOCK_BASE|0x18)
+#define SSIDBG2     (SPI_BLOCK_BASE|0x1C)
+#define SSIID       (SPI_BLOCK_BASE|0x20)
+
+#define WKREQ       (SYSTEM_BLOCK1_BASE|0x00)
+#define CLKENB      (SYSTEM_BLOCK1_BASE|0x04)
+#define DRAMPWR     (SYSTEM_BLOCK1_BASE|0x08)
+#define INTMASK     (SYSTEM_BLOCK1_BASE|0x0C)
+#define GPIOSEL     (SYSTEM_BLOCK2_BASE|0x00)
+
+#define GPIODATA    (GPIO_BLOCK_BASE|0x00)
+#define GPIODIR     (GPIO_BLOCK_BASE|0x04)
+#define GPIOIS      (GPIO_BLOCK_BASE|0x08)
+#define GPIOIBE     (GPIO_BLOCK_BASE|0x0C)
+#define GPIOIEV     (GPIO_BLOCK_BASE|0x10)
+#define GPIOIE      (GPIO_BLOCK_BASE|0x14)
+#define GPIORIS     (GPIO_BLOCK_BASE|0x18)
+#define GPIOMIS     (GPIO_BLOCK_BASE|0x1C)
+#define GPIOIC      (GPIO_BLOCK_BASE|0x20)
+#define GPIOOMS     (GPIO_BLOCK_BASE|0x24)
+#define GPIOPC      (GPIO_BLOCK_BASE|0x28)
+#define GPIOID      (GPIO_BLOCK_BASE|0x30)
+
+#define SPI_WRITE(reg, val) \
+    { SSITX, 0x00010000 | (((reg) & 0xff) << 8) | ((val) & 0xff) }, \
+    { 0, 5 },
+
+#define SPI_WRITE1(reg) \
+    { SSITX, (reg) & 0xff }, \
+    { 0, 5 },
+
+struct mddi_table {
+    uint32_t reg;
+    uint32_t value;
+};
+static struct mddi_table mddi_toshiba_init_table[] = {
+    { DPSET0,   0x09e90046 },
+    { DPSET1,   0x00000118 },
+    { DPSUS,    0x00000000 },
+    { DPRUN,    0x00000001 },
+    { 1,        14         },    /* msleep 14 */
+    { SYSCKENA, 0x00000001 },
+    { CLKENB,   0x0000A1EF },    /* # SYS.CLKENB  # Enable clocks for each module (without DCLK , i2cCLK) */
+
+    { GPIODATA, 0x02000200 },    /* # GPI .GPIODATA  # GPIO2(RESET_LCD_N) set to 0 , GPIO3(eDRAM_Power) set to 0 */
+    { GPIODIR,  0x000030D  },    /* 24D  # GPI .GPIODIR  # Select direction of GPIO port (0,2,3,6,9 output) */
+    { GPIOSEL,  0/*0x00000173*/},/* # SYS.GPIOSEL  # GPIO port multiplexing control */
+    { GPIOPC,   0x03C300C0 },    /* # GPI .GPIOPC  # GPIO2,3 PD cut */
+    { WKREQ,    0x00000000 },    /* # SYS.WKREQ  # Wake-up request event is VSYNC alignment */
+
+    { GPIOIBE,  0x000003FF },
+    { GPIOIS,   0x00000000 },
+    { GPIOIC,   0x000003FF },
+    { GPIOIE,   0x00000000 },
+
+    { GPIODATA, 0x00040004 },    /* # GPI .GPIODATA  # eDRAM VD supply */
+    { 1,        1          },    /* msleep 1 */
+    { GPIODATA, 0x02040004 },    /* # GPI .GPIODATA  # eDRAM VD supply */
+    { DRAMPWR,  0x00000001 },    /* eDRAM power */
+};
+
+#define GPIOSEL_VWAKEINT (1U << 0)
+#define INTMASK_VWAKEOUT (1U << 0)
+
+
+static struct clk *gp_clk;
+static int trout_new_backlight = 1;
+static struct vreg *vreg_mddi_1v5;
+static struct vreg *vreg_lcm_2v85;
+
+static void trout_process_mddi_table(struct msm_mddi_client_data *client_data,
+                                     struct mddi_table *table, size_t count)
+{
+    int i;
+    for (i = 0; i < count; i++) {
+        uint32_t reg = table[i].reg;
+        uint32_t value = table[i].value;
+
+        if (reg == 0)
+            udelay(value);
+        else if (reg == 1)
+            msleep(value);
+        else
+            client_data->remote_write(client_data, value, reg);
+    }
+}
+
+static int trout_mddi_toshiba_client_init(
+    struct msm_mddi_bridge_platform_data *bridge_data,
+    struct msm_mddi_client_data *client_data)
+{
+    int panel_id;
+
+    client_data->auto_hibernate(client_data, 0);
+    trout_process_mddi_table(client_data, mddi_toshiba_init_table,
+                             ARRAY_SIZE(mddi_toshiba_init_table));
+    client_data->auto_hibernate(client_data, 1);
+    panel_id = (client_data->remote_read(client_data, GPIODATA) >> 4) & 3;
+    if (panel_id > 1) {
+        printk(KERN_WARNING "unknown panel id at mddi_enable\n");
+        return -1;
+    }
+    return 0;
+}
+
+static int trout_mddi_toshiba_client_uninit(
+    struct msm_mddi_bridge_platform_data *bridge_data,
+    struct msm_mddi_client_data *client_data)
+{
+    return 0;
+}
+
+static struct resource resources_msm_fb[] = {
+    {
+        .start = MSM_FB_BASE,
+        .end = MSM_FB_BASE + MSM_FB_SIZE,
+        .flags = IORESOURCE_MEM,
+    },
+};
+
+struct msm_mddi_bridge_platform_data toshiba_client_data = {
+    .init = trout_mddi_toshiba_client_init,
+    .uninit = trout_mddi_toshiba_client_uninit,
+    .fb_data = {
+        .xres = 320,
+        .yres = 480,
+        .width = 45,
+        .height = 67,
+        .output_format = 0,
+    },
+};
+
+static struct msm_mddi_platform_data mddi_pdata = {
+    .clk_rate = 122880000,
+    .fb_resource = resources_msm_fb,
+    .num_clients = 1,
+    .client_platform_data = {
+        {
+            .product_id = (0xd263 << 16 | 0),
+            .name = "mddi_c_d263_0000",
+            .id = 0,
+            .client_data = &toshiba_client_data,
+            .clk_rate = 0,
+        },
+    },
+};
+
+int __init trout_init_panel(void)
+{
+    int rc;
+
+    if (!machine_is_trout())
+        return 0;
+    vreg_mddi_1v5 = vreg_get(0, "gp2");
+    if (IS_ERR(vreg_mddi_1v5))
+        return PTR_ERR(vreg_mddi_1v5);
+    vreg_lcm_2v85 = vreg_get(0, "gp4");
+    if (IS_ERR(vreg_lcm_2v85))
+        return PTR_ERR(vreg_lcm_2v85);
+
+    trout_new_backlight = system_rev >= 5;
+    if (trout_new_backlight) {
+        uint32_t config = PCOM_GPIO_CFG(27, 0, GPIO_OUTPUT,
+                                        GPIO_NO_PULL, GPIO_8MA);
+        msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0);
+    } else {
+        uint32_t config = PCOM_GPIO_CFG(27, 1, GPIO_OUTPUT,
+                                        GPIO_NO_PULL, GPIO_8MA);
+        msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0);
+
+        gp_clk = clk_get(NULL, "gp_clk");
+        if (IS_ERR(gp_clk)) {
+            printk(KERN_ERR "trout_init_panel: could not get gp"
+                   "clock\n");
+            gp_clk = NULL;
+        }
+        rc = clk_set_rate(gp_clk, 19200000);
+        if (rc)
+            printk(KERN_ERR "trout_init_panel: set clock rate "
+                   "failed\n");
+    }
+
+    rc = platform_device_register(&msm_device_mdp);
+    if (rc)
+        return rc;
+    msm_device_mddi0.dev.platform_data = &mddi_pdata;
+    return platform_device_register(&msm_device_mddi0);
+}
+
+device_initcall(trout_init_panel);
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index c57210f4f06a..2069bfaa3a26 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -120,6 +120,21 @@ EXPORT_SYMBOL(clk_get_rate);
 
 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
+    int ret;
+    if (clk->flags & CLKFLAG_MAX) {
+        ret = clk->ops->set_max_rate(clk->id, rate);
+        if (ret)
+            return ret;
+    }
+    if (clk->flags & CLKFLAG_MIN) {
+        ret = clk->ops->set_min_rate(clk->id, rate);
+        if (ret)
+            return ret;
+    }
+
+    if (clk->flags & CLKFLAG_MAX || clk->flags & CLKFLAG_MIN)
+        return ret;
+
     return clk->ops->set_rate(clk->id, rate);
 }
 EXPORT_SYMBOL(clk_set_rate);
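[Illustration — not part of this commit.] After this change, a clock carrying the min/max flags has clk_set_rate() push the requested rate through the platform's set_max_rate and set_min_rate hooks instead of the plain set_rate; "mddi_clk" in devices-msm7x00.c below is converted to exactly this CLK_MINMAX behaviour. Client code is unchanged — a hedged sketch of a consumer (the device pointer and error handling are placeholders):

    /* Standard clk API usage; for a CLK_MINMAX clock this now clamps
     * both the minimum and maximum to the one requested rate. */
    struct clk *mddi_clk = clk_get(&pdev->dev, "mddi_clk");
    if (IS_ERR(mddi_clk))
        return PTR_ERR(mddi_clk);
    ret = clk_set_rate(mddi_clk, 122880000);  /* rate used by mddi_pdata above */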
diff --git a/arch/arm/mach-msm/devices-msm7x00.c b/arch/arm/mach-msm/devices-msm7x00.c
index 4e8c0bcdc92d..fb548a8a21db 100644
--- a/arch/arm/mach-msm/devices-msm7x00.c
+++ b/arch/arm/mach-msm/devices-msm7x00.c
@@ -347,6 +347,73 @@ int __init msm_add_sdcc(unsigned int controller,
     return platform_device_register(pdev);
 }
 
+static struct resource resources_mddi0[] = {
+    {
+        .start = MSM_PMDH_PHYS,
+        .end = MSM_PMDH_PHYS + MSM_PMDH_SIZE - 1,
+        .flags = IORESOURCE_MEM,
+    },
+    {
+        .start = INT_MDDI_PRI,
+        .end = INT_MDDI_PRI,
+        .flags = IORESOURCE_IRQ,
+    },
+};
+
+static struct resource resources_mddi1[] = {
+    {
+        .start = MSM_EMDH_PHYS,
+        .end = MSM_EMDH_PHYS + MSM_EMDH_SIZE - 1,
+        .flags = IORESOURCE_MEM,
+    },
+    {
+        .start = INT_MDDI_EXT,
+        .end = INT_MDDI_EXT,
+        .flags = IORESOURCE_IRQ,
+    },
+};
+
+struct platform_device msm_device_mddi0 = {
+    .name = "msm_mddi",
+    .id = 0,
+    .num_resources = ARRAY_SIZE(resources_mddi0),
+    .resource = resources_mddi0,
+    .dev = {
+        .coherent_dma_mask = 0xffffffff,
+    },
+};
+
+struct platform_device msm_device_mddi1 = {
+    .name = "msm_mddi",
+    .id = 1,
+    .num_resources = ARRAY_SIZE(resources_mddi1),
+    .resource = resources_mddi1,
+    .dev = {
+        .coherent_dma_mask = 0xffffffff,
+    },
+};
+
+static struct resource resources_mdp[] = {
+    {
+        .start = MSM_MDP_PHYS,
+        .end = MSM_MDP_PHYS + MSM_MDP_SIZE - 1,
+        .name = "mdp",
+        .flags = IORESOURCE_MEM
+    },
+    {
+        .start = INT_MDP,
+        .end = INT_MDP,
+        .flags = IORESOURCE_IRQ,
+    },
+};
+
+struct platform_device msm_device_mdp = {
+    .name = "msm_mdp",
+    .id = 0,
+    .num_resources = ARRAY_SIZE(resources_mdp),
+    .resource = resources_mdp,
+};
+
 struct clk msm_clocks_7x01a[] = {
     CLK_PCOM("adm_clk",  ADM_CLK,  NULL, 0),
     CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0),
@@ -364,7 +431,7 @@ struct clk msm_clocks_7x01a[] = {
     CLK_PCOM("mdp_clk",  MDP_CLK,  NULL, OFF),
     CLK_PCOM("pbus_clk", PBUS_CLK, NULL, 0),
     CLK_PCOM("pcm_clk",  PCM_CLK,  NULL, 0),
-    CLK_PCOM("pmdh_clk", PMDH_CLK, NULL, OFF ),
+    CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
     CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
     CLK_PCOM("sdc_clk",  SDC1_CLK, &msm_device_sdc1.dev, OFF),
     CLK_PCOM("sdc_pclk", SDC1_P_CLK, &msm_device_sdc1.dev, OFF),
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
index 7fcf2e3b7698..4e9a0ab3e937 100644
--- a/arch/arm/mach-msm/devices-msm7x30.c
+++ b/arch/arm/mach-msm/devices-msm7x30.c
@@ -56,6 +56,77 @@ struct platform_device msm_device_smd = {
     .id = -1,
 };
 
+static struct resource resources_otg[] = {
+    {
+        .start = MSM_HSUSB_PHYS,
+        .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
+        .flags = IORESOURCE_MEM,
+    },
+    {
+        .start = INT_USB_HS,
+        .end = INT_USB_HS,
+        .flags = IORESOURCE_IRQ,
+    },
+};
+
+struct platform_device msm_device_otg = {
+    .name = "msm_otg",
+    .id = -1,
+    .num_resources = ARRAY_SIZE(resources_otg),
+    .resource = resources_otg,
+    .dev = {
+        .coherent_dma_mask = 0xffffffff,
+    },
+};
+
+static struct resource resources_hsusb[] = {
+    {
+        .start = MSM_HSUSB_PHYS,
+        .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
+        .flags = IORESOURCE_MEM,
+    },
+    {
+        .start = INT_USB_HS,
+        .end = INT_USB_HS,
+        .flags = IORESOURCE_IRQ,
+    },
+};
+
+struct platform_device msm_device_hsusb = {
+    .name = "msm_hsusb",
+    .id = -1,
+    .num_resources = ARRAY_SIZE(resources_hsusb),
+    .resource = resources_hsusb,
+    .dev = {
+        .coherent_dma_mask = 0xffffffff,
+    },
+};
+
+static u64 dma_mask = 0xffffffffULL;
+static struct resource resources_hsusb_host[] = {
+    {
+        .start = MSM_HSUSB_PHYS,
+        .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
+        .flags = IORESOURCE_MEM,
+    },
+    {
+        .start = INT_USB_HS,
+        .end = INT_USB_HS,
+        .flags = IORESOURCE_IRQ,
+    },
+};
+
+struct platform_device msm_device_hsusb_host = {
+    .name = "msm_hsusb_host",
+    .id = -1,
+    .num_resources = ARRAY_SIZE(resources_hsusb_host),
+    .resource = resources_hsusb_host,
+    .dev = {
+        .dma_mask = &dma_mask,
+        .coherent_dma_mask = 0xffffffffULL,
+    },
+};
+
 struct clk msm_clocks_7x30[] = {
     CLK_PCOM("adm_clk",  ADM_CLK,  NULL, 0),
     CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0),
@@ -107,6 +178,7 @@ struct clk msm_clocks_7x30[] = {
     CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
     CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
     CLK_PCOM("uart_clk",   UART2_CLK,  &msm_device_uart2.dev, 0),
+    CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0),
     CLK_PCOM("usb_hs_clk",  USB_HS_CLK,  NULL, OFF),
     CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF),
     CLK_PCOM("usb_hs_core_clk", USB_HS_CORE_CLK, NULL, OFF),
diff --git a/arch/arm/mach-msm/devices-msm8x60-iommu.c b/arch/arm/mach-msm/devices-msm8x60-iommu.c
index 89b9d4437e92..f9e7bd34ec59 100644
--- a/arch/arm/mach-msm/devices-msm8x60-iommu.c
+++ b/arch/arm/mach-msm/devices-msm8x60-iommu.c
@@ -254,60 +254,86 @@ static struct resource msm_iommu_gfx2d0_resources[] = {
     },
 };
 
+static struct resource msm_iommu_gfx2d1_resources[] = {
+    {
+        .start = MSM_IOMMU_GFX2D1_PHYS,
+        .end   = MSM_IOMMU_GFX2D1_PHYS + MSM_IOMMU_GFX2D1_SIZE - 1,
+        .name  = "physbase",
+        .flags = IORESOURCE_MEM,
+    },
+    {
+        .name = "nonsecure_irq",
+        .start = SMMU_GFX2D1_CB_SC_NON_SECURE_IRQ,
+        .end   = SMMU_GFX2D1_CB_SC_NON_SECURE_IRQ,
+        .flags = IORESOURCE_IRQ,
+    },
+    {
+        .name = "secure_irq",
+        .start = SMMU_GFX2D1_CB_SC_SECURE_IRQ,
+        .end   = SMMU_GFX2D1_CB_SC_SECURE_IRQ,
+        .flags = IORESOURCE_IRQ,
+    },
+};
+
 static struct platform_device msm_root_iommu_dev = {
     .name = "msm_iommu",
     .id = -1,
 };
 
-static struct msm_iommu_dev jpegd_smmu = {
+static struct msm_iommu_dev jpegd_iommu = {
     .name = "jpegd",
     .clk_rate = -1
 };
 
-static struct msm_iommu_dev vpe_smmu = {
+static struct msm_iommu_dev vpe_iommu = {
     .name = "vpe"
 };
 
-static struct msm_iommu_dev mdp0_smmu = {
+static struct msm_iommu_dev mdp0_iommu = {
     .name = "mdp0"
 };
 
-static struct msm_iommu_dev mdp1_smmu = {
+static struct msm_iommu_dev mdp1_iommu = {
     .name = "mdp1"
 };
 
-static struct msm_iommu_dev rot_smmu = {
+static struct msm_iommu_dev rot_iommu = {
     .name = "rot"
 };
 
-static struct msm_iommu_dev ijpeg_smmu = {
+static struct msm_iommu_dev ijpeg_iommu = {
     .name = "ijpeg"
 };
 
-static struct msm_iommu_dev vfe_smmu = {
+static struct msm_iommu_dev vfe_iommu = {
     .name = "vfe",
     .clk_rate = -1
 };
 
-static struct msm_iommu_dev vcodec_a_smmu = {
+static struct msm_iommu_dev vcodec_a_iommu = {
     .name = "vcodec_a"
 };
 
-static struct msm_iommu_dev vcodec_b_smmu = {
+static struct msm_iommu_dev vcodec_b_iommu = {
     .name = "vcodec_b"
 };
 
-static struct msm_iommu_dev gfx3d_smmu = {
+static struct msm_iommu_dev gfx3d_iommu = {
     .name = "gfx3d",
     .clk_rate = 27000000
 };
 
-static struct msm_iommu_dev gfx2d0_smmu = {
+static struct msm_iommu_dev gfx2d0_iommu = {
     .name = "gfx2d0",
     .clk_rate = 27000000
 };
 
-static struct platform_device msm_device_smmu_jpegd = {
+static struct msm_iommu_dev gfx2d1_iommu = {
+    .name = "gfx2d1",
+    .clk_rate = 27000000
+};
+
+static struct platform_device msm_device_iommu_jpegd = {
     .name = "msm_iommu",
     .id = 0,
     .dev = {
@@ -317,7 +343,7 @@ static struct platform_device msm_device_smmu_jpegd = {
     .resource = msm_iommu_jpegd_resources,
 };
 
-static struct platform_device msm_device_smmu_vpe = {
+static struct platform_device msm_device_iommu_vpe = {
     .name = "msm_iommu",
     .id = 1,
     .dev = {
@@ -327,7 +353,7 @@ static struct platform_device msm_device_smmu_vpe = {
     .resource = msm_iommu_vpe_resources,
 };
 
-static struct platform_device msm_device_smmu_mdp0 = {
+static struct platform_device msm_device_iommu_mdp0 = {
     .name = "msm_iommu",
     .id = 2,
     .dev = {
@@ -337,7 +363,7 @@ static struct platform_device msm_device_smmu_mdp0 = {
     .resource = msm_iommu_mdp0_resources,
 };
 
-static struct platform_device msm_device_smmu_mdp1 = {
+static struct platform_device msm_device_iommu_mdp1 = {
     .name = "msm_iommu",
     .id = 3,
     .dev = {
@@ -347,7 +373,7 @@ static struct platform_device msm_device_smmu_mdp1 = {
     .resource = msm_iommu_mdp1_resources,
 };
 
-static struct platform_device msm_device_smmu_rot = {
+static struct platform_device msm_device_iommu_rot = {
     .name = "msm_iommu",
     .id = 4,
     .dev = {
@@ -357,7 +383,7 @@ static struct platform_device msm_device_smmu_rot = {
     .resource = msm_iommu_rot_resources,
 };
 
-static struct platform_device msm_device_smmu_ijpeg = {
+static struct platform_device msm_device_iommu_ijpeg = {
     .name = "msm_iommu",
     .id = 5,
     .dev = {
@@ -367,7 +393,7 @@ static struct platform_device msm_device_smmu_ijpeg = {
     .resource = msm_iommu_ijpeg_resources,
 };
 
-static struct platform_device msm_device_smmu_vfe = {
+static struct platform_device msm_device_iommu_vfe = {
     .name = "msm_iommu",
     .id = 6,
     .dev = {
@@ -377,7 +403,7 @@ static struct platform_device msm_device_smmu_vfe = {
     .resource = msm_iommu_vfe_resources,
 };
 
-static struct platform_device msm_device_smmu_vcodec_a = {
+static struct platform_device msm_device_iommu_vcodec_a = {
     .name = "msm_iommu",
     .id = 7,
     .dev = {
@@ -387,7 +413,7 @@ static struct platform_device msm_device_smmu_vcodec_a = {
     .resource = msm_iommu_vcodec_a_resources,
 };
 
-static struct platform_device msm_device_smmu_vcodec_b = {
+static struct platform_device msm_device_iommu_vcodec_b = {
     .name = "msm_iommu",
     .id = 8,
     .dev = {
@@ -397,7 +423,7 @@ static struct platform_device msm_device_smmu_vcodec_b = {
     .resource = msm_iommu_vcodec_b_resources,
 };
 
-static struct platform_device msm_device_smmu_gfx3d = {
+static struct platform_device msm_device_iommu_gfx3d = {
     .name = "msm_iommu",
     .id = 9,
     .dev = {
@@ -407,7 +433,7 @@ static struct platform_device msm_device_smmu_gfx3d = {
     .resource = msm_iommu_gfx3d_resources,
 };
 
-static struct platform_device msm_device_smmu_gfx2d0 = {
+static struct platform_device msm_device_iommu_gfx2d0 = {
     .name = "msm_iommu",
     .id = 10,
     .dev = {
@@ -417,6 +443,16 @@ static struct platform_device msm_device_smmu_gfx2d0 = {
     .resource = msm_iommu_gfx2d0_resources,
 };
 
+struct platform_device msm_device_iommu_gfx2d1 = {
+    .name = "msm_iommu",
+    .id = 11,
+    .dev = {
+        .parent = &msm_root_iommu_dev.dev,
+    },
+    .num_resources = ARRAY_SIZE(msm_iommu_gfx2d1_resources),
+    .resource = msm_iommu_gfx2d1_resources,
+};
+
 static struct msm_iommu_ctx_dev jpegd_src_ctx = {
     .name = "jpegd_src",
     .num = 0,
@@ -519,41 +555,36 @@ static struct msm_iommu_ctx_dev vcodec_b_mm2_ctx = {
     .mids = {0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -1}
 };
 
-static struct msm_iommu_ctx_dev gfx3d_rbpa_ctx = {
-    .name = "gfx3d_rbpa",
+static struct msm_iommu_ctx_dev gfx3d_user_ctx = {
+    .name = "gfx3d_user",
     .num = 0,
-    .mids = {-1}
+    .mids = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -1}
 };
 
-static struct msm_iommu_ctx_dev gfx3d_cpvgttc_ctx = {
-    .name = "gfx3d_cpvgttc",
+static struct msm_iommu_ctx_dev gfx3d_priv_ctx = {
+    .name = "gfx3d_priv",
     .num = 1,
-    .mids = {0, 1, 2, 3, 4, 5, 6, 7, -1}
-};
-
-static struct msm_iommu_ctx_dev gfx3d_smmu_ctx = {
-    .name = "gfx3d_smmu",
-    .num = 2,
-    .mids = {8, 9, 10, 11, 12, -1}
+    .mids = {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+             31, -1}
 };
 
-static struct msm_iommu_ctx_dev gfx2d0_pixv1_ctx = {
-    .name = "gfx2d0_pixv1_smmu",
+static struct msm_iommu_ctx_dev gfx2d0_2d0_ctx = {
+    .name = "gfx2d0_2d0",
     .num = 0,
-    .mids = {0, 3, 4, -1}
+    .mids = {0, 1, 2, 3, 4, 5, 6, 7, -1}
 };
 
-static struct msm_iommu_ctx_dev gfx2d0_texv3_ctx = {
-    .name = "gfx2d0_texv3_smmu",
-    .num = 1,
-    .mids = {1, 6, 7, -1}
+static struct msm_iommu_ctx_dev gfx2d1_2d1_ctx = {
+    .name = "gfx2d1_2d1",
+    .num = 0,
+    .mids = {0, 1, 2, 3, 4, 5, 6, 7, -1}
 };
 
 static struct platform_device msm_device_jpegd_src_ctx = {
     .name = "msm_iommu_ctx",
     .id = 0,
     .dev = {
-        .parent = &msm_device_smmu_jpegd.dev,
+        .parent = &msm_device_iommu_jpegd.dev,
     },
 };
 
@@ -561,7 +592,7 @@ static struct platform_device msm_device_jpegd_dst_ctx = {
     .name = "msm_iommu_ctx",
     .id = 1,
     .dev = {
-        .parent = &msm_device_smmu_jpegd.dev,
+        .parent = &msm_device_iommu_jpegd.dev,
     },
 };
 
@@ -569,7 +600,7 @@ static struct platform_device msm_device_vpe_src_ctx = {
     .name = "msm_iommu_ctx",
     .id = 2,
     .dev = {
-        .parent = &msm_device_smmu_vpe.dev,
+        .parent = &msm_device_iommu_vpe.dev,
     },
 };
 
@@ -577,7 +608,7 @@ static struct platform_device msm_device_vpe_dst_ctx = {
     .name = "msm_iommu_ctx",
     .id = 3,
     .dev = {
-        .parent = &msm_device_smmu_vpe.dev,
+        .parent = &msm_device_iommu_vpe.dev,
     },
 };
 
@@ -585,7 +616,7 @@ static struct platform_device msm_device_mdp_vg1_ctx = {
     .name = "msm_iommu_ctx",
     .id = 4,
     .dev = {
-        .parent = &msm_device_smmu_mdp0.dev,
+        .parent = &msm_device_iommu_mdp0.dev,
     },
 };
 
@@ -593,7 +624,7 @@ static struct platform_device msm_device_mdp_rgb1_ctx = {
     .name = "msm_iommu_ctx",
     .id = 5,
     .dev = {
-        .parent = &msm_device_smmu_mdp0.dev,
+        .parent = &msm_device_iommu_mdp0.dev,
     },
 };
 
@@ -601,7 +632,7 @@ static struct platform_device msm_device_mdp_vg2_ctx = {
     .name = "msm_iommu_ctx",
     .id = 6,
     .dev = {
-        .parent = &msm_device_smmu_mdp1.dev,
+        .parent = &msm_device_iommu_mdp1.dev,
     },
 };
 
@@ -609,7 +640,7 @@ static struct platform_device msm_device_mdp_rgb2_ctx = {
     .name = "msm_iommu_ctx",
     .id = 7,
     .dev = {
-        .parent = &msm_device_smmu_mdp1.dev,
+        .parent = &msm_device_iommu_mdp1.dev,
     },
 };
 
@@ -617,7 +648,7 @@ static struct platform_device msm_device_rot_src_ctx = {
     .name = "msm_iommu_ctx",
     .id = 8,
     .dev = {
-        .parent = &msm_device_smmu_rot.dev,
+        .parent = &msm_device_iommu_rot.dev,
     },
 };
 
@@ -625,7 +656,7 @@ static struct platform_device msm_device_rot_dst_ctx = {
     .name = "msm_iommu_ctx",
     .id = 9,
     .dev = {
-        .parent = &msm_device_smmu_rot.dev,
+        .parent = &msm_device_iommu_rot.dev,
     },
 };
 
@@ -633,7 +664,7 @@ static struct platform_device msm_device_ijpeg_src_ctx = {
     .name = "msm_iommu_ctx",
     .id = 10,
     .dev = {
-        .parent = &msm_device_smmu_ijpeg.dev,
+        .parent = &msm_device_iommu_ijpeg.dev,
     },
 };
 
@@ -641,7 +672,7 @@ static struct platform_device msm_device_ijpeg_dst_ctx = {
     .name = "msm_iommu_ctx",
     .id = 11,
     .dev = {
-        .parent = &msm_device_smmu_ijpeg.dev,
+        .parent = &msm_device_iommu_ijpeg.dev,
     },
 };
 
@@ -649,7 +680,7 @@ static struct platform_device msm_device_vfe_imgwr_ctx = {
     .name = "msm_iommu_ctx",
     .id = 12,
     .dev = {
-        .parent = &msm_device_smmu_vfe.dev,
+        .parent = &msm_device_iommu_vfe.dev,
     },
 };
 
@@ -657,7 +688,7 @@ static struct platform_device msm_device_vfe_misc_ctx = {
     .name = "msm_iommu_ctx",
     .id = 13,
     .dev = {
-        .parent = &msm_device_smmu_vfe.dev,
+        .parent = &msm_device_iommu_vfe.dev,
     },
 };
 
@@ -665,7 +696,7 @@ static struct platform_device msm_device_vcodec_a_stream_ctx = {
     .name = "msm_iommu_ctx",
     .id = 14,
     .dev = {
-        .parent = &msm_device_smmu_vcodec_a.dev,
+        .parent = &msm_device_iommu_vcodec_a.dev,
     },
 };
 
@@ -673,7 +704,7 @@ static struct platform_device msm_device_vcodec_a_mm1_ctx = {
     .name = "msm_iommu_ctx",
     .id = 15,
     .dev = {
-        .parent = &msm_device_smmu_vcodec_a.dev,
+        .parent = &msm_device_iommu_vcodec_a.dev,
     },
 };
 
@@ -681,76 +712,70 @@ static struct platform_device msm_device_vcodec_b_mm2_ctx = {
     .name = "msm_iommu_ctx",
     .id = 16,
     .dev = {
-        .parent = &msm_device_smmu_vcodec_b.dev,
+        .parent = &msm_device_iommu_vcodec_b.dev,
     },
 };
 
-static struct platform_device msm_device_gfx3d_rbpa_ctx = {
+static struct platform_device msm_device_gfx3d_user_ctx = {
     .name = "msm_iommu_ctx",
     .id = 17,
     .dev = {
-        .parent = &msm_device_smmu_gfx3d.dev,
+        .parent = &msm_device_iommu_gfx3d.dev,
     },
 };
 
-static struct platform_device msm_device_gfx3d_cpvgttc_ctx = {
+static struct platform_device msm_device_gfx3d_priv_ctx = {
     .name = "msm_iommu_ctx",
     .id = 18,
     .dev = {
-        .parent = &msm_device_smmu_gfx3d.dev,
+        .parent = &msm_device_iommu_gfx3d.dev,
     },
 };
 
-static struct platform_device msm_device_gfx3d_smmu_ctx = {
+static struct platform_device msm_device_gfx2d0_2d0_ctx = {
     .name = "msm_iommu_ctx",
     .id = 19,
     .dev = {
-        .parent = &msm_device_smmu_gfx3d.dev,
+        .parent = &msm_device_iommu_gfx2d0.dev,
     },
 };
 
-static struct platform_device msm_device_gfx2d0_pixv1_ctx = {
+static struct platform_device msm_device_gfx2d1_2d1_ctx = {
     .name = "msm_iommu_ctx",
     .id = 20,
     .dev = {
-        .parent = &msm_device_smmu_gfx2d0.dev,
-    },
-};
-
-static struct platform_device msm_device_gfx2d0_texv3_ctx = {
-    .name = "msm_iommu_ctx",
-    .id = 21,
-    .dev = {
-        .parent = &msm_device_smmu_gfx2d0.dev,
+        .parent = &msm_device_iommu_gfx2d1.dev,
     },
 };
 
 static struct platform_device *msm_iommu_devs[] = {
-    &msm_device_smmu_jpegd,
-    &msm_device_smmu_vpe,
-    &msm_device_smmu_mdp0,
-    &msm_device_smmu_mdp1,
-    &msm_device_smmu_rot,
-    &msm_device_smmu_ijpeg,
-    &msm_device_smmu_vfe,
-    &msm_device_smmu_vcodec_a,
-    &msm_device_smmu_vcodec_b,
-    &msm_device_smmu_gfx3d,
-    &msm_device_smmu_gfx2d0,
+    &msm_device_iommu_jpegd,
+    &msm_device_iommu_vpe,
+    &msm_device_iommu_mdp0,
+    &msm_device_iommu_mdp1,
+    &msm_device_iommu_rot,
+    &msm_device_iommu_ijpeg,
+    &msm_device_iommu_vfe,
+    &msm_device_iommu_vcodec_a,
+    &msm_device_iommu_vcodec_b,
+    &msm_device_iommu_gfx3d,
+    &msm_device_iommu_gfx2d0,
+    &msm_device_iommu_gfx2d1,
 };
 
 static struct msm_iommu_dev *msm_iommu_data[] = {
-    &jpegd_smmu,
-    &vpe_smmu,
-    &mdp0_smmu,
-    &mdp1_smmu,
-    &rot_smmu,
-    &ijpeg_smmu,
-    &vfe_smmu,
-    &vcodec_a_smmu,
-    &vcodec_b_smmu,
-    &gfx3d_smmu,
-    &gfx2d0_smmu,
+    &jpegd_iommu,
+    &vpe_iommu,
+    &mdp0_iommu,
+    &mdp1_iommu,
+    &rot_iommu,
+    &ijpeg_iommu,
+    &vfe_iommu,
+    &vcodec_a_iommu,
+    &vcodec_b_iommu,
+    &gfx3d_iommu,
+    &gfx2d0_iommu,
+    &gfx2d1_iommu,
 };
 
 static struct platform_device *msm_iommu_ctx_devs[] = {
@@ -771,11 +796,10 @@ static struct platform_device *msm_iommu_ctx_devs[] = {
     &msm_device_vcodec_a_stream_ctx,
     &msm_device_vcodec_a_mm1_ctx,
     &msm_device_vcodec_b_mm2_ctx,
-    &msm_device_gfx3d_rbpa_ctx,
-    &msm_device_gfx3d_cpvgttc_ctx,
-    &msm_device_gfx3d_smmu_ctx,
-    &msm_device_gfx2d0_pixv1_ctx,
-    &msm_device_gfx2d0_texv3_ctx,
+    &msm_device_gfx3d_user_ctx,
+    &msm_device_gfx3d_priv_ctx,
+    &msm_device_gfx2d0_2d0_ctx,
+    &msm_device_gfx2d1_2d1_ctx,
 };
 
 static struct msm_iommu_ctx_dev *msm_iommu_ctx_data[] = {
@@ -796,14 +820,13 @@ static struct msm_iommu_ctx_dev *msm_iommu_ctx_data[] = {
     &vcodec_a_stream_ctx,
     &vcodec_a_mm1_ctx,
     &vcodec_b_mm2_ctx,
-    &gfx3d_rbpa_ctx,
-    &gfx3d_cpvgttc_ctx,
-    &gfx3d_smmu_ctx,
-    &gfx2d0_pixv1_ctx,
-    &gfx2d0_texv3_ctx,
+    &gfx3d_user_ctx,
+    &gfx3d_priv_ctx,
+    &gfx2d0_2d0_ctx,
+    &gfx2d1_2d1_ctx,
 };
 
-static int msm8x60_iommu_init(void)
+static int __init msm8x60_iommu_init(void)
 {
     int ret, i;
 
@@ -826,7 +849,7 @@ static int msm8x60_iommu_init(void)
         ret = platform_device_register(msm_iommu_devs[i]);
 
         if (ret != 0) {
-            pr_err("platform_device_register smmu failed, "
+            pr_err("platform_device_register iommu failed, "
                 "i = %d\n", i);
             goto failure_unwind;
         }
@@ -837,7 +860,7 @@ static int msm8x60_iommu_init(void)
                     msm_iommu_ctx_data[i],
                     sizeof(*msm_iommu_ctx_devs[i]));
         if (ret != 0) {
-            pr_err("platform_device_add_data iommu failed, "
+            pr_err("platform_device_add_data iommu failed, "
                 "i = %d\n", i);
             goto failure_unwind2;
         }
@@ -863,7 +886,7 @@ failure:
     return ret;
 }
 
-static void msm8x60_iommu_exit(void)
+static void __exit msm8x60_iommu_exit(void)
 {
     int i;
 
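[Illustration — not part of this commit.] These platform devices back MSM's implementation of the generic Linux IOMMU API that the new IOMMU_API Kconfig symbol enables. A hedged sketch of a client mapping one page through the generic interface, using the kernel signatures of this era (2.6.37-ish; they changed in later kernels) — dev, iova and phys are placeholders:

    #include <linux/iommu.h>

    struct iommu_domain *domain = iommu_domain_alloc();
    if (!domain)
        return -ENOMEM;
    if (iommu_attach_device(domain, dev))
        return -ENODEV;
    /* map one page (gfp_order 0) at iova, read/write */
    iommu_map(domain, iova, phys, 0, IOMMU_READ | IOMMU_WRITE);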
diff --git a/arch/arm/mach-msm/devices-qsd8x50.c b/arch/arm/mach-msm/devices-qsd8x50.c
index 6fe67c5d1ae0..a4b798f20ccb 100644
--- a/arch/arm/mach-msm/devices-qsd8x50.c
+++ b/arch/arm/mach-msm/devices-qsd8x50.c
@@ -53,6 +53,77 @@ struct platform_device msm_device_smd = {
53 .id = -1, 53 .id = -1,
54}; 54};
55 55
56static struct resource resources_otg[] = {
57 {
58 .start = MSM_HSUSB_PHYS,
59 .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
60 .flags = IORESOURCE_MEM,
61 },
62 {
63 .start = INT_USB_HS,
64 .end = INT_USB_HS,
65 .flags = IORESOURCE_IRQ,
66 },
67};
68
69struct platform_device msm_device_otg = {
70 .name = "msm_otg",
71 .id = -1,
72 .num_resources = ARRAY_SIZE(resources_otg),
73 .resource = resources_otg,
74 .dev = {
75 .coherent_dma_mask = 0xffffffff,
76 },
77};
78
79static struct resource resources_hsusb[] = {
80 {
81 .start = MSM_HSUSB_PHYS,
82 .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
83 .flags = IORESOURCE_MEM,
84 },
85 {
86 .start = INT_USB_HS,
87 .end = INT_USB_HS,
88 .flags = IORESOURCE_IRQ,
89 },
90};
91
92struct platform_device msm_device_hsusb = {
93 .name = "msm_hsusb",
94 .id = -1,
95 .num_resources = ARRAY_SIZE(resources_hsusb),
96 .resource = resources_hsusb,
97 .dev = {
98 .coherent_dma_mask = 0xffffffff,
99 },
100};
101
102static u64 dma_mask = 0xffffffffULL;
103static struct resource resources_hsusb_host[] = {
104 {
105 .start = MSM_HSUSB_PHYS,
106 .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
107 .flags = IORESOURCE_MEM,
108 },
109 {
110 .start = INT_USB_HS,
111 .end = INT_USB_HS,
112 .flags = IORESOURCE_IRQ,
113 },
114};
115
116struct platform_device msm_device_hsusb_host = {
117 .name = "msm_hsusb_host",
118 .id = -1,
119 .num_resources = ARRAY_SIZE(resources_hsusb_host),
120 .resource = resources_hsusb_host,
121 .dev = {
122 .dma_mask = &dma_mask,
123 .coherent_dma_mask = 0xffffffffULL,
124 },
125};
126
56struct clk msm_clocks_8x50[] = { 127struct clk msm_clocks_8x50[] = {
57 CLK_PCOM("adm_clk", ADM_CLK, NULL, 0), 128 CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
58 CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN), 129 CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
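
The three USB controllers added here share one register window and one IRQ; whichever driver binds decides how the hardware is used. On the driver side the resources come back through the platform API. (Note that struct resource ranges are inclusive, so an .end of start + size - 1 is the usual convention, and resource_size() computes end - start + 1.) A hedged sketch of a probe routine for the "msm_otg" device above:

#include <linux/io.h>
#include <linux/platform_device.h>

static int __devinit msm_otg_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *regs;
        int irq;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
        if (!res || irq < 0)
                return -ENODEV;

        regs = ioremap(res->start, resource_size(res));
        if (!regs)
                return -ENOMEM;

        platform_set_drvdata(pdev, regs);
        return 0;
}
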
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 568443e76423..87c70bfce2bd 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -28,6 +28,8 @@ extern struct platform_device msm_device_sdc3;
28extern struct platform_device msm_device_sdc4; 28extern struct platform_device msm_device_sdc4;
29 29
30extern struct platform_device msm_device_hsusb; 30extern struct platform_device msm_device_hsusb;
31extern struct platform_device msm_device_otg;
32extern struct platform_device msm_device_hsusb_host;
31 33
32extern struct platform_device msm_device_i2c; 34extern struct platform_device msm_device_i2c;
33 35
@@ -35,6 +37,10 @@ extern struct platform_device msm_device_smd;
35 37
36extern struct platform_device msm_device_nand; 38extern struct platform_device msm_device_nand;
37 39
40extern struct platform_device msm_device_mddi0;
41extern struct platform_device msm_device_mddi1;
42extern struct platform_device msm_device_mdp;
43
38extern struct clk msm_clocks_7x01a[]; 44extern struct clk msm_clocks_7x01a[];
39extern unsigned msm_num_clocks_7x01a; 45extern unsigned msm_num_clocks_7x01a;
40 46
diff --git a/arch/arm/mach-msm/gpio-v2.c b/arch/arm/mach-msm/gpio-v2.c
new file mode 100644
index 000000000000..0de19ec74e34
--- /dev/null
+++ b/arch/arm/mach-msm/gpio-v2.c
@@ -0,0 +1,426 @@
1/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15 * 02110-1301, USA.
16 *
17 */
18#define pr_fmt(fmt) "%s: " fmt, __func__
19
20#include <linux/bitmap.h>
21#include <linux/bitops.h>
22#include <linux/gpio.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/io.h>
26#include <linux/irq.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/spinlock.h>
30#include <mach/msm_iomap.h>
31#include "gpiomux.h"
32
33/* Bits of interest in the GPIO_IN_OUT register.
34 */
35enum {
36 GPIO_IN = 0,
37 GPIO_OUT = 1
38};
39
40/* Bits of interest in the GPIO_INTR_STATUS register.
41 */
42enum {
43 INTR_STATUS = 0,
44};
45
46/* Bits of interest in the GPIO_CFG register.
47 */
48enum {
49 GPIO_OE = 9,
50};
51
52/* Bits of interest in the GPIO_INTR_CFG register.
53 * When a GPIO triggers, two separate decisions are made, controlled
54 * by two separate flags.
55 *
56 * - First, INTR_RAW_STATUS_EN controls whether or not the GPIO_INTR_STATUS
57 * register for that GPIO will be updated to reflect the triggering of that
58 * gpio. If this bit is 0, this register will not be updated.
59 * - Second, INTR_ENABLE controls whether an interrupt is triggered.
60 *
61 * If INTR_ENABLE is set and INTR_RAW_STATUS_EN is NOT set, an interrupt
62 * can be triggered but the status register will not reflect it.
63 */
64enum {
65 INTR_ENABLE = 0,
66 INTR_POL_CTL = 1,
67 INTR_DECT_CTL = 2,
68 INTR_RAW_STATUS_EN = 3,
69};
70
71/* Codes of interest in GPIO_INTR_CFG_SU.
72 */
73enum {
74 TARGET_PROC_SCORPION = 4,
75 TARGET_PROC_NONE = 7,
76};
77
78
79#define GPIO_INTR_CFG_SU(gpio) (MSM_TLMM_BASE + 0x0400 + (0x04 * (gpio)))
80#define GPIO_CONFIG(gpio) (MSM_TLMM_BASE + 0x1000 + (0x10 * (gpio)))
81#define GPIO_IN_OUT(gpio) (MSM_TLMM_BASE + 0x1004 + (0x10 * (gpio)))
82#define GPIO_INTR_CFG(gpio) (MSM_TLMM_BASE + 0x1008 + (0x10 * (gpio)))
83#define GPIO_INTR_STATUS(gpio) (MSM_TLMM_BASE + 0x100c + (0x10 * (gpio)))
84
85/**
86 * struct msm_gpio_dev: the MSM8660 SoC GPIO device structure
87 *
88 * @enabled_irqs: a bitmap used to optimize the summary-irq handler. By
89 * keeping track of which gpios are unmasked as irq sources, we avoid
90 * having to do readl calls on hundreds of iomapped registers each time
91 * the summary interrupt fires in order to locate the active interrupts.
92 *
93 * @wake_irqs: a bitmap for tracking which interrupt lines are enabled
94 * as wakeup sources. When the device is suspended, interrupts which are
95 * not wakeup sources are disabled.
96 *
97 * @dual_edge_irqs: a bitmap used to track which irqs are configured
98 * as dual-edge, as this is not supported by the hardware and requires
99 * some special handling in the driver.
100 */
101struct msm_gpio_dev {
102 struct gpio_chip gpio_chip;
103 DECLARE_BITMAP(enabled_irqs, NR_GPIO_IRQS);
104 DECLARE_BITMAP(wake_irqs, NR_GPIO_IRQS);
105 DECLARE_BITMAP(dual_edge_irqs, NR_GPIO_IRQS);
106};
107
108static DEFINE_SPINLOCK(tlmm_lock);
109
110static inline struct msm_gpio_dev *to_msm_gpio_dev(struct gpio_chip *chip)
111{
112 return container_of(chip, struct msm_gpio_dev, gpio_chip);
113}
114
115static inline void set_gpio_bits(unsigned n, void __iomem *reg)
116{
117 writel(readl(reg) | n, reg);
118}
119
120static inline void clear_gpio_bits(unsigned n, void __iomem *reg)
121{
122 writel(readl(reg) & ~n, reg);
123}
124
125static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
126{
127 return readl(GPIO_IN_OUT(offset)) & BIT(GPIO_IN);
128}
129
130static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
131{
132 writel(val ? BIT(GPIO_OUT) : 0, GPIO_IN_OUT(offset));
133}
134
135static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
136{
137 unsigned long irq_flags;
138
139 spin_lock_irqsave(&tlmm_lock, irq_flags);
140 clear_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset));
141 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
142 return 0;
143}
144
145static int msm_gpio_direction_output(struct gpio_chip *chip,
146 unsigned offset,
147 int val)
148{
149 unsigned long irq_flags;
150
151 spin_lock_irqsave(&tlmm_lock, irq_flags);
152 msm_gpio_set(chip, offset, val);
153 set_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset));
154 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
155 return 0;
156}
157
158static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
159{
160 return msm_gpiomux_get(chip->base + offset);
161}
162
163static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
164{
165 msm_gpiomux_put(chip->base + offset);
166}
167
168static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
169{
170 return MSM_GPIO_TO_INT(chip->base + offset);
171}
172
173static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
174{
175 return irq - MSM_GPIO_TO_INT(chip->base);
176}
177
178static struct msm_gpio_dev msm_gpio = {
179 .gpio_chip = {
180 .base = 0,
181 .ngpio = NR_GPIO_IRQS,
182 .direction_input = msm_gpio_direction_input,
183 .direction_output = msm_gpio_direction_output,
184 .get = msm_gpio_get,
185 .set = msm_gpio_set,
186 .to_irq = msm_gpio_to_irq,
187 .request = msm_gpio_request,
188 .free = msm_gpio_free,
189 },
190};
191
192/* For dual-edge interrupts in software, since the hardware has no
193 * such support:
194 *
195 * At appropriate moments, this function may be called to flip the polarity
196 * settings of both-edge irq lines to try and catch the next edge.
197 *
198 * The attempt is considered successful if:
199 * - the status bit goes high, indicating that an edge was caught, or
200 * - the input value of the gpio doesn't change during the attempt.
201 * If the value changes twice during the process, that would cause the first
202 * test to fail but would force the second, as two opposite
203 * transitions would cause a detection no matter the polarity setting.
204 *
205 * The do-loop tries to sledge-hammer closed the timing hole between
206 * the initial value-read and the polarity-write - if the line value changes
207 * during that window, an interrupt is lost, the new polarity setting is
208 * incorrect, and the first success test will fail, causing a retry.
209 *
210 * Algorithm comes from Google's msmgpio driver, see mach-msm/gpio.c.
211 */
212static void msm_gpio_update_dual_edge_pos(unsigned gpio)
213{
214 int loop_limit = 100;
215 unsigned val, val2, intstat;
216
217 do {
218 val = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN);
219 if (val)
220 clear_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio));
221 else
222 set_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio));
223 val2 = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN);
224 intstat = readl(GPIO_INTR_STATUS(gpio)) & BIT(INTR_STATUS);
225 if (intstat || val == val2)
226 return;
227 } while (loop_limit-- > 0);
228 pr_err("dual-edge irq failed to stabilize, "
229 "interrupts dropped. %#08x != %#08x\n",
230 val, val2);
231}
232
233static void msm_gpio_irq_ack(unsigned int irq)
234{
235 int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq);
236
237 writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio));
238 if (test_bit(gpio, msm_gpio.dual_edge_irqs))
239 msm_gpio_update_dual_edge_pos(gpio);
240}
241
242static void msm_gpio_irq_mask(unsigned int irq)
243{
244 int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq);
245 unsigned long irq_flags;
246
247 spin_lock_irqsave(&tlmm_lock, irq_flags);
248 writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
249 clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
250 __clear_bit(gpio, msm_gpio.enabled_irqs);
251 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
252}
253
254static void msm_gpio_irq_unmask(unsigned int irq)
255{
256 int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq);
257 unsigned long irq_flags;
258
259 spin_lock_irqsave(&tlmm_lock, irq_flags);
260 __set_bit(gpio, msm_gpio.enabled_irqs);
261 set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
262 writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
263 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
264}
265
266static int msm_gpio_irq_set_type(unsigned int irq, unsigned int flow_type)
267{
268 int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq);
269 unsigned long irq_flags;
270 uint32_t bits;
271
272 spin_lock_irqsave(&tlmm_lock, irq_flags);
273
274 bits = readl(GPIO_INTR_CFG(gpio));
275
276 if (flow_type & IRQ_TYPE_EDGE_BOTH) {
277 bits |= BIT(INTR_DECT_CTL);
278 irq_desc[irq].handle_irq = handle_edge_irq;
279 if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
280 __set_bit(gpio, msm_gpio.dual_edge_irqs);
281 else
282 __clear_bit(gpio, msm_gpio.dual_edge_irqs);
283 } else {
284 bits &= ~BIT(INTR_DECT_CTL);
285 irq_desc[irq].handle_irq = handle_level_irq;
286 __clear_bit(gpio, msm_gpio.dual_edge_irqs);
287 }
288
289 if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
290 bits |= BIT(INTR_POL_CTL);
291 else
292 bits &= ~BIT(INTR_POL_CTL);
293
294 writel(bits, GPIO_INTR_CFG(gpio));
295
296 if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
297 msm_gpio_update_dual_edge_pos(gpio);
298
299 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
300
301 return 0;
302}
303
304/*
305 * When the summary IRQ is raised, any number of GPIO lines may be high.
306 * It is the job of the summary handler to find all those GPIO lines
307 * which have been set as summary IRQ lines and which are triggered,
308 * and to call their interrupt handlers.
309 */
310static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc)
311{
312 unsigned long i;
313
314 for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS);
315 i < NR_GPIO_IRQS;
316 i = find_next_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS, i + 1)) {
317 if (readl(GPIO_INTR_STATUS(i)) & BIT(INTR_STATUS))
318 generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip,
319 i));
320 }
321 desc->chip->ack(irq);
322}
323
324static int msm_gpio_irq_set_wake(unsigned int irq, unsigned int on)
325{
326 int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq);
327
328 if (on) {
329 if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS))
330 set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1);
331 set_bit(gpio, msm_gpio.wake_irqs);
332 } else {
333 clear_bit(gpio, msm_gpio.wake_irqs);
334 if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS))
335 set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0);
336 }
337
338 return 0;
339}
340
341static struct irq_chip msm_gpio_irq_chip = {
342 .name = "msmgpio",
343 .mask = msm_gpio_irq_mask,
344 .unmask = msm_gpio_irq_unmask,
345 .ack = msm_gpio_irq_ack,
346 .set_type = msm_gpio_irq_set_type,
347 .set_wake = msm_gpio_irq_set_wake,
348};
349
350static int __devinit msm_gpio_probe(struct platform_device *dev)
351{
352 int i, irq, ret;
353
354 bitmap_zero(msm_gpio.enabled_irqs, NR_GPIO_IRQS);
355 bitmap_zero(msm_gpio.wake_irqs, NR_GPIO_IRQS);
356 bitmap_zero(msm_gpio.dual_edge_irqs, NR_GPIO_IRQS);
357 msm_gpio.gpio_chip.label = dev->name;
358 ret = gpiochip_add(&msm_gpio.gpio_chip);
359 if (ret < 0)
360 return ret;
361
362 for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) {
363 irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i);
364 set_irq_chip(irq, &msm_gpio_irq_chip);
365 set_irq_handler(irq, handle_level_irq);
366 set_irq_flags(irq, IRQF_VALID);
367 }
368
369 set_irq_chained_handler(TLMM_SCSS_SUMMARY_IRQ,
370 msm_summary_irq_handler);
371 return 0;
372}
373
374static int __devexit msm_gpio_remove(struct platform_device *dev)
375{
376 int ret = gpiochip_remove(&msm_gpio.gpio_chip);
377
378 if (ret < 0)
379 return ret;
380
381 set_irq_handler(TLMM_SCSS_SUMMARY_IRQ, NULL);
382
383 return 0;
384}
385
386static struct platform_driver msm_gpio_driver = {
387 .probe = msm_gpio_probe,
388 .remove = __devexit_p(msm_gpio_remove),
389 .driver = {
390 .name = "msmgpio",
391 .owner = THIS_MODULE,
392 },
393};
394
395static struct platform_device msm_device_gpio = {
396 .name = "msmgpio",
397 .id = -1,
398};
399
400static int __init msm_gpio_init(void)
401{
402 int rc;
403
404 rc = platform_driver_register(&msm_gpio_driver);
405 if (!rc) {
406 rc = platform_device_register(&msm_device_gpio);
407 if (rc)
408 platform_driver_unregister(&msm_gpio_driver);
409 }
410
411 return rc;
412}
413
414static void __exit msm_gpio_exit(void)
415{
416 platform_device_unregister(&msm_device_gpio);
417 platform_driver_unregister(&msm_gpio_driver);
418}
419
420postcore_initcall(msm_gpio_init);
421module_exit(msm_gpio_exit);
422
423MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
424MODULE_DESCRIPTION("Driver for Qualcomm MSM TLMMv2 SoC GPIOs");
425MODULE_LICENSE("GPL v2");
426MODULE_ALIAS("platform:msmgpio");
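
From a consumer's point of view the interesting part of this driver is the emulated both-edge support: request_irq() with both trigger flags lands in msm_gpio_irq_set_type() with IRQ_TYPE_EDGE_BOTH, which marks the line in dual_edge_irqs and starts the polarity-flipping scheme described in the comment above. A hedged sketch of such a consumer, with a hypothetical line number:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t my_gpio_isr(int irq, void *data)
{
        return IRQ_HANDLED;     /* fires on rising and falling edges */
}

static int __init my_consumer_init(void)
{
        int gpio = 42;          /* hypothetical GPIO line */
        int ret;

        ret = gpio_request(gpio, "example");    /* routes through msm_gpiomux_get() */
        if (ret)
                return ret;
        gpio_direction_input(gpio);

        ret = request_irq(gpio_to_irq(gpio), my_gpio_isr,
                          IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                          "example", NULL);
        if (ret)
                gpio_free(gpio);
        return ret;
}
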
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index 218ef5732a24..296c0f10f230 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -20,13 +20,26 @@
20 20
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22 22
23/* Sharability attributes of MSM IOMMU mappings */
24#define MSM_IOMMU_ATTR_NON_SH 0x0
25#define MSM_IOMMU_ATTR_SH 0x4
26
27/* Cacheability attributes of MSM IOMMU mappings */
28#define MSM_IOMMU_ATTR_NONCACHED 0x0
29#define MSM_IOMMU_ATTR_CACHED_WB_WA 0x1
30#define MSM_IOMMU_ATTR_CACHED_WB_NWA 0x2
31#define MSM_IOMMU_ATTR_CACHED_WT 0x3
32
33/* Mask for the cache policy attribute */
34#define MSM_IOMMU_CP_MASK 0x03
35
23/* Maximum number of Machine IDs that we are allowing to be mapped to the same 36/* Maximum number of Machine IDs that we are allowing to be mapped to the same
24 * context bank. The number of MIDs mapped to the same CB does not affect 37 * context bank. The number of MIDs mapped to the same CB does not affect
25 * performance, but there is a practical limit on how many distinct MIDs may 38 * performance, but there is a practical limit on how many distinct MIDs may
26 * be present. These mappings are typically determined at design time and are 39 * be present. These mappings are typically determined at design time and are
27 * not expected to change at run time. 40 * not expected to change at run time.
28 */ 41 */
29#define MAX_NUM_MIDS 16 42#define MAX_NUM_MIDS 32
30 43
31/** 44/**
32 * struct msm_iommu_dev - a single IOMMU hardware instance 45 * struct msm_iommu_dev - a single IOMMU hardware instance
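
These attribute macros compose into the prot argument that clients pass through the IOMMU mapping API: bit 2 (MSM_IOMMU_ATTR_SH) selects shareability, and the low two bits, extracted with MSM_IOMMU_CP_MASK, select the cache policy. A small sketch of the encoding:

#include <linux/kernel.h>
#include <mach/iommu.h>

static void decode_prot(int prot)
{
        int shared = (prot & MSM_IOMMU_ATTR_SH) != 0;
        int policy = prot & MSM_IOMMU_CP_MASK;

        /* e.g. prot = MSM_IOMMU_ATTR_SH | MSM_IOMMU_ATTR_CACHED_WB_WA
         * gives shared = 1, policy = 0x1 (write-back, write-allocate) */
        pr_info("shared=%d policy=%#x\n", shared, policy);
}
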
diff --git a/arch/arm/mach-msm/include/mach/iommu_hw-8xxx.h b/arch/arm/mach-msm/include/mach/iommu_hw-8xxx.h
index f9386d3a2f77..c2c3da9444f4 100644
--- a/arch/arm/mach-msm/include/mach/iommu_hw-8xxx.h
+++ b/arch/arm/mach-msm/include/mach/iommu_hw-8xxx.h
@@ -54,6 +54,7 @@ do { \
54 54
55#define NUM_FL_PTE 4096 55#define NUM_FL_PTE 4096
56#define NUM_SL_PTE 256 56#define NUM_SL_PTE 256
57#define NUM_TEX_CLASS 8
57 58
58/* First-level page table bits */ 59/* First-level page table bits */
59#define FL_BASE_MASK 0xFFFFFC00 60#define FL_BASE_MASK 0xFFFFFC00
@@ -63,6 +64,9 @@ do { \
63#define FL_AP_WRITE (1 << 10) 64#define FL_AP_WRITE (1 << 10)
64#define FL_AP_READ (1 << 11) 65#define FL_AP_READ (1 << 11)
65#define FL_SHARED (1 << 16) 66#define FL_SHARED (1 << 16)
67#define FL_BUFFERABLE (1 << 2)
68#define FL_CACHEABLE (1 << 3)
69#define FL_TEX0 (1 << 12)
66#define FL_OFFSET(va) (((va) & 0xFFF00000) >> 20) 70#define FL_OFFSET(va) (((va) & 0xFFF00000) >> 20)
67 71
68/* Second-level page table bits */ 72/* Second-level page table bits */
@@ -73,8 +77,20 @@ do { \
73#define SL_AP0 (1 << 4) 77#define SL_AP0 (1 << 4)
74#define SL_AP1 (2 << 4) 78#define SL_AP1 (2 << 4)
75#define SL_SHARED (1 << 10) 79#define SL_SHARED (1 << 10)
80#define SL_BUFFERABLE (1 << 2)
81#define SL_CACHEABLE (1 << 3)
82#define SL_TEX0 (1 << 6)
76#define SL_OFFSET(va) (((va) & 0xFF000) >> 12) 83#define SL_OFFSET(va) (((va) & 0xFF000) >> 12)
77 84
85/* Memory type and cache policy attributes */
86#define MT_SO 0
87#define MT_DEV 1
88#define MT_NORMAL 2
89#define CP_NONCACHED 0
90#define CP_WB_WA 1
91#define CP_WT 2
92#define CP_WB_NWA 3
93
78/* Global register setters / getters */ 94/* Global register setters / getters */
79#define SET_M2VCBR_N(b, N, v) SET_GLOBAL_REG_N(M2VCBR_N, N, (b), (v)) 95#define SET_M2VCBR_N(b, N, v) SET_GLOBAL_REG_N(M2VCBR_N, N, (b), (v))
80#define SET_CBACR_N(b, N, v) SET_GLOBAL_REG_N(CBACR_N, N, (b), (v)) 96#define SET_CBACR_N(b, N, v) SET_GLOBAL_REG_N(CBACR_N, N, (b), (v))
@@ -706,7 +722,9 @@ do { \
706#define GET_OCPC5(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC5) 722#define GET_OCPC5(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC5)
707#define GET_OCPC6(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC6) 723#define GET_OCPC6(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC6)
708#define GET_OCPC7(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC7) 724#define GET_OCPC7(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC7)
709 725#define NMRR_ICP(nmrr, n) (((nmrr) & (3 << ((n) * 2))) >> ((n) * 2))
726#define NMRR_OCP(nmrr, n) (((nmrr) & (3 << ((n) * 2 + 16))) >> \
727 ((n) * 2 + 16))
710 728
711/* PAR */ 729/* PAR */
712#define GET_FAULT(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT) 730#define GET_FAULT(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT)
@@ -750,6 +768,8 @@ do { \
750#define GET_NOS5(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS5) 768#define GET_NOS5(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS5)
751#define GET_NOS6(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS6) 769#define GET_NOS6(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS6)
752#define GET_NOS7(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS7) 770#define GET_NOS7(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS7)
771#define PRRR_NOS(prrr, n) ((prrr) & (1 << ((n) + 24)) ? 1 : 0)
772#define PRRR_MT(prrr, n) ((((prrr) & (3 << ((n) * 2))) >> ((n) * 2)))
753 773
754 774
755/* RESUME */ 775/* RESUME */
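
The new PRRR/NMRR accessors pull per-TEX-class attributes out of the CPU's remap registers: PRRR holds eight two-bit memory types (plus the NOS bits at 24..31), while NMRR holds eight two-bit inner cache policies in its low half and the outer policies in its high half. A worked sketch for class n = 2, assuming the register values Linux typically programs on ARMv7 (PRRR 0xff0a81a8, NMRR 0x40e040e0):

#include <linux/kernel.h>
#include <mach/iommu_hw-8xxx.h>

static void show_tex_class2(void)
{
        unsigned int prrr = 0xff0a81a8, nmrr = 0x40e040e0;  /* assumed values */

        pr_info("mt=%u nos=%u icp=%u ocp=%u\n",
                PRRR_MT(prrr, 2),       /* (prrr >> 4) & 3  = 2 -> MT_NORMAL */
                PRRR_NOS(prrr, 2),      /* bit 26 of prrr   = 1              */
                NMRR_ICP(nmrr, 2),      /* (nmrr >> 4) & 3  = 2 -> CP_WT     */
                NMRR_OCP(nmrr, 2));     /* (nmrr >> 20) & 3 = 2 -> CP_WT     */
}
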
diff --git a/arch/arm/mach-msm/include/mach/irqs-8x60.h b/arch/arm/mach-msm/include/mach/irqs-8x60.h
index 36074cfc9ad2..f65841c74c0b 100644
--- a/arch/arm/mach-msm/include/mach/irqs-8x60.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8x60.h
@@ -237,7 +237,12 @@
237#define GSBI11_QUP_IRQ (GIC_SPI_START + 194) 237#define GSBI11_QUP_IRQ (GIC_SPI_START + 194)
238#define INT_UART12DM_IRQ (GIC_SPI_START + 195) 238#define INT_UART12DM_IRQ (GIC_SPI_START + 195)
239#define GSBI12_QUP_IRQ (GIC_SPI_START + 196) 239#define GSBI12_QUP_IRQ (GIC_SPI_START + 196)
240/*SPI 197 to 216 aren't used in 8x60*/ 240
241/*SPI 197 to 209 aren't used in 8x60*/

242#define SMMU_GFX2D1_CB_SC_SECURE_IRQ (GIC_SPI_START + 210)
243#define SMMU_GFX2D1_CB_SC_NON_SECURE_IRQ (GIC_SPI_START + 211)
244
245/*SPI 212 to 216 aren't used in 8x60*/
241#define SMPSS_SPARE_1 (GIC_SPI_START + 217) 246#define SMPSS_SPARE_1 (GIC_SPI_START + 217)
242#define SMPSS_SPARE_2 (GIC_SPI_START + 218) 247#define SMPSS_SPARE_2 (GIC_SPI_START + 218)
243#define SMPSS_SPARE_3 (GIC_SPI_START + 219) 248#define SMPSS_SPARE_3 (GIC_SPI_START + 219)
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
index 8a00c2defbc1..0fd7b68ca114 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
@@ -119,4 +119,7 @@
119#define MSM_AD5_PHYS 0xA7000000 119#define MSM_AD5_PHYS 0xA7000000
120#define MSM_AD5_SIZE (SZ_1M*13) 120#define MSM_AD5_SIZE (SZ_1M*13)
121 121
122#define MSM_HSUSB_PHYS 0xA3600000
123#define MSM_HSUSB_SIZE SZ_1K
124
122#endif 125#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
index 45bab50e3ee6..7c43a9bff1a9 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
@@ -98,4 +98,7 @@
98#define MSM_IOMMU_GFX2D0_PHYS 0x07D00000 98#define MSM_IOMMU_GFX2D0_PHYS 0x07D00000
99#define MSM_IOMMU_GFX2D0_SIZE SZ_1M 99#define MSM_IOMMU_GFX2D0_SIZE SZ_1M
100 100
101#define MSM_IOMMU_GFX2D1_PHYS 0x07E00000
102#define MSM_IOMMU_GFX2D1_SIZE SZ_1M
103
101#endif 104#endif
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index d36b61074146..f912d7bf1889 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -163,3 +163,4 @@ __msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
163 return __arm_ioremap_caller(phys_addr, size, mtype, 163 return __arm_ioremap_caller(phys_addr, size, mtype,
164 __builtin_return_address(0)); 164 __builtin_return_address(0));
165} 165}
166EXPORT_SYMBOL(__msm_ioremap);
diff --git a/arch/arm/mach-msm/iommu.c b/arch/arm/mach-msm/iommu.c
index f71747db3bee..e2d58e4cb0d7 100644
--- a/arch/arm/mach-msm/iommu.c
+++ b/arch/arm/mach-msm/iommu.c
@@ -33,6 +33,16 @@
33#include <mach/iommu_hw-8xxx.h> 33#include <mach/iommu_hw-8xxx.h>
34#include <mach/iommu.h> 34#include <mach/iommu.h>
35 35
36#define MRC(reg, processor, op1, crn, crm, op2) \
37__asm__ __volatile__ ( \
38" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
39: "=r" (reg))
40
41#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
42#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
43
44static int msm_iommu_tex_class[4];
45
36DEFINE_SPINLOCK(msm_iommu_lock); 46DEFINE_SPINLOCK(msm_iommu_lock);
37 47
38struct msm_priv { 48struct msm_priv {
@@ -40,23 +50,26 @@ struct msm_priv {
40 struct list_head list_attached; 50 struct list_head list_attached;
41}; 51};
42 52
43static void __flush_iotlb(struct iommu_domain *domain) 53static int __flush_iotlb(struct iommu_domain *domain)
44{ 54{
45 struct msm_priv *priv = domain->priv; 55 struct msm_priv *priv = domain->priv;
46 struct msm_iommu_drvdata *iommu_drvdata; 56 struct msm_iommu_drvdata *iommu_drvdata;
47 struct msm_iommu_ctx_drvdata *ctx_drvdata; 57 struct msm_iommu_ctx_drvdata *ctx_drvdata;
48 58 int ret = 0;
49#ifndef CONFIG_IOMMU_PGTABLES_L2 59#ifndef CONFIG_IOMMU_PGTABLES_L2
50 unsigned long *fl_table = priv->pgtable; 60 unsigned long *fl_table = priv->pgtable;
51 int i; 61 int i;
52 62
53 dmac_flush_range(fl_table, fl_table + SZ_16K); 63 if (!list_empty(&priv->list_attached)) {
64 dmac_flush_range(fl_table, fl_table + SZ_16K);
54 65
55 for (i = 0; i < NUM_FL_PTE; i++) 66 for (i = 0; i < NUM_FL_PTE; i++)
56 if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) { 67 if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
57 void *sl_table = __va(fl_table[i] & FL_BASE_MASK); 68 void *sl_table = __va(fl_table[i] &
58 dmac_flush_range(sl_table, sl_table + SZ_4K); 69 FL_BASE_MASK);
59 } 70 dmac_flush_range(sl_table, sl_table + SZ_4K);
71 }
72 }
60#endif 73#endif
61 74
62 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) { 75 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
@@ -66,6 +79,8 @@ static void __flush_iotlb(struct iommu_domain *domain)
66 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); 79 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
67 SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0); 80 SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
68 } 81 }
82
83 return ret;
69} 84}
70 85
71static void __reset_context(void __iomem *base, int ctx) 86static void __reset_context(void __iomem *base, int ctx)
@@ -95,6 +110,7 @@ static void __reset_context(void __iomem *base, int ctx)
95 110
96static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable) 111static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
97{ 112{
113 unsigned int prrr, nmrr;
98 __reset_context(base, ctx); 114 __reset_context(base, ctx);
99 115
100 /* Set up HTW mode */ 116 /* Set up HTW mode */
@@ -127,11 +143,11 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
127 /* Turn on TEX Remap */ 143 /* Turn on TEX Remap */
128 SET_TRE(base, ctx, 1); 144 SET_TRE(base, ctx, 1);
129 145
130 /* Do not configure PRRR / NMRR on the IOMMU for now. We will assume 146 /* Set TEX remap attributes */
131 * TEX class 0 for everything until attributes are properly worked out 147 RCP15_PRRR(prrr);
132 */ 148 RCP15_NMRR(nmrr);
133 SET_PRRR(base, ctx, 0); 149 SET_PRRR(base, ctx, prrr);
134 SET_NMRR(base, ctx, 0); 150 SET_NMRR(base, ctx, nmrr);
135 151
136 /* Turn on BFB prefetch */ 152 /* Turn on BFB prefetch */
137 SET_BFBDFE(base, ctx, 1); 153 SET_BFBDFE(base, ctx, 1);
@@ -238,6 +254,11 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
238 goto fail; 254 goto fail;
239 } 255 }
240 256
257 if (!list_empty(&ctx_drvdata->attached_elm)) {
258 ret = -EBUSY;
259 goto fail;
260 }
261
241 list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm) 262 list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
242 if (tmp_drvdata == ctx_drvdata) { 263 if (tmp_drvdata == ctx_drvdata) {
243 ret = -EBUSY; 264 ret = -EBUSY;
@@ -248,7 +269,7 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
248 __pa(priv->pgtable)); 269 __pa(priv->pgtable));
249 270
250 list_add(&(ctx_drvdata->attached_elm), &priv->list_attached); 271 list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
251 __flush_iotlb(domain); 272 ret = __flush_iotlb(domain);
252 273
253fail: 274fail:
254 spin_unlock_irqrestore(&msm_iommu_lock, flags); 275 spin_unlock_irqrestore(&msm_iommu_lock, flags);
@@ -263,6 +284,7 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
263 struct msm_iommu_drvdata *iommu_drvdata; 284 struct msm_iommu_drvdata *iommu_drvdata;
264 struct msm_iommu_ctx_drvdata *ctx_drvdata; 285 struct msm_iommu_ctx_drvdata *ctx_drvdata;
265 unsigned long flags; 286 unsigned long flags;
287 int ret;
266 288
267 spin_lock_irqsave(&msm_iommu_lock, flags); 289 spin_lock_irqsave(&msm_iommu_lock, flags);
268 priv = domain->priv; 290 priv = domain->priv;
@@ -277,7 +299,10 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
277 if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) 299 if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
278 goto fail; 300 goto fail;
279 301
280 __flush_iotlb(domain); 302 ret = __flush_iotlb(domain);
303 if (ret)
304 goto fail;
305
281 __reset_context(iommu_drvdata->base, ctx_dev->num); 306 __reset_context(iommu_drvdata->base, ctx_dev->num);
282 list_del_init(&ctx_drvdata->attached_elm); 307 list_del_init(&ctx_drvdata->attached_elm);
283 308
@@ -296,12 +321,21 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
296 unsigned long *sl_table; 321 unsigned long *sl_table;
297 unsigned long *sl_pte; 322 unsigned long *sl_pte;
298 unsigned long sl_offset; 323 unsigned long sl_offset;
324 unsigned int pgprot;
299 size_t len = 0x1000UL << order; 325 size_t len = 0x1000UL << order;
300 int ret = 0; 326 int ret = 0, tex, sh;
301 327
302 spin_lock_irqsave(&msm_iommu_lock, flags); 328 spin_lock_irqsave(&msm_iommu_lock, flags);
303 priv = domain->priv;
304 329
330 sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
331 tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
332
333 if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
334 ret = -EINVAL;
335 goto fail;
336 }
337
338 priv = domain->priv;
305 if (!priv) { 339 if (!priv) {
306 ret = -EINVAL; 340 ret = -EINVAL;
307 goto fail; 341 goto fail;
@@ -322,6 +356,18 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
322 goto fail; 356 goto fail;
323 } 357 }
324 358
359 if (len == SZ_16M || len == SZ_1M) {
360 pgprot = sh ? FL_SHARED : 0;
361 pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
362 pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
363 pgprot |= tex & 0x04 ? FL_TEX0 : 0;
364 } else {
365 pgprot = sh ? SL_SHARED : 0;
366 pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
367 pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
368 pgprot |= tex & 0x04 ? SL_TEX0 : 0;
369 }
370
325 fl_offset = FL_OFFSET(va); /* Upper 12 bits */ 371 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
326 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ 372 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
327 373
@@ -330,17 +376,17 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
330 for (i = 0; i < 16; i++) 376 for (i = 0; i < 16; i++)
331 *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION | 377 *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
332 FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT | 378 FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
333 FL_SHARED; 379 FL_SHARED | pgprot;
334 } 380 }
335 381
336 if (len == SZ_1M) 382 if (len == SZ_1M)
337 *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | 383 *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE |
338 FL_TYPE_SECT | FL_SHARED; 384 FL_TYPE_SECT | FL_SHARED | pgprot;
339 385
340 /* Need a 2nd level table */ 386 /* Need a 2nd level table */
341 if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) { 387 if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
342 unsigned long *sl; 388 unsigned long *sl;
343 sl = (unsigned long *) __get_free_pages(GFP_KERNEL, 389 sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
344 get_order(SZ_4K)); 390 get_order(SZ_4K));
345 391
346 if (!sl) { 392 if (!sl) {
@@ -360,17 +406,17 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
360 406
361 if (len == SZ_4K) 407 if (len == SZ_4K)
362 *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | 408 *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 |
363 SL_SHARED | SL_TYPE_SMALL; 409 SL_SHARED | SL_TYPE_SMALL | pgprot;
364 410
365 if (len == SZ_64K) { 411 if (len == SZ_64K) {
366 int i; 412 int i;
367 413
368 for (i = 0; i < 16; i++) 414 for (i = 0; i < 16; i++)
369 *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 | 415 *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
370 SL_AP1 | SL_SHARED | SL_TYPE_LARGE; 416 SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
371 } 417 }
372 418
373 __flush_iotlb(domain); 419 ret = __flush_iotlb(domain);
374fail: 420fail:
375 spin_unlock_irqrestore(&msm_iommu_lock, flags); 421 spin_unlock_irqrestore(&msm_iommu_lock, flags);
376 return ret; 422 return ret;
@@ -455,7 +501,7 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
455 } 501 }
456 } 502 }
457 503
458 __flush_iotlb(domain); 504 ret = __flush_iotlb(domain);
459fail: 505fail:
460 spin_unlock_irqrestore(&msm_iommu_lock, flags); 506 spin_unlock_irqrestore(&msm_iommu_lock, flags);
461 return ret; 507 return ret;
@@ -490,9 +536,6 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
490 SET_CTX_TLBIALL(base, ctx, 0); 536 SET_CTX_TLBIALL(base, ctx, 0);
491 SET_V2PPR_VA(base, ctx, va >> V2Pxx_VA_SHIFT); 537 SET_V2PPR_VA(base, ctx, va >> V2Pxx_VA_SHIFT);
492 538
493 if (GET_FAULT(base, ctx))
494 goto fail;
495
496 par = GET_PAR(base, ctx); 539 par = GET_PAR(base, ctx);
497 540
498 /* We are dealing with a supersection */ 541 /* We are dealing with a supersection */
@@ -501,6 +544,9 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
501 else /* Upper 20 bits from PAR, lower 12 from VA */ 544 else /* Upper 20 bits from PAR, lower 12 from VA */
502 ret = (par & 0xFFFFF000) | (va & 0x00000FFF); 545 ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
503 546
547 if (GET_FAULT(base, ctx))
548 ret = 0;
549
504fail: 550fail:
505 spin_unlock_irqrestore(&msm_iommu_lock, flags); 551 spin_unlock_irqrestore(&msm_iommu_lock, flags);
506 return ret; 552 return ret;
@@ -543,8 +589,8 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
543{ 589{
544 struct msm_iommu_drvdata *drvdata = dev_id; 590 struct msm_iommu_drvdata *drvdata = dev_id;
545 void __iomem *base; 591 void __iomem *base;
546 unsigned int fsr = 0; 592 unsigned int fsr;
547 int ncb = 0, i = 0; 593 int ncb, i;
548 594
549 spin_lock(&msm_iommu_lock); 595 spin_lock(&msm_iommu_lock);
550 596
@@ -555,7 +601,6 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
555 601
556 base = drvdata->base; 602 base = drvdata->base;
557 603
558 pr_err("===== WOAH! =====\n");
559 pr_err("Unexpected IOMMU page fault!\n"); 604 pr_err("Unexpected IOMMU page fault!\n");
560 pr_err("base = %08x\n", (unsigned int) base); 605 pr_err("base = %08x\n", (unsigned int) base);
561 606
@@ -585,8 +630,47 @@ static struct iommu_ops msm_iommu_ops = {
585 .domain_has_cap = msm_iommu_domain_has_cap 630 .domain_has_cap = msm_iommu_domain_has_cap
586}; 631};
587 632
588static int msm_iommu_init(void) 633static int __init get_tex_class(int icp, int ocp, int mt, int nos)
634{
635 int i = 0;
636 unsigned int prrr = 0;
637 unsigned int nmrr = 0;
638 int c_icp, c_ocp, c_mt, c_nos;
639
640 RCP15_PRRR(prrr);
641 RCP15_NMRR(nmrr);
642
643 for (i = 0; i < NUM_TEX_CLASS; i++) {
644 c_nos = PRRR_NOS(prrr, i);
645 c_mt = PRRR_MT(prrr, i);
646 c_icp = NMRR_ICP(nmrr, i);
647 c_ocp = NMRR_OCP(nmrr, i);
648
649 if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
650 return i;
651 }
652
653 return -ENODEV;
654}
655
656static void __init setup_iommu_tex_classes(void)
657{
658 msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
659 get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
660
661 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
662 get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
663
664 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
665 get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
666
667 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
668 get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
669}
670
671static int __init msm_iommu_init(void)
589{ 672{
673 setup_iommu_tex_classes();
590 register_iommu(&msm_iommu_ops); 674 register_iommu(&msm_iommu_ops);
591 return 0; 675 return 0;
592} 676}
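
With the TEX classes resolved at init time, a client maps memory through the generic IOMMU API and picks attributes with the new prot flags; msm_iommu_map() then translates the class into the FL_*/SL_* page-table bits shown above. A hedged sketch using the 2.6.37-era API (iommu_map() takes a page order), where dev is assumed to be one of the IOMMU context devices and iova/pa are 1 MB aligned:

#include <linux/iommu.h>
#include <mach/iommu.h>

static int map_one_megabyte(struct device *dev, unsigned long iova, phys_addr_t pa)
{
        struct iommu_domain *domain;
        int ret;

        domain = iommu_domain_alloc();
        if (!domain)
                return -ENOMEM;

        ret = iommu_attach_device(domain, dev);
        if (ret)
                goto out_free;

        ret = iommu_map(domain, iova, pa, 8,    /* order 8: 256 * 4 KB = 1 MB */
                        MSM_IOMMU_ATTR_CACHED_WB_WA | MSM_IOMMU_ATTR_SH);
        if (ret)
                iommu_detach_device(domain, dev);
        return ret;     /* domain stays attached for the device's lifetime */

out_free:
        iommu_domain_free(domain);
        return ret;
}
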
diff --git a/arch/arm/mach-msm/iommu_dev.c b/arch/arm/mach-msm/iommu_dev.c
index 9019cee2907b..b83c73b41fd1 100644
--- a/arch/arm/mach-msm/iommu_dev.c
+++ b/arch/arm/mach-msm/iommu_dev.c
@@ -346,7 +346,7 @@ static struct platform_driver msm_iommu_ctx_driver = {
346 .remove = msm_iommu_ctx_remove, 346 .remove = msm_iommu_ctx_remove,
347}; 347};
348 348
349static int msm_iommu_driver_init(void) 349static int __init msm_iommu_driver_init(void)
350{ 350{
351 int ret; 351 int ret;
352 ret = platform_driver_register(&msm_iommu_driver); 352 ret = platform_driver_register(&msm_iommu_driver);
@@ -365,7 +365,7 @@ error:
365 return ret; 365 return ret;
366} 366}
367 367
368static void msm_iommu_driver_exit(void) 368static void __exit msm_iommu_driver_exit(void)
369{ 369{
370 platform_driver_unregister(&msm_iommu_ctx_driver); 370 platform_driver_unregister(&msm_iommu_ctx_driver);
371 platform_driver_unregister(&msm_iommu_driver); 371 platform_driver_unregister(&msm_iommu_driver);
diff --git a/arch/arm/mach-msm/sirc.c b/arch/arm/mach-msm/sirc.c
index b0794524ba6e..152eefda3ce6 100644
--- a/arch/arm/mach-msm/sirc.c
+++ b/arch/arm/mach-msm/sirc.c
@@ -40,9 +40,6 @@ static struct sirc_cascade_regs sirc_reg_table[] = {
40 } 40 }
41}; 41};
42 42
43static unsigned int save_type;
44static unsigned int save_polarity;
45
46/* Mask off the given interrupt. Keep the int_enable mask in sync with 43/* Mask off the given interrupt. Keep the int_enable mask in sync with
47 the enable reg, so it can be restored after power collapse. */ 44 the enable reg, so it can be restored after power collapse. */
48static void sirc_irq_mask(unsigned int irq) 45static void sirc_irq_mask(unsigned int irq)
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index f07dc7c738f0..657be73297db 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -14,6 +14,8 @@
14 * 14 *
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/platform_device.h> 19#include <linux/platform_device.h>
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/fs.h> 21#include <linux/fs.h>
@@ -89,7 +91,7 @@ static void smd_diag(void)
89 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG); 91 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
90 if (x != 0) { 92 if (x != 0) {
91 x[SZ_DIAG_ERR_MSG - 1] = 0; 93 x[SZ_DIAG_ERR_MSG - 1] = 0;
92 pr_info("smem: DIAG '%s'\n", x); 94 pr_debug("DIAG '%s'\n", x);
93 } 95 }
94} 96}
95 97
@@ -312,7 +314,7 @@ static void smd_state_change(struct smd_channel *ch,
312{ 314{
313 ch->last_state = next; 315 ch->last_state = next;
314 316
315 pr_info("SMD: ch %d %d -> %d\n", ch->n, last, next); 317 pr_debug("ch %d %d -> %d\n", ch->n, last, next);
316 318
317 switch (next) { 319 switch (next) {
318 case SMD_SS_OPENING: 320 case SMD_SS_OPENING:
@@ -601,7 +603,7 @@ static int smd_alloc_channel(const char *name, uint32_t cid, uint32_t type)
601 ch->pdev.name = ch->name; 603 ch->pdev.name = ch->name;
602 ch->pdev.id = -1; 604 ch->pdev.id = -1;
603 605
604 pr_info("smd_alloc_channel() cid=%02d size=%05d '%s'\n", 606 pr_debug("smd_alloc_channel() cid=%02d size=%05d '%s'\n",
605 ch->n, ch->fifo_size, ch->name); 607 ch->n, ch->fifo_size, ch->name);
606 608
607 mutex_lock(&smd_creation_mutex); 609 mutex_lock(&smd_creation_mutex);
@@ -621,7 +623,7 @@ static void smd_channel_probe_worker(struct work_struct *work)
621 623
622 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64); 624 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
623 if (!shared) { 625 if (!shared) {
624 pr_err("smd: cannot find allocation table\n"); 626 pr_err("cannot find allocation table\n");
625 return; 627 return;
626 } 628 }
627 for (n = 0; n < 64; n++) { 629 for (n = 0; n < 64; n++) {
@@ -725,8 +727,6 @@ int smd_close(smd_channel_t *ch)
725{ 727{
726 unsigned long flags; 728 unsigned long flags;
727 729
728 pr_info("smd_close(%p)\n", ch);
729
730 if (ch == 0) 730 if (ch == 0)
731 return -1; 731 return -1;
732 732
@@ -939,7 +939,6 @@ int smsm_set_sleep_duration(uint32_t delay)
939int smd_core_init(void) 939int smd_core_init(void)
940{ 940{
941 int r; 941 int r;
942 pr_info("smd_core_init()\n");
943 942
944 /* wait for essential items to be initialized */ 943 /* wait for essential items to be initialized */
945 for (;;) { 944 for (;;) {
@@ -992,15 +991,11 @@ int smd_core_init(void)
992 smsm_change_state(SMSM_STATE_APPS_DEM, ~0, 0); 991 smsm_change_state(SMSM_STATE_APPS_DEM, ~0, 0);
993#endif 992#endif
994 993
995 pr_info("smd_core_init() done\n");
996
997 return 0; 994 return 0;
998} 995}
999 996
1000static int __devinit msm_smd_probe(struct platform_device *pdev) 997static int __devinit msm_smd_probe(struct platform_device *pdev)
1001{ 998{
1002 pr_info("smd_init()\n");
1003
1004 /* 999 /*
1005 * If we haven't waited for the ARM9 to boot up till now, 1000 * If we haven't waited for the ARM9 to boot up till now,
1006 * then we need to wait here. Otherwise this should just 1001 * then we need to wait here. Otherwise this should just
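
The logging cleanups in smd.c lean on the pr_fmt convention: defining pr_fmt before the first include makes every pr_err()/pr_debug() in the file prefix its output automatically, which is why the hand-written "smd:" prefixes could be dropped. A minimal sketch:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt     /* must precede the includes */

#include <linux/kernel.h>

static void example(void)
{
        pr_err("cannot find allocation table\n");
        /* prints, e.g., "msm_smd: cannot find allocation table" */
}
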
diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c
index f91c3b7bc655..8736afff82f3 100644
--- a/arch/arm/mach-msm/smd_debug.c
+++ b/arch/arm/mach-msm/smd_debug.c
@@ -270,8 +270,10 @@ void smsm_print_sleep_info(void)
270{ 270{
271 unsigned long flags; 271 unsigned long flags;
272 uint32_t *ptr; 272 uint32_t *ptr;
273#ifndef CONFIG_ARCH_MSM_SCORPION
273 struct tramp_gpio_smem *gpio; 274 struct tramp_gpio_smem *gpio;
274 struct smsm_interrupt_info *int_info; 275 struct smsm_interrupt_info *int_info;
276#endif
275 277
276 278
277 spin_lock_irqsave(&smem_lock, flags); 279 spin_lock_irqsave(&smem_lock, flags);
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 89ed1be2d62e..8be261506056 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -642,31 +642,13 @@ static void __init omap3pandora_init_irq(void)
642 omap_gpio_init(); 642 omap_gpio_init();
643} 643}
644 644
645static void pandora_wl1251_set_power(bool enable) 645static void __init pandora_wl1251_init(void)
646{
647 /*
648 * Keep power always on until wl1251_sdio driver learns to re-init
649 * the chip after powering it down and back up.
650 */
651}
652
653static struct wl12xx_platform_data pandora_wl1251_pdata = {
654 .set_power = pandora_wl1251_set_power,
655 .use_eeprom = true,
656};
657
658static struct platform_device pandora_wl1251_data = {
659 .name = "wl1251_data",
660 .id = -1,
661 .dev = {
662 .platform_data = &pandora_wl1251_pdata,
663 },
664};
665
666static void pandora_wl1251_init(void)
667{ 646{
647 struct wl12xx_platform_data pandora_wl1251_pdata;
668 int ret; 648 int ret;
669 649
650 memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata));
651
670 ret = gpio_request(PANDORA_WIFI_IRQ_GPIO, "wl1251 irq"); 652 ret = gpio_request(PANDORA_WIFI_IRQ_GPIO, "wl1251 irq");
671 if (ret < 0) 653 if (ret < 0)
672 goto fail; 654 goto fail;
@@ -679,6 +661,11 @@ static void pandora_wl1251_init(void)
679 if (pandora_wl1251_pdata.irq < 0) 661 if (pandora_wl1251_pdata.irq < 0)
680 goto fail_irq; 662 goto fail_irq;
681 663
664 pandora_wl1251_pdata.use_eeprom = true;
665 ret = wl12xx_set_platform_data(&pandora_wl1251_pdata);
666 if (ret < 0)
667 goto fail_irq;
668
682 return; 669 return;
683 670
684fail_irq: 671fail_irq:
@@ -691,7 +678,6 @@ static struct platform_device *omap3pandora_devices[] __initdata = {
691 &pandora_leds_gpio, 678 &pandora_leds_gpio,
692 &pandora_keys_gpio, 679 &pandora_keys_gpio,
693 &pandora_dss_device, 680 &pandora_dss_device,
694 &pandora_wl1251_data,
695 &pandora_vwlan_device, 681 &pandora_vwlan_device,
696}; 682};
697 683
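
Rather than carrying a private platform device, the Pandora board now hands its wl1251 settings to the wl12xx driver core; wl12xx_set_platform_data() copies the structure, which is why a stack-local pdata is safe here. On the driver side the data is expected back through wl12xx_get_platform_data(); a hedged sketch of that consumer:

#include <linux/err.h>
#include <linux/wl12xx.h>

static int example_probe(void)
{
        struct wl12xx_platform_data *pdata = wl12xx_get_platform_data();

        if (IS_ERR_OR_NULL(pdata))
                return -ENODEV;

        /* pdata->irq and pdata->use_eeprom as filled in by the board code */
        return 0;
}
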
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index ac429ff2c20d..f92dbd0c06d5 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -502,7 +502,12 @@ static struct platform_device keysc_device = {
502static struct resource mipidsi0_resources[] = { 502static struct resource mipidsi0_resources[] = {
503 [0] = { 503 [0] = {
504 .start = 0xffc60000, 504 .start = 0xffc60000,
505 .end = 0xffc68fff, 505 .end = 0xffc63073,
506 .flags = IORESOURCE_MEM,
507 },
508 [1] = {
509 .start = 0xffc68000,
510 .end = 0xffc680ef,
506 .flags = IORESOURCE_MEM, 511 .flags = IORESOURCE_MEM,
507 }, 512 },
508}; 513};
@@ -510,6 +515,7 @@ static struct resource mipidsi0_resources[] = {
510static struct sh_mipi_dsi_info mipidsi0_info = { 515static struct sh_mipi_dsi_info mipidsi0_info = {
511 .data_format = MIPI_RGB888, 516 .data_format = MIPI_RGB888,
512 .lcd_chan = &lcdc_info.ch[0], 517 .lcd_chan = &lcdc_info.ch[0],
518 .vsynw_offset = 17,
513}; 519};
514 520
515static struct platform_device mipidsi0_device = { 521static struct platform_device mipidsi0_device = {
@@ -522,44 +528,6 @@ static struct platform_device mipidsi0_device = {
522 }, 528 },
523}; 529};
524 530
525/* This function will disappear when we switch to (runtime) PM */
526static int __init ap4evb_init_display_clk(void)
527{
528 struct clk *lcdc_clk;
529 struct clk *dsitx_clk;
530 int ret;
531
532 lcdc_clk = clk_get(&lcdc_device.dev, "sh_mobile_lcdc_fb.0");
533 if (IS_ERR(lcdc_clk))
534 return PTR_ERR(lcdc_clk);
535
536 dsitx_clk = clk_get(&mipidsi0_device.dev, "sh-mipi-dsi.0");
537 if (IS_ERR(dsitx_clk)) {
538 ret = PTR_ERR(dsitx_clk);
539 goto eclkdsitxget;
540 }
541
542 ret = clk_enable(lcdc_clk);
543 if (ret < 0)
544 goto eclklcdcon;
545
546 ret = clk_enable(dsitx_clk);
547 if (ret < 0)
548 goto eclkdsitxon;
549
550 return 0;
551
552eclkdsitxon:
553 clk_disable(lcdc_clk);
554eclklcdcon:
555 clk_put(dsitx_clk);
556eclkdsitxget:
557 clk_put(lcdc_clk);
558
559 return ret;
560}
561device_initcall(ap4evb_init_display_clk);
562
563static struct platform_device *qhd_devices[] __initdata = { 531static struct platform_device *qhd_devices[] __initdata = {
564 &mipidsi0_device, 532 &mipidsi0_device,
565 &keysc_device, 533 &keysc_device,
@@ -765,10 +733,15 @@ static struct platform_device lcdc1_device = {
765 }, 733 },
766}; 734};
767 735
736static long ap4evb_clk_optimize(unsigned long target, unsigned long *best_freq,
737 unsigned long *parent_freq);
738
739
768static struct sh_mobile_hdmi_info hdmi_info = { 740static struct sh_mobile_hdmi_info hdmi_info = {
769 .lcd_chan = &sh_mobile_lcdc1_info.ch[0], 741 .lcd_chan = &sh_mobile_lcdc1_info.ch[0],
770 .lcd_dev = &lcdc1_device.dev, 742 .lcd_dev = &lcdc1_device.dev,
771 .flags = HDMI_SND_SRC_SPDIF, 743 .flags = HDMI_SND_SRC_SPDIF,
744 .clk_optimize_parent = ap4evb_clk_optimize,
772}; 745};
773 746
774static struct resource hdmi_resources[] = { 747static struct resource hdmi_resources[] = {
@@ -795,6 +768,25 @@ static struct platform_device hdmi_device = {
795 }, 768 },
796}; 769};
797 770
771static long ap4evb_clk_optimize(unsigned long target, unsigned long *best_freq,
772 unsigned long *parent_freq)
773{
774 struct clk *hdmi_ick = clk_get(&hdmi_device.dev, "ick");
775 long error;
776
777 if (IS_ERR(hdmi_ick)) {
778 int ret = PTR_ERR(hdmi_ick);
779 pr_err("Cannot get HDMI ICK: %d\n", ret);
780 return ret;
781 }
782
783 error = clk_round_parent(hdmi_ick, target, best_freq, parent_freq, 1, 64);
784
785 clk_put(hdmi_ick);
786
787 return error;
788}
789
798static struct gpio_led ap4evb_leds[] = { 790static struct gpio_led ap4evb_leds[] = {
799 { 791 {
800 .name = "led4", 792 .name = "led4",
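
The forward-declared ap4evb_clk_optimize() (its definition follows hdmi_device, which it needs) gives the HDMI driver a hook to steer its parent clock: clk_round_parent() searches dividers 1..64 on the HDMI internal clock's parent for the combination closest to the requested rate. A sketch of a call, with a hypothetical target:

#include <linux/kernel.h>

static void example(void)
{
        unsigned long best_freq, parent_freq;   /* filled in on success */
        long ret;

        /* 74.25 MHz is a hypothetical HDMI pixel-clock target */
        ret = ap4evb_clk_optimize(74250000, &best_freq, &parent_freq);
        if (ret < 0)
                pr_err("no usable divider: %ld\n", ret);
        /* otherwise best_freq holds the closest reachable rate and
         * parent_freq the parent rate that produces it */
}
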
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index d98deb497c2f..9aa8d68d1a9c 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -507,7 +507,7 @@ enum { MSTP001,
507 MSTP223, 507 MSTP223,
508 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 508 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
509 MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312, 509 MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
510 MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403, 510 MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403,
511 MSTP_NR }; 511 MSTP_NR };
512 512
513#define MSTP(_parent, _reg, _bit, _flags) \ 513#define MSTP(_parent, _reg, _bit, _flags) \
@@ -543,6 +543,7 @@ static struct clk mstp_clks[MSTP_NR] = {
543 [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */ 543 [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
544 [MSTP313] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 13, 0), /* SDHI1 */ 544 [MSTP313] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 13, 0), /* SDHI1 */
545 [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMC */ 545 [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMC */
546 [MSTP423] = MSTP(&div4_clks[DIV4_B], SMSTPCR4, 23, 0), /* DSITX1 */
546 [MSTP415] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 15, 0), /* SDHI2 */ 547 [MSTP415] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 15, 0), /* SDHI2 */
547 [MSTP413] = MSTP(&pllc1_div2_clk, SMSTPCR4, 13, 0), /* HDMI */ 548 [MSTP413] = MSTP(&pllc1_div2_clk, SMSTPCR4, 13, 0), /* HDMI */
548 [MSTP411] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 11, 0), /* IIC3 */ 549 [MSTP411] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 11, 0), /* IIC3 */
@@ -596,9 +597,10 @@ static struct clk_lookup lookups[] = {
596 CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_SPU]), 597 CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_SPU]),
597 CLKDEV_CON_ID("vou_clk", &div6_clks[DIV6_VOU]), 598 CLKDEV_CON_ID("vou_clk", &div6_clks[DIV6_VOU]),
598 CLKDEV_CON_ID("hdmi_clk", &div6_reparent_clks[DIV6_HDMI]), 599 CLKDEV_CON_ID("hdmi_clk", &div6_reparent_clks[DIV6_HDMI]),
599 CLKDEV_CON_ID("dsit_clk", &div6_clks[DIV6_DSIT]), 600 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
600 CLKDEV_CON_ID("dsi0p_clk", &div6_clks[DIV6_DSI0P]), 601 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
601 CLKDEV_CON_ID("dsi1p_clk", &div6_clks[DIV6_DSI1P]), 602 CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
603 CLKDEV_ICK_ID("dsi1p_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]),
602 604
603 /* MSTP32 clocks */ 605 /* MSTP32 clocks */
604 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* IIC2 */ 606 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* IIC2 */
@@ -610,7 +612,7 @@ static struct clk_lookup lookups[] = {
610 CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */ 612 CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
611 CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */ 613 CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
612 CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */ 614 CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
613 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ 615 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX0 */
614 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */ 616 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
615 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */ 617 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
616 CLKDEV_DEV_ID("uio_pdrv_genirq.5", &mstp_clks[MSTP106]), /* JPU */ 618 CLKDEV_DEV_ID("uio_pdrv_genirq.5", &mstp_clks[MSTP106]), /* JPU */
@@ -633,6 +635,7 @@ static struct clk_lookup lookups[] = {
633 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */ 635 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
634 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */ 636 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
635 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */ 637 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */
638 CLKDEV_DEV_ID("sh-mipi-dsi.1", &mstp_clks[MSTP423]), /* DSITX1 */
636 CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP415]), /* SDHI2 */ 639 CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP415]), /* SDHI2 */
637 CLKDEV_DEV_ID("sh-mobile-hdmi", &mstp_clks[MSTP413]), /* HDMI */ 640 CLKDEV_DEV_ID("sh-mobile-hdmi", &mstp_clks[MSTP413]), /* HDMI */
638 CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* IIC3 */ 641 CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* IIC3 */
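
Moving the DSI clocks from CLKDEV_CON_ID to CLKDEV_ICK_ID makes the lookup device-qualified: both MIPI-DSI instances now ask for "dsit_clk" under the same connection id, and each gets its own lookup entry (the dsi0p/dsi1p peripheral clocks genuinely differ per instance, while both share DIV6_DSIT). That is what lets the second DSITX (MSTP423) be wired up at all. A hedged sketch of the consumer side, inside the sh-mipi-dsi driver's probe:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        /* matched against CLKDEV_ICK_ID by device name ("sh-mipi-dsi.0"
         * or "sh-mipi-dsi.1") plus connection id */
        struct clk *dsit = clk_get(&pdev->dev, "dsit_clk");

        if (IS_ERR(dsit))
                return PTR_ERR(dsit);
        clk_enable(dsit);
        return 0;
}
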
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 5c7c6fc07565..183e0d226669 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1047,6 +1047,6 @@ init_hw_perf_events(void)
1047 1047
1048 return 0; 1048 return 0;
1049} 1049}
1050arch_initcall(init_hw_perf_events); 1050early_initcall(init_hw_perf_events);
1051 1051
1052#endif /* defined(CONFIG_CPU_MIPS32)... */ 1052#endif /* defined(CONFIG_CPU_MIPS32)... */
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c
index 7c07de0d8943..b150b510510f 100644
--- a/arch/powerpc/kernel/e500-pmu.c
+++ b/arch/powerpc/kernel/e500-pmu.c
@@ -126,4 +126,4 @@ static int init_e500_pmu(void)
126 return register_fsl_emb_pmu(&e500_pmu); 126 return register_fsl_emb_pmu(&e500_pmu);
127} 127}
128 128
129arch_initcall(init_e500_pmu); 129early_initcall(init_e500_pmu);
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
index 09d72028f317..2cc5e0301d0b 100644
--- a/arch/powerpc/kernel/mpc7450-pmu.c
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -414,4 +414,4 @@ static int init_mpc7450_pmu(void)
414 return register_power_pmu(&mpc7450_pmu); 414 return register_power_pmu(&mpc7450_pmu);
415} 415}
416 416
417arch_initcall(init_mpc7450_pmu); 417early_initcall(init_mpc7450_pmu);
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 3129c855933c..567480705789 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1379,7 +1379,7 @@ int register_power_pmu(struct power_pmu *pmu)
1379 freeze_events_kernel = MMCR0_FCHV; 1379 freeze_events_kernel = MMCR0_FCHV;
1380#endif /* CONFIG_PPC64 */ 1380#endif /* CONFIG_PPC64 */
1381 1381
1382 perf_pmu_register(&power_pmu); 1382 perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
1383 perf_cpu_notifier(power_pmu_notifier); 1383 perf_cpu_notifier(power_pmu_notifier);
1384 1384
1385 return 0; 1385 return 0;
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 7ecca59ddf77..4dcf5f831e9d 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -681,7 +681,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
681 pr_info("%s performance monitor hardware support registered\n", 681 pr_info("%s performance monitor hardware support registered\n",
682 pmu->name); 682 pmu->name);
683 683
684 perf_pmu_register(&fsl_emb_pmu); 684 perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
685 685
686 return 0; 686 return 0;
687} 687}
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index 2a361cdda635..ead8b3c2649e 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -613,4 +613,4 @@ static int init_power4_pmu(void)
613 return register_power_pmu(&power4_pmu); 613 return register_power_pmu(&power4_pmu);
614} 614}
615 615
616arch_initcall(init_power4_pmu); 616early_initcall(init_power4_pmu);
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 199de527d411..eca0ac595cb6 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -682,4 +682,4 @@ static int init_power5p_pmu(void)
682 return register_power_pmu(&power5p_pmu); 682 return register_power_pmu(&power5p_pmu);
683} 683}
684 684
685arch_initcall(init_power5p_pmu); 685early_initcall(init_power5p_pmu);
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index 98b6a729a9dd..d5ff0f64a5e6 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -621,4 +621,4 @@ static int init_power5_pmu(void)
621 return register_power_pmu(&power5_pmu); 621 return register_power_pmu(&power5_pmu);
622} 622}
623 623
624arch_initcall(init_power5_pmu); 624early_initcall(init_power5_pmu);
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index 84a607bda8fb..31603927e376 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -544,4 +544,4 @@ static int init_power6_pmu(void)
544 return register_power_pmu(&power6_pmu); 544 return register_power_pmu(&power6_pmu);
545} 545}
546 546
547arch_initcall(init_power6_pmu); 547early_initcall(init_power6_pmu);
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 852f7b7f6b40..593740fcb799 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -369,4 +369,4 @@ static int init_power7_pmu(void)
369 return register_power_pmu(&power7_pmu); 369 return register_power_pmu(&power7_pmu);
370} 370}
371 371
372arch_initcall(init_power7_pmu); 372early_initcall(init_power7_pmu);
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 3fee685de4df..9a6e093858fe 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -494,4 +494,4 @@ static int init_ppc970_pmu(void)
494 return register_power_pmu(&ppc970_pmu); 494 return register_power_pmu(&ppc970_pmu);
495} 495}
496 496
497arch_initcall(init_ppc970_pmu); 497early_initcall(init_ppc970_pmu);
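
All of the PMU setup hooks in this series move from arch_initcall() to early_initcall(), so the hardware PMUs are registered from do_pre_smp_initcalls(), before secondary CPUs come up and before any of the numbered initcall levels run. For reference, a rough sketch of the ordering (fn is a placeholder; see <linux/init.h>):

/* Rough initcall ordering; early_initcall()s run from
 * do_pre_smp_initcalls(), ahead of all numbered levels. */
early_initcall(init_hw_perf_events);    /* new home: pre-SMP */
core_initcall(fn);                      /* level 1 */
arch_initcall(fn);                      /* level 3: where these inits used to sit */
device_initcall(fn);                    /* level 6 */
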
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e0b98e71ff47..6c6d7b339aae 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -99,6 +99,7 @@ config S390
99 select HAVE_KERNEL_LZMA 99 select HAVE_KERNEL_LZMA
100 select HAVE_KERNEL_LZO 100 select HAVE_KERNEL_LZO
101 select HAVE_GET_USER_PAGES_FAST 101 select HAVE_GET_USER_PAGES_FAST
102 select HAVE_ARCH_MUTEX_CPU_RELAX
102 select ARCH_INLINE_SPIN_TRYLOCK 103 select ARCH_INLINE_SPIN_TRYLOCK
103 select ARCH_INLINE_SPIN_TRYLOCK_BH 104 select ARCH_INLINE_SPIN_TRYLOCK_BH
104 select ARCH_INLINE_SPIN_LOCK 105 select ARCH_INLINE_SPIN_LOCK
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
index 458c1f7fbc18..688271f5f2e4 100644
--- a/arch/s390/include/asm/mutex.h
+++ b/arch/s390/include/asm/mutex.h
@@ -7,3 +7,5 @@
7 */ 7 */
8 8
9#include <asm-generic/mutex-dec.h> 9#include <asm-generic/mutex-dec.h>
10
11#define arch_mutex_cpu_relax() barrier()
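
The new HAVE_ARCH_MUTEX_CPU_RELAX select and this override exist because cpu_relax() on s390 yields the CPU to the hypervisor via a diagnose instruction, which is far too heavy for the mutex spin-on-owner loop; a plain compiler barrier keeps the loop cheap. The generic side presumably falls back to cpu_relax() when an architecture does not opt in, along these lines:

/* Assumed generic fallback: architectures that do not select
 * HAVE_ARCH_MUTEX_CPU_RELAX keep spinning with cpu_relax(). */
#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
#define arch_mutex_cpu_relax()  cpu_relax()
#endif
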
diff --git a/arch/s390/include/asm/qeth.h b/arch/s390/include/asm/qeth.h
index 06cbd1e8c943..90efda0b137d 100644
--- a/arch/s390/include/asm/qeth.h
+++ b/arch/s390/include/asm/qeth.h
@@ -28,39 +28,70 @@ struct qeth_arp_cache_entry {
28 __u8 reserved2[32]; 28 __u8 reserved2[32];
29} __attribute__ ((packed)); 29} __attribute__ ((packed));
30 30
31enum qeth_arp_ipaddrtype {
32 QETHARP_IP_ADDR_V4 = 1,
33 QETHARP_IP_ADDR_V6 = 2,
34};
35struct qeth_arp_entrytype {
36 __u8 mac;
37 __u8 ip;
38} __attribute__((packed));
39
40#define QETH_QARP_MEDIASPECIFIC_BYTES 32
41#define QETH_QARP_MACADDRTYPE_BYTES 1
31struct qeth_arp_qi_entry7 { 42struct qeth_arp_qi_entry7 {
32 __u8 media_specific[32]; 43 __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
33 __u8 macaddr_type; 44 struct qeth_arp_entrytype type;
34 __u8 ipaddr_type;
35 __u8 macaddr[6]; 45 __u8 macaddr[6];
36 __u8 ipaddr[4]; 46 __u8 ipaddr[4];
37} __attribute__((packed)); 47} __attribute__((packed));
38 48
49struct qeth_arp_qi_entry7_ipv6 {
50 __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
51 struct qeth_arp_entrytype type;
52 __u8 macaddr[6];
53 __u8 ipaddr[16];
54} __attribute__((packed));
55
39struct qeth_arp_qi_entry7_short { 56struct qeth_arp_qi_entry7_short {
40 __u8 macaddr_type; 57 struct qeth_arp_entrytype type;
41 __u8 ipaddr_type;
42 __u8 macaddr[6]; 58 __u8 macaddr[6];
43 __u8 ipaddr[4]; 59 __u8 ipaddr[4];
44} __attribute__((packed)); 60} __attribute__((packed));
45 61
62struct qeth_arp_qi_entry7_short_ipv6 {
63 struct qeth_arp_entrytype type;
64 __u8 macaddr[6];
65 __u8 ipaddr[16];
66} __attribute__((packed));
67
46struct qeth_arp_qi_entry5 { 68struct qeth_arp_qi_entry5 {
47 __u8 media_specific[32]; 69 __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
48 __u8 macaddr_type; 70 struct qeth_arp_entrytype type;
49 __u8 ipaddr_type;
50 __u8 ipaddr[4]; 71 __u8 ipaddr[4];
51} __attribute__((packed)); 72} __attribute__((packed));
52 73
74struct qeth_arp_qi_entry5_ipv6 {
75 __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
76 struct qeth_arp_entrytype type;
77 __u8 ipaddr[16];
78} __attribute__((packed));
79
53struct qeth_arp_qi_entry5_short { 80struct qeth_arp_qi_entry5_short {
54 __u8 macaddr_type; 81 struct qeth_arp_entrytype type;
55 __u8 ipaddr_type;
56 __u8 ipaddr[4]; 82 __u8 ipaddr[4];
57} __attribute__((packed)); 83} __attribute__((packed));
58 84
85struct qeth_arp_qi_entry5_short_ipv6 {
86 struct qeth_arp_entrytype type;
87 __u8 ipaddr[16];
88} __attribute__((packed));
59/* 89/*
60 * can be set by user if no "media specific information" is wanted 90 * can be set by user if no "media specific information" is wanted
61 * -> saves a lot of space in user space buffer 91 * -> saves a lot of space in user space buffer
62 */ 92 */
63#define QETH_QARP_STRIP_ENTRIES 0x8000 93#define QETH_QARP_STRIP_ENTRIES 0x8000
94#define QETH_QARP_WITH_IPV6 0x4000
64#define QETH_QARP_REQUEST_MASK 0x00ff 95#define QETH_QARP_REQUEST_MASK 0x00ff
65 96
66/* data sent to user space as result of query arp ioctl */ 97/* data sent to user space as result of query arp ioctl */
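
The rework folds the two loose macaddr_type/ipaddr_type bytes into struct qeth_arp_entrytype and adds *_ipv6 entry variants whose only difference is the 16-byte ipaddr; QETH_QARP_WITH_IPV6 is the request bit that asks for them. A hypothetical fragment using only names from this header (the ioctl plumbing itself is assumed):

/* Hypothetical request flags for the query-ARP ioctl. */
__u16 flags = QETH_QARP_STRIP_ENTRIES   /* drop media_specific[32]  */
            | QETH_QARP_WITH_IPV6;      /* include the *_ipv6 forms */

/* Packed sizes for reference: qeth_arp_qi_entry7_ipv6 is
 * 32 + 2 + 6 + 16 = 56 bytes; the short ipv6 form is 2 + 6 + 16 = 24. */
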
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 25cf0b34dffd..e9e71120040c 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -162,7 +162,8 @@ config ARCH_HAS_CPU_IDLE_WAIT
162 def_bool y 162 def_bool y
163 163
164config NO_IOPORT 164config NO_IOPORT
165 bool 165 def_bool !PCI
166 depends on !SH_CAYMAN && !SH_SH4202_MICRODEV
166 167
167config IO_TRAPPED 168config IO_TRAPPED
168 bool 169 bool
@@ -275,6 +276,7 @@ config CPU_SUBTYPE_SH7203
275 select CPU_HAS_FPU 276 select CPU_HAS_FPU
276 select SYS_SUPPORTS_CMT 277 select SYS_SUPPORTS_CMT
277 select SYS_SUPPORTS_MTU2 278 select SYS_SUPPORTS_MTU2
279 select ARCH_WANT_OPTIONAL_GPIOLIB
278 280
279config CPU_SUBTYPE_SH7206 281config CPU_SUBTYPE_SH7206
280 bool "Support SH7206 processor" 282 bool "Support SH7206 processor"
@@ -346,6 +348,7 @@ config CPU_SUBTYPE_SH7720
346 select CPU_SH3 348 select CPU_SH3
347 select CPU_HAS_DSP 349 select CPU_HAS_DSP
348 select SYS_SUPPORTS_CMT 350 select SYS_SUPPORTS_CMT
351 select ARCH_WANT_OPTIONAL_GPIOLIB
349 help 352 help
350 Select SH7720 if you have a SH3-DSP SH7720 CPU. 353 Select SH7720 if you have a SH3-DSP SH7720 CPU.
351 354
@@ -408,6 +411,7 @@ config CPU_SUBTYPE_SH7723
408 select ARCH_SHMOBILE 411 select ARCH_SHMOBILE
409 select ARCH_SPARSEMEM_ENABLE 412 select ARCH_SPARSEMEM_ENABLE
410 select SYS_SUPPORTS_CMT 413 select SYS_SUPPORTS_CMT
414 select ARCH_WANT_OPTIONAL_GPIOLIB
411 help 415 help
412 Select SH7723 if you have an SH-MobileR2 CPU. 416 Select SH7723 if you have an SH-MobileR2 CPU.
413 417
@@ -418,6 +422,7 @@ config CPU_SUBTYPE_SH7724
418 select ARCH_SHMOBILE 422 select ARCH_SHMOBILE
419 select ARCH_SPARSEMEM_ENABLE 423 select ARCH_SPARSEMEM_ENABLE
420 select SYS_SUPPORTS_CMT 424 select SYS_SUPPORTS_CMT
425 select ARCH_WANT_OPTIONAL_GPIOLIB
421 help 426 help
422 Select SH7724 if you have an SH-MobileR2R CPU. 427 Select SH7724 if you have an SH-MobileR2R CPU.
423 428
@@ -425,6 +430,7 @@ config CPU_SUBTYPE_SH7757
425 bool "Support SH7757 processor" 430 bool "Support SH7757 processor"
426 select CPU_SH4A 431 select CPU_SH4A
427 select CPU_SHX2 432 select CPU_SHX2
433 select ARCH_WANT_OPTIONAL_GPIOLIB
428 help 434 help
429 Select SH7757 if you have a SH4A SH7757 CPU. 435 Select SH7757 if you have a SH4A SH7757 CPU.
430 436
@@ -448,6 +454,7 @@ config CPU_SUBTYPE_SH7785
448 select CPU_SHX2 454 select CPU_SHX2
449 select ARCH_SPARSEMEM_ENABLE 455 select ARCH_SPARSEMEM_ENABLE
450 select SYS_SUPPORTS_NUMA 456 select SYS_SUPPORTS_NUMA
457 select ARCH_WANT_OPTIONAL_GPIOLIB
451 458
452config CPU_SUBTYPE_SH7786 459config CPU_SUBTYPE_SH7786
453 bool "Support SH7786 processor" 460 bool "Support SH7786 processor"
@@ -455,6 +462,7 @@ config CPU_SUBTYPE_SH7786
455 select CPU_SHX3 462 select CPU_SHX3
456 select CPU_HAS_PTEAEX 463 select CPU_HAS_PTEAEX
457 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 464 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
465 select ARCH_WANT_OPTIONAL_GPIOLIB
458 466
459config CPU_SUBTYPE_SHX3 467config CPU_SUBTYPE_SHX3
460 bool "Support SH-X3 processor" 468 bool "Support SH-X3 processor"
@@ -479,6 +487,7 @@ config CPU_SUBTYPE_SH7722
479 select ARCH_SPARSEMEM_ENABLE 487 select ARCH_SPARSEMEM_ENABLE
480 select SYS_SUPPORTS_NUMA 488 select SYS_SUPPORTS_NUMA
481 select SYS_SUPPORTS_CMT 489 select SYS_SUPPORTS_CMT
490 select ARCH_WANT_OPTIONAL_GPIOLIB
482 491
483config CPU_SUBTYPE_SH7366 492config CPU_SUBTYPE_SH7366
484 bool "Support SH7366 processor" 493 bool "Support SH7366 processor"
@@ -568,15 +577,6 @@ config SH_CLK_CPG_LEGACY
568 def_bool y if !CPU_SUBTYPE_SH7785 && !ARCH_SHMOBILE && \ 577 def_bool y if !CPU_SUBTYPE_SH7785 && !ARCH_SHMOBILE && \
569 !CPU_SHX3 && !CPU_SUBTYPE_SH7757 578 !CPU_SHX3 && !CPU_SUBTYPE_SH7757
570 579
571config SH_CLK_MD
572 int "CPU Mode Pin Setting"
573 depends on CPU_SH2
574 default 6 if CPU_SUBTYPE_SH7206
575 default 5 if CPU_SUBTYPE_SH7619
576 default 0
577 help
578 MD2 - MD0 pin setting.
579
580source "kernel/time/Kconfig" 580source "kernel/time/Kconfig"
581 581
582endmenu 582endmenu
diff --git a/arch/sh/boards/board-secureedge5410.c b/arch/sh/boards/board-secureedge5410.c
index 32f875e8493d..f968f17891a4 100644
--- a/arch/sh/boards/board-secureedge5410.c
+++ b/arch/sh/boards/board-secureedge5410.c
@@ -29,8 +29,6 @@ unsigned short secureedge5410_ioport;
29 */ 29 */
30static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id) 30static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
31{ 31{
32 ctrl_delay(); /* dummy read */
33
34 printk("SnapGear: erase switch interrupt!\n"); 32 printk("SnapGear: erase switch interrupt!\n");
35 33
36 return IRQ_HANDLED; 34 return IRQ_HANDLED;
diff --git a/arch/sh/boards/mach-rsk/devices-rsk7203.c b/arch/sh/boards/mach-rsk/devices-rsk7203.c
index 4fa08ba10253..a8089f79d058 100644
--- a/arch/sh/boards/mach-rsk/devices-rsk7203.c
+++ b/arch/sh/boards/mach-rsk/devices-rsk7203.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Renesas Technology Europe RSK+ 7203 Support. 2 * Renesas Technology Europe RSK+ 7203 Support.
3 * 3 *
4 * Copyright (C) 2008 Paul Mundt 4 * Copyright (C) 2008 - 2010 Paul Mundt
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive 7 * License. See the file "COPYING" in the main directory of this archive
@@ -12,7 +12,9 @@
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/smsc911x.h> 14#include <linux/smsc911x.h>
15#include <linux/input.h>
15#include <linux/gpio.h> 16#include <linux/gpio.h>
17#include <linux/gpio_keys.h>
16#include <linux/leds.h> 18#include <linux/leds.h>
17#include <asm/machvec.h> 19#include <asm/machvec.h>
18#include <asm/io.h> 20#include <asm/io.h>
@@ -84,9 +86,42 @@ static struct platform_device led_device = {
84 }, 86 },
85}; 87};
86 88
89static struct gpio_keys_button rsk7203_gpio_keys_table[] = {
90 {
91 .code = BTN_0,
92 .gpio = GPIO_PB0,
93 .active_low = 1,
94 .desc = "SW1",
95 }, {
96 .code = BTN_1,
97 .gpio = GPIO_PB1,
98 .active_low = 1,
99 .desc = "SW2",
100 }, {
101 .code = BTN_2,
102 .gpio = GPIO_PB2,
103 .active_low = 1,
104 .desc = "SW3",
105 },
106};
107
108static struct gpio_keys_platform_data rsk7203_gpio_keys_info = {
109 .buttons = rsk7203_gpio_keys_table,
110 .nbuttons = ARRAY_SIZE(rsk7203_gpio_keys_table),
111 .poll_interval = 50, /* default to 50ms */
112};
113
114static struct platform_device keys_device = {
115 .name = "gpio-keys-polled",
116 .dev = {
117 .platform_data = &rsk7203_gpio_keys_info,
118 },
119};
120
87static struct platform_device *rsk7203_devices[] __initdata = { 121static struct platform_device *rsk7203_devices[] __initdata = {
88 &smsc911x_device, 122 &smsc911x_device,
89 &led_device, 123 &led_device,
124 &keys_device,
90}; 125};
91 126
92static int __init rsk7203_devices_setup(void) 127static int __init rsk7203_devices_setup(void)
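
The RSK+7203 switches have no dedicated IRQ lines, so the board registers against the polled flavour of the gpio-keys driver: "gpio-keys-polled" samples PB0-PB2 every poll_interval milliseconds and reports BTN_0..BTN_2 through the input layer. From user space the keys then look like any other evdev device; a sketch (the event node path is illustrative):

#include <linux/input.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct input_event ev;
        int fd = open("/dev/input/event0", O_RDONLY);   /* node varies */

        if (fd < 0)
                return 1;

        while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
                if (ev.type == EV_KEY && ev.value == 1)
                        printf("button %d pressed\n", ev.code - BTN_0);
        return 0;
}
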
diff --git a/arch/sh/boards/mach-sdk7786/Makefile b/arch/sh/boards/mach-sdk7786/Makefile
index 23ff7d4ac491..8ae56e9560ac 100644
--- a/arch/sh/boards/mach-sdk7786/Makefile
+++ b/arch/sh/boards/mach-sdk7786/Makefile
@@ -1,4 +1,4 @@
1obj-y := fpga.o irq.o setup.o 1obj-y := fpga.o irq.o nmi.o setup.o
2 2
3obj-$(CONFIG_GENERIC_GPIO) += gpio.o 3obj-$(CONFIG_GENERIC_GPIO) += gpio.o
4obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o 4obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o
diff --git a/arch/sh/boards/mach-sdk7786/nmi.c b/arch/sh/boards/mach-sdk7786/nmi.c
new file mode 100644
index 000000000000..edcfa1f568ba
--- /dev/null
+++ b/arch/sh/boards/mach-sdk7786/nmi.c
@@ -0,0 +1,83 @@
1/*
2 * SDK7786 FPGA NMI Support.
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/init.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <mach/fpga.h>
14
15enum {
16 NMI_MODE_MANUAL,
17 NMI_MODE_AUX,
18 NMI_MODE_MASKED,
19 NMI_MODE_ANY,
20 NMI_MODE_UNKNOWN,
21};
22
23/*
24 * Default to the manual NMI switch.
25 */
26static unsigned int __initdata nmi_mode = NMI_MODE_ANY;
27
28static int __init nmi_mode_setup(char *str)
29{
30 if (!str)
31 return 0;
32
33 if (strcmp(str, "manual") == 0)
34 nmi_mode = NMI_MODE_MANUAL;
35 else if (strcmp(str, "aux") == 0)
36 nmi_mode = NMI_MODE_AUX;
37 else if (strcmp(str, "masked") == 0)
38 nmi_mode = NMI_MODE_MASKED;
39 else if (strcmp(str, "any") == 0)
40 nmi_mode = NMI_MODE_ANY;
41 else {
42 nmi_mode = NMI_MODE_UNKNOWN;
43 pr_warning("Unknown NMI mode %s\n", str);
44 }
45
46 printk("Set NMI mode to %d\n", nmi_mode);
47 return 0;
48}
49early_param("nmi_mode", nmi_mode_setup);
50
51void __init sdk7786_nmi_init(void)
52{
53 unsigned int source, mask, tmp;
54
55 switch (nmi_mode) {
56 case NMI_MODE_MANUAL:
57 source = NMISR_MAN_NMI;
58 mask = NMIMR_MAN_NMIM;
59 break;
60 case NMI_MODE_AUX:
61 source = NMISR_AUX_NMI;
62 mask = NMIMR_AUX_NMIM;
63 break;
64 case NMI_MODE_ANY:
65 source = NMISR_MAN_NMI | NMISR_AUX_NMI;
66 mask = NMIMR_MAN_NMIM | NMIMR_AUX_NMIM;
67 break;
68 case NMI_MODE_MASKED:
69 case NMI_MODE_UNKNOWN:
70 default:
71 source = mask = 0;
72 break;
73 }
74
75 /* Set the NMI source */
76 tmp = fpga_read_reg(NMISR);
77 tmp &= ~NMISR_MASK;
78 tmp |= source;
79 fpga_write_reg(tmp, NMISR);
80
81 /* And the IRQ masking */
82 fpga_write_reg(NMIMR_MASK ^ mask, NMIMR);
83}
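
Two notes on the new file: despite the "manual NMI switch" comment, the compiled-in default is NMI_MODE_ANY, and the mode can be chosen at boot via the nmi_mode= early parameter (e.g. nmi_mode=aux). The final NMIMR write is an XOR against the full mask, so exactly the selected sources come out unmasked; traced by hand (nmi_mask_demo is a hypothetical name, and a set NMIMR bit masks that NMI):

static void __init nmi_mask_demo(void)
{
        unsigned int mask;

        mask = NMIMR_MAN_NMIM | NMIMR_AUX_NMIM;   /* NMI_MODE_ANY    */
        fpga_write_reg(NMIMR_MASK ^ mask, NMIMR); /* writes 0: both unmasked */

        mask = 0;                                 /* NMI_MODE_MASKED */
        fpga_write_reg(NMIMR_MASK ^ mask, NMIMR); /* writes MAN|AUX: both masked */
}
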
diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c
index 7e0c4e3878e0..75e4ddbbec3e 100644
--- a/arch/sh/boards/mach-sdk7786/setup.c
+++ b/arch/sh/boards/mach-sdk7786/setup.c
@@ -237,6 +237,7 @@ static void __init sdk7786_setup(char **cmdline_p)
237 pr_info("Renesas Technology Europe SDK7786 support:\n"); 237 pr_info("Renesas Technology Europe SDK7786 support:\n");
238 238
239 sdk7786_fpga_init(); 239 sdk7786_fpga_init();
240 sdk7786_nmi_init();
240 241
241 pr_info("\tPCB revision:\t%d\n", fpga_read_reg(PCBRR) & 0xf); 242 pr_info("\tPCB revision:\t%d\n", fpga_read_reg(PCBRR) & 0xf);
242 243
diff --git a/arch/sh/boards/mach-se/7206/setup.c b/arch/sh/boards/mach-se/7206/setup.c
index 7f4871c71a01..33039e0dc568 100644
--- a/arch/sh/boards/mach-se/7206/setup.c
+++ b/arch/sh/boards/mach-se/7206/setup.c
@@ -79,6 +79,11 @@ static int __init se7206_devices_setup(void)
79} 79}
80__initcall(se7206_devices_setup); 80__initcall(se7206_devices_setup);
81 81
82static int se7206_mode_pins(void)
83{
84 return MODE_PIN1 | MODE_PIN2;
85}
86
82/* 87/*
83 * The Machine Vector 88 * The Machine Vector
84 */ 89 */
@@ -87,4 +92,5 @@ static struct sh_machine_vector mv_se __initmv = {
87 .mv_name = "SolutionEngine", 92 .mv_name = "SolutionEngine",
88 .mv_nr_irqs = 256, 93 .mv_nr_irqs = 256,
89 .mv_init_irq = init_se7206_IRQ, 94 .mv_init_irq = init_se7206_IRQ,
95 .mv_mode_pins = se7206_mode_pins,
90}; 96};
diff --git a/arch/sh/boards/mach-se/board-se7619.c b/arch/sh/boards/mach-se/board-se7619.c
index 1d0ef7faa10d..82b6d4a5dc02 100644
--- a/arch/sh/boards/mach-se/board-se7619.c
+++ b/arch/sh/boards/mach-se/board-se7619.c
@@ -11,6 +11,11 @@
11#include <asm/io.h> 11#include <asm/io.h>
12#include <asm/machvec.h> 12#include <asm/machvec.h>
13 13
14static int se7619_mode_pins(void)
15{
16 return MODE_PIN2 | MODE_PIN0;
17}
18
14/* 19/*
15 * The Machine Vector 20 * The Machine Vector
16 */ 21 */
@@ -18,4 +23,5 @@
18static struct sh_machine_vector mv_se __initmv = { 23static struct sh_machine_vector mv_se __initmv = {
19 .mv_name = "SolutionEngine", 24 .mv_name = "SolutionEngine",
20 .mv_nr_irqs = 108, 25 .mv_nr_irqs = 108,
26 .mv_mode_pins = se7619_mode_pins,
21}; 27};
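
Both SolutionEngine boards now report their MD pin strapping through the new mv_mode_pins hook rather than the CONFIG_SH_CLK_MD constant deleted from Kconfig above; the clock code consumes it at runtime (see the sh7619/sh7201 clock diffs later in the series). The accessor is presumably a simple mask test over the machine vector, along the lines of:

/* Assumed accessor shape: nonzero when the requested pins intersect
 * the strapping the board's machine vector reports. */
static inline int test_mode_pin(int pin)
{
        return sh_mv.mv_mode_pins() & pin;
}
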
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index 9ad904a110de..cc61eda44922 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -54,6 +54,8 @@ CONFIG_INPUT_EVDEV=y
54# CONFIG_KEYBOARD_ATKBD is not set 54# CONFIG_KEYBOARD_ATKBD is not set
55CONFIG_KEYBOARD_SH_KEYSC=y 55CONFIG_KEYBOARD_SH_KEYSC=y
56# CONFIG_INPUT_MOUSE is not set 56# CONFIG_INPUT_MOUSE is not set
57CONFIG_INPUT_TOUCHSCREEN=y
58CONFIG_TOUCHSCREEN_MIGOR=y
57# CONFIG_SERIO is not set 59# CONFIG_SERIO is not set
58CONFIG_VT_HW_CONSOLE_BINDING=y 60CONFIG_VT_HW_CONSOLE_BINDING=y
59CONFIG_SERIAL_SH_SCI=y 61CONFIG_SERIAL_SH_SCI=y
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 60ee09a4e121..a09c77dd09db 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -382,14 +382,13 @@ static void __iomem *ioport_map_pci(struct pci_dev *dev,
382 struct pci_channel *chan = dev->sysdata; 382 struct pci_channel *chan = dev->sysdata;
383 383
384 if (unlikely(!chan->io_map_base)) { 384 if (unlikely(!chan->io_map_base)) {
385 chan->io_map_base = generic_io_base; 385 chan->io_map_base = sh_io_port_base;
386 386
387 if (pci_domains_supported) 387 if (pci_domains_supported)
388 panic("To avoid data corruption io_map_base MUST be " 388 panic("To avoid data corruption io_map_base MUST be "
389 "set with multiple PCI domains."); 389 "set with multiple PCI domains.");
390 } 390 }
391 391
392
393 return (void __iomem *)(chan->io_map_base + port); 392 return (void __iomem *)(chan->io_map_base + port);
394} 393}
395 394
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index b237d525d592..89ab2c57a4c2 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -1,5 +1,6 @@
1#ifndef __ASM_SH_IO_H 1#ifndef __ASM_SH_IO_H
2#define __ASM_SH_IO_H 2#define __ASM_SH_IO_H
3
3/* 4/*
4 * Convention: 5 * Convention:
5 * read{b,w,l,q}/write{b,w,l,q} are for PCI, 6 * read{b,w,l,q}/write{b,w,l,q} are for PCI,
@@ -15,12 +16,6 @@
15 * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice 16 * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice
16 * these have the same semantics as the __raw variants, and as such, all 17 * these have the same semantics as the __raw variants, and as such, all
17 * new code should be using the __raw versions. 18 * new code should be using the __raw versions.
18 *
19 * All ISA I/O routines are wrapped through the machine vector. If a
20 * board does not provide overrides, a generic set that are copied in
21 * from the default machine vector are used instead. These are largely
22 * for old compat code for I/O offseting to SuperIOs, all of which are
23 * better handled through the machvec ioport mapping routines these days.
24 */ 19 */
25#include <linux/errno.h> 20#include <linux/errno.h>
26#include <asm/cache.h> 21#include <asm/cache.h>
@@ -31,39 +26,10 @@
31#include <asm-generic/iomap.h> 26#include <asm-generic/iomap.h>
32 27
33#ifdef __KERNEL__ 28#ifdef __KERNEL__
34/* 29#define __IO_PREFIX generic
35 * Depending on which platform we are running on, we need different
36 * I/O functions.
37 */
38#define __IO_PREFIX generic
39#include <asm/io_generic.h> 30#include <asm/io_generic.h>
40#include <asm/io_trapped.h> 31#include <asm/io_trapped.h>
41 32
42#ifdef CONFIG_HAS_IOPORT
43
44#define inb(p) sh_mv.mv_inb((p))
45#define inw(p) sh_mv.mv_inw((p))
46#define inl(p) sh_mv.mv_inl((p))
47#define outb(x,p) sh_mv.mv_outb((x),(p))
48#define outw(x,p) sh_mv.mv_outw((x),(p))
49#define outl(x,p) sh_mv.mv_outl((x),(p))
50
51#define inb_p(p) sh_mv.mv_inb_p((p))
52#define inw_p(p) sh_mv.mv_inw_p((p))
53#define inl_p(p) sh_mv.mv_inl_p((p))
54#define outb_p(x,p) sh_mv.mv_outb_p((x),(p))
55#define outw_p(x,p) sh_mv.mv_outw_p((x),(p))
56#define outl_p(x,p) sh_mv.mv_outl_p((x),(p))
57
58#define insb(p,b,c) sh_mv.mv_insb((p), (b), (c))
59#define insw(p,b,c) sh_mv.mv_insw((p), (b), (c))
60#define insl(p,b,c) sh_mv.mv_insl((p), (b), (c))
61#define outsb(p,b,c) sh_mv.mv_outsb((p), (b), (c))
62#define outsw(p,b,c) sh_mv.mv_outsw((p), (b), (c))
63#define outsl(p,b,c) sh_mv.mv_outsl((p), (b), (c))
64
65#endif
66
67#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v)) 33#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v))
68#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v)) 34#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
69#define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v)) 35#define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
@@ -74,68 +40,39 @@
74#define __raw_readl(a) (__chk_io_ptr(a), *(volatile u32 __force *)(a)) 40#define __raw_readl(a) (__chk_io_ptr(a), *(volatile u32 __force *)(a))
75#define __raw_readq(a) (__chk_io_ptr(a), *(volatile u64 __force *)(a)) 41#define __raw_readq(a) (__chk_io_ptr(a), *(volatile u64 __force *)(a))
76 42
77#define readb(a) ({ u8 r_ = __raw_readb(a); mb(); r_; }) 43#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; })
78#define readw(a) ({ u16 r_ = __raw_readw(a); mb(); r_; }) 44#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \
79#define readl(a) ({ u32 r_ = __raw_readl(a); mb(); r_; }) 45 __raw_readw(c)); __v; })
80#define readq(a) ({ u64 r_ = __raw_readq(a); mb(); r_; }) 46#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \
81 47 __raw_readl(c)); __v; })
82#define writeb(v,a) ({ __raw_writeb((v),(a)); mb(); }) 48#define readq_relaxed(c) ({ u64 __v = le64_to_cpu((__force __le64) \
83#define writew(v,a) ({ __raw_writew((v),(a)); mb(); }) 49 __raw_readq(c)); __v; })
84#define writel(v,a) ({ __raw_writel((v),(a)); mb(); }) 50
85#define writeq(v,a) ({ __raw_writeq((v),(a)); mb(); }) 51#define writeb_relaxed(v,c) ((void)__raw_writeb(v,c))
86 52#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \
87/* 53 cpu_to_le16(v),c))
88 * Legacy SuperH on-chip I/O functions 54#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
89 * 55 cpu_to_le32(v),c))
90 * These are all deprecated, all new (and especially cross-platform) code 56#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64) \
91 * should be using the __raw_xxx() routines directly. 57 cpu_to_le64(v),c))
92 */ 58
93static inline u8 __deprecated ctrl_inb(unsigned long addr) 59#define readb(a) ({ u8 r_ = readb_relaxed(a); rmb(); r_; })
94{ 60#define readw(a) ({ u16 r_ = readw_relaxed(a); rmb(); r_; })
95 return __raw_readb(addr); 61#define readl(a) ({ u32 r_ = readl_relaxed(a); rmb(); r_; })
96} 62#define readq(a) ({ u64 r_ = readq_relaxed(a); rmb(); r_; })
97 63
98static inline u16 __deprecated ctrl_inw(unsigned long addr) 64#define writeb(v,a) ({ wmb(); writeb_relaxed((v),(a)); })
99{ 65#define writew(v,a) ({ wmb(); writew_relaxed((v),(a)); })
100 return __raw_readw(addr); 66#define writel(v,a) ({ wmb(); writel_relaxed((v),(a)); })
101} 67#define writeq(v,a) ({ wmb(); writeq_relaxed((v),(a)); })
102 68
103static inline u32 __deprecated ctrl_inl(unsigned long addr) 69#define readsb(p,d,l) __raw_readsb(p,d,l)
104{ 70#define readsw(p,d,l) __raw_readsw(p,d,l)
105 return __raw_readl(addr); 71#define readsl(p,d,l) __raw_readsl(p,d,l)
106} 72
107 73#define writesb(p,d,l) __raw_writesb(p,d,l)
108static inline u64 __deprecated ctrl_inq(unsigned long addr) 74#define writesw(p,d,l) __raw_writesw(p,d,l)
109{ 75#define writesl(p,d,l) __raw_writesl(p,d,l)
110 return __raw_readq(addr);
111}
112
113static inline void __deprecated ctrl_outb(u8 v, unsigned long addr)
114{
115 __raw_writeb(v, addr);
116}
117
118static inline void __deprecated ctrl_outw(u16 v, unsigned long addr)
119{
120 __raw_writew(v, addr);
121}
122
123static inline void __deprecated ctrl_outl(u32 v, unsigned long addr)
124{
125 __raw_writel(v, addr);
126}
127
128static inline void __deprecated ctrl_outq(u64 v, unsigned long addr)
129{
130 __raw_writeq(v, addr);
131}
132
133extern unsigned long generic_io_base;
134
135static inline void ctrl_delay(void)
136{
137 __raw_readw(generic_io_base);
138}
139 76
140#define __BUILD_UNCACHED_IO(bwlq, type) \ 77#define __BUILD_UNCACHED_IO(bwlq, type) \
141static inline type read##bwlq##_uncached(unsigned long addr) \ 78static inline type read##bwlq##_uncached(unsigned long addr) \
@@ -159,10 +96,11 @@ __BUILD_UNCACHED_IO(w, u16)
159__BUILD_UNCACHED_IO(l, u32) 96__BUILD_UNCACHED_IO(l, u32)
160__BUILD_UNCACHED_IO(q, u64) 97__BUILD_UNCACHED_IO(q, u64)
161 98
162#define __BUILD_MEMORY_STRING(bwlq, type) \ 99#define __BUILD_MEMORY_STRING(pfx, bwlq, type) \
163 \ 100 \
164static inline void __raw_writes##bwlq(volatile void __iomem *mem, \ 101static inline void \
165 const void *addr, unsigned int count) \ 102pfx##writes##bwlq(volatile void __iomem *mem, const void *addr, \
103 unsigned int count) \
166{ \ 104{ \
167 const volatile type *__addr = addr; \ 105 const volatile type *__addr = addr; \
168 \ 106 \
@@ -172,8 +110,8 @@ static inline void __raw_writes##bwlq(volatile void __iomem *mem, \
172 } \ 110 } \
173} \ 111} \
174 \ 112 \
175static inline void __raw_reads##bwlq(volatile void __iomem *mem, \ 113static inline void pfx##reads##bwlq(volatile void __iomem *mem, \
176 void *addr, unsigned int count) \ 114 void *addr, unsigned int count) \
177{ \ 115{ \
178 volatile type *__addr = addr; \ 116 volatile type *__addr = addr; \
179 \ 117 \
@@ -183,85 +121,166 @@ static inline void __raw_reads##bwlq(volatile void __iomem *mem, \
183 } \ 121 } \
184} 122}
185 123
186__BUILD_MEMORY_STRING(b, u8) 124__BUILD_MEMORY_STRING(__raw_, b, u8)
187__BUILD_MEMORY_STRING(w, u16) 125__BUILD_MEMORY_STRING(__raw_, w, u16)
188 126
189#ifdef CONFIG_SUPERH32 127#ifdef CONFIG_SUPERH32
190void __raw_writesl(void __iomem *addr, const void *data, int longlen); 128void __raw_writesl(void __iomem *addr, const void *data, int longlen);
191void __raw_readsl(const void __iomem *addr, void *data, int longlen); 129void __raw_readsl(const void __iomem *addr, void *data, int longlen);
192#else 130#else
193__BUILD_MEMORY_STRING(l, u32) 131__BUILD_MEMORY_STRING(__raw_, l, u32)
194#endif 132#endif
195 133
196__BUILD_MEMORY_STRING(q, u64) 134__BUILD_MEMORY_STRING(__raw_, q, u64)
197 135
198#define writesb __raw_writesb 136#ifdef CONFIG_HAS_IOPORT
199#define writesw __raw_writesw 137
200#define writesl __raw_writesl 138/*
201 139 * Slowdown I/O port space accesses for antique hardware.
202#define readsb __raw_readsb 140 */
203#define readsw __raw_readsw 141#undef CONF_SLOWDOWN_IO
204#define readsl __raw_readsl 142
205 143/*
206#define readb_relaxed(a) readb(a) 144 * On SuperH I/O ports are memory mapped, so we access them using normal
207#define readw_relaxed(a) readw(a) 145 * load/store instructions. sh_io_port_base is the virtual address to
208#define readl_relaxed(a) readl(a) 146 * which all ports are being mapped.
209#define readq_relaxed(a) readq(a) 147 */
210 148extern const unsigned long sh_io_port_base;
211#ifndef CONFIG_GENERIC_IOMAP 149
212/* Simple MMIO */ 150static inline void __set_io_port_base(unsigned long pbase)
213#define ioread8(a) __raw_readb(a) 151{
214#define ioread16(a) __raw_readw(a) 152 *(unsigned long *)&sh_io_port_base = pbase;
215#define ioread16be(a) be16_to_cpu(__raw_readw((a))) 153 barrier();
216#define ioread32(a) __raw_readl(a) 154}
217#define ioread32be(a) be32_to_cpu(__raw_readl((a))) 155
218 156#ifdef CONFIG_GENERIC_IOMAP
219#define iowrite8(v,a) __raw_writeb((v),(a)) 157#define __ioport_map ioport_map
220#define iowrite16(v,a) __raw_writew((v),(a)) 158#else
221#define iowrite16be(v,a) __raw_writew(cpu_to_be16((v)),(a)) 159extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
222#define iowrite32(v,a) __raw_writel((v),(a))
223#define iowrite32be(v,a) __raw_writel(cpu_to_be32((v)),(a))
224
225#define ioread8_rep(a, d, c) __raw_readsb((a), (d), (c))
226#define ioread16_rep(a, d, c) __raw_readsw((a), (d), (c))
227#define ioread32_rep(a, d, c) __raw_readsl((a), (d), (c))
228
229#define iowrite8_rep(a, s, c) __raw_writesb((a), (s), (c))
230#define iowrite16_rep(a, s, c) __raw_writesw((a), (s), (c))
231#define iowrite32_rep(a, s, c) __raw_writesl((a), (s), (c))
232#endif 160#endif
233 161
234#define mmio_insb(p,d,c) __raw_readsb(p,d,c) 162#ifdef CONF_SLOWDOWN_IO
235#define mmio_insw(p,d,c) __raw_readsw(p,d,c) 163#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
236#define mmio_insl(p,d,c) __raw_readsl(p,d,c) 164#else
165#define SLOW_DOWN_IO
166#endif
237 167
238#define mmio_outsb(p,s,c) __raw_writesb(p,s,c) 168#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
239#define mmio_outsw(p,s,c) __raw_writesw(p,s,c) 169 \
240#define mmio_outsl(p,s,c) __raw_writesl(p,s,c) 170static inline void pfx##out##bwlq##p(type val, unsigned long port) \
171{ \
172 volatile type *__addr; \
173 \
174 __addr = __ioport_map(port, sizeof(type)); \
175 *__addr = val; \
176 slow; \
177} \
178 \
179static inline type pfx##in##bwlq##p(unsigned long port) \
180{ \
181 volatile type *__addr; \
182 type __val; \
183 \
184 __addr = __ioport_map(port, sizeof(type)); \
185 __val = *__addr; \
186 slow; \
187 \
188 return __val; \
189}
241 190
242/* synco on SH-4A, otherwise a nop */ 191#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
243#define mmiowb() wmb() 192 __BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \
193 __BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)
244 194
245#define IO_SPACE_LIMIT 0xffffffff 195#define BUILDIO_IOPORT(bwlq, type) \
196 __BUILD_IOPORT_PFX(, bwlq, type)
246 197
247#ifdef CONFIG_HAS_IOPORT 198BUILDIO_IOPORT(b, u8)
199BUILDIO_IOPORT(w, u16)
200BUILDIO_IOPORT(l, u32)
201BUILDIO_IOPORT(q, u64)
202
203#define __BUILD_IOPORT_STRING(bwlq, type) \
204 \
205static inline void outs##bwlq(unsigned long port, const void *addr, \
206 unsigned int count) \
207{ \
208 const volatile type *__addr = addr; \
209 \
210 while (count--) { \
211 out##bwlq(*__addr, port); \
212 __addr++; \
213 } \
214} \
215 \
216static inline void ins##bwlq(unsigned long port, void *addr, \
217 unsigned int count) \
218{ \
219 volatile type *__addr = addr; \
220 \
221 while (count--) { \
222 *__addr = in##bwlq(port); \
223 __addr++; \
224 } \
225}
226
227__BUILD_IOPORT_STRING(b, u8)
228__BUILD_IOPORT_STRING(w, u16)
229__BUILD_IOPORT_STRING(l, u32)
230__BUILD_IOPORT_STRING(q, u64)
231
232#endif
248 233
249/* 234/*
250 * This function provides a method for the generic case where a 235 * Legacy SuperH on-chip I/O functions
251 * board-specific ioport_map simply needs to return the port + some
252 * arbitrary port base.
253 * 236 *
254 * We use this at board setup time to implicitly set the port base, and 237 * These are all deprecated, all new (and especially cross-platform) code
255 * as a result, we can use the generic ioport_map. 238 * should be using the __raw_xxx() routines directly.
256 */ 239 */
257static inline void __set_io_port_base(unsigned long pbase) 240static inline u8 __deprecated ctrl_inb(unsigned long addr)
258{ 241{
259 generic_io_base = pbase; 242 return __raw_readb(addr);
260} 243}
261 244
262#define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n)) 245static inline u16 __deprecated ctrl_inw(unsigned long addr)
246{
247 return __raw_readw(addr);
248}
263 249
264#endif 250static inline u32 __deprecated ctrl_inl(unsigned long addr)
251{
252 return __raw_readl(addr);
253}
254
255static inline u64 __deprecated ctrl_inq(unsigned long addr)
256{
257 return __raw_readq(addr);
258}
259
260static inline void __deprecated ctrl_outb(u8 v, unsigned long addr)
261{
262 __raw_writeb(v, addr);
263}
264
265static inline void __deprecated ctrl_outw(u16 v, unsigned long addr)
266{
267 __raw_writew(v, addr);
268}
269
270static inline void __deprecated ctrl_outl(u32 v, unsigned long addr)
271{
272 __raw_writel(v, addr);
273}
274
275static inline void __deprecated ctrl_outq(u64 v, unsigned long addr)
276{
277 __raw_writeq(v, addr);
278}
279
280#define IO_SPACE_LIMIT 0xffffffff
281
282/* synco on SH-4A, otherwise a nop */
283#define mmiowb() wmb()
265 284
266/* We really want to try and get these to memcpy etc */ 285/* We really want to try and get these to memcpy etc */
267void memcpy_fromio(void *, const volatile void __iomem *, unsigned long); 286void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
@@ -395,10 +414,6 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
395#define ioremap_nocache ioremap 414#define ioremap_nocache ioremap
396#define iounmap __iounmap 415#define iounmap __iounmap
397 416
398#define maybebadio(port) \
399 printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
400 __func__, __LINE__, (port), (u32)__builtin_return_address(0))
401
402/* 417/*
403 * Convert a physical pointer to a virtual kernel pointer for /dev/mem 418 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
404 * access 419 * access
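
This is the core of the I/O rework: the per-board machine-vector port hooks are gone, inb()/outb() and friends are generated inline on top of __ioport_map() with sh_io_port_base as the linear default, and the MMIO accessors gain real readX_relaxed()/writeX_relaxed() variants with the barriers layered on in readX()/writeX(). Expanding BUILDIO_IOPORT(b, u8) by hand gives roughly:

/* Hand expansion of __BUILD_IOPORT_SINGLE(, b, u8, , ): the generated
 * non-_p pair.  The _p variants differ only in a trailing SLOW_DOWN_IO. */
static inline void outb(u8 val, unsigned long port)
{
        volatile u8 *__addr = __ioport_map(port, sizeof(u8));

        *__addr = val;
}

static inline u8 inb(unsigned long port)
{
        volatile u8 *__addr = __ioport_map(port, sizeof(u8));

        return *__addr;
}
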
diff --git a/arch/sh/include/asm/io_generic.h b/arch/sh/include/asm/io_generic.h
index 491df93cbf8e..b5f6956f19c8 100644
--- a/arch/sh/include/asm/io_generic.h
+++ b/arch/sh/include/asm/io_generic.h
@@ -11,31 +11,6 @@
11#error "Don't include this header without a valid system prefix" 11#error "Don't include this header without a valid system prefix"
12#endif 12#endif
13 13
14u8 IO_CONCAT(__IO_PREFIX,inb)(unsigned long);
15u16 IO_CONCAT(__IO_PREFIX,inw)(unsigned long);
16u32 IO_CONCAT(__IO_PREFIX,inl)(unsigned long);
17
18void IO_CONCAT(__IO_PREFIX,outb)(u8, unsigned long);
19void IO_CONCAT(__IO_PREFIX,outw)(u16, unsigned long);
20void IO_CONCAT(__IO_PREFIX,outl)(u32, unsigned long);
21
22u8 IO_CONCAT(__IO_PREFIX,inb_p)(unsigned long);
23u16 IO_CONCAT(__IO_PREFIX,inw_p)(unsigned long);
24u32 IO_CONCAT(__IO_PREFIX,inl_p)(unsigned long);
25void IO_CONCAT(__IO_PREFIX,outb_p)(u8, unsigned long);
26void IO_CONCAT(__IO_PREFIX,outw_p)(u16, unsigned long);
27void IO_CONCAT(__IO_PREFIX,outl_p)(u32, unsigned long);
28
29void IO_CONCAT(__IO_PREFIX,insb)(unsigned long, void *dst, unsigned long count);
30void IO_CONCAT(__IO_PREFIX,insw)(unsigned long, void *dst, unsigned long count);
31void IO_CONCAT(__IO_PREFIX,insl)(unsigned long, void *dst, unsigned long count);
32void IO_CONCAT(__IO_PREFIX,outsb)(unsigned long, const void *src, unsigned long count);
33void IO_CONCAT(__IO_PREFIX,outsw)(unsigned long, const void *src, unsigned long count);
34void IO_CONCAT(__IO_PREFIX,outsl)(unsigned long, const void *src, unsigned long count);
35
36void *IO_CONCAT(__IO_PREFIX,ioremap)(unsigned long offset, unsigned long size);
37void IO_CONCAT(__IO_PREFIX,iounmap)(void *addr);
38
39void __iomem *IO_CONCAT(__IO_PREFIX,ioport_map)(unsigned long addr, unsigned int size); 14void __iomem *IO_CONCAT(__IO_PREFIX,ioport_map)(unsigned long addr, unsigned int size);
40void IO_CONCAT(__IO_PREFIX,ioport_unmap)(void __iomem *addr); 15void IO_CONCAT(__IO_PREFIX,ioport_unmap)(void __iomem *addr);
41void IO_CONCAT(__IO_PREFIX,mem_init)(void); 16void IO_CONCAT(__IO_PREFIX,mem_init)(void);
diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h
index a0b0cf79cf8a..dd5d6e5bf204 100644
--- a/arch/sh/include/asm/machvec.h
+++ b/arch/sh/include/asm/machvec.h
@@ -23,27 +23,6 @@ struct sh_machine_vector {
23 void (*mv_init_irq)(void); 23 void (*mv_init_irq)(void);
24 24
25#ifdef CONFIG_HAS_IOPORT 25#ifdef CONFIG_HAS_IOPORT
26 u8 (*mv_inb)(unsigned long);
27 u16 (*mv_inw)(unsigned long);
28 u32 (*mv_inl)(unsigned long);
29 void (*mv_outb)(u8, unsigned long);
30 void (*mv_outw)(u16, unsigned long);
31 void (*mv_outl)(u32, unsigned long);
32
33 u8 (*mv_inb_p)(unsigned long);
34 u16 (*mv_inw_p)(unsigned long);
35 u32 (*mv_inl_p)(unsigned long);
36 void (*mv_outb_p)(u8, unsigned long);
37 void (*mv_outw_p)(u16, unsigned long);
38 void (*mv_outl_p)(u32, unsigned long);
39
40 void (*mv_insb)(unsigned long, void *dst, unsigned long count);
41 void (*mv_insw)(unsigned long, void *dst, unsigned long count);
42 void (*mv_insl)(unsigned long, void *dst, unsigned long count);
43 void (*mv_outsb)(unsigned long, const void *src, unsigned long count);
44 void (*mv_outsw)(unsigned long, const void *src, unsigned long count);
45 void (*mv_outsl)(unsigned long, const void *src, unsigned long count);
46
47 void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size); 26 void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size);
48 void (*mv_ioport_unmap)(void __iomem *); 27 void (*mv_ioport_unmap)(void __iomem *);
49#endif 28#endif
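
With the port accessors generated centrally, the machine vector shrinks to just the mapping hooks. A board with a non-linear port layout now overrides only mv_ioport_map; a hypothetical example (board name, function, and MYBOARD_ISA_BASE are all made up for illustration):

static void __iomem *myboard_ioport_map(unsigned long port,
                                        unsigned int size)
{
        /* Assumed fixed ISA window; real boards may need per-port logic. */
        return (void __iomem *)(MYBOARD_ISA_BASE + port);
}

static struct sh_machine_vector mv_myboard __initmv = {
        .mv_name        = "MyBoard",
        .mv_ioport_map  = myboard_ioport_map,
};
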
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index f6edc10aa0d3..de167d3a1a80 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -40,8 +40,8 @@
40#include <asm/system.h> 40#include <asm/system.h>
41 41
42#define user_mode(regs) (((regs)->sr & 0x40000000)==0) 42#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
43#define user_stack_pointer(regs) ((unsigned long)(regs)->regs[15]) 43#define user_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15])
44#define kernel_stack_pointer(regs) ((unsigned long)(regs)->regs[15]) 44#define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15])
45#define instruction_pointer(regs) ((unsigned long)(regs)->pc) 45#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
46 46
47extern void show_regs(struct pt_regs *); 47extern void show_regs(struct pt_regs *);
diff --git a/arch/sh/include/asm/ptrace_32.h b/arch/sh/include/asm/ptrace_32.h
index 35d9e257558c..6c2239cca1a2 100644
--- a/arch/sh/include/asm/ptrace_32.h
+++ b/arch/sh/include/asm/ptrace_32.h
@@ -76,7 +76,7 @@ struct pt_dspregs {
76#ifdef __KERNEL__ 76#ifdef __KERNEL__
77 77
78#define MAX_REG_OFFSET offsetof(struct pt_regs, tra) 78#define MAX_REG_OFFSET offsetof(struct pt_regs, tra)
79#define regs_return_value(regs) ((regs)->regs[0]) 79#define regs_return_value(_regs) ((_regs)->regs[0])
80 80
81#endif /* __KERNEL__ */ 81#endif /* __KERNEL__ */
82 82
diff --git a/arch/sh/include/asm/ptrace_64.h b/arch/sh/include/asm/ptrace_64.h
index d43c1cb0bbe7..bf9be7764d69 100644
--- a/arch/sh/include/asm/ptrace_64.h
+++ b/arch/sh/include/asm/ptrace_64.h
@@ -13,7 +13,7 @@ struct pt_regs {
13#ifdef __KERNEL__ 13#ifdef __KERNEL__
14 14
15#define MAX_REG_OFFSET offsetof(struct pt_regs, tregs[7]) 15#define MAX_REG_OFFSET offsetof(struct pt_regs, tregs[7])
16#define regs_return_value(regs) ((regs)->regs[3]) 16#define regs_return_value(_regs) ((_regs)->regs[3])
17 17
18#endif /* __KERNEL__ */ 18#endif /* __KERNEL__ */
19 19
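
The underscore in _regs is macro hygiene, not style: in the old definitions the parameter shared its name with the regs[] member in the body, so the preprocessor substituted both occurrences. A worked trace:

/* Old definition: */
#define regs_return_value(regs) ((regs)->regs[0])
/* regs_return_value(childregs) expands to ((childregs)->childregs[0])
 * -- the member access is clobbered; it only worked when the argument
 * happened to be spelled "regs".  With the parameter renamed: */
#define regs_return_value(_regs) ((_regs)->regs[0])
/* regs_return_value(childregs) expands to ((childregs)->regs[0]). */
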
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
index 9f4dd252c981..c48a9c3420da 100644
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ b/arch/sh/include/asm/unaligned-sh4a.h
@@ -18,10 +18,20 @@
18 * of spill registers and blowing up when building at low optimization 18 * of spill registers and blowing up when building at low optimization
19 * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777. 19 * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777.
20 */ 20 */
21#include <linux/unaligned/packed_struct.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <asm/byteorder.h> 23#include <asm/byteorder.h>
23 24
24static __always_inline u32 __get_unaligned_cpu32(const u8 *p) 25static inline u16 sh4a_get_unaligned_cpu16(const u8 *p)
26{
27#ifdef __LITTLE_ENDIAN
28 return p[0] | p[1] << 8;
29#else
30 return p[0] << 8 | p[1];
31#endif
32}
33
34static __always_inline u32 sh4a_get_unaligned_cpu32(const u8 *p)
25{ 35{
26 unsigned long unaligned; 36 unsigned long unaligned;
27 37
@@ -34,218 +44,148 @@ static __always_inline u32 __get_unaligned_cpu32(const u8 *p)
34 return unaligned; 44 return unaligned;
35} 45}
36 46
37struct __una_u16 { u16 x __attribute__((packed)); };
38struct __una_u32 { u32 x __attribute__((packed)); };
39struct __una_u64 { u64 x __attribute__((packed)); };
40
41static inline u16 __get_unaligned_cpu16(const u8 *p)
42{
43#ifdef __LITTLE_ENDIAN
44 return p[0] | p[1] << 8;
45#else
46 return p[0] << 8 | p[1];
47#endif
48}
49
50/* 47/*
51 * Even though movua.l supports auto-increment on the read side, it can 48 * Even though movua.l supports auto-increment on the read side, it can
52 * only store to r0 due to instruction encoding constraints, so just let 49 * only store to r0 due to instruction encoding constraints, so just let
53 * the compiler sort it out on its own. 50 * the compiler sort it out on its own.
54 */ 51 */
55static inline u64 __get_unaligned_cpu64(const u8 *p) 52static inline u64 sh4a_get_unaligned_cpu64(const u8 *p)
56{ 53{
57#ifdef __LITTLE_ENDIAN 54#ifdef __LITTLE_ENDIAN
58 return (u64)__get_unaligned_cpu32(p + 4) << 32 | 55 return (u64)sh4a_get_unaligned_cpu32(p + 4) << 32 |
59 __get_unaligned_cpu32(p); 56 sh4a_get_unaligned_cpu32(p);
60#else 57#else
61 return (u64)__get_unaligned_cpu32(p) << 32 | 58 return (u64)sh4a_get_unaligned_cpu32(p) << 32 |
62 __get_unaligned_cpu32(p + 4); 59 sh4a_get_unaligned_cpu32(p + 4);
63#endif 60#endif
64} 61}
65 62
66static inline u16 get_unaligned_le16(const void *p) 63static inline u16 get_unaligned_le16(const void *p)
67{ 64{
68 return le16_to_cpu(__get_unaligned_cpu16(p)); 65 return le16_to_cpu(sh4a_get_unaligned_cpu16(p));
69} 66}
70 67
71static inline u32 get_unaligned_le32(const void *p) 68static inline u32 get_unaligned_le32(const void *p)
72{ 69{
73 return le32_to_cpu(__get_unaligned_cpu32(p)); 70 return le32_to_cpu(sh4a_get_unaligned_cpu32(p));
74} 71}
75 72
76static inline u64 get_unaligned_le64(const void *p) 73static inline u64 get_unaligned_le64(const void *p)
77{ 74{
78 return le64_to_cpu(__get_unaligned_cpu64(p)); 75 return le64_to_cpu(sh4a_get_unaligned_cpu64(p));
79} 76}
80 77
81static inline u16 get_unaligned_be16(const void *p) 78static inline u16 get_unaligned_be16(const void *p)
82{ 79{
83 return be16_to_cpu(__get_unaligned_cpu16(p)); 80 return be16_to_cpu(sh4a_get_unaligned_cpu16(p));
84} 81}
85 82
86static inline u32 get_unaligned_be32(const void *p) 83static inline u32 get_unaligned_be32(const void *p)
87{ 84{
88 return be32_to_cpu(__get_unaligned_cpu32(p)); 85 return be32_to_cpu(sh4a_get_unaligned_cpu32(p));
89} 86}
90 87
91static inline u64 get_unaligned_be64(const void *p) 88static inline u64 get_unaligned_be64(const void *p)
92{ 89{
93 return be64_to_cpu(__get_unaligned_cpu64(p)); 90 return be64_to_cpu(sh4a_get_unaligned_cpu64(p));
94} 91}
95 92
96static inline void __put_le16_noalign(u8 *p, u16 val) 93static inline void nonnative_put_le16(u16 val, u8 *p)
97{ 94{
98 *p++ = val; 95 *p++ = val;
99 *p++ = val >> 8; 96 *p++ = val >> 8;
100} 97}
101 98
102static inline void __put_le32_noalign(u8 *p, u32 val) 99static inline void nonnative_put_le32(u32 val, u8 *p)
103{ 100{
104 __put_le16_noalign(p, val); 101 nonnative_put_le16(val, p);
105 __put_le16_noalign(p + 2, val >> 16); 102 nonnative_put_le16(val >> 16, p + 2);
106} 103}
107 104
108static inline void __put_le64_noalign(u8 *p, u64 val) 105static inline void nonnative_put_le64(u64 val, u8 *p)
109{ 106{
110 __put_le32_noalign(p, val); 107 nonnative_put_le32(val, p);
111 __put_le32_noalign(p + 4, val >> 32); 108 nonnative_put_le32(val >> 32, p + 4);
112} 109}
113 110
114static inline void __put_be16_noalign(u8 *p, u16 val) 111static inline void nonnative_put_be16(u16 val, u8 *p)
115{ 112{
116 *p++ = val >> 8; 113 *p++ = val >> 8;
117 *p++ = val; 114 *p++ = val;
118} 115}
119 116
120static inline void __put_be32_noalign(u8 *p, u32 val) 117static inline void nonnative_put_be32(u32 val, u8 *p)
121{ 118{
122 __put_be16_noalign(p, val >> 16); 119 nonnative_put_be16(val >> 16, p);
123 __put_be16_noalign(p + 2, val); 120 nonnative_put_be16(val, p + 2);
124} 121}
125 122
126static inline void __put_be64_noalign(u8 *p, u64 val) 123static inline void nonnative_put_be64(u64 val, u8 *p)
127{ 124{
128 __put_be32_noalign(p, val >> 32); 125 nonnative_put_be32(val >> 32, p);
129 __put_be32_noalign(p + 4, val); 126 nonnative_put_be32(val, p + 4);
130} 127}
131 128
132static inline void put_unaligned_le16(u16 val, void *p) 129static inline void put_unaligned_le16(u16 val, void *p)
133{ 130{
134#ifdef __LITTLE_ENDIAN 131#ifdef __LITTLE_ENDIAN
135 ((struct __una_u16 *)p)->x = val; 132 __put_unaligned_cpu16(val, p);
136#else 133#else
137 __put_le16_noalign(p, val); 134 nonnative_put_le16(val, p);
138#endif 135#endif
139} 136}
140 137
141static inline void put_unaligned_le32(u32 val, void *p) 138static inline void put_unaligned_le32(u32 val, void *p)
142{ 139{
143#ifdef __LITTLE_ENDIAN 140#ifdef __LITTLE_ENDIAN
144 ((struct __una_u32 *)p)->x = val; 141 __put_unaligned_cpu32(val, p);
145#else 142#else
146 __put_le32_noalign(p, val); 143 nonnative_put_le32(val, p);
147#endif 144#endif
148} 145}
149 146
150static inline void put_unaligned_le64(u64 val, void *p) 147static inline void put_unaligned_le64(u64 val, void *p)
151{ 148{
152#ifdef __LITTLE_ENDIAN 149#ifdef __LITTLE_ENDIAN
153 ((struct __una_u64 *)p)->x = val; 150 __put_unaligned_cpu64(val, p);
154#else 151#else
155 __put_le64_noalign(p, val); 152 nonnative_put_le64(val, p);
156#endif 153#endif
157} 154}
158 155
159static inline void put_unaligned_be16(u16 val, void *p) 156static inline void put_unaligned_be16(u16 val, void *p)
160{ 157{
161#ifdef __BIG_ENDIAN 158#ifdef __BIG_ENDIAN
162 ((struct __una_u16 *)p)->x = val; 159 __put_unaligned_cpu16(val, p);
163#else 160#else
164 __put_be16_noalign(p, val); 161 nonnative_put_be16(val, p);
165#endif 162#endif
166} 163}
167 164
168static inline void put_unaligned_be32(u32 val, void *p) 165static inline void put_unaligned_be32(u32 val, void *p)
169{ 166{
170#ifdef __BIG_ENDIAN 167#ifdef __BIG_ENDIAN
171 ((struct __una_u32 *)p)->x = val; 168 __put_unaligned_cpu32(val, p);
172#else 169#else
173 __put_be32_noalign(p, val); 170 nonnative_put_be32(val, p);
174#endif 171#endif
175} 172}
176 173
177static inline void put_unaligned_be64(u64 val, void *p) 174static inline void put_unaligned_be64(u64 val, void *p)
178{ 175{
179#ifdef __BIG_ENDIAN 176#ifdef __BIG_ENDIAN
180 ((struct __una_u64 *)p)->x = val; 177 __put_unaligned_cpu64(val, p);
181#else 178#else
182 __put_be64_noalign(p, val); 179 nonnative_put_be64(val, p);
183#endif 180#endif
184} 181}
185 182
186/* 183/*
187 * Cause a link-time error if we try an unaligned access other than 184 * While it's a bit non-obvious, even though the generic le/be wrappers
188 * 1,2,4 or 8 bytes long 185 * use the __get/put_xxx prefixing, they actually wrap in to the
186 * non-prefixed get/put_xxx variants as provided above.
189 */ 187 */
190extern void __bad_unaligned_access_size(void); 188#include <linux/unaligned/generic.h>
191
192#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \
193 __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
194 __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \
195 __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \
196 __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \
197 __bad_unaligned_access_size())))); \
198 }))
199
200#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \
201 __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
202 __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \
203 __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \
204 __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \
205 __bad_unaligned_access_size())))); \
206 }))
207
208#define __put_unaligned_le(val, ptr) ({ \
209 void *__gu_p = (ptr); \
210 switch (sizeof(*(ptr))) { \
211 case 1: \
212 *(u8 *)__gu_p = (__force u8)(val); \
213 break; \
214 case 2: \
215 put_unaligned_le16((__force u16)(val), __gu_p); \
216 break; \
217 case 4: \
218 put_unaligned_le32((__force u32)(val), __gu_p); \
219 break; \
220 case 8: \
221 put_unaligned_le64((__force u64)(val), __gu_p); \
222 break; \
223 default: \
224 __bad_unaligned_access_size(); \
225 break; \
226 } \
227 (void)0; })
228
229#define __put_unaligned_be(val, ptr) ({ \
230 void *__gu_p = (ptr); \
231 switch (sizeof(*(ptr))) { \
232 case 1: \
233 *(u8 *)__gu_p = (__force u8)(val); \
234 break; \
235 case 2: \
236 put_unaligned_be16((__force u16)(val), __gu_p); \
237 break; \
238 case 4: \
239 put_unaligned_be32((__force u32)(val), __gu_p); \
240 break; \
241 case 8: \
242 put_unaligned_be64((__force u64)(val), __gu_p); \
243 break; \
244 default: \
245 __bad_unaligned_access_size(); \
246 break; \
247 } \
248 (void)0; })
249 189
250#ifdef __LITTLE_ENDIAN 190#ifdef __LITTLE_ENDIAN
251# define get_unaligned __get_unaligned_le 191# define get_unaligned __get_unaligned_le
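
The sh4a_ prefix keeps the CPU-endian helpers out of the way of linux/unaligned/generic.h, which now supplies the size-dispatching __get/__put_unaligned_le/be macros this header used to hand-roll. Usage is unchanged; for example:

static u32 demo(void)
{
        u8 buf[6] = { 0xaa, 0xbb, 0x78, 0x56, 0x34, 0x12 };

        /* On SH-4A the CPU-endian load underneath is a single movua.l. */
        return get_unaligned_le32(buf + 2);     /* 0x12345678 */
}
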
diff --git a/arch/sh/include/mach-sdk7786/mach/fpga.h b/arch/sh/include/mach-sdk7786/mach/fpga.h
index 40f0c2d3690c..a9cdac469927 100644
--- a/arch/sh/include/mach-sdk7786/mach/fpga.h
+++ b/arch/sh/include/mach-sdk7786/mach/fpga.h
@@ -14,11 +14,16 @@
14#define INTTESTR 0x040 14#define INTTESTR 0x040
15#define SYSSR 0x050 15#define SYSSR 0x050
16#define NRGPR 0x060 16#define NRGPR 0x060
17
17#define NMISR 0x070 18#define NMISR 0x070
19#define NMISR_MAN_NMI BIT(0)
20#define NMISR_AUX_NMI BIT(1)
21#define NMISR_MASK (NMISR_MAN_NMI | NMISR_AUX_NMI)
18 22
19#define NMIMR 0x080 23#define NMIMR 0x080
20#define NMIMR_MAN_NMIM BIT(0) /* Manual NMI mask */ 24#define NMIMR_MAN_NMIM BIT(0) /* Manual NMI mask */
21#define NMIMR_AUX_NMIM BIT(1) /* Auxiliary NMI mask */ 25#define NMIMR_AUX_NMIM BIT(1) /* Auxiliary NMI mask */
26#define NMIMR_MASK (NMIMR_MAN_NMIM | NMIMR_AUX_NMIM)
22 27
23#define INTBSR 0x090 28#define INTBSR 0x090
24#define INTBMR 0x0a0 29#define INTBMR 0x0a0
@@ -126,6 +131,9 @@
126extern void __iomem *sdk7786_fpga_base; 131extern void __iomem *sdk7786_fpga_base;
127extern void sdk7786_fpga_init(void); 132extern void sdk7786_fpga_init(void);
128 133
134/* arch/sh/boards/mach-sdk7786/nmi.c */
135extern void sdk7786_nmi_init(void);
136
129#define SDK7786_FPGA_REGADDR(reg) (sdk7786_fpga_base + (reg)) 137#define SDK7786_FPGA_REGADDR(reg) (sdk7786_fpga_base + (reg))
130 138
131/* 139/*
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index cf6522179523..77f7ae1d4647 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -20,6 +20,11 @@ obj-y := debugtraps.o dma-nommu.o dumpstack.o \
20 syscalls_$(BITS).o time.o topology.o traps.o \ 20 syscalls_$(BITS).o time.o topology.o traps.o \
21 traps_$(BITS).o unwinder.o 21 traps_$(BITS).o unwinder.o
22 22
23ifndef CONFIG_GENERIC_IOMAP
24obj-y += iomap.o
25obj-$(CONFIG_HAS_IOPORT) += ioport.o
26endif
27
23obj-y += cpu/ 28obj-y += cpu/
24obj-$(CONFIG_VSYSCALL) += vsyscall/ 29obj-$(CONFIG_VSYSCALL) += vsyscall/
25obj-$(CONFIG_SMP) += smp.o 30obj-$(CONFIG_SMP) += smp.o
@@ -39,7 +44,6 @@ obj-$(CONFIG_DUMP_CODE) += disassemble.o
39obj-$(CONFIG_HIBERNATION) += swsusp.o 44obj-$(CONFIG_HIBERNATION) += swsusp.o
40obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o 45obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
41obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o 46obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o
42obj-$(CONFIG_HAS_IOPORT) += io_generic.o
43 47
44obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o 48obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
45obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o 49obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index 4edcb60a1355..d49c2135fd48 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -20,4 +20,4 @@ obj-$(CONFIG_SH_CLK_CPG_LEGACY) += clock-cpg.o
20obj-$(CONFIG_SH_FPU) += fpu.o 20obj-$(CONFIG_SH_FPU) += fpu.o
21obj-$(CONFIG_SH_FPU_EMU) += fpu.o 21obj-$(CONFIG_SH_FPU_EMU) += fpu.o
22 22
23obj-y += irq/ init.o clock.o hwblk.o 23obj-y += irq/ init.o clock.o hwblk.o proc.o
diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c
new file mode 100644
index 000000000000..e80a936f409a
--- /dev/null
+++ b/arch/sh/kernel/cpu/proc.c
@@ -0,0 +1,148 @@
1#include <linux/seq_file.h>
2#include <linux/kernel.h>
3#include <linux/module.h>
4#include <asm/machvec.h>
5#include <asm/processor.h>
6
7static const char *cpu_name[] = {
8 [CPU_SH7201] = "SH7201",
9 [CPU_SH7203] = "SH7203", [CPU_SH7263] = "SH7263",
10 [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619",
11 [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706",
12 [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708",
13 [CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710",
14 [CPU_SH7712] = "SH7712", [CPU_SH7720] = "SH7720",
15 [CPU_SH7721] = "SH7721", [CPU_SH7729] = "SH7729",
16 [CPU_SH7750] = "SH7750", [CPU_SH7750S] = "SH7750S",
17 [CPU_SH7750R] = "SH7750R", [CPU_SH7751] = "SH7751",
18 [CPU_SH7751R] = "SH7751R", [CPU_SH7760] = "SH7760",
19 [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
20 [CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770",
21 [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781",
22 [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
23 [CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
24 [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
25 [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
26 [CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
27 [CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
28 [CPU_SH_NONE] = "Unknown"
29};
30
31const char *get_cpu_subtype(struct sh_cpuinfo *c)
32{
33 return cpu_name[c->type];
34}
35EXPORT_SYMBOL(get_cpu_subtype);
36
37#ifdef CONFIG_PROC_FS
38/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
39static const char *cpu_flags[] = {
40 "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
41 "ptea", "llsc", "l2", "op32", "pteaex", NULL
42};
43
44static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
45{
46 unsigned long i;
47
48 seq_printf(m, "cpu flags\t:");
49
50 if (!c->flags) {
51 seq_printf(m, " %s\n", cpu_flags[0]);
52 return;
53 }
54
55 for (i = 0; cpu_flags[i]; i++)
56 if ((c->flags & (1 << i)))
57 seq_printf(m, " %s", cpu_flags[i+1]);
58
59 seq_printf(m, "\n");
60}
61
62static void show_cacheinfo(struct seq_file *m, const char *type,
63 struct cache_info info)
64{
65 unsigned int cache_size;
66
67 cache_size = info.ways * info.sets * info.linesz;
68
69 seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
70 type, cache_size >> 10, info.ways);
71}
72
73/*
74 * Get CPU information for use by the procfs.
75 */
76static int show_cpuinfo(struct seq_file *m, void *v)
77{
78 struct sh_cpuinfo *c = v;
79 unsigned int cpu = c - cpu_data;
80
81 if (!cpu_online(cpu))
82 return 0;
83
84 if (cpu == 0)
85 seq_printf(m, "machine\t\t: %s\n", get_system_type());
86 else
87 seq_printf(m, "\n");
88
89 seq_printf(m, "processor\t: %d\n", cpu);
90 seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
91 seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
92 if (c->cut_major == -1)
93 seq_printf(m, "cut\t\t: unknown\n");
94 else if (c->cut_minor == -1)
95 seq_printf(m, "cut\t\t: %d.x\n", c->cut_major);
96 else
97 seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor);
98
99 show_cpuflags(m, c);
100
101 seq_printf(m, "cache type\t: ");
102
103 /*
104 * Check for what type of cache we have, we support both the
105 * unified cache on the SH-2 and SH-3, as well as the harvard
106 * style cache on the SH-4.
107 */
108 if (c->icache.flags & SH_CACHE_COMBINED) {
109 seq_printf(m, "unified\n");
110 show_cacheinfo(m, "cache", c->icache);
111 } else {
112 seq_printf(m, "split (harvard)\n");
113 show_cacheinfo(m, "icache", c->icache);
114 show_cacheinfo(m, "dcache", c->dcache);
115 }
116
117 /* Optional secondary cache */
118 if (c->flags & CPU_HAS_L2_CACHE)
119 show_cacheinfo(m, "scache", c->scache);
120
121 seq_printf(m, "address sizes\t: %u bits physical\n", c->phys_bits);
122
123 seq_printf(m, "bogomips\t: %lu.%02lu\n",
124 c->loops_per_jiffy/(500000/HZ),
125 (c->loops_per_jiffy/(5000/HZ)) % 100);
126
127 return 0;
128}
129
130static void *c_start(struct seq_file *m, loff_t *pos)
131{
132 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
133}
134static void *c_next(struct seq_file *m, void *v, loff_t *pos)
135{
136 ++*pos;
137 return c_start(m, pos);
138}
139static void c_stop(struct seq_file *m, void *v)
140{
141}
142const struct seq_operations cpuinfo_op = {
143 .start = c_start,
144 .next = c_next,
145 .stop = c_stop,
146 .show = show_cpuinfo,
147};
148#endif /* CONFIG_PROC_FS */
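
The flag decoding in show_cpuflags() above relies on a small offset convention: bit i of c->flags maps to cpu_flags[i + 1], because cpu_flags[0] is the "none" placeholder printed only when no flags are set. A minimal userspace sketch of the same decoding, with made-up flag words (the real bit assignments live in asm/cpu-features.h); the sketch bounds its loop on cpu_flags[i + 1] so an out-of-range bit can never index past the table:

#include <stdio.h>

/* Same table shape as proc.c: slot 0 is the "no flags" placeholder,
 * so bit i of the flags word maps to name i + 1. Valid bits here are
 * 0..9 ("fpu" through "pteaex"). */
static const char *cpu_flags[] = {
	"none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
	"ptea", "llsc", "l2", "op32", "pteaex", NULL
};

static void show_flags(unsigned long flags)
{
	unsigned long i;

	if (!flags) {
		printf(" %s\n", cpu_flags[0]);
		return;
	}
	for (i = 0; cpu_flags[i + 1]; i++)
		if (flags & (1UL << i))
			printf(" %s", cpu_flags[i + 1]);
	printf("\n");
}

int main(void)
{
	show_flags(0);		/* prints " none" */
	show_flags(0x81);	/* prints " fpu l2": bits 0 and 7 */
	return 0;
}
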
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
index 0c9f24d7a02f..5b7f12e58a8d 100644
--- a/arch/sh/kernel/cpu/sh2/clock-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
@@ -14,24 +14,18 @@
14 */ 14 */
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/io.h>
17#include <asm/clock.h> 18#include <asm/clock.h>
18#include <asm/freq.h> 19#include <asm/freq.h>
19#include <asm/io.h> 20#include <asm/processor.h>
20 21
21static const int pll1rate[] = {1,2}; 22static const int pll1rate[] = {1,2};
22static const int pfc_divisors[] = {1,2,0,4}; 23static const int pfc_divisors[] = {1,2,0,4};
23 24static unsigned int pll2_mult;
24#if (CONFIG_SH_CLK_MD == 1) || (CONFIG_SH_CLK_MD == 2)
25#define PLL2 (4)
26#elif (CONFIG_SH_CLK_MD == 5) || (CONFIG_SH_CLK_MD == 6)
27#define PLL2 (2)
28#else
29#error "Illigal Clock Mode!"
30#endif
31 25
32static void master_clk_init(struct clk *clk) 26static void master_clk_init(struct clk *clk)
33{ 27{
34 clk->rate *= PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 7]; 28 clk->rate *= pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
35} 29}
36 30
37static struct clk_ops sh7619_master_clk_ops = { 31static struct clk_ops sh7619_master_clk_ops = {
@@ -70,6 +64,14 @@ static struct clk_ops *sh7619_clk_ops[] = {
70 64
71void __init arch_init_clk_ops(struct clk_ops **ops, int idx) 65void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
72{ 66{
67 if (test_mode_pin(MODE_PIN2 | MODE_PIN0) ||
68 test_mode_pin(MODE_PIN2 | MODE_PIN1))
69 pll2_mult = 2;
70 else if (test_mode_pin(MODE_PIN0) || test_mode_pin(MODE_PIN1))
71 pll2_mult = 4;
72
73 BUG_ON(!pll2_mult);
74
73 if (idx < ARRAY_SIZE(sh7619_clk_ops)) 75 if (idx < ARRAY_SIZE(sh7619_clk_ops))
74 *ops = sh7619_clk_ops[idx]; 76 *ops = sh7619_clk_ops[idx];
75} 77}
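
This conversion, repeated in the three sh2a clock drivers that follow, trades the compile-time CONFIG_SH_CLK_MD / #error selection of the PLL2 multiplier for probing the board's mode pins at runtime via test_mode_pin(). A standalone sketch of the idea; the pin masks and decision tree mirror the sh7619 hunk above, but the sample strapping value is made up:

#include <assert.h>
#include <stdio.h>

#define MODE_PIN0 (1 << 0)
#define MODE_PIN1 (1 << 1)
#define MODE_PIN2 (1 << 2)

/* Stand-in for the machvec-backed helper: like the kernel's
 * test_mode_pin(), it simply ANDs the strapped pins with the mask. */
static unsigned int mode_pins = MODE_PIN2 | MODE_PIN0; /* sample strapping */

static int test_mode_pin(int pin)
{
	return mode_pins & pin;
}

int main(void)
{
	unsigned int pll2_mult = 0;

	/* Same decision tree as the sh7619 driver above. */
	if (test_mode_pin(MODE_PIN2 | MODE_PIN0) ||
	    test_mode_pin(MODE_PIN2 | MODE_PIN1))
		pll2_mult = 2;
	else if (test_mode_pin(MODE_PIN0) || test_mode_pin(MODE_PIN1))
		pll2_mult = 4;

	assert(pll2_mult);	/* the sh7619 driver BUG()s on an unknown mode */
	printf("PLL2 multiplier: x%u\n", pll2_mult);
	return 0;
}
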
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
index c509c40cba4b..1174e2d96c03 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
@@ -22,19 +22,12 @@ static const int pll1rate[]={1,2,3,4,6,8};
22static const int pfc_divisors[]={1,2,3,4,6,8,12}; 22static const int pfc_divisors[]={1,2,3,4,6,8,12};
23#define ifc_divisors pfc_divisors 23#define ifc_divisors pfc_divisors
24 24
25#if (CONFIG_SH_CLK_MD == 0) 25static unsigned int pll2_mult;
26#define PLL2 (4)
27#elif (CONFIG_SH_CLK_MD == 2)
28#define PLL2 (2)
29#elif (CONFIG_SH_CLK_MD == 3)
30#define PLL2 (1)
31#else
32#error "Illegal Clock Mode!"
33#endif
34 26
35static void master_clk_init(struct clk *clk) 27static void master_clk_init(struct clk *clk)
36{ 28{
37 clk->rate = 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007]; 29 clk->rate = 10000000 * pll2_mult *
30 pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
38} 31}
39 32
40static struct clk_ops sh7201_master_clk_ops = { 33static struct clk_ops sh7201_master_clk_ops = {
@@ -80,6 +73,13 @@ static struct clk_ops *sh7201_clk_ops[] = {
80 73
81void __init arch_init_clk_ops(struct clk_ops **ops, int idx) 74void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
82{ 75{
76 if (test_mode_pin(MODE_PIN1 | MODE_PIN0))
77 pll2_mult = 1;
78 else if (test_mode_pin(MODE_PIN1))
79 pll2_mult = 2;
80 else
81 pll2_mult = 4;
82
83 if (idx < ARRAY_SIZE(sh7201_clk_ops)) 83 if (idx < ARRAY_SIZE(sh7201_clk_ops))
84 *ops = sh7201_clk_ops[idx]; 84 *ops = sh7201_clk_ops[idx];
85} 85}
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
index 7e75d8f79502..95a008e8b735 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
@@ -25,21 +25,11 @@ static const int pll1rate[]={8,12,16,0};
25static const int pfc_divisors[]={1,2,3,4,6,8,12}; 25static const int pfc_divisors[]={1,2,3,4,6,8,12};
26#define ifc_divisors pfc_divisors 26#define ifc_divisors pfc_divisors
27 27
28#if (CONFIG_SH_CLK_MD == 0) 28static unsigned int pll2_mult;
29#define PLL2 (1)
30#elif (CONFIG_SH_CLK_MD == 1)
31#define PLL2 (2)
32#elif (CONFIG_SH_CLK_MD == 2)
33#define PLL2 (4)
34#elif (CONFIG_SH_CLK_MD == 3)
35#define PLL2 (4)
36#else
37#error "Illegal Clock Mode!"
38#endif
39 29
40static void master_clk_init(struct clk *clk) 30static void master_clk_init(struct clk *clk)
41{ 31{
42 clk->rate *= pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0003] * PLL2 ; 32 clk->rate *= pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0003] * pll2_mult;
43} 33}
44 34
45static struct clk_ops sh7203_master_clk_ops = { 35static struct clk_ops sh7203_master_clk_ops = {
@@ -79,6 +69,13 @@ static struct clk_ops *sh7203_clk_ops[] = {
79 69
80void __init arch_init_clk_ops(struct clk_ops **ops, int idx) 70void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
81{ 71{
72 if (test_mode_pin(MODE_PIN1))
73 pll2_mult = 4;
74 else if (test_mode_pin(MODE_PIN0))
75 pll2_mult = 2;
76 else
77 pll2_mult = 1;
78
82 if (idx < ARRAY_SIZE(sh7203_clk_ops)) 79 if (idx < ARRAY_SIZE(sh7203_clk_ops))
83 *ops = sh7203_clk_ops[idx]; 80 *ops = sh7203_clk_ops[idx];
84} 81}
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
index b27a5e2687ab..3c314d7cd6e6 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
@@ -22,19 +22,11 @@ static const int pll1rate[]={1,2,3,4,6,8};
22static const int pfc_divisors[]={1,2,3,4,6,8,12}; 22static const int pfc_divisors[]={1,2,3,4,6,8,12};
23#define ifc_divisors pfc_divisors 23#define ifc_divisors pfc_divisors
24 24
25#if (CONFIG_SH_CLK_MD == 2) 25static unsigned int pll2_mult;
26#define PLL2 (4)
27#elif (CONFIG_SH_CLK_MD == 6)
28#define PLL2 (2)
29#elif (CONFIG_SH_CLK_MD == 7)
30#define PLL2 (1)
31#else
32#error "Illigal Clock Mode!"
33#endif
34 26
35static void master_clk_init(struct clk *clk) 27static void master_clk_init(struct clk *clk)
36{ 28{
37 clk->rate *= PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007]; 29 clk->rate *= pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
38} 30}
39 31
40static struct clk_ops sh7206_master_clk_ops = { 32static struct clk_ops sh7206_master_clk_ops = {
@@ -79,7 +71,13 @@ static struct clk_ops *sh7206_clk_ops[] = {
79 71
80void __init arch_init_clk_ops(struct clk_ops **ops, int idx) 72void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
81{ 73{
74 if (test_mode_pin(MODE_PIN2 | MODE_PIN1 | MODE_PIN0))
75 pll2_mult = 1;
76 else if (test_mode_pin(MODE_PIN2 | MODE_PIN1))
77 pll2_mult = 2;
78 else if (test_mode_pin(MODE_PIN1))
79 pll2_mult = 4;
80
82 if (idx < ARRAY_SIZE(sh7206_clk_ops)) 81 if (idx < ARRAY_SIZE(sh7206_clk_ops))
83 *ops = sh7206_clk_ops[idx]; 82 *ops = sh7206_clk_ops[idx];
84} 83}
85
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c
index dbf3b4bb71fe..748955df018d 100644
--- a/arch/sh/kernel/cpu/sh4/perf_event.c
+++ b/arch/sh/kernel/cpu/sh4/perf_event.c
@@ -250,4 +250,4 @@ static int __init sh7750_pmu_init(void)
250 250
251 return register_sh_pmu(&sh7750_pmu); 251 return register_sh_pmu(&sh7750_pmu);
252} 252}
253arch_initcall(sh7750_pmu_init); 253early_initcall(sh7750_pmu_init);
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c
index 580276525731..17e6bebfede0 100644
--- a/arch/sh/kernel/cpu/sh4a/perf_event.c
+++ b/arch/sh/kernel/cpu/sh4a/perf_event.c
@@ -284,4 +284,4 @@ static int __init sh4a_pmu_init(void)
284 284
285 return register_sh_pmu(&sh4a_pmu); 285 return register_sh_pmu(&sh4a_pmu);
286} 286}
287arch_initcall(sh4a_pmu_init); 287early_initcall(sh4a_pmu_init);
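
Both PMU registrations move from arch_initcall() to early_initcall(), which runs at an earlier initcall level, presumably so the PMU is registered before the later initcall levels that may want to use it. A minimal sketch of the declaration pattern, with a hypothetical init function:

#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical setup: early_initcall() runs before arch_initcall(),
 * so anything registered here is visible to later initcall levels. */
static int __init example_pmu_init(void)
{
	pr_info("example PMU registered\n");
	return 0;
}
early_initcall(example_pmu_init);
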
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
deleted file mode 100644
index 447d78f666f9..000000000000
--- a/arch/sh/kernel/io_generic.c
+++ /dev/null
@@ -1,180 +0,0 @@
1/*
2 * arch/sh/kernel/io_generic.c
3 *
4 * Copyright (C) 2000 Niibe Yutaka
5 * Copyright (C) 2005 - 2007 Paul Mundt
6 *
7 * Generic I/O routine. These can be used where a machine specific version
8 * is not required.
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 */
14#include <linux/module.h>
15#include <linux/io.h>
16#include <asm/machvec.h>
17
18#ifdef CONFIG_CPU_SH3
19/* SH3 has a PCMCIA bug that needs a dummy read from area 6 for a
20 * workaround. */
21/* I'm not sure SH7709 has this kind of bug */
22#define dummy_read() __raw_readb(0xba000000)
23#else
24#define dummy_read()
25#endif
26
27unsigned long generic_io_base = 0;
28
29u8 generic_inb(unsigned long port)
30{
31 return __raw_readb(__ioport_map(port, 1));
32}
33
34u16 generic_inw(unsigned long port)
35{
36 return __raw_readw(__ioport_map(port, 2));
37}
38
39u32 generic_inl(unsigned long port)
40{
41 return __raw_readl(__ioport_map(port, 4));
42}
43
44u8 generic_inb_p(unsigned long port)
45{
46 unsigned long v = generic_inb(port);
47
48 ctrl_delay();
49 return v;
50}
51
52u16 generic_inw_p(unsigned long port)
53{
54 unsigned long v = generic_inw(port);
55
56 ctrl_delay();
57 return v;
58}
59
60u32 generic_inl_p(unsigned long port)
61{
62 unsigned long v = generic_inl(port);
63
64 ctrl_delay();
65 return v;
66}
67
68/*
69 * insb/w/l all read a series of bytes/words/longs from a fixed port
70 * address. However as the port address doesn't change we only need to
71 * convert the port address to real address once.
72 */
73
74void generic_insb(unsigned long port, void *dst, unsigned long count)
75{
76 __raw_readsb(__ioport_map(port, 1), dst, count);
77 dummy_read();
78}
79
80void generic_insw(unsigned long port, void *dst, unsigned long count)
81{
82 __raw_readsw(__ioport_map(port, 2), dst, count);
83 dummy_read();
84}
85
86void generic_insl(unsigned long port, void *dst, unsigned long count)
87{
88 __raw_readsl(__ioport_map(port, 4), dst, count);
89 dummy_read();
90}
91
92void generic_outb(u8 b, unsigned long port)
93{
94 __raw_writeb(b, __ioport_map(port, 1));
95}
96
97void generic_outw(u16 b, unsigned long port)
98{
99 __raw_writew(b, __ioport_map(port, 2));
100}
101
102void generic_outl(u32 b, unsigned long port)
103{
104 __raw_writel(b, __ioport_map(port, 4));
105}
106
107void generic_outb_p(u8 b, unsigned long port)
108{
109 generic_outb(b, port);
110 ctrl_delay();
111}
112
113void generic_outw_p(u16 b, unsigned long port)
114{
115 generic_outw(b, port);
116 ctrl_delay();
117}
118
119void generic_outl_p(u32 b, unsigned long port)
120{
121 generic_outl(b, port);
122 ctrl_delay();
123}
124
125/*
126 * outsb/w/l all write a series of bytes/words/longs to a fixed port
127 * address. However as the port address doesn't change we only need to
128 * convert the port address to real address once.
129 */
130void generic_outsb(unsigned long port, const void *src, unsigned long count)
131{
132 __raw_writesb(__ioport_map(port, 1), src, count);
133 dummy_read();
134}
135
136void generic_outsw(unsigned long port, const void *src, unsigned long count)
137{
138 __raw_writesw(__ioport_map(port, 2), src, count);
139 dummy_read();
140}
141
142void generic_outsl(unsigned long port, const void *src, unsigned long count)
143{
144 __raw_writesl(__ioport_map(port, 4), src, count);
145 dummy_read();
146}
147
148void __iomem *generic_ioport_map(unsigned long addr, unsigned int size)
149{
150#ifdef P1SEG
151 if (PXSEG(addr) >= P1SEG)
152 return (void __iomem *)addr;
153#endif
154
155 return (void __iomem *)(addr + generic_io_base);
156}
157
158void generic_ioport_unmap(void __iomem *addr)
159{
160}
161
162#ifndef CONFIG_GENERIC_IOMAP
163void __iomem *ioport_map(unsigned long port, unsigned int nr)
164{
165 void __iomem *ret;
166
167 ret = __ioport_map_trapped(port, nr);
168 if (ret)
169 return ret;
170
171 return __ioport_map(port, nr);
172}
173EXPORT_SYMBOL(ioport_map);
174
175void ioport_unmap(void __iomem *addr)
176{
177 sh_mv.mv_ioport_unmap(addr);
178}
179EXPORT_SYMBOL(ioport_unmap);
180#endif /* CONFIG_GENERIC_IOMAP */
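
The deleted file shows the pattern its replacements keep: translate the port number to a virtual address once with __ioport_map(), then use the __raw MMIO accessors, with the _p ("pause") variants adding ctrl_delay() after the access. A compressed host-side sketch of that shape; the base address and the delay stub are stand-ins, not the kernel's:

#include <stdint.h>

/* Stand-in for __ioport_map(): port number to virtual address. */
static volatile uint8_t *ioport_to_addr(unsigned long port)
{
	return (volatile uint8_t *)(uintptr_t)(0xba000000UL + port);
}

/* Stand-in for ctrl_delay(): a bus-settling delay after slow-port I/O. */
static void ctrl_delay(void)
{
}

static uint8_t my_inb(unsigned long port)
{
	return *ioport_to_addr(port);	/* map once, then raw access */
}

static uint8_t my_inb_p(unsigned long port)
{
	uint8_t v = my_inb(port);

	ctrl_delay();			/* _p variant: pause afterwards */
	return v;
}
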
diff --git a/arch/sh/kernel/iomap.c b/arch/sh/kernel/iomap.c
new file mode 100644
index 000000000000..2e8e8b9b9cef
--- /dev/null
+++ b/arch/sh/kernel/iomap.c
@@ -0,0 +1,165 @@
1/*
2 * arch/sh/kernel/iomap.c
3 *
4 * Copyright (C) 2000 Niibe Yutaka
5 * Copyright (C) 2005 - 2007 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/module.h>
12#include <linux/io.h>
13
14unsigned int ioread8(void __iomem *addr)
15{
16 return readb(addr);
17}
18EXPORT_SYMBOL(ioread8);
19
20unsigned int ioread16(void __iomem *addr)
21{
22 return readw(addr);
23}
24EXPORT_SYMBOL(ioread16);
25
26unsigned int ioread16be(void __iomem *addr)
27{
28 return be16_to_cpu(__raw_readw(addr));
29}
30EXPORT_SYMBOL(ioread16be);
31
32unsigned int ioread32(void __iomem *addr)
33{
34 return readl(addr);
35}
36EXPORT_SYMBOL(ioread32);
37
38unsigned int ioread32be(void __iomem *addr)
39{
40 return be32_to_cpu(__raw_readl(addr));
41}
42EXPORT_SYMBOL(ioread32be);
43
44void iowrite8(u8 val, void __iomem *addr)
45{
46 writeb(val, addr);
47}
48EXPORT_SYMBOL(iowrite8);
49
50void iowrite16(u16 val, void __iomem *addr)
51{
52 writew(val, addr);
53}
54EXPORT_SYMBOL(iowrite16);
55
56void iowrite16be(u16 val, void __iomem *addr)
57{
58 __raw_writew(cpu_to_be16(val), addr);
59}
60EXPORT_SYMBOL(iowrite16be);
61
62void iowrite32(u32 val, void __iomem *addr)
63{
64 writel(val, addr);
65}
66EXPORT_SYMBOL(iowrite32);
67
68void iowrite32be(u32 val, void __iomem *addr)
69{
70 __raw_writel(cpu_to_be32(val), addr);
71}
72EXPORT_SYMBOL(iowrite32be);
73
74/*
75 * These are the "repeat MMIO read/write" functions.
76 * Note the "__raw" accesses, since we don't want to
77 * convert to CPU byte order. We write in "IO byte
78 * order" (we also don't have IO barriers).
79 */
80static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
81{
82 while (--count >= 0) {
83 u8 data = __raw_readb(addr);
84 *dst = data;
85 dst++;
86 }
87}
88
89static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
90{
91 while (--count >= 0) {
92 u16 data = __raw_readw(addr);
93 *dst = data;
94 dst++;
95 }
96}
97
98static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
99{
100 while (--count >= 0) {
101 u32 data = __raw_readl(addr);
102 *dst = data;
103 dst++;
104 }
105}
106
107static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
108{
109 while (--count >= 0) {
110 __raw_writeb(*src, addr);
111 src++;
112 }
113}
114
115static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
116{
117 while (--count >= 0) {
118 __raw_writew(*src, addr);
119 src++;
120 }
121}
122
123static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
124{
125 while (--count >= 0) {
126 __raw_writel(*src, addr);
127 src++;
128 }
129}
130
131void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
132{
133 mmio_insb(addr, dst, count);
134}
135EXPORT_SYMBOL(ioread8_rep);
136
137void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
138{
139 mmio_insw(addr, dst, count);
140}
141EXPORT_SYMBOL(ioread16_rep);
142
143void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
144{
145 mmio_insl(addr, dst, count);
146}
147EXPORT_SYMBOL(ioread32_rep);
148
149void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
150{
151 mmio_outsb(addr, src, count);
152}
153EXPORT_SYMBOL(iowrite8_rep);
154
155void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
156{
157 mmio_outsw(addr, src, count);
158}
159EXPORT_SYMBOL(iowrite16_rep);
160
161void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
162{
163 mmio_outsl(addr, src, count);
164}
165EXPORT_SYMBOL(iowrite32_rep);
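
The new iomap.c keeps two families apart: ioread16()/ioread32() go through readw()/readl() and return values in CPU byte order, while the *be variants pair __raw accesses with explicit be16_to_cpu()/be32_to_cpu() for registers documented as big-endian. A hedged sketch of a driver choosing between them; the device and its register offsets are invented:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical MMIO block: STATUS follows normal bus byte order,
 * CONFIG is documented as big-endian regardless of the CPU. */
#define DEV_STATUS	0x00
#define DEV_CONFIG	0x04

static u16 dev_read_status(void __iomem *base)
{
	return ioread16(base + DEV_STATUS);	/* native-order register */
}

static u16 dev_read_config(void __iomem *base)
{
	return ioread16be(base + DEV_CONFIG);	/* big-endian register */
}
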
diff --git a/arch/sh/kernel/ioport.c b/arch/sh/kernel/ioport.c
new file mode 100644
index 000000000000..e3ad6103e7c1
--- /dev/null
+++ b/arch/sh/kernel/ioport.c
@@ -0,0 +1,43 @@
1/*
2 * arch/sh/kernel/ioport.c
3 *
4 * Copyright (C) 2000 Niibe Yutaka
5 * Copyright (C) 2005 - 2007 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/module.h>
12#include <linux/io.h>
13
14const unsigned long sh_io_port_base __read_mostly = -1;
15EXPORT_SYMBOL(sh_io_port_base);
16
17void __iomem *__ioport_map(unsigned long addr, unsigned int size)
18{
19 if (sh_mv.mv_ioport_map)
20 return sh_mv.mv_ioport_map(addr, size);
21
22 return (void __iomem *)(addr + sh_io_port_base);
23}
24EXPORT_SYMBOL(__ioport_map);
25
26void __iomem *ioport_map(unsigned long port, unsigned int nr)
27{
28 void __iomem *ret;
29
30 ret = __ioport_map_trapped(port, nr);
31 if (ret)
32 return ret;
33
34 return __ioport_map(port, nr);
35}
36EXPORT_SYMBOL(ioport_map);
37
38void ioport_unmap(void __iomem *addr)
39{
40 if (sh_mv.mv_ioport_unmap)
41 sh_mv.mv_ioport_unmap(addr);
42}
43EXPORT_SYMBOL(ioport_unmap);
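
Note the poisoned default: sh_io_port_base is -1, so a platform that neither provides mv_ioport_map nor patches in a real base yields an obviously bogus mapping (port - 1) rather than quietly dereferencing a small offset from zero. A userspace model of the lookup order above; all addresses are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Model of ioport.c: 1) trapped I/O, 2) machvec hook, 3) flat offset
 * from sh_io_port_base. */
static void *trapped_map(unsigned long port, unsigned int nr)
{
	return NULL;			/* no trapped range registered */
}

static void *(*mv_ioport_map)(unsigned long port, unsigned int nr);

static unsigned long sh_io_port_base = (unsigned long)-1; /* poisoned */

static void *my_ioport_map(unsigned long port, unsigned int nr)
{
	void *ret = trapped_map(port, nr);

	if (ret)
		return ret;
	if (mv_ioport_map)
		return mv_ioport_map(port, nr);
	return (void *)(uintptr_t)(port + sh_io_port_base);
}

int main(void)
{
	/* With the default base, the result is port - 1: a pointer that
	 * faults immediately instead of hitting a plausible address. */
	printf("mapped: %p\n", my_ioport_map(0x3f8, 8));
	return 0;
}
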
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c
index 9f9bb63616ad..3d722e49db08 100644
--- a/arch/sh/kernel/machvec.c
+++ b/arch/sh/kernel/machvec.c
@@ -118,28 +118,6 @@ void __init sh_mv_setup(void)
118 sh_mv.mv_##elem = generic_##elem; \ 118 sh_mv.mv_##elem = generic_##elem; \
119} while (0) 119} while (0)
120 120
121#ifdef CONFIG_HAS_IOPORT
122
123#ifdef P2SEG
124 __set_io_port_base(P2SEG);
125#else
126 __set_io_port_base(0);
127#endif
128
129 mv_set(inb); mv_set(inw); mv_set(inl);
130 mv_set(outb); mv_set(outw); mv_set(outl);
131
132 mv_set(inb_p); mv_set(inw_p); mv_set(inl_p);
133 mv_set(outb_p); mv_set(outw_p); mv_set(outl_p);
134
135 mv_set(insb); mv_set(insw); mv_set(insl);
136 mv_set(outsb); mv_set(outsw); mv_set(outsl);
137
138 mv_set(ioport_map);
139 mv_set(ioport_unmap);
140
141#endif
142
143 mv_set(irq_demux); 121 mv_set(irq_demux);
144 mv_set(mode_pins); 122 mv_set(mode_pins);
145 mv_set(mem_init); 123 mv_set(mem_init);
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 5a4b33435650..2ee21a47b5af 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -389,7 +389,7 @@ int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
389 389
390 WARN_ON(_pmu->num_events > MAX_HWEVENTS); 390 WARN_ON(_pmu->num_events > MAX_HWEVENTS);
391 391
392 perf_pmu_register(&pmu); 392 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
393 perf_cpu_notifier(sh_pmu_notifier); 393 perf_cpu_notifier(sh_pmu_notifier);
394 return 0; 394 return 0;
395} 395}
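
perf_pmu_register() gained name and type arguments in this cycle; registering as "cpu" with PERF_TYPE_RAW keeps the SH PMU addressable as the default raw-event provider. The call shape, with a placeholder pmu (the real callbacks are the sh_pmu ops wired up earlier in this file):

#include <linux/perf_event.h>

/* Skeleton only: a real PMU fills in .event_init, .add, .del,
 * .start, .stop and .read before registering. */
static struct pmu example_pmu;

static int example_register(void)
{
	return perf_pmu_register(&example_pmu, "cpu", PERF_TYPE_RAW);
}
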
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index d6b018c7ebdc..4f267160c515 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -12,7 +12,6 @@
12#include <linux/initrd.h> 12#include <linux/initrd.h>
13#include <linux/bootmem.h> 13#include <linux/bootmem.h>
14#include <linux/console.h> 14#include <linux/console.h>
15#include <linux/seq_file.h>
16#include <linux/root_dev.h> 15#include <linux/root_dev.h>
17#include <linux/utsname.h> 16#include <linux/utsname.h>
18#include <linux/nodemask.h> 17#include <linux/nodemask.h>
@@ -319,146 +318,3 @@ int test_mode_pin(int pin)
319{ 318{
320 return sh_mv.mv_mode_pins() & pin; 319 return sh_mv.mv_mode_pins() & pin;
321} 320}
322
323static const char *cpu_name[] = {
324 [CPU_SH7201] = "SH7201",
325 [CPU_SH7203] = "SH7203", [CPU_SH7263] = "SH7263",
326 [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619",
327 [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706",
328 [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708",
329 [CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710",
330 [CPU_SH7712] = "SH7712", [CPU_SH7720] = "SH7720",
331 [CPU_SH7721] = "SH7721", [CPU_SH7729] = "SH7729",
332 [CPU_SH7750] = "SH7750", [CPU_SH7750S] = "SH7750S",
333 [CPU_SH7750R] = "SH7750R", [CPU_SH7751] = "SH7751",
334 [CPU_SH7751R] = "SH7751R", [CPU_SH7760] = "SH7760",
335 [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
336 [CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770",
337 [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781",
338 [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
339 [CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
340 [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
341 [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
342 [CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
343 [CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
344 [CPU_SH_NONE] = "Unknown"
345};
346
347const char *get_cpu_subtype(struct sh_cpuinfo *c)
348{
349 return cpu_name[c->type];
350}
351EXPORT_SYMBOL(get_cpu_subtype);
352
353#ifdef CONFIG_PROC_FS
354/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
355static const char *cpu_flags[] = {
356 "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
357 "ptea", "llsc", "l2", "op32", "pteaex", NULL
358};
359
360static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
361{
362 unsigned long i;
363
364 seq_printf(m, "cpu flags\t:");
365
366 if (!c->flags) {
367 seq_printf(m, " %s\n", cpu_flags[0]);
368 return;
369 }
370
371 for (i = 0; cpu_flags[i]; i++)
372 if ((c->flags & (1 << i)))
373 seq_printf(m, " %s", cpu_flags[i+1]);
374
375 seq_printf(m, "\n");
376}
377
378static void show_cacheinfo(struct seq_file *m, const char *type,
379 struct cache_info info)
380{
381 unsigned int cache_size;
382
383 cache_size = info.ways * info.sets * info.linesz;
384
385 seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
386 type, cache_size >> 10, info.ways);
387}
388
389/*
390 * Get CPU information for use by the procfs.
391 */
392static int show_cpuinfo(struct seq_file *m, void *v)
393{
394 struct sh_cpuinfo *c = v;
395 unsigned int cpu = c - cpu_data;
396
397 if (!cpu_online(cpu))
398 return 0;
399
400 if (cpu == 0)
401 seq_printf(m, "machine\t\t: %s\n", get_system_type());
402 else
403 seq_printf(m, "\n");
404
405 seq_printf(m, "processor\t: %d\n", cpu);
406 seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
407 seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
408 if (c->cut_major == -1)
409 seq_printf(m, "cut\t\t: unknown\n");
410 else if (c->cut_minor == -1)
411 seq_printf(m, "cut\t\t: %d.x\n", c->cut_major);
412 else
413 seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor);
414
415 show_cpuflags(m, c);
416
417 seq_printf(m, "cache type\t: ");
418
419 /*
420 * Check for what type of cache we have, we support both the
421 * unified cache on the SH-2 and SH-3, as well as the harvard
422 * style cache on the SH-4.
423 */
424 if (c->icache.flags & SH_CACHE_COMBINED) {
425 seq_printf(m, "unified\n");
426 show_cacheinfo(m, "cache", c->icache);
427 } else {
428 seq_printf(m, "split (harvard)\n");
429 show_cacheinfo(m, "icache", c->icache);
430 show_cacheinfo(m, "dcache", c->dcache);
431 }
432
433 /* Optional secondary cache */
434 if (c->flags & CPU_HAS_L2_CACHE)
435 show_cacheinfo(m, "scache", c->scache);
436
437 seq_printf(m, "address sizes\t: %u bits physical\n", c->phys_bits);
438
439 seq_printf(m, "bogomips\t: %lu.%02lu\n",
440 c->loops_per_jiffy/(500000/HZ),
441 (c->loops_per_jiffy/(5000/HZ)) % 100);
442
443 return 0;
444}
445
446static void *c_start(struct seq_file *m, loff_t *pos)
447{
448 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
449}
450static void *c_next(struct seq_file *m, void *v, loff_t *pos)
451{
452 ++*pos;
453 return c_start(m, pos);
454}
455static void c_stop(struct seq_file *m, void *v)
456{
457}
458const struct seq_operations cpuinfo_op = {
459 .start = c_start,
460 .next = c_next,
461 .stop = c_stop,
462 .show = show_cpuinfo,
463};
464#endif /* CONFIG_PROC_FS */
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index 97e3feb9ff1b..a2c5898c1ab1 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -6,25 +6,24 @@
6ROOT_IMG := /usr/src/root.img 6ROOT_IMG := /usr/src/root.img
7ELFTOAOUT := elftoaout 7ELFTOAOUT := elftoaout
8 8
9hostprogs-y := piggyback_32 piggyback_64 btfixupprep 9hostprogs-y := piggyback btfixupprep
10targets := tftpboot.img btfix.o btfix.S image zImage vmlinux.aout 10targets := tftpboot.img btfix.o btfix.S image zImage vmlinux.aout
11clean-files := System.map 11clean-files := System.map
12 12
13quiet_cmd_elftoaout = ELFTOAOUT $@ 13quiet_cmd_elftoaout = ELFTOAOUT $@
14 cmd_elftoaout = $(ELFTOAOUT) $(obj)/image -o $@ 14 cmd_elftoaout = $(ELFTOAOUT) $(obj)/image -o $@
15quiet_cmd_piggy = PIGGY $@
16 cmd_piggy = $(obj)/piggyback $(BITS) $@ System.map $(ROOT_IMG)
17quiet_cmd_strip = STRIP $@
18 cmd_strip = $(STRIP) -R .comment -R .note -K sun4u_init -K _end -K _start $< -o $@
15 19
16ifeq ($(CONFIG_SPARC32),y) 20ifeq ($(CONFIG_SPARC32),y)
17quiet_cmd_piggy = PIGGY $@
18 cmd_piggy = $(obj)/piggyback_32 $@ System.map $(ROOT_IMG)
19quiet_cmd_btfix = BTFIX $@ 21quiet_cmd_btfix = BTFIX $@
20 cmd_btfix = $(OBJDUMP) -x vmlinux | $(obj)/btfixupprep > $@ 22 cmd_btfix = $(OBJDUMP) -x vmlinux | $(obj)/btfixupprep > $@
21quiet_cmd_sysmap = SYSMAP $(obj)/System.map 23quiet_cmd_sysmap = SYSMAP $(obj)/System.map
22 cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap 24 cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
23quiet_cmd_image = LD $@ 25quiet_cmd_image = LD $@
24 cmd_image = $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) $(LDFLAGS_$(@F)) -o $@ 26 cmd_image = $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) $(LDFLAGS_$(@F)) -o $@
25quiet_cmd_strip = STRIP $@
26 cmd_strip = $(STRIP) -R .comment -R .note -K sun4u_init -K _end -K _start $(obj)/image -o $@
27
28 27
29define rule_image 28define rule_image
30 $(if $($(quiet)cmd_image), \ 29 $(if $($(quiet)cmd_image), \
@@ -57,10 +56,7 @@ $(obj)/image: $(obj)/btfix.o FORCE
57 56
58$(obj)/zImage: $(obj)/image 57$(obj)/zImage: $(obj)/image
59 $(call if_changed,strip) 58 $(call if_changed,strip)
60 59 @echo ' kernel: $@ is ready'
61$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_32 System.map $(ROOT_IMG) FORCE
62 $(call if_changed,elftoaout)
63 $(call if_changed,piggy)
64 60
65$(obj)/btfix.S: $(obj)/btfixupprep vmlinux FORCE 61$(obj)/btfix.S: $(obj)/btfixupprep vmlinux FORCE
66 $(call if_changed,btfix) 62 $(call if_changed,btfix)
@@ -68,11 +64,6 @@ $(obj)/btfix.S: $(obj)/btfixupprep vmlinux FORCE
68endif 64endif
69 65
70ifeq ($(CONFIG_SPARC64),y) 66ifeq ($(CONFIG_SPARC64),y)
71quiet_cmd_piggy = PIGGY $@
72 cmd_piggy = $(obj)/piggyback_64 $@ System.map $(ROOT_IMG)
73quiet_cmd_strip = STRIP $@
74 cmd_strip = $(STRIP) -R .comment -R .note -K sun4u_init -K _end -K _start vmlinux -o $@
75
76 67
77# Actual linking 68# Actual linking
78$(obj)/image: vmlinux FORCE 69$(obj)/image: vmlinux FORCE
@@ -81,10 +72,6 @@ $(obj)/image: vmlinux FORCE
81 72
82$(obj)/zImage: $(obj)/image 73$(obj)/zImage: $(obj)/image
83 $(call if_changed,gzip) 74 $(call if_changed,gzip)
84
85$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE
86 $(call if_changed,elftoaout)
87 $(call if_changed,piggy)
88 @echo ' kernel: $@ is ready' 75 @echo ' kernel: $@ is ready'
89 76
90$(obj)/vmlinux.aout: vmlinux FORCE 77$(obj)/vmlinux.aout: vmlinux FORCE
@@ -92,3 +79,6 @@ $(obj)/vmlinux.aout: vmlinux FORCE
92 @echo ' kernel: $@ is ready' 79 @echo ' kernel: $@ is ready'
93endif 80endif
94 81
82$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback System.map $(ROOT_IMG) FORCE
83 $(call if_changed,elftoaout)
84 $(call if_changed,piggy)
diff --git a/arch/sparc/boot/piggyback.c b/arch/sparc/boot/piggyback.c
new file mode 100644
index 000000000000..c0a798fcf030
--- /dev/null
+++ b/arch/sparc/boot/piggyback.c
@@ -0,0 +1,272 @@
1/*
2 Simple utility to make a single-image install kernel with initial ramdisk
3 for Sparc tftpbooting without need to set up nfs.
4
5 Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 Pete Zaitcev <zaitcev@yahoo.com> endian fixes for cross-compiles, 2000.
7 Copyright (C) 2011 Sam Ravnborg <sam@ravnborg.org>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
22
23#include <dirent.h>
24#include <stdlib.h>
25#include <string.h>
26#include <unistd.h>
27#include <ctype.h>
28#include <errno.h>
29#include <fcntl.h>
30#include <stdio.h>
31
32#include <sys/types.h>
33#include <sys/stat.h>
34
35/*
36 * Note: run this on an a.out kernel (use elftoaout for it),
 37 * as the PROM looks for an a.out image only.
38 */
39
40#define AOUT_TEXT_OFFSET 32
41
42static int is64bit = 0;
43
 44/* align to the boot page size: 8 KiB on sparc64, 4 KiB on sparc32 */
45static int align(int n)
46{
47 if (is64bit)
48 return (n + 0x1fff) & ~0x1fff;
49 else
50 return (n + 0xfff) & ~0xfff;
51}
52
53/* read two bytes as big endian */
54static unsigned short ld2(char *p)
55{
56 return (p[0] << 8) | p[1];
57}
58
59/* save 4 bytes as big endian */
60static void st4(char *p, unsigned int x)
61{
62 p[0] = x >> 24;
63 p[1] = x >> 16;
64 p[2] = x >> 8;
65 p[3] = x;
66}
67
68static void die(const char *str)
69{
70 perror(str);
71 exit(1);
72}
73
74static void usage(void)
75{
76 /* fs_img.gz is an image of initial ramdisk. */
77 fprintf(stderr, "Usage: piggyback bits vmlinux.aout System.map fs_img.gz\n");
78 fprintf(stderr, "\tKernel image will be modified in place.\n");
79 exit(1);
80}
81
82static int start_line(const char *line)
83{
84 if (strcmp(line + 8, " T _start\n") == 0)
85 return 1;
86 else if (strcmp(line + 16, " T _start\n") == 0)
87 return 1;
88 return 0;
89}
90
91static int end_line(const char *line)
92{
93 if (strcmp(line + 8, " A _end\n") == 0)
94 return 1;
95 else if (strcmp (line + 16, " A _end\n") == 0)
96 return 1;
97 return 0;
98}
99
100/*
101 * Find address for start and end in System.map.
102 * The file looks like this:
103 * f0004000 T _start
104 * f0379f79 A _end
105 * 1234567890123456
 106 * ^column 1
107 * There is support for 64 bit addresses too.
108 *
109 * Return 0 if either start or end is not found
110 */
111static int get_start_end(const char *filename, unsigned int *start,
112 unsigned int *end)
113{
114 FILE *map;
115 char buffer[1024];
116
117 *start = 0;
118 *end = 0;
119 map = fopen(filename, "r");
120 if (!map)
121 die(filename);
122 while (fgets(buffer, 1024, map)) {
123 if (start_line(buffer))
124 *start = strtoul(buffer, NULL, 16);
125 else if (end_line(buffer))
126 *end = strtoul(buffer, NULL, 16);
127 }
128 fclose (map);
129
130 if (*start == 0 || *end == 0)
131 return 0;
132
133 return 1;
134}
135
136#define LOOKBACK (128 * 4)
137#define BUFSIZE 1024
138/*
139 * Find the HdrS entry from head_32/head_64.
140 * We check if it is at the beginning of the file (sparc64 case)
141 * and if not we search for it.
 142 * When we search, we do so in steps of 4, as HdrS is on a 4-byte
 143 * aligned address (it has the same alignment as sparc instructions).
 144 * Return the offset to the HdrS entry (as an off_t).
145 */
146static off_t get_hdrs_offset(int kernelfd, const char *filename)
147{
148 char buffer[BUFSIZE];
149 off_t offset;
150 int i;
151
152 if (lseek(kernelfd, 0, SEEK_SET) < 0)
153 die("lseek");
154 if (read(kernelfd, buffer, BUFSIZE) != BUFSIZE)
155 die(filename);
156
157 if (buffer[40] == 'H' && buffer[41] == 'd' &&
158 buffer[42] == 'r' && buffer[43] == 'S') {
159 return 40;
160 } else {
161 /* Find the gokernel label */
162 /* Decode offset from branch instruction */
163 offset = ld2(buffer + AOUT_TEXT_OFFSET + 2) << 2;
164 /* Go back 512 bytes so we do not miss HdrS */
165 offset -= LOOKBACK;
166 /* skip a.out header */
167 offset += AOUT_TEXT_OFFSET;
168 if (lseek(kernelfd, offset, SEEK_SET) < 0)
169 die("lseek");
170 if (read(kernelfd, buffer, BUFSIZE) != BUFSIZE)
171 die(filename);
172
173 for (i = 0; i < LOOKBACK; i += 4) {
174 if (buffer[i + 0] == 'H' && buffer[i + 1] == 'd' &&
175 buffer[i + 2] == 'r' && buffer[i + 3] == 'S') {
176 return offset + i;
177 }
178 }
179 }
180 fprintf (stderr, "Couldn't find headers signature in %s\n", filename);
181 exit(1);
182}
183
184int main(int argc,char **argv)
185{
186 static char aout_magic[] = { 0x01, 0x03, 0x01, 0x07 };
187 char buffer[1024];
188 unsigned int i, start, end;
189 off_t offset;
190 struct stat s;
191 int image, tail;
192
193 if (argc != 5)
194 usage();
195 if (strcmp(argv[1], "64") == 0)
196 is64bit = 1;
197 if (stat (argv[4], &s) < 0)
198 die(argv[4]);
199
200 if (!get_start_end(argv[3], &start, &end)) {
201 fprintf(stderr, "Could not determine start and end from %s\n",
202 argv[3]);
203 exit(1);
204 }
205 if ((image = open(argv[2], O_RDWR)) < 0)
206 die(argv[2]);
207 if (read(image, buffer, 512) != 512)
208 die(argv[2]);
209 if (memcmp(buffer, aout_magic, 4) != 0) {
210 fprintf (stderr, "Not a.out. Don't blame me.\n");
211 exit(1);
212 }
213 /*
214 * We need to fill in values for
215 * sparc_ramdisk_image + sparc_ramdisk_size
216 * To locate these symbols search for the "HdrS" text which appear
217 * in the image a little before the gokernel symbol.
218 * See definition of these in init_32.S
219 */
220
221 offset = get_hdrs_offset(image, argv[2]);
222 /* skip HdrS + LINUX_VERSION_CODE + HdrS version */
223 offset += 10;
224
225 if (lseek(image, offset, 0) < 0)
226 die("lseek");
227
228 /*
229 * root_flags = 0
230 * root_dev = 1 (RAMDISK_MAJOR)
231 * ram_flags = 0
 232 * sparc_ramdisk_image = PAGE-aligned address after _end
233 * sparc_ramdisk_size = size of image
234 */
235 st4(buffer, 0);
236 st4(buffer + 4, 0x01000000);
237 st4(buffer + 8, align(end + 32));
238 st4(buffer + 12, s.st_size);
239
240 if (write(image, buffer + 2, 14) != 14)
241 die(argv[2]);
242
243 /* For sparc64 update a_text and clear a_data + a_bss */
244 if (is64bit)
245 {
246 if (lseek(image, 4, 0) < 0)
247 die("lseek");
248 /* a_text */
249 st4(buffer, align(end + 32 + 8191) - (start & ~0x3fffffUL) +
250 s.st_size);
251 /* a_data */
252 st4(buffer + 4, 0);
253 /* a_bss */
254 st4(buffer + 8, 0);
255 if (write(image, buffer, 12) != 12)
256 die(argv[2]);
257 }
258
259 /* seek page aligned boundary in the image file and add boot image */
260 if (lseek(image, AOUT_TEXT_OFFSET - start + align(end + 32), 0) < 0)
261 die("lseek");
262 if ((tail = open(argv[4], O_RDONLY)) < 0)
263 die(argv[4]);
264 while ((i = read(tail, buffer, 1024)) > 0)
265 if (write(image, buffer, i) != i)
266 die(argv[2]);
267 if (close(image) < 0)
268 die("close");
269 if (close(tail) < 0)
270 die("close");
271 return 0;
272}
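
Three helpers carry most of piggyback: System.map supplies _start/_end, st4()/ld2() write and read the header words in the big-endian order the PROM expects, and align() places the ramdisk on the first boot-page boundary past _end. A standalone sketch exercising them (unsigned char buffers avoid the sign-extension risk of plain char):

#include <stdio.h>

static int is64bit;

/* Mirrors align() above: 8 KiB boot pages on sparc64, 4 KiB on
 * sparc32. */
static int align(int n)
{
	return is64bit ? (n + 0x1fff) & ~0x1fff : (n + 0xfff) & ~0xfff;
}

/* Big-endian load/store, independent of host byte order. */
static unsigned short ld2(const unsigned char *p)
{
	return (p[0] << 8) | p[1];
}

static void st4(unsigned char *p, unsigned int x)
{
	p[0] = x >> 24;
	p[1] = x >> 16;
	p[2] = x >> 8;
	p[3] = x;
}

int main(void)
{
	unsigned char buf[4];

	st4(buf, 0x01000000);	/* root_dev = 1 (RAMDISK_MAJOR) */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	printf("ld2(buf) = 0x%04x\n", ld2(buf));		/* 0x0100 */

	is64bit = 1;
	printf("align(0x2001) = 0x%x\n", align(0x2001));	/* 0x4000 */
	return 0;
}
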
diff --git a/arch/sparc/boot/piggyback_32.c b/arch/sparc/boot/piggyback_32.c
deleted file mode 100644
index ac944aec7301..000000000000
--- a/arch/sparc/boot/piggyback_32.c
+++ /dev/null
@@ -1,137 +0,0 @@
1/*
2 Simple utility to make a single-image install kernel with initial ramdisk
3 for Sparc tftpbooting without need to set up nfs.
4
5 Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 Pete Zaitcev <zaitcev@yahoo.com> endian fixes for cross-compiles, 2000.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
21
22#include <stdio.h>
23#include <string.h>
24#include <ctype.h>
25#include <errno.h>
26#include <fcntl.h>
27#include <dirent.h>
28#include <unistd.h>
29#include <stdlib.h>
30#include <sys/types.h>
31#include <sys/stat.h>
32
33/*
34 * Note: run this on an a.out kernel (use elftoaout for it),
35 * as PROM looks for a.out image only.
36 */
37
38static unsigned short ld2(char *p)
39{
40 return (p[0] << 8) | p[1];
41}
42
43static unsigned int ld4(char *p)
44{
45 return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
46}
47
48static void st4(char *p, unsigned int x)
49{
50 p[0] = x >> 24;
51 p[1] = x >> 16;
52 p[2] = x >> 8;
53 p[3] = x;
54}
55
56static void usage(void)
57{
58 /* fs_img.gz is an image of initial ramdisk. */
59 fprintf(stderr, "Usage: piggyback vmlinux.aout System.map fs_img.gz\n");
60 fprintf(stderr, "\tKernel image will be modified in place.\n");
61 exit(1);
62}
63
64static void die(char *str)
65{
66 perror (str);
67 exit(1);
68}
69
70int main(int argc,char **argv)
71{
72 static char aout_magic[] = { 0x01, 0x03, 0x01, 0x07 };
73 char buffer[1024], *q, *r;
74 unsigned int i, j, k, start, end, offset;
75 FILE *map;
76 struct stat s;
77 int image, tail;
78
79 if (argc != 4) usage();
80 start = end = 0;
81 if (stat (argv[3], &s) < 0) die (argv[3]);
82 map = fopen (argv[2], "r");
83 if (!map) die(argv[2]);
84 while (fgets (buffer, 1024, map)) {
85 if (!strcmp (buffer + 8, " T start\n") || !strcmp (buffer + 16, " T start\n"))
86 start = strtoul (buffer, NULL, 16);
87 else if (!strcmp (buffer + 8, " A _end\n") || !strcmp (buffer + 16, " A _end\n"))
88 end = strtoul (buffer, NULL, 16);
89 }
90 fclose (map);
91 if (!start || !end) {
92 fprintf (stderr, "Could not determine start and end from System.map\n");
93 exit(1);
94 }
95 if ((image = open(argv[1],O_RDWR)) < 0) die(argv[1]);
96 if (read(image,buffer,512) != 512) die(argv[1]);
97 if (memcmp (buffer, "\177ELF", 4) == 0) {
98 q = buffer + ld4(buffer + 28);
99 i = ld4(q + 4) + ld4(buffer + 24) - ld4(q + 8);
100 if (lseek(image,i,0) < 0) die("lseek");
101 if (read(image,buffer,512) != 512) die(argv[1]);
102 j = 0;
103 } else if (memcmp(buffer, aout_magic, 4) == 0) {
104 i = j = 32;
105 } else {
106 fprintf (stderr, "Not ELF nor a.out. Don't blame me.\n");
107 exit(1);
108 }
109 k = i;
110 i += (ld2(buffer + j + 2)<<2) - 512;
111 if (lseek(image,i,0) < 0) die("lseek");
112 if (read(image,buffer,1024) != 1024) die(argv[1]);
113 for (q = buffer, r = q + 512; q < r; q += 4) {
114 if (*q == 'H' && q[1] == 'd' && q[2] == 'r' && q[3] == 'S')
115 break;
116 }
117 if (q == r) {
118 fprintf (stderr, "Couldn't find headers signature in the kernel.\n");
119 exit(1);
120 }
121 offset = i + (q - buffer) + 10;
122 if (lseek(image, offset, 0) < 0) die ("lseek");
123
124 st4(buffer, 0);
125 st4(buffer + 4, 0x01000000);
126 st4(buffer + 8, (end + 32 + 4095) & ~4095);
127 st4(buffer + 12, s.st_size);
128
129 if (write(image,buffer+2,14) != 14) die (argv[1]);
130 if (lseek(image, k - start + ((end + 32 + 4095) & ~4095), 0) < 0) die ("lseek");
131 if ((tail = open(argv[3],O_RDONLY)) < 0) die(argv[3]);
132 while ((i = read (tail,buffer,1024)) > 0)
133 if (write(image,buffer,i) != i) die (argv[1]);
134 if (close(image) < 0) die("close");
135 if (close(tail) < 0) die("close");
136 return 0;
137}
diff --git a/arch/sparc/boot/piggyback_64.c b/arch/sparc/boot/piggyback_64.c
deleted file mode 100644
index a26a686cb5aa..000000000000
--- a/arch/sparc/boot/piggyback_64.c
+++ /dev/null
@@ -1,110 +0,0 @@
1/*
2 Simple utility to make a single-image install kernel with initial ramdisk
3 for Sparc64 tftpbooting without need to set up nfs.
4
5 Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
20
21#include <stdio.h>
22#include <string.h>
23#include <ctype.h>
24#include <errno.h>
25#include <fcntl.h>
26#include <dirent.h>
27#include <unistd.h>
28#include <stdlib.h>
29#include <sys/types.h>
30#include <sys/stat.h>
31
32/* Note: run this on an a.out kernel (use elftoaout for it), as PROM looks for a.out image onlly
33 usage: piggyback vmlinux System.map tail, where tail is gzipped fs of the initial ramdisk */
34
35static void die(char *str)
36{
37 perror (str);
38 exit(1);
39}
40
41int main(int argc,char **argv)
42{
43 char buffer [1024], *q, *r;
44 unsigned int i, j, k, start, end, offset;
45 FILE *map;
46 struct stat s;
47 int image, tail;
48
49 start = end = 0;
50 if (stat (argv[3], &s) < 0) die (argv[3]);
51 map = fopen (argv[2], "r");
52 if (!map) die(argv[2]);
53 while (fgets (buffer, 1024, map)) {
54 if (!strcmp (buffer + 19, "_start\n"))
55 start = strtoul (buffer + 8, NULL, 16);
56 else if (!strcmp (buffer + 19, "_end\n"))
57 end = strtoul (buffer + 8, NULL, 16);
58 }
59 fclose (map);
60 if ((image = open(argv[1],O_RDWR)) < 0) die(argv[1]);
61 if (read(image,buffer,512) != 512) die(argv[1]);
62 if (!memcmp (buffer, "\177ELF", 4)) {
63 unsigned int *p = (unsigned int *)(buffer + *(unsigned int *)(buffer + 28));
64
65 i = p[1] + *(unsigned int *)(buffer + 24) - p[2];
66 if (lseek(image,i,0) < 0) die("lseek");
67 if (read(image,buffer,512) != 512) die(argv[1]);
68 j = 0;
69 } else if (*(unsigned int *)buffer == 0x01030107) {
70 i = j = 32;
71 } else {
72 fprintf (stderr, "Not ELF nor a.out. Don't blame me.\n");
73 exit(1);
74 }
75 k = i;
76 if (j == 32 && buffer[40] == 'H' && buffer[41] == 'd' && buffer[42] == 'r' && buffer[43] == 'S') {
77 offset = 40 + 10;
78 } else {
79 i += ((*(unsigned short *)(buffer + j + 2))<<2) - 512;
80 if (lseek(image,i,0) < 0) die("lseek");
81 if (read(image,buffer,1024) != 1024) die(argv[1]);
82 for (q = buffer, r = q + 512; q < r; q += 4) {
83 if (*q == 'H' && q[1] == 'd' && q[2] == 'r' && q[3] == 'S')
84 break;
85 }
86 if (q == r) {
87 fprintf (stderr, "Couldn't find headers signature in the kernel.\n");
88 exit(1);
89 }
90 offset = i + (q - buffer) + 10;
91 }
92 if (lseek(image, offset, 0) < 0) die ("lseek");
93 *(unsigned *)buffer = 0;
94 *(unsigned *)(buffer + 4) = 0x01000000;
95 *(unsigned *)(buffer + 8) = ((end + 32 + 8191) & ~8191);
96 *(unsigned *)(buffer + 12) = s.st_size;
97 if (write(image,buffer+2,14) != 14) die (argv[1]);
98 if (lseek(image, 4, 0) < 0) die ("lseek");
99 *(unsigned *)buffer = ((end + 32 + 8191) & ~8191) - (start & ~0x3fffffUL) + s.st_size;
100 *(unsigned *)(buffer + 4) = 0;
101 *(unsigned *)(buffer + 8) = 0;
102 if (write(image,buffer,12) != 12) die (argv[1]);
103 if (lseek(image, k - start + ((end + 32 + 8191) & ~8191), 0) < 0) die ("lseek");
104 if ((tail = open(argv[3],O_RDONLY)) < 0) die(argv[3]);
105 while ((i = read (tail,buffer,1024)) > 0)
106 if (write(image,buffer,i) != i) die (argv[1]);
107 if (close(image) < 0) die("close");
108 if (close(tail) < 0) die("close");
109 return 0;
110}
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 3ea5964c43b4..8580d1764f90 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -224,6 +224,18 @@ static inline void sparc_leon3_disable_cache(void)
224 "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2"); 224 "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2");
225}; 225};
226 226
227static inline unsigned long sparc_leon3_asr17(void)
228{
229 u32 asr17;
230 __asm__ __volatile__ ("rd %%asr17, %0\n\t" : "=r"(asr17));
231 return asr17;
232};
233
234static inline int sparc_leon3_cpuid(void)
235{
236 return sparc_leon3_asr17() >> 28;
237}
238
227#endif /*!__ASSEMBLY__*/ 239#endif /*!__ASSEMBLY__*/
228 240
229#ifdef CONFIG_SMP 241#ifdef CONFIG_SMP
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h
index 618e88821795..263c719e96f5 100644
--- a/arch/sparc/include/asm/leon_amba.h
+++ b/arch/sparc/include/asm/leon_amba.h
@@ -100,9 +100,8 @@ struct leon3_irqctrl_regs_map {
100 u32 mpbroadcast; 100 u32 mpbroadcast;
101 u32 notused02; 101 u32 notused02;
102 u32 notused03; 102 u32 notused03;
103 u32 notused10; 103 u32 ampctrl;
104 u32 notused11; 104 u32 icsel[2];
105 u32 notused12;
106 u32 notused13; 105 u32 notused13;
107 u32 notused20; 106 u32 notused20;
108 u32 notused21; 107 u32 notused21;
@@ -112,6 +111,7 @@ struct leon3_irqctrl_regs_map {
112 u32 force[16]; 111 u32 force[16];
113 /* Extended IRQ registers */ 112 /* Extended IRQ registers */
114 u32 intid[16]; /* 0xc0 */ 113 u32 intid[16]; /* 0xc0 */
114 u32 unused[(0x1000-0x100)/4];
115}; 115};
116 116
117struct leon3_apbuart_regs_map { 117struct leon3_apbuart_regs_map {
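
sparc_leon3_cpuid() takes the top nibble of %asr17 as the CPU index, and leon_init_timers() below applies the same nibble addressing to the new icsel words (two 32-bit registers, eight 4-bit fields each, CPU 0 in the most significant nibble) to find which of the 4 KiB-spaced IRQ controllers serves a given CPU. A host-side sketch of the decode; the register values are made up:

#include <stdio.h>

/* Mirrors the icsel expression in leon_init_timers(): icsel[cpu / 8]
 * holds eight 4-bit controller-select fields, CPU 0 topmost. */
static unsigned int icsel_for_cpu(const unsigned int icsel[2], int cpu)
{
	return (icsel[cpu / 8] >> ((7 - (cpu & 0x7)) * 4)) & 0xf;
}

int main(void)
{
	/* Hypothetical strapping: CPUs 0 and 1 on controller 0,
	 * CPU 2 on controller 1. */
	unsigned int icsel[2] = { 0x00100000, 0 };
	unsigned long asr17 = 0x20000000;	/* made-up sample value */
	int cpu;

	printf("cpuid from asr17: %lu\n", asr17 >> 28);	/* 2 */
	for (cpu = 0; cpu < 3; cpu++)
		printf("cpu%d -> irqctrl %u\n", cpu,
		       icsel_for_cpu(icsel, cpu));
	return 0;
}
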
diff --git a/arch/sparc/include/asm/oplib_32.h b/arch/sparc/include/asm/oplib_32.h
index 9e5c64084b86..71e5e9aeb67e 100644
--- a/arch/sparc/include/asm/oplib_32.h
+++ b/arch/sparc/include/asm/oplib_32.h
@@ -48,18 +48,6 @@ extern void prom_init(struct linux_romvec *rom_ptr);
48/* Boot argument acquisition, returns the boot command line string. */ 48/* Boot argument acquisition, returns the boot command line string. */
49extern char *prom_getbootargs(void); 49extern char *prom_getbootargs(void);
50 50
51/* Device utilities. */
52
53/* Map and unmap devices in IO space at virtual addresses. Note that the
54 * virtual address you pass is a request and the prom may put your mappings
55 * somewhere else, so check your return value as that is where your new
56 * mappings really are!
57 *
58 * Another note, these are only available on V2 or higher proms!
59 */
60extern char *prom_mapio(char *virt_hint, int io_space, unsigned int phys_addr, unsigned int num_bytes);
61extern void prom_unmapio(char *virt_addr, unsigned int num_bytes);
62
63/* Miscellaneous routines, don't really fit in any category per se. */ 51/* Miscellaneous routines, don't really fit in any category per se. */
64 52
65/* Reboot the machine with the command line passed. */ 53/* Reboot the machine with the command line passed. */
@@ -76,7 +64,7 @@ extern void prom_cmdline(void);
76/* Enter the prom, with no chance of continuation for the stand-alone 64/* Enter the prom, with no chance of continuation for the stand-alone
77 * which calls this. 65 * which calls this.
78 */ 66 */
79extern void prom_halt(void) __attribute__ ((noreturn)); 67extern void __noreturn prom_halt(void);
80 68
81/* Set the PROM 'sync' callback function to the passed function pointer. 69/* Set the PROM 'sync' callback function to the passed function pointer.
82 * When the user gives the 'sync' command at the prom prompt while the 70 * When the user gives the 'sync' command at the prom prompt while the
@@ -117,25 +105,6 @@ extern void prom_write(const char *buf, unsigned int len);
117extern int prom_startcpu(int cpunode, struct linux_prom_registers *context_table, 105extern int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
118 int context, char *program_counter); 106 int context, char *program_counter);
119 107
120/* Stop the CPU with the passed device tree node. */
121extern int prom_stopcpu(int cpunode);
122
123/* Idle the CPU with the passed device tree node. */
124extern int prom_idlecpu(int cpunode);
125
126/* Re-Start the CPU with the passed device tree node. */
127extern int prom_restartcpu(int cpunode);
128
129/* PROM memory allocation facilities... */
130
131/* Allocated at possibly the given virtual address a chunk of the
132 * indicated size.
133 */
134extern char *prom_alloc(char *virt_hint, unsigned int size);
135
136/* Free a previously allocated chunk. */
137extern void prom_free(char *virt_addr, unsigned int size);
138
139/* Sun4/sun4c specific memory-management startup hook. */ 108/* Sun4/sun4c specific memory-management startup hook. */
140 109
141/* Map the passed segment in the given context at the passed 110/* Map the passed segment in the given context at the passed
@@ -144,6 +113,8 @@ extern void prom_free(char *virt_addr, unsigned int size);
144extern void prom_putsegment(int context, unsigned long virt_addr, 113extern void prom_putsegment(int context, unsigned long virt_addr,
145 int physical_segment); 114 int physical_segment);
146 115
116/* Initialize the memory lists based upon the prom version. */
117void prom_meminit(void);
147 118
148/* PROM device tree traversal functions... */ 119/* PROM device tree traversal functions... */
149 120
@@ -178,19 +149,11 @@ extern int prom_getbool(phandle node, char *prop);
178/* Acquire a string property, null string on error. */ 149/* Acquire a string property, null string on error. */
179extern void prom_getstring(phandle node, char *prop, char *buf, int bufsize); 150extern void prom_getstring(phandle node, char *prop, char *buf, int bufsize);
180 151
181/* Does the passed node have the given "name"? YES=1 NO=0 */
182extern int prom_nodematch(phandle thisnode, char *name);
183
184/* Search all siblings starting at the passed node for "name" matching 152/* Search all siblings starting at the passed node for "name" matching
185 * the given string. Returns the node on success, zero on failure. 153 * the given string. Returns the node on success, zero on failure.
186 */ 154 */
187extern phandle prom_searchsiblings(phandle node_start, char *name); 155extern phandle prom_searchsiblings(phandle node_start, char *name);
188 156
189/* Return the first property type, as a string, for the given node.
190 * Returns a null string on error.
191 */
192extern char *prom_firstprop(phandle node, char *buffer);
193
194/* Returns the next property after the passed property for the given 157/* Returns the next property after the passed property for the given
195 * node. Returns null string on failure. 158 * node. Returns null string on failure.
196 */ 159 */
@@ -199,9 +162,6 @@ extern char *prom_nextprop(phandle node, char *prev_property, char *buffer);
199/* Returns phandle of the path specified */ 162/* Returns phandle of the path specified */
200extern phandle prom_finddevice(char *name); 163extern phandle prom_finddevice(char *name);
201 164
202/* Returns 1 if the specified node has given property. */
203extern int prom_node_has_property(phandle node, char *property);
204
205/* Set the indicated property at the given node with the passed value. 165/* Set the indicated property at the given node with the passed value.
206 * Returns the number of bytes of your value that the prom took. 166 * Returns the number of bytes of your value that the prom took.
207 */ 167 */
@@ -219,6 +179,8 @@ extern void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nr
219extern void prom_apply_generic_ranges(phandle node, phandle parent, 179extern void prom_apply_generic_ranges(phandle node, phandle parent,
220 struct linux_prom_registers *sbusregs, int nregs); 180 struct linux_prom_registers *sbusregs, int nregs);
221 181
182void prom_ranges_init(void);
183
222/* CPU probing helpers. */ 184/* CPU probing helpers. */
223int cpu_find_by_instance(int instance, phandle *prom_node, int *mid); 185int cpu_find_by_instance(int instance, phandle *prom_node, int *mid);
224int cpu_find_by_mid(int mid, phandle *prom_node); 186int cpu_find_by_mid(int mid, phandle *prom_node);
diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
index 8cd0df34e82b..97a90475c314 100644
--- a/arch/sparc/include/asm/oplib_64.h
+++ b/arch/sparc/include/asm/oplib_64.h
@@ -18,8 +18,8 @@ extern char prom_version[];
18 */ 18 */
19extern phandle prom_root_node; 19extern phandle prom_root_node;
20 20
21/* PROM stdin and stdout */ 21/* PROM stdout */
22extern int prom_stdin, prom_stdout; 22extern int prom_stdout;
23 23
24/* /chosen node of the prom device tree, this stays constant after 24/* /chosen node of the prom device tree, this stays constant after
25 * initialization is complete. 25 * initialization is complete.
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h
index 6e8bfa1786da..4d3dbe3703e9 100644
--- a/arch/sparc/include/asm/perf_event.h
+++ b/arch/sparc/include/asm/perf_event.h
@@ -4,8 +4,6 @@
4#ifdef CONFIG_PERF_EVENTS 4#ifdef CONFIG_PERF_EVENTS
5#include <asm/ptrace.h> 5#include <asm/ptrace.h>
6 6
7extern void init_hw_perf_events(void);
8
9#define perf_arch_fetch_caller_regs(regs, ip) \ 7#define perf_arch_fetch_caller_regs(regs, ip) \
10do { \ 8do { \
11 unsigned long _pstate, _asi, _pil, _i7, _fp; \ 9 unsigned long _pstate, _asi, _pil, _i7, _fp; \
@@ -26,8 +24,6 @@ do { \
26 (regs)->u_regs[UREG_I6] = _fp; \ 24 (regs)->u_regs[UREG_I6] = _fp; \
27 (regs)->u_regs[UREG_I7] = _i7; \ 25 (regs)->u_regs[UREG_I7] = _i7; \
28} while (0) 26} while (0)
29#else
30static inline void init_hw_perf_events(void) { }
31#endif 27#endif
32 28
33#endif 29#endif
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index 21bb2590d4ae..59423491cef8 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -73,12 +73,11 @@ sun4e_notsup:
73 73
74 /* The Sparc trap table, bootloader gives us control at _start. */ 74 /* The Sparc trap table, bootloader gives us control at _start. */
75 __HEAD 75 __HEAD
76 .globl start, _stext, _start, __stext 76 .globl _stext, _start, __stext
77 .globl trapbase 77 .globl trapbase
78_start: /* danger danger */ 78_start: /* danger danger */
79__stext: 79__stext:
80_stext: 80_stext:
81start:
82trapbase: 81trapbase:
83#ifdef CONFIG_SMP 82#ifdef CONFIG_SMP
84trapbase_cpu0: 83trapbase_cpu0:
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index f01c42661ee5..fdab7f854f80 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -23,15 +23,16 @@
23#include "prom.h" 23#include "prom.h"
24#include "irq.h" 24#include "irq.h"
25 25
26struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address, initialized by amba_init() */ 26struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address */
27struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address, initialized by amba_init() */ 27struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address */
28struct amba_apb_device leon_percpu_timer_dev[16]; 28struct amba_apb_device leon_percpu_timer_dev[16];
29 29
30int leondebug_irq_disable; 30int leondebug_irq_disable;
31int leon_debug_irqout; 31int leon_debug_irqout;
32static int dummy_master_l10_counter; 32static int dummy_master_l10_counter;
33 33
34unsigned long leon3_gptimer_irq; /* interrupt controller irq number, initialized by amba_init() */ 34unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
35unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
35unsigned int sparc_leon_eirq; 36unsigned int sparc_leon_eirq;
36#define LEON_IMASK ((&leon3_irqctrl_regs->mask[0])) 37#define LEON_IMASK ((&leon3_irqctrl_regs->mask[0]))
37 38
@@ -105,21 +106,79 @@ static void leon_disable_irq(unsigned int irq_nr)
105void __init leon_init_timers(irq_handler_t counter_fn) 106void __init leon_init_timers(irq_handler_t counter_fn)
106{ 107{
107 int irq; 108 int irq;
109 struct device_node *rootnp, *np, *nnp;
110 struct property *pp;
111 int len;
112 int cpu, icsel;
113 int ampopts;
108 114
109 leondebug_irq_disable = 0; 115 leondebug_irq_disable = 0;
110 leon_debug_irqout = 0; 116 leon_debug_irqout = 0;
111 master_l10_counter = (unsigned int *)&dummy_master_l10_counter; 117 master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
112 dummy_master_l10_counter = 0; 118 dummy_master_l10_counter = 0;
113 119
 114 if (leon3_gptimer_regs && leon3_irqctrl_regs) { 120 /* Find the IRQMP IRQ controller registers base address, otherwise bail out. */
115 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0); 121 rootnp = of_find_node_by_path("/ambapp0");
116 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld, 122 if (!rootnp)
117 (((1000000 / HZ) - 1))); 123 goto bad;
118 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); 124 np = of_find_node_by_name(rootnp, "GAISLER_IRQMP");
125 if (!np) {
126 np = of_find_node_by_name(rootnp, "01_00d");
127 if (!np)
128 goto bad;
129 }
130 pp = of_find_property(np, "reg", &len);
131 if (!pp)
132 goto bad;
133 leon3_irqctrl_regs = *(struct leon3_irqctrl_regs_map **)pp->value;
134
135 /* Find GPTIMER Timer Registers base address otherwise bail out. */
136 nnp = rootnp;
137 do {
138 np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
139 if (!np) {
140 np = of_find_node_by_name(nnp, "01_011");
141 if (!np)
142 goto bad;
143 }
144
145 ampopts = 0;
146 pp = of_find_property(np, "ampopts", &len);
147 if (pp) {
148 ampopts = *(int *)pp->value;
149 if (ampopts == 0) {
150 /* Skip this instance, resource already
151 * allocated by other OS */
152 nnp = np;
153 continue;
154 }
155 }
156
157 /* Select Timer-Instance on Timer Core. Default is zero */
158 leon3_gptimer_idx = ampopts & 0x7;
159
160 pp = of_find_property(np, "reg", &len);
161 if (pp)
162 leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
163 pp->value;
164 pp = of_find_property(np, "interrupts", &len);
165 if (pp)
166 leon3_gptimer_irq = *(unsigned int *)pp->value;
167 } while (0);
168
169 if (leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq) {
170 LEON3_BYPASS_STORE_PA(
171 &leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
172 LEON3_BYPASS_STORE_PA(
173 &leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
174 (((1000000 / HZ) - 1)));
175 LEON3_BYPASS_STORE_PA(
176 &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
119 177
120#ifdef CONFIG_SMP 178#ifdef CONFIG_SMP
121 leon_percpu_timer_dev[0].start = (int)leon3_gptimer_regs; 179 leon_percpu_timer_dev[0].start = (int)leon3_gptimer_regs;
122 leon_percpu_timer_dev[0].irq = leon3_gptimer_irq+1; 180 leon_percpu_timer_dev[0].irq = leon3_gptimer_irq + 1 +
181 leon3_gptimer_idx;
123 182
124 if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) & 183 if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
125 (1<<LEON3_GPTIMER_SEPIRQ))) { 184 (1<<LEON3_GPTIMER_SEPIRQ))) {
@@ -127,17 +186,33 @@ void __init leon_init_timers(irq_handler_t counter_fn)
 			BUG();
 		}
 
-		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].val, 0);
-		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/HZ) - 1)));
-		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 0);
+		LEON3_BYPASS_STORE_PA(
+			&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val, 0);
+		LEON3_BYPASS_STORE_PA(
+			&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
+			(((1000000/HZ) - 1)));
+		LEON3_BYPASS_STORE_PA(
+			&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, 0);
 # endif
 
+		/*
+		 * The IRQ controller may (if implemented) consist of multiple
+		 * IRQ controllers, each mapped on a 4Kb boundary.
+		 * Each CPU may be routed to different IRQCTRLs, however
+		 * we assume that all CPUs (in SMP system) is routed to the
+		 * same IRQ Controller, and for non-SMP only one IRQCTRL is
+		 * accessed anyway.
+		 * In AMP systems, Linux must run on CPU0 for the time being.
+		 */
+		cpu = sparc_leon3_cpuid();
+		icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[cpu/8]);
+		icsel = (icsel >> ((7 - (cpu&0x7)) * 4)) & 0xf;
+		leon3_irqctrl_regs += icsel;
 	} else {
-		printk(KERN_ERR "No Timer/irqctrl found\n");
-		BUG();
+		goto bad;
 	}
 
-	irq = request_irq(leon3_gptimer_irq,
+	irq = request_irq(leon3_gptimer_irq+leon3_gptimer_idx,
 			  counter_fn,
 			  (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
 
@@ -169,13 +244,13 @@ void __init leon_init_timers(irq_handler_t counter_fn)
 # endif
 
 	if (leon3_gptimer_regs) {
-		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl,
+		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
 				      LEON3_GPTIMER_EN |
 				      LEON3_GPTIMER_RL |
 				      LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN);
 
 #ifdef CONFIG_SMP
-		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl,
+		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
 				      LEON3_GPTIMER_EN |
 				      LEON3_GPTIMER_RL |
 				      LEON3_GPTIMER_LD |
@@ -183,6 +258,11 @@ void __init leon_init_timers(irq_handler_t counter_fn)
 #endif
 
 	}
+	return;
+bad:
+	printk(KERN_ERR "No Timer/irqctrl found\n");
+	BUG();
+	return;
 }
 
 void leon_clear_clock_irq(void)
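The icsel decode introduced above packs eight CPUs into each 32-bit icsel word, with CPU0 in the most significant nibble. A minimal standalone sketch of that decode (illustrative only; icsel_for_cpu and the sample register value are hypothetical, not part of the patch):

#include <stdio.h>

/* Mirrors the icsel arithmetic in leon_init_timers() above. */
static unsigned int icsel_for_cpu(const unsigned int *icsel_regs, int cpu)
{
	unsigned int word = icsel_regs[cpu / 8];	/* eight CPUs per register */

	return (word >> ((7 - (cpu & 0x7)) * 4)) & 0xf;	/* one nibble per CPU */
}

int main(void)
{
	unsigned int regs[2] = { 0x10000000, 0 };	/* CPU0 routed to controller 1 */

	printf("cpu0 uses irqctrl %u\n", icsel_for_cpu(regs, 0));
	return 0;
}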
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index a4bd7ba74c89..300f810142f5 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -270,8 +270,6 @@ int __init nmi_init(void)
 			atomic_set(&nmi_active, -1);
 		}
 	}
-	if (!err)
-		init_hw_perf_events();
 
 	return err;
 }
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 0d6deb55a2ae..760578687e7c 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1307,20 +1307,23 @@ static bool __init supported_pmu(void)
 	return false;
 }
 
-void __init init_hw_perf_events(void)
+int __init init_hw_perf_events(void)
 {
 	pr_info("Performance events: ");
 
 	if (!supported_pmu()) {
 		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
-		return;
+		return 0;
 	}
 
 	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
 
-	perf_pmu_register(&pmu);
+	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 	register_die_notifier(&perf_event_nmi_notifier);
+
+	return 0;
 }
+early_initcall(init_hw_perf_events);
 
 void perf_callchain_kernel(struct perf_callchain_entry *entry,
 			   struct pt_regs *regs)
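With init_hw_perf_events() now returning int and registering itself through early_initcall(), the explicit call removed from nmi_init() above becomes unnecessary. A hedged sketch of the pattern (example_hw_init and its probe helper are hypothetical):

#include <linux/init.h>
#include <linux/types.h>

static bool hardware_present(void)	/* hypothetical probe helper */
{
	return true;
}

static int __init example_hw_init(void)
{
	if (!hardware_present())
		return 0;	/* nothing to do is not an error */

	/* ... register PMU, notifiers, etc. ... */
	return 0;
}
early_initcall(example_hw_init);	/* the core kernel runs this during boot */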
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index 0a37e8cfd160..05fb25330583 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -136,18 +136,29 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
 /* "name:vendor:device@irq,addrlo" */
 static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
 {
-	struct amba_prom_registers *regs; unsigned int *intr;
-	unsigned int *device, *vendor;
+	struct amba_prom_registers *regs;
+	unsigned int *intr, *device, *vendor, reg0;
 	struct property *prop;
+	int interrupt = 0;
 
+	/* In order to get a unique ID in the device tree (multiple AMBA devices
+	 * may have the same name) the node number is printed
+	 */
 	prop = of_find_property(dp, "reg", NULL);
-	if (!prop)
-		return;
-	regs = prop->value;
+	if (!prop) {
+		reg0 = (unsigned int)dp->phandle;
+	} else {
+		regs = prop->value;
+		reg0 = regs->phys_addr;
+	}
+
+	/* Not all cores have Interrupt */
 	prop = of_find_property(dp, "interrupts", NULL);
 	if (!prop)
-		return;
-	intr = prop->value;
+		intr = &interrupt; /* IRQ0 does not exist */
+	else
+		intr = prop->value;
+
 	prop = of_find_property(dp, "vendor", NULL);
 	if (!prop)
 		return;
@@ -159,7 +170,7 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
 
 	sprintf(tmp_buf, "%s:%d:%d@%x,%x",
 		dp->name, *vendor, *device,
-		*intr, regs->phys_addr);
+		*intr, reg0);
 }
 
 static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
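For illustration, a node rendered by the updated ambapp_path_component(), say a GAISLER_GPTIMER core with vendor 1, device 0x11, IRQ 8 at address 0x80000300 (all values hypothetical), would come out as below:

#include <stdio.h>

int main(void)
{
	char tmp_buf[64];

	/* Same format string as the hunk above; %d of 0x11 prints 17. */
	snprintf(tmp_buf, sizeof(tmp_buf), "%s:%d:%d@%x,%x",
		 "GAISLER_GPTIMER", 1, 0x11, 8, 0x80000300u);
	printf("%s\n", tmp_buf);	/* GAISLER_GPTIMER:1:17@8,80000300 */
	return 0;
}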
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index b22ce6100403..648f2161b851 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -185,7 +185,6 @@ static void __init boot_flags_init(char *commands)
 
 extern void sun4c_probe_vac(void);
 extern char cputypval;
-extern unsigned long start, end;
 
 extern unsigned short root_flags;
 extern unsigned short root_dev;
@@ -210,7 +209,7 @@ void __init setup_arch(char **cmdline_p)
 	int i;
 	unsigned long highest_paddr;
 
-	sparc_ttable = (struct tt_entry *) &start;
+	sparc_ttable = (struct tt_entry *) &trapbase;
 
 	/* Initialize PROM console and command line. */
 	*cmdline_p = prom_getbootargs();
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index ddd0d86e508e..b5137cc2aba3 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -435,16 +435,14 @@ void __init sun4c_probe_memerr_reg(void)
 
 static inline void sun4c_init_ss2_cache_bug(void)
 {
-	extern unsigned long start;
-
 	if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
 	    (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
 	    (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
 		/* Whee.. */
 		printk("SS2 cache bug detected, uncaching trap table page\n");
-		sun4c_flush_page((unsigned int) &start);
-		sun4c_put_pte(((unsigned long) &start),
-			      (sun4c_get_pte((unsigned long) &start) | _SUN4C_PAGE_NOCACHE));
+		sun4c_flush_page((unsigned int) &_start);
+		sun4c_put_pte(((unsigned long) &_start),
+			      (sun4c_get_pte((unsigned long) &_start) | _SUN4C_PAGE_NOCACHE));
 	}
 }
 
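Replacing the C-level `start` variable with the linker-provided `_start` symbol follows the usual idiom for symbols defined in startup assembly or a linker script: declare an opaque extern and use only its address. A minimal sketch (the declaration style is conventional, not taken from the patch):

extern char _start[];	/* defined by head assembly/linker, not by C code */

static unsigned long example_trap_table_base(void)
{
	return (unsigned long)&_start;	/* the symbol's address is the value */
}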
diff --git a/arch/sparc/prom/Makefile b/arch/sparc/prom/Makefile
index 816c0fa12dc0..8287bbe88768 100644
--- a/arch/sparc/prom/Makefile
+++ b/arch/sparc/prom/Makefile
@@ -5,12 +5,10 @@ asflags := -ansi
 ccflags := -Werror
 
 lib-y                 := bootstr_$(BITS).o
-lib-$(CONFIG_SPARC32) += devmap.o
 lib-y                 += init_$(BITS).o
 lib-$(CONFIG_SPARC32) += memory.o
 lib-y                 += misc_$(BITS).o
 lib-$(CONFIG_SPARC32) += mp.o
-lib-$(CONFIG_SPARC32) += palloc.o
 lib-$(CONFIG_SPARC32) += ranges.o
 lib-$(CONFIG_SPARC32) += segment.o
 lib-y                 += console_$(BITS).o
diff --git a/arch/sparc/prom/bootstr_32.c b/arch/sparc/prom/bootstr_32.c
index 916831da7e67..f5ec32e0d419 100644
--- a/arch/sparc/prom/bootstr_32.c
+++ b/arch/sparc/prom/bootstr_32.c
@@ -29,7 +29,8 @@ prom_getbootargs(void)
 	/* Start from 1 and go over fd(0,0,0)kernel */
 	for(iter = 1; iter < 8; iter++) {
 		arg = (*(romvec->pv_v0bootargs))->argv[iter];
-		if(arg == 0) break;
+		if (arg == NULL)
+			break;
 		while(*arg != 0) {
 			/* Leave place for space and null. */
 			if(cp >= barg_buf + BARG_LEN-2){
diff --git a/arch/sparc/prom/console_32.c b/arch/sparc/prom/console_32.c
index 48863108a44c..b05e3db5fa63 100644
--- a/arch/sparc/prom/console_32.c
+++ b/arch/sparc/prom/console_32.c
@@ -27,13 +27,14 @@ static int prom_nbputchar(const char *buf)
 	spin_lock_irqsave(&prom_lock, flags);
 	switch(prom_vers) {
 	case PROM_V0:
-		i = (*(romvec->pv_nbputchar))(*buf);
+		if ((*(romvec->pv_nbputchar))(*buf))
+			i = 1;
 		break;
 	case PROM_V2:
 	case PROM_V3:
 		if ((*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout,
 							  buf, 0x1) == 1)
-			i = 0;
+			i = 1;
 		break;
 	default:
 		break;
@@ -47,7 +48,7 @@ void prom_console_write_buf(const char *buf, int len)
 {
 	while (len) {
 		int n = prom_nbputchar(buf);
-		if (n)
+		if (n < 0)
 			continue;
 		len--;
 		buf++;
diff --git a/arch/sparc/prom/console_64.c b/arch/sparc/prom/console_64.c
index ed39e75828bd..9de6c8cfe04a 100644
--- a/arch/sparc/prom/console_64.c
+++ b/arch/sparc/prom/console_64.c
@@ -13,8 +13,6 @@
 #include <asm/system.h>
 #include <linux/string.h>
 
-extern int prom_stdin, prom_stdout;
-
 static int __prom_console_write_buf(const char *buf, int len)
 {
 	unsigned long args[7];
diff --git a/arch/sparc/prom/devmap.c b/arch/sparc/prom/devmap.c
deleted file mode 100644
index 46157d2aba0d..000000000000
--- a/arch/sparc/prom/devmap.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * promdevmap.c:  Map device/IO areas to virtual addresses.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-
-extern void restore_current(void);
-
-/* Just like the routines in palloc.c, these should not be used
- * by the kernel at all.  Bootloader facility mainly.  And again,
- * this is only available on V2 proms and above.
- */
-
-/* Map physical device address 'paddr' in IO space 'ios' of size
- * 'num_bytes' to a virtual address, with 'vhint' being a hint to
- * the prom as to where you would prefer the mapping. We return
- * where the prom actually mapped it.
- */
-char *
-prom_mapio(char *vhint, int ios, unsigned int paddr, unsigned int num_bytes)
-{
-	unsigned long flags;
-	char *ret;
-
-	spin_lock_irqsave(&prom_lock, flags);
-	if((num_bytes == 0) || (paddr == 0)) ret = (char *) 0x0;
-	else
-		ret = (*(romvec->pv_v2devops.v2_dumb_mmap))(vhint, ios, paddr,
-							    num_bytes);
-	restore_current();
-	spin_unlock_irqrestore(&prom_lock, flags);
-	return ret;
-}
-
-/* Unmap an IO/device area that was mapped using the above routine. */
-void
-prom_unmapio(char *vaddr, unsigned int num_bytes)
-{
-	unsigned long flags;
-
-	if(num_bytes == 0x0) return;
-	spin_lock_irqsave(&prom_lock, flags);
-	(*(romvec->pv_v2devops.v2_dumb_munmap))(vaddr, num_bytes);
-	restore_current();
-	spin_unlock_irqrestore(&prom_lock, flags);
-}
diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
index 3ff911e7d25b..9c6ac4b81ded 100644
--- a/arch/sparc/prom/init_64.c
+++ b/arch/sparc/prom/init_64.c
@@ -18,7 +18,7 @@
 char prom_version[80];
 
 /* The root node of the prom device tree. */
-int prom_stdin, prom_stdout;
+int prom_stdout;
 phandle prom_chosen_node;
 
 /* You must call prom_init() before you attempt to use any of the
@@ -38,7 +38,6 @@ void __init prom_init(void *cif_handler, void *cif_stack)
 	if (!prom_chosen_node || prom_chosen_node == -1)
 		prom_halt();
 
-	prom_stdin = prom_getint(prom_chosen_node, "stdin");
 	prom_stdout = prom_getint(prom_chosen_node, "stdout");
 
 	node = prom_finddevice("/openprom");
diff --git a/arch/sparc/prom/misc_32.c b/arch/sparc/prom/misc_32.c
index 4d61c540bb3d..8c278c311ba4 100644
--- a/arch/sparc/prom/misc_32.c
+++ b/arch/sparc/prom/misc_32.c
@@ -70,7 +70,7 @@ prom_cmdline(void)
 /* Drop into the prom, but completely terminate the program.
  * No chance of continuing.
  */
-void
+void __noreturn
 prom_halt(void)
 {
 	unsigned long flags;
diff --git a/arch/sparc/prom/mp.c b/arch/sparc/prom/mp.c
index 4c4dc79f65af..97c44c9ddbc8 100644
--- a/arch/sparc/prom/mp.c
+++ b/arch/sparc/prom/mp.c
@@ -41,81 +41,3 @@ prom_startcpu(int cpunode, struct linux_prom_registers *ctable_reg, int ctx, cha
 
 	return ret;
 }
-
-/* Stop CPU with device prom-tree node 'cpunode'.
- * XXX Again, what does the return value really mean? XXX
- */
-int
-prom_stopcpu(int cpunode)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&prom_lock, flags);
-	switch(prom_vers) {
-	case PROM_V0:
-	case PROM_V2:
-	default:
-		ret = -1;
-		break;
-	case PROM_V3:
-		ret = (*(romvec->v3_cpustop))(cpunode);
-		break;
-	};
-	restore_current();
-	spin_unlock_irqrestore(&prom_lock, flags);
-
-	return ret;
-}
-
-/* Make CPU with device prom-tree node 'cpunode' idle.
- * XXX Return value, anyone? XXX
- */
-int
-prom_idlecpu(int cpunode)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&prom_lock, flags);
-	switch(prom_vers) {
-	case PROM_V0:
-	case PROM_V2:
-	default:
-		ret = -1;
-		break;
-	case PROM_V3:
-		ret = (*(romvec->v3_cpuidle))(cpunode);
-		break;
-	};
-	restore_current();
-	spin_unlock_irqrestore(&prom_lock, flags);
-
-	return ret;
-}
-
-/* Resume the execution of CPU with nodeid 'cpunode'.
- * XXX Come on, somebody has to know... XXX
- */
-int
-prom_restartcpu(int cpunode)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&prom_lock, flags);
-	switch(prom_vers) {
-	case PROM_V0:
-	case PROM_V2:
-	default:
-		ret = -1;
-		break;
-	case PROM_V3:
-		ret = (*(romvec->v3_cpuresume))(cpunode);
-		break;
-	};
-	restore_current();
-	spin_unlock_irqrestore(&prom_lock, flags);
-
-	return ret;
-}
diff --git a/arch/sparc/prom/palloc.c b/arch/sparc/prom/palloc.c
deleted file mode 100644
index 2e2a88b211fb..000000000000
--- a/arch/sparc/prom/palloc.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * palloc.c:  Memory allocation from the Sun PROM.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-
-/* You should not call these routines after memory management
- * has been initialized in the kernel, if fact you should not
- * use these if at all possible in the kernel.  They are mainly
- * to be used for a bootloader for temporary allocations which
- * it will free before jumping into the kernel it has loaded.
- *
- * Also, these routines don't work on V0 proms, only V2 and later.
- */
-
-/* Allocate a chunk of memory of size 'num_bytes' giving a suggestion
- * of virtual_hint as the preferred virtual base address of this chunk.
- * There are no guarantees that you will get the allocation, or that
- * the prom will abide by your "hint".  So check your return value.
- */
-char *
-prom_alloc(char *virtual_hint, unsigned int num_bytes)
-{
-	if(prom_vers == PROM_V0) return (char *) 0x0;
-	if(num_bytes == 0x0) return (char *) 0x0;
-	return (*(romvec->pv_v2devops.v2_dumb_mem_alloc))(virtual_hint, num_bytes);
-}
-
-/* Free a previously allocated chunk back to the prom at virtual address
- * 'vaddr' of size 'num_bytes'.  NOTE: This vaddr is not the hint you
- * used for the allocation, but the virtual address the prom actually
- * returned to you.  They may be have been the same, they may have not,
- * doesn't matter.
- */
-void
-prom_free(char *vaddr, unsigned int num_bytes)
-{
-	if((prom_vers == PROM_V0) || (num_bytes == 0x0)) return;
-	(*(romvec->pv_v2devops.v2_dumb_mem_free))(vaddr, num_bytes);
-}
diff --git a/arch/sparc/prom/ranges.c b/arch/sparc/prom/ranges.c
index 541fc829c207..0857aa9e839d 100644
--- a/arch/sparc/prom/ranges.c
+++ b/arch/sparc/prom/ranges.c
@@ -13,8 +13,8 @@
 #include <asm/types.h>
 #include <asm/system.h>
 
-struct linux_prom_ranges promlib_obio_ranges[PROMREG_MAX];
-int num_obio_ranges;
+static struct linux_prom_ranges promlib_obio_ranges[PROMREG_MAX];
+static int num_obio_ranges;
 
 /* Adjust register values based upon the ranges parameters. */
 static void
@@ -35,7 +35,7 @@ prom_adjust_regs(struct linux_prom_registers *regp, int nregs,
 	}
 }
 
-void
+static void
 prom_adjust_ranges(struct linux_prom_ranges *ranges1, int nranges1,
 		   struct linux_prom_ranges *ranges2, int nranges2)
 {
diff --git a/arch/sparc/prom/tree_32.c b/arch/sparc/prom/tree_32.c
index 535e2e69ac1d..bc8e4cb87a68 100644
--- a/arch/sparc/prom/tree_32.c
+++ b/arch/sparc/prom/tree_32.c
@@ -20,7 +20,7 @@ extern void restore_current(void);
 static char promlib_buf[128];
 
 /* Internal version of prom_getchild that does not alter return values. */
-phandle __prom_getchild(phandle node)
+static phandle __prom_getchild(phandle node)
 {
 	unsigned long flags;
 	phandle cnode;
@@ -52,7 +52,7 @@ phandle prom_getchild(phandle node)
 EXPORT_SYMBOL(prom_getchild);
 
 /* Internal version of prom_getsibling that does not alter return values. */
-phandle __prom_getsibling(phandle node)
+static phandle __prom_getsibling(phandle node)
 {
 	unsigned long flags;
 	phandle cnode;
@@ -177,20 +177,6 @@ void prom_getstring(phandle node, char *prop, char *user_buf, int ubuf_size)
 EXPORT_SYMBOL(prom_getstring);
 
 
-/* Does the device at node 'node' have name 'name'?
- * YES = 1   NO = 0
- */
-int prom_nodematch(phandle node, char *name)
-{
-	int error;
-
-	static char namebuf[128];
-	error = prom_getproperty(node, "name", namebuf, sizeof(namebuf));
-	if (error == -1) return 0;
-	if(strcmp(namebuf, name) == 0) return 1;
-	return 0;
-}
-
 /* Search siblings at 'node_start' for a node with name
  * 'nodename'.  Return node if successful, zero if not.
  */
@@ -214,7 +200,7 @@ phandle prom_searchsiblings(phandle node_start, char *nodename)
 EXPORT_SYMBOL(prom_searchsiblings);
 
 /* Interal version of nextprop that does not alter return values. */
-char *__prom_nextprop(phandle node, char * oprop)
+static char *__prom_nextprop(phandle node, char * oprop)
 {
 	unsigned long flags;
 	char *prop;
@@ -227,17 +213,6 @@ char *__prom_nextprop(phandle node, char * oprop)
 	return prop;
 }
 
-/* Return the first property name for node 'node'. */
-/* buffer is unused argument, but as v9 uses it, we need to have the same interface */
-char *prom_firstprop(phandle node, char *bufer)
-{
-	if (node == 0 || node == -1)
-		return "";
-
-	return __prom_nextprop(node, "");
-}
-EXPORT_SYMBOL(prom_firstprop);
-
 /* Return the property type string after property type 'oprop'
  * at node 'node' .  Returns empty string if no more
  * property types for this node.
@@ -299,19 +274,6 @@ phandle prom_finddevice(char *name)
 }
 EXPORT_SYMBOL(prom_finddevice);
 
-int prom_node_has_property(phandle node, char *prop)
-{
-	char *current_property = "";
-
-	do {
-		current_property = prom_nextprop(node, current_property, NULL);
-		if(!strcmp(current_property, prop))
-			return 1;
-	} while (*current_property);
-	return 0;
-}
-EXPORT_SYMBOL(prom_node_has_property);
-
 /* Set property 'pname' at node 'node' to value 'value' which has a length
  * of 'size' bytes.  Return the number of bytes the prom accepted.
  */
@@ -320,8 +282,10 @@ int prom_setprop(phandle node, const char *pname, char *value, int size)
 	unsigned long flags;
 	int ret;
 
-	if(size == 0) return 0;
-	if((pname == 0) || (value == 0)) return 0;
+	if (size == 0)
+		return 0;
+	if ((pname == NULL) || (value == NULL))
+		return 0;
 	spin_lock_irqsave(&prom_lock, flags);
 	ret = prom_nodeops->no_setprop(node, pname, value, size);
 	restore_current();
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e330da21b84f..b6fccb07123e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -377,6 +377,18 @@ config X86_ELAN
 
 	  If unsure, choose "PC-compatible" instead.
 
+config X86_INTEL_CE
+	bool "CE4100 TV platform"
+	depends on PCI
+	depends on PCI_GODIRECT
+	depends on X86_32
+	depends on X86_EXTENDED_PLATFORM
+	select X86_REBOOTFIXUPS
+	---help---
+	  Select for the Intel CE media processor (CE4100) SOC.
+	  This option compiles in support for the CE4100 SOC for settop
+	  boxes and media devices.
+
 config X86_MRST
 	bool "Moorestown MID platform"
 	depends on PCI
@@ -385,6 +397,10 @@ config X86_MRST
 	depends on X86_EXTENDED_PLATFORM
 	depends on X86_IO_APIC
 	select APB_TIMER
+	select I2C
+	select SPI
+	select INTEL_SCU_IPC
+	select X86_PLATFORM_DEVICES
 	---help---
 	  Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
 	  Internet Device(MID) platform. Moorestown consists of two chips:
@@ -466,6 +482,19 @@ config X86_ES7000
 	  Support for Unisys ES7000 systems.  Say 'Y' here if this kernel is
 	  supposed to run on an IA32-based Unisys ES7000 system.
 
+config X86_32_IRIS
+	tristate "Eurobraille/Iris poweroff module"
+	depends on X86_32
+	---help---
+	  The Iris machines from EuroBraille do not have APM or ACPI support
+	  to shut themselves down properly.  A special I/O sequence is
+	  needed to do so, which is what this module does at
+	  kernel shutdown.
+
+	  This is only for Iris machines from EuroBraille.
+
+	  If unused, say N.
+
 config SCHED_OMIT_FRAME_POINTER
 	def_bool y
 	prompt "Single-depth WCHAN output"
@@ -1141,16 +1170,16 @@ config NUMA
 comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
 	depends on X86_32 && X86_SUMMIT && (!HIGHMEM64G || !ACPI)
 
-config K8_NUMA
+config AMD_NUMA
 	def_bool y
 	prompt "Old style AMD Opteron NUMA detection"
 	depends on X86_64 && NUMA && PCI
 	---help---
-	  Enable K8 NUMA node topology detection.  You should say Y here if
-	  you have a multi processor AMD K8 system. This uses an old
-	  method to read the NUMA configuration directly from the builtin
-	  Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
-	  instead, which also takes priority if both are compiled in.
+	  Enable AMD NUMA node topology detection.  You should say Y here if
+	  you have a multi processor AMD system. This uses an old method to
+	  read the NUMA configuration directly from the builtin Northbridge
+	  of Opteron. It is recommended to use X86_64_ACPI_NUMA instead,
+	  which also takes priority if both are compiled in.
 
 config X86_64_ACPI_NUMA
 	def_bool y
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index b59ee765414e..45143bbcfe5e 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -117,6 +117,17 @@ config DEBUG_RODATA_TEST
 	  feature as well as for the change_page_attr() infrastructure.
 	  If in doubt, say "N"
 
+config DEBUG_SET_MODULE_RONX
+	bool "Set loadable kernel module data as NX and text as RO"
+	depends on MODULES
+	---help---
+	  This option helps catch unintended modifications to loadable
+	  kernel module's text and read-only data. It also prevents execution
+	  of module data. Such protection may interfere with run-time code
+	  patching and dynamic kernel tracing - and they might also protect
+	  against certain classes of kernel exploits.
+	  If in doubt, say "N".
+
 config DEBUG_NX_TEST
 	tristate "Testcase for the NX non-executable stack feature"
 	depends on DEBUG_KERNEL && m
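DEBUG_SET_MODULE_RONX works by flipping page protections on a module's mappings once it is loaded. A hedged sketch of the mechanism using the existing x86 page-attribute helpers (example_protect_module is hypothetical, not the actual module-loader code, and assumes page-aligned regions):

#include <asm/cacheflush.h>	/* set_memory_ro(), set_memory_nx() */

static void example_protect_module(unsigned long text, int text_pages,
				   unsigned long data, int data_pages)
{
	set_memory_ro(text, text_pages);	/* catch stray writes to text */
	set_memory_nx(data, data_pages);	/* refuse execution from data */
}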
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 52f85a196fa0..35af09d13dc1 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -182,7 +182,7 @@ no_longmode:
 	hlt
 	jmp	1b
 
-#include "../../kernel/verify_cpu_64.S"
+#include "../../kernel/verify_cpu.S"
 
 	/*
 	 * Be careful here startup_64 needs to be at a predictable
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 76561d20ea2f..13009d1af99a 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -66,6 +66,7 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
 extern void alternatives_smp_module_del(struct module *mod);
 extern void alternatives_smp_switch(int smp);
 extern int alternatives_text_reserved(void *start, void *end);
+extern bool skip_smp_alternatives;
 #else
 static inline void alternatives_smp_module_add(struct module *mod, char *name,
 					       void *locks, void *locks_end,
@@ -180,8 +181,15 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
  * On the local CPU you need to be protected again NMI or MCE handlers seeing an
  * inconsistent instruction while you patch.
  */
+struct text_poke_param {
+	void *addr;
+	const void *opcode;
+	size_t len;
+};
+
 extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
+extern void text_poke_smp_batch(struct text_poke_param *params, int n);
 
 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
 #define IDEAL_NOP_SIZE_5 5
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index c8517f81b21e..6aee50d655d1 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -3,36 +3,53 @@
 
 #include <linux/pci.h>
 
-extern struct pci_device_id k8_nb_ids[];
+extern struct pci_device_id amd_nb_misc_ids[];
 struct bootnode;
 
-extern int early_is_k8_nb(u32 value);
-extern int cache_k8_northbridges(void);
-extern void k8_flush_garts(void);
-extern int k8_get_nodes(struct bootnode *nodes);
-extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
-extern int k8_scan_nodes(void);
+extern int early_is_amd_nb(u32 value);
+extern int amd_cache_northbridges(void);
+extern void amd_flush_garts(void);
+extern int amd_get_nodes(struct bootnode *nodes);
+extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
+extern int amd_scan_nodes(void);
 
-struct k8_northbridge_info {
+struct amd_northbridge {
+	struct pci_dev *misc;
+};
+
+struct amd_northbridge_info {
 	u16 num;
-	u8 gart_supported;
-	struct pci_dev **nb_misc;
+	u64 flags;
+	struct amd_northbridge *nb;
 };
-extern struct k8_northbridge_info k8_northbridges;
+extern struct amd_northbridge_info amd_northbridges;
+
+#define AMD_NB_GART			0x1
+#define AMD_NB_L3_INDEX_DISABLE		0x2
 
 #ifdef CONFIG_AMD_NB
 
-static inline struct pci_dev *node_to_k8_nb_misc(int node)
+static inline int amd_nb_num(void)
 {
-	return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL;
+	return amd_northbridges.num;
 }
 
-#else
+static inline int amd_nb_has_feature(int feature)
+{
+	return ((amd_northbridges.flags & feature) == feature);
+}
 
-static inline struct pci_dev *node_to_k8_nb_misc(int node)
+static inline struct amd_northbridge *node_to_amd_nb(int node)
 {
-	return NULL;
+	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
 }
+
+#else
+
+#define amd_nb_num(x)		0
+#define amd_nb_has_feature(x)	false
+#define node_to_amd_nb(x)	NULL
+
 #endif
 
 
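A sketch of how a caller might use the renamed interface (hypothetical function; assumes CONFIG_AMD_NB and that amd_cache_northbridges() has already run):

#include <asm/amd_nb.h>

static void example_walk_northbridges(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;				/* no GART on this system */

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *misc = node_to_amd_nb(i)->misc;

		/* ... program the misc PCI function for node i ... */
		(void)misc;
	}
}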
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index f6ce0bda3b98..cf12007796db 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -238,6 +238,7 @@ extern void setup_boot_APIC_clock(void);
 extern void setup_secondary_APIC_clock(void);
 extern int APIC_init_uniprocessor(void);
 extern void enable_NMI_through_LVT0(void);
+extern int apic_force_enable(void);
 
 /*
  * On 32bit this is mach-xxx local
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index a859ca461fb0..47a30ff8e517 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -145,6 +145,7 @@
 
 #ifdef CONFIG_X86_32
 # define MAX_IO_APICS 64
+# define MAX_LOCAL_APIC 256
 #else
 # define MAX_IO_APICS 128
 # define MAX_LOCAL_APIC 32768
diff --git a/arch/x86/include/asm/bootparam.h b/arch/x86/include/asm/bootparam.h
index 8e6218550e77..c8bfe63a06de 100644
--- a/arch/x86/include/asm/bootparam.h
+++ b/arch/x86/include/asm/bootparam.h
@@ -124,6 +124,7 @@ enum {
 	X86_SUBARCH_LGUEST,
 	X86_SUBARCH_XEN,
 	X86_SUBARCH_MRST,
+	X86_SUBARCH_CE4100,
 	X86_NR_SUBARCHS,
 };
 
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 9479a037419f..0141b234406f 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -117,6 +117,10 @@ enum fixed_addresses {
 	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
 	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
 	__end_of_permanent_fixed_addresses,
+
+#ifdef CONFIG_X86_MRST
+	FIX_LNW_VRTC,
+#endif
 	/*
 	 * 256 temporary boot-time mappings, used by early_ioremap(),
 	 * before ioremap() is functional.
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 4aa2bb3b242a..ef328901c802 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -93,6 +93,17 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 	int err;
 
 	/* See comment in fxsave() below. */
+#ifdef CONFIG_AS_FXSAVEQ
+	asm volatile("1:  fxrstorq %[fx]\n\t"
+		     "2:\n"
+		     ".section .fixup,\"ax\"\n"
+		     "3:  movl $-1,%[err]\n"
+		     "    jmp  2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [err] "=r" (err)
+		     : [fx] "m" (*fx), "0" (0));
+#else
 	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
 		     "2:\n"
 		     ".section .fixup,\"ax\"\n"
@@ -102,6 +113,7 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 		     _ASM_EXTABLE(1b, 3b)
 		     : [err] "=r" (err)
 		     : [fx] "R" (fx), "m" (*fx), "0" (0));
+#endif
 	return err;
 }
 
@@ -119,6 +131,17 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 		return -EFAULT;
 
 	/* See comment in fxsave() below. */
+#ifdef CONFIG_AS_FXSAVEQ
+	asm volatile("1:  fxsaveq %[fx]\n\t"
+		     "2:\n"
+		     ".section .fixup,\"ax\"\n"
+		     "3:  movl $-1,%[err]\n"
+		     "    jmp  2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [err] "=r" (err), [fx] "=m" (*fx)
+		     : "0" (0));
+#else
 	asm volatile("1:  rex64/fxsave (%[fx])\n\t"
 		     "2:\n"
 		     ".section .fixup,\"ax\"\n"
@@ -128,6 +151,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 		     _ASM_EXTABLE(1b, 3b)
 		     : [err] "=r" (err), "=m" (*fx)
 		     : [fx] "R" (fx), "0" (0));
+#endif
 	if (unlikely(err) &&
 		__clear_user(fx, sizeof(struct i387_fxsave_struct)))
 		err = -EFAULT;
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index a6b28d017c2f..0c5ca4e30d7b 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -159,7 +159,7 @@ struct io_apic_irq_attr;
 extern int io_apic_set_pci_routing(struct device *dev, int irq,
 				   struct io_apic_irq_attr *irq_attr);
 void setup_IO_APIC_irq_extra(u32 gsi);
-extern void ioapic_init_mappings(void);
+extern void ioapic_and_gsi_init(void);
 extern void ioapic_insert_resources(void);
 
 extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
@@ -168,10 +168,9 @@ extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
 extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
 extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
 
-extern void probe_nr_irqs_gsi(void);
 extern int get_nr_irqs_gsi(void);
-
 extern void setup_ioapic_ids_from_mpc(void);
+extern void setup_ioapic_ids_from_mpc_nocheck(void);
 
 struct mp_ioapic_gsi{
 	u32 gsi_base;
@@ -189,9 +188,8 @@ extern void __init pre_init_apic_IRQ0(void);
 #define io_apic_assign_pci_irqs 0
 #define setup_ioapic_ids_from_mpc x86_init_noop
 static const int timer_through_8259 = 0;
-static inline void ioapic_init_mappings(void)	{ }
+static inline void ioapic_and_gsi_init(void) { }
 static inline void ioapic_insert_resources(void) { }
-static inline void probe_nr_irqs_gsi(void) { }
 #define gsi_top (NR_IRQS_LEGACY)
 static inline int mp_find_ioapic(u32 gsi) { return 0; }
 
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 13b0ebaa512f..ba870bb6dd8e 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -15,10 +15,6 @@ static inline int irq_canonicalize(int irq)
 	return ((irq == 2) ? 9 : irq);
 }
 
-#ifdef CONFIG_X86_LOCAL_APIC
-# define ARCH_HAS_NMI_WATCHDOG
-#endif
-
 #ifdef CONFIG_X86_32
 extern void irq_ctx_init(int cpu);
 #else
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index 5bdfca86581b..f23eb2528464 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -28,7 +28,7 @@ extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_registers(struct pt_regs *regs);
 extern void show_trace(struct task_struct *t, struct pt_regs *regs,
-		       unsigned long *sp, unsigned long bp);
+		       unsigned long *sp);
 extern void __show_regs(struct pt_regs *regs, int all);
 extern void show_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index c62c13cb9788..eb16e94ae04f 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -223,6 +223,9 @@ void intel_init_thermal(struct cpuinfo_x86 *c);
 
 void mce_log_therm_throt_event(__u64 status);
 
+/* Interrupt Handler for core thermal thresholds */
+extern int (*platform_thermal_notify)(__u64 msr_val);
+
 #ifdef CONFIG_X86_THERMAL_VECTOR
 extern void mcheck_intel_therm_init(void);
 #else
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index ef51b501e22a..24215072d0e1 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -48,6 +48,12 @@ static inline struct microcode_ops * __init init_intel_microcode(void)
 
 #ifdef CONFIG_MICROCODE_AMD
 extern struct microcode_ops * __init init_amd_microcode(void);
+
+static inline void get_ucode_data(void *to, const u8 *from, size_t n)
+{
+	memcpy(to, from, n);
+}
+
 #else
 static inline struct microcode_ops * __init init_amd_microcode(void)
 {
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index c82868e9f905..0c90dd9f0505 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -5,8 +5,9 @@
 
 #include <asm/mpspec_def.h>
 #include <asm/x86_init.h>
+#include <asm/apicdef.h>
 
-extern int apic_version[MAX_APICS];
+extern int apic_version[];
 extern int pic_mode;
 
 #ifdef CONFIG_X86_32
@@ -107,7 +108,7 @@ extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
 			 int active_high_low);
 #endif /* CONFIG_ACPI */
 
-#define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_APICS)
+#define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_LOCAL_APIC)
 
 struct physid_mask {
 	unsigned long mask[PHYSID_ARRAY_SIZE];
@@ -122,31 +123,31 @@ typedef struct physid_mask physid_mask_t;
 	test_and_set_bit(physid, (map).mask)
 
 #define physids_and(dst, src1, src2)				\
-	bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
+	bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_LOCAL_APIC)
 
 #define physids_or(dst, src1, src2)				\
-	bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
+	bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_LOCAL_APIC)
 
 #define physids_clear(map)					\
-	bitmap_zero((map).mask, MAX_APICS)
+	bitmap_zero((map).mask, MAX_LOCAL_APIC)
 
 #define physids_complement(dst, src)				\
-	bitmap_complement((dst).mask, (src).mask, MAX_APICS)
+	bitmap_complement((dst).mask, (src).mask, MAX_LOCAL_APIC)
 
 #define physids_empty(map)					\
-	bitmap_empty((map).mask, MAX_APICS)
+	bitmap_empty((map).mask, MAX_LOCAL_APIC)
 
 #define physids_equal(map1, map2)				\
-	bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
+	bitmap_equal((map1).mask, (map2).mask, MAX_LOCAL_APIC)
 
 #define physids_weight(map)					\
-	bitmap_weight((map).mask, MAX_APICS)
+	bitmap_weight((map).mask, MAX_LOCAL_APIC)
 
 #define physids_shift_right(d, s, n)				\
-	bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
+	bitmap_shift_right((d).mask, (s).mask, n, MAX_LOCAL_APIC)
 
 #define physids_shift_left(d, s, n)				\
-	bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
+	bitmap_shift_left((d).mask, (s).mask, n, MAX_LOCAL_APIC)
 
 static inline unsigned long physids_coerce(physid_mask_t *map)
 {
@@ -159,14 +160,6 @@ static inline void physids_promote(unsigned long physids, physid_mask_t *map)
 	map->mask[0] = physids;
 }
 
-/* Note: will create very large stack frames if physid_mask_t is big */
-#define physid_mask_of_physid(physid)					\
-	({								\
-		physid_mask_t __physid_mask = PHYSID_MASK_NONE;		\
-		physid_set(physid, __physid_mask);			\
-		__physid_mask;						\
-	})
-
 static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
 {
 	physids_clear(*map);
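Call sites of the removed physid_mask_of_physid() macro have to switch to the in-place helper, which avoids copying a MAX_LOCAL_APIC-bit mask (now up to 32768 bits) through the stack. A hypothetical before/after:

#include <asm/mpspec.h>

static void example_mark_apicid(int apicid, physid_mask_t *out)
{
	/* Old (removed): *out = physid_mask_of_physid(apicid);
	 * copied the whole mask by value through the stack frame.
	 */
	physid_set_mask_of_physid(apicid, out);	/* clears, then sets one bit */
}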
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index 4a7f96d7c188..c0a955a9a087 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -15,13 +15,6 @@
 
 #ifdef CONFIG_X86_32
 # define MAX_MPC_ENTRY 1024
-# define MAX_APICS      256
-#else
-# if NR_CPUS <= 255
-#  define MAX_APICS    255
-# else
-#  define MAX_APICS  32768
-# endif
 #endif
 
 /* Intel MP Floating Pointer Structure */
diff --git a/arch/x86/include/asm/mrst-vrtc.h b/arch/x86/include/asm/mrst-vrtc.h
new file mode 100644
index 000000000000..73668abdbedf
--- /dev/null
+++ b/arch/x86/include/asm/mrst-vrtc.h
@@ -0,0 +1,9 @@
+#ifndef _MRST_VRTC_H
+#define _MRST_VRTC_H
+
+extern unsigned char vrtc_cmos_read(unsigned char reg);
+extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
+extern unsigned long vrtc_get_time(void);
+extern int vrtc_set_mmss(unsigned long nowtime);
+
+#endif
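A hypothetical consumer of the new vRTC interface, reading the minutes register, which sits at the same offset as on legacy CMOS:

#include <asm/mrst-vrtc.h>

static unsigned char example_read_minutes(void)
{
	return vrtc_cmos_read(0x02);	/* RTC minutes offset, as on CMOS */
}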
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index 4a711a684b17..719f00b28ff5 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -14,7 +14,9 @@
 #include <linux/sfi.h>
 
 extern int pci_mrst_init(void);
-int __init sfi_parse_mrtc(struct sfi_table_header *table);
+extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
+extern int sfi_mrtc_num;
+extern struct sfi_rtc_table_entry sfi_mrtc_array[];
 
 /*
  * Medfield is the follow-up of Moorestown, it combines two chip solution into
@@ -50,4 +52,14 @@ extern void mrst_early_console_init(void);
 
 extern struct console early_hsu_console;
 extern void hsu_early_console_init(void);
+
+extern void intel_scu_devices_create(void);
+extern void intel_scu_devices_destroy(void);
+
+/* VRTC timer */
+#define MRST_VRTC_MAP_SZ	(1024)
+/*#define MRST_VRTC_PGOFFSET	(0xc00) */
+
+extern void mrst_rtc_init(void);
+
 #endif /* _ASM_X86_MRST_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 6b89f5e86021..4d0dfa0d998e 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -123,6 +123,10 @@
 #define MSR_AMD64_IBSCTL		0xc001103a
 #define MSR_AMD64_IBSBRTARGET		0xc001103b
 
+/* Fam 15h MSRs */
+#define MSR_F15H_PERF_CTL		0xc0010200
+#define MSR_F15H_PERF_CTR		0xc0010201
+
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE	0xc0010058
 #define FAM10H_MMIO_CONF_ENABLE		(1<<0)
@@ -253,6 +257,18 @@
 #define PACKAGE_THERM_INT_LOW_ENABLE		(1 << 1)
 #define PACKAGE_THERM_INT_PLN_ENABLE		(1 << 24)
 
+/* Thermal Thresholds Support */
+#define THERM_INT_THRESHOLD0_ENABLE	(1 << 15)
+#define THERM_SHIFT_THRESHOLD0		8
+#define THERM_MASK_THRESHOLD0		(0x7f << THERM_SHIFT_THRESHOLD0)
+#define THERM_INT_THRESHOLD1_ENABLE	(1 << 23)
+#define THERM_SHIFT_THRESHOLD1		16
+#define THERM_MASK_THRESHOLD1		(0x7f << THERM_SHIFT_THRESHOLD1)
+#define THERM_STATUS_THRESHOLD0	(1 << 6)
+#define THERM_LOG_THRESHOLD0		(1 << 7)
+#define THERM_STATUS_THRESHOLD1	(1 << 8)
+#define THERM_LOG_THRESHOLD1		(1 << 9)
+
 /* MISC_ENABLE bits: architectural */
 #define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)
 #define MSR_IA32_MISC_ENABLE_TCC		(1ULL << 1)
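Each thermal threshold above is a 7-bit field inside IA32_THERM_INTERRUPT, and the shift/mask pairs decode it. A standalone sketch of the decode (the sample MSR value is made up):

#include <stdio.h>

#define THERM_SHIFT_THRESHOLD0	8
#define THERM_MASK_THRESHOLD0	(0x7f << THERM_SHIFT_THRESHOLD0)

int main(void)
{
	unsigned long long msr = 0x1500ULL;	/* hypothetical register value */
	unsigned int t0 = (unsigned int)
		((msr & THERM_MASK_THRESHOLD0) >> THERM_SHIFT_THRESHOLD0);

	printf("threshold0 field = %u\n", t0);	/* prints 21 */
	return 0;
}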
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 932f0f86b4b7..c4021b953510 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -5,41 +5,15 @@
 #include <asm/irq.h>
 #include <asm/io.h>
 
-#ifdef ARCH_HAS_NMI_WATCHDOG
-
-/**
- * do_nmi_callback
- *
- * Check to see if a callback exists and execute it.  Return 1
- * if the handler exists and was handled successfully.
- */
-int do_nmi_callback(struct pt_regs *regs, int cpu);
+#ifdef CONFIG_X86_LOCAL_APIC
 
 extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
-extern int check_nmi_watchdog(void);
-#if !defined(CONFIG_LOCKUP_DETECTOR)
-extern int nmi_watchdog_enabled;
-#endif
 extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
 extern int reserve_perfctr_nmi(unsigned int);
 extern void release_perfctr_nmi(unsigned int);
 extern int reserve_evntsel_nmi(unsigned int);
 extern void release_evntsel_nmi(unsigned int);
 
-extern void setup_apic_nmi_watchdog(void *);
-extern void stop_apic_nmi_watchdog(void *);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
-extern void cpu_nmi_set_wd_enabled(void);
-
-extern atomic_t nmi_active;
-extern unsigned int nmi_watchdog;
-#define NMI_NONE	0
-#define NMI_IO_APIC	1
-#define NMI_LOCAL_APIC	2
-#define NMI_INVALID	3
-
 struct ctl_table;
 extern int proc_nmi_enabled(struct ctl_table *, int ,
 			void __user *, size_t *, loff_t *);
@@ -47,33 +21,8 @@ extern int unknown_nmi_panic;
 
 void arch_trigger_all_cpu_backtrace(void);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-
-static inline void localise_nmi_watchdog(void)
-{
-	if (nmi_watchdog == NMI_IO_APIC)
-		nmi_watchdog = NMI_LOCAL_APIC;
-}
-
-/* check if nmi_watchdog is active (ie was specified at boot) */
-static inline int nmi_watchdog_active(void)
-{
-	/*
-	 * actually it should be:
-	 *	return (nmi_watchdog == NMI_LOCAL_APIC ||
-	 *		nmi_watchdog == NMI_IO_APIC)
-	 * but since they are power of two we could use a
-	 * cheaper way --cvg
-	 */
-	return nmi_watchdog & (NMI_LOCAL_APIC | NMI_IO_APIC);
-}
 #endif
 
-void lapic_watchdog_stop(void);
-int lapic_watchdog_init(unsigned nmi_hz);
-int lapic_wd_event(unsigned nmi_hz);
-unsigned lapic_adjust_nmi_hz(unsigned hz);
-void disable_lapic_nmi_watchdog(void);
-void enable_lapic_nmi_watchdog(void);
 void stop_nmi(void);
 void restart_nmi(void);
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ef9975812c77..7709c12431b8 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -112,7 +112,7 @@ static inline void arch_safe_halt(void)
 
 static inline void halt(void)
 {
-	PVOP_VCALL0(pv_irq_ops.safe_halt);
+	PVOP_VCALL0(pv_irq_ops.halt);
 }
 
 static inline void wbinvd(void)
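Note: this one-liner is a real bug fix, not a rename: halt() had been dispatching to pv_irq_ops.safe_halt, which enables interrupts before halting. On native hardware the two operations differ by a single instruction, essentially as defined in asm/irqflags.h:

	static inline void native_safe_halt(void)
	{
		asm volatile("sti; hlt" : : : "memory");	/* enable IRQs, then halt */
	}

	static inline void native_halt(void)
	{
		asm volatile("hlt" : : : "memory");		/* halt, IRQ state untouched */
	}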
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index ca0437c714b2..676129229630 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -65,6 +65,7 @@ extern unsigned long pci_mem_start;
 
 #define PCIBIOS_MIN_CARDBUS_IO	0x4000
 
+extern int pcibios_enabled;
 void pcibios_config_init(void);
 struct pci_bus *pcibios_scan_root(int bus);
 
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 550e26b1dbb3..d9d4dae305f6 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -125,7 +125,6 @@ union cpuid10_edx {
 #define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
 
 #ifdef CONFIG_PERF_EVENTS
-extern void init_hw_perf_events(void);
 extern void perf_events_lapic_init(void);
 
 #define PERF_EVENT_INDEX_OFFSET			0
@@ -156,7 +155,6 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 }
 
 #else
-static inline void init_hw_perf_events(void)		{ }
 static inline void perf_events_lapic_init(void)	{ }
 #endif
 
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index a70cd216be5d..295e2ff18a6a 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -744,14 +744,6 @@ enum P4_ESCR_EMASKS {
 };
 
 /*
- * P4 PEBS specifics (Replay Event only)
- *
- * Format (bits):
- *   0-6: metric from P4_PEBS_METRIC enum
- *    7 : reserved
- *    8 : reserved
- * 9-11 : reserved
- *
  * Note we have UOP and PEBS bits reserved for now
  * just in case if we will need them once
  */
@@ -788,5 +780,60 @@ enum P4_PEBS_METRIC {
 	P4_PEBS_METRIC__max
 };
 
+/*
+ * Notes on internal configuration of ESCR+CCCR tuples
+ *
+ * Since P4 has quite the different architecture of
+ * performance registers in compare with "architectural"
+ * once and we have on 64 bits to keep configuration
+ * of performance event, the following trick is used.
+ *
+ * 1) Since both ESCR and CCCR registers have only low
+ *    32 bits valuable, we pack them into a single 64 bit
+ *    configuration. Low 32 bits of such config correspond
+ *    to low 32 bits of CCCR register and high 32 bits
+ *    correspond to low 32 bits of ESCR register.
+ *
+ * 2) The meaning of every bit of such config field can
+ *    be found in Intel SDM but it should be noted that
+ *    we "borrow" some reserved bits for own usage and
+ *    clean them or set to a proper value when we do
+ *    a real write to hardware registers.
+ *
+ * 3) The format of bits of config is the following
+ *    and should be either 0 or set to some predefined
+ *    values:
+ *
+ *    Low 32 bits
+ *    -----------
+ *      0-6: P4_PEBS_METRIC enum
+ *     7-11:                    reserved
+ *       12:                    reserved (Enable)
+ *    13-15:                    reserved (ESCR select)
+ *    16-17: Active Thread
+ *       18: Compare
+ *       19: Complement
+ *    20-23: Threshold
+ *       24: Edge
+ *       25:                    reserved (FORCE_OVF)
+ *       26:                    reserved (OVF_PMI_T0)
+ *       27:                    reserved (OVF_PMI_T1)
+ *    28-29:                    reserved
+ *       30:                    reserved (Cascade)
+ *       31:                    reserved (OVF)
+ *
+ *    High 32 bits
+ *    ------------
+ *        0:                    reserved (T1_USR)
+ *        1:                    reserved (T1_OS)
+ *        2:                    reserved (T0_USR)
+ *        3:                    reserved (T0_OS)
+ *        4: Tag Enable
+ *      5-8: Tag Value
+ *     9-24: Event Mask (may use P4_ESCR_EMASK_BIT helper)
+ *    25-30: enum P4_EVENTS
+ *       31:                    reserved (HT thread)
+ */
+
 #endif /* PERF_EVENT_P4_H */
 
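Note: given the packed layout documented above (CCCR in the low 32 bits, ESCR in the high 32), the two halves can be recovered with plain shifts and masks. An illustrative sketch, written from that description:

	/* split a packed 64-bit P4 event config into its halves */
	#define p4_config_unpack_cccr(v)	(((u64)(v)) & 0xffffffffULL)
	#define p4_config_unpack_escr(v)	(((u64)(v)) >> 32)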
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index d6763b139a84..db8aa19a08a2 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -53,6 +53,12 @@ extern void x86_mrst_early_setup(void);
 static inline void x86_mrst_early_setup(void) { }
 #endif
 
+#ifdef CONFIG_X86_INTEL_CE
+extern void x86_ce4100_early_setup(void);
+#else
+static inline void x86_ce4100_early_setup(void) { }
+#endif
+
 #ifndef _SETUP
 
 /*
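Note: the added block is the usual config-gated stub idiom, so callers can invoke x86_ce4100_early_setup() unconditionally: a real prototype when CONFIG_X86_INTEL_CE is set, an empty inline otherwise. The same idiom in generic form (names hypothetical):

	#ifdef CONFIG_MY_FEATURE
	extern void my_feature_setup(void);
	#else
	static inline void my_feature_setup(void) { }
	#endif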
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 1def60114906..6c22bf353f26 100644
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -48,7 +48,6 @@ static inline void __init smpboot_setup_io_apic(void)
 		setup_IO_APIC();
 	else {
 		nr_ioapics = 0;
-		localise_nmi_watchdog();
 	}
 #endif
 }
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 2b16a2ad23dc..52b5c7ed3608 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -7,6 +7,7 @@
 #define _ASM_X86_STACKTRACE_H
 
 #include <linux/uaccess.h>
+#include <linux/ptrace.h>
 
 extern int kstack_depth_to_print;
 
@@ -46,7 +47,7 @@ struct stacktrace_ops {
 };
 
 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
-		unsigned long *stack, unsigned long bp,
+		unsigned long *stack,
 		const struct stacktrace_ops *ops, void *data);
 
 #ifdef CONFIG_X86_32
@@ -57,13 +58,39 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
 #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
 #endif
 
+#ifdef CONFIG_FRAME_POINTER
+static inline unsigned long
+stack_frame(struct task_struct *task, struct pt_regs *regs)
+{
+	unsigned long bp;
+
+	if (regs)
+		return regs->bp;
+
+	if (task == current) {
+		/* Grab bp right from our regs */
+		get_bp(bp);
+		return bp;
+	}
+
+	/* bp is the last reg pushed by switch_to */
+	return *(unsigned long *)task->thread.sp;
+}
+#else
+static inline unsigned long
+stack_frame(struct task_struct *task, struct pt_regs *regs)
+{
+	return 0;
+}
+#endif
+
 extern void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-		   unsigned long *stack, unsigned long bp, char *log_lvl);
+		   unsigned long *stack, char *log_lvl);
 
 extern void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-		   unsigned long *sp, unsigned long bp, char *log_lvl);
+		   unsigned long *sp, char *log_lvl);
 
 extern unsigned int code_bytes;
 
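Note: the bp parameter disappears from dump_trace(), show_trace_log_lvl() and show_stack_log_lvl() because the new stack_frame() helper derives the frame pointer itself: from regs if given, from the live rbp/ebp for the current task, or from the saved thread.sp otherwise. A sketch of what replaces a former hand-threaded bp argument (illustrative only):

	unsigned long bp = stack_frame(current, regs);	/* regs->bp, our rbp, or task->thread.sp */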
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 5469630b27f5..fa7b9176b76c 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -10,12 +10,6 @@
 unsigned long long native_sched_clock(void);
 extern int recalibrate_cpu_khz(void);
 
-#if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
-extern int timer_ack;
-#else
-# define timer_ack (0)
-#endif
-
 extern int no_timer_check;
 
 /* Accelerators for sched_clock()
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 42d412fd8b02..ce1d54c8a433 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -26,20 +26,22 @@
  * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512,
  * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
  *
- * We will use 31 sets, one for sending BAU messages from each of the 32
+ * We will use one set for sending BAU messages from each of the
  * cpu's on the uvhub.
  *
  * TLB shootdown will use the first of the 8 descriptors of each set.
  * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
  */
 
+#define MAX_CPUS_PER_UVHUB		64
+#define MAX_CPUS_PER_SOCKET		32
+#define UV_ADP_SIZE			64 /* hardware-provided max. */
+#define UV_CPUS_PER_ACT_STATUS		32 /* hardware-provided max. */
 #define UV_ITEMS_PER_DESCRIPTOR		8
 /* the 'throttle' to prevent the hardware stay-busy bug */
 #define MAX_BAU_CONCURRENT		3
-#define UV_CPUS_PER_ACT_STATUS		32
 #define UV_ACT_STATUS_MASK		0x3
 #define UV_ACT_STATUS_SIZE		2
-#define UV_ADP_SIZE			32
 #define UV_DISTRIBUTION_SIZE		256
 #define UV_SW_ACK_NPENDING		8
 #define UV_NET_ENDPOINT_INTD		0x38
@@ -100,7 +102,6 @@
  * number of destination side software ack resources
  */
 #define DEST_NUM_RESOURCES		8
-#define MAX_CPUS_PER_NODE		32
 /*
  * completion statuses for sending a TLB flush message
  */
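Note: the constants imply simple addressing arithmetic: each activation descriptor set holds UV_ITEMS_PER_DESCRIPTOR (8) descriptors of 64 bytes each, i.e. 512 bytes per set, one set per cpu on the uvhub (up to MAX_CPUS_PER_UVHUB of them). A worked example, not code from this patch:

	/* byte offset of cpu N's descriptor set from BAU_SB_DESCRIPTOR_BASE */
	unsigned long offset = cpu * UV_ITEMS_PER_DESCRIPTOR * 64;	/* cpu * 512 */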
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 1e994754d323..34244b2cd880 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -85,7 +85,6 @@ obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_VM86)		+= vm86_32.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
-obj-$(CONFIG_EARLY_PRINTK_MRST)	+= early_printk_mrst.o
 
 obj-$(CONFIG_HPET_TIMER)	+= hpet.o
 obj-$(CONFIG_APB_TIMER)		+= apb_timer.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 71232b941b6c..17c8090fabd4 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -198,6 +198,11 @@ static void __cpuinit acpi_register_lapic(int id, u8 enabled)
 {
 	unsigned int ver = 0;
 
+	if (id >= (MAX_LOCAL_APIC-1)) {
+		printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
+		return;
+	}
+
 	if (!enabled) {
 		++disabled_cpus;
 		return;
@@ -910,13 +915,13 @@ static int __init acpi_parse_madt_lapic_entries(void)
 	acpi_register_lapic_address(acpi_lapic_addr);
 
 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
-				      acpi_parse_sapic, MAX_APICS);
+				      acpi_parse_sapic, MAX_LOCAL_APIC);
 
 	if (!count) {
 		x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
-					acpi_parse_x2apic, MAX_APICS);
+					acpi_parse_x2apic, MAX_LOCAL_APIC);
 		count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
-					acpi_parse_lapic, MAX_APICS);
+					acpi_parse_lapic, MAX_LOCAL_APIC);
 	}
 	if (!count && !x2count) {
 		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
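Note: both hunks are bounded by the same array: apic_version[] is sized MAX_LOCAL_APIC (see the apic.c hunk further down), so the id guard and the MADT parse limits must use the same constant. The guard is deliberately conservative: with id >= (MAX_LOCAL_APIC-1) rejected, the largest apicid ever used to index the array is MAX_LOCAL_APIC-2.

	int apic_version[MAX_LOCAL_APIC];	/* valid indices: 0 .. MAX_LOCAL_APIC-1 */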
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 5079f24c955a..123608531c8f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -353,6 +353,7 @@ void __init_or_module alternatives_smp_module_del(struct module *mod)
 	mutex_unlock(&smp_alt);
 }
 
+bool skip_smp_alternatives;
 void alternatives_smp_switch(int smp)
 {
 	struct smp_alt_module *mod;
@@ -368,7 +369,7 @@ void alternatives_smp_switch(int smp)
 	printk("lockdep: fixing up alternatives.\n");
 #endif
 
-	if (noreplace_smp || smp_alt_once)
+	if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
 		return;
 	BUG_ON(!smp && (num_online_cpus() > 1));
 
@@ -591,17 +592,21 @@ static atomic_t stop_machine_first;
 static int wrote_text;
 
 struct text_poke_params {
-	void *addr;
-	const void *opcode;
-	size_t len;
+	struct text_poke_param *params;
+	int nparams;
 };
 
 static int __kprobes stop_machine_text_poke(void *data)
 {
 	struct text_poke_params *tpp = data;
+	struct text_poke_param *p;
+	int i;
 
 	if (atomic_dec_and_test(&stop_machine_first)) {
-		text_poke(tpp->addr, tpp->opcode, tpp->len);
+		for (i = 0; i < tpp->nparams; i++) {
+			p = &tpp->params[i];
+			text_poke(p->addr, p->opcode, p->len);
+		}
 		smp_wmb();	/* Make sure other cpus see that this has run */
 		wrote_text = 1;
 	} else {
@@ -610,8 +615,12 @@ static int __kprobes stop_machine_text_poke(void *data)
 		smp_mb();	/* Load wrote_text before following execution */
 	}
 
-	flush_icache_range((unsigned long)tpp->addr,
-			   (unsigned long)tpp->addr + tpp->len);
+	for (i = 0; i < tpp->nparams; i++) {
+		p = &tpp->params[i];
+		flush_icache_range((unsigned long)p->addr,
+				   (unsigned long)p->addr + p->len);
+	}
+
 	return 0;
 }
 
@@ -631,10 +640,13 @@ static int __kprobes stop_machine_text_poke(void *data)
 void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
 {
 	struct text_poke_params tpp;
+	struct text_poke_param p;
 
-	tpp.addr = addr;
-	tpp.opcode = opcode;
-	tpp.len = len;
+	p.addr = addr;
+	p.opcode = opcode;
+	p.len = len;
+	tpp.params = &p;
+	tpp.nparams = 1;
 	atomic_set(&stop_machine_first, 1);
 	wrote_text = 0;
 	/* Use __stop_machine() because the caller already got online_cpus. */
@@ -642,6 +654,26 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
 	return addr;
 }
 
+/**
+ * text_poke_smp_batch - Update instructions on a live kernel on SMP
+ * @params: an array of text_poke parameters
+ * @n: the number of elements in params.
+ *
+ * Modify multi-byte instruction by using stop_machine() on SMP. Since the
+ * stop_machine() is heavy task, it is better to aggregate text_poke requests
+ * and do it once if possible.
+ *
+ * Note: Must be called under get_online_cpus() and text_mutex.
+ */
+void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
+{
+	struct text_poke_params tpp = {.params = params, .nparams = n};
+
+	atomic_set(&stop_machine_first, 1);
+	wrote_text = 0;
+	stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+}
+
 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
 
 #ifdef CONFIG_X86_64
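Note: text_poke_smp_batch() exists so that users with many patch sites (kprobe optimization is the obvious one) pay for a single stop_machine() round instead of one per site. A minimal sketch of a caller, with hypothetical patch sites, honoring the locking rule from the kernel-doc above:

	struct text_poke_param reqs[2] = {
		{ .addr = site1, .opcode = ins1, .len = 5 },	/* hypothetical sites */
		{ .addr = site2, .opcode = ins2, .len = 5 },
	};

	get_online_cpus();
	mutex_lock(&text_mutex);
	text_poke_smp_batch(reqs, 2);	/* one stop_machine() covers both pokes */
	mutex_unlock(&text_mutex);
	put_online_cpus();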
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 8f6463d8ed0d..affacb5e0065 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -12,95 +12,116 @@
 
 static u32 *flush_words;
 
-struct pci_device_id k8_nb_ids[] = {
+struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
 	{}
 };
-EXPORT_SYMBOL(k8_nb_ids);
+EXPORT_SYMBOL(amd_nb_misc_ids);
 
-struct k8_northbridge_info k8_northbridges;
-EXPORT_SYMBOL(k8_northbridges);
+struct amd_northbridge_info amd_northbridges;
+EXPORT_SYMBOL(amd_northbridges);
 
-static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
+static struct pci_dev *next_northbridge(struct pci_dev *dev,
+					struct pci_device_id *ids)
 {
 	do {
 		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
 		if (!dev)
 			break;
-	} while (!pci_match_id(&k8_nb_ids[0], dev));
+	} while (!pci_match_id(ids, dev));
 	return dev;
 }
 
-int cache_k8_northbridges(void)
+int amd_cache_northbridges(void)
 {
-	int i;
-	struct pci_dev *dev;
+	int i = 0;
+	struct amd_northbridge *nb;
+	struct pci_dev *misc;
 
-	if (k8_northbridges.num)
+	if (amd_nb_num())
 		return 0;
 
-	dev = NULL;
-	while ((dev = next_k8_northbridge(dev)) != NULL)
-		k8_northbridges.num++;
+	misc = NULL;
+	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
+		i++;
 
-	/* some CPU families (e.g. family 0x11) do not support GART */
-	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
-	    boot_cpu_data.x86 == 0x15)
-		k8_northbridges.gart_supported = 1;
+	if (i == 0)
+		return 0;
 
-	k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) *
-					  sizeof(void *), GFP_KERNEL);
-	if (!k8_northbridges.nb_misc)
+	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
+	if (!nb)
 		return -ENOMEM;
 
-	if (!k8_northbridges.num) {
-		k8_northbridges.nb_misc[0] = NULL;
-		return 0;
-	}
+	amd_northbridges.nb = nb;
+	amd_northbridges.num = i;
 
-	if (k8_northbridges.gart_supported) {
-		flush_words = kmalloc(k8_northbridges.num * sizeof(u32),
-				      GFP_KERNEL);
-		if (!flush_words) {
-			kfree(k8_northbridges.nb_misc);
-			return -ENOMEM;
-		}
-	}
+	misc = NULL;
+	for (i = 0; i != amd_nb_num(); i++) {
+		node_to_amd_nb(i)->misc = misc =
+			next_northbridge(misc, amd_nb_misc_ids);
+	}
+
+	/* some CPU families (e.g. family 0x11) do not support GART */
+	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
+	    boot_cpu_data.x86 == 0x15)
+		amd_northbridges.flags |= AMD_NB_GART;
+
+	/*
+	 * Some CPU families support L3 Cache Index Disable. There are some
+	 * limitations because of E382 and E388 on family 0x10.
+	 */
+	if (boot_cpu_data.x86 == 0x10 &&
+	    boot_cpu_data.x86_model >= 0x8 &&
+	    (boot_cpu_data.x86_model > 0x9 ||
+	     boot_cpu_data.x86_mask >= 0x1))
+		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
 
-	dev = NULL;
-	i = 0;
-	while ((dev = next_k8_northbridge(dev)) != NULL) {
-		k8_northbridges.nb_misc[i] = dev;
-		if (k8_northbridges.gart_supported)
-			pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
-	}
-	k8_northbridges.nb_misc[i] = NULL;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cache_k8_northbridges);
+EXPORT_SYMBOL_GPL(amd_cache_northbridges);
 
 /* Ignores subdevice/subvendor but as far as I can figure out
    they're useless anyways */
-int __init early_is_k8_nb(u32 device)
+int __init early_is_amd_nb(u32 device)
 {
 	struct pci_device_id *id;
 	u32 vendor = device & 0xffff;
 	device >>= 16;
-	for (id = k8_nb_ids; id->vendor; id++)
+	for (id = amd_nb_misc_ids; id->vendor; id++)
 		if (vendor == id->vendor && device == id->device)
 			return 1;
 	return 0;
 }
 
-void k8_flush_garts(void)
+int amd_cache_gart(void)
+{
+	int i;
+
+	if (!amd_nb_has_feature(AMD_NB_GART))
+		return 0;
+
+	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
+	if (!flush_words) {
+		amd_northbridges.flags &= ~AMD_NB_GART;
+		return -ENOMEM;
+	}
+
+	for (i = 0; i != amd_nb_num(); i++)
+		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
+				      &flush_words[i]);
+
+	return 0;
+}
+
+void amd_flush_garts(void)
 {
 	int flushed, i;
 	unsigned long flags;
 	static DEFINE_SPINLOCK(gart_lock);
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_nb_has_feature(AMD_NB_GART))
 		return;
 
 	/* Avoid races between AGP and IOMMU. In theory it's not needed
@@ -109,16 +130,16 @@ void k8_flush_garts(void)
 	   that it doesn't matter to serialize more. -AK */
 	spin_lock_irqsave(&gart_lock, flags);
 	flushed = 0;
-	for (i = 0; i < k8_northbridges.num; i++) {
-		pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c,
-				       flush_words[i]|1);
+	for (i = 0; i < amd_nb_num(); i++) {
+		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
+				       flush_words[i] | 1);
 		flushed++;
 	}
-	for (i = 0; i < k8_northbridges.num; i++) {
+	for (i = 0; i < amd_nb_num(); i++) {
 		u32 w;
 		/* Make sure the hardware actually executed the flush*/
 		for (;;) {
-			pci_read_config_dword(k8_northbridges.nb_misc[i],
+			pci_read_config_dword(node_to_amd_nb(i)->misc,
 					      0x9c, &w);
 			if (!(w & 1))
 				break;
@@ -129,19 +150,23 @@ void k8_flush_garts(void)
 	if (!flushed)
 		printk("nothing to flush?\n");
 }
-EXPORT_SYMBOL_GPL(k8_flush_garts);
+EXPORT_SYMBOL_GPL(amd_flush_garts);
 
-static __init int init_k8_nbs(void)
+static __init int init_amd_nbs(void)
 {
 	int err = 0;
 
-	err = cache_k8_northbridges();
+	err = amd_cache_northbridges();
 
 	if (err < 0)
-		printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
+		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
+
+	if (amd_cache_gart() < 0)
+		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
+		       "GART support disabled.\n");
 
 	return err;
 }
 
 /* This has to go after the PCI subsystem */
-fs_initcall(init_k8_nbs);
+fs_initcall(init_amd_nbs);
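Note: after the rename, consumers no longer walk a NULL-terminated nb_misc[] array; they go through the accessors. A minimal sketch of a consumer, using only interfaces visible in this patch:

	int i;
	u32 w;

	if (amd_cache_northbridges() < 0)
		return;				/* no AMD northbridges found */

	for (i = 0; i < amd_nb_num(); i++)	/* one entry per node */
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &w);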
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 92543c73cf8e..7c9ab59653e8 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -315,6 +315,7 @@ static void apbt_setup_irq(struct apbt_dev *adev)
 
 	if (system_state == SYSTEM_BOOTING) {
 		irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
+		irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
 		/* APB timer irqs are set up as mp_irqs, timer is edge type */
 		__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
 		if (request_irq(adev->irq, apbt_interrupt_handler,
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index b3a16e8f0703..dcd7c83e1659 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -206,7 +206,7 @@ static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order)
  * Do an PCI bus scan by hand because we're running before the PCI
  * subsystem.
  *
- * All K8 AGP bridges are AGPv3 compliant, so we can do this scan
+ * All AMD AGP bridges are AGPv3 compliant, so we can do this scan
  * generically. It's probably overkill to always scan all slots because
  * the AGP bridges should be always an own bus on the HT hierarchy,
  * but do it here for future safety.
@@ -303,7 +303,7 @@ void __init early_gart_iommu_check(void)
 		dev_limit = bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
@@ -358,7 +358,7 @@ void __init early_gart_iommu_check(void)
 		dev_limit = bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
@@ -400,7 +400,7 @@ int __init gart_iommu_hole_init(void)
 		dev_limit = bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			iommu_detected = 1;
@@ -518,7 +518,7 @@ out:
 		dev_base = bus_dev_ranges[i].dev_base;
 		dev_limit = bus_dev_ranges[i].dev_limit;
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index 910f20b457c4..3966b564ea47 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -3,10 +3,7 @@
 #
 
 obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o apic_noop.o probe_$(BITS).o ipi.o
-ifneq ($(CONFIG_HARDLOCKUP_DETECTOR),y)
-obj-$(CONFIG_X86_LOCAL_APIC)	+= nmi.o
-endif
-obj-$(CONFIG_HARDLOCKUP_DETECTOR) += hw_nmi.o
+obj-y				+= hw_nmi.o
 
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
 obj-$(CONFIG_SMP)		+= ipi.o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 78218135b48e..879999a5230f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -31,7 +31,6 @@
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/dmi.h>
-#include <linux/nmi.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
 
@@ -432,17 +431,18 @@ int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
 	reserved = reserve_eilvt_offset(offset, new);
 
 	if (reserved != new) {
-		pr_err(FW_BUG "cpu %d, try to setup vector 0x%x, but "
-		       "vector 0x%x was already reserved by another core, "
-		       "APIC%lX=0x%x\n",
-		       smp_processor_id(), new, reserved, reg, old);
+		pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
+		       "vector 0x%x, but the register is already in use for "
+		       "vector 0x%x on another cpu\n",
+		       smp_processor_id(), reg, offset, new, reserved);
 		return -EINVAL;
 	}
 
 	if (!eilvt_entry_is_changeable(old, new)) {
-		pr_err(FW_BUG "cpu %d, try to setup vector 0x%x but "
-		       "register already in use, APIC%lX=0x%x\n",
-		       smp_processor_id(), new, reg, old);
+		pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
+		       "vector 0x%x, but the register is already in use for "
+		       "vector 0x%x on this cpu\n",
+		       smp_processor_id(), reg, offset, new, old);
 		return -EBUSY;
 	}
 
@@ -799,11 +799,7 @@ void __init setup_boot_APIC_clock(void)
 	 * PIT/HPET going.  Otherwise register lapic as a dummy
 	 * device.
 	 */
-	if (nmi_watchdog != NMI_IO_APIC)
-		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
-	else
-		pr_warning("APIC timer registered as dummy,"
-			" due to nmi_watchdog=%d!\n", nmi_watchdog);
+	lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
 
 	/* Setup the lapic or request the broadcast */
 	setup_APIC_timer();
@@ -1387,7 +1383,6 @@ void __cpuinit end_local_APIC_setup(void)
 	}
 #endif
 
-	setup_apic_nmi_watchdog(NULL);
 	apic_pm_activate();
 
 	/*
@@ -1538,13 +1533,60 @@ static int __init detect_init_APIC(void)
 	return 0;
 }
 #else
+
+static int apic_verify(void)
+{
+	u32 features, h, l;
+
+	/*
+	 * The APIC feature bit should now be enabled
+	 * in `cpuid'
+	 */
+	features = cpuid_edx(1);
+	if (!(features & (1 << X86_FEATURE_APIC))) {
+		pr_warning("Could not enable APIC!\n");
+		return -1;
+	}
+	set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
+	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+
+	/* The BIOS may have set up the APIC at some other address */
+	rdmsr(MSR_IA32_APICBASE, l, h);
+	if (l & MSR_IA32_APICBASE_ENABLE)
+		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+
+	pr_info("Found and enabled local APIC!\n");
+	return 0;
+}
+
+int apic_force_enable(void)
+{
+	u32 h, l;
+
+	if (disable_apic)
+		return -1;
+
+	/*
+	 * Some BIOSes disable the local APIC in the APIC_BASE
+	 * MSR. This can only be done in software for Intel P6 or later
+	 * and AMD K7 (Model > 1) or later.
+	 */
+	rdmsr(MSR_IA32_APICBASE, l, h);
+	if (!(l & MSR_IA32_APICBASE_ENABLE)) {
+		pr_info("Local APIC disabled by BIOS -- reenabling.\n");
+		l &= ~MSR_IA32_APICBASE_BASE;
+		l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
+		wrmsr(MSR_IA32_APICBASE, l, h);
+		enabled_via_apicbase = 1;
+	}
+	return apic_verify();
+}
+
 /*
  * Detect and initialize APIC
  */
 static int __init detect_init_APIC(void)
 {
-	u32 h, l, features;
-
 	/* Disabled by kernel option? */
 	if (disable_apic)
 		return -1;
@@ -1574,38 +1616,12 @@ static int __init detect_init_APIC(void)
1574 "you can enable it with \"lapic\"\n"); 1616 "you can enable it with \"lapic\"\n");
1575 return -1; 1617 return -1;
1576 } 1618 }
1577 /* 1619 if (apic_force_enable())
1578 * Some BIOSes disable the local APIC in the APIC_BASE 1620 return -1;
1579 * MSR. This can only be done in software for Intel P6 or later 1621 } else {
1580 * and AMD K7 (Model > 1) or later. 1622 if (apic_verify())
1581 */ 1623 return -1;
1582 rdmsr(MSR_IA32_APICBASE, l, h);
1583 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
1584 pr_info("Local APIC disabled by BIOS -- reenabling.\n");
1585 l &= ~MSR_IA32_APICBASE_BASE;
1586 l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
1587 wrmsr(MSR_IA32_APICBASE, l, h);
1588 enabled_via_apicbase = 1;
1589 }
1590 }
1591 /*
1592 * The APIC feature bit should now be enabled
1593 * in `cpuid'
1594 */
1595 features = cpuid_edx(1);
1596 if (!(features & (1 << X86_FEATURE_APIC))) {
1597 pr_warning("Could not enable APIC!\n");
1598 return -1;
1599 } 1624 }
1600 set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1601 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1602
1603 /* The BIOS may have set up the APIC at some other address */
1604 rdmsr(MSR_IA32_APICBASE, l, h);
1605 if (l & MSR_IA32_APICBASE_ENABLE)
1606 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
1607
1608 pr_info("Found and enabled local APIC!\n");
1609 1625
1610 apic_pm_activate(); 1626 apic_pm_activate();
1611 1627
@@ -1693,7 +1709,7 @@ void __init init_apic_mappings(void)
  * This initializes the IO-APIC and APIC hardware if this is
  * a UP kernel.
  */
-int apic_version[MAX_APICS];
+int apic_version[MAX_LOCAL_APIC];
 
 int __init APIC_init_uniprocessor(void)
 {
@@ -1758,17 +1774,10 @@ int __init APIC_init_uniprocessor(void)
 		setup_IO_APIC();
 	else {
 		nr_ioapics = 0;
-		localise_nmi_watchdog();
 	}
-#else
-	localise_nmi_watchdog();
 #endif
 
 	x86_init.timers.setup_percpu_clockev();
-#ifdef CONFIG_X86_64
-	check_nmi_watchdog();
-#endif
-
 	return 0;
 }
 
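Note: the refactoring above splits BIOS re-enable (apic_force_enable) from feature verification (apic_verify); both key off MSR_IA32_APICBASE, where the global enable bit sits at bit 11 with the base-address field above it. The check in isolation (illustrative):

	u32 l, h;

	rdmsr(MSR_IA32_APICBASE, l, h);
	if (l & MSR_IA32_APICBASE_ENABLE)	/* bit 11: APIC globally enabled */
		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;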
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 62f6e1e55b90..72ec29e1ae06 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -17,20 +17,31 @@
 #include <linux/nmi.h>
 #include <linux/module.h>
 
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
 u64 hw_nmi_get_sample_period(void)
 {
 	return (u64)(cpu_khz) * 1000 * 60;
 }
+#endif
 
-#ifdef ARCH_HAS_NMI_WATCHDOG
-
+#ifdef arch_trigger_all_cpu_backtrace
 /* For reliability, we're prepared to waste bits here. */
 static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
 
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
 void arch_trigger_all_cpu_backtrace(void)
 {
 	int i;
 
+	if (test_and_set_bit(0, &backtrace_flag))
+		/*
+		 * If there is already a trigger_all_cpu_backtrace() in progress
+		 * (backtrace_flag == 1), don't output double cpu dump infos.
+		 */
+		return;
+
 	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
 
 	printk(KERN_INFO "sending NMI to all CPUs:\n");
@@ -42,6 +53,9 @@ void arch_trigger_all_cpu_backtrace(void)
 			break;
 		mdelay(1);
 	}
+
+	clear_bit(0, &backtrace_flag);
+	smp_mb__after_clear_bit();
 }
 
 static int __kprobes
@@ -50,7 +64,7 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
 {
 	struct die_args *args = __args;
 	struct pt_regs *regs;
-	int cpu = smp_processor_id();
+	int cpu;
 
 	switch (cmd) {
 	case DIE_NMI:
@@ -62,6 +76,7 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
 	}
 
 	regs = args->regs;
+	cpu = smp_processor_id();
 
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
 		static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
@@ -91,18 +106,3 @@ static int __init register_trigger_all_cpu_backtrace(void)
 }
 early_initcall(register_trigger_all_cpu_backtrace);
 #endif
-
-/* STUB calls to mimic old nmi_watchdog behaviour */
-#if defined(CONFIG_X86_LOCAL_APIC)
-unsigned int nmi_watchdog = NMI_NONE;
-EXPORT_SYMBOL(nmi_watchdog);
-void acpi_nmi_enable(void) { return; }
-void acpi_nmi_disable(void) { return; }
-#endif
-atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
-EXPORT_SYMBOL(nmi_active);
-int unknown_nmi_panic;
-void cpu_nmi_set_wd_enabled(void) { return; }
-void stop_apic_nmi_watchdog(void *unused) { return; }
-void setup_apic_nmi_watchdog(void *unused) { return; }
-int __init check_nmi_watchdog(void) { return 0; }
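Note: backtrace_flag is the standard test_and_set_bit() reentrancy latch, here keeping overlapping all-cpu backtrace requests from interleaving their output. The pattern in miniature (illustrative):

	static unsigned long flag;

	if (test_and_set_bit(0, &flag))
		return;			/* a dump is already in progress */
	/* ... emit backtraces ... */
	clear_bit(0, &flag);
	smp_mb__after_clear_bit();	/* order the clear against later NMIs */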
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index fadcd743a74f..f6cd5b410770 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -54,7 +54,6 @@
 #include <asm/dma.h>
 #include <asm/timer.h>
 #include <asm/i8259.h>
-#include <asm/nmi.h>
 #include <asm/msidef.h>
 #include <asm/hypertransport.h>
 #include <asm/setup.h>
@@ -1934,8 +1933,7 @@ void disable_IO_APIC(void)
  *
  * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
  */
-
-void __init setup_ioapic_ids_from_mpc(void)
+void __init setup_ioapic_ids_from_mpc_nocheck(void)
 {
 	union IO_APIC_reg_00 reg_00;
 	physid_mask_t phys_id_present_map;
@@ -1944,15 +1942,6 @@ void __init setup_ioapic_ids_from_mpc(void)
 	unsigned char old_id;
 	unsigned long flags;
 
-	if (acpi_ioapic)
-		return;
-	/*
-	 * Don't check I/O APIC IDs for xAPIC systems.  They have
-	 * no meaning without the serial APIC bus.
-	 */
-	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
-		return;
 	/*
 	 * This is broken; anything with a real cpu count has to
 	 * circumvent this idiocy regardless.
@@ -2006,7 +1995,6 @@ void __init setup_ioapic_ids_from_mpc(void)
 		physids_or(phys_id_present_map, phys_id_present_map, tmp);
 	}
 
-
 	/*
 	 * We need to adjust the IRQ routing table
 	 * if the ID changed.
@@ -2042,6 +2030,21 @@ void __init setup_ioapic_ids_from_mpc(void)
 		apic_printk(APIC_VERBOSE, " ok.\n");
 	}
 }
+
+void __init setup_ioapic_ids_from_mpc(void)
+{
+
+	if (acpi_ioapic)
+		return;
+	/*
+	 * Don't check I/O APIC IDs for xAPIC systems.  They have
+	 * no meaning without the serial APIC bus.
+	 */
+	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
+		return;
+	setup_ioapic_ids_from_mpc_nocheck();
+}
 #endif
 
 int no_timer_check __initdata;
@@ -2642,24 +2645,6 @@ static void lapic_register_intr(int irq)
 					      "edge");
 }
 
-static void __init setup_nmi(void)
-{
-	/*
-	 * Dirty trick to enable the NMI watchdog ...
-	 * We put the 8259A master into AEOI mode and
-	 * unmask on all local APICs LVT0 as NMI.
-	 *
-	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
-	 * is from Maciej W. Rozycki - so we do not have to EOI from
-	 * the NMI handler or the timer interrupt.
-	 */
-	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
-
-	enable_NMI_through_LVT0();
-
-	apic_printk(APIC_VERBOSE, " done.\n");
-}
-
 /*
  * This looks a bit hackish but it's about the only one way of sending
  * a few INTA cycles to 8259As and any associated glue logic.  ICR does
@@ -2765,15 +2750,6 @@ static inline void __init check_timer(void)
 	 */
 	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
 	legacy_pic->init(1);
-#ifdef CONFIG_X86_32
-	{
-		unsigned int ver;
-
-		ver = apic_read(APIC_LVR);
-		ver = GET_APIC_VERSION(ver);
-		timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
-	}
-#endif
 
 	pin1  = find_isa_irq_pin(0, mp_INT);
 	apic1 = find_isa_irq_apic(0, mp_INT);
@@ -2821,10 +2797,6 @@ static inline void __init check_timer(void)
 			unmask_ioapic(cfg);
 		}
 		if (timer_irq_works()) {
-			if (nmi_watchdog == NMI_IO_APIC) {
-				setup_nmi();
-				legacy_pic->unmask(0);
-			}
 			if (disable_timer_pin_1 > 0)
 				clear_IO_APIC_pin(0, pin1);
 			goto out;
@@ -2850,11 +2822,6 @@ static inline void __init check_timer(void)
 		if (timer_irq_works()) {
 			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
 			timer_through_8259 = 1;
-			if (nmi_watchdog == NMI_IO_APIC) {
-				legacy_pic->mask(0);
-				setup_nmi();
-				legacy_pic->unmask(0);
-			}
 			goto out;
 		}
 		/*
@@ -2866,15 +2833,6 @@ static inline void __init check_timer(void)
 		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
 	}
 
-	if (nmi_watchdog == NMI_IO_APIC) {
-		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
-			    "through the IO-APIC - disabling NMI Watchdog!\n");
-		nmi_watchdog = NMI_NONE;
-	}
-#ifdef CONFIG_X86_32
-	timer_ack = 0;
-#endif
-
 	apic_printk(APIC_QUIET, KERN_INFO
 		    "...trying to set up timer as Virtual Wire IRQ...\n");
 
@@ -3639,7 +3597,7 @@ int __init io_apic_get_redir_entries (int ioapic)
 	return reg_01.bits.entries + 1;
 }
 
-void __init probe_nr_irqs_gsi(void)
+static void __init probe_nr_irqs_gsi(void)
 {
 	int nr;
 
@@ -3956,7 +3914,7 @@ static struct resource * __init ioapic_setup_resources(int nr_ioapics)
 	return res;
 }
 
-void __init ioapic_init_mappings(void)
+void __init ioapic_and_gsi_init(void)
 {
 	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
 	struct resource *ioapic_res;
@@ -3994,6 +3952,8 @@ fake_ioapic_page:
 		ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
 		ioapic_res++;
 	}
+
+	probe_nr_irqs_gsi();
 }
 
 void __init ioapic_insert_resources(void)
@@ -4103,7 +4063,8 @@ void __init pre_init_apic_IRQ0(void)
 
 	printk(KERN_INFO "Early APIC setup for system timer0\n");
 #ifndef CONFIG_SMP
-	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
+	physid_set_mask_of_physid(boot_cpu_physical_apicid,
+				  &phys_cpu_present_map);
 #endif
 	/* Make sure the irq descriptor is set up */
 	cfg = alloc_irq_and_cfg_at(0, 0);
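Note: with probe_nr_irqs_gsi() now static and invoked at the tail of the renamed ioapic_and_gsi_init(), boot code that previously made two calls needs only one; the call-site update lives outside this diff, but the consolidated form is simply:

	ioapic_and_gsi_init();	/* maps the IO-APICs, then probes the GSI count */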
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
deleted file mode 100644
index c90041ccb742..000000000000
--- a/arch/x86/kernel/apic/nmi.c
+++ /dev/null
@@ -1,567 +0,0 @@
1/*
2 * NMI watchdog support on APIC systems
3 *
4 * Started by Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes:
7 * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
8 * Mikael Pettersson : Power Management for local APIC NMI watchdog.
9 * Mikael Pettersson : Pentium 4 support for local APIC NMI watchdog.
10 * Pavel Machek and
11 * Mikael Pettersson : PM converted to driver model. Disable/enable API.
12 */
13
14#include <asm/apic.h>
15
16#include <linux/nmi.h>
17#include <linux/mm.h>
18#include <linux/delay.h>
19#include <linux/interrupt.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <linux/sysdev.h>
23#include <linux/sysctl.h>
24#include <linux/percpu.h>
25#include <linux/kprobes.h>
26#include <linux/cpumask.h>
27#include <linux/kernel_stat.h>
28#include <linux/kdebug.h>
29#include <linux/smp.h>
30
31#include <asm/i8259.h>
32#include <asm/io_apic.h>
33#include <asm/proto.h>
34#include <asm/timer.h>
35
36#include <asm/mce.h>
37
38#include <asm/mach_traps.h>
39
40int unknown_nmi_panic;
41int nmi_watchdog_enabled;
42
43/* For reliability, we're prepared to waste bits here. */
44static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
45
46/* nmi_active:
47 * >0: the lapic NMI watchdog is active, but can be disabled
48 * <0: the lapic NMI watchdog has not been set up, and cannot
49 * be enabled
50 * 0: the lapic NMI watchdog is disabled, but can be enabled
51 */
52atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
53EXPORT_SYMBOL(nmi_active);
54
55unsigned int nmi_watchdog = NMI_NONE;
56EXPORT_SYMBOL(nmi_watchdog);
57
58static int panic_on_timeout;
59
60static unsigned int nmi_hz = HZ;
61static DEFINE_PER_CPU(short, wd_enabled);
62static int endflag __initdata;
63
64static inline unsigned int get_nmi_count(int cpu)
65{
66 return per_cpu(irq_stat, cpu).__nmi_count;
67}
68
69static inline int mce_in_progress(void)
70{
71#if defined(CONFIG_X86_MCE)
72 return atomic_read(&mce_entry) > 0;
73#endif
74 return 0;
75}
76
77/*
78 * Take the local apic timer and PIT/HPET into account. We don't
79 * know which one is active, when we have highres/dyntick on
80 */
81static inline unsigned int get_timer_irqs(int cpu)
82{
83 return per_cpu(irq_stat, cpu).apic_timer_irqs +
84 per_cpu(irq_stat, cpu).irq0_irqs;
85}
86
87#ifdef CONFIG_SMP
88/*
89 * The performance counters used by NMI_LOCAL_APIC don't trigger when
90 * the CPU is idle. To make sure the NMI watchdog really ticks on all
91 * CPUs during the test make them busy.
92 */
93static __init void nmi_cpu_busy(void *data)
94{
95 local_irq_enable_in_hardirq();
96 /*
97 * Intentionally don't use cpu_relax here. This is
98 * to make sure that the performance counter really ticks,
99 * even if there is a simulator or similar that catches the
100 * pause instruction. On a real HT machine this is fine because
101 * all other CPUs are busy with "useless" delay loops and don't
102 * care if they get somewhat less cycles.
103 */
104 while (endflag == 0)
105 mb();
106}
107#endif
108
109static void report_broken_nmi(int cpu, unsigned int *prev_nmi_count)
110{
111 printk(KERN_CONT "\n");
112
113 printk(KERN_WARNING
114 "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
115 cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
116
117 printk(KERN_WARNING
118 "Please report this to bugzilla.kernel.org,\n");
119 printk(KERN_WARNING
120 "and attach the output of the 'dmesg' command.\n");
121
122 per_cpu(wd_enabled, cpu) = 0;
123 atomic_dec(&nmi_active);
124}
125
126static void __acpi_nmi_disable(void *__unused)
127{
128 apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
129}
130
131int __init check_nmi_watchdog(void)
132{
133 unsigned int *prev_nmi_count;
134 int cpu;
135
136 if (!nmi_watchdog_active() || !atomic_read(&nmi_active))
137 return 0;
138
139 prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
140 if (!prev_nmi_count)
141 goto error;
142
143 printk(KERN_INFO "Testing NMI watchdog ... ");
144
145#ifdef CONFIG_SMP
146 if (nmi_watchdog == NMI_LOCAL_APIC)
147 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
148#endif
149
150 for_each_possible_cpu(cpu)
151 prev_nmi_count[cpu] = get_nmi_count(cpu);
152 local_irq_enable();
153 mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
154
155 for_each_online_cpu(cpu) {
156 if (!per_cpu(wd_enabled, cpu))
157 continue;
158 if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
159 report_broken_nmi(cpu, prev_nmi_count);
160 }
161 endflag = 1;
162 if (!atomic_read(&nmi_active)) {
163 kfree(prev_nmi_count);
164 atomic_set(&nmi_active, -1);
165 goto error;
166 }
167 printk("OK.\n");
168
169 /*
170 * now that we know it works we can reduce NMI frequency to
171 * something more reasonable; makes a difference in some configs
172 */
173 if (nmi_watchdog == NMI_LOCAL_APIC)
174 nmi_hz = lapic_adjust_nmi_hz(1);
175
176 kfree(prev_nmi_count);
177 return 0;
178error:
179 if (nmi_watchdog == NMI_IO_APIC) {
180 if (!timer_through_8259)
181 legacy_pic->mask(0);
182 on_each_cpu(__acpi_nmi_disable, NULL, 1);
183 }
184
185#ifdef CONFIG_X86_32
186 timer_ack = 0;
187#endif
188 return -1;
189}
190
191static int __init setup_nmi_watchdog(char *str)
192{
193 unsigned int nmi;
194
195 if (!strncmp(str, "panic", 5)) {
196 panic_on_timeout = 1;
197 str = strchr(str, ',');
198 if (!str)
199 return 1;
200 ++str;
201 }
202
203 if (!strncmp(str, "lapic", 5))
204 nmi_watchdog = NMI_LOCAL_APIC;
205 else if (!strncmp(str, "ioapic", 6))
206 nmi_watchdog = NMI_IO_APIC;
207 else {
208 get_option(&str, &nmi);
209 if (nmi >= NMI_INVALID)
210 return 0;
211 nmi_watchdog = nmi;
212 }
213
214 return 1;
215}
216__setup("nmi_watchdog=", setup_nmi_watchdog);
217
218/*
219 * Suspend/resume support
220 */
221#ifdef CONFIG_PM
222
223static int nmi_pm_active; /* nmi_active before suspend */
224
225static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
226{
227 /* only CPU0 goes here, other CPUs should be offline */
228 nmi_pm_active = atomic_read(&nmi_active);
229 stop_apic_nmi_watchdog(NULL);
230 BUG_ON(atomic_read(&nmi_active) != 0);
231 return 0;
232}
233
234static int lapic_nmi_resume(struct sys_device *dev)
235{
236 /* only CPU0 goes here, other CPUs should be offline */
237 if (nmi_pm_active > 0) {
238 setup_apic_nmi_watchdog(NULL);
239 touch_nmi_watchdog();
240 }
241 return 0;
242}
243
244static struct sysdev_class nmi_sysclass = {
245 .name = "lapic_nmi",
246 .resume = lapic_nmi_resume,
247 .suspend = lapic_nmi_suspend,
248};
249
250static struct sys_device device_lapic_nmi = {
251 .id = 0,
252 .cls = &nmi_sysclass,
253};
254
255static int __init init_lapic_nmi_sysfs(void)
256{
257 int error;
258
259 /*
260 * should really be a BUG_ON but b/c this is an
261 * init call, it just doesn't work. -dcz
262 */
263 if (nmi_watchdog != NMI_LOCAL_APIC)
264 return 0;
265
266 if (atomic_read(&nmi_active) < 0)
267 return 0;
268
269 error = sysdev_class_register(&nmi_sysclass);
270 if (!error)
271 error = sysdev_register(&device_lapic_nmi);
272 return error;
273}
274
275/* must come after the local APIC's device_initcall() */
276late_initcall(init_lapic_nmi_sysfs);
277
278#endif /* CONFIG_PM */
279
280static void __acpi_nmi_enable(void *__unused)
281{
282 apic_write(APIC_LVT0, APIC_DM_NMI);
283}
284
285/*
286 * Enable timer based NMIs on all CPUs:
287 */
288void acpi_nmi_enable(void)
289{
290 if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
291 on_each_cpu(__acpi_nmi_enable, NULL, 1);
292}
293
294/*
295 * Disable timer based NMIs on all CPUs:
296 */
297void acpi_nmi_disable(void)
298{
299 if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
300 on_each_cpu(__acpi_nmi_disable, NULL, 1);
301}
302
303/*
304 * This function is called as soon the LAPIC NMI watchdog driver has everything
305 * in place and it's ready to check if the NMIs belong to the NMI watchdog
306 */
307void cpu_nmi_set_wd_enabled(void)
308{
309 __get_cpu_var(wd_enabled) = 1;
310}
311
312void setup_apic_nmi_watchdog(void *unused)
313{
314 if (__get_cpu_var(wd_enabled))
315 return;
316
317 /* cheap hack to support suspend/resume */
318 /* if cpu0 is not active neither should the other cpus */
319 if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
320 return;
321
322 switch (nmi_watchdog) {
323 case NMI_LOCAL_APIC:
324 if (lapic_watchdog_init(nmi_hz) < 0) {
325 __get_cpu_var(wd_enabled) = 0;
326 return;
327 }
328 /* FALL THROUGH */
329 case NMI_IO_APIC:
330 __get_cpu_var(wd_enabled) = 1;
331 atomic_inc(&nmi_active);
332 }
333}
334
335void stop_apic_nmi_watchdog(void *unused)
336{
337 /* only support LOCAL and IO APICs for now */
338 if (!nmi_watchdog_active())
339 return;
340 if (__get_cpu_var(wd_enabled) == 0)
341 return;
342 if (nmi_watchdog == NMI_LOCAL_APIC)
343 lapic_watchdog_stop();
344 else
345 __acpi_nmi_disable(NULL);
346 __get_cpu_var(wd_enabled) = 0;
347 atomic_dec(&nmi_active);
348}
349
/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing, then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 *
 * Since NMIs don't listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first...
 * [when there are more tty-related locks, break them up here too!]
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog(void)
{
	if (nmi_watchdog_active()) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu(cpu) {
			if (per_cpu(nmi_touch, cpu) != 1)
				per_cpu(nmi_touch, cpu) = 1;
		}
	}

	/*
	 * Tickle the softlockup detector too:
	 */
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

notrace __kprobes int
nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	int rc = 0;

	sum = get_timer_irqs(cpu);

	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	/* We can be called before check_nmi_watchdog, hence NULL check. */
	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		static DEFINE_RAW_SPINLOCK(lock);	/* Serialise the printks */

		raw_spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		dump_stack();
		raw_spin_unlock(&lock);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));

		rc = 1;
	}

	/* Could check oops_in_progress here too, but it's safer not to */
	if (mce_in_progress())
		touched = 1;
	/* if none of the timers is firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck...
		 * wait a few IRQs (5 seconds) before doing the oops.
		 */
		__this_cpu_inc(alert_counter);
		if (__this_cpu_read(alert_counter) == 5 * nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens.
			 */
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		__this_cpu_write(alert_counter, 0);
	}

	/* see if the nmi watchdog went off */
	if (!__get_cpu_var(wd_enabled))
		return rc;
	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		rc |= lapic_wd_event(nmi_hz);
		break;
	case NMI_IO_APIC:
		/*
		 * don't know how to accurately check for this.
		 * just assume it was a watchdog timer interrupt
		 * This matches the old behaviour.
		 */
		rc = 1;
		break;
	}
	return rc;
}

#ifdef CONFIG_SYSCTL

static void enable_ioapic_nmi_watchdog_single(void *unused)
{
	__get_cpu_var(wd_enabled) = 1;
	atomic_inc(&nmi_active);
	__acpi_nmi_enable(NULL);
}

static void enable_ioapic_nmi_watchdog(void)
{
	on_each_cpu(enable_ioapic_nmi_watchdog_single, NULL, 1);
	touch_nmi_watchdog();
}

static void disable_ioapic_nmi_watchdog(void)
{
	on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
}

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1);	/* Always panic here */
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) {
		printk(KERN_WARNING
			"NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else if (nmi_watchdog == NMI_IO_APIC) {
		if (nmi_watchdog_enabled)
			enable_ioapic_nmi_watchdog();
		else
			disable_ioapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}

#endif /* CONFIG_SYSCTL */

int do_nmi_callback(struct pt_regs *regs, int cpu)
{
#ifdef CONFIG_SYSCTL
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
#endif
	return 0;
}

void arch_trigger_all_cpu_backtrace(void)
{
	int i;

	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);

	printk(KERN_INFO "sending NMI to all CPUs:\n");
	apic->send_IPI_all(NMI_VECTOR);

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
	}
}
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index c1c52c341f40..2a3f2a7db243 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -48,6 +48,16 @@ unsigned int uv_apicid_hibits;
 EXPORT_SYMBOL_GPL(uv_apicid_hibits);
 static DEFINE_SPINLOCK(uv_nmi_lock);
 
+static unsigned long __init uv_early_read_mmr(unsigned long addr)
+{
+	unsigned long val, *mmr;
+
+	mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
+	val = *mmr;
+	early_iounmap(mmr, sizeof(*mmr));
+	return val;
+}
+
 static inline bool is_GRU_range(u64 start, u64 end)
 {
 	return start >= gru_start_paddr && end <= gru_end_paddr;
@@ -58,28 +68,24 @@ static bool uv_is_untracked_pat_range(u64 start, u64 end)
 	return is_ISA_range(start, end) || is_GRU_range(start, end);
 }
 
-static int early_get_nodeid(void)
+static int __init early_get_pnodeid(void)
 {
 	union uvh_node_id_u node_id;
-	unsigned long *mmr;
-
-	mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
-	node_id.v = *mmr;
-	early_iounmap(mmr, sizeof(*mmr));
+	union uvh_rh_gam_config_mmr_u m_n_config;
+	int pnode;
 
 	/* Currently, all blades have same revision number */
+	node_id.v = uv_early_read_mmr(UVH_NODE_ID);
+	m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
 	uv_min_hub_revision_id = node_id.s.revision;
 
-	return node_id.s.node_id;
+	pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
+	return pnode;
 }
 
 static void __init early_get_apic_pnode_shift(void)
 {
-	unsigned long *mmr;
-
-	mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_APICID, sizeof(*mmr));
-	uvh_apicid.v = *mmr;
-	early_iounmap(mmr, sizeof(*mmr));
+	uvh_apicid.v = uv_early_read_mmr(UVH_APICID);
 	if (!uvh_apicid.v)
 		/*
 		 * Old bios, use default value
@@ -95,21 +101,17 @@ static void __init early_get_apic_pnode_shift(void)
 static void __init uv_set_apicid_hibit(void)
 {
 	union uvh_lb_target_physical_apic_id_mask_u apicid_mask;
-	unsigned long *mmr;
 
-	mmr = early_ioremap(UV_LOCAL_MMR_BASE |
-		UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK, sizeof(*mmr));
-	apicid_mask.v = *mmr;
-	early_iounmap(mmr, sizeof(*mmr));
+	apicid_mask.v = uv_early_read_mmr(UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK);
 	uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK;
 }
 
 static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
-	int nodeid;
+	int pnodeid;
 
 	if (!strcmp(oem_id, "SGI")) {
-		nodeid = early_get_nodeid();
+		pnodeid = early_get_pnodeid();
 		early_get_apic_pnode_shift();
 		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
 		x86_platform.nmi_init = uv_nmi_init;
@@ -119,7 +121,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 			uv_system_type = UV_X2APIC;
 		else if (!strcmp(oem_table_id, "UVH")) {
 			__get_cpu_var(x2apic_extra_bits) =
-				nodeid << (uvh_apicid.s.pnode_shift - 1);
+				pnodeid << uvh_apicid.s.pnode_shift;
 			uv_system_type = UV_NON_UNIQUE_APIC;
 			uv_set_apicid_hibit();
 			return 1;
@@ -682,27 +684,32 @@ void uv_nmi_init(void)
 void __init uv_system_init(void)
 {
 	union uvh_rh_gam_config_mmr_u m_n_config;
+	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
 	union uvh_node_id_u node_id;
 	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
-	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
+	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val, n_io;
 	int gnode_extra, max_pnode = 0;
 	unsigned long mmr_base, present, paddr;
-	unsigned short pnode_mask;
+	unsigned short pnode_mask, pnode_io_mask;
 
 	map_low_mmrs();
 
 	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
 	m_val = m_n_config.s.m_skt;
 	n_val = m_n_config.s.n_skt;
+	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
+	n_io = mmioh.s.n_io;
 	mmr_base =
 	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
 	    ~UV_MMR_ENABLE;
 	pnode_mask = (1 << n_val) - 1;
+	pnode_io_mask = (1 << n_io) - 1;
+
 	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
 	gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
 	gnode_upper = ((unsigned long)gnode_extra << m_val);
-	printk(KERN_DEBUG "UV: N %d, M %d, gnode_upper 0x%lx, gnode_extra 0x%x\n",
-			n_val, m_val, gnode_upper, gnode_extra);
+	printk(KERN_INFO "UV: N %d, M %d, N_IO: %d, gnode_upper 0x%lx, gnode_extra 0x%x, pnode_mask 0x%x, pnode_io_mask 0x%x\n",
+			n_val, m_val, n_io, gnode_upper, gnode_extra, pnode_mask, pnode_io_mask);
 
 	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
 
@@ -735,7 +742,7 @@ void __init uv_system_init(void)
 		for (j = 0; j < 64; j++) {
 			if (!test_bit(j, &present))
 				continue;
-			pnode = (i * 64 + j);
+			pnode = (i * 64 + j) & pnode_mask;
 			uv_blade_info[blade].pnode = pnode;
 			uv_blade_info[blade].nr_possible_cpus = 0;
 			uv_blade_info[blade].nr_online_cpus = 0;
@@ -756,6 +763,7 @@ void __init uv_system_init(void)
 		/*
 		 * apic_pnode_shift must be set before calling uv_apicid_to_pnode();
 		 */
+		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
 		uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
 		pnode = uv_apicid_to_pnode(apicid);
 		blade = boot_pnode_to_blade(pnode);
@@ -772,7 +780,6 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
 		uv_cpu_hub_info(cpu)->pnode = pnode;
-		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
 		uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
@@ -796,7 +803,7 @@ void __init uv_system_init(void)
 
 	map_gru_high(max_pnode);
 	map_mmr_high(max_pnode);
-	map_mmioh_high(max_pnode);
+	map_mmioh_high(max_pnode & pnode_io_mask);
 
 	uv_cpu_init();
 	uv_scir_register_cpu_notifier();
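
The pnode extraction introduced in early_get_pnodeid() is a shift-and-mask: drop the low bit of node_id, then keep the n_skt low-order bits. A worked example with made-up register values (stand-alone user-space C, illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long node_id = 0x96;	/* assumed raw UVH_NODE_ID field */
	unsigned int n_skt = 6;		/* assumed n_skt from the config MMR */
	unsigned int pnode;

	/* drop the low bit, then keep only the n_skt node bits */
	pnode = (node_id >> 1) & ((1u << n_skt) - 1);
	printf("pnode = 0x%x\n", pnode);	/* 0x96 >> 1 = 0x4b; 0x4b & 0x3f = 0x0b */
	return 0;
}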
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4b68bda30938..1d59834396bd 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -894,7 +894,6 @@ void __init identify_boot_cpu(void)
 #else
 	vgetcpu_set_mode();
 #endif
-	init_hw_perf_events();
 }
 
 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 17ad03366211..9ecf81f9b90f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -149,8 +149,7 @@ union _cpuid4_leaf_ecx {
 };
 
 struct amd_l3_cache {
-	struct pci_dev *dev;
-	bool can_disable;
+	struct amd_northbridge *nb;
 	unsigned indices;
 	u8 subcaches[4];
 };
@@ -311,14 +310,12 @@ struct _cache_attr {
 /*
  * L3 cache descriptors
  */
-static struct amd_l3_cache **__cpuinitdata l3_caches;
-
 static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 {
 	unsigned int sc0, sc1, sc2, sc3;
 	u32 val = 0;
 
-	pci_read_config_dword(l3->dev, 0x1C4, &val);
+	pci_read_config_dword(l3->nb->misc, 0x1C4, &val);
 
 	/* calculate subcache sizes */
 	l3->subcaches[0] = sc0 = !(val & BIT(0));
@@ -330,47 +327,14 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
-static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
-{
-	struct amd_l3_cache *l3;
-	struct pci_dev *dev = node_to_k8_nb_misc(node);
-
-	l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
-	if (!l3) {
-		printk(KERN_WARNING "Error allocating L3 struct\n");
-		return NULL;
-	}
-
-	l3->dev = dev;
-
-	amd_calc_l3_indices(l3);
-
-	return l3;
-}
-
-static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
-					   int index)
+static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
+					int index)
 {
+	static struct amd_l3_cache *__cpuinitdata l3_caches;
 	int node;
 
-	if (boot_cpu_data.x86 != 0x10)
-		return;
-
-	if (index < 3)
-		return;
-
-	/* see errata #382 and #388 */
-	if (boot_cpu_data.x86_model < 0x8)
-		return;
-
-	if ((boot_cpu_data.x86_model == 0x8 ||
-	     boot_cpu_data.x86_model == 0x9)
-		&&
-	     boot_cpu_data.x86_mask < 0x1)
-		return;
-
-	/* not in virtualized environments */
-	if (k8_northbridges.num == 0)
+	/* only for L3, and not in virtualized environments */
+	if (index < 3 || amd_nb_num() == 0)
 		return;
 
 	/*
@@ -378,7 +342,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 	 * never freed but this is done only on shutdown so it doesn't matter.
 	 */
 	if (!l3_caches) {
-		int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
+		int size = amd_nb_num() * sizeof(struct amd_l3_cache);
 
 		l3_caches = kzalloc(size, GFP_ATOMIC);
 		if (!l3_caches)
@@ -387,14 +351,12 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 
 	node = amd_get_nb_id(smp_processor_id());
 
-	if (!l3_caches[node]) {
-		l3_caches[node] = amd_init_l3_cache(node);
-		l3_caches[node]->can_disable = true;
+	if (!l3_caches[node].nb) {
+		l3_caches[node].nb = node_to_amd_nb(node);
+		amd_calc_l3_indices(&l3_caches[node]);
 	}
 
-	WARN_ON(!l3_caches[node]);
-
-	this_leaf->l3 = l3_caches[node];
+	this_leaf->l3 = &l3_caches[node];
 }
 
 /*
@@ -408,7 +370,7 @@ int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
 {
 	unsigned int reg = 0;
 
-	pci_read_config_dword(l3->dev, 0x1BC + slot * 4, &reg);
+	pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, &reg);
 
 	/* check whether this slot is activated already */
 	if (reg & (3UL << 30))
@@ -422,7 +384,8 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 {
 	int index;
 
-	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
+	if (!this_leaf->l3 ||
+	    !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
 		return -EINVAL;
 
 	index = amd_get_l3_disable_slot(this_leaf->l3, slot);
@@ -457,7 +420,7 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
 		if (!l3->subcaches[i])
 			continue;
 
-		pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+		pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
 
 		/*
 		 * We need to WBINVD on a core on the node containing the L3
@@ -467,7 +430,7 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
 		wbinvd_on_cpu(cpu);
 
 		reg |= BIT(31);
-		pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+		pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
 	}
 }
 
@@ -524,7 +487,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
+	if (!this_leaf->l3 ||
+	    !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
 		return -EINVAL;
 
 	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
@@ -545,7 +509,7 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 #define STORE_CACHE_DISABLE(slot)					\
 static ssize_t								\
 store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
 			   const char *buf, size_t count)		\
 {									\
 	return store_cache_disable(this_leaf, buf, count, slot);	\
 }
@@ -558,10 +522,7 @@ static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 		show_cache_disable_1, store_cache_disable_1);
 
 #else	/* CONFIG_AMD_NB */
-static void __cpuinit
-amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index)
-{
-};
+#define amd_init_l3_cache(x, y)
 #endif /* CONFIG_AMD_NB */
 
 static int
@@ -575,7 +536,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
 		amd_cpuid4(index, &eax, &ebx, &ecx);
-		amd_check_l3_disable(this_leaf, index);
+		amd_init_l3_cache(this_leaf, index);
 	} else {
 		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
 	}
@@ -983,30 +944,48 @@ define_one_ro(size);
 define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_list);
 
-#define DEFAULT_SYSFS_CACHE_ATTRS	\
-	&type.attr,			\
-	&level.attr,			\
-	&coherency_line_size.attr,	\
-	&physical_line_partition.attr,	\
-	&ways_of_associativity.attr,	\
-	&number_of_sets.attr,		\
-	&size.attr,			\
-	&shared_cpu_map.attr,		\
-	&shared_cpu_list.attr
-
 static struct attribute *default_attrs[] = {
-	DEFAULT_SYSFS_CACHE_ATTRS,
+	&type.attr,
+	&level.attr,
+	&coherency_line_size.attr,
+	&physical_line_partition.attr,
+	&ways_of_associativity.attr,
+	&number_of_sets.attr,
+	&size.attr,
+	&shared_cpu_map.attr,
+	&shared_cpu_list.attr,
 	NULL
 };
 
-static struct attribute *default_l3_attrs[] = {
-	DEFAULT_SYSFS_CACHE_ATTRS,
 #ifdef CONFIG_AMD_NB
-	&cache_disable_0.attr,
-	&cache_disable_1.attr,
+static struct attribute ** __cpuinit amd_l3_attrs(void)
+{
+	static struct attribute **attrs;
+	int n;
+
+	if (attrs)
+		return attrs;
+
+	n = sizeof (default_attrs) / sizeof (struct attribute *);
+
+	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+		n += 2;
+
+	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
+	if (attrs == NULL)
+		return attrs = default_attrs;
+
+	for (n = 0; default_attrs[n]; n++)
+		attrs[n] = default_attrs[n];
+
+	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
+		attrs[n++] = &cache_disable_0.attr;
+		attrs[n++] = &cache_disable_1.attr;
+	}
+
+	return attrs;
+}
 #endif
-	NULL
-};
 
 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
@@ -1117,11 +1096,11 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 
 		this_leaf = CPUID4_INFO_IDX(cpu, i);
 
-		if (this_leaf->l3 && this_leaf->l3->can_disable)
-			ktype_cache.default_attrs = default_l3_attrs;
-		else
-			ktype_cache.default_attrs = default_attrs;
-
+		ktype_cache.default_attrs = default_attrs;
+#ifdef CONFIG_AMD_NB
+		if (this_leaf->l3)
+			ktype_cache.default_attrs = amd_l3_attrs();
+#endif
 		retval = kobject_init_and_add(&(this_object->kobj),
 					      &ktype_cache,
 					      per_cpu(ici_cache_kobject, cpu),
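
amd_l3_attrs() builds its sysfs attribute array once, caches it in a function-local static pointer, and appends the two cache_disable entries only when the L3-index-disable feature is present. A self-contained sketch of that same build-once pattern (user-space C; strings stand in for the struct attribute pointers, all names illustrative):

#include <stdio.h>
#include <stdlib.h>

static const char *base_attrs[] = { "type", "level", "size", NULL };

static const char **optional_attrs(int have_feature)
{
	static const char **attrs;	/* built once, then reused */
	size_t n;

	if (attrs)
		return attrs;

	n = sizeof(base_attrs) / sizeof(base_attrs[0]);
	if (have_feature)
		n += 2;	/* room for the two extra entries */

	attrs = calloc(n, sizeof(*attrs));
	if (!attrs)
		return attrs = base_attrs;	/* fall back to the base list */

	for (n = 0; base_attrs[n]; n++)
		attrs[n] = base_attrs[n];

	if (have_feature) {
		attrs[n++] = "cache_disable_0";
		attrs[n++] = "cache_disable_1";
	}
	return attrs;	/* calloc() left the terminating NULL in place */
}

int main(void)
{
	for (const char **a = optional_attrs(1); *a; a++)
		puts(*a);
	return 0;
}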
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 80c482382d5c..5bf2fac52aca 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -31,8 +31,6 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 
-#define PFX               "mce_threshold: "
-#define VERSION           "version 1.1.1"
-
 #define NR_BANKS          6
 #define NR_BLOCKS         9
 #define THRESHOLD_MAX     0xFFF
@@ -59,12 +57,6 @@ struct threshold_block {
 	struct list_head	miscj;
 };
 
-/* defaults used early on boot */
-static struct threshold_block threshold_defaults = {
-	.interrupt_enable	= 0,
-	.threshold_limit	= THRESHOLD_MAX,
-};
-
 struct threshold_bank {
 	struct kobject		*kobj;
 	struct threshold_block	*blocks;
@@ -89,50 +81,101 @@ static void amd_threshold_interrupt(void);
 struct thresh_restart {
 	struct threshold_block	*b;
 	int			reset;
+	int			set_lvt_off;
+	int			lvt_off;
 	u16			old_limit;
 };
 
+static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
+{
+	int msr = (hi & MASK_LVTOFF_HI) >> 20;
+
+	if (apic < 0) {
+		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
+		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
+		       b->bank, b->block, b->address, hi, lo);
+		return 0;
+	}
+
+	if (apic != msr) {
+		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
+		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
+		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
+		return 0;
+	}
+
+	return 1;
+};
+
 /* must be called with correct cpu affinity */
 /* Called via smp_call_function_single() */
 static void threshold_restart_bank(void *_tr)
 {
 	struct thresh_restart *tr = _tr;
-	u32 mci_misc_hi, mci_misc_lo;
+	u32 hi, lo;
 
-	rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
+	rdmsr(tr->b->address, lo, hi);
 
-	if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
+	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
 		tr->reset = 1;	/* limit cannot be lower than err count */
 
 	if (tr->reset) {		/* reset err count and overflow bit */
-		mci_misc_hi =
-		    (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
+		hi =
+		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
 		    (THRESHOLD_MAX - tr->b->threshold_limit);
 	} else if (tr->old_limit) {	/* change limit w/o reset */
-		int new_count = (mci_misc_hi & THRESHOLD_MAX) +
+		int new_count = (hi & THRESHOLD_MAX) +
 		    (tr->old_limit - tr->b->threshold_limit);
 
-		mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
+		hi = (hi & ~MASK_ERR_COUNT_HI) |
 		    (new_count & THRESHOLD_MAX);
 	}
 
+	if (tr->set_lvt_off) {
+		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
+			/* set new lvt offset */
+			hi &= ~MASK_LVTOFF_HI;
+			hi |= tr->lvt_off << 20;
+		}
+	}
+
 	tr->b->interrupt_enable ?
-	    (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
-	    (mci_misc_hi &= ~MASK_INT_TYPE_HI);
+	    (hi = (hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
+	    (hi &= ~MASK_INT_TYPE_HI);
 
-	mci_misc_hi |= MASK_COUNT_EN_HI;
-	wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
+	hi |= MASK_COUNT_EN_HI;
+	wrmsr(tr->b->address, lo, hi);
+}
+
+static void mce_threshold_block_init(struct threshold_block *b, int offset)
+{
+	struct thresh_restart tr = {
+		.b		= b,
+		.set_lvt_off	= 1,
+		.lvt_off	= offset,
+	};
+
+	b->threshold_limit	= THRESHOLD_MAX;
+	threshold_restart_bank(&tr);
+};
+
+static int setup_APIC_mce(int reserved, int new)
+{
+	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
+					      APIC_EILVT_MSG_FIX, 0))
+		return new;
+
+	return reserved;
 }
 
 /* cpu init entry point, called from mce.c with preempt off */
 void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
+	struct threshold_block b;
 	unsigned int cpu = smp_processor_id();
 	u32 low = 0, high = 0, address = 0;
 	unsigned int bank, block;
-	struct thresh_restart tr;
-	int lvt_off = -1;
-	u8 offset;
+	int offset = -1;
 
 	for (bank = 0; bank < NR_BANKS; ++bank) {
 		for (block = 0; block < NR_BLOCKS; ++block) {
@@ -163,39 +206,16 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 			if (shared_bank[bank] && c->cpu_core_id)
 				break;
 #endif
-			offset = (high & MASK_LVTOFF_HI) >> 20;
-			if (lvt_off < 0) {
-				if (setup_APIC_eilvt(offset,
-						     THRESHOLD_APIC_VECTOR,
-						     APIC_EILVT_MSG_FIX, 0)) {
-					pr_err(FW_BUG "cpu %d, failed to "
-					       "setup threshold interrupt "
-					       "for bank %d, block %d "
-					       "(MSR%08X=0x%x%08x)",
-					       smp_processor_id(), bank, block,
-					       address, high, low);
-					continue;
-				}
-				lvt_off = offset;
-			} else if (lvt_off != offset) {
-				pr_err(FW_BUG "cpu %d, invalid threshold "
-				       "interrupt offset %d for bank %d,"
-				       "block %d (MSR%08X=0x%x%08x)",
-				       smp_processor_id(), lvt_off, bank,
-				       block, address, high, low);
-				continue;
-			}
-
-			high &= ~MASK_LVTOFF_HI;
-			high |= lvt_off << 20;
-			wrmsr(address, low, high);
+			offset = setup_APIC_mce(offset,
+						(high & MASK_LVTOFF_HI) >> 20);
 
-			threshold_defaults.address = address;
-			tr.b = &threshold_defaults;
-			tr.reset = 0;
-			tr.old_limit = 0;
-			threshold_restart_bank(&tr);
+			memset(&b, 0, sizeof(b));
+			b.cpu		= cpu;
+			b.bank		= bank;
+			b.block		= block;
+			b.address	= address;
 
+			mce_threshold_block_init(&b, offset);
 			mce_threshold_vector = amd_threshold_interrupt;
 		}
 	}
@@ -298,9 +318,8 @@ store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
 
 	b->interrupt_enable = !!new;
 
+	memset(&tr, 0, sizeof(tr));
 	tr.b		= b;
-	tr.reset	= 0;
-	tr.old_limit	= 0;
 
 	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
 
@@ -321,10 +340,10 @@ store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
 	if (new < 1)
 		new = 1;
 
+	memset(&tr, 0, sizeof(tr));
 	tr.old_limit = b->threshold_limit;
 	b->threshold_limit = new;
 	tr.b = b;
-	tr.reset = 0;
 
 	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
 
@@ -603,9 +622,9 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
 			continue;
 		err = threshold_create_bank(cpu, bank);
 		if (err)
-			goto out;
+			return err;
 	}
-out:
+
 	return err;
 }
 
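
When threshold_restart_bank() changes the limit without a reset, it rebases the running error count by the difference of the limits so the remaining headroom stays the same. A worked example with made-up numbers (stand-alone user-space C):

#include <stdio.h>

#define THRESHOLD_MAX 0xFFF

int main(void)
{
	unsigned int count = 0x100;	/* assumed current error count */
	unsigned int old_limit = 0x800, new_limit = 0x400;
	unsigned int new_count;

	/* same rebasing as the "change limit w/o reset" branch above */
	new_count = (count + (old_limit - new_limit)) & THRESHOLD_MAX;
	printf("rebased count = 0x%x\n", new_count);	/* 0x100 + 0x400 = 0x500 */
	return 0;
}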
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 4b683267eca5..e12246ff5aa6 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -53,8 +53,13 @@ struct thermal_state {
 	struct _thermal_state core_power_limit;
 	struct _thermal_state package_throttle;
 	struct _thermal_state package_power_limit;
+	struct _thermal_state core_thresh0;
+	struct _thermal_state core_thresh1;
 };
 
+/* Callback to handle core threshold interrupts */
+int (*platform_thermal_notify)(__u64 msr_val);
+
 static DEFINE_PER_CPU(struct thermal_state, thermal_state);
 
 static atomic_t therm_throt_en = ATOMIC_INIT(0);
@@ -200,6 +205,22 @@ static int therm_throt_process(bool new_event, int event, int level)
 	return 0;
 }
 
+static int thresh_event_valid(int event)
+{
+	struct _thermal_state *state;
+	unsigned int this_cpu = smp_processor_id();
+	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
+	u64 now = get_jiffies_64();
+
+	state = (event == 0) ? &pstate->core_thresh0 : &pstate->core_thresh1;
+
+	if (time_before64(now, state->next_check))
+		return 0;
+
+	state->next_check = now + CHECK_INTERVAL;
+	return 1;
+}
+
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
 static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
@@ -313,6 +334,22 @@ device_initcall(thermal_throttle_init_device);
 #define PACKAGE_THROTTLED	((__u64)2 << 62)
 #define PACKAGE_POWER_LIMIT	((__u64)3 << 62)
 
+static void notify_thresholds(__u64 msr_val)
+{
+	/* check whether the interrupt handler is defined;
+	 * otherwise simply return
+	 */
+	if (!platform_thermal_notify)
+		return;
+
+	/* lower threshold reached */
+	if ((msr_val & THERM_LOG_THRESHOLD0) && thresh_event_valid(0))
+		platform_thermal_notify(msr_val);
+	/* higher threshold reached */
+	if ((msr_val & THERM_LOG_THRESHOLD1) && thresh_event_valid(1))
+		platform_thermal_notify(msr_val);
+}
+
 /* Thermal transition interrupt handler */
 static void intel_thermal_interrupt(void)
 {
@@ -321,6 +358,9 @@ static void intel_thermal_interrupt(void)
 
 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
 
+	/* Check for violation of core thermal thresholds */
+	notify_thresholds(msr_val);
+
 	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
 				THERMAL_THROTTLING_EVENT,
 				CORE_LEVEL) != 0)
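
thresh_event_valid() is a plain rate limiter: each threshold event is honoured at most once per CHECK_INTERVAL, tracked through a per-state next_check timestamp. A stand-alone sketch with the jiffies machinery stubbed out (the interval and timestamps are illustrative):

#include <stdio.h>

#define CHECK_INTERVAL 300	/* assumed interval, in fake "jiffies" */

static unsigned long long next_check;

static int event_valid(unsigned long long now)
{
	if (now < next_check)	/* stands in for time_before64() */
		return 0;
	next_check = now + CHECK_INTERVAL;
	return 1;
}

int main(void)
{
	unsigned long long t;

	/* only t = 0, 300 and 600 pass; everything in between is dropped */
	for (t = 0; t <= 600; t += 100)
		printf("t=%llu -> %d\n", t, event_valid(t));
	return 0;
}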
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 6d75b9145b13..0a360d146596 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -330,9 +330,6 @@ static bool reserve_pmc_hardware(void)
 {
 	int i;
 
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		disable_lapic_nmi_watchdog();
-
 	for (i = 0; i < x86_pmu.num_counters; i++) {
 		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
 			goto perfctr_fail;
@@ -355,9 +352,6 @@ perfctr_fail:
 	for (i--; i >= 0; i--)
 		release_perfctr_nmi(x86_pmu.perfctr + i);
 
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		enable_lapic_nmi_watchdog();
-
 	return false;
 }
 
@@ -369,9 +363,6 @@ static void release_pmc_hardware(void)
 		release_perfctr_nmi(x86_pmu.perfctr + i);
 		release_evntsel_nmi(x86_pmu.eventsel + i);
 	}
-
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		enable_lapic_nmi_watchdog();
 }
 
 #else
@@ -384,15 +375,53 @@ static void release_pmc_hardware(void) {}
 static bool check_hw_exists(void)
 {
 	u64 val, val_new = 0;
-	int ret = 0;
+	int i, reg, ret = 0;
+
+	/*
+	 * Check to see if the BIOS enabled any of the counters, if so
+	 * complain and bail.
+	 */
+	for (i = 0; i < x86_pmu.num_counters; i++) {
+		reg = x86_pmu.eventsel + i;
+		ret = rdmsrl_safe(reg, &val);
+		if (ret)
+			goto msr_fail;
+		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
+			goto bios_fail;
+	}
 
+	if (x86_pmu.num_counters_fixed) {
+		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
+		ret = rdmsrl_safe(reg, &val);
+		if (ret)
+			goto msr_fail;
+		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
+			if (val & (0x03 << i*4))
+				goto bios_fail;
+		}
+	}
+
+	/*
+	 * Now write a value and read it back to see if it matches,
+	 * this is needed to detect certain hardware emulators (qemu/kvm)
+	 * that don't trap on the MSR access and always return 0s.
+	 */
 	val = 0xabcdUL;
-	ret |= checking_wrmsrl(x86_pmu.perfctr, val);
+	ret = checking_wrmsrl(x86_pmu.perfctr, val);
 	ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
 	if (ret || val != val_new)
-		return false;
+		goto msr_fail;
 
 	return true;
+
+bios_fail:
+	printk(KERN_CONT "Broken BIOS detected, using software events only.\n");
+	printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
+	return false;
+
+msr_fail:
+	printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
+	return false;
 }
 
 static void reserve_ds_buffers(void);
@@ -451,7 +480,7 @@ static int x86_setup_perfctr(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	u64 config;
 
-	if (!hwc->sample_period) {
+	if (!is_sampling_event(event)) {
 		hwc->sample_period = x86_pmu.max_period;
 		hwc->last_period = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
@@ -1362,7 +1391,7 @@ static void __init pmu_check_apic(void)
 		pr_info("no hardware sampling interrupt available.\n");
 }
 
-void __init init_hw_perf_events(void)
+int __init init_hw_perf_events(void)
 {
 	struct event_constraint *c;
 	int err;
@@ -1377,20 +1406,18 @@ void __init init_hw_perf_events(void)
 		err = amd_pmu_init();
 		break;
 	default:
-		return;
+		return 0;
 	}
 	if (err != 0) {
 		pr_cont("no PMU driver, software events only.\n");
-		return;
+		return 0;
 	}
 
 	pmu_check_apic();
 
 	/* sanity check that the hardware exists or is emulated */
-	if (!check_hw_exists()) {
-		pr_cont("Broken PMU hardware detected, software events only.\n");
-		return;
-	}
+	if (!check_hw_exists())
+		return 0;
 
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
@@ -1438,9 +1465,12 @@ void __init init_hw_perf_events(void)
 	pr_info("... fixed-purpose events:   %d\n", x86_pmu.num_counters_fixed);
 	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
-	perf_pmu_register(&pmu);
+	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 	perf_cpu_notifier(x86_pmu_notifier);
+
+	return 0;
 }
+early_initcall(init_hw_perf_events);
 
 static inline void x86_pmu_read(struct perf_event *event)
 {
@@ -1686,7 +1716,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
 	perf_callchain_store(entry, regs->ip);
 
-	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
+	dump_trace(NULL, regs, NULL, &backtrace_ops, entry);
 }
 
 #ifdef CONFIG_COMPAT
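
The write/read-back probe added to check_hw_exists() catches emulators that silently swallow MSR writes and always return 0. A minimal sketch of the idea, with plain memory standing in for the MSR (everything here is a stub, not the kernel API):

#include <stdio.h>

static unsigned long long fake_msr;	/* pretend perfctr MSR */
static int emulated = 1;		/* assumed: writes are ignored */

static void wrmsr_stub(unsigned long long v) { if (!emulated) fake_msr = v; }
static unsigned long long rdmsr_stub(void) { return fake_msr; }

int main(void)
{
	unsigned long long val = 0xabcdULL, val_new;

	wrmsr_stub(val);
	val_new = rdmsr_stub();
	if (val != val_new)
		printf("broken/emulated PMU detected (%llx != %llx)\n",
		       val, val_new);
	return 0;
}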
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index e421b8cd6944..67e2202a6039 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,7 +1,5 @@
 #ifdef CONFIG_CPU_SUP_AMD
 
-static DEFINE_RAW_SPINLOCK(amd_nb_lock);
-
 static __initconst const u64 amd_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -275,7 +273,7 @@ done:
 	return &emptyconstraint;
 }
 
-static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
+static struct amd_nb *amd_alloc_nb(int cpu)
 {
 	struct amd_nb *nb;
 	int i;
@@ -285,7 +283,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	if (!nb)
 		return NULL;
 
-	nb->nb_id = nb_id;
+	nb->nb_id = -1;
 
 	/*
 	 * initialize all possible NB constraints
@@ -306,7 +304,7 @@ static int amd_pmu_cpu_prepare(int cpu)
 	if (boot_cpu_data.x86_max_cores < 2)
 		return NOTIFY_OK;
 
-	cpuc->amd_nb = amd_alloc_nb(cpu, -1);
+	cpuc->amd_nb = amd_alloc_nb(cpu);
 	if (!cpuc->amd_nb)
 		return NOTIFY_BAD;
 
@@ -325,8 +323,6 @@ static void amd_pmu_cpu_starting(int cpu)
 	nb_id = amd_get_nb_id(cpu);
 	WARN_ON_ONCE(nb_id == BAD_APICID);
 
-	raw_spin_lock(&amd_nb_lock);
-
 	for_each_online_cpu(i) {
 		nb = per_cpu(cpu_hw_events, i).amd_nb;
 		if (WARN_ON_ONCE(!nb))
@@ -341,8 +337,6 @@ static void amd_pmu_cpu_starting(int cpu)
 
 	cpuc->amd_nb->nb_id = nb_id;
 	cpuc->amd_nb->refcnt++;
-
-	raw_spin_unlock(&amd_nb_lock);
 }
 
 static void amd_pmu_cpu_dead(int cpu)
@@ -354,8 +348,6 @@ static void amd_pmu_cpu_dead(int cpu)
 
 	cpuhw = &per_cpu(cpu_hw_events, cpu);
 
-	raw_spin_lock(&amd_nb_lock);
-
 	if (cpuhw->amd_nb) {
 		struct amd_nb *nb = cpuhw->amd_nb;
 
@@ -364,8 +356,6 @@ static void amd_pmu_cpu_dead(int cpu)
364 356
365 cpuhw->amd_nb = NULL; 357 cpuhw->amd_nb = NULL;
366 } 358 }
367
368 raw_spin_unlock(&amd_nb_lock);
369} 359}
370 360
371static __initconst const struct x86_pmu amd_pmu = { 361static __initconst const struct x86_pmu amd_pmu = {
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index c8f5c088cad1..24e390e40f2e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -816,6 +816,32 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	if (ret)
 		return ret;
 
+	if (event->attr.precise_ip &&
+	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+		/*
+		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
+		 * (0x003c) so that we can use it with PEBS.
+		 *
+		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
+		 * PEBS capable. However we can use INST_RETIRED.ANY_P
+		 * (0x00c0), which is a PEBS capable event, to get the same
+		 * count.
+		 *
+		 * INST_RETIRED.ANY_P counts the number of cycles that retire
+		 * at least CNTMASK instructions. By setting CNTMASK to a
+		 * value (16) larger than the maximum number of instructions
+		 * that can be retired per cycle (4) and then inverting the
+		 * condition, we count all cycles that retire 16 or fewer
+		 * instructions, which is every cycle.
+		 *
+		 * Thereby we gain a PEBS capable cycle counter.
+		 */
+		u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
+
+		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
+		event->hw.config = alt_config;
+	}
+
 	if (event->attr.type != PERF_TYPE_RAW)
 		return 0;
 
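
The magic constant 0x108000c0 decodes per the x86 event-select layout: event code in bits 0-7, invert in bit 23, counter mask in bits 24-31. A small stand-alone decoder confirming the event=0xc0 / inv=1 / cmask=16 reading (user-space C, illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long long cfg = 0x108000c0ULL;

	printf("event = 0x%llx\n", cfg & 0xff);		/* 0xc0: INST_RETIRED.ANY_P */
	printf("inv   = %llu\n", (cfg >> 23) & 1);	/* 1: invert the cmask test */
	printf("cmask = %llu\n", (cfg >> 24) & 0xff);	/* 16: > max 4 retired/cycle */
	return 0;
}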
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index d9f4ff8fcd69..d5a236615501 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -16,32 +16,12 @@
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/smp.h>
-#include <linux/nmi.h>
+#include <asm/nmi.h>
 #include <linux/kprobes.h>
 
 #include <asm/apic.h>
 #include <asm/perf_event.h>
 
-struct nmi_watchdog_ctlblk {
-	unsigned int cccr_msr;
-	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
-	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
-};
-
-/* Interface defining a CPU specific perfctr watchdog */
-struct wd_ops {
-	int (*reserve)(void);
-	void (*unreserve)(void);
-	int (*setup)(unsigned nmi_hz);
-	void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
-	void (*stop)(void);
-	unsigned perfctr;
-	unsigned evntsel;
-	u64 checkbit;
-};
-
-static const struct wd_ops *wd_ops;
-
 /*
  * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
  * offset from MSR_P4_BSU_ESCR0.
@@ -60,8 +40,6 @@ static const struct wd_ops *wd_ops;
 static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
 static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
 
-static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
-
 /* converts an msr to an appropriate reservation bit */
 static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
 {
@@ -172,623 +150,3 @@ void release_evntsel_nmi(unsigned int msr)
 	clear_bit(counter, evntsel_nmi_owner);
 }
 EXPORT_SYMBOL(release_evntsel_nmi);
-
-void disable_lapic_nmi_watchdog(void)
-{
-	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);
-
-	if (atomic_read(&nmi_active) <= 0)
-		return;
-
-	on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
-
-	if (wd_ops)
-		wd_ops->unreserve();
-
-	BUG_ON(atomic_read(&nmi_active) != 0);
-}
-
-void enable_lapic_nmi_watchdog(void)
-{
-	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);
-
-	/* are we already enabled */
-	if (atomic_read(&nmi_active) != 0)
-		return;
-
-	/* are we lapic aware */
-	if (!wd_ops)
-		return;
-	if (!wd_ops->reserve()) {
-		printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
-		return;
-	}
-
-	on_each_cpu(setup_apic_nmi_watchdog, NULL, 1);
-	touch_nmi_watchdog();
-}
-
-/*
- * Activate the NMI watchdog via the local APIC.
- */
-
-static unsigned int adjust_for_32bit_ctr(unsigned int hz)
-{
-	u64 counter_val;
-	unsigned int retval = hz;
-
-	/*
-	 * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
-	 * are writable, with the higher bits sign-extending from bit 31.
-	 * So we can only program the counter with 31-bit values, and the
-	 * 32nd bit must be 1 so that bits 33 and up read back as 1.
-	 * Find the appropriate nmi_hz:
-	 */
-	counter_val = (u64)cpu_khz * 1000;
-	do_div(counter_val, retval);
-	if (counter_val > 0x7fffffffULL) {
-		u64 count = (u64)cpu_khz * 1000;
-		do_div(count, 0x7fffffffUL);
-		retval = count + 1;
-	}
-	return retval;
-}
-
-static void write_watchdog_counter(unsigned int perfctr_msr,
-				   const char *descr, unsigned nmi_hz)
-{
-	u64 count = (u64)cpu_khz * 1000;
-
-	do_div(count, nmi_hz);
-	if (descr)
-		pr_debug("setting %s to -0x%08Lx\n", descr, count);
-	wrmsrl(perfctr_msr, 0 - count);
-}
-
-static void write_watchdog_counter32(unsigned int perfctr_msr,
-				     const char *descr, unsigned nmi_hz)
-{
-	u64 count = (u64)cpu_khz * 1000;
-
-	do_div(count, nmi_hz);
-	if (descr)
-		pr_debug("setting %s to -0x%08Lx\n", descr, count);
-	wrmsr(perfctr_msr, (u32)(-count), 0);
-}
-
-/*
- * AMD K7/K8/Family10h/Family11h support.
- * AMD keeps this interface nicely stable so there is not much variety
- */
-#define K7_EVNTSEL_ENABLE	(1 << 22)
-#define K7_EVNTSEL_INT		(1 << 20)
-#define K7_EVNTSEL_OS		(1 << 17)
-#define K7_EVNTSEL_USR		(1 << 16)
-#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
-#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
-
-static int setup_k7_watchdog(unsigned nmi_hz)
-{
-	unsigned int perfctr_msr, evntsel_msr;
-	unsigned int evntsel;
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	perfctr_msr = wd_ops->perfctr;
-	evntsel_msr = wd_ops->evntsel;
-
-	wrmsrl(perfctr_msr, 0UL);
-
-	evntsel = K7_EVNTSEL_INT
-		| K7_EVNTSEL_OS
-		| K7_EVNTSEL_USR
-		| K7_NMI_EVENT;
-
-	/* setup the timer */
-	wrmsr(evntsel_msr, evntsel, 0);
-	write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz);
-
-	/* initialize the wd struct before enabling */
-	wd->perfctr_msr = perfctr_msr;
-	wd->evntsel_msr = evntsel_msr;
-	wd->cccr_msr = 0;  /* unused */
-
-	/* ok, everything is initialized, announce that we're set */
-	cpu_nmi_set_wd_enabled();
-
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= K7_EVNTSEL_ENABLE;
-	wrmsr(evntsel_msr, evntsel, 0);
-
-	return 1;
-}
-
-static void single_msr_stop_watchdog(void)
-{
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	wrmsr(wd->evntsel_msr, 0, 0);
-}
-
-static int single_msr_reserve(void)
-{
-	if (!reserve_perfctr_nmi(wd_ops->perfctr))
-		return 0;
-
-	if (!reserve_evntsel_nmi(wd_ops->evntsel)) {
-		release_perfctr_nmi(wd_ops->perfctr);
-		return 0;
-	}
-	return 1;
-}
-
-static void single_msr_unreserve(void)
-{
-	release_evntsel_nmi(wd_ops->evntsel);
-	release_perfctr_nmi(wd_ops->perfctr);
-}
-
-static void __kprobes
-single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
-{
-	/* start the cycle over again */
-	write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
-}
-
-static const struct wd_ops k7_wd_ops = {
-	.reserve	= single_msr_reserve,
-	.unreserve	= single_msr_unreserve,
-	.setup		= setup_k7_watchdog,
-	.rearm		= single_msr_rearm,
-	.stop		= single_msr_stop_watchdog,
-	.perfctr	= MSR_K7_PERFCTR0,
-	.evntsel	= MSR_K7_EVNTSEL0,
-	.checkbit	= 1ULL << 47,
-};
-
-/*
- * Intel Model 6 (PPro+,P2,P3,P-M,Core1)
- */
-#define P6_EVNTSEL0_ENABLE	(1 << 22)
-#define P6_EVNTSEL_INT		(1 << 20)
-#define P6_EVNTSEL_OS		(1 << 17)
-#define P6_EVNTSEL_USR		(1 << 16)
-#define P6_EVENT_CPU_CLOCKS_NOT_HALTED	0x79
-#define P6_NMI_EVENT		P6_EVENT_CPU_CLOCKS_NOT_HALTED
-
-static int setup_p6_watchdog(unsigned nmi_hz)
-{
-	unsigned int perfctr_msr, evntsel_msr;
-	unsigned int evntsel;
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	perfctr_msr = wd_ops->perfctr;
-	evntsel_msr = wd_ops->evntsel;
-
-	/* KVM doesn't implement this MSR */
-	if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
-		return 0;
-
-	evntsel = P6_EVNTSEL_INT
-		| P6_EVNTSEL_OS
-		| P6_EVNTSEL_USR
-		| P6_NMI_EVENT;
-
-	/* setup the timer */
-	wrmsr(evntsel_msr, evntsel, 0);
-	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
-	write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz);
-
-	/* initialize the wd struct before enabling */
-	wd->perfctr_msr = perfctr_msr;
-	wd->evntsel_msr = evntsel_msr;
-	wd->cccr_msr = 0;  /* unused */
-
-	/* ok, everything is initialized, announce that we're set */
-	cpu_nmi_set_wd_enabled();
-
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= P6_EVNTSEL0_ENABLE;
-	wrmsr(evntsel_msr, evntsel, 0);
-
-	return 1;
-}
-
-static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
-{
-	/*
-	 * P6-based Pentium M needs to re-unmask the APIC vector, but it
-	 * doesn't hurt other P6 variants.
-	 * ArchPerfmon/Core Duo also needs this.
-	 */
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-
-	/* P6/ARCH_PERFMON has 32 bit counter write */
-	write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
-}
-
-static const struct wd_ops p6_wd_ops = {
-	.reserve	= single_msr_reserve,
-	.unreserve	= single_msr_unreserve,
-	.setup		= setup_p6_watchdog,
-	.rearm		= p6_rearm,
-	.stop		= single_msr_stop_watchdog,
-	.perfctr	= MSR_P6_PERFCTR0,
-	.evntsel	= MSR_P6_EVNTSEL0,
-	.checkbit	= 1ULL << 39,
-};
420
421/*
422 * Intel P4 performance counters.
423 * By far the most complicated of all.
424 */
425#define MSR_P4_MISC_ENABLE_PERF_AVAIL (1 << 7)
426#define P4_ESCR_EVENT_SELECT(N) ((N) << 25)
427#define P4_ESCR_OS (1 << 3)
428#define P4_ESCR_USR (1 << 2)
429#define P4_CCCR_OVF_PMI0 (1 << 26)
430#define P4_CCCR_OVF_PMI1 (1 << 27)
431#define P4_CCCR_THRESHOLD(N) ((N) << 20)
432#define P4_CCCR_COMPLEMENT (1 << 19)
433#define P4_CCCR_COMPARE (1 << 18)
434#define P4_CCCR_REQUIRED (3 << 16)
435#define P4_CCCR_ESCR_SELECT(N) ((N) << 13)
436#define P4_CCCR_ENABLE (1 << 12)
437#define P4_CCCR_OVF (1 << 31)
438
439#define P4_CONTROLS 18
440static unsigned int p4_controls[18] = {
441 MSR_P4_BPU_CCCR0,
442 MSR_P4_BPU_CCCR1,
443 MSR_P4_BPU_CCCR2,
444 MSR_P4_BPU_CCCR3,
445 MSR_P4_MS_CCCR0,
446 MSR_P4_MS_CCCR1,
447 MSR_P4_MS_CCCR2,
448 MSR_P4_MS_CCCR3,
449 MSR_P4_FLAME_CCCR0,
450 MSR_P4_FLAME_CCCR1,
451 MSR_P4_FLAME_CCCR2,
452 MSR_P4_FLAME_CCCR3,
453 MSR_P4_IQ_CCCR0,
454 MSR_P4_IQ_CCCR1,
455 MSR_P4_IQ_CCCR2,
456 MSR_P4_IQ_CCCR3,
457 MSR_P4_IQ_CCCR4,
458 MSR_P4_IQ_CCCR5,
459};
460/*
461 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
462 * CRU_ESCR0 (with any non-null event selector) through a complemented
463 * max threshold. [IA32-Vol3, Section 14.9.9]
464 */
465static int setup_p4_watchdog(unsigned nmi_hz)
466{
467 unsigned int perfctr_msr, evntsel_msr, cccr_msr;
468 unsigned int evntsel, cccr_val;
469 unsigned int misc_enable, dummy;
470 unsigned int ht_num;
471 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
472
473 rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
474 if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
475 return 0;
476
477#ifdef CONFIG_SMP
478 /* detect which hyperthread we are on */
479 if (smp_num_siblings == 2) {
480 unsigned int ebx, apicid;
481
482 ebx = cpuid_ebx(1);
483 apicid = (ebx >> 24) & 0xff;
484 ht_num = apicid & 1;
485 } else
486#endif
487 ht_num = 0;
488
489	/*
490	 * Performance counters are shared resources;
491	 * assign each hyperthread its own set.
492	 * (Re-using the ESCR0 register seems safe
493	 * and keeps the cccr_val the same.)
494	 */
495 if (!ht_num) {
496 /* logical cpu 0 */
497 perfctr_msr = MSR_P4_IQ_PERFCTR0;
498 evntsel_msr = MSR_P4_CRU_ESCR0;
499 cccr_msr = MSR_P4_IQ_CCCR0;
500 cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
501
502		/*
503		 * If we're running on the kdump kernel, or in some similar
504		 * situation, other performance counter registers may still
505		 * be programmed to raise interrupts, and they'll keep
506		 * interrupting forever because of the P4_CCCR_OVF quirk.
507		 * So ACK all pending interrupts and disable all the counter
508		 * registers here, before re-enabling NMI delivery. See
509		 * p4_rearm() for details on the P4_CCCR_OVF quirk.
510		 */
511 if (reset_devices) {
512 unsigned int low, high;
513 int i;
514
515 for (i = 0; i < P4_CONTROLS; i++) {
516 rdmsr(p4_controls[i], low, high);
517 low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF);
518 wrmsr(p4_controls[i], low, high);
519 }
520 }
521 } else {
522 /* logical cpu 1 */
523 perfctr_msr = MSR_P4_IQ_PERFCTR1;
524 evntsel_msr = MSR_P4_CRU_ESCR0;
525 cccr_msr = MSR_P4_IQ_CCCR1;
526
527 /* Pentium 4 D processors don't support P4_CCCR_OVF_PMI1 */
528 if (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask == 4)
529 cccr_val = P4_CCCR_OVF_PMI0;
530 else
531 cccr_val = P4_CCCR_OVF_PMI1;
532 cccr_val |= P4_CCCR_ESCR_SELECT(4);
533 }
534
535 evntsel = P4_ESCR_EVENT_SELECT(0x3F)
536 | P4_ESCR_OS
537 | P4_ESCR_USR;
538
539 cccr_val |= P4_CCCR_THRESHOLD(15)
540 | P4_CCCR_COMPLEMENT
541 | P4_CCCR_COMPARE
542 | P4_CCCR_REQUIRED;
543
544 wrmsr(evntsel_msr, evntsel, 0);
545 wrmsr(cccr_msr, cccr_val, 0);
546 write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
547
548 wd->perfctr_msr = perfctr_msr;
549 wd->evntsel_msr = evntsel_msr;
550 wd->cccr_msr = cccr_msr;
551
552 /* ok, everything is initialized, announce that we're set */
553 cpu_nmi_set_wd_enabled();
554
555 apic_write(APIC_LVTPC, APIC_DM_NMI);
556 cccr_val |= P4_CCCR_ENABLE;
557 wrmsr(cccr_msr, cccr_val, 0);
558 return 1;
559}
560
561static void stop_p4_watchdog(void)
562{
563 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
564 wrmsr(wd->cccr_msr, 0, 0);
565 wrmsr(wd->evntsel_msr, 0, 0);
566}
567
568static int p4_reserve(void)
569{
570 if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0))
571 return 0;
572#ifdef CONFIG_SMP
573 if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1))
574 goto fail1;
575#endif
576 if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
577 goto fail2;
578 /* RED-PEN why is ESCR1 not reserved here? */
579 return 1;
580 fail2:
581#ifdef CONFIG_SMP
582 if (smp_num_siblings > 1)
583 release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
584 fail1:
585#endif
586 release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
587 return 0;
588}
589
590static void p4_unreserve(void)
591{
592#ifdef CONFIG_SMP
593 if (smp_num_siblings > 1)
594 release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
595#endif
596 release_evntsel_nmi(MSR_P4_CRU_ESCR0);
597 release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
598}
599
600static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
601{
602 unsigned dummy;
603 /*
604 * P4 quirks:
605 * - An overflown perfctr will assert its interrupt
606 * until the OVF flag in its CCCR is cleared.
607 * - LVTPC is masked on interrupt and must be
608 * unmasked by the LVTPC handler.
609 */
610 rdmsrl(wd->cccr_msr, dummy);
611 dummy &= ~P4_CCCR_OVF;
612 wrmsrl(wd->cccr_msr, dummy);
613 apic_write(APIC_LVTPC, APIC_DM_NMI);
614 /* start the cycle over again */
615 write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
616}
617
618static const struct wd_ops p4_wd_ops = {
619 .reserve = p4_reserve,
620 .unreserve = p4_unreserve,
621 .setup = setup_p4_watchdog,
622 .rearm = p4_rearm,
623 .stop = stop_p4_watchdog,
624 /* RED-PEN this is wrong for the other sibling */
625 .perfctr = MSR_P4_BPU_PERFCTR0,
626 .evntsel = MSR_P4_BSU_ESCR0,
627 .checkbit = 1ULL << 39,
628};
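/*
 * [Editor's note, not part of the original file:] spelled out, the CCCR
 * value that setup_p4_watchdog() programs for logical CPU 0 is the value
 * below. The complemented threshold of 15 turns the compare into
 * "event count <= 15", which every cycle satisfies, so IQ_COUNTER0 ticks
 * once per cycle and behaves like a clock, as the comment above
 * setup_p4_watchdog() describes. Illustration only.
 */
static inline unsigned int example_p4_cccr_cpu0(void)
{
	return P4_CCCR_OVF_PMI0		/* raise a PMI (delivered as NMI) */
	     | P4_CCCR_ESCR_SELECT(4)	/* pair the CCCR with CRU_ESCR0 */
	     | P4_CCCR_THRESHOLD(15)
	     | P4_CCCR_COMPLEMENT	/* compare "<=" rather than ">" */
	     | P4_CCCR_COMPARE
	     | P4_CCCR_REQUIRED
	     | P4_CCCR_ENABLE;		/* setup code sets this bit last */
}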
629
630/*
631 * Watchdog using the Intel architected PerfMon.
632 * Used for Core2 and hopefully all future Intel CPUs.
633 */
634#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
635#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
636
637static struct wd_ops intel_arch_wd_ops;
638
639static int setup_intel_arch_watchdog(unsigned nmi_hz)
640{
641 unsigned int ebx;
642 union cpuid10_eax eax;
643 unsigned int unused;
644 unsigned int perfctr_msr, evntsel_msr;
645 unsigned int evntsel;
646 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
647
648 /*
649	 * Check whether the Architectural PerfMon supports
650	 * the Unhalted Core Cycles event.
651	 * NOTE: the corresponding bit being 0 in EBX means the event is present.
652 */
653 cpuid(10, &(eax.full), &ebx, &unused, &unused);
654 if ((eax.split.mask_length <
655 (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
656 (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
657 return 0;
658
659 perfctr_msr = wd_ops->perfctr;
660 evntsel_msr = wd_ops->evntsel;
661
662 wrmsrl(perfctr_msr, 0UL);
663
664 evntsel = ARCH_PERFMON_EVENTSEL_INT
665 | ARCH_PERFMON_EVENTSEL_OS
666 | ARCH_PERFMON_EVENTSEL_USR
667 | ARCH_PERFMON_NMI_EVENT_SEL
668 | ARCH_PERFMON_NMI_EVENT_UMASK;
669
670 /* setup the timer */
671 wrmsr(evntsel_msr, evntsel, 0);
672 nmi_hz = adjust_for_32bit_ctr(nmi_hz);
673 write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
674
675 wd->perfctr_msr = perfctr_msr;
676 wd->evntsel_msr = evntsel_msr;
677 wd->cccr_msr = 0; /* unused */
678
679 /* ok, everything is initialized, announce that we're set */
680 cpu_nmi_set_wd_enabled();
681
682 apic_write(APIC_LVTPC, APIC_DM_NMI);
683 evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE;
684 wrmsr(evntsel_msr, evntsel, 0);
685 intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
686 return 1;
687}
688
689static struct wd_ops intel_arch_wd_ops __read_mostly = {
690 .reserve = single_msr_reserve,
691 .unreserve = single_msr_unreserve,
692 .setup = setup_intel_arch_watchdog,
693 .rearm = p6_rearm,
694 .stop = single_msr_stop_watchdog,
695 .perfctr = MSR_ARCH_PERFMON_PERFCTR1,
696 .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
697};
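/*
 * [Editor's sketch, not part of the original file:] where the architected
 * PerfMon checkbit comes from. CPUID leaf 0xA reports the counter bit
 * width in EAX (union cpuid10_eax, split.bit_width); the watchdog tests
 * the counter's top bit to see whether the programmed negative value is
 * still counting up toward overflow. Illustration only.
 */
static inline u64 example_arch_perfmon_checkbit(void)
{
	union cpuid10_eax eax;
	unsigned int ebx, unused;

	cpuid(10, &eax.full, &ebx, &unused, &unused);
	return 1ULL << (eax.split.bit_width - 1);
}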
698
699static void probe_nmi_watchdog(void)
700{
701 switch (boot_cpu_data.x86_vendor) {
702 case X86_VENDOR_AMD:
703 if (boot_cpu_data.x86 == 6 ||
704 (boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x15))
705 wd_ops = &k7_wd_ops;
706 return;
707 case X86_VENDOR_INTEL:
708		/* Work around CPUs where perfctr1 doesn't have a working
709		 * enable bit, as described in the following errata:
710 * AE49 Core Duo and Intel Core Solo 65 nm
711 * AN49 Intel Pentium Dual-Core
712 * AF49 Dual-Core Intel Xeon Processor LV
713 */
714 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) ||
715 ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 15 &&
716 boot_cpu_data.x86_mask == 4))) {
717 intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0;
718 intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0;
719 }
720 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
721 wd_ops = &intel_arch_wd_ops;
722 break;
723 }
724 switch (boot_cpu_data.x86) {
725 case 6:
726 if (boot_cpu_data.x86_model > 13)
727 return;
728
729 wd_ops = &p6_wd_ops;
730 break;
731 case 15:
732 wd_ops = &p4_wd_ops;
733 break;
734 default:
735 return;
736 }
737 break;
738 }
739}
740
741/* Interface to nmi.c */
742
743int lapic_watchdog_init(unsigned nmi_hz)
744{
745 if (!wd_ops) {
746 probe_nmi_watchdog();
747 if (!wd_ops) {
748 printk(KERN_INFO "NMI watchdog: CPU not supported\n");
749 return -1;
750 }
751
752 if (!wd_ops->reserve()) {
753 printk(KERN_ERR
754 "NMI watchdog: cannot reserve perfctrs\n");
755 return -1;
756 }
757 }
758
759 if (!(wd_ops->setup(nmi_hz))) {
760 printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n",
761 raw_smp_processor_id());
762 return -1;
763 }
764
765 return 0;
766}
767
768void lapic_watchdog_stop(void)
769{
770 if (wd_ops)
771 wd_ops->stop();
772}
773
774unsigned lapic_adjust_nmi_hz(unsigned hz)
775{
776 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
777 if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
778 wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
779 hz = adjust_for_32bit_ctr(hz);
780 return hz;
781}
782
783int __kprobes lapic_wd_event(unsigned nmi_hz)
784{
785 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
786 u64 ctr;
787
788 rdmsrl(wd->perfctr_msr, ctr);
789 if (ctr & wd_ops->checkbit) /* perfctr still running? */
790 return 0;
791
792 wd_ops->rearm(wd, nmi_hz);
793 return 1;
794}
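/*
 * [Editor's sketch, not part of the original file:] how the generic NMI
 * code is expected to drive this interface. lapic_wd_event() returns 1
 * when the perfctr has overflowed (so the NMI was ours) and re-arms it;
 * 0 means the NMI came from somewhere else. The caller below is a
 * simplified illustration, not the actual nmi.c code.
 */
static int example_watchdog_nmi(unsigned nmi_hz)
{
	if (!lapic_wd_event(nmi_hz))
		return 0;	/* not the watchdog; try other NMI sources */

	/* Watchdog tick: this is where nmi.c checks for CPU lockups. */
	return 1;
}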
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 6e8752c1bd52..8474c998cbd4 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -175,21 +175,21 @@ static const struct stacktrace_ops print_trace_ops = {
175 175
176void 176void
177show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, 177show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
178 unsigned long *stack, unsigned long bp, char *log_lvl) 178 unsigned long *stack, char *log_lvl)
179{ 179{
180 printk("%sCall Trace:\n", log_lvl); 180 printk("%sCall Trace:\n", log_lvl);
181 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); 181 dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
182} 182}
183 183
184void show_trace(struct task_struct *task, struct pt_regs *regs, 184void show_trace(struct task_struct *task, struct pt_regs *regs,
185 unsigned long *stack, unsigned long bp) 185 unsigned long *stack)
186{ 186{
187 show_trace_log_lvl(task, regs, stack, bp, ""); 187 show_trace_log_lvl(task, regs, stack, "");
188} 188}
189 189
190void show_stack(struct task_struct *task, unsigned long *sp) 190void show_stack(struct task_struct *task, unsigned long *sp)
191{ 191{
192 show_stack_log_lvl(task, NULL, sp, 0, ""); 192 show_stack_log_lvl(task, NULL, sp, "");
193} 193}
194 194
195/* 195/*
@@ -210,7 +210,7 @@ void dump_stack(void)
210 init_utsname()->release, 210 init_utsname()->release,
211 (int)strcspn(init_utsname()->version, " "), 211 (int)strcspn(init_utsname()->version, " "),
212 init_utsname()->version); 212 init_utsname()->version);
213 show_trace(NULL, NULL, &stack, bp); 213 show_trace(NULL, NULL, &stack);
214} 214}
215EXPORT_SYMBOL(dump_stack); 215EXPORT_SYMBOL(dump_stack);
216 216
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 1bc7f75a5bda..74cc1eda384b 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -17,11 +17,12 @@
17#include <asm/stacktrace.h> 17#include <asm/stacktrace.h>
18 18
19 19
20void dump_trace(struct task_struct *task, struct pt_regs *regs, 20void dump_trace(struct task_struct *task,
21 unsigned long *stack, unsigned long bp, 21 struct pt_regs *regs, unsigned long *stack,
22 const struct stacktrace_ops *ops, void *data) 22 const struct stacktrace_ops *ops, void *data)
23{ 23{
24 int graph = 0; 24 int graph = 0;
25 unsigned long bp;
25 26
26 if (!task) 27 if (!task)
27 task = current; 28 task = current;
@@ -34,18 +35,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
34 stack = (unsigned long *)task->thread.sp; 35 stack = (unsigned long *)task->thread.sp;
35 } 36 }
36 37
37#ifdef CONFIG_FRAME_POINTER 38 bp = stack_frame(task, regs);
38 if (!bp) {
39 if (task == current) {
40 /* Grab bp right from our regs */
41 get_bp(bp);
42 } else {
43 /* bp is the last reg pushed by switch_to */
44 bp = *(unsigned long *) task->thread.sp;
45 }
46 }
47#endif
48
49 for (;;) { 39 for (;;) {
50 struct thread_info *context; 40 struct thread_info *context;
51 41
@@ -65,7 +55,7 @@ EXPORT_SYMBOL(dump_trace);
65 55
66void 56void
67show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, 57show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
68 unsigned long *sp, unsigned long bp, char *log_lvl) 58 unsigned long *sp, char *log_lvl)
69{ 59{
70 unsigned long *stack; 60 unsigned long *stack;
71 int i; 61 int i;
@@ -87,7 +77,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
87 touch_nmi_watchdog(); 77 touch_nmi_watchdog();
88 } 78 }
89 printk(KERN_CONT "\n"); 79 printk(KERN_CONT "\n");
90 show_trace_log_lvl(task, regs, sp, bp, log_lvl); 80 show_trace_log_lvl(task, regs, sp, log_lvl);
91} 81}
92 82
93 83
@@ -112,8 +102,7 @@ void show_registers(struct pt_regs *regs)
112 u8 *ip; 102 u8 *ip;
113 103
114 printk(KERN_EMERG "Stack:\n"); 104 printk(KERN_EMERG "Stack:\n");
115 show_stack_log_lvl(NULL, regs, &regs->sp, 105 show_stack_log_lvl(NULL, regs, &regs->sp, KERN_EMERG);
116 0, KERN_EMERG);
117 106
118 printk(KERN_EMERG "Code: "); 107 printk(KERN_EMERG "Code: ");
119 108
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 6a340485249a..64101335de19 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -139,8 +139,8 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
139 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack 139 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
140 */ 140 */
141 141
142void dump_trace(struct task_struct *task, struct pt_regs *regs, 142void dump_trace(struct task_struct *task,
143 unsigned long *stack, unsigned long bp, 143 struct pt_regs *regs, unsigned long *stack,
144 const struct stacktrace_ops *ops, void *data) 144 const struct stacktrace_ops *ops, void *data)
145{ 145{
146 const unsigned cpu = get_cpu(); 146 const unsigned cpu = get_cpu();
@@ -149,6 +149,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
149 unsigned used = 0; 149 unsigned used = 0;
150 struct thread_info *tinfo; 150 struct thread_info *tinfo;
151 int graph = 0; 151 int graph = 0;
152 unsigned long bp;
152 153
153 if (!task) 154 if (!task)
154 task = current; 155 task = current;
@@ -160,18 +161,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
160 stack = (unsigned long *)task->thread.sp; 161 stack = (unsigned long *)task->thread.sp;
161 } 162 }
162 163
163#ifdef CONFIG_FRAME_POINTER 164 bp = stack_frame(task, regs);
164 if (!bp) {
165 if (task == current) {
166 /* Grab bp right from our regs */
167 get_bp(bp);
168 } else {
169 /* bp is the last reg pushed by switch_to */
170 bp = *(unsigned long *) task->thread.sp;
171 }
172 }
173#endif
174
175 /* 165 /*
176 * Print function call entries in all stacks, starting at the 166 * Print function call entries in all stacks, starting at the
177 * current stack address. If the stacks consist of nested 167 * current stack address. If the stacks consist of nested
@@ -235,7 +225,7 @@ EXPORT_SYMBOL(dump_trace);
235 225
236void 226void
237show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, 227show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
238 unsigned long *sp, unsigned long bp, char *log_lvl) 228 unsigned long *sp, char *log_lvl)
239{ 229{
240 unsigned long *irq_stack_end; 230 unsigned long *irq_stack_end;
241 unsigned long *irq_stack; 231 unsigned long *irq_stack;
@@ -279,7 +269,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
279 preempt_enable(); 269 preempt_enable();
280 270
281 printk(KERN_CONT "\n"); 271 printk(KERN_CONT "\n");
282 show_trace_log_lvl(task, regs, sp, bp, log_lvl); 272 show_trace_log_lvl(task, regs, sp, log_lvl);
283} 273}
284 274
285void show_registers(struct pt_regs *regs) 275void show_registers(struct pt_regs *regs)
@@ -308,7 +298,7 @@ void show_registers(struct pt_regs *regs)
308 298
309 printk(KERN_EMERG "Stack:\n"); 299 printk(KERN_EMERG "Stack:\n");
310 show_stack_log_lvl(NULL, regs, (unsigned long *)sp, 300 show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
311 regs->bp, KERN_EMERG); 301 KERN_EMERG);
312 302
313 printk(KERN_EMERG "Code: "); 303 printk(KERN_EMERG "Code: ");
314 304
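[Editor's note, not part of the patch:] for context on the dumpstack changes above, callers no longer pass a frame pointer; dump_trace() now derives it internally via the new stack_frame(task, regs) helper. A minimal sketch of a caller under the new signature, modeled on the save_stack_trace() conversion in arch/x86/kernel/stacktrace.c later in this patch (save_stack_ops comes from that file; the function name here is illustrative):

static void example_save_trace(struct task_struct *task, struct pt_regs *regs,
			       struct stack_trace *trace)
{
	/* No bp argument: dump_trace() calls stack_frame(task, regs) itself. */
	dump_trace(task, regs, NULL, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}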
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 4572f25f9325..cd28a350f7f9 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -240,7 +240,7 @@ static int __init setup_early_printk(char *buf)
240 if (!strncmp(buf, "xen", 3)) 240 if (!strncmp(buf, "xen", 3))
241 early_console_register(&xenboot_console, keep); 241 early_console_register(&xenboot_console, keep);
242#endif 242#endif
243#ifdef CONFIG_X86_MRST_EARLY_PRINTK 243#ifdef CONFIG_EARLY_PRINTK_MRST
244 if (!strncmp(buf, "mrst", 4)) { 244 if (!strncmp(buf, "mrst", 4)) {
245 mrst_early_console_init(); 245 mrst_early_console_init();
246 early_console_register(&early_mrst_console, keep); 246 early_console_register(&early_mrst_console, keep);
@@ -250,7 +250,6 @@ static int __init setup_early_printk(char *buf)
250 hsu_early_console_init(); 250 hsu_early_console_init();
251 early_console_register(&early_hsu_console, keep); 251 early_console_register(&early_hsu_console, keep);
252 } 252 }
253
254#endif 253#endif
255 buf++; 254 buf++;
256 } 255 }
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 3afb33f14d2d..298448656b60 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -19,6 +19,7 @@
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/module.h>
22 23
23#include <trace/syscall.h> 24#include <trace/syscall.h>
24 25
@@ -49,6 +50,7 @@ static DEFINE_PER_CPU(int, save_modifying_code);
49int ftrace_arch_code_modify_prepare(void) 50int ftrace_arch_code_modify_prepare(void)
50{ 51{
51 set_kernel_text_rw(); 52 set_kernel_text_rw();
53 set_all_modules_text_rw();
52 modifying_code = 1; 54 modifying_code = 1;
53 return 0; 55 return 0;
54} 56}
@@ -56,6 +58,7 @@ int ftrace_arch_code_modify_prepare(void)
56int ftrace_arch_code_modify_post_process(void) 58int ftrace_arch_code_modify_post_process(void)
57{ 59{
58 modifying_code = 0; 60 modifying_code = 0;
61 set_all_modules_text_ro();
59 set_kernel_text_ro(); 62 set_kernel_text_ro();
60 return 0; 63 return 0;
61} 64}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 763310165fa0..7f138b3c3c52 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -61,6 +61,9 @@ void __init i386_start_kernel(void)
61 case X86_SUBARCH_MRST: 61 case X86_SUBARCH_MRST:
62 x86_mrst_early_setup(); 62 x86_mrst_early_setup();
63 break; 63 break;
64 case X86_SUBARCH_CE4100:
65 x86_ce4100_early_setup();
66 break;
64 default: 67 default:
65 i386_default_early_setup(); 68 i386_default_early_setup();
66 break; 69 break;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index c0dbd9ac24f0..9f54b209c378 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -139,39 +139,6 @@ ENTRY(startup_32)
139 movl %eax, pa(olpc_ofw_pgd) 139 movl %eax, pa(olpc_ofw_pgd)
140#endif 140#endif
141 141
142#ifdef CONFIG_PARAVIRT
143 /* This can only trip for a broken bootloader... */
144 cmpw $0x207, pa(boot_params + BP_version)
145 jb default_entry
146
147 /* Paravirt-compatible boot parameters. Look to see what architecture
148 we're booting under. */
149 movl pa(boot_params + BP_hardware_subarch), %eax
150 cmpl $num_subarch_entries, %eax
151 jae bad_subarch
152
153 movl pa(subarch_entries)(,%eax,4), %eax
154 subl $__PAGE_OFFSET, %eax
155 jmp *%eax
156
157bad_subarch:
158WEAK(lguest_entry)
159WEAK(xen_entry)
160 /* Unknown implementation; there's really
161 nothing we can do at this point. */
162 ud2a
163
164 __INITDATA
165
166subarch_entries:
167 .long default_entry /* normal x86/PC */
168 .long lguest_entry /* lguest hypervisor */
169 .long xen_entry /* Xen hypervisor */
170 .long default_entry /* Moorestown MID */
171num_subarch_entries = (. - subarch_entries) / 4
172.previous
173#endif /* CONFIG_PARAVIRT */
174
175/* 142/*
176 * Initialize page tables. This creates a PDE and a set of page 143 * Initialize page tables. This creates a PDE and a set of page
177 * tables, which are located immediately beyond __brk_base. The variable 144 * tables, which are located immediately beyond __brk_base. The variable
@@ -181,7 +148,6 @@ num_subarch_entries = (. - subarch_entries) / 4
181 * 148 *
182 * Note that the stack is not yet set up! 149 * Note that the stack is not yet set up!
183 */ 150 */
184default_entry:
185#ifdef CONFIG_X86_PAE 151#ifdef CONFIG_X86_PAE
186 152
187 /* 153 /*
@@ -261,7 +227,42 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
261 movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax 227 movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
262 movl %eax,pa(initial_page_table+0xffc) 228 movl %eax,pa(initial_page_table+0xffc)
263#endif 229#endif
264 jmp 3f 230
231#ifdef CONFIG_PARAVIRT
232 /* This can only trip for a broken bootloader... */
233 cmpw $0x207, pa(boot_params + BP_version)
234 jb default_entry
235
236 /* Paravirt-compatible boot parameters. Look to see what architecture
237 we're booting under. */
238 movl pa(boot_params + BP_hardware_subarch), %eax
239 cmpl $num_subarch_entries, %eax
240 jae bad_subarch
241
242 movl pa(subarch_entries)(,%eax,4), %eax
243 subl $__PAGE_OFFSET, %eax
244 jmp *%eax
245
246bad_subarch:
247WEAK(lguest_entry)
248WEAK(xen_entry)
249 /* Unknown implementation; there's really
250 nothing we can do at this point. */
251 ud2a
252
253 __INITDATA
254
255subarch_entries:
256 .long default_entry /* normal x86/PC */
257 .long lguest_entry /* lguest hypervisor */
258 .long xen_entry /* Xen hypervisor */
259 .long default_entry /* Moorestown MID */
260num_subarch_entries = (. - subarch_entries) / 4
261.previous
262#else
263 jmp default_entry
264#endif /* CONFIG_PARAVIRT */
265
265/* 266/*
266 * Non-boot CPU entry point; entered from trampoline.S 267 * Non-boot CPU entry point; entered from trampoline.S
267 * We can't lgdt here, because lgdt itself uses a data segment, but 268 * We can't lgdt here, because lgdt itself uses a data segment, but
@@ -282,7 +283,7 @@ ENTRY(startup_32_smp)
282 movl %eax,%fs 283 movl %eax,%fs
283 movl %eax,%gs 284 movl %eax,%gs
284#endif /* CONFIG_SMP */ 285#endif /* CONFIG_SMP */
2853: 286default_entry:
286 287
287/* 288/*
288 * New page tables may be in 4Mbyte page mode and may 289 * New page tables may be in 4Mbyte page mode and may
@@ -316,6 +317,10 @@ ENTRY(startup_32_smp)
316 subl $0x80000001, %eax 317 subl $0x80000001, %eax
317 cmpl $(0x8000ffff-0x80000001), %eax 318 cmpl $(0x8000ffff-0x80000001), %eax
318 ja 6f 319 ja 6f
320
321 /* Clear bogus XD_DISABLE bits */
322 call verify_cpu
323
319 mov $0x80000001, %eax 324 mov $0x80000001, %eax
320 cpuid 325 cpuid
321 /* Execute Disable bit supported? */ 326 /* Execute Disable bit supported? */
@@ -611,6 +616,8 @@ ignore_int:
611#endif 616#endif
612 iret 617 iret
613 618
619#include "verify_cpu.S"
620
614 __REFDATA 621 __REFDATA
615.align 4 622.align 4
616ENTRY(initial_code) 623ENTRY(initial_code)
@@ -622,13 +629,13 @@ ENTRY(initial_code)
622__PAGE_ALIGNED_BSS 629__PAGE_ALIGNED_BSS
623 .align PAGE_SIZE_asm 630 .align PAGE_SIZE_asm
624#ifdef CONFIG_X86_PAE 631#ifdef CONFIG_X86_PAE
625ENTRY(initial_pg_pmd) 632initial_pg_pmd:
626 .fill 1024*KPMDS,4,0 633 .fill 1024*KPMDS,4,0
627#else 634#else
628ENTRY(initial_page_table) 635ENTRY(initial_page_table)
629 .fill 1024,4,0 636 .fill 1024,4,0
630#endif 637#endif
631ENTRY(initial_pg_fixmap) 638initial_pg_fixmap:
632 .fill 1024,4,0 639 .fill 1024,4,0
633ENTRY(empty_zero_page) 640ENTRY(empty_zero_page)
634 .fill 4096,1,0 641 .fill 4096,1,0
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 1cbd54c0df99..5940282bd2f9 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -1184,6 +1184,10 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
1184{ 1184{
1185 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 1185 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1186 1186
1187 /* This is possible if op is under delayed unoptimization */
1188 if (kprobe_disabled(&op->kp))
1189 return;
1190
1187 preempt_disable(); 1191 preempt_disable();
1188 if (kprobe_running()) { 1192 if (kprobe_running()) {
1189 kprobes_inc_nmissed_count(&op->kp); 1193 kprobes_inc_nmissed_count(&op->kp);
@@ -1401,10 +1405,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
1401 return 0; 1405 return 0;
1402} 1406}
1403 1407
1404/* Replace a breakpoint (int3) with a relative jump. */ 1408#define MAX_OPTIMIZE_PROBES 256
1405int __kprobes arch_optimize_kprobe(struct optimized_kprobe *op) 1409static struct text_poke_param *jump_poke_params;
1410static struct jump_poke_buffer {
1411 u8 buf[RELATIVEJUMP_SIZE];
1412} *jump_poke_bufs;
1413
1414static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
1415 u8 *insn_buf,
1416 struct optimized_kprobe *op)
1406{ 1417{
1407 unsigned char jmp_code[RELATIVEJUMP_SIZE];
1408 s32 rel = (s32)((long)op->optinsn.insn - 1418 s32 rel = (s32)((long)op->optinsn.insn -
1409 ((long)op->kp.addr + RELATIVEJUMP_SIZE)); 1419 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
1410 1420
@@ -1412,16 +1422,79 @@ int __kprobes arch_optimize_kprobe(struct optimized_kprobe *op)
1412 memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, 1422 memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
1413 RELATIVE_ADDR_SIZE); 1423 RELATIVE_ADDR_SIZE);
1414 1424
1415 jmp_code[0] = RELATIVEJUMP_OPCODE; 1425 insn_buf[0] = RELATIVEJUMP_OPCODE;
1416 *(s32 *)(&jmp_code[1]) = rel; 1426 *(s32 *)(&insn_buf[1]) = rel;
1427
1428 tprm->addr = op->kp.addr;
1429 tprm->opcode = insn_buf;
1430 tprm->len = RELATIVEJUMP_SIZE;
1431}
1432
1433/*
1434 * Replace breakpoints (int3) with relative jumps.
1435 * Caller must hold kprobe_mutex and text_mutex.
1436 */
1437void __kprobes arch_optimize_kprobes(struct list_head *oplist)
1438{
1439 struct optimized_kprobe *op, *tmp;
1440 int c = 0;
1441
1442 list_for_each_entry_safe(op, tmp, oplist, list) {
1443 WARN_ON(kprobe_disabled(&op->kp));
1444 /* Setup param */
1445 setup_optimize_kprobe(&jump_poke_params[c],
1446 jump_poke_bufs[c].buf, op);
1447 list_del_init(&op->list);
1448 if (++c >= MAX_OPTIMIZE_PROBES)
1449 break;
1450 }
1417 1451
1418 /* 1452 /*
1419 * text_poke_smp doesn't support NMI/MCE code modifying. 1453 * text_poke_smp doesn't support NMI/MCE code modifying.
1420 * However, since kprobes itself also doesn't support NMI/MCE 1454 * However, since kprobes itself also doesn't support NMI/MCE
1421 * code probing, it's not a problem. 1455 * code probing, it's not a problem.
1422 */ 1456 */
1423 text_poke_smp(op->kp.addr, jmp_code, RELATIVEJUMP_SIZE); 1457 text_poke_smp_batch(jump_poke_params, c);
1424 return 0; 1458}
1459
1460static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
1461 u8 *insn_buf,
1462 struct optimized_kprobe *op)
1463{
1464 /* Set int3 to first byte for kprobes */
1465 insn_buf[0] = BREAKPOINT_INSTRUCTION;
1466 memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
1467
1468 tprm->addr = op->kp.addr;
1469 tprm->opcode = insn_buf;
1470 tprm->len = RELATIVEJUMP_SIZE;
1471}
1472
1473/*
1474 * Recover original instructions and breakpoints from relative jumps.
1475 * Caller must hold kprobe_mutex.
1476 */
1477extern void arch_unoptimize_kprobes(struct list_head *oplist,
1478 struct list_head *done_list)
1479{
1480 struct optimized_kprobe *op, *tmp;
1481 int c = 0;
1482
1483 list_for_each_entry_safe(op, tmp, oplist, list) {
1484 /* Setup param */
1485 setup_unoptimize_kprobe(&jump_poke_params[c],
1486 jump_poke_bufs[c].buf, op);
1487 list_move(&op->list, done_list);
1488 if (++c >= MAX_OPTIMIZE_PROBES)
1489 break;
1490 }
1491
1492 /*
1493 * text_poke_smp doesn't support NMI/MCE code modifying.
1494 * However, since kprobes itself also doesn't support NMI/MCE
1495 * code probing, it's not a problem.
1496 */
1497 text_poke_smp_batch(jump_poke_params, c);
1425} 1498}
1426 1499
1427/* Replace a relative jump with a breakpoint (int3). */ 1500/* Replace a relative jump with a breakpoint (int3). */
@@ -1453,11 +1526,35 @@ static int __kprobes setup_detour_execution(struct kprobe *p,
1453 } 1526 }
1454 return 0; 1527 return 0;
1455} 1528}
1529
1530static int __kprobes init_poke_params(void)
1531{
1532 /* Allocate code buffer and parameter array */
1533 jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
1534 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
1535 if (!jump_poke_bufs)
1536 return -ENOMEM;
1537
1538 jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
1539 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
1540 if (!jump_poke_params) {
1541 kfree(jump_poke_bufs);
1542 jump_poke_bufs = NULL;
1543 return -ENOMEM;
1544 }
1545
1546 return 0;
1547}
1548#else /* !CONFIG_OPTPROBES */
1549static int __kprobes init_poke_params(void)
1550{
1551 return 0;
1552}
1456#endif 1553#endif
1457 1554
1458int __init arch_init_kprobes(void) 1555int __init arch_init_kprobes(void)
1459{ 1556{
1460 return 0; 1557 return init_poke_params();
1461} 1558}
1462 1559
1463int __kprobes arch_trampoline_kprobe(struct kprobe *p) 1560int __kprobes arch_trampoline_kprobe(struct kprobe *p)
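[Editor's note, not part of the patch:] the kprobes change above batches jump patching so the costly cross-CPU synchronization inside text_poke_smp_batch() runs once per batch (up to MAX_OPTIMIZE_PROBES probes) instead of once per probe. A minimal sketch of the parameter idiom, assuming only the text_poke_param fields the diff itself uses (addr, opcode, len); the helper name is illustrative:

static void example_poke_one(void *addr, u8 *insn, size_t len)
{
	struct text_poke_param p = {
		.addr	= addr,
		.opcode	= insn,
		.len	= len,
	};

	/* A batch of one: same interface, but real callers pass arrays. */
	text_poke_smp_batch(&p, 1);
}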
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index ce0cb4721c9a..0fe6d1a66c38 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -155,12 +155,6 @@ static int apply_microcode_amd(int cpu)
155 return 0; 155 return 0;
156} 156}
157 157
158static int get_ucode_data(void *to, const u8 *from, size_t n)
159{
160 memcpy(to, from, n);
161 return 0;
162}
163
164static void * 158static void *
165get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size) 159get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size)
166{ 160{
@@ -168,8 +162,7 @@ get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size)
168 u8 section_hdr[UCODE_CONTAINER_SECTION_HDR]; 162 u8 section_hdr[UCODE_CONTAINER_SECTION_HDR];
169 void *mc; 163 void *mc;
170 164
171 if (get_ucode_data(section_hdr, buf, UCODE_CONTAINER_SECTION_HDR)) 165 get_ucode_data(section_hdr, buf, UCODE_CONTAINER_SECTION_HDR);
172 return NULL;
173 166
174 if (section_hdr[0] != UCODE_UCODE_TYPE) { 167 if (section_hdr[0] != UCODE_UCODE_TYPE) {
175 pr_err("error: invalid type field in container file section header\n"); 168 pr_err("error: invalid type field in container file section header\n");
@@ -183,16 +176,13 @@ get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size)
183 return NULL; 176 return NULL;
184 } 177 }
185 178
186 mc = vmalloc(UCODE_MAX_SIZE); 179 mc = vzalloc(UCODE_MAX_SIZE);
187 if (mc) { 180 if (!mc)
188 memset(mc, 0, UCODE_MAX_SIZE); 181 return NULL;
189 if (get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, 182
190 total_size)) { 183 get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, total_size);
191 vfree(mc); 184 *mc_size = total_size + UCODE_CONTAINER_SECTION_HDR;
192 mc = NULL; 185
193 } else
194 *mc_size = total_size + UCODE_CONTAINER_SECTION_HDR;
195 }
196 return mc; 186 return mc;
197} 187}
198 188
@@ -202,8 +192,7 @@ static int install_equiv_cpu_table(const u8 *buf)
202 unsigned int *buf_pos = (unsigned int *)container_hdr; 192 unsigned int *buf_pos = (unsigned int *)container_hdr;
203 unsigned long size; 193 unsigned long size;
204 194
205 if (get_ucode_data(&container_hdr, buf, UCODE_CONTAINER_HEADER_SIZE)) 195 get_ucode_data(&container_hdr, buf, UCODE_CONTAINER_HEADER_SIZE);
206 return 0;
207 196
208 size = buf_pos[2]; 197 size = buf_pos[2];
209 198
@@ -219,10 +208,7 @@ static int install_equiv_cpu_table(const u8 *buf)
219 } 208 }
220 209
221 buf += UCODE_CONTAINER_HEADER_SIZE; 210 buf += UCODE_CONTAINER_HEADER_SIZE;
222 if (get_ucode_data(equiv_cpu_table, buf, size)) { 211 get_ucode_data(equiv_cpu_table, buf, size);
223 vfree(equiv_cpu_table);
224 return 0;
225 }
226 212
227 return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */ 213 return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */
228} 214}
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index ba0f0ca9f280..c01ffa5b9b87 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -143,7 +143,7 @@ static void flush_gart(void)
143 143
144 spin_lock_irqsave(&iommu_bitmap_lock, flags); 144 spin_lock_irqsave(&iommu_bitmap_lock, flags);
145 if (need_flush) { 145 if (need_flush) {
146 k8_flush_garts(); 146 amd_flush_garts();
147 need_flush = false; 147 need_flush = false;
148 } 148 }
149 spin_unlock_irqrestore(&iommu_bitmap_lock, flags); 149 spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
@@ -561,17 +561,17 @@ static void enable_gart_translations(void)
561{ 561{
562 int i; 562 int i;
563 563
564 if (!k8_northbridges.gart_supported) 564 if (!amd_nb_has_feature(AMD_NB_GART))
565 return; 565 return;
566 566
567 for (i = 0; i < k8_northbridges.num; i++) { 567 for (i = 0; i < amd_nb_num(); i++) {
568 struct pci_dev *dev = k8_northbridges.nb_misc[i]; 568 struct pci_dev *dev = node_to_amd_nb(i)->misc;
569 569
570 enable_gart_translation(dev, __pa(agp_gatt_table)); 570 enable_gart_translation(dev, __pa(agp_gatt_table));
571 } 571 }
572 572
573 /* Flush the GART-TLB to remove stale entries */ 573 /* Flush the GART-TLB to remove stale entries */
574 k8_flush_garts(); 574 amd_flush_garts();
575} 575}
576 576
577/* 577/*
@@ -596,13 +596,13 @@ static void gart_fixup_northbridges(struct sys_device *dev)
596 if (!fix_up_north_bridges) 596 if (!fix_up_north_bridges)
597 return; 597 return;
598 598
599 if (!k8_northbridges.gart_supported) 599 if (!amd_nb_has_feature(AMD_NB_GART))
600 return; 600 return;
601 601
602 pr_info("PCI-DMA: Restoring GART aperture settings\n"); 602 pr_info("PCI-DMA: Restoring GART aperture settings\n");
603 603
604 for (i = 0; i < k8_northbridges.num; i++) { 604 for (i = 0; i < amd_nb_num(); i++) {
605 struct pci_dev *dev = k8_northbridges.nb_misc[i]; 605 struct pci_dev *dev = node_to_amd_nb(i)->misc;
606 606
607 /* 607 /*
608 * Don't enable translations just yet. That is the next 608 * Don't enable translations just yet. That is the next
@@ -644,7 +644,7 @@ static struct sys_device device_gart = {
644 * Private Northbridge GATT initialization in case we cannot use the 644 * Private Northbridge GATT initialization in case we cannot use the
645 * AGP driver for some reason. 645 * AGP driver for some reason.
646 */ 646 */
647static __init int init_k8_gatt(struct agp_kern_info *info) 647static __init int init_amd_gatt(struct agp_kern_info *info)
648{ 648{
649 unsigned aper_size, gatt_size, new_aper_size; 649 unsigned aper_size, gatt_size, new_aper_size;
650 unsigned aper_base, new_aper_base; 650 unsigned aper_base, new_aper_base;
@@ -656,8 +656,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
656 656
657 aper_size = aper_base = info->aper_size = 0; 657 aper_size = aper_base = info->aper_size = 0;
658 dev = NULL; 658 dev = NULL;
659 for (i = 0; i < k8_northbridges.num; i++) { 659 for (i = 0; i < amd_nb_num(); i++) {
660 dev = k8_northbridges.nb_misc[i]; 660 dev = node_to_amd_nb(i)->misc;
661 new_aper_base = read_aperture(dev, &new_aper_size); 661 new_aper_base = read_aperture(dev, &new_aper_size);
662 if (!new_aper_base) 662 if (!new_aper_base)
663 goto nommu; 663 goto nommu;
@@ -725,13 +725,13 @@ static void gart_iommu_shutdown(void)
725 if (!no_agp) 725 if (!no_agp)
726 return; 726 return;
727 727
728 if (!k8_northbridges.gart_supported) 728 if (!amd_nb_has_feature(AMD_NB_GART))
729 return; 729 return;
730 730
731 for (i = 0; i < k8_northbridges.num; i++) { 731 for (i = 0; i < amd_nb_num(); i++) {
732 u32 ctl; 732 u32 ctl;
733 733
734 dev = k8_northbridges.nb_misc[i]; 734 dev = node_to_amd_nb(i)->misc;
735 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); 735 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
736 736
737 ctl &= ~GARTEN; 737 ctl &= ~GARTEN;
@@ -749,14 +749,14 @@ int __init gart_iommu_init(void)
749 unsigned long scratch; 749 unsigned long scratch;
750 long i; 750 long i;
751 751
752 if (!k8_northbridges.gart_supported) 752 if (!amd_nb_has_feature(AMD_NB_GART))
753 return 0; 753 return 0;
754 754
755#ifndef CONFIG_AGP_AMD64 755#ifndef CONFIG_AGP_AMD64
756 no_agp = 1; 756 no_agp = 1;
757#else 757#else
758 /* Makefile puts PCI initialization via subsys_initcall first. */ 758 /* Makefile puts PCI initialization via subsys_initcall first. */
759 /* Add other K8 AGP bridge drivers here */ 759 /* Add other AMD AGP bridge drivers here */
760 no_agp = no_agp || 760 no_agp = no_agp ||
761 (agp_amd64_init() < 0) || 761 (agp_amd64_init() < 0) ||
762 (agp_copy_info(agp_bridge, &info) < 0); 762 (agp_copy_info(agp_bridge, &info) < 0);
@@ -765,7 +765,7 @@ int __init gart_iommu_init(void)
765 if (no_iommu || 765 if (no_iommu ||
766 (!force_iommu && max_pfn <= MAX_DMA32_PFN) || 766 (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
767 !gart_iommu_aperture || 767 !gart_iommu_aperture ||
768 (no_agp && init_k8_gatt(&info) < 0)) { 768 (no_agp && init_amd_gatt(&info) < 0)) {
769 if (max_pfn > MAX_DMA32_PFN) { 769 if (max_pfn > MAX_DMA32_PFN) {
770 pr_warning("More than 4GB of memory but GART IOMMU not available.\n"); 770 pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
771 pr_warning("falling back to iommu=soft.\n"); 771 pr_warning("falling back to iommu=soft.\n");
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 57d1868a86aa..c852041bfc3d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -91,8 +91,7 @@ void exit_thread(void)
91void show_regs(struct pt_regs *regs) 91void show_regs(struct pt_regs *regs)
92{ 92{
93 show_registers(regs); 93 show_registers(regs);
94 show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), 94 show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs));
95 regs->bp);
96} 95}
97 96
98void show_regs_common(void) 97void show_regs_common(void)
@@ -374,6 +373,7 @@ void default_idle(void)
374{ 373{
375 if (hlt_use_halt()) { 374 if (hlt_use_halt()) {
376 trace_power_start(POWER_CSTATE, 1, smp_processor_id()); 375 trace_power_start(POWER_CSTATE, 1, smp_processor_id());
376 trace_cpu_idle(1, smp_processor_id());
377 current_thread_info()->status &= ~TS_POLLING; 377 current_thread_info()->status &= ~TS_POLLING;
378 /* 378 /*
379 * TS_POLLING-cleared state must be visible before we 379 * TS_POLLING-cleared state must be visible before we
@@ -444,6 +444,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
444void mwait_idle_with_hints(unsigned long ax, unsigned long cx) 444void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
445{ 445{
446 trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id()); 446 trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
447 trace_cpu_idle((ax>>4)+1, smp_processor_id());
447 if (!need_resched()) { 448 if (!need_resched()) {
448 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) 449 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
449 clflush((void *)&current_thread_info()->flags); 450 clflush((void *)&current_thread_info()->flags);
@@ -460,6 +461,7 @@ static void mwait_idle(void)
460{ 461{
461 if (!need_resched()) { 462 if (!need_resched()) {
462 trace_power_start(POWER_CSTATE, 1, smp_processor_id()); 463 trace_power_start(POWER_CSTATE, 1, smp_processor_id());
464 trace_cpu_idle(1, smp_processor_id());
463 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) 465 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
464 clflush((void *)&current_thread_info()->flags); 466 clflush((void *)&current_thread_info()->flags);
465 467
@@ -481,10 +483,12 @@ static void mwait_idle(void)
481static void poll_idle(void) 483static void poll_idle(void)
482{ 484{
483 trace_power_start(POWER_CSTATE, 0, smp_processor_id()); 485 trace_power_start(POWER_CSTATE, 0, smp_processor_id());
486 trace_cpu_idle(0, smp_processor_id());
484 local_irq_enable(); 487 local_irq_enable();
485 while (!need_resched()) 488 while (!need_resched())
486 cpu_relax(); 489 cpu_relax();
487 trace_power_end(0); 490 trace_power_end(smp_processor_id());
491 trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
488} 492}
489 493
490/* 494/*
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 96586c3cbbbf..4b9befa0e347 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -113,8 +113,8 @@ void cpu_idle(void)
113 stop_critical_timings(); 113 stop_critical_timings();
114 pm_idle(); 114 pm_idle();
115 start_critical_timings(); 115 start_critical_timings();
116
117 trace_power_end(smp_processor_id()); 116 trace_power_end(smp_processor_id());
117 trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
118 } 118 }
119 tick_nohz_restart_sched_tick(); 119 tick_nohz_restart_sched_tick();
120 preempt_enable_no_resched(); 120 preempt_enable_no_resched();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b3d7a3a04f38..4c818a738396 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -142,6 +142,8 @@ void cpu_idle(void)
142 start_critical_timings(); 142 start_critical_timings();
143 143
144 trace_power_end(smp_processor_id()); 144 trace_power_end(smp_processor_id());
145 trace_cpu_idle(PWR_EVENT_EXIT,
146 smp_processor_id());
145 147
146 /* In many cases the interrupt that ended idle 148 /* In many cases the interrupt that ended idle
147 has already called exit_idle. But some idle 149 has already called exit_idle. But some idle
diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
index fda313ebbb03..c8e41e90f59c 100644
--- a/arch/x86/kernel/reboot_fixups_32.c
+++ b/arch/x86/kernel/reboot_fixups_32.c
@@ -43,17 +43,33 @@ static void rdc321x_reset(struct pci_dev *dev)
43 outb(1, 0x92); 43 outb(1, 0x92);
44} 44}
45 45
46static void ce4100_reset(struct pci_dev *dev)
47{
48 int i;
49
50 for (i = 0; i < 10; i++) {
51 outb(0x2, 0xcf9);
52 udelay(50);
53 }
54}
55
46struct device_fixup { 56struct device_fixup {
47 unsigned int vendor; 57 unsigned int vendor;
48 unsigned int device; 58 unsigned int device;
49 void (*reboot_fixup)(struct pci_dev *); 59 void (*reboot_fixup)(struct pci_dev *);
50}; 60};
51 61
62/*
63 * PCI ids solely used for fixups_table go here
64 */
65#define PCI_DEVICE_ID_INTEL_CE4100 0x0708
66
52static const struct device_fixup fixups_table[] = { 67static const struct device_fixup fixups_table[] = {
53{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset }, 68{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset },
54{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, cs5536_warm_reset }, 69{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, cs5536_warm_reset },
55{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE, cs5530a_warm_reset }, 70{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE, cs5530a_warm_reset },
56{ PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030, rdc321x_reset }, 71{ PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030, rdc321x_reset },
72{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100, ce4100_reset },
57}; 73};
58 74
59/* 75/*
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index a0f52af256a0..d3cfe26c0252 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -705,7 +705,7 @@ static u64 __init get_max_mapped(void)
705void __init setup_arch(char **cmdline_p) 705void __init setup_arch(char **cmdline_p)
706{ 706{
707 int acpi = 0; 707 int acpi = 0;
708 int k8 = 0; 708 int amd = 0;
709 unsigned long flags; 709 unsigned long flags;
710 710
711#ifdef CONFIG_X86_32 711#ifdef CONFIG_X86_32
@@ -991,12 +991,12 @@ void __init setup_arch(char **cmdline_p)
991 acpi = acpi_numa_init(); 991 acpi = acpi_numa_init();
992#endif 992#endif
993 993
994#ifdef CONFIG_K8_NUMA 994#ifdef CONFIG_AMD_NUMA
995 if (!acpi) 995 if (!acpi)
996 k8 = !k8_numa_init(0, max_pfn); 996 amd = !amd_numa_init(0, max_pfn);
997#endif 997#endif
998 998
999 initmem_init(0, max_pfn, acpi, k8); 999 initmem_init(0, max_pfn, acpi, amd);
1000 memblock_find_dma_reserve(); 1000 memblock_find_dma_reserve();
1001 dma32_reserve_bootmem(); 1001 dma32_reserve_bootmem();
1002 1002
@@ -1045,10 +1045,7 @@ void __init setup_arch(char **cmdline_p)
1045#endif 1045#endif
1046 1046
1047 init_apic_mappings(); 1047 init_apic_mappings();
1048 ioapic_init_mappings(); 1048 ioapic_and_gsi_init();
1049
1050 /* need to wait for io_apic is mapped */
1051 probe_nr_irqs_gsi();
1052 1049
1053 kvm_guest_init(); 1050 kvm_guest_init();
1054 1051
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 083e99d1b7df..ee886fe10ef4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -281,6 +281,13 @@ static void __cpuinit smp_callin(void)
281 */ 281 */
282 smp_store_cpu_info(cpuid); 282 smp_store_cpu_info(cpuid);
283 283
284 /*
285 * This must be done before setting cpu_online_mask
286 * or calling notify_cpu_starting.
287 */
288 set_cpu_sibling_map(raw_smp_processor_id());
289 wmb();
290
284 notify_cpu_starting(cpuid); 291 notify_cpu_starting(cpuid);
285 292
286 /* 293 /*
@@ -316,16 +323,6 @@ notrace static void __cpuinit start_secondary(void *unused)
316 */ 323 */
317 check_tsc_sync_target(); 324 check_tsc_sync_target();
318 325
319 if (nmi_watchdog == NMI_IO_APIC) {
320 legacy_pic->mask(0);
321 enable_NMI_through_LVT0();
322 legacy_pic->unmask(0);
323 }
324
325 /* This must be done before setting cpu_online_mask */
326 set_cpu_sibling_map(raw_smp_processor_id());
327 wmb();
328
329 /* 326 /*
330 * We need to hold call_lock, so there is no inconsistency 327 * We need to hold call_lock, so there is no inconsistency
331 * between the time smp_call_function() determines number of 328 * between the time smp_call_function() determines number of
@@ -1061,8 +1058,6 @@ static int __init smp_sanity_check(unsigned max_cpus)
1061 printk(KERN_INFO "SMP mode deactivated.\n"); 1058 printk(KERN_INFO "SMP mode deactivated.\n");
1062 smpboot_clear_io_apic(); 1059 smpboot_clear_io_apic();
1063 1060
1064 localise_nmi_watchdog();
1065
1066 connect_bsp_APIC(); 1061 connect_bsp_APIC();
1067 setup_local_APIC(); 1062 setup_local_APIC();
1068 end_local_APIC_setup(); 1063 end_local_APIC_setup();
@@ -1166,6 +1161,20 @@ out:
1166 preempt_enable(); 1161 preempt_enable();
1167} 1162}
1168 1163
1164void arch_disable_nonboot_cpus_begin(void)
1165{
1166 /*
1167 * Avoid the SMP alternatives switch during disable_nonboot_cpus().
1168 * In the suspend path, we will be back in SMP mode shortly anyway.
1169 */
1170 skip_smp_alternatives = true;
1171}
1172
1173void arch_disable_nonboot_cpus_end(void)
1174{
1175 skip_smp_alternatives = false;
1176}
1177
1169void arch_enable_nonboot_cpus_begin(void) 1178void arch_enable_nonboot_cpus_begin(void)
1170{ 1179{
1171 set_mtrr_aps_delayed_init(); 1180 set_mtrr_aps_delayed_init();
@@ -1196,7 +1205,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
1196#ifdef CONFIG_X86_IO_APIC 1205#ifdef CONFIG_X86_IO_APIC
1197 setup_ioapic_dest(); 1206 setup_ioapic_dest();
1198#endif 1207#endif
1199 check_nmi_watchdog();
1200 mtrr_aps_init(); 1208 mtrr_aps_init();
1201} 1209}
1202 1210
@@ -1341,8 +1349,6 @@ int native_cpu_disable(void)
1341 if (cpu == 0) 1349 if (cpu == 0)
1342 return -EBUSY; 1350 return -EBUSY;
1343 1351
1344 if (nmi_watchdog == NMI_LOCAL_APIC)
1345 stop_apic_nmi_watchdog(NULL);
1346 clear_local_APIC(); 1352 clear_local_APIC();
1347 1353
1348 cpu_disable_common(); 1354 cpu_disable_common();
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index b53c525368a7..938c8e10a19a 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -73,22 +73,22 @@ static const struct stacktrace_ops save_stack_ops_nosched = {
73 */ 73 */
74void save_stack_trace(struct stack_trace *trace) 74void save_stack_trace(struct stack_trace *trace)
75{ 75{
76 dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace); 76 dump_trace(current, NULL, NULL, &save_stack_ops, trace);
77 if (trace->nr_entries < trace->max_entries) 77 if (trace->nr_entries < trace->max_entries)
78 trace->entries[trace->nr_entries++] = ULONG_MAX; 78 trace->entries[trace->nr_entries++] = ULONG_MAX;
79} 79}
80EXPORT_SYMBOL_GPL(save_stack_trace); 80EXPORT_SYMBOL_GPL(save_stack_trace);
81 81
82void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp) 82void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs)
83{ 83{
84 dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace); 84 dump_trace(current, regs, NULL, &save_stack_ops, trace);
85 if (trace->nr_entries < trace->max_entries) 85 if (trace->nr_entries < trace->max_entries)
86 trace->entries[trace->nr_entries++] = ULONG_MAX; 86 trace->entries[trace->nr_entries++] = ULONG_MAX;
87} 87}
88 88
89void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 89void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
90{ 90{
91 dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace); 91 dump_trace(tsk, NULL, NULL, &save_stack_ops_nosched, trace);
92 if (trace->nr_entries < trace->max_entries) 92 if (trace->nr_entries < trace->max_entries)
93 trace->entries[trace->nr_entries++] = ULONG_MAX; 93 trace->entries[trace->nr_entries++] = ULONG_MAX;
94} 94}
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index fb5cc5e14cfa..25a28a245937 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -22,10 +22,6 @@
22#include <asm/hpet.h> 22#include <asm/hpet.h>
23#include <asm/time.h> 23#include <asm/time.h>
24 24
25#if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
26int timer_ack;
27#endif
28
29#ifdef CONFIG_X86_64 25#ifdef CONFIG_X86_64
30volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; 26volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
31#endif 27#endif
@@ -63,20 +59,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
63 /* Keep nmi watchdog up to date */ 59 /* Keep nmi watchdog up to date */
64 inc_irq_stat(irq0_irqs); 60 inc_irq_stat(irq0_irqs);
65 61
66 /* Optimized out for !IO_APIC and x86_64 */
67 if (timer_ack) {
68 /*
69 * Subtle, when I/O APICs are used we have to ack timer IRQ
70 * manually to deassert NMI lines for the watchdog if run
71 * on an 82489DX-based system.
72 */
73 raw_spin_lock(&i8259A_lock);
74 outb(0x0c, PIC_MASTER_OCW3);
75 /* Ack the IRQ; AEOI will end it automatically. */
76 inb(PIC_MASTER_POLL);
77 raw_spin_unlock(&i8259A_lock);
78 }
79
80 global_clock_event->event_handler(global_clock_event); 62 global_clock_event->event_handler(global_clock_event);
81 63
82 /* MCA bus quirk: Acknowledge irq0 by setting bit 7 in port 0x61 */ 64 /* MCA bus quirk: Acknowledge irq0 by setting bit 7 in port 0x61 */
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 3af2dff58b21..075d130efcf9 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -127,7 +127,7 @@ startup_64:
127no_longmode: 127no_longmode:
128 hlt 128 hlt
129 jmp no_longmode 129 jmp no_longmode
130#include "verify_cpu_64.S" 130#include "verify_cpu.S"
131 131
132 # Careful these need to be in the same 64K segment as the above; 132 # Careful these need to be in the same 64K segment as the above;
133tidt: 133tidt:
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index cb838ca42c96..c76aaca5694d 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -83,6 +83,8 @@ EXPORT_SYMBOL_GPL(used_vectors);
83 83
84static int ignore_nmis; 84static int ignore_nmis;
85 85
86int unknown_nmi_panic;
87
86static inline void conditional_sti(struct pt_regs *regs) 88static inline void conditional_sti(struct pt_regs *regs)
87{ 89{
88 if (regs->flags & X86_EFLAGS_IF) 90 if (regs->flags & X86_EFLAGS_IF)
@@ -300,6 +302,13 @@ gp_in_kernel:
300 die("general protection fault", regs, error_code); 302 die("general protection fault", regs, error_code);
301} 303}
302 304
305static int __init setup_unknown_nmi_panic(char *str)
306{
307 unknown_nmi_panic = 1;
308 return 1;
309}
310__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
311
303static notrace __kprobes void 312static notrace __kprobes void
304mem_parity_error(unsigned char reason, struct pt_regs *regs) 313mem_parity_error(unsigned char reason, struct pt_regs *regs)
305{ 314{
@@ -342,9 +351,11 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
342 reason = (reason & 0xf) | 8; 351 reason = (reason & 0xf) | 8;
343 outb(reason, 0x61); 352 outb(reason, 0x61);
344 353
345 i = 2000; 354 i = 20000;
346 while (--i) 355 while (--i) {
347 udelay(1000); 356 touch_nmi_watchdog();
357 udelay(100);
358 }
348 359
349 reason &= ~8; 360 reason &= ~8;
350 outb(reason, 0x61); 361 outb(reason, 0x61);
@@ -371,7 +382,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
371 reason, smp_processor_id()); 382 reason, smp_processor_id());
372 383
373 printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n"); 384 printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
374 if (panic_on_unrecovered_nmi) 385 if (unknown_nmi_panic || panic_on_unrecovered_nmi)
375 panic("NMI: Not continuing"); 386 panic("NMI: Not continuing");
376 387
377 printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); 388 printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
@@ -397,20 +408,8 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
397 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) 408 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
398 == NOTIFY_STOP) 409 == NOTIFY_STOP)
399 return; 410 return;
400
401#ifndef CONFIG_LOCKUP_DETECTOR
402 /*
403 * Ok, so this is none of the documented NMI sources,
404 * so it must be the NMI watchdog.
405 */
406 if (nmi_watchdog_tick(regs, reason))
407 return;
408 if (!do_nmi_callback(regs, cpu))
409#endif /* !CONFIG_LOCKUP_DETECTOR */
410 unknown_nmi_error(reason, regs);
411#else
412 unknown_nmi_error(reason, regs);
413#endif 411#endif
412 unknown_nmi_error(reason, regs);
414 413
415 return; 414 return;
416 } 415 }
@@ -446,14 +445,12 @@ do_nmi(struct pt_regs *regs, long error_code)
446 445
447void stop_nmi(void) 446void stop_nmi(void)
448{ 447{
449 acpi_nmi_disable();
450 ignore_nmis++; 448 ignore_nmis++;
451} 449}
452 450
453void restart_nmi(void) 451void restart_nmi(void)
454{ 452{
455 ignore_nmis--; 453 ignore_nmis--;
456 acpi_nmi_enable();
457} 454}
458 455
459/* May run on IST stack. */ 456/* May run on IST stack. */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 0c40d8b72416..356a0d455cf9 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -872,6 +872,9 @@ __cpuinit int unsynchronized_tsc(void)
872 872
873 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 873 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
874 return 0; 874 return 0;
875
876 if (tsc_clocksource_reliable)
877 return 0;
875 /* 878 /*
876 * Intel systems are normally all synchronized. 879 * Intel systems are normally all synchronized.
877 * Exceptions must mark TSC as unstable: 880 * Exceptions must mark TSC as unstable:
@@ -879,14 +882,92 @@ __cpuinit int unsynchronized_tsc(void)
879 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { 882 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
880 /* assume multi socket systems are not synchronized: */ 883 /* assume multi socket systems are not synchronized: */
881 if (num_possible_cpus() > 1) 884 if (num_possible_cpus() > 1)
882 tsc_unstable = 1; 885 return 1;
883 } 886 }
884 887
885 return tsc_unstable; 888 return 0;
889}
890
891
892static void tsc_refine_calibration_work(struct work_struct *work);
893static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
894/**
895 * tsc_refine_calibration_work - Further refine tsc freq calibration
896 * @work - ignored.
897 *
898 * This function uses delayed work over a period of a
899 * second to further refine the TSC freq value. Since this is
900 * timer based, instead of loop based, we don't block the boot
901 * process while this longer calibration is done.
902 *
 903 * If there are any calibration anomalies (too many SMIs, etc),
 904 * or the refined calibration is off by more than 1% from the fast
 905 * early calibration, we throw out the new calibration and use the
906 * early calibration.
907 */
908static void tsc_refine_calibration_work(struct work_struct *work)
909{
910 static u64 tsc_start = -1, ref_start;
911 static int hpet;
912 u64 tsc_stop, ref_stop, delta;
913 unsigned long freq;
914
915 /* Don't bother refining TSC on unstable systems */
916 if (check_tsc_unstable())
917 goto out;
918
919 /*
920 * Since the work is started early in boot, we may be
921 * delayed the first time we expire. So set the workqueue
922 * again once we know timers are working.
923 */
924 if (tsc_start == -1) {
925 /*
926 * Only set hpet once, to avoid mixing hardware
927 * if the hpet becomes enabled later.
928 */
929 hpet = is_hpet_enabled();
930 schedule_delayed_work(&tsc_irqwork, HZ);
931 tsc_start = tsc_read_refs(&ref_start, hpet);
932 return;
933 }
934
935 tsc_stop = tsc_read_refs(&ref_stop, hpet);
936
 937 /* hpet or pmtimer available? */
938 if (!hpet && !ref_start && !ref_stop)
939 goto out;
940
 941 /* Check whether the sampling was disturbed by an SMI */
942 if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
943 goto out;
944
945 delta = tsc_stop - tsc_start;
946 delta *= 1000000LL;
947 if (hpet)
948 freq = calc_hpet_ref(delta, ref_start, ref_stop);
949 else
950 freq = calc_pmtimer_ref(delta, ref_start, ref_stop);
951
952 /* Make sure we're within 1% */
953 if (abs(tsc_khz - freq) > tsc_khz/100)
954 goto out;
955
956 tsc_khz = freq;
957 printk(KERN_INFO "Refined TSC clocksource calibration: "
958 "%lu.%03lu MHz.\n", (unsigned long)tsc_khz / 1000,
959 (unsigned long)tsc_khz % 1000);
960
961out:
962 clocksource_register_khz(&clocksource_tsc, tsc_khz);
886} 963}
887 964
888static void __init init_tsc_clocksource(void) 965
966static int __init init_tsc_clocksource(void)
889{ 967{
968 if (!cpu_has_tsc || tsc_disabled > 0)
969 return 0;
970
890 if (tsc_clocksource_reliable) 971 if (tsc_clocksource_reliable)
891 clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; 972 clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
892 /* lower the rating if we already know its unstable: */ 973 /* lower the rating if we already know its unstable: */
@@ -894,8 +975,14 @@ static void __init init_tsc_clocksource(void)
894 clocksource_tsc.rating = 0; 975 clocksource_tsc.rating = 0;
895 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; 976 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
896 } 977 }
897 clocksource_register_khz(&clocksource_tsc, tsc_khz); 978 schedule_delayed_work(&tsc_irqwork, 0);
979 return 0;
898} 980}
981/*
982 * We use device_initcall here, to ensure we run after the hpet
983 * is fully initialized, which may occur at fs_initcall time.
984 */
985device_initcall(init_tsc_clocksource);
899 986
900void __init tsc_init(void) 987void __init tsc_init(void)
901{ 988{
@@ -949,6 +1036,5 @@ void __init tsc_init(void)
949 mark_tsc_unstable("TSCs unsynchronized"); 1036 mark_tsc_unstable("TSCs unsynchronized");
950 1037
951 check_system_tsc_reliable(); 1038 check_system_tsc_reliable();
952 init_tsc_clocksource();
953} 1039}
954 1040
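
The refinement logic above boils down to one acceptance test: scale the TSC delta against the HPET/PM-timer reference to get kHz, then keep the refined value only if it is within 1% of the early tsc_khz. A hedged, self-contained demo of just that test (userspace C; the sample numbers are made up, and calc_hpet_ref()/calc_pmtimer_ref() are not reproduced):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* hypothetical numbers: early calibration said 2933437 kHz */
	unsigned long tsc_khz = 2933437;
	/* refined measurement over ~1s against the HPET reference */
	unsigned long freq = 2933523;
	long diff = (long)tsc_khz - (long)freq;

	/* the same 1% acceptance test as tsc_refine_calibration_work() */
	if (labs(diff) > (long)(tsc_khz / 100)) {
		printf("refined value rejected, keeping %lu kHz\n", tsc_khz);
	} else {
		printf("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		       freq / 1000, freq % 1000);
	}
	return 0;
}
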
diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu.S
index 56a8c2a867d9..0edefc19a113 100644
--- a/arch/x86/kernel/verify_cpu_64.S
+++ b/arch/x86/kernel/verify_cpu.S
@@ -7,6 +7,7 @@
7 * Copyright (c) 2007 Andi Kleen (ak@suse.de) 7 * Copyright (c) 2007 Andi Kleen (ak@suse.de)
8 * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com) 8 * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
9 * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com) 9 * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
10 * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
10 * 11 *
11 * This source code is licensed under the GNU General Public License, 12 * This source code is licensed under the GNU General Public License,
12 * Version 2. See the file COPYING for more details. 13 * Version 2. See the file COPYING for more details.
@@ -14,18 +15,17 @@
14 * This is a common code for verification whether CPU supports 15 * This is a common code for verification whether CPU supports
15 * long mode and SSE or not. It is not called directly instead this 16 * long mode and SSE or not. It is not called directly instead this
16 * file is included at various places and compiled in that context. 17 * file is included at various places and compiled in that context.
17 * Following are the current usage. 18 * This file is expected to run in 32bit code. Currently:
18 * 19 *
19 * This file is included by both 16bit and 32bit code. 20 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
 21 * arch/x86/kernel/trampoline_64.S: secondary processor verification
22 * arch/x86/kernel/head_32.S: processor startup
20 * 23 *
21 * arch/x86_64/boot/setup.S : Boot cpu verification (16bit) 24 * verify_cpu, returns the status of longmode and SSE in register %eax.
22 * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
23 * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
24 * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
25 *
26 * verify_cpu, returns the status of cpu check in register %eax.
27 * 0: Success 1: Failure 25 * 0: Success 1: Failure
28 * 26 *
27 * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
28 *
29 * The caller needs to check for the error code and take the action 29 * The caller needs to check for the error code and take the action
30 * appropriately. Either display a message or halt. 30 * appropriately. Either display a message or halt.
31 */ 31 */
@@ -62,8 +62,41 @@ verify_cpu:
62 cmpl $0x444d4163,%ecx 62 cmpl $0x444d4163,%ecx
63 jnz verify_cpu_noamd 63 jnz verify_cpu_noamd
64 mov $1,%di # cpu is from AMD 64 mov $1,%di # cpu is from AMD
65 jmp verify_cpu_check
65 66
66verify_cpu_noamd: 67verify_cpu_noamd:
68 cmpl $0x756e6547,%ebx # GenuineIntel?
69 jnz verify_cpu_check
70 cmpl $0x49656e69,%edx
71 jnz verify_cpu_check
72 cmpl $0x6c65746e,%ecx
73 jnz verify_cpu_check
74
75 # only call IA32_MISC_ENABLE when:
76 # family > 6 || (family == 6 && model >= 0xd)
77 movl $0x1, %eax # check CPU family and model
78 cpuid
79 movl %eax, %ecx
80
81 andl $0x0ff00f00, %eax # mask family and extended family
82 shrl $8, %eax
83 cmpl $6, %eax
84 ja verify_cpu_clear_xd # family > 6, ok
85 jb verify_cpu_check # family < 6, skip
86
87 andl $0x000f00f0, %ecx # mask model and extended model
88 shrl $4, %ecx
89 cmpl $0xd, %ecx
90 jb verify_cpu_check # family == 6, model < 0xd, skip
91
92verify_cpu_clear_xd:
93 movl $MSR_IA32_MISC_ENABLE, %ecx
94 rdmsr
95 btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
96 jnc verify_cpu_check # only write MSR if bit was changed
97 wrmsr
98
99verify_cpu_check:
67 movl $0x1,%eax # Does the cpu have what it takes 100 movl $0x1,%eax # Does the cpu have what it takes
68 cpuid 101 cpuid
69 andl $REQUIRED_MASK0,%edx 102 andl $REQUIRED_MASK0,%edx
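
Restated in C, the new Intel path above clears IA32_MISC_ENABLE bit 34 (XD_DISABLE, i.e. bit 2 of the high dword that btrl touches) on family > 6, or family 6 with model >= 0xd, and writes the MSR back only when the bit was actually set. A self-contained sketch (userspace C; the CPUID value is hypothetical, the family/model decode uses the simplified display convention rather than the exact fused masks in the assembly, and the MSR is modeled as a plain variable):

#include <stdio.h>
#include <stdint.h>

/* XD_DISABLE is bit 34 of IA32_MISC_ENABLE, i.e. bit 2 of the high
 * dword that the btrl above operates on */
#define XD_DISABLE_HI (1u << 2)

int main(void)
{
	/* hypothetical CPUID leaf-1 EAX: family 6, model 0x1e (Nehalem) */
	uint32_t eax = 0x000106e5;
	uint32_t family = (eax >> 8) & 0xf;
	uint32_t model = ((eax >> 12) & 0xf0) | ((eax >> 4) & 0xf);
	/* hypothetical IA32_MISC_ENABLE high dword with XD disabled */
	uint32_t misc_hi = XD_DISABLE_HI;

	/* the gate from verify_cpu_noamd: family > 6, or 6 with model >= 0xd */
	if (family > 6 || (family == 6 && model >= 0xd)) {
		if (misc_hi & XD_DISABLE_HI) {
			misc_hi &= ~XD_DISABLE_HI;	/* wrmsr only when changed */
			printf("XD re-enabled (wrmsr issued)\n");
		}
	}
	printf("misc_hi = %#x\n", misc_hi);
	return 0;
}
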
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index e03530aebfd0..bf4700755184 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -69,7 +69,7 @@ jiffies_64 = jiffies;
69 69
70PHDRS { 70PHDRS {
71 text PT_LOAD FLAGS(5); /* R_E */ 71 text PT_LOAD FLAGS(5); /* R_E */
72 data PT_LOAD FLAGS(7); /* RWE */ 72 data PT_LOAD FLAGS(6); /* RW_ */
73#ifdef CONFIG_X86_64 73#ifdef CONFIG_X86_64
74 user PT_LOAD FLAGS(5); /* R_E */ 74 user PT_LOAD FLAGS(5); /* R_E */
75#ifdef CONFIG_SMP 75#ifdef CONFIG_SMP
@@ -116,6 +116,10 @@ SECTIONS
116 116
117 EXCEPTION_TABLE(16) :text = 0x9090 117 EXCEPTION_TABLE(16) :text = 0x9090
118 118
119#if defined(CONFIG_DEBUG_RODATA)
120 /* .text should occupy whole number of pages */
121 . = ALIGN(PAGE_SIZE);
122#endif
119 X64_ALIGN_DEBUG_RODATA_BEGIN 123 X64_ALIGN_DEBUG_RODATA_BEGIN
120 RO_DATA(PAGE_SIZE) 124 RO_DATA(PAGE_SIZE)
121 X64_ALIGN_DEBUG_RODATA_END 125 X64_ALIGN_DEBUG_RODATA_END
@@ -335,7 +339,7 @@ SECTIONS
335 __bss_start = .; 339 __bss_start = .;
336 *(.bss..page_aligned) 340 *(.bss..page_aligned)
337 *(.bss) 341 *(.bss)
338 . = ALIGN(4); 342 . = ALIGN(PAGE_SIZE);
339 __bss_stop = .; 343 __bss_stop = .;
340 } 344 }
341 345
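
The FLAGS() values above are ELF program-header permission bits (PF_X = 1, PF_W = 2, PF_R = 4), so the data segment drops from 7 (read/write/execute) to 6 (read/write). A small self-contained C decoder:

#include <stdio.h>

/* ELF program-header flag bits (see elf.h) */
#define PF_X 0x1
#define PF_W 0x2
#define PF_R 0x4

static void decode(int flags)
{
	printf("FLAGS(%d) = %c%c%c\n", flags,
	       (flags & PF_R) ? 'R' : '_',
	       (flags & PF_W) ? 'W' : '_',
	       (flags & PF_X) ? 'E' : '_');
}

int main(void)
{
	decode(7);	/* old data segment: RWE */
	decode(6);	/* new data segment: RW_ */
	decode(5);	/* text segment:     R_E */
	return 0;
}
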
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index e7d5382ef263..4f420c2f2d55 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -4,7 +4,6 @@
4#include <asm/asm-offsets.h> 4#include <asm/asm-offsets.h>
5#include <asm/thread_info.h> 5#include <asm/thread_info.h>
6#include <asm/processor-flags.h> 6#include <asm/processor-flags.h>
7#include <asm/pgtable.h>
8 7
9/*G:020 8/*G:020
10 * Our story starts with the kernel booting into startup_32 in 9 * Our story starts with the kernel booting into startup_32 in
@@ -38,113 +37,9 @@ ENTRY(lguest_entry)
38 /* Set up the initial stack so we can run C code. */ 37 /* Set up the initial stack so we can run C code. */
39 movl $(init_thread_union+THREAD_SIZE),%esp 38 movl $(init_thread_union+THREAD_SIZE),%esp
40 39
41 call init_pagetables
42
43 /* Jumps are relative: we're running __PAGE_OFFSET too low. */ 40 /* Jumps are relative: we're running __PAGE_OFFSET too low. */
44 jmp lguest_init+__PAGE_OFFSET 41 jmp lguest_init+__PAGE_OFFSET
45 42
46/*
47 * Initialize page tables. This creates a PDE and a set of page
48 * tables, which are located immediately beyond __brk_base. The variable
49 * _brk_end is set up to point to the first "safe" location.
50 * Mappings are created both at virtual address 0 (identity mapping)
51 * and PAGE_OFFSET for up to _end.
52 *
53 * FIXME: This code is taken verbatim from arch/x86/kernel/head_32.S: they
54 * don't have a stack at this point, so we can't just use call and ret.
55 */
56init_pagetables:
57#if PTRS_PER_PMD > 1
58#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
59#else
60#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
61#endif
62#define pa(X) ((X) - __PAGE_OFFSET)
63
64/* Enough space to fit pagetables for the low memory linear map */
65MAPPING_BEYOND_END = \
66 PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
67#ifdef CONFIG_X86_PAE
68
69 /*
70 * In PAE mode initial_page_table is statically defined to contain
71 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
72 * entries). The identity mapping is handled by pointing two PGD entries
73 * to the first kernel PMD.
74 *
75 * Note the upper half of each PMD or PTE are always zero at this stage.
76 */
77
78#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
79
80 xorl %ebx,%ebx /* %ebx is kept at zero */
81
82 movl $pa(__brk_base), %edi
83 movl $pa(initial_pg_pmd), %edx
84 movl $PTE_IDENT_ATTR, %eax
8510:
86 leal PDE_IDENT_ATTR(%edi),%ecx /* Create PMD entry */
87 movl %ecx,(%edx) /* Store PMD entry */
88 /* Upper half already zero */
89 addl $8,%edx
90 movl $512,%ecx
9111:
92 stosl
93 xchgl %eax,%ebx
94 stosl
95 xchgl %eax,%ebx
96 addl $0x1000,%eax
97 loop 11b
98
99 /*
100 * End condition: we must map up to the end + MAPPING_BEYOND_END.
101 */
102 movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
103 cmpl %ebp,%eax
104 jb 10b
1051:
106 addl $__PAGE_OFFSET, %edi
107 movl %edi, pa(_brk_end)
108 shrl $12, %eax
109 movl %eax, pa(max_pfn_mapped)
110
111 /* Do early initialization of the fixmap area */
112 movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
113 movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
114#else /* Not PAE */
115
116page_pde_offset = (__PAGE_OFFSET >> 20);
117
118 movl $pa(__brk_base), %edi
119 movl $pa(initial_page_table), %edx
120 movl $PTE_IDENT_ATTR, %eax
12110:
122 leal PDE_IDENT_ATTR(%edi),%ecx /* Create PDE entry */
123 movl %ecx,(%edx) /* Store identity PDE entry */
124 movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */
125 addl $4,%edx
126 movl $1024, %ecx
12711:
128 stosl
129 addl $0x1000,%eax
130 loop 11b
131 /*
132 * End condition: we must map up to the end + MAPPING_BEYOND_END.
133 */
134 movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
135 cmpl %ebp,%eax
136 jb 10b
137 addl $__PAGE_OFFSET, %edi
138 movl %edi, pa(_brk_end)
139 shrl $12, %eax
140 movl %eax, pa(max_pfn_mapped)
141
142 /* Do early initialization of the fixmap area */
143 movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
144 movl %eax,pa(initial_page_table+0xffc)
145#endif
146 ret
147
148/*G:055 43/*G:055
149 * We create a macro which puts the assembler code between lgstart_ and lgend_ 44 * We create a macro which puts the assembler code between lgstart_ and lgend_
150 * markers. These templates are put in the .text section: they can't be 45 * markers. These templates are put in the .text section: they can't be
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 55543397a8a7..09df2f9a3d69 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -23,7 +23,7 @@ mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
23obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o 23obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
24 24
25obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o 25obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o
26obj-$(CONFIG_K8_NUMA) += k8topology_64.o 26obj-$(CONFIG_AMD_NUMA) += amdtopology_64.o
27obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o 27obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o
28 28
29obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o 29obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/amdtopology_64.c
index 804a3b6c6e14..51fae9cfdecb 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * AMD K8 NUMA support. 2 * AMD NUMA support.
3 * Discover the memory map and associated nodes. 3 * Discover the memory map and associated nodes.
4 * 4 *
5 * This version reads it directly from the K8 northbridge. 5 * This version reads it directly from the AMD northbridge.
6 * 6 *
7 * Copyright 2002,2003 Andi Kleen, SuSE Labs. 7 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
8 */ 8 */
@@ -57,7 +57,7 @@ static __init void early_get_boot_cpu_id(void)
57{ 57{
58 /* 58 /*
59 * need to get the APIC ID of the BSP so can use that to 59 * need to get the APIC ID of the BSP so can use that to
60 * create apicid_to_node in k8_scan_nodes() 60 * create apicid_to_node in amd_scan_nodes()
61 */ 61 */
62#ifdef CONFIG_X86_MPPARSE 62#ifdef CONFIG_X86_MPPARSE
63 /* 63 /*
@@ -69,7 +69,7 @@ static __init void early_get_boot_cpu_id(void)
69 early_init_lapic_mapping(); 69 early_init_lapic_mapping();
70} 70}
71 71
72int __init k8_get_nodes(struct bootnode *physnodes) 72int __init amd_get_nodes(struct bootnode *physnodes)
73{ 73{
74 int i; 74 int i;
75 int ret = 0; 75 int ret = 0;
@@ -82,7 +82,7 @@ int __init k8_get_nodes(struct bootnode *physnodes)
82 return ret; 82 return ret;
83} 83}
84 84
85int __init k8_numa_init(unsigned long start_pfn, unsigned long end_pfn) 85int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
86{ 86{
87 unsigned long start = PFN_PHYS(start_pfn); 87 unsigned long start = PFN_PHYS(start_pfn);
88 unsigned long end = PFN_PHYS(end_pfn); 88 unsigned long end = PFN_PHYS(end_pfn);
@@ -194,7 +194,7 @@ int __init k8_numa_init(unsigned long start_pfn, unsigned long end_pfn)
194 return 0; 194 return 0;
195} 195}
196 196
197int __init k8_scan_nodes(void) 197int __init amd_scan_nodes(void)
198{ 198{
199 unsigned int bits; 199 unsigned int bits;
200 unsigned int cores; 200 unsigned int cores;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index c0e28a13de7d..947f42abe820 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -364,8 +364,9 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
364 /* 364 /*
365 * We just marked the kernel text read only above, now that 365 * We just marked the kernel text read only above, now that
366 * we are going to free part of that, we need to make that 366 * we are going to free part of that, we need to make that
367 * writeable first. 367 * writeable and non-executable first.
368 */ 368 */
369 set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
369 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); 370 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
370 371
371 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); 372 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 0e969f9f401b..f89b5bb4e93f 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -226,7 +226,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
226 226
227static inline int is_kernel_text(unsigned long addr) 227static inline int is_kernel_text(unsigned long addr)
228{ 228{
229 if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end) 229 if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
230 return 1; 230 return 1;
231 return 0; 231 return 0;
232} 232}
@@ -912,6 +912,23 @@ void set_kernel_text_ro(void)
912 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); 912 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
913} 913}
914 914
915static void mark_nxdata_nx(void)
916{
917 /*
 918 * When this is called, init has already been executed and released,
 919 * so everything past _etext should be NX.
920 */
921 unsigned long start = PFN_ALIGN(_etext);
922 /*
 923 * This comes from the is_kernel_text() upper limit; round up to a huge-page boundary where HPAGE mappings were used:
924 */
925 unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;
926
927 if (__supported_pte_mask & _PAGE_NX)
928 printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
929 set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
930}
931
915void mark_rodata_ro(void) 932void mark_rodata_ro(void)
916{ 933{
917 unsigned long start = PFN_ALIGN(_text); 934 unsigned long start = PFN_ALIGN(_text);
@@ -946,6 +963,7 @@ void mark_rodata_ro(void)
946 printk(KERN_INFO "Testing CPA: write protecting again\n"); 963 printk(KERN_INFO "Testing CPA: write protecting again\n");
947 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); 964 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
948#endif 965#endif
966 mark_nxdata_nx();
949} 967}
950#endif 968#endif
951 969
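
mark_nxdata_nx() above computes its region as: start at the first page past _etext, end at __init_end rounded up past a huge-page boundary (matching the is_kernel_text() limit where 2M pages were used). A self-contained demo of that arithmetic (userspace C; the two addresses are hypothetical link-time values):

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define HPAGE_SIZE  (2UL << 20)		/* 2 MiB huge pages */
#define HPAGE_MASK  (~(HPAGE_SIZE - 1))
#define PFN_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* hypothetical link-time addresses, for illustration only */
	unsigned long etext = 0xc05a1234UL;
	unsigned long init_end = 0xc07e5000UL;

	unsigned long start = PFN_ALIGN(etext);
	/* round past __init_end to a huge-page boundary, as the patch does */
	unsigned long size = ((init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	printf("NX-protecting the kernel data: %luk\n", size >> 10);
	return 0;
}
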
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
index af3b6c8a436f..704a37cedddb 100644
--- a/arch/x86/mm/kmemcheck/error.c
+++ b/arch/x86/mm/kmemcheck/error.c
@@ -185,7 +185,7 @@ void kmemcheck_error_save(enum kmemcheck_shadow state,
185 e->trace.entries = e->trace_entries; 185 e->trace.entries = e->trace_entries;
186 e->trace.max_entries = ARRAY_SIZE(e->trace_entries); 186 e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
187 e->trace.skip = 0; 187 e->trace.skip = 0;
188 save_stack_trace_bp(&e->trace, regs->bp); 188 save_stack_trace_regs(&e->trace, regs);
189 189
190 /* Round address down to nearest 16 bytes */ 190 /* Round address down to nearest 16 bytes */
191 shadow_copy = kmemcheck_shadow_lookup(address 191 shadow_copy = kmemcheck_shadow_lookup(address
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 7ffc9b727efd..7762a517d69d 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -264,7 +264,7 @@ static struct bootnode physnodes[MAX_NUMNODES] __initdata;
264static char *cmdline __initdata; 264static char *cmdline __initdata;
265 265
266static int __init setup_physnodes(unsigned long start, unsigned long end, 266static int __init setup_physnodes(unsigned long start, unsigned long end,
267 int acpi, int k8) 267 int acpi, int amd)
268{ 268{
269 int nr_nodes = 0; 269 int nr_nodes = 0;
270 int ret = 0; 270 int ret = 0;
@@ -274,13 +274,13 @@ static int __init setup_physnodes(unsigned long start, unsigned long end,
274 if (acpi) 274 if (acpi)
275 nr_nodes = acpi_get_nodes(physnodes); 275 nr_nodes = acpi_get_nodes(physnodes);
276#endif 276#endif
277#ifdef CONFIG_K8_NUMA 277#ifdef CONFIG_AMD_NUMA
278 if (k8) 278 if (amd)
279 nr_nodes = k8_get_nodes(physnodes); 279 nr_nodes = amd_get_nodes(physnodes);
280#endif 280#endif
281 /* 281 /*
282 * Basic sanity checking on the physical node map: there may be errors 282 * Basic sanity checking on the physical node map: there may be errors
283 * if the SRAT or K8 incorrectly reported the topology or the mem= 283 * if the SRAT or AMD code incorrectly reported the topology or the mem=
284 * kernel parameter is used. 284 * kernel parameter is used.
285 */ 285 */
286 for (i = 0; i < nr_nodes; i++) { 286 for (i = 0; i < nr_nodes; i++) {
@@ -549,7 +549,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
549 * numa=fake command-line option. 549 * numa=fake command-line option.
550 */ 550 */
551static int __init numa_emulation(unsigned long start_pfn, 551static int __init numa_emulation(unsigned long start_pfn,
552 unsigned long last_pfn, int acpi, int k8) 552 unsigned long last_pfn, int acpi, int amd)
553{ 553{
554 u64 addr = start_pfn << PAGE_SHIFT; 554 u64 addr = start_pfn << PAGE_SHIFT;
555 u64 max_addr = last_pfn << PAGE_SHIFT; 555 u64 max_addr = last_pfn << PAGE_SHIFT;
@@ -557,7 +557,7 @@ static int __init numa_emulation(unsigned long start_pfn,
557 int num_nodes; 557 int num_nodes;
558 int i; 558 int i;
559 559
560 num_phys_nodes = setup_physnodes(addr, max_addr, acpi, k8); 560 num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd);
561 /* 561 /*
562 * If the numa=fake command-line contains a 'M' or 'G', it represents 562 * If the numa=fake command-line contains a 'M' or 'G', it represents
563 * the fixed node size. Otherwise, if it is just a single number N, 563 * the fixed node size. Otherwise, if it is just a single number N,
@@ -602,7 +602,7 @@ static int __init numa_emulation(unsigned long start_pfn,
602#endif /* CONFIG_NUMA_EMU */ 602#endif /* CONFIG_NUMA_EMU */
603 603
604void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, 604void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
605 int acpi, int k8) 605 int acpi, int amd)
606{ 606{
607 int i; 607 int i;
608 608
@@ -610,7 +610,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
610 nodes_clear(node_online_map); 610 nodes_clear(node_online_map);
611 611
612#ifdef CONFIG_NUMA_EMU 612#ifdef CONFIG_NUMA_EMU
613 if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, k8)) 613 if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
614 return; 614 return;
615 nodes_clear(node_possible_map); 615 nodes_clear(node_possible_map);
616 nodes_clear(node_online_map); 616 nodes_clear(node_online_map);
@@ -624,8 +624,8 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
624 nodes_clear(node_online_map); 624 nodes_clear(node_online_map);
625#endif 625#endif
626 626
627#ifdef CONFIG_K8_NUMA 627#ifdef CONFIG_AMD_NUMA
628 if (!numa_off && k8 && !k8_scan_nodes()) 628 if (!numa_off && amd && !amd_scan_nodes())
629 return; 629 return;
630 nodes_clear(node_possible_map); 630 nodes_clear(node_possible_map);
631 nodes_clear(node_online_map); 631 nodes_clear(node_online_map);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 532e7933d606..8b830ca14ac4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -13,6 +13,7 @@
13#include <linux/pfn.h> 13#include <linux/pfn.h>
14#include <linux/percpu.h> 14#include <linux/percpu.h>
15#include <linux/gfp.h> 15#include <linux/gfp.h>
16#include <linux/pci.h>
16 17
17#include <asm/e820.h> 18#include <asm/e820.h>
18#include <asm/processor.h> 19#include <asm/processor.h>
@@ -255,13 +256,16 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
255 unsigned long pfn) 256 unsigned long pfn)
256{ 257{
257 pgprot_t forbidden = __pgprot(0); 258 pgprot_t forbidden = __pgprot(0);
259 pgprot_t required = __pgprot(0);
258 260
259 /* 261 /*
260 * The BIOS area between 640k and 1Mb needs to be executable for 262 * The BIOS area between 640k and 1Mb needs to be executable for
261 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support. 263 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
262 */ 264 */
263 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) 265#ifdef CONFIG_PCI_BIOS
266 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
264 pgprot_val(forbidden) |= _PAGE_NX; 267 pgprot_val(forbidden) |= _PAGE_NX;
268#endif
265 269
266 /* 270 /*
267 * The kernel text needs to be executable for obvious reasons 271 * The kernel text needs to be executable for obvious reasons
@@ -278,6 +282,12 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
278 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT, 282 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
279 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) 283 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
280 pgprot_val(forbidden) |= _PAGE_RW; 284 pgprot_val(forbidden) |= _PAGE_RW;
285 /*
286 * .data and .bss should always be writable.
287 */
288 if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
289 within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
290 pgprot_val(required) |= _PAGE_RW;
281 291
282#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) 292#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
283 /* 293 /*
@@ -317,6 +327,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
317#endif 327#endif
318 328
319 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); 329 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
330 prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
320 331
321 return prot; 332 return prot;
322} 333}
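
The new `required` mask above complements `forbidden`: forbidden bits are stripped first, then required bits are forced back on, so .data/.bss keep _PAGE_RW even when a wider change tries to drop it. A minimal sketch of that ordering (self-contained C; _PAGE_RW reduced to its bare bit value):

#include <stdio.h>

#define _PAGE_RW 0x002UL	/* writable bit, as on x86 */

static unsigned long apply(unsigned long prot,
			   unsigned long forbidden, unsigned long required)
{
	/* same order as the patched static_protections(): strip the
	 * forbidden bits first, then force the required ones */
	prot &= ~forbidden;
	return prot | required;
}

int main(void)
{
	/* a .rodata page: RW forbidden, nothing required */
	printf("rodata: %#lx\n", apply(_PAGE_RW, _PAGE_RW, 0));
	/* a .data/.bss page: nothing forbidden, RW now required */
	printf("data:   %#lx\n", apply(0, 0, _PAGE_RW));
	return 0;
}
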
@@ -393,7 +404,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
393{ 404{
394 unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn; 405 unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
395 pte_t new_pte, old_pte, *tmp; 406 pte_t new_pte, old_pte, *tmp;
396 pgprot_t old_prot, new_prot; 407 pgprot_t old_prot, new_prot, req_prot;
397 int i, do_split = 1; 408 int i, do_split = 1;
398 unsigned int level; 409 unsigned int level;
399 410
@@ -438,10 +449,10 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
438 * We are safe now. Check whether the new pgprot is the same: 449 * We are safe now. Check whether the new pgprot is the same:
439 */ 450 */
440 old_pte = *kpte; 451 old_pte = *kpte;
441 old_prot = new_prot = pte_pgprot(old_pte); 452 old_prot = new_prot = req_prot = pte_pgprot(old_pte);
442 453
443 pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); 454 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
444 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); 455 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
445 456
446 /* 457 /*
447 * old_pte points to the large page base address. So we need 458 * old_pte points to the large page base address. So we need
@@ -450,17 +461,17 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
450 pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT); 461 pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
451 cpa->pfn = pfn; 462 cpa->pfn = pfn;
452 463
453 new_prot = static_protections(new_prot, address, pfn); 464 new_prot = static_protections(req_prot, address, pfn);
454 465
455 /* 466 /*
456 * We need to check the full range, whether 467 * We need to check the full range, whether
457 * static_protection() requires a different pgprot for one of 468 * static_protection() requires a different pgprot for one of
458 * the pages in the range we try to preserve: 469 * the pages in the range we try to preserve:
459 */ 470 */
460 addr = address + PAGE_SIZE; 471 addr = address & pmask;
461 pfn++; 472 pfn = pte_pfn(old_pte);
462 for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) { 473 for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
463 pgprot_t chk_prot = static_protections(new_prot, addr, pfn); 474 pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
464 475
465 if (pgprot_val(chk_prot) != pgprot_val(new_prot)) 476 if (pgprot_val(chk_prot) != pgprot_val(new_prot))
466 goto out_unlock; 477 goto out_unlock;
@@ -483,7 +494,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
483 * that we limited the number of possible pages already to 494 * that we limited the number of possible pages already to
484 * the number of pages in the large page. 495 * the number of pages in the large page.
485 */ 496 */
486 if (address == (nextpage_addr - psize) && cpa->numpages == numpages) { 497 if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
487 /* 498 /*
488 * The address is aligned and the number of pages 499 * The address is aligned and the number of pages
489 * covers the full page. 500 * covers the full page.
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index a3250aa34086..410531d3c292 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -41,7 +41,7 @@ void __init x86_report_nx(void)
41{ 41{
42 if (!cpu_has_nx) { 42 if (!cpu_has_nx) {
43 printk(KERN_NOTICE "Notice: NX (Execute Disable) protection " 43 printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
44 "missing in CPU or disabled in BIOS!\n"); 44 "missing in CPU!\n");
45 } else { 45 } else {
46#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 46#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
47 if (disable_nx) { 47 if (disable_nx) {
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index a17dffd136c1..f16434568a51 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -92,6 +92,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *cpu_affinity)
92 /* mark this node as "seen" in node bitmap */ 92 /* mark this node as "seen" in node bitmap */
93 BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo); 93 BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo);
94 94
95 /* don't need to check apic_id here, because it is always 8 bits */
95 apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo; 96 apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo;
96 97
97 printk(KERN_DEBUG "CPU %02x in proximity domain %02x\n", 98 printk(KERN_DEBUG "CPU %02x in proximity domain %02x\n",
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index a35cb9d8b060..171a0aacb99a 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -134,6 +134,10 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
134 } 134 }
135 135
136 apic_id = pa->apic_id; 136 apic_id = pa->apic_id;
137 if (apic_id >= MAX_LOCAL_APIC) {
138 printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
139 return;
140 }
137 apicid_to_node[apic_id] = node; 141 apicid_to_node[apic_id] = node;
138 node_set(node, cpu_nodes_parsed); 142 node_set(node, cpu_nodes_parsed);
139 acpi_numa = 1; 143 acpi_numa = 1;
@@ -168,6 +172,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
168 apic_id = (pa->apic_id << 8) | pa->local_sapic_eid; 172 apic_id = (pa->apic_id << 8) | pa->local_sapic_eid;
169 else 173 else
170 apic_id = pa->apic_id; 174 apic_id = pa->apic_id;
175
176 if (apic_id >= MAX_LOCAL_APIC) {
177 printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
178 return;
179 }
180
171 apicid_to_node[apic_id] = node; 181 apicid_to_node[apic_id] = node;
172 node_set(node, cpu_nodes_parsed); 182 node_set(node, cpu_nodes_parsed);
173 acpi_numa = 1; 183 acpi_numa = 1;
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 2d49d4e19a36..72cbec14d783 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -126,7 +126,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
126 if (!user_mode_vm(regs)) { 126 if (!user_mode_vm(regs)) {
127 unsigned long stack = kernel_stack_pointer(regs); 127 unsigned long stack = kernel_stack_pointer(regs);
128 if (depth) 128 if (depth)
129 dump_trace(NULL, regs, (unsigned long *)stack, 0, 129 dump_trace(NULL, regs, (unsigned long *)stack,
130 &backtrace_ops, &depth); 130 &backtrace_ops, &depth);
131 return; 131 return;
132 } 132 }
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 4e8baad36d37..358c8b9c96a7 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -732,6 +732,9 @@ int __init op_nmi_init(struct oprofile_operations *ops)
732 case 0x14: 732 case 0x14:
733 cpu_type = "x86-64/family14h"; 733 cpu_type = "x86-64/family14h";
734 break; 734 break;
735 case 0x15:
736 cpu_type = "x86-64/family15h";
737 break;
735 default: 738 default:
736 return -ENODEV; 739 return -ENODEV;
737 } 740 }
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
index e3ecb71b5790..0636dd93cef8 100644
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ b/arch/x86/oprofile/nmi_timer_int.c
@@ -58,9 +58,6 @@ static void timer_stop(void)
58 58
59int __init op_nmi_timer_init(struct oprofile_operations *ops) 59int __init op_nmi_timer_init(struct oprofile_operations *ops)
60{ 60{
61 if ((nmi_watchdog != NMI_IO_APIC) || (atomic_read(&nmi_active) <= 0))
62 return -ENODEV;
63
64 ops->start = timer_start; 61 ops->start = timer_start;
65 ops->stop = timer_stop; 62 ops->stop = timer_stop;
66 ops->cpu_type = "timer"; 63 ops->cpu_type = "timer";
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 7d90d47655ba..c3b8e24f2b16 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -29,11 +29,12 @@
29#include "op_x86_model.h" 29#include "op_x86_model.h"
30#include "op_counter.h" 30#include "op_counter.h"
31 31
32#define NUM_COUNTERS 4 32#define NUM_COUNTERS 4
33#define NUM_COUNTERS_F15H 6
33#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 34#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
34#define NUM_VIRT_COUNTERS 32 35#define NUM_VIRT_COUNTERS 32
35#else 36#else
36#define NUM_VIRT_COUNTERS NUM_COUNTERS 37#define NUM_VIRT_COUNTERS 0
37#endif 38#endif
38 39
39#define OP_EVENT_MASK 0x0FFF 40#define OP_EVENT_MASK 0x0FFF
@@ -41,7 +42,8 @@
41 42
42#define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21)) 43#define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21))
43 44
44static unsigned long reset_value[NUM_VIRT_COUNTERS]; 45static int num_counters;
46static unsigned long reset_value[OP_MAX_COUNTER];
45 47
46#define IBS_FETCH_SIZE 6 48#define IBS_FETCH_SIZE 6
47#define IBS_OP_SIZE 12 49#define IBS_OP_SIZE 12
@@ -387,7 +389,7 @@ static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
387 int i; 389 int i;
388 390
389 /* enable active counters */ 391 /* enable active counters */
390 for (i = 0; i < NUM_COUNTERS; ++i) { 392 for (i = 0; i < num_counters; ++i) {
391 int virt = op_x86_phys_to_virt(i); 393 int virt = op_x86_phys_to_virt(i);
392 if (!reset_value[virt]) 394 if (!reset_value[virt])
393 continue; 395 continue;
@@ -406,7 +408,7 @@ static void op_amd_shutdown(struct op_msrs const * const msrs)
406{ 408{
407 int i; 409 int i;
408 410
409 for (i = 0; i < NUM_COUNTERS; ++i) { 411 for (i = 0; i < num_counters; ++i) {
410 if (!msrs->counters[i].addr) 412 if (!msrs->counters[i].addr)
411 continue; 413 continue;
412 release_perfctr_nmi(MSR_K7_PERFCTR0 + i); 414 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
@@ -418,7 +420,7 @@ static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
418{ 420{
419 int i; 421 int i;
420 422
421 for (i = 0; i < NUM_COUNTERS; i++) { 423 for (i = 0; i < num_counters; i++) {
422 if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) 424 if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
423 goto fail; 425 goto fail;
424 if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) { 426 if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
@@ -426,8 +428,13 @@ static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
426 goto fail; 428 goto fail;
427 } 429 }
428 /* both registers must be reserved */ 430 /* both registers must be reserved */
429 msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; 431 if (num_counters == NUM_COUNTERS_F15H) {
430 msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; 432 msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1);
433 msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1);
434 } else {
435 msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
436 msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
437 }
431 continue; 438 continue;
432 fail: 439 fail:
433 if (!counter_config[i].enabled) 440 if (!counter_config[i].enabled)
@@ -447,7 +454,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
447 int i; 454 int i;
448 455
449 /* setup reset_value */ 456 /* setup reset_value */
450 for (i = 0; i < NUM_VIRT_COUNTERS; ++i) { 457 for (i = 0; i < OP_MAX_COUNTER; ++i) {
451 if (counter_config[i].enabled 458 if (counter_config[i].enabled
452 && msrs->counters[op_x86_virt_to_phys(i)].addr) 459 && msrs->counters[op_x86_virt_to_phys(i)].addr)
453 reset_value[i] = counter_config[i].count; 460 reset_value[i] = counter_config[i].count;
@@ -456,7 +463,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
456 } 463 }
457 464
458 /* clear all counters */ 465 /* clear all counters */
459 for (i = 0; i < NUM_COUNTERS; ++i) { 466 for (i = 0; i < num_counters; ++i) {
460 if (!msrs->controls[i].addr) 467 if (!msrs->controls[i].addr)
461 continue; 468 continue;
462 rdmsrl(msrs->controls[i].addr, val); 469 rdmsrl(msrs->controls[i].addr, val);
@@ -472,7 +479,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
472 } 479 }
473 480
474 /* enable active counters */ 481 /* enable active counters */
475 for (i = 0; i < NUM_COUNTERS; ++i) { 482 for (i = 0; i < num_counters; ++i) {
476 int virt = op_x86_phys_to_virt(i); 483 int virt = op_x86_phys_to_virt(i);
477 if (!reset_value[virt]) 484 if (!reset_value[virt])
478 continue; 485 continue;
@@ -503,7 +510,7 @@ static int op_amd_check_ctrs(struct pt_regs * const regs,
503 u64 val; 510 u64 val;
504 int i; 511 int i;
505 512
506 for (i = 0; i < NUM_COUNTERS; ++i) { 513 for (i = 0; i < num_counters; ++i) {
507 int virt = op_x86_phys_to_virt(i); 514 int virt = op_x86_phys_to_virt(i);
508 if (!reset_value[virt]) 515 if (!reset_value[virt])
509 continue; 516 continue;
@@ -526,7 +533,7 @@ static void op_amd_start(struct op_msrs const * const msrs)
526 u64 val; 533 u64 val;
527 int i; 534 int i;
528 535
529 for (i = 0; i < NUM_COUNTERS; ++i) { 536 for (i = 0; i < num_counters; ++i) {
530 if (!reset_value[op_x86_phys_to_virt(i)]) 537 if (!reset_value[op_x86_phys_to_virt(i)])
531 continue; 538 continue;
532 rdmsrl(msrs->controls[i].addr, val); 539 rdmsrl(msrs->controls[i].addr, val);
@@ -546,7 +553,7 @@ static void op_amd_stop(struct op_msrs const * const msrs)
546 * Subtle: stop on all counters to avoid race with setting our 553 * Subtle: stop on all counters to avoid race with setting our
547 * pm callback 554 * pm callback
548 */ 555 */
549 for (i = 0; i < NUM_COUNTERS; ++i) { 556 for (i = 0; i < num_counters; ++i) {
550 if (!reset_value[op_x86_phys_to_virt(i)]) 557 if (!reset_value[op_x86_phys_to_virt(i)])
551 continue; 558 continue;
552 rdmsrl(msrs->controls[i].addr, val); 559 rdmsrl(msrs->controls[i].addr, val);
@@ -603,6 +610,7 @@ static int force_ibs_eilvt_setup(void)
603 ret = setup_ibs_ctl(i); 610 ret = setup_ibs_ctl(i);
604 if (ret) 611 if (ret)
605 return ret; 612 return ret;
613 pr_err(FW_BUG "using offset %d for IBS interrupts\n", i);
606 return 0; 614 return 0;
607 } 615 }
608 616
@@ -706,18 +714,29 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
706 return 0; 714 return 0;
707} 715}
708 716
717struct op_x86_model_spec op_amd_spec;
718
709static int op_amd_init(struct oprofile_operations *ops) 719static int op_amd_init(struct oprofile_operations *ops)
710{ 720{
711 init_ibs(); 721 init_ibs();
712 create_arch_files = ops->create_files; 722 create_arch_files = ops->create_files;
713 ops->create_files = setup_ibs_files; 723 ops->create_files = setup_ibs_files;
724
725 if (boot_cpu_data.x86 == 0x15) {
726 num_counters = NUM_COUNTERS_F15H;
727 } else {
728 num_counters = NUM_COUNTERS;
729 }
730
731 op_amd_spec.num_counters = num_counters;
732 op_amd_spec.num_controls = num_counters;
733 op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
734
714 return 0; 735 return 0;
715} 736}
716 737
717struct op_x86_model_spec op_amd_spec = { 738struct op_x86_model_spec op_amd_spec = {
718 .num_counters = NUM_COUNTERS, 739 /* num_counters/num_controls filled in at runtime */
719 .num_controls = NUM_COUNTERS,
720 .num_virt_counters = NUM_VIRT_COUNTERS,
721 .reserved = MSR_AMD_EVENTSEL_RESERVED, 740 .reserved = MSR_AMD_EVENTSEL_RESERVED,
722 .event_mask = OP_EVENT_MASK, 741 .event_mask = OP_EVENT_MASK,
723 .init = op_amd_init, 742 .init = op_amd_init,
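
On family 15h the code above switches to six counters whose control/counter MSRs interleave (CTL at even offsets, CTR at odd), hence the `i << 1` stride; older families keep the four contiguous K7 MSR pairs. A self-contained demo of the address computation (userspace C; MSR numbers as in the kernel's msr-index.h, family check reduced to a flag):

#include <stdio.h>

/* MSR numbers as in msr-index.h */
#define MSR_K7_EVNTSEL0   0xc0010000u
#define MSR_K7_PERFCTR0   0xc0010004u
#define MSR_F15H_PERF_CTL 0xc0010200u
#define MSR_F15H_PERF_CTR 0xc0010201u

int main(void)
{
	int f15h = 1;			/* pretend boot_cpu_data.x86 == 0x15 */
	int num_counters = f15h ? 6 : 4;
	int i;

	for (i = 0; i < num_counters; i++) {
		unsigned int ctl, ctr;

		if (f15h) {
			/* control/counter MSRs interleave, hence i << 1 */
			ctl = MSR_F15H_PERF_CTL + (i << 1);
			ctr = MSR_F15H_PERF_CTR + (i << 1);
		} else {
			ctl = MSR_K7_EVNTSEL0 + i;
			ctr = MSR_K7_PERFCTR0 + i;
		}
		printf("counter %d: ctl=%#x ctr=%#x\n", i, ctl, ctr);
	}
	return 0;
}
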
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 182558dd5515..9fadec074142 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -11,7 +11,7 @@
11#include <linux/oprofile.h> 11#include <linux/oprofile.h>
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <linux/ptrace.h> 13#include <linux/ptrace.h>
14#include <linux/nmi.h> 14#include <asm/nmi.h>
15#include <asm/msr.h> 15#include <asm/msr.h>
16#include <asm/fixmap.h> 16#include <asm/fixmap.h>
17#include <asm/apic.h> 17#include <asm/apic.h>
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index effd96e33f16..6b8759f7634e 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_PCI_OLPC) += olpc.o
7obj-$(CONFIG_PCI_XEN) += xen.o 7obj-$(CONFIG_PCI_XEN) += xen.o
8 8
9obj-y += fixup.o 9obj-y += fixup.o
10obj-$(CONFIG_X86_INTEL_CE) += ce4100.o
10obj-$(CONFIG_ACPI) += acpi.o 11obj-$(CONFIG_ACPI) += acpi.o
11obj-y += legacy.o irq.o 12obj-y += legacy.o irq.o
12 13
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c
new file mode 100644
index 000000000000..85b68ef5e809
--- /dev/null
+++ b/arch/x86/pci/ce4100.c
@@ -0,0 +1,315 @@
1/*
2 * GPL LICENSE SUMMARY
3 *
4 * Copyright(c) 2010 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of version 2 of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 * The full GNU General Public License is included in this distribution
19 * in the file called LICENSE.GPL.
20 *
21 * Contact Information:
22 * Intel Corporation
23 * 2200 Mission College Blvd.
24 * Santa Clara, CA 97052
25 *
 26 * This provides access methods for PCI registers that misbehave on
 27 * the CE4100. Each register can be assigned a private init, read and
 28 * write routine. The exception to this is the bridge device. The
 29 * bridge device is the only device on bus zero (0) that requires any
 30 * fixup, so it is a special case at the moment.
31 */
32
33#include <linux/kernel.h>
34#include <linux/pci.h>
35#include <linux/init.h>
36
37#include <asm/pci_x86.h>
38
39struct sim_reg {
40 u32 value;
41 u32 mask;
42};
43
44struct sim_dev_reg {
45 int dev_func;
46 int reg;
47 void (*init)(struct sim_dev_reg *reg);
48 void (*read)(struct sim_dev_reg *reg, u32 *value);
49 void (*write)(struct sim_dev_reg *reg, u32 value);
50 struct sim_reg sim_reg;
51};
52
53struct sim_reg_op {
54 void (*init)(struct sim_dev_reg *reg);
55 void (*read)(struct sim_dev_reg *reg, u32 value);
56 void (*write)(struct sim_dev_reg *reg, u32 value);
57};
58
59#define MB (1024 * 1024)
60#define KB (1024)
61#define SIZE_TO_MASK(size) (~(size - 1))
62
63#define DEFINE_REG(device, func, offset, size, init_op, read_op, write_op)\
64{ PCI_DEVFN(device, func), offset, init_op, read_op, write_op,\
65 {0, SIZE_TO_MASK(size)} },
66
67static void reg_init(struct sim_dev_reg *reg)
68{
69 pci_direct_conf1.read(0, 1, reg->dev_func, reg->reg, 4,
70 &reg->sim_reg.value);
71}
72
73static void reg_read(struct sim_dev_reg *reg, u32 *value)
74{
75 unsigned long flags;
76
77 raw_spin_lock_irqsave(&pci_config_lock, flags);
78 *value = reg->sim_reg.value;
79 raw_spin_unlock_irqrestore(&pci_config_lock, flags);
80}
81
82static void reg_write(struct sim_dev_reg *reg, u32 value)
83{
84 unsigned long flags;
85
86 raw_spin_lock_irqsave(&pci_config_lock, flags);
87 reg->sim_reg.value = (value & reg->sim_reg.mask) |
88 (reg->sim_reg.value & ~reg->sim_reg.mask);
89 raw_spin_unlock_irqrestore(&pci_config_lock, flags);
90}
91
92static void sata_reg_init(struct sim_dev_reg *reg)
93{
94 pci_direct_conf1.read(0, 1, PCI_DEVFN(14, 0), 0x10, 4,
95 &reg->sim_reg.value);
96 reg->sim_reg.value += 0x400;
97}
98
99static void ehci_reg_read(struct sim_dev_reg *reg, u32 *value)
100{
101 reg_read(reg, value);
102 if (*value != reg->sim_reg.mask)
103 *value |= 0x100;
104}
105
 106static void sata_revid_init(struct sim_dev_reg *reg)
107{
108 reg->sim_reg.value = 0x01060100;
109 reg->sim_reg.mask = 0;
110}
111
112static void sata_revid_read(struct sim_dev_reg *reg, u32 *value)
113{
114 reg_read(reg, value);
115}
116
117static struct sim_dev_reg bus1_fixups[] = {
118 DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write)
119 DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write)
120 DEFINE_REG(2, 1, 0x10, (64*KB), reg_init, reg_read, reg_write)
121 DEFINE_REG(3, 0, 0x10, (64*KB), reg_init, reg_read, reg_write)
122 DEFINE_REG(4, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
123 DEFINE_REG(4, 1, 0x10, (128*KB), reg_init, reg_read, reg_write)
124 DEFINE_REG(6, 0, 0x10, (512*KB), reg_init, reg_read, reg_write)
125 DEFINE_REG(6, 1, 0x10, (512*KB), reg_init, reg_read, reg_write)
126 DEFINE_REG(6, 2, 0x10, (64*KB), reg_init, reg_read, reg_write)
127 DEFINE_REG(8, 0, 0x10, (1*MB), reg_init, reg_read, reg_write)
128 DEFINE_REG(8, 1, 0x10, (64*KB), reg_init, reg_read, reg_write)
129 DEFINE_REG(8, 2, 0x10, (64*KB), reg_init, reg_read, reg_write)
130 DEFINE_REG(9, 0, 0x10 , (1*MB), reg_init, reg_read, reg_write)
131 DEFINE_REG(9, 0, 0x14, (64*KB), reg_init, reg_read, reg_write)
132 DEFINE_REG(10, 0, 0x10, (256), reg_init, reg_read, reg_write)
133 DEFINE_REG(10, 0, 0x14, (256*MB), reg_init, reg_read, reg_write)
134 DEFINE_REG(11, 0, 0x10, (256), reg_init, reg_read, reg_write)
135 DEFINE_REG(11, 0, 0x14, (256), reg_init, reg_read, reg_write)
136 DEFINE_REG(11, 1, 0x10, (256), reg_init, reg_read, reg_write)
137 DEFINE_REG(11, 2, 0x10, (256), reg_init, reg_read, reg_write)
138 DEFINE_REG(11, 2, 0x14, (256), reg_init, reg_read, reg_write)
139 DEFINE_REG(11, 2, 0x18, (256), reg_init, reg_read, reg_write)
140 DEFINE_REG(11, 3, 0x10, (256), reg_init, reg_read, reg_write)
141 DEFINE_REG(11, 3, 0x14, (256), reg_init, reg_read, reg_write)
142 DEFINE_REG(11, 4, 0x10, (256), reg_init, reg_read, reg_write)
143 DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write)
144 DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write)
145 DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write)
146 DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
147 DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write)
148 DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write)
149 DEFINE_REG(13, 0, 0x10, (32*KB), reg_init, ehci_reg_read, reg_write)
150 DEFINE_REG(13, 1, 0x10, (32*KB), reg_init, ehci_reg_read, reg_write)
151 DEFINE_REG(14, 0, 0x8, 0, sata_revid_init, sata_revid_read, 0)
152 DEFINE_REG(14, 0, 0x10, 0, reg_init, reg_read, reg_write)
153 DEFINE_REG(14, 0, 0x14, 0, reg_init, reg_read, reg_write)
154 DEFINE_REG(14, 0, 0x18, 0, reg_init, reg_read, reg_write)
155 DEFINE_REG(14, 0, 0x1C, 0, reg_init, reg_read, reg_write)
156 DEFINE_REG(14, 0, 0x20, 0, reg_init, reg_read, reg_write)
157 DEFINE_REG(14, 0, 0x24, (0x200), sata_reg_init, reg_read, reg_write)
158 DEFINE_REG(15, 0, 0x10, (64*KB), reg_init, reg_read, reg_write)
159 DEFINE_REG(15, 0, 0x14, (64*KB), reg_init, reg_read, reg_write)
160 DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write)
161 DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write)
162 DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write)
163 DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
164 DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write)
165};
166
167static void __init init_sim_regs(void)
168{
169 int i;
170
171 for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) {
172 if (bus1_fixups[i].init)
173 bus1_fixups[i].init(&bus1_fixups[i]);
174 }
175}
176
177static inline void extract_bytes(u32 *value, int reg, int len)
178{
179 uint32_t mask;
180
181 *value >>= ((reg & 3) * 8);
182 mask = 0xFFFFFFFF >> ((4 - len) * 8);
183 *value &= mask;
184}
185
186int bridge_read(unsigned int devfn, int reg, int len, u32 *value)
187{
188 u32 av_bridge_base, av_bridge_limit;
189 int retval = 0;
190
191 switch (reg) {
192 /* Make BARs appear to not request any memory. */
193 case PCI_BASE_ADDRESS_0:
194 case PCI_BASE_ADDRESS_0 + 1:
195 case PCI_BASE_ADDRESS_0 + 2:
196 case PCI_BASE_ADDRESS_0 + 3:
197 *value = 0;
198 break;
199
 200 /* The subordinate bus number register is hardwired
 201 * to zero and read-only, so simulate it here.
202 */
203 case PCI_PRIMARY_BUS:
204 if (len == 4)
205 *value = 0x00010100;
206 break;
207
208 case PCI_SUBORDINATE_BUS:
209 *value = 1;
210 break;
211
212 case PCI_MEMORY_BASE:
213 case PCI_MEMORY_LIMIT:
214 /* Get the A/V bridge base address. */
215 pci_direct_conf1.read(0, 0, devfn,
216 PCI_BASE_ADDRESS_0, 4, &av_bridge_base);
217
218 av_bridge_limit = av_bridge_base + (512*MB - 1);
219 av_bridge_limit >>= 16;
220 av_bridge_limit &= 0xFFF0;
221
222 av_bridge_base >>= 16;
223 av_bridge_base &= 0xFFF0;
224
225 if (reg == PCI_MEMORY_LIMIT)
226 *value = av_bridge_limit;
227 else if (len == 2)
228 *value = av_bridge_base;
229 else
230 *value = (av_bridge_limit << 16) | av_bridge_base;
231 break;
 232 /* Make the prefetchable memory limit smaller than the prefetchable
 233 * memory base, so that no prefetchable memory space is claimed.
234 */
235 case PCI_PREF_MEMORY_BASE:
236 *value = 0xFFF0;
237 break;
238 case PCI_PREF_MEMORY_LIMIT:
239 *value = 0x0;
240 break;
 241 /* Make the IO limit smaller than the IO base, so that no IO space is claimed. */
242 case PCI_IO_BASE:
243 *value = 0xF0;
244 break;
245 case PCI_IO_LIMIT:
246 *value = 0;
247 break;
248 default:
249 retval = 1;
250 }
251 return retval;
252}
253
254static int ce4100_conf_read(unsigned int seg, unsigned int bus,
255 unsigned int devfn, int reg, int len, u32 *value)
256{
257 int i, retval = 1;
258
259 if (bus == 1) {
260 for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) {
261 if (bus1_fixups[i].dev_func == devfn &&
262 bus1_fixups[i].reg == (reg & ~3) &&
263 bus1_fixups[i].read) {
264 bus1_fixups[i].read(&(bus1_fixups[i]),
265 value);
266 extract_bytes(value, reg, len);
267 return 0;
268 }
269 }
270 }
271
272 if (bus == 0 && (PCI_DEVFN(1, 0) == devfn) &&
273 !bridge_read(devfn, reg, len, value))
274 return 0;
275
276 return pci_direct_conf1.read(seg, bus, devfn, reg, len, value);
277}
278
279static int ce4100_conf_write(unsigned int seg, unsigned int bus,
280 unsigned int devfn, int reg, int len, u32 value)
281{
282 int i;
283
284 if (bus == 1) {
285 for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) {
286 if (bus1_fixups[i].dev_func == devfn &&
287 bus1_fixups[i].reg == (reg & ~3) &&
288 bus1_fixups[i].write) {
289 bus1_fixups[i].write(&(bus1_fixups[i]),
290 value);
291 return 0;
292 }
293 }
294 }
295
296 /* Discard writes to A/V bridge BAR. */
297 if (bus == 0 && PCI_DEVFN(1, 0) == devfn &&
298 ((reg & ~3) == PCI_BASE_ADDRESS_0))
299 return 0;
300
301 return pci_direct_conf1.write(seg, bus, devfn, reg, len, value);
302}
303
304struct pci_raw_ops ce4100_pci_conf = {
305 .read = ce4100_conf_read,
306 .write = ce4100_conf_write,
307};
308
309static int __init ce4100_pci_init(void)
310{
311 init_sim_regs();
312 raw_pci_ops = &ce4100_pci_conf;
313 return 0;
314}
315subsys_initcall(ce4100_pci_init);
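
Sub-dword config reads on the simulated bus 1 go through extract_bytes() above: shift the cached 32-bit register down to the requested byte offset, then mask to the access length. A self-contained demo (userspace C; the BAR content is a hypothetical value):

#include <stdio.h>
#include <stdint.h>

/* same helper as in the new file: shift the 32-bit register down to
 * the byte offset and mask to the access length */
static void extract_bytes(uint32_t *value, int reg, int len)
{
	uint32_t mask;

	*value >>= ((reg & 3) * 8);
	mask = 0xFFFFFFFF >> ((4 - len) * 8);
	*value &= mask;
}

int main(void)
{
	/* hypothetical simulated BAR content */
	uint32_t value = 0xdffe0208;

	/* a 1-byte config read at offset 2 of that dword */
	extract_bytes(&value, 2, 1);
	printf("byte 2 = %#x\n", value);	/* prints 0xfe */
	return 0;
}
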
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index 2492d165096a..a5f7d0d63de0 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -9,6 +9,7 @@
9#include <linux/uaccess.h> 9#include <linux/uaccess.h>
10#include <asm/pci_x86.h> 10#include <asm/pci_x86.h>
11#include <asm/pci-functions.h> 11#include <asm/pci-functions.h>
12#include <asm/cacheflush.h>
12 13
13/* BIOS32 signature: "_32_" */ 14/* BIOS32 signature: "_32_" */
14#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24)) 15#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
@@ -25,6 +26,27 @@
25#define PCIBIOS_HW_TYPE1_SPEC 0x10 26#define PCIBIOS_HW_TYPE1_SPEC 0x10
26#define PCIBIOS_HW_TYPE2_SPEC 0x20 27#define PCIBIOS_HW_TYPE2_SPEC 0x20
27 28
29int pcibios_enabled;
30
31/* According to the BIOS specification at:
32 * http://members.datafast.net.au/dft0802/specs/bios21.pdf, we could
 33 * restrict the executable zone to a few pages and make it read-only,
 34 * but this may be broken on some BIOSes and is complex to handle with
 35 * static_protections. We could make the 0xe0000-0x100000 range rox,
 36 * but this can break some ISA mappings.
 37 *
 38 * So we leave an rw and x hole when pcibios is used. This shouldn't
 39 * happen on modern systems with mmconfig, and if you don't want it
 40 * you can disable pcibios...
41 */
42static inline void set_bios_x(void)
43{
44 pcibios_enabled = 1;
45 set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT);
46 if (__supported_pte_mask & _PAGE_NX)
47 printk(KERN_INFO "PCI : PCI BIOS aera is rw and x. Use pci=nobios if you want it NX.\n");
48}
49
28/* 50/*
29 * This is the standard structure used to identify the entry point 51 * This is the standard structure used to identify the entry point
30 * to the BIOS32 Service Directory, as documented in 52 * to the BIOS32 Service Directory, as documented in
@@ -332,6 +354,7 @@ static struct pci_raw_ops * __devinit pci_find_bios(void)
332 DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n", 354 DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n",
333 bios32_entry); 355 bios32_entry);
334 bios32_indirect.address = bios32_entry + PAGE_OFFSET; 356 bios32_indirect.address = bios32_entry + PAGE_OFFSET;
357 set_bios_x();
335 if (check_pcibios()) 358 if (check_pcibios())
336 return &pci_bios_access; 359 return &pci_bios_access;
337 } 360 }
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index 7bf70b812fa2..021eee91c056 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -1,5 +1,7 @@
1# Platform specific code goes here 1# Platform specific code goes here
2obj-y += ce4100/
2obj-y += efi/ 3obj-y += efi/
4obj-y += iris/
3obj-y += mrst/ 5obj-y += mrst/
4obj-y += olpc/ 6obj-y += olpc/
5obj-y += scx200/ 7obj-y += scx200/
diff --git a/arch/x86/platform/ce4100/Makefile b/arch/x86/platform/ce4100/Makefile
new file mode 100644
index 000000000000..91fc92971d94
--- /dev/null
+++ b/arch/x86/platform/ce4100/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_X86_INTEL_CE) += ce4100.o
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
new file mode 100644
index 000000000000..d2c0d51a7178
--- /dev/null
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -0,0 +1,132 @@
1/*
2 * Intel CE4100 platform specific setup code
3 *
4 * (C) Copyright 2010 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; version 2
9 * of the License.
10 */
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/irq.h>
14#include <linux/module.h>
15#include <linux/serial_reg.h>
16#include <linux/serial_8250.h>
17
18#include <asm/setup.h>
19#include <asm/io.h>
20
21static int ce4100_i8042_detect(void)
22{
23 return 0;
24}
25
26static void __init sdv_find_smp_config(void)
27{
28}
29
30#ifdef CONFIG_SERIAL_8250
31
32
33static unsigned int mem_serial_in(struct uart_port *p, int offset)
34{
35 offset = offset << p->regshift;
36 return readl(p->membase + offset);
37}
38
39/*
 40 * The UART Tx interrupt is not raised under some conditions, and serial
 41 * transmission then hangs. This is a silicon issue that has not been
 42 * root caused. The workaround checks the UART_LSR_THRE and UART_LSR_TEMT
 43 * bits of the LSR register in the interrupt handler; if at least one of
 44 * these two bits is set, the transmit request is processed. Without the
 45 * workaround, serial transmission may hang. This workaround is for
 46 * errata number 9 in Errata - B step.
47*/
48
49static unsigned int ce4100_mem_serial_in(struct uart_port *p, int offset)
50{
51 unsigned int ret, ier, lsr;
52
53 if (offset == UART_IIR) {
54 offset = offset << p->regshift;
55 ret = readl(p->membase + offset);
56 if (ret & UART_IIR_NO_INT) {
57 /* see if the TX interrupt should really have been set */
58 ier = mem_serial_in(p, UART_IER);
59 /* see if the UART's XMIT interrupt is enabled */
60 if (ier & UART_IER_THRI) {
61 lsr = mem_serial_in(p, UART_LSR);
62 /* now check to see if the UART should be
63 generating an interrupt (but isn't) */
64 if (lsr & (UART_LSR_THRE | UART_LSR_TEMT))
65 ret &= ~UART_IIR_NO_INT;
66 }
67 }
68 } else
69 ret = mem_serial_in(p, offset);
70 return ret;
71}
72
73static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value)
74{
75 offset = offset << p->regshift;
76 writel(value, p->membase + offset);
77}
78
79static void ce4100_serial_fixup(int port, struct uart_port *up,
80 unsigned short *capabilities)
81{
82#ifdef CONFIG_EARLY_PRINTK
83 /*
84 * Override the legacy port configuration that comes from
85 * asm/serial.h. Using the ioport driver and then switching to the
86 * PCI memory-mapped driver hangs the IOAPIC.
87 */
88 if (up->iotype != UPIO_MEM32) {
89 up->uartclk = 14745600;
90 up->mapbase = 0xdffe0200;
91 set_fixmap_nocache(FIX_EARLYCON_MEM_BASE,
92 up->mapbase & PAGE_MASK);
93 up->membase =
94 (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
95 up->membase += up->mapbase & ~PAGE_MASK;
96 up->iotype = UPIO_MEM32;
97 up->regshift = 2;
98 }
99#endif
100 up->iobase = 0;
101 up->serial_in = ce4100_mem_serial_in;
102 up->serial_out = ce4100_mem_serial_out;
103
104 *capabilities |= (1 << 12);
105}
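The CONFIG_EARLY_PRINTK branch above deals with a non-page-aligned MMIO base by mapping the containing page through a fixmap slot and then re-adding the in-page offset. A minimal sketch of the pattern, assuming a generic fixmap index (the real code uses FIX_EARLYCON_MEM_BASE):

    /* sketch: map a possibly unaligned MMIO address through a fixmap slot */
    static void __iomem *map_via_fixmap(enum fixed_addresses idx,
                                        unsigned long phys)
    {
            set_fixmap_nocache(idx, phys & PAGE_MASK); /* map containing page */
            return (void __iomem *)__fix_to_virt(idx)
                            + (phys & ~PAGE_MASK);     /* restore in-page offset */
    }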
106
107static __init void sdv_serial_fixup(void)
108{
109 serial8250_set_isa_configurator(ce4100_serial_fixup);
110}
111
112#else
113static inline void sdv_serial_fixup(void) { }
114#endif
115
116static void __init sdv_arch_setup(void)
117{
118 sdv_serial_fixup();
119}
120
121/*
122 * CE4100 specific x86_init function overrides and early setup
123 * calls.
124 */
125void __init x86_ce4100_early_setup(void)
126{
127 x86_init.oem.arch_setup = sdv_arch_setup;
128 x86_platform.i8042_detect = ce4100_i8042_detect;
129 x86_init.resources.probe_roms = x86_init_noop;
130 x86_init.mpparse.get_smp_config = x86_init_uint_noop;
131 x86_init.mpparse.find_smp_config = sdv_find_smp_config;
132}
diff --git a/arch/x86/platform/iris/Makefile b/arch/x86/platform/iris/Makefile
new file mode 100644
index 000000000000..db921983a102
--- /dev/null
+++ b/arch/x86/platform/iris/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_X86_32_IRIS) += iris.o
diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c
new file mode 100644
index 000000000000..1ba7f5ed8c9b
--- /dev/null
+++ b/arch/x86/platform/iris/iris.c
@@ -0,0 +1,91 @@
1/*
2 * Eurobraille/Iris power off support.
3 *
4 * Eurobraille's Iris machine is a PC with no APM or ACPI support.
5 * It is shutdown by a special I/O sequence which this module provides.
6 *
7 * Copyright (C) Shérab <Sebastien.Hinderer@ens-lyon.org>
8 *
9 * This program is free software ; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation ; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY ; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with the program ; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#include <linux/moduleparam.h>
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/errno.h>
28#include <linux/delay.h>
29#include <linux/init.h>
30#include <linux/pm.h>
31#include <asm/io.h>
32
33#define IRIS_GIO_BASE 0x340
34#define IRIS_GIO_INPUT IRIS_GIO_BASE
35#define IRIS_GIO_OUTPUT (IRIS_GIO_BASE + 1)
36#define IRIS_GIO_PULSE 0x80 /* First byte to send */
37#define IRIS_GIO_REST 0x00 /* Second byte to send */
38#define IRIS_GIO_NODEV 0xff /* Likely not an Iris */
39
40MODULE_LICENSE("GPL");
41MODULE_AUTHOR("Sébastien Hinderer <Sebastien.Hinderer@ens-lyon.org>");
42MODULE_DESCRIPTION("A power_off handler for Iris devices from EuroBraille");
43MODULE_SUPPORTED_DEVICE("Eurobraille/Iris");
44
45static bool force;
46
47module_param(force, bool, 0);
48MODULE_PARM_DESC(force, "Set to one to force poweroff handler installation.");
49
50static void (*old_pm_power_off)(void);
51
52static void iris_power_off(void)
53{
54 outb(IRIS_GIO_PULSE, IRIS_GIO_OUTPUT);
55 msleep(850);
56 outb(IRIS_GIO_REST, IRIS_GIO_OUTPUT);
57}
58
59/*
60 * Before installing the power_off handler, try to make sure the OS is
61 * running on an Iris. Since Iris does not support DMI, this is done
62 * by reading its input port and seeing whether the read value is
63 * meaningful.
64 */
65static int iris_init(void)
66{
67 unsigned char status;
68 if (force != 1) {
69 printk(KERN_ERR "The force parameter has not been set to 1 so the Iris poweroff handler will not be installed.\n");
70 return -ENODEV;
71 }
72 status = inb(IRIS_GIO_INPUT);
73 if (status == IRIS_GIO_NODEV) {
74 printk(KERN_ERR "This machine does not seem to be an Iris. Power_off handler not installed.\n");
75 return -ENODEV;
76 }
77 old_pm_power_off = pm_power_off;
78 pm_power_off = &iris_power_off;
79 printk(KERN_INFO "Iris power_off handler installed.\n");
80
81 return 0;
82}
83
84static void iris_exit(void)
85{
86 pm_power_off = old_pm_power_off;
87 printk(KERN_INFO "Iris power_off handler uninstalled.\n");
88}
89
90module_init(iris_init);
91module_exit(iris_exit);
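Because the Iris cannot be identified via DMI, installation of the handler is opt-in. On a machine known to be an Iris, the module would typically be loaded with the force parameter set, e.g.:

    # modprobe iris force=1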
diff --git a/arch/x86/platform/mrst/Makefile b/arch/x86/platform/mrst/Makefile
index efbbc552fa95..f61ccdd49341 100644
--- a/arch/x86/platform/mrst/Makefile
+++ b/arch/x86/platform/mrst/Makefile
@@ -1 +1,3 @@
1obj-$(CONFIG_X86_MRST) += mrst.o 1obj-$(CONFIG_X86_MRST) += mrst.o
2obj-$(CONFIG_X86_MRST) += vrtc.o
3obj-$(CONFIG_EARLY_PRINTK_MRST) += early_printk_mrst.o
diff --git a/arch/x86/kernel/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c
index 65df603622b2..65df603622b2 100644
--- a/arch/x86/kernel/early_printk_mrst.c
+++ b/arch/x86/platform/mrst/early_printk_mrst.c
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 79ae68154e87..fee0b4914e07 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -9,9 +9,19 @@
9 * as published by the Free Software Foundation; version 2 9 * as published by the Free Software Foundation; version 2
10 * of the License. 10 * of the License.
11 */ 11 */
12
13#define pr_fmt(fmt) "mrst: " fmt
14
12#include <linux/init.h> 15#include <linux/init.h>
13#include <linux/kernel.h> 16#include <linux/kernel.h>
14#include <linux/sfi.h> 17#include <linux/sfi.h>
18#include <linux/intel_pmic_gpio.h>
19#include <linux/spi/spi.h>
20#include <linux/i2c.h>
21#include <linux/i2c/pca953x.h>
22#include <linux/gpio_keys.h>
23#include <linux/input.h>
24#include <linux/platform_device.h>
15#include <linux/irq.h> 25#include <linux/irq.h>
16#include <linux/module.h> 26#include <linux/module.h>
17 27
@@ -23,7 +33,9 @@
23#include <asm/mrst.h> 33#include <asm/mrst.h>
24#include <asm/io.h> 34#include <asm/io.h>
25#include <asm/i8259.h> 35#include <asm/i8259.h>
36#include <asm/intel_scu_ipc.h>
26#include <asm/apb_timer.h> 37#include <asm/apb_timer.h>
38#include <asm/reboot.h>
27 39
28/* 40/*
29 * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock, 41 * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
@@ -102,10 +114,10 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table)
102 memcpy(sfi_mtimer_array, pentry, totallen); 114 memcpy(sfi_mtimer_array, pentry, totallen);
103 } 115 }
104 116
105 printk(KERN_INFO "SFI: MTIMER info (num = %d):\n", sfi_mtimer_num); 117 pr_debug("SFI MTIMER info (num = %d):\n", sfi_mtimer_num);
106 pentry = sfi_mtimer_array; 118 pentry = sfi_mtimer_array;
107 for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) { 119 for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
108 printk(KERN_INFO "timer[%d]: paddr = 0x%08x, freq = %dHz," 120 pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz,"
109 " irq = %d\n", totallen, (u32)pentry->phys_addr, 121 " irq = %d\n", totallen, (u32)pentry->phys_addr,
110 pentry->freq_hz, pentry->irq); 122 pentry->freq_hz, pentry->irq);
111 if (!pentry->irq) 123 if (!pentry->irq)
@@ -176,14 +188,14 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
176 memcpy(sfi_mrtc_array, pentry, totallen); 188 memcpy(sfi_mrtc_array, pentry, totallen);
177 } 189 }
178 190
179 printk(KERN_INFO "SFI: RTC info (num = %d):\n", sfi_mrtc_num); 191 pr_debug("SFI RTC info (num = %d):\n", sfi_mrtc_num);
180 pentry = sfi_mrtc_array; 192 pentry = sfi_mrtc_array;
181 for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) { 193 for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
182 printk(KERN_INFO "RTC[%d]: paddr = 0x%08x, irq = %d\n", 194 pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
183 totallen, (u32)pentry->phys_addr, pentry->irq); 195 totallen, (u32)pentry->phys_addr, pentry->irq);
184 mp_irq.type = MP_IOAPIC; 196 mp_irq.type = MP_IOAPIC;
185 mp_irq.irqtype = mp_INT; 197 mp_irq.irqtype = mp_INT;
186 mp_irq.irqflag = 0; 198 mp_irq.irqflag = 0xf; /* level trigger and active low */
187 mp_irq.srcbus = 0; 199 mp_irq.srcbus = 0;
188 mp_irq.srcbusirq = pentry->irq; /* IRQ */ 200 mp_irq.srcbusirq = pentry->irq; /* IRQ */
189 mp_irq.dstapic = MP_APIC_ALL; 201 mp_irq.dstapic = MP_APIC_ALL;
@@ -209,6 +221,7 @@ static unsigned long __init mrst_calibrate_tsc(void)
209 221
210void __init mrst_time_init(void) 222void __init mrst_time_init(void)
211{ 223{
224 sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
212 switch (mrst_timer_options) { 225 switch (mrst_timer_options) {
213 case MRST_TIMER_APBT_ONLY: 226 case MRST_TIMER_APBT_ONLY:
214 break; 227 break;
@@ -224,16 +237,10 @@ void __init mrst_time_init(void)
224 return; 237 return;
225 } 238 }
226 /* we need at least one APB timer */ 239 /* we need at least one APB timer */
227 sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
228 pre_init_apic_IRQ0(); 240 pre_init_apic_IRQ0();
229 apbt_time_init(); 241 apbt_time_init();
230} 242}
231 243
232void __init mrst_rtc_init(void)
233{
234 sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
235}
236
237void __cpuinit mrst_arch_setup(void) 244void __cpuinit mrst_arch_setup(void)
238{ 245{
239 if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27) 246 if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
@@ -256,6 +263,17 @@ static int mrst_i8042_detect(void)
256 return 0; 263 return 0;
257} 264}
258 265
266/* Reboot and power off are handled by the SCU on a MID device */
267static void mrst_power_off(void)
268{
269 intel_scu_ipc_simple_command(0xf1, 1);
270}
271
272static void mrst_reboot(void)
273{
274 intel_scu_ipc_simple_command(0xf1, 0);
275}
276
259/* 277/*
260 * Moorestown specific x86_init function overrides and early setup 278 * Moorestown specific x86_init function overrides and early setup
261 * calls. 279 * calls.
@@ -281,6 +299,10 @@ void __init x86_mrst_early_setup(void)
281 299
282 legacy_pic = &null_legacy_pic; 300 legacy_pic = &null_legacy_pic;
283 301
302 /* Moorestown specific power_off/restart method */
303 pm_power_off = mrst_power_off;
304 machine_ops.emergency_restart = mrst_reboot;
305
284 /* Avoid searching for BIOS MP tables */ 306 /* Avoid searching for BIOS MP tables */
285 x86_init.mpparse.find_smp_config = x86_init_noop; 307 x86_init.mpparse.find_smp_config = x86_init_noop;
286 x86_init.mpparse.get_smp_config = x86_init_uint_noop; 308 x86_init.mpparse.get_smp_config = x86_init_uint_noop;
@@ -309,3 +331,505 @@ static inline int __init setup_x86_mrst_timer(char *arg)
309 return 0; 331 return 0;
310} 332}
311__setup("x86_mrst_timer=", setup_x86_mrst_timer); 333__setup("x86_mrst_timer=", setup_x86_mrst_timer);
334
335/*
336 * Parse the GPIO table first, since the DEVS table needs it
337 * to map pin names to actual pins.
338 */
339static struct sfi_gpio_table_entry *gpio_table;
340static int gpio_num_entry;
341
342static int __init sfi_parse_gpio(struct sfi_table_header *table)
343{
344 struct sfi_table_simple *sb;
345 struct sfi_gpio_table_entry *pentry;
346 int num, i;
347
348 if (gpio_table)
349 return 0;
350 sb = (struct sfi_table_simple *)table;
351 num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
352 pentry = (struct sfi_gpio_table_entry *)sb->pentry;
353
354 gpio_table = (struct sfi_gpio_table_entry *)
355 kmalloc(num * sizeof(*pentry), GFP_KERNEL);
356 if (!gpio_table)
357 return -1;
358 memcpy(gpio_table, pentry, num * sizeof(*pentry));
359 gpio_num_entry = num;
360
361 pr_debug("GPIO pin info:\n");
362 for (i = 0; i < num; i++, pentry++)
363 pr_debug("info[%2d]: controller = %16.16s, pin_name = %16.16s,"
364 " pin = %d\n", i,
365 pentry->controller_name,
366 pentry->pin_name,
367 pentry->pin_no);
368 return 0;
369}
370
371static int get_gpio_by_name(const char *name)
372{
373 struct sfi_gpio_table_entry *pentry = gpio_table;
374 int i;
375
376 if (!pentry)
377 return -1;
378 for (i = 0; i < gpio_num_entry; i++, pentry++) {
379 if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN))
380 return pentry->pin_no;
381 }
382 return -1;
383}
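The helper is a simple linear lookup: it returns the global GPIO pin number for a firmware-provided pin name, or -1 when the name is absent from the table. A usage sketch (the pin name must match an entry in the SFI GPIO table):

    int gpio = get_gpio_by_name("power_btn"); /* -1 if not listed by firmware */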
384
385/*
386 * This array defines the platform data of the devices that IAFW exports
387 * through the SFI "DEVS" table; name and type are used to match a device
388 * with its platform data.
389 */
390struct devs_id {
391 char name[SFI_NAME_LEN + 1];
392 u8 type;
393 u8 delay;
394 void *(*get_platform_data)(void *info);
395};
396
397/* the offset for the mapping of global gpio pin to irq */
398#define MRST_IRQ_OFFSET 0x100
399
400static void __init *pmic_gpio_platform_data(void *info)
401{
402 static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
403 int gpio_base = get_gpio_by_name("pmic_gpio_base");
404
405 if (gpio_base == -1)
406 gpio_base = 64;
407 pmic_gpio_pdata.gpio_base = gpio_base;
408 pmic_gpio_pdata.irq_base = gpio_base + MRST_IRQ_OFFSET;
409 pmic_gpio_pdata.gpiointr = 0xffffeff8;
410
411 return &pmic_gpio_pdata;
412}
413
414static void __init *max3111_platform_data(void *info)
415{
416 struct spi_board_info *spi_info = info;
417 int intr = get_gpio_by_name("max3111_int");
418
419 if (intr == -1)
420 return NULL;
421 spi_info->irq = intr + MRST_IRQ_OFFSET;
422 return NULL;
423}
424
425/* we have multiple max7315 on the board ... */
426#define MAX7315_NUM 2
427static void __init *max7315_platform_data(void *info)
428{
429 static struct pca953x_platform_data max7315_pdata[MAX7315_NUM];
430 static int nr;
431 struct pca953x_platform_data *max7315 = &max7315_pdata[nr];
432 struct i2c_board_info *i2c_info = info;
433 int gpio_base, intr;
434 char base_pin_name[SFI_NAME_LEN + 1];
435 char intr_pin_name[SFI_NAME_LEN + 1];
436
437 if (nr == MAX7315_NUM) {
438 pr_err("too many max7315s, we only support %d\n",
439 MAX7315_NUM);
440 return NULL;
441 }
442 /* we have several max7315s on the board; we only need to load several
443 * instances of the same pca953x driver to cover them
444 */
445 strcpy(i2c_info->type, "max7315");
446 if (nr++) {
447 sprintf(base_pin_name, "max7315_%d_base", nr);
448 sprintf(intr_pin_name, "max7315_%d_int", nr);
449 } else {
450 strcpy(base_pin_name, "max7315_base");
451 strcpy(intr_pin_name, "max7315_int");
452 }
453
454 gpio_base = get_gpio_by_name(base_pin_name);
455 intr = get_gpio_by_name(intr_pin_name);
456
457 if (gpio_base == -1)
458 return NULL;
459 max7315->gpio_base = gpio_base;
460 if (intr != -1) {
461 i2c_info->irq = intr + MRST_IRQ_OFFSET;
462 max7315->irq_base = gpio_base + MRST_IRQ_OFFSET;
463 } else {
464 i2c_info->irq = -1;
465 max7315->irq_base = -1;
466 }
467 return max7315;
468}
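With the naming scheme above, the first expander resolves its pins from "max7315_base"/"max7315_int", while the second (nr == 2 after the increment) looks up "max7315_2_base"/"max7315_2_int"; the firmware's SFI GPIO table is expected to carry matching entries:

    first  max7315: "max7315_base",   "max7315_int"
    second max7315: "max7315_2_base", "max7315_2_int"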
469
470static void __init *emc1403_platform_data(void *info)
471{
472 static short intr2nd_pdata;
473 struct i2c_board_info *i2c_info = info;
474 int intr = get_gpio_by_name("thermal_int");
475 int intr2nd = get_gpio_by_name("thermal_alert");
476
477 if (intr == -1 || intr2nd == -1)
478 return NULL;
479
480 i2c_info->irq = intr + MRST_IRQ_OFFSET;
481 intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET;
482
483 return &intr2nd_pdata;
484}
485
486static void __init *lis331dl_platform_data(void *info)
487{
488 static short intr2nd_pdata;
489 struct i2c_board_info *i2c_info = info;
490 int intr = get_gpio_by_name("accel_int");
491 int intr2nd = get_gpio_by_name("accel_2");
492
493 if (intr == -1 || intr2nd == -1)
494 return NULL;
495
496 i2c_info->irq = intr + MRST_IRQ_OFFSET;
497 intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET;
498
499 return &intr2nd_pdata;
500}
501
502static void __init *no_platform_data(void *info)
503{
504 return NULL;
505}
506
507static const struct devs_id __initconst device_ids[] = {
508 {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
509 {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
510 {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
511 {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
512 {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
513 {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
514 {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
515 {"msic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
516 {},
517};
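Supporting a new SFI-enumerated device means appending a row before the terminating empty entry; a hypothetical example (the device name is made up):

    {"i2c_mysensor", SFI_DEV_TYPE_I2C, 1, &no_platform_data},

The third field is the delay flag: when set, registration is deferred through the SCU queues below instead of going straight to the board-info registration calls.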
518
519#define MAX_IPCDEVS 24
520static struct platform_device *ipc_devs[MAX_IPCDEVS];
521static int ipc_next_dev;
522
523#define MAX_SCU_SPI 24
524static struct spi_board_info *spi_devs[MAX_SCU_SPI];
525static int spi_next_dev;
526
527#define MAX_SCU_I2C 24
528static struct i2c_board_info *i2c_devs[MAX_SCU_I2C];
529static int i2c_bus[MAX_SCU_I2C];
530static int i2c_next_dev;
531
532static void __init intel_scu_device_register(struct platform_device *pdev)
533{
534 if (ipc_next_dev == MAX_IPCDEVS)
535 pr_err("too many SCU IPC devices\n");
536 else
537 ipc_devs[ipc_next_dev++] = pdev;
538}
539
540static void __init intel_scu_spi_device_register(struct spi_board_info *sdev)
541{
542 struct spi_board_info *new_dev;
543
544 if (spi_next_dev == MAX_SCU_SPI) {
545 pr_err("too many SCU SPI devices");
546 return;
547 }
548
549 new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL);
550 if (!new_dev) {
551 pr_err("failed to alloc mem for delayed spi dev %s\n",
552 sdev->modalias);
553 return;
554 }
555 memcpy(new_dev, sdev, sizeof(*sdev));
556
557 spi_devs[spi_next_dev++] = new_dev;
558}
559
560static void __init intel_scu_i2c_device_register(int bus,
561 struct i2c_board_info *idev)
562{
563 struct i2c_board_info *new_dev;
564
565 if (i2c_next_dev == MAX_SCU_I2C) {
566 pr_err("too many SCU I2C devices");
567 return;
568 }
569
570 new_dev = kzalloc(sizeof(*idev), GFP_KERNEL);
571 if (!new_dev) {
572 pr_err("failed to alloc mem for delayed i2c dev %s\n",
573 idev->type);
574 return;
575 }
576 memcpy(new_dev, idev, sizeof(*idev));
577
578 i2c_bus[i2c_next_dev] = bus;
579 i2c_devs[i2c_next_dev++] = new_dev;
580}
581
582/* Called by IPC driver */
583void intel_scu_devices_create(void)
584{
585 int i;
586
587 for (i = 0; i < ipc_next_dev; i++)
588 platform_device_add(ipc_devs[i]);
589
590 for (i = 0; i < spi_next_dev; i++)
591 spi_register_board_info(spi_devs[i], 1);
592
593 for (i = 0; i < i2c_next_dev; i++) {
594 struct i2c_adapter *adapter;
595 struct i2c_client *client;
596
597 adapter = i2c_get_adapter(i2c_bus[i]);
598 if (adapter) {
599 client = i2c_new_device(adapter, i2c_devs[i]);
600 if (!client)
601 pr_err("can't create i2c device %s\n",
602 i2c_devs[i]->type);
603 } else
604 i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
605 }
606}
607EXPORT_SYMBOL_GPL(intel_scu_devices_create);
608
609/* Called by IPC driver */
610void intel_scu_devices_destroy(void)
611{
612 int i;
613
614 for (i = 0; i < ipc_next_dev; i++)
615 platform_device_del(ipc_devs[i]);
616}
617EXPORT_SYMBOL_GPL(intel_scu_devices_destroy);
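As the comments note, the queued devices are only instantiated once the SCU IPC channel is up. A hypothetical sketch of the call site (the function name here is made up; the real hook lives in the intel_scu_ipc driver):

    /* sketch: in the SCU IPC driver, once communication is established */
    static void scu_ipc_ready(void)
    {
            intel_scu_devices_create(); /* flush queued IPC/SPI/I2C devices */
    }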
618
619static void __init install_irq_resource(struct platform_device *pdev, int irq)
620{
621 /* Single threaded */
622 static struct resource __initdata res = {
623 .name = "IRQ",
624 .flags = IORESOURCE_IRQ,
625 };
626 res.start = irq;
627 platform_device_add_resources(pdev, &res, 1);
628}
629
630static void __init sfi_handle_ipc_dev(struct platform_device *pdev)
631{
632 const struct devs_id *dev = device_ids;
633 void *pdata = NULL;
634
635 while (dev->name[0]) {
636 if (dev->type == SFI_DEV_TYPE_IPC &&
637 !strncmp(dev->name, pdev->name, SFI_NAME_LEN)) {
638 pdata = dev->get_platform_data(pdev);
639 break;
640 }
641 dev++;
642 }
643 pdev->dev.platform_data = pdata;
644 intel_scu_device_register(pdev);
645}
646
647static void __init sfi_handle_spi_dev(struct spi_board_info *spi_info)
648{
649 const struct devs_id *dev = device_ids;
650 void *pdata = NULL;
651
652 while (dev->name[0]) {
653 if (dev->type == SFI_DEV_TYPE_SPI &&
654 !strncmp(dev->name, spi_info->modalias, SFI_NAME_LEN)) {
655 pdata = dev->get_platform_data(spi_info);
656 break;
657 }
658 dev++;
659 }
660 spi_info->platform_data = pdata;
661 if (dev->delay)
662 intel_scu_spi_device_register(spi_info);
663 else
664 spi_register_board_info(spi_info, 1);
665}
666
667static void __init sfi_handle_i2c_dev(int bus, struct i2c_board_info *i2c_info)
668{
669 const struct devs_id *dev = device_ids;
670 void *pdata = NULL;
671
672 while (dev->name[0]) {
673 if (dev->type == SFI_DEV_TYPE_I2C &&
674 !strncmp(dev->name, i2c_info->type, SFI_NAME_LEN)) {
675 pdata = dev->get_platform_data(i2c_info);
676 break;
677 }
678 dev++;
679 }
680 i2c_info->platform_data = pdata;
681
682 if (dev->delay)
683 intel_scu_i2c_device_register(bus, i2c_info);
684 else
685 i2c_register_board_info(bus, i2c_info, 1);
686}
687
688
689static int __init sfi_parse_devs(struct sfi_table_header *table)
690{
691 struct sfi_table_simple *sb;
692 struct sfi_device_table_entry *pentry;
693 struct spi_board_info spi_info;
694 struct i2c_board_info i2c_info;
695 struct platform_device *pdev;
696 int num, i, bus;
697 int ioapic;
698 struct io_apic_irq_attr irq_attr;
699
700 sb = (struct sfi_table_simple *)table;
701 num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
702 pentry = (struct sfi_device_table_entry *)sb->pentry;
703
704 for (i = 0; i < num; i++, pentry++) {
705 if (pentry->irq != (u8)0xff) { /* native RTE case */
706 /* these SPI2 devices are not exposed to system as PCI
707 * devices, but they have separate RTE entry in IOAPIC
708 * so we have to enable them one by one here
709 */
710 ioapic = mp_find_ioapic(pentry->irq);
711 irq_attr.ioapic = ioapic;
712 irq_attr.ioapic_pin = pentry->irq;
713 irq_attr.trigger = 1;
714 irq_attr.polarity = 1;
715 io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr);
716 }
717 switch (pentry->type) {
718 case SFI_DEV_TYPE_IPC:
719 /* ID as IRQ is a hack that will go away */
720 pdev = platform_device_alloc(pentry->name, pentry->irq);
721 if (pdev == NULL) {
722 pr_err("out of memory for SFI platform device '%s'.\n",
723 pentry->name);
724 continue;
725 }
726 install_irq_resource(pdev, pentry->irq);
727 pr_debug("info[%2d]: IPC bus, name = %16.16s, "
728 "irq = 0x%2x\n", i, pentry->name, pentry->irq);
729 sfi_handle_ipc_dev(pdev);
730 break;
731 case SFI_DEV_TYPE_SPI:
732 memset(&spi_info, 0, sizeof(spi_info));
733 strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
734 spi_info.irq = pentry->irq;
735 spi_info.bus_num = pentry->host_num;
736 spi_info.chip_select = pentry->addr;
737 spi_info.max_speed_hz = pentry->max_freq;
738 pr_debug("info[%2d]: SPI bus = %d, name = %16.16s, "
739 "irq = 0x%2x, max_freq = %d, cs = %d\n", i,
740 spi_info.bus_num,
741 spi_info.modalias,
742 spi_info.irq,
743 spi_info.max_speed_hz,
744 spi_info.chip_select);
745 sfi_handle_spi_dev(&spi_info);
746 break;
747 case SFI_DEV_TYPE_I2C:
748 memset(&i2c_info, 0, sizeof(i2c_info));
749 bus = pentry->host_num;
750 strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
751 i2c_info.irq = pentry->irq;
752 i2c_info.addr = pentry->addr;
753 pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
754 "irq = 0x%2x, addr = 0x%x\n", i, bus,
755 i2c_info.type,
756 i2c_info.irq,
757 i2c_info.addr);
758 sfi_handle_i2c_dev(bus, &i2c_info);
759 break;
760 case SFI_DEV_TYPE_UART:
761 case SFI_DEV_TYPE_HSI:
762 default:
763 ;
764 }
765 }
766 return 0;
767}
768
769static int __init mrst_platform_init(void)
770{
771 sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio);
772 sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs);
773 return 0;
774}
775arch_initcall(mrst_platform_init);
776
777/*
778 * We search for these buttons in the SFI GPIO table (by name)
779 * and register them dynamically. Please add all possible
780 * buttons here; the list is shrunk if no GPIO is found.
781 */
782static struct gpio_keys_button gpio_button[] = {
783 {KEY_POWER, -1, 1, "power_btn", EV_KEY, 0, 3000},
784 {KEY_PROG1, -1, 1, "prog_btn1", EV_KEY, 0, 20},
785 {KEY_PROG2, -1, 1, "prog_btn2", EV_KEY, 0, 20},
786 {SW_LID, -1, 1, "lid_switch", EV_SW, 0, 20},
787 {KEY_VOLUMEUP, -1, 1, "vol_up", EV_KEY, 0, 20},
788 {KEY_VOLUMEDOWN, -1, 1, "vol_down", EV_KEY, 0, 20},
789 {KEY_CAMERA, -1, 1, "camera_full", EV_KEY, 0, 20},
790 {KEY_CAMERA_FOCUS, -1, 1, "camera_half", EV_KEY, 0, 20},
791 {SW_KEYPAD_SLIDE, -1, 1, "MagSw1", EV_SW, 0, 20},
792 {SW_KEYPAD_SLIDE, -1, 1, "MagSw2", EV_SW, 0, 20},
793};
794
795static struct gpio_keys_platform_data mrst_gpio_keys = {
796 .buttons = gpio_button,
797 .rep = 1,
798 .nbuttons = -1, /* will fill it after search */
799};
800
801static struct platform_device pb_device = {
802 .name = "gpio-keys",
803 .id = -1,
804 .dev = {
805 .platform_data = &mrst_gpio_keys,
806 },
807};
808
809/*
810 * Shrink away the non-existent buttons and register the gpio button
811 * device if any remain
812 */
813static int __init pb_keys_init(void)
814{
815 struct gpio_keys_button *gb = gpio_button;
816 int i, num, good = 0;
817
818 num = ARRAY_SIZE(gpio_button);
819 for (i = 0; i < num; i++) {
820 gb[i].gpio = get_gpio_by_name(gb[i].desc);
821 if (gb[i].gpio == -1)
822 continue;
823
824 if (i != good)
825 gb[good] = gb[i];
826 good++;
827 }
828
829 if (good) {
830 mrst_gpio_keys.nbuttons = good;
831 return platform_device_register(&pb_device);
832 }
833 return 0;
834}
835late_initcall(pb_keys_init);
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
new file mode 100644
index 000000000000..32cd7edd71a0
--- /dev/null
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -0,0 +1,165 @@
1/*
2 * vrtc.c: Driver for virtual RTC device on Intel MID platform
3 *
4 * (C) Copyright 2009 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; version 2
9 * of the License.
10 *
11 * Note:
12 * The vRTC is emulated by the system controller firmware; the real HW
13 * RTC is located in the PMIC device. The SCU FW shadows the PMIC RTC
14 * in a memory-mapped IO space that is visible to the host IA
15 * processor.
16 *
17 * This driver is based on RTC CMOS driver.
18 */
19
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/sfi.h>
23#include <linux/platform_device.h>
24
25#include <asm/mrst.h>
26#include <asm/mrst-vrtc.h>
27#include <asm/time.h>
28#include <asm/fixmap.h>
29
30static unsigned char __iomem *vrtc_virt_base;
31
32unsigned char vrtc_cmos_read(unsigned char reg)
33{
34 unsigned char retval;
35
36 /* vRTC's registers range from 0x0 to 0xD */
37 if (reg > 0xd || !vrtc_virt_base)
38 return 0xff;
39
40 lock_cmos_prefix(reg);
41 retval = __raw_readb(vrtc_virt_base + (reg << 2));
42 lock_cmos_suffix(reg);
43 return retval;
44}
45EXPORT_SYMBOL_GPL(vrtc_cmos_read);
46
47void vrtc_cmos_write(unsigned char val, unsigned char reg)
48{
49 if (reg > 0xd || !vrtc_virt_base)
50 return;
51
52 lock_cmos_prefix(reg);
53 __raw_writeb(val, vrtc_virt_base + (reg << 2));
54 lock_cmos_suffix(reg);
55}
56EXPORT_SYMBOL_GPL(vrtc_cmos_write);
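Since the SCU shadows each 8-bit RTC register on a 32-bit boundary, the byte offset into the mapping is reg << 2; for example, RTC_MINUTES (register 0x02) is accessed at offset 0x08:

    offset = reg << 2; /* RTC_MINUTES: 0x02 << 2 == 0x08 */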
57
58unsigned long vrtc_get_time(void)
59{
60 u8 sec, min, hour, mday, mon;
61 u32 year;
62
63 while (vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP)
64 cpu_relax();
65
66 sec = vrtc_cmos_read(RTC_SECONDS);
67 min = vrtc_cmos_read(RTC_MINUTES);
68 hour = vrtc_cmos_read(RTC_HOURS);
69 mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
70 mon = vrtc_cmos_read(RTC_MONTH);
71 year = vrtc_cmos_read(RTC_YEAR);
72
73 /* vRTC YEAR reg contains the offset from 1960 */
74 year += 1960;
75
76 printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
77 "mon: %d year: %d\n", sec, min, hour, mday, mon, year);
78
79 return mktime(year, mon, mday, hour, min, sec);
80}
81
82/* Only care about the minutes and seconds */
83int vrtc_set_mmss(unsigned long nowtime)
84{
85 int real_sec, real_min;
86 int vrtc_min;
87
88 vrtc_min = vrtc_cmos_read(RTC_MINUTES);
89
90 real_sec = nowtime % 60;
91 real_min = nowtime / 60;
92 if (((abs(real_min - vrtc_min) + 15)/30) & 1)
93 real_min += 30;
94 real_min %= 60;
95
96 vrtc_cmos_write(real_sec, RTC_SECONDS);
97 vrtc_cmos_write(real_min, RTC_MINUTES);
98 return 0;
99}
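The rounding above preserves a half-hour timezone offset stored in the hardware clock: when the vRTC minute differs from the wall-clock minute by roughly 30, 30 is added back before writing. Worked example: real_min = 10 and vrtc_min = 38 gives abs diff 28, and (28 + 15) / 30 == 1 (odd), so real_min becomes 40 and the stored half-hour skew is kept.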
100
101void __init mrst_rtc_init(void)
102{
103 unsigned long rtc_paddr;
104 void __iomem *virt_base;
105
106 sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
107 if (!sfi_mrtc_num)
108 return;
109
110 rtc_paddr = sfi_mrtc_array[0].phys_addr;
111
112 /* vRTC's register address may not be page aligned */
113 set_fixmap_nocache(FIX_LNW_VRTC, rtc_paddr);
114
115 virt_base = (void __iomem *)__fix_to_virt(FIX_LNW_VRTC);
116 virt_base += rtc_paddr & ~PAGE_MASK;
117 vrtc_virt_base = virt_base;
118
119 x86_platform.get_wallclock = vrtc_get_time;
120 x86_platform.set_wallclock = vrtc_set_mmss;
121}
122
123/*
124 * The Moorestown platform has a memory mapped virtual RTC device that emulates
125 * the programming interface of the RTC.
126 */
127
128static struct resource vrtc_resources[] = {
129 [0] = {
130 .flags = IORESOURCE_MEM,
131 },
132 [1] = {
133 .flags = IORESOURCE_IRQ,
134 }
135};
136
137static struct platform_device vrtc_device = {
138 .name = "rtc_mrst",
139 .id = -1,
140 .resource = vrtc_resources,
141 .num_resources = ARRAY_SIZE(vrtc_resources),
142};
143
144/* Register the RTC device if appropriate */
145static int __init mrst_device_create(void)
146{
147 /* No Moorestown, no device */
148 if (!mrst_identify_cpu())
149 return -ENODEV;
150 /* No timer, no device */
151 if (!sfi_mrtc_num)
152 return -ENODEV;
153
154 /* iomem resource */
155 vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr;
156 vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr +
157 MRST_VRTC_MAP_SZ - 1;
158 /* irq resource */
159 vrtc_resources[1].start = sfi_mrtc_array[0].irq;
160 vrtc_resources[1].end = sfi_mrtc_array[0].irq;
161
162 return platform_device_register(&vrtc_device);
163}
164
165module_init(mrst_device_create);
diff --git a/arch/x86/platform/sfi/sfi.c b/arch/x86/platform/sfi/sfi.c
index dd4c281ffe57..ca54875ac795 100644
--- a/arch/x86/platform/sfi/sfi.c
+++ b/arch/x86/platform/sfi/sfi.c
@@ -48,9 +48,9 @@ static void __init mp_sfi_register_lapic_address(unsigned long address)
48/* All CPUs enumerated by SFI must be present and enabled */ 48/* All CPUs enumerated by SFI must be present and enabled */
49static void __cpuinit mp_sfi_register_lapic(u8 id) 49static void __cpuinit mp_sfi_register_lapic(u8 id)
50{ 50{
51 if (MAX_APICS - id <= 0) { 51 if (MAX_LOCAL_APIC - id <= 0) {
52 pr_warning("Processor #%d invalid (max %d)\n", 52 pr_warning("Processor #%d invalid (max %d)\n",
53 id, MAX_APICS); 53 id, MAX_LOCAL_APIC);
54 return; 54 return;
55 } 55 }
56 56
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index ba9caa808a9c..df58e9cad96a 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1341,7 +1341,7 @@ uv_activation_descriptor_init(int node, int pnode)
1341 1341
1342 /* 1342 /*
1343 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) 1343 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
1344 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub 1344 * per cpu; and one per cpu on the uvhub (UV_ADP_SIZE)
1345 */ 1345 */
1346 bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE 1346 bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
1347 * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); 1347 * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
@@ -1490,7 +1490,7 @@ calculate_destination_timeout(void)
1490/* 1490/*
1491 * initialize the bau_control structure for each cpu 1491 * initialize the bau_control structure for each cpu
1492 */ 1492 */
1493static void __init uv_init_per_cpu(int nuvhubs) 1493static int __init uv_init_per_cpu(int nuvhubs)
1494{ 1494{
1495 int i; 1495 int i;
1496 int cpu; 1496 int cpu;
@@ -1507,7 +1507,7 @@ static void __init uv_init_per_cpu(int nuvhubs)
1507 struct bau_control *smaster = NULL; 1507 struct bau_control *smaster = NULL;
1508 struct socket_desc { 1508 struct socket_desc {
1509 short num_cpus; 1509 short num_cpus;
1510 short cpu_number[16]; 1510 short cpu_number[MAX_CPUS_PER_SOCKET];
1511 }; 1511 };
1512 struct uvhub_desc { 1512 struct uvhub_desc {
1513 unsigned short socket_mask; 1513 unsigned short socket_mask;
@@ -1540,6 +1540,10 @@ static void __init uv_init_per_cpu(int nuvhubs)
1540 sdp = &bdp->socket[socket]; 1540 sdp = &bdp->socket[socket];
1541 sdp->cpu_number[sdp->num_cpus] = cpu; 1541 sdp->cpu_number[sdp->num_cpus] = cpu;
1542 sdp->num_cpus++; 1542 sdp->num_cpus++;
1543 if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
1544 printk(KERN_EMERG "%d cpus per socket invalid\n", sdp->num_cpus);
1545 return 1;
1546 }
1543 } 1547 }
1544 for (uvhub = 0; uvhub < nuvhubs; uvhub++) { 1548 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1545 if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8)))) 1549 if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
@@ -1570,6 +1574,12 @@ static void __init uv_init_per_cpu(int nuvhubs)
1570 bcp->uvhub_master = hmaster; 1574 bcp->uvhub_master = hmaster;
1571 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)-> 1575 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
1572 blade_processor_id; 1576 blade_processor_id;
1577 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
1578 printk(KERN_EMERG
1579 "%d cpus per uvhub invalid\n",
1580 bcp->uvhub_cpu);
1581 return 1;
1582 }
1573 } 1583 }
1574nextsocket: 1584nextsocket:
1575 socket++; 1585 socket++;
@@ -1595,6 +1605,7 @@ nextsocket:
1595 bcp->congested_reps = congested_reps; 1605 bcp->congested_reps = congested_reps;
1596 bcp->congested_period = congested_period; 1606 bcp->congested_period = congested_period;
1597 } 1607 }
1608 return 0;
1598} 1609}
1599 1610
1600/* 1611/*
@@ -1625,7 +1636,10 @@ static int __init uv_bau_init(void)
1625 spin_lock_init(&disable_lock); 1636 spin_lock_init(&disable_lock);
1626 congested_cycles = microsec_2_cycles(congested_response_us); 1637 congested_cycles = microsec_2_cycles(congested_response_us);
1627 1638
1628 uv_init_per_cpu(nuvhubs); 1639 if (uv_init_per_cpu(nuvhubs)) {
1640 nobau = 1;
1641 return 0;
1642 }
1629 1643
1630 uv_partition_base_pnode = 0x7fffffff; 1644 uv_partition_base_pnode = 0x7fffffff;
1631 for (uvhub = 0; uvhub < nuvhubs; uvhub++) 1645 for (uvhub = 0; uvhub < nuvhubs; uvhub++)
diff --git a/arch/x86/platform/visws/visws_quirks.c b/arch/x86/platform/visws/visws_quirks.c
index 3371bd053b89..632037671746 100644
--- a/arch/x86/platform/visws/visws_quirks.c
+++ b/arch/x86/platform/visws/visws_quirks.c
@@ -171,7 +171,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
171 ver = m->apicver; 171 ver = m->apicver;
172 if ((ver >= 0x14 && m->apicid >= 0xff) || m->apicid >= 0xf) { 172 if ((ver >= 0x14 && m->apicid >= 0xff) || m->apicid >= 0xf) {
173 printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n", 173 printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
174 m->apicid, MAX_APICS); 174 m->apicid, MAX_LOCAL_APIC);
175 return; 175 return;
176 } 176 }
177 177