Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/cnt32_to_63.h | 78
-rw-r--r--  arch/arm/include/asm/pci.h | 2
-rw-r--r--  arch/arm/kernel/kgdb.c | 2
-rw-r--r--  arch/arm/mach-davinci/psc.c | 3
-rw-r--r--  arch/arm/mach-mx3/pcm037.c | 2
-rw-r--r--  arch/arm/mach-pxa/time.c | 2
-rw-r--r--  arch/arm/mach-pxa/tosa.c | 11
-rw-r--r--  arch/arm/mach-sa1100/generic.c | 2
-rw-r--r--  arch/arm/mach-sa1100/include/mach/jornada720.h | 11
-rw-r--r--  arch/arm/mach-sa1100/jornada720_ssp.c | 10
-rw-r--r--  arch/arm/mach-versatile/core.c | 2
-rw-r--r--  arch/arm/plat-omap/devices.c | 110
-rw-r--r--  arch/avr32/boards/atstk1000/atstk1002.c | 2
-rw-r--r--  arch/avr32/boot/images/.gitignore | 4
-rw-r--r--  arch/avr32/kernel/.gitignore | 1
-rw-r--r--  arch/avr32/kernel/avr32_ksyms.c | 1
-rw-r--r--  arch/avr32/kernel/syscall-stubs.S | 9
-rw-r--r--  arch/avr32/kernel/syscall_table.S | 2
-rw-r--r--  arch/avr32/kernel/traps.c | 8
-rw-r--r--  arch/avr32/lib/findbit.S | 30
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h | 4
-rw-r--r--  arch/ia64/include/asm/elf.h | 15
-rw-r--r--  arch/ia64/include/asm/sections.h | 16
-rw-r--r--  arch/ia64/include/asm/sn/bte.h | 9
-rw-r--r--  arch/ia64/kernel/efi.c | 5
-rw-r--r--  arch/ia64/kernel/head.S | 9
-rw-r--r--  arch/ia64/kernel/module.c | 21
-rw-r--r--  arch/ia64/kernel/setup.c | 2
-rw-r--r--  arch/ia64/kernel/smpboot.c | 4
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 8
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 7
-rw-r--r--  arch/ia64/mm/contig.c | 2
-rw-r--r--  arch/ia64/mm/discontig.c | 2
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c | 4
-rw-r--r--  arch/m32r/Kconfig | 10
-rw-r--r--  arch/m32r/kernel/entry.S | 2
-rw-r--r--  arch/m32r/kernel/head.S | 1
-rw-r--r--  arch/m32r/kernel/irq.c | 6
-rw-r--r--  arch/m32r/kernel/m32r_ksyms.c | 2
-rw-r--r--  arch/m32r/kernel/process.c | 30
-rw-r--r--  arch/m32r/kernel/smp.c | 4
-rw-r--r--  arch/m32r/kernel/time.c | 5
-rw-r--r--  arch/m32r/kernel/traps.c | 8
-rw-r--r--  arch/m32r/lib/delay.c | 2
-rw-r--r--  arch/m68k/configs/amiga_defconfig | 53
-rw-r--r--  arch/m68k/configs/apollo_defconfig | 53
-rw-r--r--  arch/m68k/configs/atari_defconfig | 60
-rw-r--r--  arch/m68k/configs/bvme6000_defconfig | 52
-rw-r--r--  arch/m68k/configs/hp300_defconfig | 52
-rw-r--r--  arch/m68k/configs/mac_defconfig | 53
-rw-r--r--  arch/m68k/configs/multi_defconfig | 62
-rw-r--r--  arch/m68k/configs/mvme147_defconfig | 52
-rw-r--r--  arch/m68k/configs/mvme16x_defconfig | 52
-rw-r--r--  arch/m68k/configs/q40_defconfig | 53
-rw-r--r--  arch/m68k/configs/sun3_defconfig | 54
-rw-r--r--  arch/m68k/configs/sun3x_defconfig | 52
-rw-r--r--  arch/mips/Kconfig | 53
-rw-r--r--  arch/mips/au1000/common/gpio.c | 6
-rw-r--r--  arch/mips/kernel/Makefile | 1
-rw-r--r--  arch/mips/kernel/cevt-r4k.c | 173
-rw-r--r--  arch/mips/kernel/cevt-smtc.c | 321
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 26
-rw-r--r--  arch/mips/kernel/entry.S | 10
-rw-r--r--  arch/mips/kernel/genex.S | 41
-rw-r--r--  arch/mips/kernel/head.S | 1
-rw-r--r--  arch/mips/kernel/kgdb.c | 3
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c | 2
-rw-r--r--  arch/mips/kernel/process.c | 19
-rw-r--r--  arch/mips/kernel/ptrace.c | 2
-rw-r--r--  arch/mips/kernel/smtc.c | 260
-rw-r--r--  arch/mips/kernel/traps.c | 28
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/mips/lib/csum_partial.S | 21
-rw-r--r--  arch/mips/mti-malta/Makefile | 2
-rw-r--r--  arch/mips/mti-malta/malta-smtc.c | 9
-rw-r--r--  arch/mips/pci/Makefile | 1
-rw-r--r--  arch/mips/pci/pci-bcm47xx.c | 60
-rw-r--r--  arch/mips/pci/pci-ip27.c | 40
-rw-r--r--  arch/mips/sibyte/swarm/Makefile | 3
-rw-r--r--  arch/mips/sibyte/swarm/platform.c | 85
-rw-r--r--  arch/mips/vr41xx/common/irq.c | 6
-rw-r--r--  arch/mn10300/kernel/irq.c | 71
-rw-r--r--  arch/mn10300/kernel/time.c | 52
-rw-r--r--  arch/mn10300/mm/fault.c | 2
-rw-r--r--  arch/mn10300/unit-asb2303/unit-init.c | 2
-rw-r--r--  arch/mn10300/unit-asb2305/unit-init.c | 2
-rw-r--r--  arch/powerpc/boot/Makefile | 2
-rw-r--r--  arch/powerpc/boot/dts/holly.dts | 106
-rw-r--r--  arch/powerpc/boot/dts/mpc8610_hpcd.dts | 8
-rw-r--r--  arch/powerpc/include/asm/elf.h | 7
-rw-r--r--  arch/powerpc/include/asm/sections.h | 12
-rw-r--r--  arch/powerpc/kernel/idle.c | 6
-rw-r--r--  arch/powerpc/kernel/kgdb.c | 5
-rw-r--r--  arch/powerpc/kernel/module_64.c | 19
-rw-r--r--  arch/powerpc/platforms/fsl_uli1575.c | 12
-rw-r--r--  arch/s390/kernel/time.c | 2
-rw-r--r--  arch/s390/lib/delay.c | 88
-rw-r--r--  arch/sparc/kernel/of_device.c | 2
-rw-r--r--  arch/sparc/kernel/ptrace.c | 4
-rw-r--r--  arch/sparc64/kernel/irq.c | 5
-rw-r--r--  arch/sparc64/kernel/of_device.c | 9
-rw-r--r--  arch/sparc64/kernel/pci.c | 2
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c | 14
-rw-r--r--  arch/sparc64/kernel/prom.c | 104
-rw-r--r--  arch/sparc64/kernel/ptrace.c | 8
-rw-r--r--  arch/sparc64/kernel/traps.c | 3
-rw-r--r--  arch/x86/Kconfig | 76
-rw-r--r--  arch/x86/boot/compressed/head_32.S | 5
-rw-r--r--  arch/x86/boot/compressed/misc.c | 10
-rw-r--r--  arch/x86/boot/compressed/relocs.c | 2
-rw-r--r--  arch/x86/boot/header.S | 1
-rw-r--r--  arch/x86/configs/i386_defconfig | 19
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 29
-rw-r--r--  arch/x86/ia32/ia32_aout.c | 11
-rw-r--r--  arch/x86/ia32/ia32_signal.c | 21
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 9
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 25
-rw-r--r--  arch/x86/kernel/alternative.c | 8
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 350
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 194
-rw-r--r--  arch/x86/kernel/aperture_64.c | 6
-rw-r--r--  arch/x86/kernel/apic_32.c | 332
-rw-r--r--  arch/x86/kernel/apic_64.c | 389
-rw-r--r--  arch/x86/kernel/apm_32.c | 4
-rw-r--r--  arch/x86/kernel/bios_uv.c | 10
-rw-r--r--  arch/x86/kernel/cpu/common.c | 28
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 7
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c | 274
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 86
-rw-r--r--  arch/x86/kernel/cpuid.c | 1
-rw-r--r--  arch/x86/kernel/crash_dump_64.c | 13
-rw-r--r--  arch/x86/kernel/ds.c | 4
-rw-r--r--  arch/x86/kernel/early-quirks.c | 18
-rw-r--r--  arch/x86/kernel/efi.c | 6
-rw-r--r--  arch/x86/kernel/head64.c | 5
-rw-r--r--  arch/x86/kernel/head_32.S | 34
-rw-r--r--  arch/x86/kernel/head_64.S | 4
-rw-r--r--  arch/x86/kernel/irq_32.c | 2
-rw-r--r--  arch/x86/kernel/irq_64.c | 2
-rw-r--r--  arch/x86/kernel/k8.c | 5
-rw-r--r--  arch/x86/kernel/kdebugfs.c | 1
-rw-r--r--  arch/x86/kernel/kgdb.c | 50
-rw-r--r--  arch/x86/kernel/kvm.c | 2
-rw-r--r--  arch/x86/kernel/nmi.c | 11
-rw-r--r--  arch/x86/kernel/olpc.c | 6
-rw-r--r--  arch/x86/kernel/paravirt_patch_32.c | 2
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 18
-rw-r--r--  arch/x86/kernel/pci-dma.c | 179
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 162
-rw-r--r--  arch/x86/kernel/pci-nommu.c | 10
-rw-r--r--  arch/x86/kernel/pcspeaker.c | 13
-rw-r--r--  arch/x86/kernel/process.c | 20
-rw-r--r--  arch/x86/kernel/process_32.c | 12
-rw-r--r--  arch/x86/kernel/process_64.c | 133
-rw-r--r--  arch/x86/kernel/ptrace.c | 40
-rw-r--r--  arch/x86/kernel/reboot.c | 6
-rw-r--r--  arch/x86/kernel/setup.c | 21
-rw-r--r--  arch/x86/kernel/sigframe.h | 5
-rw-r--r--  arch/x86/kernel/signal_32.c | 11
-rw-r--r--  arch/x86/kernel/signal_64.c | 105
-rw-r--r--  arch/x86/kernel/smpboot.c | 5
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 43
-rw-r--r--  arch/x86/kernel/traps_64.c | 61
-rw-r--r--  arch/x86/kernel/tsc.c | 290
-rw-r--r--  arch/x86/kernel/visws_quirks.c | 16
-rw-r--r--  arch/x86/kernel/vmi_32.c | 12
-rw-r--r--  arch/x86/kernel/vsmp_64.c | 2
-rw-r--r--  arch/x86/kvm/mmu.c | 4
-rw-r--r--  arch/x86/kvm/svm.c | 12
-rw-r--r--  arch/x86/kvm/vmx.c | 3
-rw-r--r--  arch/x86/kvm/vmx.h | 2
-rw-r--r--  arch/x86/lib/msr-on-cpu.c | 78
-rw-r--r--  arch/x86/lib/string_32.c | 42
-rw-r--r--  arch/x86/lib/strstr_32.c | 6
-rw-r--r--  arch/x86/mm/discontig_32.c | 2
-rw-r--r--  arch/x86/mm/dump_pagetables.c | 4
-rw-r--r--  arch/x86/mm/init_32.c | 88
-rw-r--r--  arch/x86/mm/init_64.c | 118
-rw-r--r--  arch/x86/mm/ioremap.c | 19
-rw-r--r--  arch/x86/mm/numa_64.c | 10
-rw-r--r--  arch/x86/mm/pageattr-test.c | 9
-rw-r--r--  arch/x86/mm/pageattr.c | 463
-rw-r--r--  arch/x86/mm/pat.c | 132
-rw-r--r--  arch/x86/mm/pgtable.c | 6
-rw-r--r--  arch/x86/mm/pgtable_32.c | 3
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 4
-rw-r--r--  arch/x86/oprofile/op_model_p4.c | 175
-rw-r--r--  arch/x86/pci/amd_bus.c | 2
-rw-r--r--  arch/x86/pci/irq.c | 67
-rw-r--r--  arch/x86/power/hibernate_asm_32.S | 14
-rw-r--r--  arch/x86/xen/enlighten.c | 20
-rw-r--r--  arch/x86/xen/setup.c | 2
195 files changed, 4622 insertions, 2736 deletions
diff --git a/arch/arm/include/asm/cnt32_to_63.h b/arch/arm/include/asm/cnt32_to_63.h
deleted file mode 100644
index 480c873fa746..000000000000
--- a/arch/arm/include/asm/cnt32_to_63.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 * include/asm/cnt32_to_63.h -- extend a 32-bit counter to 63 bits
3 *
4 * Author: Nicolas Pitre
5 * Created: December 3, 2006
6 * Copyright: MontaVista Software, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation.
11 */
12
13#ifndef __INCLUDE_CNT32_TO_63_H__
14#define __INCLUDE_CNT32_TO_63_H__
15
16#include <linux/compiler.h>
17#include <asm/types.h>
18#include <asm/byteorder.h>
19
20/*
21 * Prototype: u64 cnt32_to_63(u32 cnt)
22 * Many hardware clock counters are only 32 bits wide and therefore have
23 * a relatively short period making wrap-arounds rather frequent. This
24 * is a problem when implementing sched_clock() for example, where a 64-bit
25 * non-wrapping monotonic value is expected to be returned.
26 *
27 * To overcome that limitation, let's extend a 32-bit counter to 63 bits
28 * in a completely lock free fashion. Bits 0 to 31 of the clock are provided
29 * by the hardware while bits 32 to 62 are stored in memory. The top bit in
30 * memory is used to synchronize with the hardware clock half-period. When
31 * the top bit of both counters (hardware and in memory) differ then the
32 * memory is updated with a new value, incrementing it when the hardware
33 * counter wraps around.
34 *
35 * Because a word store in memory is atomic then the incremented value will
36 * always be in synch with the top bit indicating to any potential concurrent
37 * reader if the value in memory is up to date or not with regards to the
38 * needed increment. And any race in updating the value in memory is harmless
39 * as the same value would simply be stored more than once.
40 *
41 * The only restriction for the algorithm to work properly is that this
42 * code must be executed at least once per each half period of the 32-bit
43 * counter to properly update the state bit in memory. This is usually not a
44 * problem in practice, but if it is then a kernel timer could be scheduled
45 * to manage for this code to be executed often enough.
46 *
47 * Note that the top bit (bit 63) in the returned value should be considered
48 * as garbage. It is not cleared here because callers are likely to use a
49 * multiplier on the returned value which can get rid of the top bit
50 * implicitly by making the multiplier even, therefore saving on a runtime
51 * clear-bit instruction. Otherwise caller must remember to clear the top
52 * bit explicitly.
53 */
54
55/* this is used only to give gcc a clue about good code generation */
56typedef union {
57 struct {
58#if defined(__LITTLE_ENDIAN)
59 u32 lo, hi;
60#elif defined(__BIG_ENDIAN)
61 u32 hi, lo;
62#endif
63 };
64 u64 val;
65} cnt32_to_63_t;
66
67#define cnt32_to_63(cnt_lo) \
68({ \
69 static volatile u32 __m_cnt_hi = 0; \
70 cnt32_to_63_t __x; \
71 __x.hi = __m_cnt_hi; \
72 __x.lo = (cnt_lo); \
73 if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
74 __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
75 __x.val; \
76})
77
78#endif
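
Note: the header above is removed from arch/arm because the cnt32_to_63() helper moves to include/linux/; the mach-pxa, mach-sa1100 and mach-versatile hunks below switch their includes from <asm/cnt32_to_63.h> to <linux/cnt32_to_63.h>. As a reminder of the intended use described in the comment, here is a minimal, hypothetical sched_clock() built on the macro; the counter register and the factor of 2 are placeholders, not taken from this patch.

#include <linux/cnt32_to_63.h>	/* new location of the helper */
#include <linux/io.h>

#define MY_COUNTER_REG	((void __iomem *)0xf0000000)	/* hypothetical free-running 32-bit counter */

unsigned long long sched_clock(void)
{
	unsigned long long ticks = cnt32_to_63(readl(MY_COUNTER_REG));

	/*
	 * Per the comment above, bit 63 of the result is garbage; an even
	 * multiplier discards it implicitly.  "2" stands in for the real
	 * tick-to-nanosecond scale factor of the platform.
	 */
	return ticks * 2;
}
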
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 721c03d53f4b..918d0cbbf064 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -30,7 +30,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
30 * The networking and block device layers use this boolean for bounce 30 * The networking and block device layers use this boolean for bounce
31 * buffer decisions. 31 * buffer decisions.
32 */ 32 */
33#define PCI_DMA_BUS_IS_PHYS (0) 33#define PCI_DMA_BUS_IS_PHYS (1)
34 34
35/* 35/*
36 * Whether pci_unmap_{single,page} is a nop depends upon the 36 * Whether pci_unmap_{single,page} is a nop depends upon the
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index aaffaecffcd1..ba8ccfede964 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -111,8 +111,6 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
111 case 'D': 111 case 'D':
112 case 'k': 112 case 'k':
113 case 'c': 113 case 'c':
114 kgdb_contthread = NULL;
115
116 /* 114 /*
117 * Try to read optional parameter, pc unchanged if no parm. 115 * Try to read optional parameter, pc unchanged if no parm.
118 * If this was a compiled breakpoint, we need to move 116 * If this was a compiled breakpoint, we need to move
diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c
index 720c48b9ee04..aa2fc375a325 100644
--- a/arch/arm/mach-davinci/psc.c
+++ b/arch/arm/mach-davinci/psc.c
@@ -70,9 +70,6 @@ void davinci_psc_config(unsigned int domain, unsigned int id, char enable)
70{ 70{
71 u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl, mdstat_mask; 71 u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl, mdstat_mask;
72 72
73 if (id < 0)
74 return;
75
76 mdctl = davinci_readl(DAVINCI_PWR_SLEEP_CNTRL_BASE + MDCTL + 4 * id); 73 mdctl = davinci_readl(DAVINCI_PWR_SLEEP_CNTRL_BASE + MDCTL + 4 * id);
77 if (enable) 74 if (enable)
78 mdctl |= 0x00000003; /* Enable Module */ 75 mdctl |= 0x00000003; /* Enable Module */
diff --git a/arch/arm/mach-mx3/pcm037.c b/arch/arm/mach-mx3/pcm037.c
index 0a152ed15a85..df8582a6231b 100644
--- a/arch/arm/mach-mx3/pcm037.c
+++ b/arch/arm/mach-mx3/pcm037.c
@@ -54,7 +54,7 @@ static struct platform_device pcm037_flash = {
54}; 54};
55 55
56static struct imxuart_platform_data uart_pdata = { 56static struct imxuart_platform_data uart_pdata = {
57 .flags = 0, 57 .flags = IMXUART_HAVE_RTSCTS,
58}; 58};
59 59
60static struct platform_device *devices[] __initdata = { 60static struct platform_device *devices[] __initdata = {
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index 67e18509d7bf..b0d6b32654cf 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -17,9 +17,9 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/clockchips.h> 18#include <linux/clockchips.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/cnt32_to_63.h>
20 21
21#include <asm/div64.h> 22#include <asm/div64.h>
22#include <asm/cnt32_to_63.h>
23#include <asm/mach/irq.h> 23#include <asm/mach/irq.h>
24#include <asm/mach/time.h> 24#include <asm/mach/time.h>
25#include <mach/pxa-regs.h> 25#include <mach/pxa-regs.h>
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 5dab30eafddc..9f3ef9eb32e3 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -50,6 +50,7 @@
50#include <asm/mach/sharpsl_param.h> 50#include <asm/mach/sharpsl_param.h>
51 51
52#include "generic.h" 52#include "generic.h"
53#include "clock.h"
53#include "devices.h" 54#include "devices.h"
54 55
55static unsigned long tosa_pin_config[] = { 56static unsigned long tosa_pin_config[] = {
@@ -521,6 +522,14 @@ static struct gpio_keys_button tosa_gpio_keys[] = {
521 .wakeup = 1, 522 .wakeup = 1,
522 .active_low = 1, 523 .active_low = 1,
523 }, 524 },
525 {
526 .type = EV_SW,
527 .code = SW_HEADPHONE_INSERT,
528 .gpio = TOSA_GPIO_EAR_IN,
529 .desc = "HeadPhone insert",
530 .active_low = 1,
531 .debounce_interval = 300,
532 },
524}; 533};
525 534
526static struct gpio_keys_platform_data tosa_gpio_keys_platform_data = { 535static struct gpio_keys_platform_data tosa_gpio_keys_platform_data = {
@@ -792,6 +801,8 @@ static void __init tosa_init(void)
792 pxa_set_i2c_info(NULL); 801 pxa_set_i2c_info(NULL);
793 platform_scoop_config = &tosa_pcmcia_config; 802 platform_scoop_config = &tosa_pcmcia_config;
794 803
804 clk_add_alias("CLK_CK3P6MI", &tc6393xb_device.dev, "GPIO11_CLK", NULL);
805
795 platform_add_devices(devices, ARRAY_SIZE(devices)); 806 platform_add_devices(devices, ARRAY_SIZE(devices));
796} 807}
797 808
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 1362994c78aa..b422526f6d8b 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -18,9 +18,9 @@
18#include <linux/ioport.h> 18#include <linux/ioport.h>
19#include <linux/sched.h> /* just for sched_clock() - funny that */ 19#include <linux/sched.h> /* just for sched_clock() - funny that */
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/cnt32_to_63.h>
21 22
22#include <asm/div64.h> 23#include <asm/div64.h>
23#include <asm/cnt32_to_63.h>
24#include <mach/hardware.h> 24#include <mach/hardware.h>
25#include <asm/system.h> 25#include <asm/system.h>
26#include <asm/pgtable.h> 26#include <asm/pgtable.h>
diff --git a/arch/arm/mach-sa1100/include/mach/jornada720.h b/arch/arm/mach-sa1100/include/mach/jornada720.h
index bc120850d313..cc6b4bfcecf6 100644
--- a/arch/arm/mach-sa1100/include/mach/jornada720.h
+++ b/arch/arm/mach-sa1100/include/mach/jornada720.h
@@ -1,10 +1,10 @@
1/* 1/*
2 * arch/arm/mach-sa1100/include/mach/jornada720.h 2 * arch/arm/mach-sa1100/include/mach/jornada720.h
3 * 3 *
4 * This file contains SSP/MCU communication definitions for HP Jornada 710/720/728 4 * SSP/MCU communication definitions for HP Jornada 710/720/728
5 * 5 *
6 * Copyright (C) 2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com> 6 * Copyright 2007,2008 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
7 * Copyright (C) 2000 John Ankcorn <jca@lcs.mit.edu> 7 * Copyright 2000 John Ankcorn <jca@lcs.mit.edu>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -25,3 +25,8 @@
25#define PWMOFF 0xDF 25#define PWMOFF 0xDF
26#define TXDUMMY 0x11 26#define TXDUMMY 0x11
27#define ERRORCODE 0x00 27#define ERRORCODE 0x00
28
29extern void jornada_ssp_start(void);
30extern void jornada_ssp_end(void);
31extern int jornada_ssp_inout(u8 byte);
32extern int jornada_ssp_byte(u8 byte);
diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c
index 06ea7abd9170..28cf36967977 100644
--- a/arch/arm/mach-sa1100/jornada720_ssp.c
+++ b/arch/arm/mach-sa1100/jornada720_ssp.c
@@ -21,8 +21,8 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22 22
23#include <mach/hardware.h> 23#include <mach/hardware.h>
24#include <asm/hardware/ssp.h>
25#include <mach/jornada720.h> 24#include <mach/jornada720.h>
25#include <asm/hardware/ssp.h>
26 26
27static DEFINE_SPINLOCK(jornada_ssp_lock); 27static DEFINE_SPINLOCK(jornada_ssp_lock);
28static unsigned long jornada_ssp_flags; 28static unsigned long jornada_ssp_flags;
@@ -109,12 +109,12 @@ EXPORT_SYMBOL(jornada_ssp_inout);
109 * jornada_ssp_start - enable mcu 109 * jornada_ssp_start - enable mcu
110 * 110 *
111 */ 111 */
112int jornada_ssp_start() 112void jornada_ssp_start(void)
113{ 113{
114 spin_lock_irqsave(&jornada_ssp_lock, jornada_ssp_flags); 114 spin_lock_irqsave(&jornada_ssp_lock, jornada_ssp_flags);
115 GPCR = GPIO_GPIO25; 115 GPCR = GPIO_GPIO25;
116 udelay(50); 116 udelay(50);
117 return 0; 117 return;
118}; 118};
119EXPORT_SYMBOL(jornada_ssp_start); 119EXPORT_SYMBOL(jornada_ssp_start);
120 120
@@ -122,11 +122,11 @@ EXPORT_SYMBOL(jornada_ssp_start);
122 * jornada_ssp_end - disable mcu and turn off lock 122 * jornada_ssp_end - disable mcu and turn off lock
123 * 123 *
124 */ 124 */
125int jornada_ssp_end() 125void jornada_ssp_end(void)
126{ 126{
127 GPSR = GPIO_GPIO25; 127 GPSR = GPIO_GPIO25;
128 spin_unlock_irqrestore(&jornada_ssp_lock, jornada_ssp_flags); 128 spin_unlock_irqrestore(&jornada_ssp_lock, jornada_ssp_flags);
129 return 0; 129 return;
130}; 130};
131EXPORT_SYMBOL(jornada_ssp_end); 131EXPORT_SYMBOL(jornada_ssp_end);
132 132
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index d75e795c893e..b638f10411e8 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -28,8 +28,8 @@
28#include <linux/amba/clcd.h> 28#include <linux/amba/clcd.h>
29#include <linux/clocksource.h> 29#include <linux/clocksource.h>
30#include <linux/clockchips.h> 30#include <linux/clockchips.h>
31#include <linux/cnt32_to_63.h>
31 32
32#include <asm/cnt32_to_63.h>
33#include <asm/system.h> 33#include <asm/system.h>
34#include <mach/hardware.h> 34#include <mach/hardware.h>
35#include <asm/io.h> 35#include <asm/io.h>
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 187e3d8bfdfe..01da719a7453 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -21,6 +21,7 @@
21 21
22#include <mach/tc.h> 22#include <mach/tc.h>
23#include <mach/board.h> 23#include <mach/board.h>
24#include <mach/mmc.h>
24#include <mach/mux.h> 25#include <mach/mux.h>
25#include <mach/gpio.h> 26#include <mach/gpio.h>
26#include <mach/menelaus.h> 27#include <mach/menelaus.h>
@@ -194,25 +195,38 @@ void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
194 195
195/*-------------------------------------------------------------------------*/ 196/*-------------------------------------------------------------------------*/
196 197
197#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) 198#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
199 defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
198 200
199#ifdef CONFIG_ARCH_OMAP24XX 201#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
200#define OMAP_MMC1_BASE 0x4809c000 202#define OMAP_MMC1_BASE 0x4809c000
201#define OMAP_MMC1_INT INT_24XX_MMC_IRQ 203#define OMAP_MMC1_END (OMAP_MMC1_BASE + 0x1fc)
204#define OMAP_MMC1_INT INT_24XX_MMC_IRQ
205
206#define OMAP_MMC2_BASE 0x480b4000
207#define OMAP_MMC2_END (OMAP_MMC2_BASE + 0x1fc)
208#define OMAP_MMC2_INT INT_24XX_MMC2_IRQ
209
202#else 210#else
211
203#define OMAP_MMC1_BASE 0xfffb7800 212#define OMAP_MMC1_BASE 0xfffb7800
213#define OMAP_MMC1_END (OMAP_MMC1_BASE + 0x7f)
204#define OMAP_MMC1_INT INT_MMC 214#define OMAP_MMC1_INT INT_MMC
205#endif 215
206#define OMAP_MMC2_BASE 0xfffb7c00 /* omap16xx only */ 216#define OMAP_MMC2_BASE 0xfffb7c00 /* omap16xx only */
217#define OMAP_MMC2_END (OMAP_MMC2_BASE + 0x7f)
218#define OMAP_MMC2_INT INT_1610_MMC2
219
220#endif
207 221
208static struct omap_mmc_conf mmc1_conf; 222static struct omap_mmc_platform_data mmc1_data;
209 223
210static u64 mmc1_dmamask = 0xffffffff; 224static u64 mmc1_dmamask = 0xffffffff;
211 225
212static struct resource mmc1_resources[] = { 226static struct resource mmc1_resources[] = {
213 { 227 {
214 .start = OMAP_MMC1_BASE, 228 .start = OMAP_MMC1_BASE,
215 .end = OMAP_MMC1_BASE + 0x7f, 229 .end = OMAP_MMC1_END,
216 .flags = IORESOURCE_MEM, 230 .flags = IORESOURCE_MEM,
217 }, 231 },
218 { 232 {
@@ -226,26 +240,27 @@ static struct platform_device mmc_omap_device1 = {
226 .id = 1, 240 .id = 1,
227 .dev = { 241 .dev = {
228 .dma_mask = &mmc1_dmamask, 242 .dma_mask = &mmc1_dmamask,
229 .platform_data = &mmc1_conf, 243 .platform_data = &mmc1_data,
230 }, 244 },
231 .num_resources = ARRAY_SIZE(mmc1_resources), 245 .num_resources = ARRAY_SIZE(mmc1_resources),
232 .resource = mmc1_resources, 246 .resource = mmc1_resources,
233}; 247};
234 248
235#ifdef CONFIG_ARCH_OMAP16XX 249#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2430) || \
250 defined(CONFIG_ARCH_OMAP34XX)
236 251
237static struct omap_mmc_conf mmc2_conf; 252static struct omap_mmc_platform_data mmc2_data;
238 253
239static u64 mmc2_dmamask = 0xffffffff; 254static u64 mmc2_dmamask = 0xffffffff;
240 255
241static struct resource mmc2_resources[] = { 256static struct resource mmc2_resources[] = {
242 { 257 {
243 .start = OMAP_MMC2_BASE, 258 .start = OMAP_MMC2_BASE,
244 .end = OMAP_MMC2_BASE + 0x7f, 259 .end = OMAP_MMC2_END,
245 .flags = IORESOURCE_MEM, 260 .flags = IORESOURCE_MEM,
246 }, 261 },
247 { 262 {
248 .start = INT_1610_MMC2, 263 .start = OMAP_MMC2_INT,
249 .flags = IORESOURCE_IRQ, 264 .flags = IORESOURCE_IRQ,
250 }, 265 },
251}; 266};
@@ -255,26 +270,19 @@ static struct platform_device mmc_omap_device2 = {
255 .id = 2, 270 .id = 2,
256 .dev = { 271 .dev = {
257 .dma_mask = &mmc2_dmamask, 272 .dma_mask = &mmc2_dmamask,
258 .platform_data = &mmc2_conf, 273 .platform_data = &mmc2_data,
259 }, 274 },
260 .num_resources = ARRAY_SIZE(mmc2_resources), 275 .num_resources = ARRAY_SIZE(mmc2_resources),
261 .resource = mmc2_resources, 276 .resource = mmc2_resources,
262}; 277};
263#endif 278#endif
264 279
265static void __init omap_init_mmc(void) 280static inline void omap_init_mmc_conf(const struct omap_mmc_config *mmc_conf)
266{ 281{
267 const struct omap_mmc_config *mmc_conf; 282 if (cpu_is_omap2430() || cpu_is_omap34xx())
268 const struct omap_mmc_conf *mmc;
269
270 /* NOTE: assumes MMC was never (wrongly) enabled */
271 mmc_conf = omap_get_config(OMAP_TAG_MMC, struct omap_mmc_config);
272 if (!mmc_conf)
273 return; 283 return;
274 284
275 /* block 1 is always available and has just one pinout option */ 285 if (mmc_conf->mmc[0].enabled) {
276 mmc = &mmc_conf->mmc[0];
277 if (mmc->enabled) {
278 if (cpu_is_omap24xx()) { 286 if (cpu_is_omap24xx()) {
279 omap_cfg_reg(H18_24XX_MMC_CMD); 287 omap_cfg_reg(H18_24XX_MMC_CMD);
280 omap_cfg_reg(H15_24XX_MMC_CLKI); 288 omap_cfg_reg(H15_24XX_MMC_CLKI);
@@ -292,7 +300,7 @@ static void __init omap_init_mmc(void)
292 omap_cfg_reg(P20_1710_MMC_DATDIR0); 300 omap_cfg_reg(P20_1710_MMC_DATDIR0);
293 } 301 }
294 } 302 }
295 if (mmc->wire4) { 303 if (mmc_conf->mmc[0].wire4) {
296 if (cpu_is_omap24xx()) { 304 if (cpu_is_omap24xx()) {
297 omap_cfg_reg(H14_24XX_MMC_DAT1); 305 omap_cfg_reg(H14_24XX_MMC_DAT1);
298 omap_cfg_reg(E19_24XX_MMC_DAT2); 306 omap_cfg_reg(E19_24XX_MMC_DAT2);
@@ -303,25 +311,22 @@ static void __init omap_init_mmc(void)
303 } else { 311 } else {
304 omap_cfg_reg(MMC_DAT1); 312 omap_cfg_reg(MMC_DAT1);
305 /* NOTE: DAT2 can be on W10 (here) or M15 */ 313 /* NOTE: DAT2 can be on W10 (here) or M15 */
306 if (!mmc->nomux) 314 if (!mmc_conf->mmc[0].nomux)
307 omap_cfg_reg(MMC_DAT2); 315 omap_cfg_reg(MMC_DAT2);
308 omap_cfg_reg(MMC_DAT3); 316 omap_cfg_reg(MMC_DAT3);
309 } 317 }
310 } 318 }
311 mmc1_conf = *mmc;
312 (void) platform_device_register(&mmc_omap_device1);
313 } 319 }
314 320
315#ifdef CONFIG_ARCH_OMAP16XX 321#ifdef CONFIG_ARCH_OMAP16XX
316 /* block 2 is on newer chips, and has many pinout options */ 322 /* block 2 is on newer chips, and has many pinout options */
317 mmc = &mmc_conf->mmc[1]; 323 if (mmc_conf->mmc[1].enabled) {
318 if (mmc->enabled) { 324 if (!mmc_conf->mmc[1].nomux) {
319 if (!mmc->nomux) {
320 omap_cfg_reg(Y8_1610_MMC2_CMD); 325 omap_cfg_reg(Y8_1610_MMC2_CMD);
321 omap_cfg_reg(Y10_1610_MMC2_CLK); 326 omap_cfg_reg(Y10_1610_MMC2_CLK);
322 omap_cfg_reg(R18_1610_MMC2_CLKIN); 327 omap_cfg_reg(R18_1610_MMC2_CLKIN);
323 omap_cfg_reg(W8_1610_MMC2_DAT0); 328 omap_cfg_reg(W8_1610_MMC2_DAT0);
324 if (mmc->wire4) { 329 if (mmc_conf->mmc[1].wire4) {
325 omap_cfg_reg(V8_1610_MMC2_DAT1); 330 omap_cfg_reg(V8_1610_MMC2_DAT1);
326 omap_cfg_reg(W15_1610_MMC2_DAT2); 331 omap_cfg_reg(W15_1610_MMC2_DAT2);
327 omap_cfg_reg(R10_1610_MMC2_DAT3); 332 omap_cfg_reg(R10_1610_MMC2_DAT3);
@@ -337,14 +342,55 @@ static void __init omap_init_mmc(void)
337 if (cpu_is_omap1710()) 342 if (cpu_is_omap1710())
338 omap_writel(omap_readl(MOD_CONF_CTRL_1) | (1 << 24), 343 omap_writel(omap_readl(MOD_CONF_CTRL_1) | (1 << 24),
339 MOD_CONF_CTRL_1); 344 MOD_CONF_CTRL_1);
340 mmc2_conf = *mmc; 345 }
346#endif
347}
348
349static void __init omap_init_mmc(void)
350{
351 const struct omap_mmc_config *mmc_conf;
352
353 /* NOTE: assumes MMC was never (wrongly) enabled */
354 mmc_conf = omap_get_config(OMAP_TAG_MMC, struct omap_mmc_config);
355 if (!mmc_conf)
356 return;
357
358 omap_init_mmc_conf(mmc_conf);
359
360 if (mmc_conf->mmc[0].enabled) {
361 mmc1_data.conf = mmc_conf->mmc[0];
362 (void) platform_device_register(&mmc_omap_device1);
363 }
364
365#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2430) || \
366 defined(CONFIG_ARCH_OMAP34XX)
367 if (mmc_conf->mmc[1].enabled) {
368 mmc2_data.conf = mmc_conf->mmc[1];
341 (void) platform_device_register(&mmc_omap_device2); 369 (void) platform_device_register(&mmc_omap_device2);
342 } 370 }
343#endif 371#endif
344 return;
345} 372}
373
374void omap_set_mmc_info(int host, const struct omap_mmc_platform_data *info)
375{
376 switch (host) {
377 case 1:
378 mmc1_data = *info;
379 break;
380#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2430) || \
381 defined(CONFIG_ARCH_OMAP34XX)
382 case 2:
383 mmc2_data = *info;
384 break;
385#endif
386 default:
387 BUG();
388 }
389}
390
346#else 391#else
347static inline void omap_init_mmc(void) {} 392static inline void omap_init_mmc(void) {}
393void omap_set_mmc_info(int host, const struct omap_mmc_platform_data *info) {}
348#endif 394#endif
349 395
350/*-------------------------------------------------------------------------*/ 396/*-------------------------------------------------------------------------*/
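
The refactoring above also adds an omap_set_mmc_info() hook so board code can hand a filled-in omap_mmc_platform_data straight to the MMC platform devices. A minimal sketch of a board-file caller, assuming a hypothetical board_mmc1_data structure (its fields are not part of this diff):

static struct omap_mmc_platform_data board_mmc1_data = {
	/* board-specific controller/slot settings would be filled in here */
};

static void __init board_mmc_init(void)
{
	/* host 1 feeds mmc1_data; host 2 exists only on OMAP16xx/2430/34xx */
	omap_set_mmc_info(1, &board_mmc1_data);
}
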
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index ee4c292683e1..dfc3443e23aa 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -325,7 +325,7 @@ static int __init atstk1002_init(void)
325#ifdef CONFIG_BOARD_ATSTK100X_SPI1 325#ifdef CONFIG_BOARD_ATSTK100X_SPI1
326 at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info)); 326 at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
327#endif 327#endif
328#ifndef CONFIG_BOARD_ATSTK1002_SW2_CUSTOM 328#ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
329 at32_add_device_mci(0, MCI_PDATA); 329 at32_add_device_mci(0, MCI_PDATA);
330#endif 330#endif
331#ifdef CONFIG_BOARD_ATSTK1002_SW5_CUSTOM 331#ifdef CONFIG_BOARD_ATSTK1002_SW5_CUSTOM
diff --git a/arch/avr32/boot/images/.gitignore b/arch/avr32/boot/images/.gitignore
new file mode 100644
index 000000000000..64ea9d0141d2
--- /dev/null
+++ b/arch/avr32/boot/images/.gitignore
@@ -0,0 +1,4 @@
1uImage
2uImage.srec
3vmlinux.cso
4sfdwarf.log
diff --git a/arch/avr32/kernel/.gitignore b/arch/avr32/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/avr32/kernel/.gitignore
@@ -0,0 +1 @@
vmlinux.lds
diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c
index 84a7d44edc67..11e310c567a9 100644
--- a/arch/avr32/kernel/avr32_ksyms.c
+++ b/arch/avr32/kernel/avr32_ksyms.c
@@ -58,6 +58,7 @@ EXPORT_SYMBOL(find_first_zero_bit);
58EXPORT_SYMBOL(find_next_zero_bit); 58EXPORT_SYMBOL(find_next_zero_bit);
59EXPORT_SYMBOL(find_first_bit); 59EXPORT_SYMBOL(find_first_bit);
60EXPORT_SYMBOL(find_next_bit); 60EXPORT_SYMBOL(find_next_bit);
61EXPORT_SYMBOL(generic_find_next_le_bit);
61EXPORT_SYMBOL(generic_find_next_zero_le_bit); 62EXPORT_SYMBOL(generic_find_next_zero_le_bit);
62 63
63/* I/O primitives (lib/io-*.S) */ 64/* I/O primitives (lib/io-*.S) */
diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S
index 890286a1e62b..673178e235f3 100644
--- a/arch/avr32/kernel/syscall-stubs.S
+++ b/arch/avr32/kernel/syscall-stubs.S
@@ -109,3 +109,12 @@ __sys_epoll_pwait:
109 rcall sys_epoll_pwait 109 rcall sys_epoll_pwait
110 sub sp, -4 110 sub sp, -4
111 popm pc 111 popm pc
112
113 .global __sys_sync_file_range
114 .type __sys_sync_file_range,@function
115__sys_sync_file_range:
116 pushm lr
117 st.w --sp, ARG6
118 rcall sys_sync_file_range
119 sub sp, -4
120 popm pc
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index 478bda4c4a09..7ee0057613b3 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -275,7 +275,7 @@ sys_call_table:
275 .long sys_set_robust_list 275 .long sys_set_robust_list
276 .long sys_get_robust_list /* 260 */ 276 .long sys_get_robust_list /* 260 */
277 .long __sys_splice 277 .long __sys_splice
278 .long sys_sync_file_range 278 .long __sys_sync_file_range
279 .long sys_tee 279 .long sys_tee
280 .long sys_vmsplice 280 .long sys_vmsplice
281 .long __sys_epoll_pwait /* 265 */ 281 .long __sys_epoll_pwait /* 265 */
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index b835c4c01368..0d987373bc01 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -116,15 +116,15 @@ asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
116 switch (ret) { 116 switch (ret) {
117 case NOTIFY_OK: 117 case NOTIFY_OK:
118 case NOTIFY_STOP: 118 case NOTIFY_STOP:
119 return; 119 break;
120 case NOTIFY_BAD: 120 case NOTIFY_BAD:
121 die("Fatal Non-Maskable Interrupt", regs, SIGINT); 121 die("Fatal Non-Maskable Interrupt", regs, SIGINT);
122 default: 122 default:
123 printk(KERN_ALERT "Got NMI, but nobody cared. Disabling...\n");
124 nmi_disable();
123 break; 125 break;
124 } 126 }
125 127 nmi_exit();
126 printk(KERN_ALERT "Got NMI, but nobody cared. Disabling...\n");
127 nmi_disable();
128} 128}
129 129
130asmlinkage void do_critical_exception(unsigned long ecr, struct pt_regs *regs) 130asmlinkage void do_critical_exception(unsigned long ecr, struct pt_regs *regs)
diff --git a/arch/avr32/lib/findbit.S b/arch/avr32/lib/findbit.S
index c6b91dee857c..997b33b2288a 100644
--- a/arch/avr32/lib/findbit.S
+++ b/arch/avr32/lib/findbit.S
@@ -123,6 +123,36 @@ ENTRY(find_next_bit)
123 brgt 1b 123 brgt 1b
124 retal r11 124 retal r11
125 125
126ENTRY(generic_find_next_le_bit)
127 lsr r8, r10, 5
128 sub r9, r11, r10
129 retle r11
130
131 lsl r8, 2
132 add r12, r8
133 andl r10, 31, COH
134 breq 1f
135
136 /* offset is not word-aligned. Handle the first (32 - r10) bits */
137 ldswp.w r8, r12[0]
138 sub r12, -4
139 lsr r8, r8, r10
140 brne .L_found
141
142 /* r9 = r9 - (32 - r10) = r9 + r10 - 32 */
143 add r9, r10
144 sub r9, 32
145 retle r11
146
147 /* Main loop. offset must be word-aligned */
1481: ldswp.w r8, r12[0]
149 cp.w r8, 0
150 brne .L_found
151 sub r12, -4
152 sub r9, 32
153 brgt 1b
154 retal r11
155
126ENTRY(generic_find_next_zero_le_bit) 156ENTRY(generic_find_next_zero_le_bit)
127 lsr r8, r10, 5 157 lsr r8, r10, 5
128 sub r9, r11, r10 158 sub r9, r11, r10
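
For readers who do not follow AVR32 assembly: the new generic_find_next_le_bit entry point is expected to return the index of the next set bit in a little-endian bitmap at or after 'offset', or 'size' if none exists. A rough, standalone C sketch of that contract (illustration only; it ignores the byte swapping that the ldswp.w loads perform on this big-endian CPU):

#include <stdint.h>

static unsigned long find_next_le_bit(const uint32_t *addr,
				      unsigned long size,
				      unsigned long offset)
{
	while (offset < size) {
		/* examine the current word, starting at bit (offset % 32) */
		uint32_t word = addr[offset / 32] >> (offset % 32);

		if (word) {
			unsigned long bit = offset + __builtin_ctz(word);
			return bit < size ? bit : size;
		}
		/* nothing set here: continue at the start of the next word */
		offset = (offset / 32 + 1) * 32;
	}
	return size;
}
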
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 9f0df9bd46b7..06ff1ba21465 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -8,7 +8,9 @@
8#include <asm/machvec.h> 8#include <asm/machvec.h>
9#include <linux/scatterlist.h> 9#include <linux/scatterlist.h>
10 10
11#define dma_alloc_coherent platform_dma_alloc_coherent 11#define dma_alloc_coherent(dev, size, handle, gfp) \
12 platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
13
12/* coherent mem. is cheap */ 14/* coherent mem. is cheap */
13static inline void * 15static inline void *
14dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 16dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
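
The redefinition above forces GFP_DMA into whatever flags the caller passes, so an ordinary allocation now implicitly requests DMA-able memory. Illustrative call only, not part of this patch:

#include <linux/dma-mapping.h>

static void *alloc_example(struct device *dev, size_t size, dma_addr_t *handle)
{
	/* expands to platform_dma_alloc_coherent(dev, size, handle, GFP_KERNEL | GFP_DMA) */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}
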
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 5e0c1a6bce8d..2acb6b6543c9 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -266,4 +266,19 @@ do { \
266 } \ 266 } \
267} while (0) 267} while (0)
268 268
269/*
270 * format for entries in the Global Offset Table
271 */
272struct got_entry {
273 uint64_t val;
274};
275
276/*
277 * Layout of the Function Descriptor
278 */
279struct fdesc {
280 uint64_t ip;
281 uint64_t gp;
282};
283
269#endif /* _ASM_IA64_ELF_H */ 284#endif /* _ASM_IA64_ELF_H */
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
index a7acad2bc2f0..1a873b36a4a1 100644
--- a/arch/ia64/include/asm/sections.h
+++ b/arch/ia64/include/asm/sections.h
@@ -6,9 +6,14 @@
6 * David Mosberger-Tang <davidm@hpl.hp.com> 6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */ 7 */
8 8
9#include <linux/elf.h>
10#include <linux/uaccess.h>
9#include <asm-generic/sections.h> 11#include <asm-generic/sections.h>
10 12
11extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[]; 13extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
14#ifdef CONFIG_SMP
15extern char __cpu0_per_cpu[];
16#endif
12extern char __start___vtop_patchlist[], __end___vtop_patchlist[]; 17extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
13extern char __start___rse_patchlist[], __end___rse_patchlist[]; 18extern char __start___rse_patchlist[], __end___rse_patchlist[];
14extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[]; 19extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
@@ -22,7 +27,16 @@ extern char __start_unwind[], __end_unwind[];
22extern char __start_ivt_text[], __end_ivt_text[]; 27extern char __start_ivt_text[], __end_ivt_text[];
23 28
24#undef dereference_function_descriptor 29#undef dereference_function_descriptor
25void *dereference_function_descriptor(void *); 30static inline void *dereference_function_descriptor(void *ptr)
31{
32 struct fdesc *desc = ptr;
33 void *p;
34
35 if (!probe_kernel_address(&desc->ip, p))
36 ptr = p;
37 return ptr;
38}
39
26 40
27#endif /* _ASM_IA64_SECTIONS_H */ 41#endif /* _ASM_IA64_SECTIONS_H */
28 42
diff --git a/arch/ia64/include/asm/sn/bte.h b/arch/ia64/include/asm/sn/bte.h
index a0d214f43115..5efecf06c9a4 100644
--- a/arch/ia64/include/asm/sn/bte.h
+++ b/arch/ia64/include/asm/sn/bte.h
@@ -223,10 +223,11 @@ extern void bte_error_handler(unsigned long);
223 * until the transfer is complete. In order to get the asynch 223 * until the transfer is complete. In order to get the asynch
224 * version of bte_copy, you must perform this check yourself. 224 * version of bte_copy, you must perform this check yourself.
225 */ 225 */
226#define BTE_UNALIGNED_COPY(src, dest, len, mode) \ 226#define BTE_UNALIGNED_COPY(src, dest, len, mode) \
227 (((len & L1_CACHE_MASK) || (src & L1_CACHE_MASK) || \ 227 (((len & (L1_CACHE_BYTES - 1)) || \
228 (dest & L1_CACHE_MASK)) ? \ 228 (src & (L1_CACHE_BYTES - 1)) || \
229 bte_unaligned_copy(src, dest, len, mode) : \ 229 (dest & (L1_CACHE_BYTES - 1))) ? \
230 bte_unaligned_copy(src, dest, len, mode) : \
230 bte_copy(src, dest, len, mode, NULL)) 231 bte_copy(src, dest, len, mode, NULL))
231 232
232 233
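
The hunk above replaces L1_CACHE_MASK with an explicit (L1_CACHE_BYTES - 1) mask. The predicate the macro now evaluates is simply cache-line alignment of source, destination and length; a standalone sketch of that test, with a made-up line size:

#define L1_CACHE_BYTES	128	/* placeholder; the real value comes from the ia64 headers */

/* true when src, dest and len are all L1-cache-line aligned */
static inline int bte_aligned(unsigned long src, unsigned long dest, unsigned long len)
{
	unsigned long mask = L1_CACHE_BYTES - 1;

	return !((src & mask) | (dest & mask) | (len & mask));
}
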
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index d45f215bc8fc..51b75cea7018 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -1232,9 +1232,10 @@ efi_initialize_iomem_resources(struct resource *code_resource,
1232 if (md->attribute & EFI_MEMORY_WP) { 1232 if (md->attribute & EFI_MEMORY_WP) {
1233 name = "System ROM"; 1233 name = "System ROM";
1234 flags |= IORESOURCE_READONLY; 1234 flags |= IORESOURCE_READONLY;
1235 } else { 1235 } else if (md->attribute == EFI_MEMORY_UC)
1236 name = "Uncached RAM";
1237 else
1236 name = "System RAM"; 1238 name = "System RAM";
1237 }
1238 break; 1239 break;
1239 1240
1240 case EFI_ACPI_MEMORY_NVS: 1241 case EFI_ACPI_MEMORY_NVS:
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 8bdea8eb62e3..66e491d8baac 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -367,16 +367,17 @@ start_ap:
367 ;; 367 ;;
368#else 368#else
369(isAP) br.few 2f 369(isAP) br.few 2f
370 mov r20=r19 370 movl r20=__cpu0_per_cpu
371 sub r19=r19,r18
372 ;; 371 ;;
373 shr.u r18=r18,3 372 shr.u r18=r18,3
3741: 3731:
375 ld8 r21=[r20],8;; 374 ld8 r21=[r19],8;;
376 st8[r19]=r21,8 375 st8[r20]=r21,8
377 adds r18=-1,r18;; 376 adds r18=-1,r18;;
378 cmp4.lt p7,p6=0,r18 377 cmp4.lt p7,p6=0,r18
379(p7) br.cond.dptk.few 1b 378(p7) br.cond.dptk.few 1b
379 mov r19=r20
380 ;;
3802: 3812:
381#endif 382#endif
382 tpa r19=r19 383 tpa r19=r19
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 545626f66a4c..aaa7d901521f 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -31,11 +31,9 @@
31#include <linux/elf.h> 31#include <linux/elf.h>
32#include <linux/moduleloader.h> 32#include <linux/moduleloader.h>
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/uaccess.h>
35#include <linux/vmalloc.h> 34#include <linux/vmalloc.h>
36 35
37#include <asm/patch.h> 36#include <asm/patch.h>
38#include <asm/sections.h>
39#include <asm/unaligned.h> 37#include <asm/unaligned.h>
40 38
41#define ARCH_MODULE_DEBUG 0 39#define ARCH_MODULE_DEBUG 0
@@ -137,15 +135,6 @@ static const char *reloc_name[256] = {
137 135
138#undef N 136#undef N
139 137
140struct got_entry {
141 uint64_t val;
142};
143
144struct fdesc {
145 uint64_t ip;
146 uint64_t gp;
147};
148
149/* Opaque struct for insns, to protect against derefs. */ 138/* Opaque struct for insns, to protect against derefs. */
150struct insn; 139struct insn;
151 140
@@ -943,13 +932,3 @@ module_arch_cleanup (struct module *mod)
943 if (mod->arch.core_unw_table) 932 if (mod->arch.core_unw_table)
944 unw_remove_unwind_table(mod->arch.core_unw_table); 933 unw_remove_unwind_table(mod->arch.core_unw_table);
945} 934}
946
947void *dereference_function_descriptor(void *ptr)
948{
949 struct fdesc *desc = ptr;
950 void *p;
951
952 if (!probe_kernel_address(&desc->ip, p))
953 ptr = p;
954 return ptr;
955}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c27d5b2c182b..de636b215677 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -616,7 +616,9 @@ setup_arch (char **cmdline_p)
616 ia64_mca_init(); 616 ia64_mca_init();
617 617
618 platform_setup(cmdline_p); 618 platform_setup(cmdline_p);
619#ifndef CONFIG_IA64_HP_SIM
619 check_sal_cache_flush(); 620 check_sal_cache_flush();
621#endif
620 paging_init(); 622 paging_init();
621} 623}
622 624
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index bcea81e432fd..d8f05e504fbf 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -741,16 +741,14 @@ int __cpu_disable(void)
741 return -EBUSY; 741 return -EBUSY;
742 } 742 }
743 743
744 cpu_clear(cpu, cpu_online_map);
745
746 if (migrate_platform_irqs(cpu)) { 744 if (migrate_platform_irqs(cpu)) {
747 cpu_set(cpu, cpu_online_map); 745 cpu_set(cpu, cpu_online_map);
748 return (-EBUSY); 746 return (-EBUSY);
749 } 747 }
750 748
751 remove_siblinginfo(cpu); 749 remove_siblinginfo(cpu);
752 cpu_clear(cpu, cpu_online_map);
753 fixup_irqs(); 750 fixup_irqs();
751 cpu_clear(cpu, cpu_online_map);
754 local_flush_tlb_all(); 752 local_flush_tlb_all();
755 cpu_clear(cpu, cpu_callin_map); 753 cpu_clear(cpu, cpu_callin_map);
756 return 0; 754 return 0;
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index de71da811cd6..10a7d47e8510 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -215,9 +215,6 @@ SECTIONS
215 /* Per-cpu data: */ 215 /* Per-cpu data: */
216 percpu : { } :percpu 216 percpu : { } :percpu
217 . = ALIGN(PERCPU_PAGE_SIZE); 217 . = ALIGN(PERCPU_PAGE_SIZE);
218#ifdef CONFIG_SMP
219 . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
220#endif
221 __phys_per_cpu_start = .; 218 __phys_per_cpu_start = .;
222 .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET) 219 .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
223 { 220 {
@@ -233,6 +230,11 @@ SECTIONS
233 data : { } :data 230 data : { } :data
234 .data : AT(ADDR(.data) - LOAD_OFFSET) 231 .data : AT(ADDR(.data) - LOAD_OFFSET)
235 { 232 {
233#ifdef CONFIG_SMP
234 . = ALIGN(PERCPU_PAGE_SIZE);
235 __cpu0_per_cpu = .;
236 . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
237#endif
236 DATA_DATA 238 DATA_DATA
237 *(.data1) 239 *(.data1)
238 *(.gnu.linkonce.d*) 240 *(.gnu.linkonce.d*)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 7a37d06376be..cd0d1a7284b7 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -38,6 +38,7 @@
38#include <asm/cacheflush.h> 38#include <asm/cacheflush.h>
39#include <asm/div64.h> 39#include <asm/div64.h>
40#include <asm/tlb.h> 40#include <asm/tlb.h>
41#include <asm/elf.h>
41 42
42#include "misc.h" 43#include "misc.h"
43#include "vti.h" 44#include "vti.h"
@@ -61,12 +62,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
61 { NULL } 62 { NULL }
62}; 63};
63 64
64
65struct fdesc{
66 unsigned long ip;
67 unsigned long gp;
68};
69
70static void kvm_flush_icache(unsigned long start, unsigned long len) 65static void kvm_flush_icache(unsigned long start, unsigned long len)
71{ 66{
72 int l; 67 int l;
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index e566ff43884a..0ee085efbe29 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -163,7 +163,7 @@ per_cpu_init (void)
163 * get_zeroed_page(). 163 * get_zeroed_page().
164 */ 164 */
165 if (first_time) { 165 if (first_time) {
166 void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE; 166 void *cpu0_data = __cpu0_per_cpu;
167 167
168 first_time=0; 168 first_time=0;
169 169
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 78026aabaa7f..d8c5fcd89e5b 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -144,7 +144,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
144 144
145 for_each_possible_early_cpu(cpu) { 145 for_each_possible_early_cpu(cpu) {
146 if (cpu == 0) { 146 if (cpu == 0) {
147 void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE; 147 void *cpu0_data = __cpu0_per_cpu;
148 __per_cpu_offset[cpu] = (char*)cpu0_data - 148 __per_cpu_offset[cpu] = (char*)cpu0_data -
149 __per_cpu_start; 149 __per_cpu_start;
150 } else if (node == node_cpuid[cpu].nid) { 150 } else if (node == node_cpuid[cpu].nid) {
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 529462c01570..79165122501c 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -420,8 +420,10 @@ tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)
420 entry = find_next_zero_bit(map, mapsize, last_entry); 420 entry = find_next_zero_bit(map, mapsize, last_entry);
421 } 421 }
422 422
423 if (entry > mapsize) 423 if (entry > mapsize) {
424 kfree(ca_dmamap);
424 goto map_return; 425 goto map_return;
426 }
425 427
426 for (i = 0; i < entries; i++) 428 for (i = 0; i < entries; i++)
427 set_bit(entry + i, map); 429 set_bit(entry + i, map);
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index a5f864c445b2..f57113f1f892 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -216,10 +216,6 @@ config MEMORY_SIZE
216 default "01000000" if PLAT_M32104UT 216 default "01000000" if PLAT_M32104UT
217 default "00800000" if PLAT_OAKS32R 217 default "00800000" if PLAT_OAKS32R
218 218
219config NOHIGHMEM
220 bool
221 default y
222
223config ARCH_DISCONTIGMEM_ENABLE 219config ARCH_DISCONTIGMEM_ENABLE
224 bool "Internal RAM Support" 220 bool "Internal RAM Support"
225 depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104 221 depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104
@@ -410,11 +406,7 @@ config PCI_DIRECT
410source "drivers/pci/Kconfig" 406source "drivers/pci/Kconfig"
411 407
412config ISA 408config ISA
413 bool "ISA support" 409 bool
414 help
415 Find out whether you have ISA slots on your motherboard. ISA is the
416 name of a bus system, i.e. the way the CPU talks to the other stuff
417 inside your box. If you have ISA, say Y, otherwise N.
418 410
419source "drivers/pcmcia/Kconfig" 411source "drivers/pcmcia/Kconfig"
420 412
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index d4eaa2fd1818..612d35b082a6 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -143,7 +143,7 @@ ret_from_intr:
143 and3 r4, r4, #0x8000 ; check BSM bit 143 and3 r4, r4, #0x8000 ; check BSM bit
144#endif 144#endif
145 beqz r4, resume_kernel 145 beqz r4, resume_kernel
146ENTRY(resume_userspace) 146resume_userspace:
147 DISABLE_INTERRUPTS(r4) ; make sure we don't miss an interrupt 147 DISABLE_INTERRUPTS(r4) ; make sure we don't miss an interrupt
148 ; setting need_resched or sigpending 148 ; setting need_resched or sigpending
149 ; between sampling and the iret 149 ; between sampling and the iret
diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S
index dab7436d7bbe..40180778a5c7 100644
--- a/arch/m32r/kernel/head.S
+++ b/arch/m32r/kernel/head.S
@@ -29,7 +29,6 @@ __INITDATA
29 .global _end 29 .global _end
30ENTRY(stext) 30ENTRY(stext)
31ENTRY(_stext) 31ENTRY(_stext)
32ENTRY(startup_32)
33 /* Setup up the stack pointer */ 32 /* Setup up the stack pointer */
34 LDIMM (r0, spi_stack_top) 33 LDIMM (r0, spi_stack_top)
35 LDIMM (r1, spu_stack_top) 34 LDIMM (r1, spu_stack_top)
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index d0c5b0b7da2f..2aeae4670098 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -22,9 +22,6 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24 24
25atomic_t irq_err_count;
26atomic_t irq_mis_count;
27
28/* 25/*
29 * Generic, controller-independent functions: 26 * Generic, controller-independent functions:
30 */ 27 */
@@ -63,9 +60,6 @@ int show_interrupts(struct seq_file *p, void *v)
63 seq_putc(p, '\n'); 60 seq_putc(p, '\n');
64skip: 61skip:
65 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 62 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
66 } else if (i == NR_IRQS) {
67 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
68 seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
69 } 63 }
70 return 0; 64 return 0;
71} 65}
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 16bcb189a383..22624b51d4d3 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -14,6 +14,7 @@
14#include <asm/delay.h> 14#include <asm/delay.h>
15#include <asm/irq.h> 15#include <asm/irq.h>
16#include <asm/tlbflush.h> 16#include <asm/tlbflush.h>
17#include <asm/pgtable.h>
17 18
18/* platform dependent support */ 19/* platform dependent support */
19EXPORT_SYMBOL(boot_cpu_data); 20EXPORT_SYMBOL(boot_cpu_data);
@@ -65,6 +66,7 @@ EXPORT_SYMBOL(memset);
65EXPORT_SYMBOL(copy_page); 66EXPORT_SYMBOL(copy_page);
66EXPORT_SYMBOL(clear_page); 67EXPORT_SYMBOL(clear_page);
67EXPORT_SYMBOL(strlen); 68EXPORT_SYMBOL(strlen);
69EXPORT_SYMBOL(empty_zero_page);
68 70
69EXPORT_SYMBOL(_inb); 71EXPORT_SYMBOL(_inb);
70EXPORT_SYMBOL(_inw); 72EXPORT_SYMBOL(_inw);
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index a689e2978b6e..5be4faaf5b1c 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -35,8 +35,6 @@
35 35
36#include <linux/err.h> 36#include <linux/err.h>
37 37
38static int hlt_counter=0;
39
40/* 38/*
41 * Return saved PC of a blocked thread. 39 * Return saved PC of a blocked thread.
42 */ 40 */
@@ -48,31 +46,16 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
48/* 46/*
49 * Powermanagement idle function, if any.. 47 * Powermanagement idle function, if any..
50 */ 48 */
51void (*pm_idle)(void) = NULL; 49static void (*pm_idle)(void) = NULL;
52EXPORT_SYMBOL(pm_idle);
53 50
54void (*pm_power_off)(void) = NULL; 51void (*pm_power_off)(void) = NULL;
55EXPORT_SYMBOL(pm_power_off); 52EXPORT_SYMBOL(pm_power_off);
56 53
57void disable_hlt(void)
58{
59 hlt_counter++;
60}
61
62EXPORT_SYMBOL(disable_hlt);
63
64void enable_hlt(void)
65{
66 hlt_counter--;
67}
68
69EXPORT_SYMBOL(enable_hlt);
70
71/* 54/*
72 * We use this is we don't have any better 55 * We use this is we don't have any better
73 * idle routine.. 56 * idle routine..
74 */ 57 */
75void default_idle(void) 58static void default_idle(void)
76{ 59{
77 /* M32R_FIXME: Please use "cpu_sleep" mode. */ 60 /* M32R_FIXME: Please use "cpu_sleep" mode. */
78 cpu_relax(); 61 cpu_relax();
@@ -260,15 +243,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long spu,
260 return 0; 243 return 0;
261} 244}
262 245
263/*
264 * Capture the user space registers if the task is not running (in user space)
265 */
266int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
267{
268 /* M32R_FIXME */
269 return 1;
270}
271
272asmlinkage int sys_fork(unsigned long r0, unsigned long r1, unsigned long r2, 246asmlinkage int sys_fork(unsigned long r0, unsigned long r1, unsigned long r2,
273 unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, 247 unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6,
274 struct pt_regs regs) 248 struct pt_regs regs)
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 7577f971ea4e..929e5c9d3ad9 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -84,7 +84,7 @@ void smp_send_timer(void);
84void smp_ipi_timer_interrupt(struct pt_regs *); 84void smp_ipi_timer_interrupt(struct pt_regs *);
85void smp_local_timer_interrupt(void); 85void smp_local_timer_interrupt(void);
86 86
87void send_IPI_allbutself(int, int); 87static void send_IPI_allbutself(int, int);
88static void send_IPI_mask(cpumask_t, int, int); 88static void send_IPI_mask(cpumask_t, int, int);
89unsigned long send_IPI_mask_phys(cpumask_t, int, int); 89unsigned long send_IPI_mask_phys(cpumask_t, int, int);
90 90
@@ -722,7 +722,7 @@ void smp_local_timer_interrupt(void)
722 * ---------- --- -------------------------------------------------------- 722 * ---------- --- --------------------------------------------------------
723 * 723 *
724 *==========================================================================*/ 724 *==========================================================================*/
725void send_IPI_allbutself(int ipi_num, int try) 725static void send_IPI_allbutself(int ipi_num, int try)
726{ 726{
727 cpumask_t cpumask; 727 cpumask_t cpumask;
728 728
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 994cc1556355..6ea017727cce 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -34,7 +34,6 @@
34#include <asm/hw_irq.h> 34#include <asm/hw_irq.h>
35 35
36#ifdef CONFIG_SMP 36#ifdef CONFIG_SMP
37extern void send_IPI_allbutself(int, int);
38extern void smp_local_timer_interrupt(void); 37extern void smp_local_timer_interrupt(void);
39#endif 38#endif
40 39
@@ -188,7 +187,7 @@ static long last_rtc_update = 0;
188 * timer_interrupt() needs to keep up the real-time clock, 187 * timer_interrupt() needs to keep up the real-time clock,
189 * as well as call the "do_timer()" routine every clocktick 188 * as well as call the "do_timer()" routine every clocktick
190 */ 189 */
191irqreturn_t timer_interrupt(int irq, void *dev_id) 190static irqreturn_t timer_interrupt(int irq, void *dev_id)
192{ 191{
193#ifndef CONFIG_SMP 192#ifndef CONFIG_SMP
194 profile_tick(CPU_PROFILING); 193 profile_tick(CPU_PROFILING);
@@ -228,7 +227,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
228 return IRQ_HANDLED; 227 return IRQ_HANDLED;
229} 228}
230 229
231struct irqaction irq0 = { 230static struct irqaction irq0 = {
232 .handler = timer_interrupt, 231 .handler = timer_interrupt,
233 .flags = IRQF_DISABLED, 232 .flags = IRQF_DISABLED,
234 .mask = CPU_MASK_NONE, 233 .mask = CPU_MASK_NONE,
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 46159a4e644b..03b14e55cd89 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -61,7 +61,7 @@ extern unsigned long eit_vector[];
61 ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \ 61 ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
62 + 0xff000000UL 62 + 0xff000000UL
63 63
64void set_eit_vector_entries(void) 64static void set_eit_vector_entries(void)
65{ 65{
66 extern void default_eit_handler(void); 66 extern void default_eit_handler(void);
67 extern void system_call(void); 67 extern void system_call(void);
@@ -121,9 +121,9 @@ void __init trap_init(void)
121 cpu_init(); 121 cpu_init();
122} 122}
123 123
124int kstack_depth_to_print = 24; 124static int kstack_depth_to_print = 24;
125 125
126void show_trace(struct task_struct *task, unsigned long *stack) 126static void show_trace(struct task_struct *task, unsigned long *stack)
127{ 127{
128 unsigned long addr; 128 unsigned long addr;
129 129
@@ -224,7 +224,7 @@ bad:
224 printk("\n"); 224 printk("\n");
225} 225}
226 226
227DEFINE_SPINLOCK(die_lock); 227static DEFINE_SPINLOCK(die_lock);
228 228
229void die(const char * str, struct pt_regs * regs, long err) 229void die(const char * str, struct pt_regs * regs, long err)
230{ 230{
diff --git a/arch/m32r/lib/delay.c b/arch/m32r/lib/delay.c
index 59bfc34e0d9f..ced549be80f5 100644
--- a/arch/m32r/lib/delay.c
+++ b/arch/m32r/lib/delay.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include <linux/param.h> 8#include <linux/param.h>
9#include <linux/module.h>
9#ifdef CONFIG_SMP 10#ifdef CONFIG_SMP
10#include <linux/sched.h> 11#include <linux/sched.h>
11#include <asm/current.h> 12#include <asm/current.h>
@@ -121,3 +122,4 @@ void __ndelay(unsigned long nsecs)
121{ 122{
122 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ 123 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
123} 124}
125EXPORT_SYMBOL(__ndelay);
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 8e2a0f5faf53..8bd61a640fc9 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc4 3# Linux kernel version: 2.6.27-rc6
4# Wed May 28 22:47:35 2008 4# Wed Sep 10 09:02:00 2008
5# 5#
6CONFIG_M68K=y 6CONFIG_M68K=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -52,7 +52,6 @@ CONFIG_SYSCTL=y
52# CONFIG_EMBEDDED is not set 52# CONFIG_EMBEDDED is not set
53CONFIG_UID16=y 53CONFIG_UID16=y
54CONFIG_SYSCTL_SYSCALL=y 54CONFIG_SYSCTL_SYSCALL=y
55CONFIG_SYSCTL_SYSCALL_CHECK=y
56CONFIG_KALLSYMS=y 55CONFIG_KALLSYMS=y
57# CONFIG_KALLSYMS_EXTRA_PASS is not set 56# CONFIG_KALLSYMS_EXTRA_PASS is not set
58CONFIG_HOTPLUG=y 57CONFIG_HOTPLUG=y
@@ -75,10 +74,16 @@ CONFIG_SLAB=y
75# CONFIG_PROFILING is not set 74# CONFIG_PROFILING is not set
76# CONFIG_MARKERS is not set 75# CONFIG_MARKERS is not set
77# CONFIG_HAVE_OPROFILE is not set 76# CONFIG_HAVE_OPROFILE is not set
77# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
78# CONFIG_HAVE_IOREMAP_PROT is not set
78# CONFIG_HAVE_KPROBES is not set 79# CONFIG_HAVE_KPROBES is not set
79# CONFIG_HAVE_KRETPROBES is not set 80# CONFIG_HAVE_KRETPROBES is not set
81# CONFIG_HAVE_ARCH_TRACEHOOK is not set
80# CONFIG_HAVE_DMA_ATTRS is not set 82# CONFIG_HAVE_DMA_ATTRS is not set
83# CONFIG_USE_GENERIC_SMP_HELPERS is not set
84# CONFIG_HAVE_CLK is not set
81CONFIG_PROC_PAGE_MONITOR=y 85CONFIG_PROC_PAGE_MONITOR=y
86# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
82CONFIG_SLABINFO=y 87CONFIG_SLABINFO=y
83CONFIG_RT_MUTEXES=y 88CONFIG_RT_MUTEXES=y
84# CONFIG_TINY_SHMEM is not set 89# CONFIG_TINY_SHMEM is not set
@@ -95,6 +100,7 @@ CONFIG_BLOCK=y
95# CONFIG_BLK_DEV_IO_TRACE is not set 100# CONFIG_BLK_DEV_IO_TRACE is not set
96# CONFIG_LSF is not set 101# CONFIG_LSF is not set
97CONFIG_BLK_DEV_BSG=y 102CONFIG_BLK_DEV_BSG=y
103# CONFIG_BLK_DEV_INTEGRITY is not set
98 104
99# 105#
100# IO Schedulers 106# IO Schedulers
@@ -166,10 +172,6 @@ CONFIG_GENERIC_ISA_DMA=y
166CONFIG_ZONE_DMA=y 172CONFIG_ZONE_DMA=y
167# CONFIG_ARCH_SUPPORTS_MSI is not set 173# CONFIG_ARCH_SUPPORTS_MSI is not set
168CONFIG_ZORRO_NAMES=y 174CONFIG_ZORRO_NAMES=y
169
170#
171# Networking
172#
173CONFIG_NET=y 175CONFIG_NET=y
174 176
175# 177#
@@ -183,6 +185,7 @@ CONFIG_XFRM=y
183# CONFIG_XFRM_SUB_POLICY is not set 185# CONFIG_XFRM_SUB_POLICY is not set
184CONFIG_XFRM_MIGRATE=y 186CONFIG_XFRM_MIGRATE=y
185# CONFIG_XFRM_STATISTICS is not set 187# CONFIG_XFRM_STATISTICS is not set
188CONFIG_XFRM_IPCOMP=m
186CONFIG_NET_KEY=y 189CONFIG_NET_KEY=y
187CONFIG_NET_KEY_MIGRATE=y 190CONFIG_NET_KEY_MIGRATE=y
188CONFIG_INET=y 191CONFIG_INET=y
@@ -413,6 +416,7 @@ CONFIG_NET_CLS_ROUTE=y
413# 416#
414# CONFIG_CFG80211 is not set 417# CONFIG_CFG80211 is not set
415CONFIG_WIRELESS_EXT=y 418CONFIG_WIRELESS_EXT=y
419# CONFIG_WIRELESS_EXT_SYSFS is not set
416# CONFIG_MAC80211 is not set 420# CONFIG_MAC80211 is not set
417CONFIG_IEEE80211=m 421CONFIG_IEEE80211=m
418# CONFIG_IEEE80211_DEBUG is not set 422# CONFIG_IEEE80211_DEBUG is not set
@@ -432,7 +436,9 @@ CONFIG_IEEE80211_CRYPT_TKIP=m
432CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 436CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
433CONFIG_STANDALONE=y 437CONFIG_STANDALONE=y
434CONFIG_PREVENT_FIRMWARE_BUILD=y 438CONFIG_PREVENT_FIRMWARE_BUILD=y
435CONFIG_FW_LOADER=m 439CONFIG_FW_LOADER=y
440# CONFIG_FIRMWARE_IN_KERNEL is not set
441CONFIG_EXTRA_FIRMWARE=""
436# CONFIG_SYS_HYPERVISOR is not set 442# CONFIG_SYS_HYPERVISOR is not set
437CONFIG_CONNECTOR=m 443CONFIG_CONNECTOR=m
438# CONFIG_MTD is not set 444# CONFIG_MTD is not set
@@ -460,6 +466,7 @@ CONFIG_CDROM_PKTCDVD=m
460CONFIG_CDROM_PKTCDVD_BUFFERS=8 466CONFIG_CDROM_PKTCDVD_BUFFERS=8
461# CONFIG_CDROM_PKTCDVD_WCACHE is not set 467# CONFIG_CDROM_PKTCDVD_WCACHE is not set
462CONFIG_ATA_OVER_ETH=m 468CONFIG_ATA_OVER_ETH=m
469# CONFIG_BLK_DEV_HD is not set
463CONFIG_MISC_DEVICES=y 470CONFIG_MISC_DEVICES=y
464# CONFIG_EEPROM_93CX6 is not set 471# CONFIG_EEPROM_93CX6 is not set
465# CONFIG_ENCLOSURE_SERVICES is not set 472# CONFIG_ENCLOSURE_SERVICES is not set
@@ -470,6 +477,7 @@ CONFIG_BLK_DEV_IDE=y
470# 477#
471# Please see Documentation/ide/ide.txt for help/info on IDE drives 478# Please see Documentation/ide/ide.txt for help/info on IDE drives
472# 479#
480CONFIG_IDE_ATAPI=y
473# CONFIG_BLK_DEV_IDE_SATA is not set 481# CONFIG_BLK_DEV_IDE_SATA is not set
474CONFIG_BLK_DEV_IDEDISK=y 482CONFIG_BLK_DEV_IDEDISK=y
475# CONFIG_IDEDISK_MULTI_MODE is not set 483# CONFIG_IDEDISK_MULTI_MODE is not set
@@ -489,8 +497,6 @@ CONFIG_BLK_DEV_GAYLE=y
489CONFIG_BLK_DEV_IDEDOUBLER=y 497CONFIG_BLK_DEV_IDEDOUBLER=y
490CONFIG_BLK_DEV_BUDDHA=y 498CONFIG_BLK_DEV_BUDDHA=y
491# CONFIG_BLK_DEV_IDEDMA is not set 499# CONFIG_BLK_DEV_IDEDMA is not set
492# CONFIG_BLK_DEV_HD_ONLY is not set
493# CONFIG_BLK_DEV_HD is not set
494 500
495# 501#
496# SCSI device support 502# SCSI device support
@@ -556,6 +562,7 @@ CONFIG_A2091_SCSI=y
556CONFIG_GVP11_SCSI=y 562CONFIG_GVP11_SCSI=y
557CONFIG_SCSI_A4000T=y 563CONFIG_SCSI_A4000T=y
558CONFIG_SCSI_ZORRO7XX=y 564CONFIG_SCSI_ZORRO7XX=y
565# CONFIG_SCSI_DH is not set
559CONFIG_MD=y 566CONFIG_MD=y
560CONFIG_BLK_DEV_MD=m 567CONFIG_BLK_DEV_MD=m
561CONFIG_MD_LINEAR=m 568CONFIG_MD_LINEAR=m
@@ -564,7 +571,7 @@ CONFIG_MD_RAID1=m
564# CONFIG_MD_RAID10 is not set 571# CONFIG_MD_RAID10 is not set
565CONFIG_MD_RAID456=m 572CONFIG_MD_RAID456=m
566CONFIG_MD_RAID5_RESHAPE=y 573CONFIG_MD_RAID5_RESHAPE=y
567CONFIG_MD_MULTIPATH=m 574# CONFIG_MD_MULTIPATH is not set
568# CONFIG_MD_FAULTY is not set 575# CONFIG_MD_FAULTY is not set
569CONFIG_BLK_DEV_DM=m 576CONFIG_BLK_DEV_DM=m
570# CONFIG_DM_DEBUG is not set 577# CONFIG_DM_DEBUG is not set
@@ -573,13 +580,9 @@ CONFIG_DM_SNAPSHOT=m
573CONFIG_DM_MIRROR=m 580CONFIG_DM_MIRROR=m
574CONFIG_DM_ZERO=m 581CONFIG_DM_ZERO=m
575CONFIG_DM_MULTIPATH=m 582CONFIG_DM_MULTIPATH=m
576CONFIG_DM_MULTIPATH_EMC=m
577CONFIG_DM_MULTIPATH_RDAC=m
578CONFIG_DM_MULTIPATH_HP=m
579# CONFIG_DM_DELAY is not set 583# CONFIG_DM_DELAY is not set
580CONFIG_DM_UEVENT=y 584CONFIG_DM_UEVENT=y
581CONFIG_NETDEVICES=y 585CONFIG_NETDEVICES=y
582# CONFIG_NETDEVICES_MULTIQUEUE is not set
583CONFIG_DUMMY=m 586CONFIG_DUMMY=m
584# CONFIG_BONDING is not set 587# CONFIG_BONDING is not set
585CONFIG_MACVLAN=m 588CONFIG_MACVLAN=m
@@ -722,6 +725,7 @@ CONFIG_INPUT_M68K_BEEP=m
722# Character devices 725# Character devices
723# 726#
724CONFIG_VT=y 727CONFIG_VT=y
728CONFIG_CONSOLE_TRANSLATIONS=y
725CONFIG_VT_CONSOLE=y 729CONFIG_VT_CONSOLE=y
726CONFIG_HW_CONSOLE=y 730CONFIG_HW_CONSOLE=y
727CONFIG_VT_HW_CONSOLE_BINDING=y 731CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -757,6 +761,7 @@ CONFIG_GEN_RTC_X=y
757# CONFIG_POWER_SUPPLY is not set 761# CONFIG_POWER_SUPPLY is not set
758# CONFIG_HWMON is not set 762# CONFIG_HWMON is not set
759# CONFIG_THERMAL is not set 763# CONFIG_THERMAL is not set
764# CONFIG_THERMAL_HWMON is not set
760# CONFIG_WATCHDOG is not set 765# CONFIG_WATCHDOG is not set
761 766
762# 767#
@@ -768,8 +773,10 @@ CONFIG_SSB_POSSIBLE=y
768# 773#
769# Multifunction device drivers 774# Multifunction device drivers
770# 775#
776# CONFIG_MFD_CORE is not set
771# CONFIG_MFD_SM501 is not set 777# CONFIG_MFD_SM501 is not set
772# CONFIG_HTC_PASIC3 is not set 778# CONFIG_HTC_PASIC3 is not set
779# CONFIG_MFD_TMIO is not set
773 780
774# 781#
775# Multimedia devices 782# Multimedia devices
@@ -844,10 +851,6 @@ CONFIG_LOGO=y
844CONFIG_LOGO_LINUX_MONO=y 851CONFIG_LOGO_LINUX_MONO=y
845CONFIG_LOGO_LINUX_VGA16=y 852CONFIG_LOGO_LINUX_VGA16=y
846CONFIG_LOGO_LINUX_CLUT224=y 853CONFIG_LOGO_LINUX_CLUT224=y
847
848#
849# Sound
850#
851CONFIG_SOUND=m 854CONFIG_SOUND=m
852CONFIG_DMASOUND_PAULA=m 855CONFIG_DMASOUND_PAULA=m
853CONFIG_DMASOUND=m 856CONFIG_DMASOUND=m
@@ -861,6 +864,7 @@ CONFIG_HIDRAW=y
861# CONFIG_NEW_LEDS is not set 864# CONFIG_NEW_LEDS is not set
862# CONFIG_ACCESSIBILITY is not set 865# CONFIG_ACCESSIBILITY is not set
863# CONFIG_RTC_CLASS is not set 866# CONFIG_RTC_CLASS is not set
867# CONFIG_DMADEVICES is not set
864# CONFIG_AUXDISPLAY is not set 868# CONFIG_AUXDISPLAY is not set
865# CONFIG_UIO is not set 869# CONFIG_UIO is not set
866 870
@@ -899,6 +903,7 @@ CONFIG_XFS_FS=m
899CONFIG_OCFS2_FS=m 903CONFIG_OCFS2_FS=m
900CONFIG_OCFS2_FS_O2CB=m 904CONFIG_OCFS2_FS_O2CB=m
901CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m 905CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
906# CONFIG_OCFS2_FS_STATS is not set
902# CONFIG_OCFS2_DEBUG_MASKLOG is not set 907# CONFIG_OCFS2_DEBUG_MASKLOG is not set
903# CONFIG_OCFS2_DEBUG_FS is not set 908# CONFIG_OCFS2_DEBUG_FS is not set
904CONFIG_DNOTIFY=y 909CONFIG_DNOTIFY=y
@@ -958,6 +963,7 @@ CONFIG_HFSPLUS_FS=m
958CONFIG_CRAMFS=m 963CONFIG_CRAMFS=m
959# CONFIG_VXFS_FS is not set 964# CONFIG_VXFS_FS is not set
960CONFIG_MINIX_FS=y 965CONFIG_MINIX_FS=y
966# CONFIG_OMFS_FS is not set
961CONFIG_HPFS_FS=m 967CONFIG_HPFS_FS=m
962# CONFIG_QNX4FS_FS is not set 968# CONFIG_QNX4FS_FS is not set
963# CONFIG_ROMFS_FS is not set 969# CONFIG_ROMFS_FS is not set
@@ -980,7 +986,6 @@ CONFIG_EXPORTFS=m
980CONFIG_NFS_COMMON=y 986CONFIG_NFS_COMMON=y
981CONFIG_SUNRPC=m 987CONFIG_SUNRPC=m
982CONFIG_SUNRPC_GSS=m 988CONFIG_SUNRPC_GSS=m
983CONFIG_SUNRPC_BIND34=y
984CONFIG_RPCSEC_GSS_KRB5=m 989CONFIG_RPCSEC_GSS_KRB5=m
985# CONFIG_RPCSEC_GSS_SPKM3 is not set 990# CONFIG_RPCSEC_GSS_SPKM3 is not set
986CONFIG_SMB_FS=m 991CONFIG_SMB_FS=m
@@ -989,7 +994,6 @@ CONFIG_SMB_NLS_REMOTE="cp437"
989# CONFIG_CIFS is not set 994# CONFIG_CIFS is not set
990# CONFIG_NCP_FS is not set 995# CONFIG_NCP_FS is not set
991CONFIG_CODA_FS=m 996CONFIG_CODA_FS=m
992# CONFIG_CODA_FS_OLD_API is not set
993# CONFIG_AFS_FS is not set 997# CONFIG_AFS_FS is not set
994 998
995# 999#
@@ -1054,6 +1058,8 @@ CONFIG_MAGIC_SYSRQ=y
1054# CONFIG_HEADERS_CHECK is not set 1058# CONFIG_HEADERS_CHECK is not set
1055# CONFIG_DEBUG_KERNEL is not set 1059# CONFIG_DEBUG_KERNEL is not set
1056CONFIG_DEBUG_BUGVERBOSE=y 1060CONFIG_DEBUG_BUGVERBOSE=y
1061CONFIG_DEBUG_MEMORY_INIT=y
1062CONFIG_SYSCTL_SYSCALL_CHECK=y
1057# CONFIG_SAMPLES is not set 1063# CONFIG_SAMPLES is not set
1058 1064
1059# 1065#
@@ -1113,6 +1119,10 @@ CONFIG_CRYPTO_CRC32C=m
1113CONFIG_CRYPTO_MD4=m 1119CONFIG_CRYPTO_MD4=m
1114CONFIG_CRYPTO_MD5=m 1120CONFIG_CRYPTO_MD5=m
1115CONFIG_CRYPTO_MICHAEL_MIC=m 1121CONFIG_CRYPTO_MICHAEL_MIC=m
1122CONFIG_CRYPTO_RMD128=m
1123CONFIG_CRYPTO_RMD160=m
1124CONFIG_CRYPTO_RMD256=m
1125CONFIG_CRYPTO_RMD320=m
1116CONFIG_CRYPTO_SHA1=m 1126CONFIG_CRYPTO_SHA1=m
1117CONFIG_CRYPTO_SHA256=m 1127CONFIG_CRYPTO_SHA256=m
1118CONFIG_CRYPTO_SHA512=m 1128CONFIG_CRYPTO_SHA512=m
@@ -1154,6 +1164,7 @@ CONFIG_BITREVERSE=y
1154# CONFIG_GENERIC_FIND_NEXT_BIT is not set 1164# CONFIG_GENERIC_FIND_NEXT_BIT is not set
1155CONFIG_CRC_CCITT=m 1165CONFIG_CRC_CCITT=m
1156CONFIG_CRC16=m 1166CONFIG_CRC16=m
1167CONFIG_CRC_T10DIF=y
1157CONFIG_CRC_ITU_T=m 1168CONFIG_CRC_ITU_T=m
1158CONFIG_CRC32=y 1169CONFIG_CRC32=y
1159# CONFIG_CRC7 is not set 1170# CONFIG_CRC7 is not set
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index e2d511e2a1d1..c41b854c0284 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc4 3# Linux kernel version: 2.6.27-rc6
4# Wed May 28 22:47:35 2008 4# Wed Sep 10 09:02:01 2008
5# 5#
6CONFIG_M68K=y 6CONFIG_M68K=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -52,7 +52,6 @@ CONFIG_SYSCTL=y
52# CONFIG_EMBEDDED is not set 52# CONFIG_EMBEDDED is not set
53CONFIG_UID16=y 53CONFIG_UID16=y
54CONFIG_SYSCTL_SYSCALL=y 54CONFIG_SYSCTL_SYSCALL=y
55CONFIG_SYSCTL_SYSCALL_CHECK=y
56CONFIG_KALLSYMS=y 55CONFIG_KALLSYMS=y
57# CONFIG_KALLSYMS_EXTRA_PASS is not set 56# CONFIG_KALLSYMS_EXTRA_PASS is not set
58CONFIG_HOTPLUG=y 57CONFIG_HOTPLUG=y
@@ -75,10 +74,16 @@ CONFIG_SLAB=y
75# CONFIG_PROFILING is not set 74# CONFIG_PROFILING is not set
76# CONFIG_MARKERS is not set 75# CONFIG_MARKERS is not set
77# CONFIG_HAVE_OPROFILE is not set 76# CONFIG_HAVE_OPROFILE is not set
77# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
78# CONFIG_HAVE_IOREMAP_PROT is not set
78# CONFIG_HAVE_KPROBES is not set 79# CONFIG_HAVE_KPROBES is not set
79# CONFIG_HAVE_KRETPROBES is not set 80# CONFIG_HAVE_KRETPROBES is not set
81# CONFIG_HAVE_ARCH_TRACEHOOK is not set
80# CONFIG_HAVE_DMA_ATTRS is not set 82# CONFIG_HAVE_DMA_ATTRS is not set
83# CONFIG_USE_GENERIC_SMP_HELPERS is not set
84# CONFIG_HAVE_CLK is not set
81CONFIG_PROC_PAGE_MONITOR=y 85CONFIG_PROC_PAGE_MONITOR=y
86# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
82CONFIG_SLABINFO=y 87CONFIG_SLABINFO=y
83CONFIG_RT_MUTEXES=y 88CONFIG_RT_MUTEXES=y
84# CONFIG_TINY_SHMEM is not set 89# CONFIG_TINY_SHMEM is not set
@@ -95,6 +100,7 @@ CONFIG_BLOCK=y
95# CONFIG_BLK_DEV_IO_TRACE is not set 100# CONFIG_BLK_DEV_IO_TRACE is not set
96# CONFIG_LSF is not set 101# CONFIG_LSF is not set
97CONFIG_BLK_DEV_BSG=y 102CONFIG_BLK_DEV_BSG=y
103# CONFIG_BLK_DEV_INTEGRITY is not set
98 104
99# 105#
100# IO Schedulers 106# IO Schedulers
@@ -161,10 +167,6 @@ CONFIG_HEARTBEAT=y
161CONFIG_PROC_HARDWARE=y 167CONFIG_PROC_HARDWARE=y
162CONFIG_ZONE_DMA=y 168CONFIG_ZONE_DMA=y
163# CONFIG_ARCH_SUPPORTS_MSI is not set 169# CONFIG_ARCH_SUPPORTS_MSI is not set
164
165#
166# Networking
167#
168CONFIG_NET=y 170CONFIG_NET=y
169 171
170# 172#
@@ -178,6 +180,7 @@ CONFIG_XFRM=y
178# CONFIG_XFRM_SUB_POLICY is not set 180# CONFIG_XFRM_SUB_POLICY is not set
179CONFIG_XFRM_MIGRATE=y 181CONFIG_XFRM_MIGRATE=y
180# CONFIG_XFRM_STATISTICS is not set 182# CONFIG_XFRM_STATISTICS is not set
183CONFIG_XFRM_IPCOMP=m
181CONFIG_NET_KEY=y 184CONFIG_NET_KEY=y
182CONFIG_NET_KEY_MIGRATE=y 185CONFIG_NET_KEY_MIGRATE=y
183CONFIG_INET=y 186CONFIG_INET=y
@@ -411,6 +414,7 @@ CONFIG_NET_CLS_ROUTE=y
411# 414#
412# CONFIG_CFG80211 is not set 415# CONFIG_CFG80211 is not set
413CONFIG_WIRELESS_EXT=y 416CONFIG_WIRELESS_EXT=y
417# CONFIG_WIRELESS_EXT_SYSFS is not set
414# CONFIG_MAC80211 is not set 418# CONFIG_MAC80211 is not set
415CONFIG_IEEE80211=m 419CONFIG_IEEE80211=m
416# CONFIG_IEEE80211_DEBUG is not set 420# CONFIG_IEEE80211_DEBUG is not set
@@ -430,7 +434,9 @@ CONFIG_IEEE80211_CRYPT_TKIP=m
430CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 434CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
431CONFIG_STANDALONE=y 435CONFIG_STANDALONE=y
432CONFIG_PREVENT_FIRMWARE_BUILD=y 436CONFIG_PREVENT_FIRMWARE_BUILD=y
433CONFIG_FW_LOADER=m 437CONFIG_FW_LOADER=y
438# CONFIG_FIRMWARE_IN_KERNEL is not set
439CONFIG_EXTRA_FIRMWARE=""
434# CONFIG_SYS_HYPERVISOR is not set 440# CONFIG_SYS_HYPERVISOR is not set
435CONFIG_CONNECTOR=m 441CONFIG_CONNECTOR=m
436# CONFIG_MTD is not set 442# CONFIG_MTD is not set
@@ -448,6 +454,7 @@ CONFIG_CDROM_PKTCDVD=m
448CONFIG_CDROM_PKTCDVD_BUFFERS=8 454CONFIG_CDROM_PKTCDVD_BUFFERS=8
449# CONFIG_CDROM_PKTCDVD_WCACHE is not set 455# CONFIG_CDROM_PKTCDVD_WCACHE is not set
450CONFIG_ATA_OVER_ETH=m 456CONFIG_ATA_OVER_ETH=m
457# CONFIG_BLK_DEV_HD is not set
451CONFIG_MISC_DEVICES=y 458CONFIG_MISC_DEVICES=y
452# CONFIG_EEPROM_93CX6 is not set 459# CONFIG_EEPROM_93CX6 is not set
453# CONFIG_ENCLOSURE_SERVICES is not set 460# CONFIG_ENCLOSURE_SERVICES is not set
@@ -499,6 +506,7 @@ CONFIG_SCSI_SRP_TGT_ATTRS=y
499CONFIG_SCSI_LOWLEVEL=y 506CONFIG_SCSI_LOWLEVEL=y
500CONFIG_ISCSI_TCP=m 507CONFIG_ISCSI_TCP=m
501# CONFIG_SCSI_DEBUG is not set 508# CONFIG_SCSI_DEBUG is not set
509# CONFIG_SCSI_DH is not set
502CONFIG_MD=y 510CONFIG_MD=y
503CONFIG_BLK_DEV_MD=m 511CONFIG_BLK_DEV_MD=m
504CONFIG_MD_LINEAR=m 512CONFIG_MD_LINEAR=m
@@ -507,7 +515,7 @@ CONFIG_MD_RAID1=m
507# CONFIG_MD_RAID10 is not set 515# CONFIG_MD_RAID10 is not set
508CONFIG_MD_RAID456=m 516CONFIG_MD_RAID456=m
509CONFIG_MD_RAID5_RESHAPE=y 517CONFIG_MD_RAID5_RESHAPE=y
510CONFIG_MD_MULTIPATH=m 518# CONFIG_MD_MULTIPATH is not set
511# CONFIG_MD_FAULTY is not set 519# CONFIG_MD_FAULTY is not set
512CONFIG_BLK_DEV_DM=m 520CONFIG_BLK_DEV_DM=m
513# CONFIG_DM_DEBUG is not set 521# CONFIG_DM_DEBUG is not set
@@ -516,13 +524,9 @@ CONFIG_DM_SNAPSHOT=m
516CONFIG_DM_MIRROR=m 524CONFIG_DM_MIRROR=m
517CONFIG_DM_ZERO=m 525CONFIG_DM_ZERO=m
518CONFIG_DM_MULTIPATH=m 526CONFIG_DM_MULTIPATH=m
519CONFIG_DM_MULTIPATH_EMC=m
520CONFIG_DM_MULTIPATH_RDAC=m
521CONFIG_DM_MULTIPATH_HP=m
522# CONFIG_DM_DELAY is not set 527# CONFIG_DM_DELAY is not set
523CONFIG_DM_UEVENT=y 528CONFIG_DM_UEVENT=y
524CONFIG_NETDEVICES=y 529CONFIG_NETDEVICES=y
525# CONFIG_NETDEVICES_MULTIQUEUE is not set
526CONFIG_DUMMY=m 530CONFIG_DUMMY=m
527# CONFIG_BONDING is not set 531# CONFIG_BONDING is not set
528CONFIG_MACVLAN=m 532CONFIG_MACVLAN=m
@@ -532,7 +536,6 @@ CONFIG_VETH=m
532# CONFIG_PHYLIB is not set 536# CONFIG_PHYLIB is not set
533CONFIG_NET_ETHERNET=y 537CONFIG_NET_ETHERNET=y
534# CONFIG_MII is not set 538# CONFIG_MII is not set
535CONFIG_APOLLO_ELPLUS=y
536# CONFIG_IBM_NEW_EMAC_ZMII is not set 539# CONFIG_IBM_NEW_EMAC_ZMII is not set
537# CONFIG_IBM_NEW_EMAC_RGMII is not set 540# CONFIG_IBM_NEW_EMAC_RGMII is not set
538# CONFIG_IBM_NEW_EMAC_TAH is not set 541# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -627,6 +630,7 @@ CONFIG_SERIO_LIBPS2=m
627# Character devices 630# Character devices
628# 631#
629CONFIG_VT=y 632CONFIG_VT=y
633CONFIG_CONSOLE_TRANSLATIONS=y
630CONFIG_VT_CONSOLE=y 634CONFIG_VT_CONSOLE=y
631CONFIG_HW_CONSOLE=y 635CONFIG_HW_CONSOLE=y
632CONFIG_VT_HW_CONSOLE_BINDING=y 636CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -657,6 +661,7 @@ CONFIG_GEN_RTC_X=y
657# CONFIG_POWER_SUPPLY is not set 661# CONFIG_POWER_SUPPLY is not set
658# CONFIG_HWMON is not set 662# CONFIG_HWMON is not set
659# CONFIG_THERMAL is not set 663# CONFIG_THERMAL is not set
664# CONFIG_THERMAL_HWMON is not set
660# CONFIG_WATCHDOG is not set 665# CONFIG_WATCHDOG is not set
661 666
662# 667#
@@ -668,8 +673,10 @@ CONFIG_SSB_POSSIBLE=y
668# 673#
669# Multifunction device drivers 674# Multifunction device drivers
670# 675#
676# CONFIG_MFD_CORE is not set
671# CONFIG_MFD_SM501 is not set 677# CONFIG_MFD_SM501 is not set
672# CONFIG_HTC_PASIC3 is not set 678# CONFIG_HTC_PASIC3 is not set
679# CONFIG_MFD_TMIO is not set
673 680
674# 681#
675# Multimedia devices 682# Multimedia devices
@@ -738,10 +745,6 @@ CONFIG_LOGO=y
738CONFIG_LOGO_LINUX_MONO=y 745CONFIG_LOGO_LINUX_MONO=y
739# CONFIG_LOGO_LINUX_VGA16 is not set 746# CONFIG_LOGO_LINUX_VGA16 is not set
740# CONFIG_LOGO_LINUX_CLUT224 is not set 747# CONFIG_LOGO_LINUX_CLUT224 is not set
741
742#
743# Sound
744#
745# CONFIG_SOUND is not set 748# CONFIG_SOUND is not set
746CONFIG_HID_SUPPORT=y 749CONFIG_HID_SUPPORT=y
747CONFIG_HID=m 750CONFIG_HID=m
@@ -753,6 +756,7 @@ CONFIG_HIDRAW=y
753# CONFIG_NEW_LEDS is not set 756# CONFIG_NEW_LEDS is not set
754# CONFIG_ACCESSIBILITY is not set 757# CONFIG_ACCESSIBILITY is not set
755# CONFIG_RTC_CLASS is not set 758# CONFIG_RTC_CLASS is not set
759# CONFIG_DMADEVICES is not set
756# CONFIG_UIO is not set 760# CONFIG_UIO is not set
757 761
758# 762#
@@ -789,6 +793,7 @@ CONFIG_XFS_FS=m
789CONFIG_OCFS2_FS=m 793CONFIG_OCFS2_FS=m
790CONFIG_OCFS2_FS_O2CB=m 794CONFIG_OCFS2_FS_O2CB=m
791CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m 795CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
796# CONFIG_OCFS2_FS_STATS is not set
792# CONFIG_OCFS2_DEBUG_MASKLOG is not set 797# CONFIG_OCFS2_DEBUG_MASKLOG is not set
793# CONFIG_OCFS2_DEBUG_FS is not set 798# CONFIG_OCFS2_DEBUG_FS is not set
794CONFIG_DNOTIFY=y 799CONFIG_DNOTIFY=y
@@ -848,6 +853,7 @@ CONFIG_HFSPLUS_FS=m
848CONFIG_CRAMFS=m 853CONFIG_CRAMFS=m
849# CONFIG_VXFS_FS is not set 854# CONFIG_VXFS_FS is not set
850CONFIG_MINIX_FS=y 855CONFIG_MINIX_FS=y
856# CONFIG_OMFS_FS is not set
851CONFIG_HPFS_FS=m 857CONFIG_HPFS_FS=m
852# CONFIG_QNX4FS_FS is not set 858# CONFIG_QNX4FS_FS is not set
853# CONFIG_ROMFS_FS is not set 859# CONFIG_ROMFS_FS is not set
@@ -860,18 +866,17 @@ CONFIG_NFS_FS=y
860CONFIG_NFS_V3=y 866CONFIG_NFS_V3=y
861# CONFIG_NFS_V3_ACL is not set 867# CONFIG_NFS_V3_ACL is not set
862CONFIG_NFS_V4=y 868CONFIG_NFS_V4=y
869CONFIG_ROOT_NFS=y
863CONFIG_NFSD=m 870CONFIG_NFSD=m
864CONFIG_NFSD_V3=y 871CONFIG_NFSD_V3=y
865# CONFIG_NFSD_V3_ACL is not set 872# CONFIG_NFSD_V3_ACL is not set
866# CONFIG_NFSD_V4 is not set 873# CONFIG_NFSD_V4 is not set
867CONFIG_ROOT_NFS=y
868CONFIG_LOCKD=y 874CONFIG_LOCKD=y
869CONFIG_LOCKD_V4=y 875CONFIG_LOCKD_V4=y
870CONFIG_EXPORTFS=m 876CONFIG_EXPORTFS=m
871CONFIG_NFS_COMMON=y 877CONFIG_NFS_COMMON=y
872CONFIG_SUNRPC=y 878CONFIG_SUNRPC=y
873CONFIG_SUNRPC_GSS=y 879CONFIG_SUNRPC_GSS=y
874CONFIG_SUNRPC_BIND34=y
875CONFIG_RPCSEC_GSS_KRB5=y 880CONFIG_RPCSEC_GSS_KRB5=y
876# CONFIG_RPCSEC_GSS_SPKM3 is not set 881# CONFIG_RPCSEC_GSS_SPKM3 is not set
877CONFIG_SMB_FS=m 882CONFIG_SMB_FS=m
@@ -880,7 +885,6 @@ CONFIG_SMB_NLS_REMOTE="cp437"
880# CONFIG_CIFS is not set 885# CONFIG_CIFS is not set
881# CONFIG_NCP_FS is not set 886# CONFIG_NCP_FS is not set
882CONFIG_CODA_FS=m 887CONFIG_CODA_FS=m
883# CONFIG_CODA_FS_OLD_API is not set
884# CONFIG_AFS_FS is not set 888# CONFIG_AFS_FS is not set
885 889
886# 890#
@@ -944,6 +948,8 @@ CONFIG_MAGIC_SYSRQ=y
944# CONFIG_HEADERS_CHECK is not set 948# CONFIG_HEADERS_CHECK is not set
945# CONFIG_DEBUG_KERNEL is not set 949# CONFIG_DEBUG_KERNEL is not set
946CONFIG_DEBUG_BUGVERBOSE=y 950CONFIG_DEBUG_BUGVERBOSE=y
951CONFIG_DEBUG_MEMORY_INIT=y
952CONFIG_SYSCTL_SYSCALL_CHECK=y
947# CONFIG_SAMPLES is not set 953# CONFIG_SAMPLES is not set
948 954
949# 955#
@@ -1003,6 +1009,10 @@ CONFIG_CRYPTO_CRC32C=m
1003CONFIG_CRYPTO_MD4=m 1009CONFIG_CRYPTO_MD4=m
1004CONFIG_CRYPTO_MD5=y 1010CONFIG_CRYPTO_MD5=y
1005CONFIG_CRYPTO_MICHAEL_MIC=m 1011CONFIG_CRYPTO_MICHAEL_MIC=m
1012CONFIG_CRYPTO_RMD128=m
1013CONFIG_CRYPTO_RMD160=m
1014CONFIG_CRYPTO_RMD256=m
1015CONFIG_CRYPTO_RMD320=m
1006CONFIG_CRYPTO_SHA1=m 1016CONFIG_CRYPTO_SHA1=m
1007CONFIG_CRYPTO_SHA256=m 1017CONFIG_CRYPTO_SHA256=m
1008CONFIG_CRYPTO_SHA512=m 1018CONFIG_CRYPTO_SHA512=m
@@ -1044,6 +1054,7 @@ CONFIG_BITREVERSE=y
1044# CONFIG_GENERIC_FIND_NEXT_BIT is not set 1054# CONFIG_GENERIC_FIND_NEXT_BIT is not set
1045CONFIG_CRC_CCITT=m 1055CONFIG_CRC_CCITT=m
1046CONFIG_CRC16=m 1056CONFIG_CRC16=m
1057CONFIG_CRC_T10DIF=y
1047CONFIG_CRC_ITU_T=m 1058CONFIG_CRC_ITU_T=m
1048CONFIG_CRC32=y 1059CONFIG_CRC32=y
1049# CONFIG_CRC7 is not set 1060# CONFIG_CRC7 is not set
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 6e20d656adaf..654c5acb9e86 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc4 3# Linux kernel version: 2.6.27-rc6
4# Wed May 28 22:47:35 2008 4# Wed Sep 10 09:02:02 2008
5# 5#
6CONFIG_M68K=y 6CONFIG_M68K=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -52,7 +52,6 @@ CONFIG_SYSCTL=y
52# CONFIG_EMBEDDED is not set 52# CONFIG_EMBEDDED is not set
53CONFIG_UID16=y 53CONFIG_UID16=y
54CONFIG_SYSCTL_SYSCALL=y 54CONFIG_SYSCTL_SYSCALL=y
55CONFIG_SYSCTL_SYSCALL_CHECK=y
56CONFIG_KALLSYMS=y 55CONFIG_KALLSYMS=y
57# CONFIG_KALLSYMS_EXTRA_PASS is not set 56# CONFIG_KALLSYMS_EXTRA_PASS is not set
58CONFIG_HOTPLUG=y 57CONFIG_HOTPLUG=y
@@ -75,10 +74,16 @@ CONFIG_SLAB=y
75# CONFIG_PROFILING is not set 74# CONFIG_PROFILING is not set
76# CONFIG_MARKERS is not set 75# CONFIG_MARKERS is not set
77# CONFIG_HAVE_OPROFILE is not set 76# CONFIG_HAVE_OPROFILE is not set
77# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
78# CONFIG_HAVE_IOREMAP_PROT is not set
78# CONFIG_HAVE_KPROBES is not set 79# CONFIG_HAVE_KPROBES is not set
79# CONFIG_HAVE_KRETPROBES is not set 80# CONFIG_HAVE_KRETPROBES is not set
81# CONFIG_HAVE_ARCH_TRACEHOOK is not set
80# CONFIG_HAVE_DMA_ATTRS is not set 82# CONFIG_HAVE_DMA_ATTRS is not set
83# CONFIG_USE_GENERIC_SMP_HELPERS is not set
84# CONFIG_HAVE_CLK is not set
81CONFIG_PROC_PAGE_MONITOR=y 85CONFIG_PROC_PAGE_MONITOR=y
86# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
82CONFIG_SLABINFO=y 87CONFIG_SLABINFO=y
83CONFIG_RT_MUTEXES=y 88CONFIG_RT_MUTEXES=y
84# CONFIG_TINY_SHMEM is not set 89# CONFIG_TINY_SHMEM is not set
@@ -95,6 +100,7 @@ CONFIG_BLOCK=y
95# CONFIG_BLK_DEV_IO_TRACE is not set 100# CONFIG_BLK_DEV_IO_TRACE is not set
96# CONFIG_LSF is not set 101# CONFIG_LSF is not set
97CONFIG_BLK_DEV_BSG=y 102CONFIG_BLK_DEV_BSG=y
103# CONFIG_BLK_DEV_INTEGRITY is not set
98 104
99# 105#
100# IO Schedulers 106# IO Schedulers
@@ -162,10 +168,6 @@ CONFIG_HEARTBEAT=y
162CONFIG_PROC_HARDWARE=y 168CONFIG_PROC_HARDWARE=y
163CONFIG_ZONE_DMA=y 169CONFIG_ZONE_DMA=y
164# CONFIG_ARCH_SUPPORTS_MSI is not set 170# CONFIG_ARCH_SUPPORTS_MSI is not set
165
166#
167# Networking
168#
169CONFIG_NET=y 171CONFIG_NET=y
170 172
171# 173#
@@ -179,6 +181,7 @@ CONFIG_XFRM=y
179# CONFIG_XFRM_SUB_POLICY is not set 181# CONFIG_XFRM_SUB_POLICY is not set
180CONFIG_XFRM_MIGRATE=y 182CONFIG_XFRM_MIGRATE=y
181# CONFIG_XFRM_STATISTICS is not set 183# CONFIG_XFRM_STATISTICS is not set
184CONFIG_XFRM_IPCOMP=m
182CONFIG_NET_KEY=y 185CONFIG_NET_KEY=y
183CONFIG_NET_KEY_MIGRATE=y 186CONFIG_NET_KEY_MIGRATE=y
184CONFIG_INET=y 187CONFIG_INET=y
@@ -409,6 +412,7 @@ CONFIG_NET_CLS_ROUTE=y
409# 412#
410# CONFIG_CFG80211 is not set 413# CONFIG_CFG80211 is not set
411CONFIG_WIRELESS_EXT=y 414CONFIG_WIRELESS_EXT=y
415# CONFIG_WIRELESS_EXT_SYSFS is not set
412# CONFIG_MAC80211 is not set 416# CONFIG_MAC80211 is not set
413CONFIG_IEEE80211=m 417CONFIG_IEEE80211=m
414# CONFIG_IEEE80211_DEBUG is not set 418# CONFIG_IEEE80211_DEBUG is not set
@@ -428,7 +432,9 @@ CONFIG_IEEE80211_CRYPT_TKIP=m
428CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 432CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
429CONFIG_STANDALONE=y 433CONFIG_STANDALONE=y
430CONFIG_PREVENT_FIRMWARE_BUILD=y 434CONFIG_PREVENT_FIRMWARE_BUILD=y
431CONFIG_FW_LOADER=m 435CONFIG_FW_LOADER=y
436# CONFIG_FIRMWARE_IN_KERNEL is not set
437CONFIG_EXTRA_FIRMWARE=""
432# CONFIG_SYS_HYPERVISOR is not set 438# CONFIG_SYS_HYPERVISOR is not set
433CONFIG_CONNECTOR=m 439CONFIG_CONNECTOR=m
434# CONFIG_MTD is not set 440# CONFIG_MTD is not set
@@ -452,6 +458,7 @@ CONFIG_CDROM_PKTCDVD=m
452CONFIG_CDROM_PKTCDVD_BUFFERS=8 458CONFIG_CDROM_PKTCDVD_BUFFERS=8
453# CONFIG_CDROM_PKTCDVD_WCACHE is not set 459# CONFIG_CDROM_PKTCDVD_WCACHE is not set
454CONFIG_ATA_OVER_ETH=m 460CONFIG_ATA_OVER_ETH=m
461# CONFIG_BLK_DEV_HD is not set
455CONFIG_MISC_DEVICES=y 462CONFIG_MISC_DEVICES=y
456# CONFIG_EEPROM_93CX6 is not set 463# CONFIG_EEPROM_93CX6 is not set
457# CONFIG_ENCLOSURE_SERVICES is not set 464# CONFIG_ENCLOSURE_SERVICES is not set
@@ -462,6 +469,7 @@ CONFIG_BLK_DEV_IDE=y
462# 469#
463# Please see Documentation/ide/ide.txt for help/info on IDE drives 470# Please see Documentation/ide/ide.txt for help/info on IDE drives
464# 471#
472CONFIG_IDE_ATAPI=y
465# CONFIG_BLK_DEV_IDE_SATA is not set 473# CONFIG_BLK_DEV_IDE_SATA is not set
466CONFIG_BLK_DEV_IDEDISK=y 474CONFIG_BLK_DEV_IDEDISK=y
467# CONFIG_IDEDISK_MULTI_MODE is not set 475# CONFIG_IDEDISK_MULTI_MODE is not set
@@ -479,8 +487,6 @@ CONFIG_IDE_PROC_FS=y
479# CONFIG_BLK_DEV_PLATFORM is not set 487# CONFIG_BLK_DEV_PLATFORM is not set
480CONFIG_BLK_DEV_FALCON_IDE=y 488CONFIG_BLK_DEV_FALCON_IDE=y
481# CONFIG_BLK_DEV_IDEDMA is not set 489# CONFIG_BLK_DEV_IDEDMA is not set
482# CONFIG_BLK_DEV_HD_ONLY is not set
483# CONFIG_BLK_DEV_HD is not set
484 490
485# 491#
486# SCSI device support 492# SCSI device support
@@ -530,6 +536,7 @@ CONFIG_ISCSI_TCP=m
530CONFIG_ATARI_SCSI=y 536CONFIG_ATARI_SCSI=y
531# CONFIG_ATARI_SCSI_TOSHIBA_DELAY is not set 537# CONFIG_ATARI_SCSI_TOSHIBA_DELAY is not set
532# CONFIG_ATARI_SCSI_RESET_BOOT is not set 538# CONFIG_ATARI_SCSI_RESET_BOOT is not set
539# CONFIG_SCSI_DH is not set
533CONFIG_MD=y 540CONFIG_MD=y
534CONFIG_BLK_DEV_MD=m 541CONFIG_BLK_DEV_MD=m
535CONFIG_MD_LINEAR=m 542CONFIG_MD_LINEAR=m
@@ -538,7 +545,7 @@ CONFIG_MD_RAID1=m
538# CONFIG_MD_RAID10 is not set 545# CONFIG_MD_RAID10 is not set
539CONFIG_MD_RAID456=m 546CONFIG_MD_RAID456=m
540CONFIG_MD_RAID5_RESHAPE=y 547CONFIG_MD_RAID5_RESHAPE=y
541CONFIG_MD_MULTIPATH=m 548# CONFIG_MD_MULTIPATH is not set
542# CONFIG_MD_FAULTY is not set 549# CONFIG_MD_FAULTY is not set
543CONFIG_BLK_DEV_DM=m 550CONFIG_BLK_DEV_DM=m
544# CONFIG_DM_DEBUG is not set 551# CONFIG_DM_DEBUG is not set
@@ -547,13 +554,9 @@ CONFIG_DM_SNAPSHOT=m
547CONFIG_DM_MIRROR=m 554CONFIG_DM_MIRROR=m
548CONFIG_DM_ZERO=m 555CONFIG_DM_ZERO=m
549CONFIG_DM_MULTIPATH=m 556CONFIG_DM_MULTIPATH=m
550CONFIG_DM_MULTIPATH_EMC=m
551CONFIG_DM_MULTIPATH_RDAC=m
552CONFIG_DM_MULTIPATH_HP=m
553# CONFIG_DM_DELAY is not set 557# CONFIG_DM_DELAY is not set
554CONFIG_DM_UEVENT=y 558CONFIG_DM_UEVENT=y
555CONFIG_NETDEVICES=y 559CONFIG_NETDEVICES=y
556# CONFIG_NETDEVICES_MULTIQUEUE is not set
557CONFIG_DUMMY=m 560CONFIG_DUMMY=m
558# CONFIG_BONDING is not set 561# CONFIG_BONDING is not set
559CONFIG_MACVLAN=m 562CONFIG_MACVLAN=m
@@ -666,6 +669,7 @@ CONFIG_SERIO_LIBPS2=y
666# Character devices 669# Character devices
667# 670#
668CONFIG_VT=y 671CONFIG_VT=y
672CONFIG_CONSOLE_TRANSLATIONS=y
669CONFIG_VT_CONSOLE=y 673CONFIG_VT_CONSOLE=y
670CONFIG_HW_CONSOLE=y 674CONFIG_HW_CONSOLE=y
671CONFIG_VT_HW_CONSOLE_BINDING=y 675CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -700,6 +704,7 @@ CONFIG_GEN_RTC_X=y
700# CONFIG_POWER_SUPPLY is not set 704# CONFIG_POWER_SUPPLY is not set
701# CONFIG_HWMON is not set 705# CONFIG_HWMON is not set
702# CONFIG_THERMAL is not set 706# CONFIG_THERMAL is not set
707# CONFIG_THERMAL_HWMON is not set
703# CONFIG_WATCHDOG is not set 708# CONFIG_WATCHDOG is not set
704 709
705# 710#
@@ -711,8 +716,10 @@ CONFIG_SSB_POSSIBLE=y
711# 716#
712# Multifunction device drivers 717# Multifunction device drivers
713# 718#
719# CONFIG_MFD_CORE is not set
714# CONFIG_MFD_SM501 is not set 720# CONFIG_MFD_SM501 is not set
715# CONFIG_HTC_PASIC3 is not set 721# CONFIG_HTC_PASIC3 is not set
722# CONFIG_MFD_TMIO is not set
716 723
717# 724#
718# Multimedia devices 725# Multimedia devices
@@ -782,10 +789,6 @@ CONFIG_LOGO=y
782CONFIG_LOGO_LINUX_MONO=y 789CONFIG_LOGO_LINUX_MONO=y
783CONFIG_LOGO_LINUX_VGA16=y 790CONFIG_LOGO_LINUX_VGA16=y
784CONFIG_LOGO_LINUX_CLUT224=y 791CONFIG_LOGO_LINUX_CLUT224=y
785
786#
787# Sound
788#
789CONFIG_SOUND=m 792CONFIG_SOUND=m
790CONFIG_DMASOUND_ATARI=m 793CONFIG_DMASOUND_ATARI=m
791CONFIG_DMASOUND=m 794CONFIG_DMASOUND=m
@@ -799,6 +802,7 @@ CONFIG_HIDRAW=y
799# CONFIG_NEW_LEDS is not set 802# CONFIG_NEW_LEDS is not set
800# CONFIG_ACCESSIBILITY is not set 803# CONFIG_ACCESSIBILITY is not set
801# CONFIG_RTC_CLASS is not set 804# CONFIG_RTC_CLASS is not set
805# CONFIG_DMADEVICES is not set
802# CONFIG_AUXDISPLAY is not set 806# CONFIG_AUXDISPLAY is not set
803# CONFIG_UIO is not set 807# CONFIG_UIO is not set
804 808
@@ -806,11 +810,8 @@ CONFIG_HIDRAW=y
806# Character devices 810# Character devices
807# 811#
808CONFIG_ATARI_MFPSER=m 812CONFIG_ATARI_MFPSER=m
809CONFIG_ATARI_SCC=y
810CONFIG_ATARI_SCC_DMA=y
811CONFIG_ATARI_MIDI=m 813CONFIG_ATARI_MIDI=m
812CONFIG_ATARI_DSP56K=m 814CONFIG_ATARI_DSP56K=m
813# CONFIG_SERIAL_CONSOLE is not set
814 815
815# 816#
816# File systems 817# File systems
@@ -820,8 +821,10 @@ CONFIG_EXT2_FS=y
820# CONFIG_EXT2_FS_XIP is not set 821# CONFIG_EXT2_FS_XIP is not set
821CONFIG_EXT3_FS=y 822CONFIG_EXT3_FS=y
822# CONFIG_EXT3_FS_XATTR is not set 823# CONFIG_EXT3_FS_XATTR is not set
823# CONFIG_EXT4DEV_FS is not set 824CONFIG_EXT4DEV_FS=y
825# CONFIG_EXT4DEV_FS_XATTR is not set
824CONFIG_JBD=y 826CONFIG_JBD=y
827CONFIG_JBD2=y
825CONFIG_REISERFS_FS=m 828CONFIG_REISERFS_FS=m
826# CONFIG_REISERFS_CHECK is not set 829# CONFIG_REISERFS_CHECK is not set
827# CONFIG_REISERFS_PROC_INFO is not set 830# CONFIG_REISERFS_PROC_INFO is not set
@@ -840,6 +843,7 @@ CONFIG_XFS_FS=m
840CONFIG_OCFS2_FS=m 843CONFIG_OCFS2_FS=m
841CONFIG_OCFS2_FS_O2CB=m 844CONFIG_OCFS2_FS_O2CB=m
842CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m 845CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
846# CONFIG_OCFS2_FS_STATS is not set
843# CONFIG_OCFS2_DEBUG_MASKLOG is not set 847# CONFIG_OCFS2_DEBUG_MASKLOG is not set
844# CONFIG_OCFS2_DEBUG_FS is not set 848# CONFIG_OCFS2_DEBUG_FS is not set
845CONFIG_DNOTIFY=y 849CONFIG_DNOTIFY=y
@@ -899,6 +903,7 @@ CONFIG_HFSPLUS_FS=m
899CONFIG_CRAMFS=m 903CONFIG_CRAMFS=m
900# CONFIG_VXFS_FS is not set 904# CONFIG_VXFS_FS is not set
901CONFIG_MINIX_FS=y 905CONFIG_MINIX_FS=y
906# CONFIG_OMFS_FS is not set
902CONFIG_HPFS_FS=m 907CONFIG_HPFS_FS=m
903# CONFIG_QNX4FS_FS is not set 908# CONFIG_QNX4FS_FS is not set
904# CONFIG_ROMFS_FS is not set 909# CONFIG_ROMFS_FS is not set
@@ -920,7 +925,6 @@ CONFIG_LOCKD_V4=y
920CONFIG_EXPORTFS=m 925CONFIG_EXPORTFS=m
921CONFIG_NFS_COMMON=y 926CONFIG_NFS_COMMON=y
922CONFIG_SUNRPC=m 927CONFIG_SUNRPC=m
923CONFIG_SUNRPC_BIND34=y
924# CONFIG_RPCSEC_GSS_KRB5 is not set 928# CONFIG_RPCSEC_GSS_KRB5 is not set
925# CONFIG_RPCSEC_GSS_SPKM3 is not set 929# CONFIG_RPCSEC_GSS_SPKM3 is not set
926CONFIG_SMB_FS=m 930CONFIG_SMB_FS=m
@@ -929,7 +933,6 @@ CONFIG_SMB_NLS_REMOTE="cp437"
929# CONFIG_CIFS is not set 933# CONFIG_CIFS is not set
930# CONFIG_NCP_FS is not set 934# CONFIG_NCP_FS is not set
931CONFIG_CODA_FS=m 935CONFIG_CODA_FS=m
932# CONFIG_CODA_FS_OLD_API is not set
933# CONFIG_AFS_FS is not set 936# CONFIG_AFS_FS is not set
934 937
935# 938#
@@ -994,6 +997,8 @@ CONFIG_MAGIC_SYSRQ=y
994# CONFIG_HEADERS_CHECK is not set 997# CONFIG_HEADERS_CHECK is not set
995# CONFIG_DEBUG_KERNEL is not set 998# CONFIG_DEBUG_KERNEL is not set
996CONFIG_DEBUG_BUGVERBOSE=y 999CONFIG_DEBUG_BUGVERBOSE=y
1000CONFIG_DEBUG_MEMORY_INIT=y
1001CONFIG_SYSCTL_SYSCALL_CHECK=y
997# CONFIG_SAMPLES is not set 1002# CONFIG_SAMPLES is not set
998 1003
999# 1004#
@@ -1053,6 +1058,10 @@ CONFIG_CRYPTO_CRC32C=m
1053CONFIG_CRYPTO_MD4=m 1058CONFIG_CRYPTO_MD4=m
1054CONFIG_CRYPTO_MD5=m 1059CONFIG_CRYPTO_MD5=m
1055CONFIG_CRYPTO_MICHAEL_MIC=m 1060CONFIG_CRYPTO_MICHAEL_MIC=m
1061CONFIG_CRYPTO_RMD128=m
1062CONFIG_CRYPTO_RMD160=m
1063CONFIG_CRYPTO_RMD256=m
1064CONFIG_CRYPTO_RMD320=m
1056CONFIG_CRYPTO_SHA1=m 1065CONFIG_CRYPTO_SHA1=m
1057CONFIG_CRYPTO_SHA256=m 1066CONFIG_CRYPTO_SHA256=m
1058CONFIG_CRYPTO_SHA512=m 1067CONFIG_CRYPTO_SHA512=m
@@ -1094,6 +1103,7 @@ CONFIG_BITREVERSE=y
1094# CONFIG_GENERIC_FIND_NEXT_BIT is not set 1103# CONFIG_GENERIC_FIND_NEXT_BIT is not set
1095CONFIG_CRC_CCITT=m 1104CONFIG_CRC_CCITT=m
1096CONFIG_CRC16=y 1105CONFIG_CRC16=y
1106CONFIG_CRC_T10DIF=y
1097CONFIG_CRC_ITU_T=m 1107CONFIG_CRC_ITU_T=m
1098CONFIG_CRC32=y 1108CONFIG_CRC32=y
1099# CONFIG_CRC7 is not set 1109# CONFIG_CRC7 is not set
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index a0a9b30bb502..2e44af0fe54a 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc4 3# Linux kernel version: 2.6.27-rc6
4# Wed May 28 22:47:35 2008 4# Wed Sep 10 09:02:03 2008
5# 5#
6CONFIG_M68K=y 6CONFIG_M68K=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -52,7 +52,6 @@ CONFIG_SYSCTL=y
52# CONFIG_EMBEDDED is not set 52# CONFIG_EMBEDDED is not set
53CONFIG_UID16=y 53CONFIG_UID16=y
54CONFIG_SYSCTL_SYSCALL=y 54CONFIG_SYSCTL_SYSCALL=y
55CONFIG_SYSCTL_SYSCALL_CHECK=y
56CONFIG_KALLSYMS=y 55CONFIG_KALLSYMS=y
57# CONFIG_KALLSYMS_EXTRA_PASS is not set 56# CONFIG_KALLSYMS_EXTRA_PASS is not set
58CONFIG_HOTPLUG=y 57CONFIG_HOTPLUG=y
@@ -75,10 +74,16 @@ CONFIG_SLAB=y
75# CONFIG_PROFILING is not set 74# CONFIG_PROFILING is not set
76# CONFIG_MARKERS is not set 75# CONFIG_MARKERS is not set
77# CONFIG_HAVE_OPROFILE is not set 76# CONFIG_HAVE_OPROFILE is not set
77# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
78# CONFIG_HAVE_IOREMAP_PROT is not set
78# CONFIG_HAVE_KPROBES is not set 79# CONFIG_HAVE_KPROBES is not set
79# CONFIG_HAVE_KRETPROBES is not set 80# CONFIG_HAVE_KRETPROBES is not set
81# CONFIG_HAVE_ARCH_TRACEHOOK is not set
80# CONFIG_HAVE_DMA_ATTRS is not set 82# CONFIG_HAVE_DMA_ATTRS is not set
83# CONFIG_USE_GENERIC_SMP_HELPERS is not set
84# CONFIG_HAVE_CLK is not set
81CONFIG_PROC_PAGE_MONITOR=y 85CONFIG_PROC_PAGE_MONITOR=y
86# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
82CONFIG_SLABINFO=y 87CONFIG_SLABINFO=y
83CONFIG_RT_MUTEXES=y 88CONFIG_RT_MUTEXES=y
84# CONFIG_TINY_SHMEM is not set 89# CONFIG_TINY_SHMEM is not set
@@ -95,6 +100,7 @@ CONFIG_BLOCK=y
95# CONFIG_BLK_DEV_IO_TRACE is not set 100# CONFIG_BLK_DEV_IO_TRACE is not set
96# CONFIG_LSF is not set 101# CONFIG_LSF is not set
97CONFIG_BLK_DEV_BSG=y 102CONFIG_BLK_DEV_BSG=y
103# CONFIG_BLK_DEV_INTEGRITY is not set
98 104
99# 105#
100# IO Schedulers 106# IO Schedulers
@@ -163,10 +169,6 @@ CONFIG_BINFMT_MISC=m
163CONFIG_PROC_HARDWARE=y 169CONFIG_PROC_HARDWARE=y
164CONFIG_ZONE_DMA=y 170CONFIG_ZONE_DMA=y
165# CONFIG_ARCH_SUPPORTS_MSI is not set 171# CONFIG_ARCH_SUPPORTS_MSI is not set
166
167#
168# Networking
169#
170CONFIG_NET=y 172CONFIG_NET=y
171 173
172# 174#
@@ -180,6 +182,7 @@ CONFIG_XFRM=y
180# CONFIG_XFRM_SUB_POLICY is not set 182# CONFIG_XFRM_SUB_POLICY is not set
181CONFIG_XFRM_MIGRATE=y 183CONFIG_XFRM_MIGRATE=y
182# CONFIG_XFRM_STATISTICS is not set 184# CONFIG_XFRM_STATISTICS is not set
185CONFIG_XFRM_IPCOMP=m
183CONFIG_NET_KEY=y 186CONFIG_NET_KEY=y
184CONFIG_NET_KEY_MIGRATE=y 187CONFIG_NET_KEY_MIGRATE=y
185CONFIG_INET=y 188CONFIG_INET=y
@@ -413,6 +416,7 @@ CONFIG_NET_CLS_ROUTE=y
413# 416#
414# CONFIG_CFG80211 is not set 417# CONFIG_CFG80211 is not set
415CONFIG_WIRELESS_EXT=y 418CONFIG_WIRELESS_EXT=y
419# CONFIG_WIRELESS_EXT_SYSFS is not set
416# CONFIG_MAC80211 is not set 420# CONFIG_MAC80211 is not set
417CONFIG_IEEE80211=m 421CONFIG_IEEE80211=m
418# CONFIG_IEEE80211_DEBUG is not set 422# CONFIG_IEEE80211_DEBUG is not set
@@ -432,7 +436,9 @@ CONFIG_IEEE80211_CRYPT_TKIP=m
432CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 436CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
433CONFIG_STANDALONE=y 437CONFIG_STANDALONE=y
434CONFIG_PREVENT_FIRMWARE_BUILD=y 438CONFIG_PREVENT_FIRMWARE_BUILD=y
435CONFIG_FW_LOADER=m 439CONFIG_FW_LOADER=y
440# CONFIG_FIRMWARE_IN_KERNEL is not set
441CONFIG_EXTRA_FIRMWARE=""
436# CONFIG_SYS_HYPERVISOR is not set 442# CONFIG_SYS_HYPERVISOR is not set
437CONFIG_CONNECTOR=m 443CONFIG_CONNECTOR=m
438# CONFIG_MTD is not set 444# CONFIG_MTD is not set
@@ -450,6 +456,7 @@ CONFIG_CDROM_PKTCDVD=m
450CONFIG_CDROM_PKTCDVD_BUFFERS=8 456CONFIG_CDROM_PKTCDVD_BUFFERS=8
451# CONFIG_CDROM_PKTCDVD_WCACHE is not set 457# CONFIG_CDROM_PKTCDVD_WCACHE is not set
452CONFIG_ATA_OVER_ETH=m 458CONFIG_ATA_OVER_ETH=m
459# CONFIG_BLK_DEV_HD is not set
453CONFIG_MISC_DEVICES=y 460CONFIG_MISC_DEVICES=y
454# CONFIG_EEPROM_93CX6 is not set 461# CONFIG_EEPROM_93CX6 is not set
455# CONFIG_ENCLOSURE_SERVICES is not set 462# CONFIG_ENCLOSURE_SERVICES is not set
@@ -503,6 +510,7 @@ CONFIG_ISCSI_TCP=m
503CONFIG_53C700_BE_BUS=y 510CONFIG_53C700_BE_BUS=y
504# CONFIG_SCSI_DEBUG is not set 511# CONFIG_SCSI_DEBUG is not set
505CONFIG_BVME6000_SCSI=y 512CONFIG_BVME6000_SCSI=y
513# CONFIG_SCSI_DH is not set
506CONFIG_MD=y 514CONFIG_MD=y
507CONFIG_BLK_DEV_MD=m 515CONFIG_BLK_DEV_MD=m
508CONFIG_MD_LINEAR=m 516CONFIG_MD_LINEAR=m
@@ -511,7 +519,7 @@ CONFIG_MD_RAID1=m
511# CONFIG_MD_RAID10 is not set 519# CONFIG_MD_RAID10 is not set
512CONFIG_MD_RAID456=m 520CONFIG_MD_RAID456=m
513CONFIG_MD_RAID5_RESHAPE=y 521CONFIG_MD_RAID5_RESHAPE=y
514CONFIG_MD_MULTIPATH=m 522# CONFIG_MD_MULTIPATH is not set
515# CONFIG_MD_FAULTY is not set 523# CONFIG_MD_FAULTY is not set
516CONFIG_BLK_DEV_DM=m 524CONFIG_BLK_DEV_DM=m
517# CONFIG_DM_DEBUG is not set 525# CONFIG_DM_DEBUG is not set
@@ -520,13 +528,9 @@ CONFIG_DM_SNAPSHOT=m
520CONFIG_DM_MIRROR=m 528CONFIG_DM_MIRROR=m
521CONFIG_DM_ZERO=m 529CONFIG_DM_ZERO=m
522CONFIG_DM_MULTIPATH=m 530CONFIG_DM_MULTIPATH=m
523CONFIG_DM_MULTIPATH_EMC=m
524CONFIG_DM_MULTIPATH_RDAC=m
525CONFIG_DM_MULTIPATH_HP=m
526# CONFIG_DM_DELAY is not set 531# CONFIG_DM_DELAY is not set
527CONFIG_DM_UEVENT=y 532CONFIG_DM_UEVENT=y
528CONFIG_NETDEVICES=y 533CONFIG_NETDEVICES=y
529# CONFIG_NETDEVICES_MULTIQUEUE is not set
530CONFIG_DUMMY=m 534CONFIG_DUMMY=m
531# CONFIG_BONDING is not set 535# CONFIG_BONDING is not set
532CONFIG_MACVLAN=m 536CONFIG_MACVLAN=m
@@ -631,6 +635,7 @@ CONFIG_SERIO_LIBPS2=m
631# Character devices 635# Character devices
632# 636#
633CONFIG_VT=y 637CONFIG_VT=y
638CONFIG_CONSOLE_TRANSLATIONS=y
634CONFIG_VT_CONSOLE=y 639CONFIG_VT_CONSOLE=y
635CONFIG_HW_CONSOLE=y 640CONFIG_HW_CONSOLE=y
636CONFIG_VT_HW_CONSOLE_BINDING=y 641CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -661,6 +666,7 @@ CONFIG_GEN_RTC_X=y
661# CONFIG_POWER_SUPPLY is not set 666# CONFIG_POWER_SUPPLY is not set
662# CONFIG_HWMON is not set 667# CONFIG_HWMON is not set
663# CONFIG_THERMAL is not set 668# CONFIG_THERMAL is not set
669# CONFIG_THERMAL_HWMON is not set
664# CONFIG_WATCHDOG is not set 670# CONFIG_WATCHDOG is not set
665 671
666# 672#
@@ -672,8 +678,10 @@ CONFIG_SSB_POSSIBLE=y
672# 678#
673# Multifunction device drivers 679# Multifunction device drivers
674# 680#
681# CONFIG_MFD_CORE is not set
675# CONFIG_MFD_SM501 is not set 682# CONFIG_MFD_SM501 is not set
676# CONFIG_HTC_PASIC3 is not set 683# CONFIG_HTC_PASIC3 is not set
684# CONFIG_MFD_TMIO is not set
677 685
678# 686#
679# Multimedia devices 687# Multimedia devices
@@ -708,10 +716,6 @@ CONFIG_SSB_POSSIBLE=y
708# Console display driver support 716# Console display driver support
709# 717#
710CONFIG_DUMMY_CONSOLE=y 718CONFIG_DUMMY_CONSOLE=y
711
712#
713# Sound
714#
715# CONFIG_SOUND is not set 719# CONFIG_SOUND is not set
716CONFIG_HID_SUPPORT=y 720CONFIG_HID_SUPPORT=y
717CONFIG_HID=m 721CONFIG_HID=m
@@ -723,6 +727,7 @@ CONFIG_HIDRAW=y
723# CONFIG_NEW_LEDS is not set 727# CONFIG_NEW_LEDS is not set
724# CONFIG_ACCESSIBILITY is not set 728# CONFIG_ACCESSIBILITY is not set
725# CONFIG_RTC_CLASS is not set 729# CONFIG_RTC_CLASS is not set
730# CONFIG_DMADEVICES is not set
726# CONFIG_UIO is not set 731# CONFIG_UIO is not set
727 732
728# 733#
@@ -759,6 +764,7 @@ CONFIG_XFS_FS=m
759CONFIG_OCFS2_FS=m 764CONFIG_OCFS2_FS=m
760CONFIG_OCFS2_FS_O2CB=m 765CONFIG_OCFS2_FS_O2CB=m
761CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m 766CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
767# CONFIG_OCFS2_FS_STATS is not set
762# CONFIG_OCFS2_DEBUG_MASKLOG is not set 768# CONFIG_OCFS2_DEBUG_MASKLOG is not set
763# CONFIG_OCFS2_DEBUG_FS is not set 769# CONFIG_OCFS2_DEBUG_FS is not set
764CONFIG_DNOTIFY=y 770CONFIG_DNOTIFY=y
@@ -818,6 +824,7 @@ CONFIG_HFSPLUS_FS=m
818CONFIG_CRAMFS=m 824CONFIG_CRAMFS=m
819# CONFIG_VXFS_FS is not set 825# CONFIG_VXFS_FS is not set
820CONFIG_MINIX_FS=y 826CONFIG_MINIX_FS=y
827# CONFIG_OMFS_FS is not set
821CONFIG_HPFS_FS=m 828CONFIG_HPFS_FS=m
822# CONFIG_QNX4FS_FS is not set 829# CONFIG_QNX4FS_FS is not set
823# CONFIG_ROMFS_FS is not set 830# CONFIG_ROMFS_FS is not set
@@ -830,18 +837,17 @@ CONFIG_NFS_FS=y
830CONFIG_NFS_V3=y 837CONFIG_NFS_V3=y
831# CONFIG_NFS_V3_ACL is not set 838# CONFIG_NFS_V3_ACL is not set
832CONFIG_NFS_V4=y 839CONFIG_NFS_V4=y
840CONFIG_ROOT_NFS=y
833CONFIG_NFSD=m 841CONFIG_NFSD=m
834CONFIG_NFSD_V3=y 842CONFIG_NFSD_V3=y
835# CONFIG_NFSD_V3_ACL is not set 843# CONFIG_NFSD_V3_ACL is not set
836# CONFIG_NFSD_V4 is not set 844# CONFIG_NFSD_V4 is not set
837CONFIG_ROOT_NFS=y
838CONFIG_LOCKD=y 845CONFIG_LOCKD=y
839CONFIG_LOCKD_V4=y 846CONFIG_LOCKD_V4=y
840CONFIG_EXPORTFS=m 847CONFIG_EXPORTFS=m
841CONFIG_NFS_COMMON=y 848CONFIG_NFS_COMMON=y
842CONFIG_SUNRPC=y 849CONFIG_SUNRPC=y
843CONFIG_SUNRPC_GSS=y 850CONFIG_SUNRPC_GSS=y
844CONFIG_SUNRPC_BIND34=y
845CONFIG_RPCSEC_GSS_KRB5=y 851CONFIG_RPCSEC_GSS_KRB5=y
846# CONFIG_RPCSEC_GSS_SPKM3 is not set 852# CONFIG_RPCSEC_GSS_SPKM3 is not set
847CONFIG_SMB_FS=m 853CONFIG_SMB_FS=m
@@ -850,7 +856,6 @@ CONFIG_SMB_NLS_REMOTE="cp437"
850# CONFIG_CIFS is not set 856# CONFIG_CIFS is not set
851# CONFIG_NCP_FS is not set 857# CONFIG_NCP_FS is not set
852CONFIG_CODA_FS=m 858CONFIG_CODA_FS=m
853# CONFIG_CODA_FS_OLD_API is not set
854# CONFIG_AFS_FS is not set 859# CONFIG_AFS_FS is not set
855 860
856# 861#
@@ -915,6 +920,8 @@ CONFIG_MAGIC_SYSRQ=y
915# CONFIG_HEADERS_CHECK is not set 920# CONFIG_HEADERS_CHECK is not set
916# CONFIG_DEBUG_KERNEL is not set 921# CONFIG_DEBUG_KERNEL is not set
917CONFIG_DEBUG_BUGVERBOSE=y 922CONFIG_DEBUG_BUGVERBOSE=y
923CONFIG_DEBUG_MEMORY_INIT=y
924CONFIG_SYSCTL_SYSCALL_CHECK=y
918# CONFIG_SAMPLES is not set 925# CONFIG_SAMPLES is not set
919 926
920# 927#
@@ -974,6 +981,10 @@ CONFIG_CRYPTO_CRC32C=m
974CONFIG_CRYPTO_MD4=m 981CONFIG_CRYPTO_MD4=m
975CONFIG_CRYPTO_MD5=y 982CONFIG_CRYPTO_MD5=y
976CONFIG_CRYPTO_MICHAEL_MIC=m 983CONFIG_CRYPTO_MICHAEL_MIC=m
984CONFIG_CRYPTO_RMD128=m
985CONFIG_CRYPTO_RMD160=m
986CONFIG_CRYPTO_RMD256=m
987CONFIG_CRYPTO_RMD320=m
977CONFIG_CRYPTO_SHA1=m 988CONFIG_CRYPTO_SHA1=m
978CONFIG_CRYPTO_SHA256=m 989CONFIG_CRYPTO_SHA256=m
979CONFIG_CRYPTO_SHA512=m 990CONFIG_CRYPTO_SHA512=m
@@ -1015,6 +1026,7 @@ CONFIG_BITREVERSE=m
1015# CONFIG_GENERIC_FIND_NEXT_BIT is not set 1026# CONFIG_GENERIC_FIND_NEXT_BIT is not set
1016CONFIG_CRC_CCITT=m 1027CONFIG_CRC_CCITT=m
1017CONFIG_CRC16=m 1028CONFIG_CRC16=m
1029CONFIG_CRC_T10DIF=y
1018CONFIG_CRC_ITU_T=m 1030CONFIG_CRC_ITU_T=m
1019CONFIG_CRC32=m 1031CONFIG_CRC32=m
1020# CONFIG_CRC7 is not set 1032# CONFIG_CRC7 is not set
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 6778041de262..3570fc89b089 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc4 3# Linux kernel version: 2.6.27-rc6
4# Wed May 28 22:47:35 2008 4# Wed Sep 10 09:02:04 2008
5# 5#
6CONFIG_M68K=y 6CONFIG_M68K=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -52,7 +52,6 @@ CONFIG_SYSCTL=y
52# CONFIG_EMBEDDED is not set 52# CONFIG_EMBEDDED is not set
53CONFIG_UID16=y 53CONFIG_UID16=y
54CONFIG_SYSCTL_SYSCALL=y 54CONFIG_SYSCTL_SYSCALL=y
55CONFIG_SYSCTL_SYSCALL_CHECK=y
56CONFIG_KALLSYMS=y 55CONFIG_KALLSYMS=y
57# CONFIG_KALLSYMS_EXTRA_PASS is not set 56# CONFIG_KALLSYMS_EXTRA_PASS is not set
58CONFIG_HOTPLUG=y 57CONFIG_HOTPLUG=y
@@ -75,10 +74,16 @@ CONFIG_SLAB=y
75# CONFIG_PROFILING is not set 74# CONFIG_PROFILING is not set
76# CONFIG_MARKERS is not set 75# CONFIG_MARKERS is not set
77# CONFIG_HAVE_OPROFILE is not set 76# CONFIG_HAVE_OPROFILE is not set
77# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
78# CONFIG_HAVE_IOREMAP_PROT is not set
78# CONFIG_HAVE_KPROBES is not set 79# CONFIG_HAVE_KPROBES is not set
79# CONFIG_HAVE_KRETPROBES is not set 80# CONFIG_HAVE_KRETPROBES is not set
81# CONFIG_HAVE_ARCH_TRACEHOOK is not set
80# CONFIG_HAVE_DMA_ATTRS is not set 82# CONFIG_HAVE_DMA_ATTRS is not set
83# CONFIG_USE_GENERIC_SMP_HELPERS is not set
84# CONFIG_HAVE_CLK is not set
81CONFIG_PROC_PAGE_MONITOR=y 85CONFIG_PROC_PAGE_MONITOR=y
86# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
82CONFIG_SLABINFO=y 87CONFIG_SLABINFO=y
83CONFIG_RT_MUTEXES=y 88CONFIG_RT_MUTEXES=y
84# CONFIG_TINY_SHMEM is not set 89# CONFIG_TINY_SHMEM is not set
@@ -95,6 +100,7 @@ CONFIG_BLOCK=y
95# CONFIG_BLK_DEV_IO_TRACE is not set 100# CONFIG_BLK_DEV_IO_TRACE is not set
96# CONFIG_LSF is not set 101# CONFIG_LSF is not set
97CONFIG_BLK_DEV_BSG=y 102CONFIG_BLK_DEV_BSG=y
103# CONFIG_BLK_DEV_INTEGRITY is not set
98 104
99# 105#
100# IO Schedulers 106# IO Schedulers
@@ -162,10 +168,6 @@ CONFIG_HEARTBEAT=y
162CONFIG_PROC_HARDWARE=y 168CONFIG_PROC_HARDWARE=y
163CONFIG_ZONE_DMA=y 169CONFIG_ZONE_DMA=y
164# CONFIG_ARCH_SUPPORTS_MSI is not set 170# CONFIG_ARCH_SUPPORTS_MSI is not set
165
166#
167# Networking
168#
169CONFIG_NET=y 171CONFIG_NET=y
170 172
171# 173#
@@ -179,6 +181,7 @@ CONFIG_XFRM=y
179# CONFIG_XFRM_SUB_POLICY is not set 181# CONFIG_XFRM_SUB_POLICY is not set
180CONFIG_XFRM_MIGRATE=y 182CONFIG_XFRM_MIGRATE=y
181# CONFIG_XFRM_STATISTICS is not set 183# CONFIG_XFRM_STATISTICS is not set
184CONFIG_XFRM_IPCOMP=m
182CONFIG_NET_KEY=y 185CONFIG_NET_KEY=y
183CONFIG_NET_KEY_MIGRATE=y 186CONFIG_NET_KEY_MIGRATE=y
184CONFIG_INET=y 187CONFIG_INET=y
@@ -412,6 +415,7 @@ CONFIG_NET_CLS_ROUTE=y
412# 415#
413# CONFIG_CFG80211 is not set 416# CONFIG_CFG80211 is not set
414CONFIG_WIRELESS_EXT=y 417CONFIG_WIRELESS_EXT=y
418# CONFIG_WIRELESS_EXT_SYSFS is not set
415# CONFIG_MAC80211 is not set 419# CONFIG_MAC80211 is not set
416CONFIG_IEEE80211=m 420CONFIG_IEEE80211=m
417# CONFIG_IEEE80211_DEBUG is not set 421# CONFIG_IEEE80211_DEBUG is not set
@@ -431,7 +435,9 @@ CONFIG_IEEE80211_CRYPT_TKIP=m
431CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 435CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
432CONFIG_STANDALONE=y 436CONFIG_STANDALONE=y
433CONFIG_PREVENT_FIRMWARE_BUILD=y 437CONFIG_PREVENT_FIRMWARE_BUILD=y
434CONFIG_FW_LOADER=m 438CONFIG_FW_LOADER=y
439# CONFIG_FIRMWARE_IN_KERNEL is not set
440CONFIG_EXTRA_FIRMWARE=""
435# CONFIG_SYS_HYPERVISOR is not set 441# CONFIG_SYS_HYPERVISOR is not set
436CONFIG_CONNECTOR=m 442CONFIG_CONNECTOR=m
437# CONFIG_MTD is not set 443# CONFIG_MTD is not set
@@ -449,6 +455,7 @@ CONFIG_CDROM_PKTCDVD=m
449CONFIG_CDROM_PKTCDVD_BUFFERS=8 455CONFIG_CDROM_PKTCDVD_BUFFERS=8
450# CONFIG_CDROM_PKTCDVD_WCACHE is not set 456# CONFIG_CDROM_PKTCDVD_WCACHE is not set
451CONFIG_ATA_OVER_ETH=m 457CONFIG_ATA_OVER_ETH=m
458# CONFIG_BLK_DEV_HD is not set
452CONFIG_MISC_DEVICES=y 459CONFIG_MISC_DEVICES=y
453# CONFIG_EEPROM_93CX6 is not set 460# CONFIG_EEPROM_93CX6 is not set
454# CONFIG_ENCLOSURE_SERVICES is not set 461# CONFIG_ENCLOSURE_SERVICES is not set
@@ -500,6 +507,7 @@ CONFIG_SCSI_SRP_TGT_ATTRS=y
500CONFIG_SCSI_LOWLEVEL=y 507CONFIG_SCSI_LOWLEVEL=y
501CONFIG_ISCSI_TCP=m 508CONFIG_ISCSI_TCP=m
502# CONFIG_SCSI_DEBUG is not set 509# CONFIG_SCSI_DEBUG is not set
510# CONFIG_SCSI_DH is not set
503CONFIG_MD=y 511CONFIG_MD=y
504CONFIG_BLK_DEV_MD=m 512CONFIG_BLK_DEV_MD=m
505CONFIG_MD_LINEAR=m 513CONFIG_MD_LINEAR=m
@@ -508,7 +516,7 @@ CONFIG_MD_RAID1=m
508# CONFIG_MD_RAID10 is not set 516# CONFIG_MD_RAID10 is not set
509CONFIG_MD_RAID456=m 517CONFIG_MD_RAID456=m
510CONFIG_MD_RAID5_RESHAPE=y 518CONFIG_MD_RAID5_RESHAPE=y
511CONFIG_MD_MULTIPATH=m 519# CONFIG_MD_MULTIPATH is not set
512# CONFIG_MD_FAULTY is not set 520# CONFIG_MD_FAULTY is not set
513CONFIG_BLK_DEV_DM=m 521CONFIG_BLK_DEV_DM=m
514# CONFIG_DM_DEBUG is not set 522# CONFIG_DM_DEBUG is not set
@@ -517,13 +525,9 @@ CONFIG_DM_SNAPSHOT=m
517CONFIG_DM_MIRROR=m 525CONFIG_DM_MIRROR=m
518CONFIG_DM_ZERO=m 526CONFIG_DM_ZERO=m
519CONFIG_DM_MULTIPATH=m 527CONFIG_DM_MULTIPATH=m
520CONFIG_DM_MULTIPATH_EMC=m
521CONFIG_DM_MULTIPATH_RDAC=m
522CONFIG_DM_MULTIPATH_HP=m
523# CONFIG_DM_DELAY is not set 528# CONFIG_DM_DELAY is not set
524CONFIG_DM_UEVENT=y 529CONFIG_DM_UEVENT=y
525CONFIG_NETDEVICES=y 530CONFIG_NETDEVICES=y
526# CONFIG_NETDEVICES_MULTIQUEUE is not set
527CONFIG_DUMMY=m 531CONFIG_DUMMY=m
528# CONFIG_BONDING is not set 532# CONFIG_BONDING is not set
529CONFIG_MACVLAN=m 533CONFIG_MACVLAN=m
@@ -636,6 +640,7 @@ CONFIG_SERIO_LIBPS2=m
636# Character devices 640# Character devices
637# 641#
638CONFIG_VT=y 642CONFIG_VT=y
643CONFIG_CONSOLE_TRANSLATIONS=y
639CONFIG_VT_CONSOLE=y 644CONFIG_VT_CONSOLE=y
640CONFIG_HW_CONSOLE=y 645CONFIG_HW_CONSOLE=y
641CONFIG_VT_HW_CONSOLE_BINDING=y 646CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -666,6 +671,7 @@ CONFIG_GEN_RTC_X=y
666# CONFIG_POWER_SUPPLY is not set 671# CONFIG_POWER_SUPPLY is not set
667# CONFIG_HWMON is not set 672# CONFIG_HWMON is not set
668# CONFIG_THERMAL is not set 673# CONFIG_THERMAL is not set
674# CONFIG_THERMAL_HWMON is not set
669# CONFIG_WATCHDOG is not set 675# CONFIG_WATCHDOG is not set
670 676
671# 677#
@@ -677,8 +683,10 @@ CONFIG_SSB_POSSIBLE=y
677# 683#
678# Multifunction device drivers 684# Multifunction device drivers
679# 685#
686# CONFIG_MFD_CORE is not set
680# CONFIG_MFD_SM501 is not set 687# CONFIG_MFD_SM501 is not set
681# CONFIG_HTC_PASIC3 is not set 688# CONFIG_HTC_PASIC3 is not set
689# CONFIG_MFD_TMIO is not set
682 690
683# 691#
684# Multimedia devices 692# Multimedia devices
@@ -747,10 +755,6 @@ CONFIG_LOGO=y
747# CONFIG_LOGO_LINUX_MONO is not set 755# CONFIG_LOGO_LINUX_MONO is not set
748# CONFIG_LOGO_LINUX_VGA16 is not set 756# CONFIG_LOGO_LINUX_VGA16 is not set
749CONFIG_LOGO_LINUX_CLUT224=y 757CONFIG_LOGO_LINUX_CLUT224=y
750
751#
752# Sound
753#
754# CONFIG_SOUND is not set 758# CONFIG_SOUND is not set
755CONFIG_HID_SUPPORT=y 759CONFIG_HID_SUPPORT=y
756CONFIG_HID=m 760CONFIG_HID=m
@@ -762,6 +766,7 @@ CONFIG_HIDRAW=y
762# CONFIG_NEW_LEDS is not set 766# CONFIG_NEW_LEDS is not set
763# CONFIG_ACCESSIBILITY is not set 767# CONFIG_ACCESSIBILITY is not set
764# CONFIG_RTC_CLASS is not set 768# CONFIG_RTC_CLASS is not set
769# CONFIG_DMADEVICES is not set
765# CONFIG_UIO is not set 770# CONFIG_UIO is not set
766 771
767# 772#
@@ -796,6 +801,7 @@ CONFIG_XFS_FS=m
796CONFIG_OCFS2_FS=m 801CONFIG_OCFS2_FS=m
797CONFIG_OCFS2_FS_O2CB=m 802CONFIG_OCFS2_FS_O2CB=m
798CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m 803CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
804# CONFIG_OCFS2_FS_STATS is not set
799# CONFIG_OCFS2_DEBUG_MASKLOG is not set 805# CONFIG_OCFS2_DEBUG_MASKLOG is not set
800# CONFIG_OCFS2_DEBUG_FS is not set 806# CONFIG_OCFS2_DEBUG_FS is not set
801CONFIG_DNOTIFY=y 807CONFIG_DNOTIFY=y
@@ -855,6 +861,7 @@ CONFIG_HFSPLUS_FS=m
855CONFIG_CRAMFS=m 861CONFIG_CRAMFS=m
856# CONFIG_VXFS_FS is not set 862# CONFIG_VXFS_FS is not set
857CONFIG_MINIX_FS=y 863CONFIG_MINIX_FS=y
864# CONFIG_OMFS_FS is not set
858CONFIG_HPFS_FS=m 865CONFIG_HPFS_FS=m
859# CONFIG_QNX4FS_FS is not set 866# CONFIG_QNX4FS_FS is not set
860# CONFIG_ROMFS_FS is not set 867# CONFIG_ROMFS_FS is not set
@@ -867,18 +874,17 @@ CONFIG_NFS_FS=y
867CONFIG_NFS_V3=y 874CONFIG_NFS_V3=y
868# CONFIG_NFS_V3_ACL is not set 875# CONFIG_NFS_V3_ACL is not set
869CONFIG_NFS_V4=y 876CONFIG_NFS_V4=y
877CONFIG_ROOT_NFS=y
870CONFIG_NFSD=m 878CONFIG_NFSD=m
871CONFIG_NFSD_V3=y 879CONFIG_NFSD_V3=y
872# CONFIG_NFSD_V3_ACL is not set 880# CONFIG_NFSD_V3_ACL is not set
873# CONFIG_NFSD_V4 is not set 881# CONFIG_NFSD_V4 is not set
874CONFIG_ROOT_NFS=y
875CONFIG_LOCKD=y 882CONFIG_LOCKD=y
876CONFIG_LOCKD_V4=y 883CONFIG_LOCKD_V4=y
877CONFIG_EXPORTFS=m 884CONFIG_EXPORTFS=m
878CONFIG_NFS_COMMON=y 885CONFIG_NFS_COMMON=y
879CONFIG_SUNRPC=y 886CONFIG_SUNRPC=y
880CONFIG_SUNRPC_GSS=y 887CONFIG_SUNRPC_GSS=y
881CONFIG_SUNRPC_BIND34=y
882CONFIG_RPCSEC_GSS_KRB5=y 888CONFIG_RPCSEC_GSS_KRB5=y
883# CONFIG_RPCSEC_GSS_SPKM3 is not set 889# CONFIG_RPCSEC_GSS_SPKM3 is not set
884CONFIG_SMB_FS=m 890CONFIG_SMB_FS=m
@@ -887,7 +893,6 @@ CONFIG_SMB_NLS_REMOTE="cp437"
887# CONFIG_CIFS is not set 893# CONFIG_CIFS is not set
888# CONFIG_NCP_FS is not set 894# CONFIG_NCP_FS is not set
889CONFIG_CODA_FS=m 895CONFIG_CODA_FS=m
890# CONFIG_CODA_FS_OLD_API is not set
891# CONFIG_AFS_FS is not set 896# CONFIG_AFS_FS is not set
892 897
893# 898#
@@ -951,6 +956,8 @@ CONFIG_MAGIC_SYSRQ=y
951# CONFIG_HEADERS_CHECK is not set 956# CONFIG_HEADERS_CHECK is not set
952# CONFIG_DEBUG_KERNEL is not set 957# CONFIG_DEBUG_KERNEL is not set
953CONFIG_DEBUG_BUGVERBOSE=y 958CONFIG_DEBUG_BUGVERBOSE=y
959CONFIG_DEBUG_MEMORY_INIT=y
960CONFIG_SYSCTL_SYSCALL_CHECK=y
954# CONFIG_SAMPLES is not set 961# CONFIG_SAMPLES is not set
955 962
956# 963#
@@ -1010,6 +1017,10 @@ CONFIG_CRYPTO_CRC32C=m
1010CONFIG_CRYPTO_MD4=m 1017CONFIG_CRYPTO_MD4=m
1011CONFIG_CRYPTO_MD5=y 1018CONFIG_CRYPTO_MD5=y
1012CONFIG_CRYPTO_MICHAEL_MIC=m 1019CONFIG_CRYPTO_MICHAEL_MIC=m
1020CONFIG_CRYPTO_RMD128=m
1021CONFIG_CRYPTO_RMD160=m
1022CONFIG_CRYPTO_RMD256=m
1023CONFIG_CRYPTO_RMD320=m
1013CONFIG_CRYPTO_SHA1=m 1024CONFIG_CRYPTO_SHA1=m
1014CONFIG_CRYPTO_SHA256=m 1025CONFIG_CRYPTO_SHA256=m
1015CONFIG_CRYPTO_SHA512=m 1026CONFIG_CRYPTO_SHA512=m
@@ -1051,6 +1062,7 @@ CONFIG_BITREVERSE=y
1051# CONFIG_GENERIC_FIND_NEXT_BIT is not set 1062# CONFIG_GENERIC_FIND_NEXT_BIT is not set
1052CONFIG_CRC_CCITT=m 1063CONFIG_CRC_CCITT=m
1053CONFIG_CRC16=m 1064CONFIG_CRC16=m
1065CONFIG_CRC_T10DIF=y
1054CONFIG_CRC_ITU_T=m 1066CONFIG_CRC_ITU_T=m
1055CONFIG_CRC32=y 1067CONFIG_CRC32=y
1056# CONFIG_CRC7 is not set 1068# CONFIG_CRC7 is not set
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 7cd375740348..db6e8822594a 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.26-rc4
-# Wed May 28 22:47:35 2008
+# Linux kernel version: 2.6.27-rc6
+# Wed Sep 10 09:02:06 2008
 #
 CONFIG_M68K=y
 CONFIG_MMU=y
@@ -52,7 +52,6 @@ CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
@@ -75,10 +74,16 @@ CONFIG_SLAB=y
 # CONFIG_PROFILING is not set
 # CONFIG_MARKERS is not set
 # CONFIG_HAVE_OPROFILE is not set
+# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
+# CONFIG_HAVE_IOREMAP_PROT is not set
 # CONFIG_HAVE_KPROBES is not set
 # CONFIG_HAVE_KRETPROBES is not set
+# CONFIG_HAVE_ARCH_TRACEHOOK is not set
 # CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+# CONFIG_HAVE_CLK is not set
 CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
@@ -95,6 +100,7 @@ CONFIG_BLOCK=y
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -163,10 +169,6 @@ CONFIG_BINFMT_MISC=m
 CONFIG_PROC_HARDWARE=y
 CONFIG_ZONE_DMA=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -180,6 +182,7 @@ CONFIG_XFRM=y
 # CONFIG_XFRM_SUB_POLICY is not set
 CONFIG_XFRM_MIGRATE=y
 # CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
 CONFIG_NET_KEY=y
 CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
@@ -413,6 +416,7 @@ CONFIG_NET_CLS_ROUTE=y
 #
 # CONFIG_CFG80211 is not set
 CONFIG_WIRELESS_EXT=y
+# CONFIG_WIRELESS_EXT_SYSFS is not set
 # CONFIG_MAC80211 is not set
 CONFIG_IEEE80211=m
 # CONFIG_IEEE80211_DEBUG is not set
@@ -432,7 +436,9 @@ CONFIG_IEEE80211_CRYPT_TKIP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=m
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
 # CONFIG_SYS_HYPERVISOR is not set
 CONFIG_CONNECTOR=m
 # CONFIG_MTD is not set
@@ -450,6 +456,7 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_CDROM_PKTCDVD_BUFFERS=8
 # CONFIG_CDROM_PKTCDVD_WCACHE is not set
 CONFIG_ATA_OVER_ETH=m
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
@@ -460,6 +467,7 @@ CONFIG_BLK_DEV_IDE=y
 #
 # Please see Documentation/ide/ide.txt for help/info on IDE drives
 #
+CONFIG_IDE_ATAPI=y
 # CONFIG_BLK_DEV_IDE_SATA is not set
 CONFIG_BLK_DEV_IDEDISK=y
 # CONFIG_IDEDISK_MULTI_MODE is not set
@@ -477,8 +485,6 @@ CONFIG_IDE_PROC_FS=y
 # CONFIG_BLK_DEV_PLATFORM is not set
 CONFIG_BLK_DEV_MAC_IDE=y
 # CONFIG_BLK_DEV_IDEDMA is not set
-# CONFIG_BLK_DEV_HD_ONLY is not set
-# CONFIG_BLK_DEV_HD is not set
 
 #
 # SCSI device support
@@ -527,6 +533,7 @@ CONFIG_ISCSI_TCP=m
 # CONFIG_SCSI_DEBUG is not set
 CONFIG_MAC_SCSI=y
 CONFIG_SCSI_MAC_ESP=y
+# CONFIG_SCSI_DH is not set
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
@@ -535,7 +542,7 @@ CONFIG_MD_RAID1=m
 # CONFIG_MD_RAID10 is not set
 CONFIG_MD_RAID456=m
 CONFIG_MD_RAID5_RESHAPE=y
-CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_MULTIPATH is not set
 # CONFIG_MD_FAULTY is not set
 CONFIG_BLK_DEV_DM=m
 # CONFIG_DM_DEBUG is not set
@@ -544,9 +551,6 @@ CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_EMC=m
-CONFIG_DM_MULTIPATH_RDAC=m
-CONFIG_DM_MULTIPATH_HP=m
 # CONFIG_DM_DELAY is not set
 CONFIG_DM_UEVENT=y
 CONFIG_MACINTOSH_DRIVERS=y
@@ -559,7 +563,6 @@ CONFIG_ADB_CUDA=y
 CONFIG_INPUT_ADBHID=y
 CONFIG_MAC_EMUMOUSEBTN=y
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 CONFIG_DUMMY=m
 # CONFIG_BONDING is not set
 CONFIG_MACVLAN=m
@@ -670,6 +673,7 @@ CONFIG_SERIO_LIBPS2=m
 # Character devices
 #
 CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_HW_CONSOLE=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -700,6 +704,7 @@ CONFIG_GEN_RTC_X=y
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
 # CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 # CONFIG_WATCHDOG is not set
 
 #
@@ -711,8 +716,10 @@ CONFIG_SSB_POSSIBLE=y
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
 
 #
 # Multimedia devices
@@ -784,10 +791,6 @@ CONFIG_LOGO_LINUX_MONO=y
 CONFIG_LOGO_LINUX_VGA16=y
 CONFIG_LOGO_LINUX_CLUT224=y
 CONFIG_LOGO_MAC_CLUT224=y
-
-#
-# Sound
-#
 # CONFIG_SOUND is not set
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=m
@@ -799,6 +802,7 @@ CONFIG_HIDRAW=y
 # CONFIG_NEW_LEDS is not set
 # CONFIG_ACCESSIBILITY is not set
 # CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
 
 #
@@ -836,6 +840,7 @@ CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 CONFIG_OCFS2_FS_O2CB=m
 CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
+# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
 # CONFIG_OCFS2_DEBUG_FS is not set
 CONFIG_DNOTIFY=y
@@ -895,6 +900,7 @@ CONFIG_HFSPLUS_FS=y
 CONFIG_CRAMFS=m
 # CONFIG_VXFS_FS is not set
 CONFIG_MINIX_FS=y
+# CONFIG_OMFS_FS is not set
 CONFIG_HPFS_FS=m
 # CONFIG_QNX4FS_FS is not set
 # CONFIG_ROMFS_FS is not set
@@ -917,7 +923,6 @@ CONFIG_EXPORTFS=m
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
 CONFIG_SUNRPC_GSS=m
-CONFIG_SUNRPC_BIND34=y
 CONFIG_RPCSEC_GSS_KRB5=m
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -926,7 +931,6 @@ CONFIG_SMB_NLS_REMOTE="cp437"
 # CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
 CONFIG_CODA_FS=m
-# CONFIG_CODA_FS_OLD_API is not set
 # CONFIG_AFS_FS is not set
 
 #
@@ -991,6 +995,8 @@ CONFIG_MAGIC_SYSRQ=y
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
 CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_SAMPLES is not set
 
 #
@@ -1050,6 +1056,10 @@ CONFIG_CRYPTO_CRC32C=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA1=m
 CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
@@ -1091,6 +1101,7 @@ CONFIG_BITREVERSE=y
 # CONFIG_GENERIC_FIND_NEXT_BIT is not set
 CONFIG_CRC_CCITT=m
 CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
 CONFIG_CRC_ITU_T=m
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 0747fa3984df..1a806102b999 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.26-rc4
-# Wed May 28 22:47:35 2008
+# Linux kernel version: 2.6.27-rc6
+# Wed Sep 10 09:02:07 2008
 #
 CONFIG_M68K=y
 CONFIG_MMU=y
@@ -52,7 +52,6 @@ CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
@@ -75,10 +74,16 @@ CONFIG_SLAB=y
 # CONFIG_PROFILING is not set
 # CONFIG_MARKERS is not set
 # CONFIG_HAVE_OPROFILE is not set
+# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
+# CONFIG_HAVE_IOREMAP_PROT is not set
 # CONFIG_HAVE_KPROBES is not set
 # CONFIG_HAVE_KRETPROBES is not set
+# CONFIG_HAVE_ARCH_TRACEHOOK is not set
 # CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+# CONFIG_HAVE_CLK is not set
 CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
@@ -95,6 +100,7 @@ CONFIG_BLOCK=y
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -173,10 +179,6 @@ CONFIG_GENERIC_ISA_DMA=y
 CONFIG_ZONE_DMA=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 CONFIG_ZORRO_NAMES=y
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -190,6 +192,7 @@ CONFIG_XFRM=y
 # CONFIG_XFRM_SUB_POLICY is not set
 CONFIG_XFRM_MIGRATE=y
 # CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
 CONFIG_NET_KEY=y
 CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
@@ -427,6 +430,7 @@ CONFIG_NET_CLS_ROUTE=y
 #
 # CONFIG_CFG80211 is not set
 CONFIG_WIRELESS_EXT=y
+# CONFIG_WIRELESS_EXT_SYSFS is not set
 # CONFIG_MAC80211 is not set
 CONFIG_IEEE80211=m
 # CONFIG_IEEE80211_DEBUG is not set
@@ -446,7 +450,9 @@ CONFIG_IEEE80211_CRYPT_TKIP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=m
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
 # CONFIG_SYS_HYPERVISOR is not set
 CONFIG_CONNECTOR=m
 # CONFIG_MTD is not set
@@ -476,6 +482,7 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_CDROM_PKTCDVD_BUFFERS=8
 # CONFIG_CDROM_PKTCDVD_WCACHE is not set
 CONFIG_ATA_OVER_ETH=m
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
@@ -486,6 +493,7 @@ CONFIG_BLK_DEV_IDE=y
 #
 # Please see Documentation/ide/ide.txt for help/info on IDE drives
 #
+CONFIG_IDE_ATAPI=y
 # CONFIG_BLK_DEV_IDE_SATA is not set
 CONFIG_BLK_DEV_IDEDISK=y
 # CONFIG_IDEDISK_MULTI_MODE is not set
@@ -508,8 +516,6 @@ CONFIG_BLK_DEV_FALCON_IDE=y
 CONFIG_BLK_DEV_MAC_IDE=y
 CONFIG_BLK_DEV_Q40IDE=y
 # CONFIG_BLK_DEV_IDEDMA is not set
-# CONFIG_BLK_DEV_HD_ONLY is not set
-# CONFIG_BLK_DEV_HD is not set
 
 #
 # SCSI device support
@@ -584,6 +590,7 @@ CONFIG_MVME147_SCSI=y
 CONFIG_MVME16x_SCSI=y
 CONFIG_BVME6000_SCSI=y
 CONFIG_SUN3X_ESP=y
+# CONFIG_SCSI_DH is not set
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
@@ -592,7 +599,7 @@ CONFIG_MD_RAID1=m
 # CONFIG_MD_RAID10 is not set
 CONFIG_MD_RAID456=m
 CONFIG_MD_RAID5_RESHAPE=y
-CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_MULTIPATH is not set
 # CONFIG_MD_FAULTY is not set
 CONFIG_BLK_DEV_DM=m
 # CONFIG_DM_DEBUG is not set
@@ -601,9 +608,6 @@ CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_EMC=m
-CONFIG_DM_MULTIPATH_RDAC=m
-CONFIG_DM_MULTIPATH_HP=m
 # CONFIG_DM_DELAY is not set
 CONFIG_DM_UEVENT=y
 CONFIG_MACINTOSH_DRIVERS=y
@@ -616,7 +620,6 @@ CONFIG_ADB_CUDA=y
 CONFIG_INPUT_ADBHID=y
 CONFIG_MAC_EMUMOUSEBTN=y
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 CONFIG_DUMMY=m
 # CONFIG_BONDING is not set
 CONFIG_MACVLAN=m
@@ -632,7 +635,6 @@ CONFIG_A2065=m
 CONFIG_HYDRA=m
 CONFIG_ZORRO8390=m
 CONFIG_APNE=m
-CONFIG_APOLLO_ELPLUS=y
 CONFIG_MAC8390=y
 CONFIG_MAC89x0=m
 CONFIG_MACSONIC=m
@@ -791,6 +793,7 @@ CONFIG_SERIO_LIBPS2=y
 # Character devices
 #
 CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_HW_CONSOLE=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -827,6 +830,7 @@ CONFIG_GEN_RTC_X=y
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
 # CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 # CONFIG_WATCHDOG is not set
 
 #
@@ -838,8 +842,10 @@ CONFIG_SSB_POSSIBLE=y
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
 
 #
 # Multimedia devices
@@ -923,10 +929,6 @@ CONFIG_LOGO_LINUX_MONO=y
 CONFIG_LOGO_LINUX_VGA16=y
 CONFIG_LOGO_LINUX_CLUT224=y
 CONFIG_LOGO_MAC_CLUT224=y
-
-#
-# Sound
-#
 CONFIG_SOUND=m
 CONFIG_DMASOUND_ATARI=m
 CONFIG_DMASOUND_PAULA=m
@@ -942,6 +944,7 @@ CONFIG_HIDRAW=y
 # CONFIG_NEW_LEDS is not set
 # CONFIG_ACCESSIBILITY is not set
 # CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
 # CONFIG_AUXDISPLAY is not set
 # CONFIG_UIO is not set
 
@@ -949,8 +952,6 @@ CONFIG_HIDRAW=y
 # Character devices
 #
 CONFIG_ATARI_MFPSER=m
-CONFIG_ATARI_SCC=y
-CONFIG_ATARI_SCC_DMA=y
 CONFIG_ATARI_MIDI=m
 CONFIG_ATARI_DSP56K=m
 CONFIG_AMIGA_BUILTIN_SERIAL=y
@@ -972,8 +973,10 @@ CONFIG_EXT2_FS=y
 # CONFIG_EXT2_FS_XIP is not set
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_EXT4DEV_FS is not set
+CONFIG_EXT4DEV_FS=y
+# CONFIG_EXT4DEV_FS_XATTR is not set
 CONFIG_JBD=y
+CONFIG_JBD2=y
 CONFIG_REISERFS_FS=m
 # CONFIG_REISERFS_CHECK is not set
 # CONFIG_REISERFS_PROC_INFO is not set
@@ -992,6 +995,7 @@ CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 CONFIG_OCFS2_FS_O2CB=m
 CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
+# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
 # CONFIG_OCFS2_DEBUG_FS is not set
 CONFIG_DNOTIFY=y
@@ -1051,6 +1055,7 @@ CONFIG_HFSPLUS_FS=y
 CONFIG_CRAMFS=m
 # CONFIG_VXFS_FS is not set
 CONFIG_MINIX_FS=y
+# CONFIG_OMFS_FS is not set
 CONFIG_HPFS_FS=m
 # CONFIG_QNX4FS_FS is not set
 # CONFIG_ROMFS_FS is not set
@@ -1063,18 +1068,17 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 # CONFIG_NFSD_V3_ACL is not set
 # CONFIG_NFSD_V4 is not set
-CONFIG_ROOT_NFS=y
 CONFIG_LOCKD=y
 CONFIG_LOCKD_V4=y
 CONFIG_EXPORTFS=m
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
 CONFIG_SUNRPC_GSS=y
-CONFIG_SUNRPC_BIND34=y
 CONFIG_RPCSEC_GSS_KRB5=y
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -1083,7 +1087,6 @@ CONFIG_SMB_NLS_REMOTE="cp437"
 # CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
 CONFIG_CODA_FS=m
-# CONFIG_CODA_FS_OLD_API is not set
 # CONFIG_AFS_FS is not set
 
 #
@@ -1152,6 +1155,8 @@ CONFIG_MAGIC_SYSRQ=y
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
 CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_SAMPLES is not set
 
 #
@@ -1211,6 +1216,10 @@ CONFIG_CRYPTO_CRC32C=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA1=m
 CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
@@ -1252,6 +1261,7 @@ CONFIG_BITREVERSE=y
 # CONFIG_GENERIC_FIND_NEXT_BIT is not set
 CONFIG_CRC_CCITT=m
 CONFIG_CRC16=y
+CONFIG_CRC_T10DIF=y
 CONFIG_CRC_ITU_T=m
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index e7a8246840b5..cacb5aef6a37 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.26-rc4
-# Wed May 28 22:47:35 2008
+# Linux kernel version: 2.6.27-rc6
+# Wed Sep 10 09:02:08 2008
 #
 CONFIG_M68K=y
 CONFIG_MMU=y
@@ -52,7 +52,6 @@ CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
@@ -75,10 +74,16 @@ CONFIG_SLAB=y
 # CONFIG_PROFILING is not set
 # CONFIG_MARKERS is not set
 # CONFIG_HAVE_OPROFILE is not set
+# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
+# CONFIG_HAVE_IOREMAP_PROT is not set
 # CONFIG_HAVE_KPROBES is not set
 # CONFIG_HAVE_KRETPROBES is not set
+# CONFIG_HAVE_ARCH_TRACEHOOK is not set
 # CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+# CONFIG_HAVE_CLK is not set
 CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
@@ -95,6 +100,7 @@ CONFIG_BLOCK=y
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -163,10 +169,6 @@ CONFIG_BINFMT_MISC=m
 CONFIG_PROC_HARDWARE=y
 CONFIG_ZONE_DMA=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -180,6 +182,7 @@ CONFIG_XFRM=y
 # CONFIG_XFRM_SUB_POLICY is not set
 CONFIG_XFRM_MIGRATE=y
 # CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
 CONFIG_NET_KEY=y
 CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
@@ -413,6 +416,7 @@ CONFIG_NET_CLS_ROUTE=y
 #
 # CONFIG_CFG80211 is not set
 CONFIG_WIRELESS_EXT=y
+# CONFIG_WIRELESS_EXT_SYSFS is not set
 # CONFIG_MAC80211 is not set
 CONFIG_IEEE80211=m
 # CONFIG_IEEE80211_DEBUG is not set
@@ -432,7 +436,9 @@ CONFIG_IEEE80211_CRYPT_TKIP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=m
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
 # CONFIG_SYS_HYPERVISOR is not set
 CONFIG_CONNECTOR=m
 # CONFIG_MTD is not set
@@ -450,6 +456,7 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_CDROM_PKTCDVD_BUFFERS=8
 # CONFIG_CDROM_PKTCDVD_WCACHE is not set
 CONFIG_ATA_OVER_ETH=m
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
@@ -502,6 +509,7 @@ CONFIG_SCSI_LOWLEVEL=y
 CONFIG_ISCSI_TCP=m
 # CONFIG_SCSI_DEBUG is not set
 CONFIG_MVME147_SCSI=y
+# CONFIG_SCSI_DH is not set
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
@@ -510,7 +518,7 @@ CONFIG_MD_RAID1=m
 # CONFIG_MD_RAID10 is not set
 CONFIG_MD_RAID456=m
 CONFIG_MD_RAID5_RESHAPE=y
-CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_MULTIPATH is not set
 # CONFIG_MD_FAULTY is not set
 CONFIG_BLK_DEV_DM=m
 # CONFIG_DM_DEBUG is not set
@@ -519,13 +527,9 @@ CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_EMC=m
-CONFIG_DM_MULTIPATH_RDAC=m
-CONFIG_DM_MULTIPATH_HP=m
 # CONFIG_DM_DELAY is not set
 CONFIG_DM_UEVENT=y
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 CONFIG_DUMMY=m
 # CONFIG_BONDING is not set
 CONFIG_MACVLAN=m
@@ -630,6 +634,7 @@ CONFIG_SERIO_LIBPS2=m
 # Character devices
 #
 CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_HW_CONSOLE=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -660,6 +665,7 @@ CONFIG_GEN_RTC_X=y
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
 # CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 # CONFIG_WATCHDOG is not set
 
 #
@@ -671,8 +677,10 @@ CONFIG_SSB_POSSIBLE=y
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
 
 #
 # Multimedia devices
@@ -707,10 +715,6 @@ CONFIG_SSB_POSSIBLE=y
 # Console display driver support
 #
 CONFIG_DUMMY_CONSOLE=y
-
-#
-# Sound
-#
 # CONFIG_SOUND is not set
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=m
@@ -722,6 +726,7 @@ CONFIG_HIDRAW=y
 # CONFIG_NEW_LEDS is not set
 # CONFIG_ACCESSIBILITY is not set
 # CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
 
 #
@@ -758,6 +763,7 @@ CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 CONFIG_OCFS2_FS_O2CB=m
 CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
+# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
 # CONFIG_OCFS2_DEBUG_FS is not set
 CONFIG_DNOTIFY=y
@@ -817,6 +823,7 @@ CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 # CONFIG_VXFS_FS is not set
 CONFIG_MINIX_FS=y
+# CONFIG_OMFS_FS is not set
 CONFIG_HPFS_FS=m
 # CONFIG_QNX4FS_FS is not set
 # CONFIG_ROMFS_FS is not set
@@ -829,18 +836,17 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 # CONFIG_NFSD_V3_ACL is not set
 # CONFIG_NFSD_V4 is not set
-CONFIG_ROOT_NFS=y
 CONFIG_LOCKD=y
 CONFIG_LOCKD_V4=y
 CONFIG_EXPORTFS=m
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
 CONFIG_SUNRPC_GSS=y
-CONFIG_SUNRPC_BIND34=y
 CONFIG_RPCSEC_GSS_KRB5=y
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -849,7 +855,6 @@ CONFIG_SMB_NLS_REMOTE="cp437"
 # CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
 CONFIG_CODA_FS=m
-# CONFIG_CODA_FS_OLD_API is not set
 # CONFIG_AFS_FS is not set
 
 #
@@ -914,6 +919,8 @@ CONFIG_MAGIC_SYSRQ=y
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
 CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_SAMPLES is not set
 
 #
@@ -973,6 +980,10 @@ CONFIG_CRYPTO_CRC32C=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA1=m
 CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
@@ -1014,6 +1025,7 @@ CONFIG_BITREVERSE=y
 # CONFIG_GENERIC_FIND_NEXT_BIT is not set
 CONFIG_CRC_CCITT=m
 CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
 CONFIG_CRC_ITU_T=m
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index ab536eb172bb..a183e25e348d 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.26-rc4
-# Wed May 28 22:47:35 2008
+# Linux kernel version: 2.6.27-rc6
+# Wed Sep 10 09:02:09 2008
 #
 CONFIG_M68K=y
 CONFIG_MMU=y
@@ -52,7 +52,6 @@ CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
@@ -75,10 +74,16 @@ CONFIG_SLAB=y
 # CONFIG_PROFILING is not set
 # CONFIG_MARKERS is not set
 # CONFIG_HAVE_OPROFILE is not set
+# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
+# CONFIG_HAVE_IOREMAP_PROT is not set
 # CONFIG_HAVE_KPROBES is not set
 # CONFIG_HAVE_KRETPROBES is not set
+# CONFIG_HAVE_ARCH_TRACEHOOK is not set
 # CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+# CONFIG_HAVE_CLK is not set
 CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
@@ -95,6 +100,7 @@ CONFIG_BLOCK=y
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -163,10 +169,6 @@ CONFIG_BINFMT_MISC=m
 CONFIG_PROC_HARDWARE=y
 CONFIG_ZONE_DMA=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -180,6 +182,7 @@ CONFIG_XFRM=y
 # CONFIG_XFRM_SUB_POLICY is not set
 CONFIG_XFRM_MIGRATE=y
 # CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
 CONFIG_NET_KEY=y
 CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
@@ -413,6 +416,7 @@ CONFIG_NET_CLS_ROUTE=y
 #
 # CONFIG_CFG80211 is not set
 CONFIG_WIRELESS_EXT=y
+# CONFIG_WIRELESS_EXT_SYSFS is not set
 # CONFIG_MAC80211 is not set
 CONFIG_IEEE80211=m
 # CONFIG_IEEE80211_DEBUG is not set
@@ -432,7 +436,9 @@ CONFIG_IEEE80211_CRYPT_TKIP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=m
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
 # CONFIG_SYS_HYPERVISOR is not set
 CONFIG_CONNECTOR=m
 # CONFIG_MTD is not set
@@ -450,6 +456,7 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_CDROM_PKTCDVD_BUFFERS=8
 # CONFIG_CDROM_PKTCDVD_WCACHE is not set
 CONFIG_ATA_OVER_ETH=m
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
@@ -503,6 +510,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_53C700_BE_BUS=y
 # CONFIG_SCSI_DEBUG is not set
 CONFIG_MVME16x_SCSI=y
+# CONFIG_SCSI_DH is not set
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
@@ -511,7 +519,7 @@ CONFIG_MD_RAID1=m
 # CONFIG_MD_RAID10 is not set
 CONFIG_MD_RAID456=m
 CONFIG_MD_RAID5_RESHAPE=y
-CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_MULTIPATH is not set
 # CONFIG_MD_FAULTY is not set
 CONFIG_BLK_DEV_DM=m
 # CONFIG_DM_DEBUG is not set
@@ -520,13 +528,9 @@ CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_EMC=m
-CONFIG_DM_MULTIPATH_RDAC=m
-CONFIG_DM_MULTIPATH_HP=m
 # CONFIG_DM_DELAY is not set
 CONFIG_DM_UEVENT=y
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 CONFIG_DUMMY=m
 # CONFIG_BONDING is not set
 CONFIG_MACVLAN=m
@@ -631,6 +635,7 @@ CONFIG_SERIO_LIBPS2=m
 # Character devices
 #
 CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_HW_CONSOLE=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -661,6 +666,7 @@ CONFIG_GEN_RTC_X=y
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
 # CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 # CONFIG_WATCHDOG is not set
 
 #
@@ -672,8 +678,10 @@ CONFIG_SSB_POSSIBLE=y
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
 
 #
 # Multimedia devices
@@ -708,10 +716,6 @@ CONFIG_SSB_POSSIBLE=y
 # Console display driver support
 #
 CONFIG_DUMMY_CONSOLE=y
-
-#
-# Sound
-#
 # CONFIG_SOUND is not set
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=m
@@ -723,6 +727,7 @@ CONFIG_HIDRAW=y
 # CONFIG_NEW_LEDS is not set
 # CONFIG_ACCESSIBILITY is not set
 # CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
 
 #
@@ -760,6 +765,7 @@ CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 CONFIG_OCFS2_FS_O2CB=m
 CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
+# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
 # CONFIG_OCFS2_DEBUG_FS is not set
 CONFIG_DNOTIFY=y
@@ -819,6 +825,7 @@ CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 # CONFIG_VXFS_FS is not set
 CONFIG_MINIX_FS=y
+# CONFIG_OMFS_FS is not set
 CONFIG_HPFS_FS=m
 # CONFIG_QNX4FS_FS is not set
 # CONFIG_ROMFS_FS is not set
@@ -831,18 +838,17 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 # CONFIG_NFSD_V3_ACL is not set
 # CONFIG_NFSD_V4 is not set
-CONFIG_ROOT_NFS=y
 CONFIG_LOCKD=y
 CONFIG_LOCKD_V4=y
 CONFIG_EXPORTFS=m
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
 CONFIG_SUNRPC_GSS=y
-CONFIG_SUNRPC_BIND34=y
 CONFIG_RPCSEC_GSS_KRB5=y
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -851,7 +857,6 @@ CONFIG_SMB_NLS_REMOTE="cp437"
 # CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
 CONFIG_CODA_FS=m
-# CONFIG_CODA_FS_OLD_API is not set
 # CONFIG_AFS_FS is not set
 
 #
@@ -916,6 +921,8 @@ CONFIG_MAGIC_SYSRQ=y
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
 CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_SAMPLES is not set
 
 #
@@ -975,6 +982,10 @@ CONFIG_CRYPTO_CRC32C=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA1=m
 CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
@@ -1016,6 +1027,7 @@ CONFIG_BITREVERSE=y
 # CONFIG_GENERIC_FIND_NEXT_BIT is not set
 CONFIG_CRC_CCITT=m
 CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
 CONFIG_CRC_ITU_T=m
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index e05be687b500..72eaff0776b8 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.26-rc4
-# Wed May 28 22:47:35 2008
+# Linux kernel version: 2.6.27-rc6
+# Wed Sep 10 09:02:10 2008
 #
 CONFIG_M68K=y
 CONFIG_MMU=y
@@ -52,7 +52,6 @@ CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
@@ -75,10 +74,16 @@ CONFIG_SLAB=y
 # CONFIG_PROFILING is not set
 # CONFIG_MARKERS is not set
 # CONFIG_HAVE_OPROFILE is not set
+# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
+# CONFIG_HAVE_IOREMAP_PROT is not set
 # CONFIG_HAVE_KPROBES is not set
 # CONFIG_HAVE_KRETPROBES is not set
+# CONFIG_HAVE_ARCH_TRACEHOOK is not set
 # CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+# CONFIG_HAVE_CLK is not set
 CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
@@ -95,6 +100,7 @@ CONFIG_BLOCK=y
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -163,10 +169,6 @@ CONFIG_ISA=y
 CONFIG_GENERIC_ISA_DMA=y
 CONFIG_ZONE_DMA=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -180,6 +182,7 @@ CONFIG_XFRM=y
 # CONFIG_XFRM_SUB_POLICY is not set
 CONFIG_XFRM_MIGRATE=y
 # CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
 CONFIG_NET_KEY=y
 CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
@@ -410,6 +413,7 @@ CONFIG_NET_CLS_ROUTE=y
 #
 # CONFIG_CFG80211 is not set
 CONFIG_WIRELESS_EXT=y
+# CONFIG_WIRELESS_EXT_SYSFS is not set
 # CONFIG_MAC80211 is not set
 CONFIG_IEEE80211=m
 # CONFIG_IEEE80211_DEBUG is not set
@@ -429,7 +433,9 @@ CONFIG_IEEE80211_CRYPT_TKIP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=m
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
 # CONFIG_SYS_HYPERVISOR is not set
 CONFIG_CONNECTOR=m
 # CONFIG_MTD is not set
@@ -448,6 +454,7 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_CDROM_PKTCDVD_BUFFERS=8
 # CONFIG_CDROM_PKTCDVD_WCACHE is not set
 CONFIG_ATA_OVER_ETH=m
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
@@ -458,6 +465,7 @@ CONFIG_BLK_DEV_IDE=y
 #
 # Please see Documentation/ide/ide.txt for help/info on IDE drives
 #
+CONFIG_IDE_ATAPI=y
 # CONFIG_BLK_DEV_IDE_SATA is not set
 CONFIG_BLK_DEV_IDEDISK=y
 # CONFIG_IDEDISK_MULTI_MODE is not set
@@ -475,8 +483,6 @@ CONFIG_IDE_PROC_FS=y
 # CONFIG_BLK_DEV_PLATFORM is not set
 CONFIG_BLK_DEV_Q40IDE=y
 # CONFIG_BLK_DEV_IDEDMA is not set
-# CONFIG_BLK_DEV_HD_ONLY is not set
-# CONFIG_BLK_DEV_HD is not set
 
 #
 # SCSI device support
@@ -536,6 +542,7 @@ CONFIG_ISCSI_TCP=m
 # CONFIG_SCSI_SYM53C416 is not set
 # CONFIG_SCSI_T128 is not set
 # CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
@@ -544,7 +551,7 @@ CONFIG_MD_RAID1=m
 # CONFIG_MD_RAID10 is not set
 CONFIG_MD_RAID456=m
 CONFIG_MD_RAID5_RESHAPE=y
-CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_MULTIPATH is not set
 # CONFIG_MD_FAULTY is not set
 CONFIG_BLK_DEV_DM=m
 # CONFIG_DM_DEBUG is not set
@@ -553,13 +560,9 @@ CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_EMC=m
-CONFIG_DM_MULTIPATH_RDAC=m
-CONFIG_DM_MULTIPATH_HP=m
 # CONFIG_DM_DELAY is not set
 CONFIG_DM_UEVENT=y
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 CONFIG_DUMMY=m
 # CONFIG_BONDING is not set
 CONFIG_MACVLAN=m
@@ -680,6 +683,7 @@ CONFIG_SERIO_LIBPS2=m
 # Character devices
 #
 CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_HW_CONSOLE=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -711,6 +715,7 @@ CONFIG_GEN_RTC_X=y
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
 # CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 # CONFIG_WATCHDOG is not set
 
 #
@@ -722,8 +727,10 @@ CONFIG_SSB_POSSIBLE=y
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
 
 #
 # Multimedia devices
@@ -792,10 +799,6 @@ CONFIG_LOGO=y
 CONFIG_LOGO_LINUX_MONO=y
 CONFIG_LOGO_LINUX_VGA16=y
 CONFIG_LOGO_LINUX_CLUT224=y
-
-#
-# Sound
-#
 CONFIG_SOUND=m
 CONFIG_DMASOUND_Q40=m
 CONFIG_DMASOUND=m
@@ -809,6 +812,7 @@ CONFIG_HIDRAW=y
 # CONFIG_NEW_LEDS is not set
 # CONFIG_ACCESSIBILITY is not set
 # CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
 
 #
@@ -843,6 +847,7 @@ CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 CONFIG_OCFS2_FS_O2CB=m
 CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
+# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
 # CONFIG_OCFS2_DEBUG_FS is not set
 CONFIG_DNOTIFY=y
@@ -902,6 +907,7 @@ CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 # CONFIG_VXFS_FS is not set
 CONFIG_MINIX_FS=y
+# CONFIG_OMFS_FS is not set
 CONFIG_HPFS_FS=m
 # CONFIG_QNX4FS_FS is not set
 # CONFIG_ROMFS_FS is not set
@@ -924,7 +930,6 @@ CONFIG_EXPORTFS=m
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
 CONFIG_SUNRPC_GSS=y
-CONFIG_SUNRPC_BIND34=y
 CONFIG_RPCSEC_GSS_KRB5=y
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -933,7 +938,6 @@ CONFIG_SMB_NLS_REMOTE="cp437"
 # CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
 CONFIG_CODA_FS=m
-# CONFIG_CODA_FS_OLD_API is not set
 # CONFIG_AFS_FS is not set
 
 #
@@ -997,6 +1001,8 @@ CONFIG_MAGIC_SYSRQ=y
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
 CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_SAMPLES is not set
 
 #
@@ -1056,6 +1062,10 @@ CONFIG_CRYPTO_CRC32C=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA1=m
 CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
@@ -1097,6 +1107,7 @@ CONFIG_BITREVERSE=y
 # CONFIG_GENERIC_FIND_NEXT_BIT is not set
 CONFIG_CRC_CCITT=m
 CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
 CONFIG_CRC_ITU_T=m
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 296340d2b315..cb62b96d766e 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.26-rc4
-# Wed May 28 22:47:35 2008
+# Linux kernel version: 2.6.27-rc6
+# Wed Sep 10 09:02:11 2008
 #
 CONFIG_M68K=y
 CONFIG_MMU=y
@@ -52,7 +52,6 @@ CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
@@ -75,10 +74,16 @@ CONFIG_SLAB=y
 # CONFIG_PROFILING is not set
 # CONFIG_MARKERS is not set
 # CONFIG_HAVE_OPROFILE is not set
+# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
+# CONFIG_HAVE_IOREMAP_PROT is not set
 # CONFIG_HAVE_KPROBES is not set
 # CONFIG_HAVE_KRETPROBES is not set
+# CONFIG_HAVE_ARCH_TRACEHOOK is not set
 # CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+# CONFIG_HAVE_CLK is not set
 CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
@@ -95,6 +100,7 @@ CONFIG_BLOCK=y
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -149,10 +155,6 @@ CONFIG_BINFMT_MISC=m
 CONFIG_PROC_HARDWARE=y
 CONFIG_ZONE_DMA=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -166,6 +168,7 @@ CONFIG_XFRM=y
 # CONFIG_XFRM_SUB_POLICY is not set
 CONFIG_XFRM_MIGRATE=y
 # CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
 CONFIG_NET_KEY=y
 CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
@@ -399,6 +402,7 @@ CONFIG_NET_CLS_ROUTE=y
 #
 # CONFIG_CFG80211 is not set
 CONFIG_WIRELESS_EXT=y
+# CONFIG_WIRELESS_EXT_SYSFS is not set
 # CONFIG_MAC80211 is not set
 CONFIG_IEEE80211=m
 # CONFIG_IEEE80211_DEBUG is not set
@@ -418,7 +422,9 @@ CONFIG_IEEE80211_CRYPT_TKIP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=m
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
 # CONFIG_SYS_HYPERVISOR is not set
 CONFIG_CONNECTOR=m
 # CONFIG_MTD is not set
@@ -436,6 +442,7 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_CDROM_PKTCDVD_BUFFERS=8
 # CONFIG_CDROM_PKTCDVD_WCACHE is not set
 CONFIG_ATA_OVER_ETH=m
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
@@ -488,6 +495,7 @@ CONFIG_SCSI_LOWLEVEL=y
 CONFIG_ISCSI_TCP=m
 # CONFIG_SCSI_DEBUG is not set
 CONFIG_SUN3_SCSI=y
+# CONFIG_SCSI_DH is not set
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
@@ -496,7 +504,7 @@ CONFIG_MD_RAID1=m
 # CONFIG_MD_RAID10 is not set
 CONFIG_MD_RAID456=m
 CONFIG_MD_RAID5_RESHAPE=y
-CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_MULTIPATH is not set
500# CONFIG_MD_FAULTY is not set 508# CONFIG_MD_FAULTY is not set
501CONFIG_BLK_DEV_DM=m 509CONFIG_BLK_DEV_DM=m
502# CONFIG_DM_DEBUG is not set 510# CONFIG_DM_DEBUG is not set
@@ -505,13 +513,9 @@ CONFIG_DM_SNAPSHOT=m
505CONFIG_DM_MIRROR=m 513CONFIG_DM_MIRROR=m
506CONFIG_DM_ZERO=m 514CONFIG_DM_ZERO=m
507CONFIG_DM_MULTIPATH=m 515CONFIG_DM_MULTIPATH=m
508CONFIG_DM_MULTIPATH_EMC=m
509CONFIG_DM_MULTIPATH_RDAC=m
510CONFIG_DM_MULTIPATH_HP=m
511# CONFIG_DM_DELAY is not set 516# CONFIG_DM_DELAY is not set
512CONFIG_DM_UEVENT=y 517CONFIG_DM_UEVENT=y
513CONFIG_NETDEVICES=y 518CONFIG_NETDEVICES=y
514# CONFIG_NETDEVICES_MULTIQUEUE is not set
515CONFIG_DUMMY=m 519CONFIG_DUMMY=m
516# CONFIG_BONDING is not set 520# CONFIG_BONDING is not set
517CONFIG_MACVLAN=m 521CONFIG_MACVLAN=m
@@ -527,7 +531,6 @@ CONFIG_SUN3_82586=y
527# CONFIG_IBM_NEW_EMAC_RGMII is not set 531# CONFIG_IBM_NEW_EMAC_RGMII is not set
528# CONFIG_IBM_NEW_EMAC_TAH is not set 532# CONFIG_IBM_NEW_EMAC_TAH is not set
529# CONFIG_IBM_NEW_EMAC_EMAC4 is not set 533# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
530# CONFIG_B44 is not set
531# CONFIG_NETDEV_1000 is not set 534# CONFIG_NETDEV_1000 is not set
532# CONFIG_NETDEV_10000 is not set 535# CONFIG_NETDEV_10000 is not set
533 536
@@ -617,6 +620,7 @@ CONFIG_SERIO_LIBPS2=m
617# Character devices 620# Character devices
618# 621#
619CONFIG_VT=y 622CONFIG_VT=y
623CONFIG_CONSOLE_TRANSLATIONS=y
620CONFIG_VT_CONSOLE=y 624CONFIG_VT_CONSOLE=y
621CONFIG_HW_CONSOLE=y 625CONFIG_HW_CONSOLE=y
622CONFIG_VT_HW_CONSOLE_BINDING=y 626CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -647,19 +651,20 @@ CONFIG_GEN_RTC_X=y
647# CONFIG_POWER_SUPPLY is not set 651# CONFIG_POWER_SUPPLY is not set
648# CONFIG_HWMON is not set 652# CONFIG_HWMON is not set
649# CONFIG_THERMAL is not set 653# CONFIG_THERMAL is not set
654# CONFIG_THERMAL_HWMON is not set
650# CONFIG_WATCHDOG is not set 655# CONFIG_WATCHDOG is not set
651 656
652# 657#
653# Sonics Silicon Backplane 658# Sonics Silicon Backplane
654# 659#
655CONFIG_SSB_POSSIBLE=y
656# CONFIG_SSB is not set
657 660
658# 661#
659# Multifunction device drivers 662# Multifunction device drivers
660# 663#
664# CONFIG_MFD_CORE is not set
661# CONFIG_MFD_SM501 is not set 665# CONFIG_MFD_SM501 is not set
662# CONFIG_HTC_PASIC3 is not set 666# CONFIG_HTC_PASIC3 is not set
667# CONFIG_MFD_TMIO is not set
663 668
664# 669#
665# Multimedia devices 670# Multimedia devices
@@ -727,10 +732,6 @@ CONFIG_LOGO=y
727CONFIG_LOGO_LINUX_MONO=y 732CONFIG_LOGO_LINUX_MONO=y
728CONFIG_LOGO_LINUX_VGA16=y 733CONFIG_LOGO_LINUX_VGA16=y
729CONFIG_LOGO_LINUX_CLUT224=y 734CONFIG_LOGO_LINUX_CLUT224=y
730
731#
732# Sound
733#
734# CONFIG_SOUND is not set 735# CONFIG_SOUND is not set
735CONFIG_HID_SUPPORT=y 736CONFIG_HID_SUPPORT=y
736CONFIG_HID=m 737CONFIG_HID=m
@@ -776,6 +777,7 @@ CONFIG_XFS_FS=m
776CONFIG_OCFS2_FS=m 777CONFIG_OCFS2_FS=m
777CONFIG_OCFS2_FS_O2CB=m 778CONFIG_OCFS2_FS_O2CB=m
778CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m 779CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
780# CONFIG_OCFS2_FS_STATS is not set
779# CONFIG_OCFS2_DEBUG_MASKLOG is not set 781# CONFIG_OCFS2_DEBUG_MASKLOG is not set
780# CONFIG_OCFS2_DEBUG_FS is not set 782# CONFIG_OCFS2_DEBUG_FS is not set
781CONFIG_DNOTIFY=y 783CONFIG_DNOTIFY=y
@@ -835,6 +837,7 @@ CONFIG_HFSPLUS_FS=m
835CONFIG_CRAMFS=m 837CONFIG_CRAMFS=m
836# CONFIG_VXFS_FS is not set 838# CONFIG_VXFS_FS is not set
837CONFIG_MINIX_FS=y 839CONFIG_MINIX_FS=y
840# CONFIG_OMFS_FS is not set
838CONFIG_HPFS_FS=m 841CONFIG_HPFS_FS=m
839# CONFIG_QNX4FS_FS is not set 842# CONFIG_QNX4FS_FS is not set
840# CONFIG_ROMFS_FS is not set 843# CONFIG_ROMFS_FS is not set
@@ -847,18 +850,17 @@ CONFIG_NFS_FS=y
847CONFIG_NFS_V3=y 850CONFIG_NFS_V3=y
848# CONFIG_NFS_V3_ACL is not set 851# CONFIG_NFS_V3_ACL is not set
849CONFIG_NFS_V4=y 852CONFIG_NFS_V4=y
853CONFIG_ROOT_NFS=y
850CONFIG_NFSD=m 854CONFIG_NFSD=m
851CONFIG_NFSD_V3=y 855CONFIG_NFSD_V3=y
852# CONFIG_NFSD_V3_ACL is not set 856# CONFIG_NFSD_V3_ACL is not set
853# CONFIG_NFSD_V4 is not set 857# CONFIG_NFSD_V4 is not set
854CONFIG_ROOT_NFS=y
855CONFIG_LOCKD=y 858CONFIG_LOCKD=y
856CONFIG_LOCKD_V4=y 859CONFIG_LOCKD_V4=y
857CONFIG_EXPORTFS=m 860CONFIG_EXPORTFS=m
858CONFIG_NFS_COMMON=y 861CONFIG_NFS_COMMON=y
859CONFIG_SUNRPC=y 862CONFIG_SUNRPC=y
860CONFIG_SUNRPC_GSS=y 863CONFIG_SUNRPC_GSS=y
861CONFIG_SUNRPC_BIND34=y
862CONFIG_RPCSEC_GSS_KRB5=y 864CONFIG_RPCSEC_GSS_KRB5=y
863# CONFIG_RPCSEC_GSS_SPKM3 is not set 865# CONFIG_RPCSEC_GSS_SPKM3 is not set
864CONFIG_SMB_FS=m 866CONFIG_SMB_FS=m
@@ -867,7 +869,6 @@ CONFIG_SMB_NLS_REMOTE="cp437"
867# CONFIG_CIFS is not set 869# CONFIG_CIFS is not set
868# CONFIG_NCP_FS is not set 870# CONFIG_NCP_FS is not set
869CONFIG_CODA_FS=m 871CONFIG_CODA_FS=m
870# CONFIG_CODA_FS_OLD_API is not set
871# CONFIG_AFS_FS is not set 872# CONFIG_AFS_FS is not set
872 873
873# 874#
@@ -932,6 +933,8 @@ CONFIG_MAGIC_SYSRQ=y
932# CONFIG_HEADERS_CHECK is not set 933# CONFIG_HEADERS_CHECK is not set
933# CONFIG_DEBUG_KERNEL is not set 934# CONFIG_DEBUG_KERNEL is not set
934CONFIG_DEBUG_BUGVERBOSE=y 935CONFIG_DEBUG_BUGVERBOSE=y
936CONFIG_DEBUG_MEMORY_INIT=y
937CONFIG_SYSCTL_SYSCALL_CHECK=y
935# CONFIG_SAMPLES is not set 938# CONFIG_SAMPLES is not set
936 939
937# 940#
@@ -991,6 +994,10 @@ CONFIG_CRYPTO_CRC32C=m
991CONFIG_CRYPTO_MD4=m 994CONFIG_CRYPTO_MD4=m
992CONFIG_CRYPTO_MD5=y 995CONFIG_CRYPTO_MD5=y
993CONFIG_CRYPTO_MICHAEL_MIC=m 996CONFIG_CRYPTO_MICHAEL_MIC=m
997CONFIG_CRYPTO_RMD128=m
998CONFIG_CRYPTO_RMD160=m
999CONFIG_CRYPTO_RMD256=m
1000CONFIG_CRYPTO_RMD320=m
994CONFIG_CRYPTO_SHA1=m 1001CONFIG_CRYPTO_SHA1=m
995CONFIG_CRYPTO_SHA256=m 1002CONFIG_CRYPTO_SHA256=m
996CONFIG_CRYPTO_SHA512=m 1003CONFIG_CRYPTO_SHA512=m
@@ -1032,6 +1039,7 @@ CONFIG_BITREVERSE=y
1032# CONFIG_GENERIC_FIND_NEXT_BIT is not set 1039# CONFIG_GENERIC_FIND_NEXT_BIT is not set
1033CONFIG_CRC_CCITT=m 1040CONFIG_CRC_CCITT=m
1034CONFIG_CRC16=m 1041CONFIG_CRC16=m
1042CONFIG_CRC_T10DIF=y
1035CONFIG_CRC_ITU_T=m 1043CONFIG_CRC_ITU_T=m
1036CONFIG_CRC32=y 1044CONFIG_CRC32=y
1037# CONFIG_CRC7 is not set 1045# CONFIG_CRC7 is not set
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 8d3a416c92bf..04b4363a7050 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc4 3# Linux kernel version: 2.6.27-rc6
4# Wed May 28 22:47:35 2008 4# Wed Sep 10 09:02:12 2008
5# 5#
6CONFIG_M68K=y 6CONFIG_M68K=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -52,7 +52,6 @@ CONFIG_SYSCTL=y
52# CONFIG_EMBEDDED is not set 52# CONFIG_EMBEDDED is not set
53CONFIG_UID16=y 53CONFIG_UID16=y
54CONFIG_SYSCTL_SYSCALL=y 54CONFIG_SYSCTL_SYSCALL=y
55CONFIG_SYSCTL_SYSCALL_CHECK=y
56CONFIG_KALLSYMS=y 55CONFIG_KALLSYMS=y
57# CONFIG_KALLSYMS_EXTRA_PASS is not set 56# CONFIG_KALLSYMS_EXTRA_PASS is not set
58CONFIG_HOTPLUG=y 57CONFIG_HOTPLUG=y
@@ -75,10 +74,16 @@ CONFIG_SLAB=y
75# CONFIG_PROFILING is not set 74# CONFIG_PROFILING is not set
76# CONFIG_MARKERS is not set 75# CONFIG_MARKERS is not set
77# CONFIG_HAVE_OPROFILE is not set 76# CONFIG_HAVE_OPROFILE is not set
77# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
78# CONFIG_HAVE_IOREMAP_PROT is not set
78# CONFIG_HAVE_KPROBES is not set 79# CONFIG_HAVE_KPROBES is not set
79# CONFIG_HAVE_KRETPROBES is not set 80# CONFIG_HAVE_KRETPROBES is not set
81# CONFIG_HAVE_ARCH_TRACEHOOK is not set
80# CONFIG_HAVE_DMA_ATTRS is not set 82# CONFIG_HAVE_DMA_ATTRS is not set
83# CONFIG_USE_GENERIC_SMP_HELPERS is not set
84# CONFIG_HAVE_CLK is not set
81CONFIG_PROC_PAGE_MONITOR=y 85CONFIG_PROC_PAGE_MONITOR=y
86# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
82CONFIG_SLABINFO=y 87CONFIG_SLABINFO=y
83CONFIG_RT_MUTEXES=y 88CONFIG_RT_MUTEXES=y
84# CONFIG_TINY_SHMEM is not set 89# CONFIG_TINY_SHMEM is not set
@@ -95,6 +100,7 @@ CONFIG_BLOCK=y
95# CONFIG_BLK_DEV_IO_TRACE is not set 100# CONFIG_BLK_DEV_IO_TRACE is not set
96# CONFIG_LSF is not set 101# CONFIG_LSF is not set
97CONFIG_BLK_DEV_BSG=y 102CONFIG_BLK_DEV_BSG=y
103# CONFIG_BLK_DEV_INTEGRITY is not set
98 104
99# 105#
100# IO Schedulers 106# IO Schedulers
@@ -160,10 +166,6 @@ CONFIG_BINFMT_MISC=m
160CONFIG_PROC_HARDWARE=y 166CONFIG_PROC_HARDWARE=y
161CONFIG_ZONE_DMA=y 167CONFIG_ZONE_DMA=y
162# CONFIG_ARCH_SUPPORTS_MSI is not set 168# CONFIG_ARCH_SUPPORTS_MSI is not set
163
164#
165# Networking
166#
167CONFIG_NET=y 169CONFIG_NET=y
168 170
169# 171#
@@ -177,6 +179,7 @@ CONFIG_XFRM=y
177# CONFIG_XFRM_SUB_POLICY is not set 179# CONFIG_XFRM_SUB_POLICY is not set
178CONFIG_XFRM_MIGRATE=y 180CONFIG_XFRM_MIGRATE=y
179# CONFIG_XFRM_STATISTICS is not set 181# CONFIG_XFRM_STATISTICS is not set
182CONFIG_XFRM_IPCOMP=m
180CONFIG_NET_KEY=y 183CONFIG_NET_KEY=y
181CONFIG_NET_KEY_MIGRATE=y 184CONFIG_NET_KEY_MIGRATE=y
182CONFIG_INET=y 185CONFIG_INET=y
@@ -410,6 +413,7 @@ CONFIG_NET_CLS_ROUTE=y
410# 413#
411# CONFIG_CFG80211 is not set 414# CONFIG_CFG80211 is not set
412CONFIG_WIRELESS_EXT=y 415CONFIG_WIRELESS_EXT=y
416# CONFIG_WIRELESS_EXT_SYSFS is not set
413# CONFIG_MAC80211 is not set 417# CONFIG_MAC80211 is not set
414CONFIG_IEEE80211=m 418CONFIG_IEEE80211=m
415# CONFIG_IEEE80211_DEBUG is not set 419# CONFIG_IEEE80211_DEBUG is not set
@@ -429,7 +433,9 @@ CONFIG_IEEE80211_CRYPT_TKIP=m
429CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 433CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
430CONFIG_STANDALONE=y 434CONFIG_STANDALONE=y
431CONFIG_PREVENT_FIRMWARE_BUILD=y 435CONFIG_PREVENT_FIRMWARE_BUILD=y
432CONFIG_FW_LOADER=m 436CONFIG_FW_LOADER=y
437# CONFIG_FIRMWARE_IN_KERNEL is not set
438CONFIG_EXTRA_FIRMWARE=""
433# CONFIG_SYS_HYPERVISOR is not set 439# CONFIG_SYS_HYPERVISOR is not set
434CONFIG_CONNECTOR=m 440CONFIG_CONNECTOR=m
435# CONFIG_MTD is not set 441# CONFIG_MTD is not set
@@ -447,6 +453,7 @@ CONFIG_CDROM_PKTCDVD=m
447CONFIG_CDROM_PKTCDVD_BUFFERS=8 453CONFIG_CDROM_PKTCDVD_BUFFERS=8
448# CONFIG_CDROM_PKTCDVD_WCACHE is not set 454# CONFIG_CDROM_PKTCDVD_WCACHE is not set
449CONFIG_ATA_OVER_ETH=m 455CONFIG_ATA_OVER_ETH=m
456# CONFIG_BLK_DEV_HD is not set
450CONFIG_MISC_DEVICES=y 457CONFIG_MISC_DEVICES=y
451# CONFIG_EEPROM_93CX6 is not set 458# CONFIG_EEPROM_93CX6 is not set
452# CONFIG_ENCLOSURE_SERVICES is not set 459# CONFIG_ENCLOSURE_SERVICES is not set
@@ -499,6 +506,7 @@ CONFIG_SCSI_LOWLEVEL=y
499CONFIG_ISCSI_TCP=m 506CONFIG_ISCSI_TCP=m
500# CONFIG_SCSI_DEBUG is not set 507# CONFIG_SCSI_DEBUG is not set
501CONFIG_SUN3X_ESP=y 508CONFIG_SUN3X_ESP=y
509# CONFIG_SCSI_DH is not set
502CONFIG_MD=y 510CONFIG_MD=y
503CONFIG_BLK_DEV_MD=m 511CONFIG_BLK_DEV_MD=m
504CONFIG_MD_LINEAR=m 512CONFIG_MD_LINEAR=m
@@ -507,7 +515,7 @@ CONFIG_MD_RAID1=m
507# CONFIG_MD_RAID10 is not set 515# CONFIG_MD_RAID10 is not set
508CONFIG_MD_RAID456=m 516CONFIG_MD_RAID456=m
509CONFIG_MD_RAID5_RESHAPE=y 517CONFIG_MD_RAID5_RESHAPE=y
510CONFIG_MD_MULTIPATH=m 518# CONFIG_MD_MULTIPATH is not set
511# CONFIG_MD_FAULTY is not set 519# CONFIG_MD_FAULTY is not set
512CONFIG_BLK_DEV_DM=m 520CONFIG_BLK_DEV_DM=m
513# CONFIG_DM_DEBUG is not set 521# CONFIG_DM_DEBUG is not set
@@ -516,13 +524,9 @@ CONFIG_DM_SNAPSHOT=m
516CONFIG_DM_MIRROR=m 524CONFIG_DM_MIRROR=m
517CONFIG_DM_ZERO=m 525CONFIG_DM_ZERO=m
518CONFIG_DM_MULTIPATH=m 526CONFIG_DM_MULTIPATH=m
519CONFIG_DM_MULTIPATH_EMC=m
520CONFIG_DM_MULTIPATH_RDAC=m
521CONFIG_DM_MULTIPATH_HP=m
522# CONFIG_DM_DELAY is not set 527# CONFIG_DM_DELAY is not set
523CONFIG_DM_UEVENT=y 528CONFIG_DM_UEVENT=y
524CONFIG_NETDEVICES=y 529CONFIG_NETDEVICES=y
525# CONFIG_NETDEVICES_MULTIQUEUE is not set
526CONFIG_DUMMY=m 530CONFIG_DUMMY=m
527# CONFIG_BONDING is not set 531# CONFIG_BONDING is not set
528CONFIG_MACVLAN=m 532CONFIG_MACVLAN=m
@@ -627,6 +631,7 @@ CONFIG_SERIO_LIBPS2=m
627# Character devices 631# Character devices
628# 632#
629CONFIG_VT=y 633CONFIG_VT=y
634CONFIG_CONSOLE_TRANSLATIONS=y
630CONFIG_VT_CONSOLE=y 635CONFIG_VT_CONSOLE=y
631CONFIG_HW_CONSOLE=y 636CONFIG_HW_CONSOLE=y
632CONFIG_VT_HW_CONSOLE_BINDING=y 637CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -657,6 +662,7 @@ CONFIG_GEN_RTC_X=y
657# CONFIG_POWER_SUPPLY is not set 662# CONFIG_POWER_SUPPLY is not set
658# CONFIG_HWMON is not set 663# CONFIG_HWMON is not set
659# CONFIG_THERMAL is not set 664# CONFIG_THERMAL is not set
665# CONFIG_THERMAL_HWMON is not set
660# CONFIG_WATCHDOG is not set 666# CONFIG_WATCHDOG is not set
661 667
662# 668#
@@ -668,8 +674,10 @@ CONFIG_SSB_POSSIBLE=y
668# 674#
669# Multifunction device drivers 675# Multifunction device drivers
670# 676#
677# CONFIG_MFD_CORE is not set
671# CONFIG_MFD_SM501 is not set 678# CONFIG_MFD_SM501 is not set
672# CONFIG_HTC_PASIC3 is not set 679# CONFIG_HTC_PASIC3 is not set
680# CONFIG_MFD_TMIO is not set
673 681
674# 682#
675# Multimedia devices 683# Multimedia devices
@@ -737,10 +745,6 @@ CONFIG_LOGO=y
737CONFIG_LOGO_LINUX_MONO=y 745CONFIG_LOGO_LINUX_MONO=y
738CONFIG_LOGO_LINUX_VGA16=y 746CONFIG_LOGO_LINUX_VGA16=y
739CONFIG_LOGO_LINUX_CLUT224=y 747CONFIG_LOGO_LINUX_CLUT224=y
740
741#
742# Sound
743#
744# CONFIG_SOUND is not set 748# CONFIG_SOUND is not set
745CONFIG_HID_SUPPORT=y 749CONFIG_HID_SUPPORT=y
746CONFIG_HID=m 750CONFIG_HID=m
@@ -752,6 +756,7 @@ CONFIG_HIDRAW=y
752# CONFIG_NEW_LEDS is not set 756# CONFIG_NEW_LEDS is not set
753# CONFIG_ACCESSIBILITY is not set 757# CONFIG_ACCESSIBILITY is not set
754# CONFIG_RTC_CLASS is not set 758# CONFIG_RTC_CLASS is not set
759# CONFIG_DMADEVICES is not set
755# CONFIG_UIO is not set 760# CONFIG_UIO is not set
756 761
757# 762#
@@ -786,6 +791,7 @@ CONFIG_XFS_FS=m
786CONFIG_OCFS2_FS=m 791CONFIG_OCFS2_FS=m
787CONFIG_OCFS2_FS_O2CB=m 792CONFIG_OCFS2_FS_O2CB=m
788CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m 793CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
794# CONFIG_OCFS2_FS_STATS is not set
789# CONFIG_OCFS2_DEBUG_MASKLOG is not set 795# CONFIG_OCFS2_DEBUG_MASKLOG is not set
790# CONFIG_OCFS2_DEBUG_FS is not set 796# CONFIG_OCFS2_DEBUG_FS is not set
791CONFIG_DNOTIFY=y 797CONFIG_DNOTIFY=y
@@ -845,6 +851,7 @@ CONFIG_HFSPLUS_FS=m
845CONFIG_CRAMFS=m 851CONFIG_CRAMFS=m
846# CONFIG_VXFS_FS is not set 852# CONFIG_VXFS_FS is not set
847CONFIG_MINIX_FS=y 853CONFIG_MINIX_FS=y
854# CONFIG_OMFS_FS is not set
848CONFIG_HPFS_FS=m 855CONFIG_HPFS_FS=m
849# CONFIG_QNX4FS_FS is not set 856# CONFIG_QNX4FS_FS is not set
850# CONFIG_ROMFS_FS is not set 857# CONFIG_ROMFS_FS is not set
@@ -857,18 +864,17 @@ CONFIG_NFS_FS=y
857CONFIG_NFS_V3=y 864CONFIG_NFS_V3=y
858# CONFIG_NFS_V3_ACL is not set 865# CONFIG_NFS_V3_ACL is not set
859CONFIG_NFS_V4=y 866CONFIG_NFS_V4=y
867CONFIG_ROOT_NFS=y
860CONFIG_NFSD=m 868CONFIG_NFSD=m
861CONFIG_NFSD_V3=y 869CONFIG_NFSD_V3=y
862# CONFIG_NFSD_V3_ACL is not set 870# CONFIG_NFSD_V3_ACL is not set
863# CONFIG_NFSD_V4 is not set 871# CONFIG_NFSD_V4 is not set
864CONFIG_ROOT_NFS=y
865CONFIG_LOCKD=y 872CONFIG_LOCKD=y
866CONFIG_LOCKD_V4=y 873CONFIG_LOCKD_V4=y
867CONFIG_EXPORTFS=m 874CONFIG_EXPORTFS=m
868CONFIG_NFS_COMMON=y 875CONFIG_NFS_COMMON=y
869CONFIG_SUNRPC=y 876CONFIG_SUNRPC=y
870CONFIG_SUNRPC_GSS=y 877CONFIG_SUNRPC_GSS=y
871CONFIG_SUNRPC_BIND34=y
872CONFIG_RPCSEC_GSS_KRB5=y 878CONFIG_RPCSEC_GSS_KRB5=y
873# CONFIG_RPCSEC_GSS_SPKM3 is not set 879# CONFIG_RPCSEC_GSS_SPKM3 is not set
874CONFIG_SMB_FS=m 880CONFIG_SMB_FS=m
@@ -877,7 +883,6 @@ CONFIG_SMB_NLS_REMOTE="cp437"
877# CONFIG_CIFS is not set 883# CONFIG_CIFS is not set
878# CONFIG_NCP_FS is not set 884# CONFIG_NCP_FS is not set
879CONFIG_CODA_FS=m 885CONFIG_CODA_FS=m
880# CONFIG_CODA_FS_OLD_API is not set
881# CONFIG_AFS_FS is not set 886# CONFIG_AFS_FS is not set
882 887
883# 888#
@@ -942,6 +947,8 @@ CONFIG_MAGIC_SYSRQ=y
942# CONFIG_HEADERS_CHECK is not set 947# CONFIG_HEADERS_CHECK is not set
943# CONFIG_DEBUG_KERNEL is not set 948# CONFIG_DEBUG_KERNEL is not set
944CONFIG_DEBUG_BUGVERBOSE=y 949CONFIG_DEBUG_BUGVERBOSE=y
950CONFIG_DEBUG_MEMORY_INIT=y
951CONFIG_SYSCTL_SYSCALL_CHECK=y
945# CONFIG_SAMPLES is not set 952# CONFIG_SAMPLES is not set
946 953
947# 954#
@@ -1001,6 +1008,10 @@ CONFIG_CRYPTO_CRC32C=m
1001CONFIG_CRYPTO_MD4=m 1008CONFIG_CRYPTO_MD4=m
1002CONFIG_CRYPTO_MD5=y 1009CONFIG_CRYPTO_MD5=y
1003CONFIG_CRYPTO_MICHAEL_MIC=m 1010CONFIG_CRYPTO_MICHAEL_MIC=m
1011CONFIG_CRYPTO_RMD128=m
1012CONFIG_CRYPTO_RMD160=m
1013CONFIG_CRYPTO_RMD256=m
1014CONFIG_CRYPTO_RMD320=m
1004CONFIG_CRYPTO_SHA1=m 1015CONFIG_CRYPTO_SHA1=m
1005CONFIG_CRYPTO_SHA256=m 1016CONFIG_CRYPTO_SHA256=m
1006CONFIG_CRYPTO_SHA512=m 1017CONFIG_CRYPTO_SHA512=m
@@ -1042,6 +1053,7 @@ CONFIG_BITREVERSE=y
1042# CONFIG_GENERIC_FIND_NEXT_BIT is not set 1053# CONFIG_GENERIC_FIND_NEXT_BIT is not set
1043CONFIG_CRC_CCITT=m 1054CONFIG_CRC_CCITT=m
1044CONFIG_CRC16=m 1055CONFIG_CRC16=m
1056CONFIG_CRC_T10DIF=y
1045CONFIG_CRC_ITU_T=m 1057CONFIG_CRC_ITU_T=m
1046CONFIG_CRC32=y 1058CONFIG_CRC32=y
1047# CONFIG_CRC7 is not set 1059# CONFIG_CRC7 is not set
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 49896a2a1d72..1e06d233fa83 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -211,6 +211,7 @@ config MIPS_MALTA
211 select SYS_SUPPORTS_64BIT_KERNEL 211 select SYS_SUPPORTS_64BIT_KERNEL
212 select SYS_SUPPORTS_BIG_ENDIAN 212 select SYS_SUPPORTS_BIG_ENDIAN
213 select SYS_SUPPORTS_LITTLE_ENDIAN 213 select SYS_SUPPORTS_LITTLE_ENDIAN
214 select SYS_SUPPORTS_MIPS_CMP if BROKEN # because SYNC_R4K is broken
214 select SYS_SUPPORTS_MULTITHREADING 215 select SYS_SUPPORTS_MULTITHREADING
215 select SYS_SUPPORTS_SMARTMIPS 216 select SYS_SUPPORTS_SMARTMIPS
216 help 217 help
@@ -1403,7 +1404,6 @@ config MIPS_MT_SMTC
1403 depends on CPU_MIPS32_R2 1404 depends on CPU_MIPS32_R2
1404 #depends on CPU_MIPS64_R2 # once there is hardware ... 1405 #depends on CPU_MIPS64_R2 # once there is hardware ...
1405 depends on SYS_SUPPORTS_MULTITHREADING 1406 depends on SYS_SUPPORTS_MULTITHREADING
1406 select GENERIC_CLOCKEVENTS_BROADCAST
1407 select CPU_MIPSR2_IRQ_VI 1407 select CPU_MIPSR2_IRQ_VI
1408 select CPU_MIPSR2_IRQ_EI 1408 select CPU_MIPSR2_IRQ_EI
1409 select MIPS_MT 1409 select MIPS_MT
@@ -1451,32 +1451,17 @@ config MIPS_VPE_LOADER
1451 Includes a loader for loading an elf relocatable object 1451 Includes a loader for loading an elf relocatable object
1452 onto another VPE and running it. 1452 onto another VPE and running it.
1453 1453
1454config MIPS_MT_SMTC_INSTANT_REPLAY
1455 bool "Low-latency Dispatch of Deferred SMTC IPIs"
1456 depends on MIPS_MT_SMTC && !PREEMPT
1457 default y
1458 help
1459 SMTC pseudo-interrupts between TCs are deferred and queued
1460 if the target TC is interrupt-inhibited (IXMT). In the first
1461 SMTC prototypes, these queued IPIs were serviced on return
1462 to user mode, or on entry into the kernel idle loop. The
1463 INSTANT_REPLAY option dispatches them as part of local_irq_restore()
1464 processing, which adds runtime overhead (hence the option to turn
1465 it off), but ensures that IPIs are handled promptly even under
1466 heavy I/O interrupt load.
1467
1468config MIPS_MT_SMTC_IM_BACKSTOP 1454config MIPS_MT_SMTC_IM_BACKSTOP
1469 bool "Use per-TC register bits as backstop for inhibited IM bits" 1455 bool "Use per-TC register bits as backstop for inhibited IM bits"
1470 depends on MIPS_MT_SMTC 1456 depends on MIPS_MT_SMTC
1471 default y 1457 default n
1472 help 1458 help
1473 To support multiple TC microthreads acting as "CPUs" within 1459 To support multiple TC microthreads acting as "CPUs" within
1474 a VPE, VPE-wide interrupt mask bits must be specially manipulated 1460 a VPE, VPE-wide interrupt mask bits must be specially manipulated
1475 during interrupt handling. To support legacy drivers and interrupt 1461 during interrupt handling. To support legacy drivers and interrupt
1476 controller management code, SMTC has a "backstop" to track and 1462 controller management code, SMTC has a "backstop" to track and
1477 if necessary restore the interrupt mask. This has some performance 1463 if necessary restore the interrupt mask. This has some performance
1478 impact on interrupt service overhead. Disable it only if you know 1464 impact on interrupt service overhead.
1479 what you are doing.
1480 1465
1481config MIPS_MT_SMTC_IRQAFF 1466config MIPS_MT_SMTC_IRQAFF
1482 bool "Support IRQ affinity API" 1467 bool "Support IRQ affinity API"
@@ -1486,10 +1471,8 @@ config MIPS_MT_SMTC_IRQAFF
1486 Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.) 1471 Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
1487 for SMTC Linux kernel. Requires platform support, of which 1472 for SMTC Linux kernel. Requires platform support, of which
1488 an example can be found in the MIPS kernel i8259 and Malta 1473 an example can be found in the MIPS kernel i8259 and Malta
1489 platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY 1474 platform code. Adds some overhead to interrupt dispatch, and
1490 be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to 1475 should be used only if you know what you are doing.
1491 interrupt dispatch, and should be used only if you know what
1492 you are doing.
1493 1476
1494config MIPS_VPE_LOADER_TOM 1477config MIPS_VPE_LOADER_TOM
1495 bool "Load VPE program into memory hidden from linux" 1478 bool "Load VPE program into memory hidden from linux"
@@ -1517,6 +1500,18 @@ config MIPS_APSP_KSPD
1517 "exit" syscall notifying other kernel modules the SP program is 1500 "exit" syscall notifying other kernel modules the SP program is
1518 exiting. You probably want to say yes here. 1501 exiting. You probably want to say yes here.
1519 1502
1503config MIPS_CMP
1504 bool "MIPS CMP framework support"
1505 depends on SYS_SUPPORTS_MIPS_CMP
1506 select SYNC_R4K if BROKEN
1507 select SYS_SUPPORTS_SMP
1508 select SYS_SUPPORTS_SCHED_SMT if SMP
1509 select WEAK_ORDERING
1510 default n
1511 help
1512 This is a placeholder option for the GCMP work. It will need to
1513 be handled differently...
1514
1520config SB1_PASS_1_WORKAROUNDS 1515config SB1_PASS_1_WORKAROUNDS
1521 bool 1516 bool
1522 depends on CPU_SB1_PASS_1 1517 depends on CPU_SB1_PASS_1
@@ -1693,6 +1688,9 @@ config SMP
1693config SMP_UP 1688config SMP_UP
1694 bool 1689 bool
1695 1690
1691config SYS_SUPPORTS_MIPS_CMP
1692 bool
1693
1696config SYS_SUPPORTS_SMP 1694config SYS_SUPPORTS_SMP
1697 bool 1695 bool
1698 1696
@@ -1740,17 +1738,6 @@ config NR_CPUS
1740 performance should round up your number of processors to the next 1738 performance should round up your number of processors to the next
1741 power of two. 1739 power of two.
1742 1740
1743config MIPS_CMP
1744 bool "MIPS CMP framework support"
1745 depends on SMP
1746 select SYNC_R4K
1747 select SYS_SUPPORTS_SCHED_SMT
1748 select WEAK_ORDERING
1749 default n
1750 help
1751 This is a placeholder option for the GCMP work. It will need to
1752 be handled differently...
1753
1754source "kernel/time/Kconfig" 1741source "kernel/time/Kconfig"
1755 1742
1756# 1743#
diff --git a/arch/mips/au1000/common/gpio.c b/arch/mips/au1000/common/gpio.c
index b485d94ce8a5..e660ddd611c4 100644
--- a/arch/mips/au1000/common/gpio.c
+++ b/arch/mips/au1000/common/gpio.c
@@ -48,7 +48,7 @@ static void au1xxx_gpio2_write(unsigned gpio, int value)
48{ 48{
49 gpio -= AU1XXX_GPIO_BASE; 49 gpio -= AU1XXX_GPIO_BASE;
50 50
51 gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | (value << gpio); 51 gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio);
52} 52}
53 53
54static int au1xxx_gpio2_direction_input(unsigned gpio) 54static int au1xxx_gpio2_direction_input(unsigned gpio)
@@ -61,7 +61,8 @@ static int au1xxx_gpio2_direction_input(unsigned gpio)
61static int au1xxx_gpio2_direction_output(unsigned gpio, int value) 61static int au1xxx_gpio2_direction_output(unsigned gpio, int value)
62{ 62{
63 gpio -= AU1XXX_GPIO_BASE; 63 gpio -= AU1XXX_GPIO_BASE;
64 gpio2->dir = (0x01 << gpio) | (value << gpio); 64 gpio2->dir |= 0x01 << gpio;
65 gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio);
65 return 0; 66 return 0;
66} 67}
67 68
@@ -90,6 +91,7 @@ static int au1xxx_gpio1_direction_input(unsigned gpio)
90static int au1xxx_gpio1_direction_output(unsigned gpio, int value) 91static int au1xxx_gpio1_direction_output(unsigned gpio, int value)
91{ 92{
92 gpio1->trioutclr = (0x01 & gpio); 93 gpio1->trioutclr = (0x01 & gpio);
94 au1xxx_gpio1_write(gpio, value);
93 return 0; 95 return 0;
94} 96}
95 97
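
To illustrate the !!value normalization in the gpio.c hunk above: without it, a caller passing a non-boolean "high" value (say 4) would shift a bit into the wrong position of the output register. A minimal host-side C sketch of that effect follows; the mask value and register variable are illustrative stand-ins, not the real Au1xxx register layout.

#include <stdint.h>
#include <stdio.h>

#define GPIO2_OUTPUT_ENABLE_MASK 0x8000u      /* illustrative value only */

static uint32_t gpio2_output;                 /* stand-in for gpio2->output */

static void gpio2_write(unsigned gpio, int value)
{
        /* Without !!value, value == 4 would set bit (gpio + 2) instead of
         * bit gpio; !!value collapses any non-zero level to 1, so only
         * the intended data bit is written alongside the enable bit. */
        gpio2_output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) |
                       ((uint32_t)!!value << gpio);
}

int main(void)
{
        gpio2_write(3, 4);                              /* non-boolean "high" */
        printf("output = 0x%08x\n", (unsigned)gpio2_output); /* bit 3, not bit 5 */
        return 0;
}

The second half of the fix, writing the requested level from the direction_output callbacks, follows the same idea.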
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 706f93974797..25775cb54000 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
10 10
11obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o 11obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
12obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o 12obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
13obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
13obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o 14obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
14obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o 15obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
15obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o 16obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0d..4a4c59f2737a 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,6 +12,14 @@
12 12
13#include <asm/smtc_ipi.h> 13#include <asm/smtc_ipi.h>
14#include <asm/time.h> 14#include <asm/time.h>
15#include <asm/cevt-r4k.h>
16
17/*
18 * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
19 * of these routines with SMTC-specific variants.
20 */
21
22#ifndef CONFIG_MIPS_MT_SMTC
15 23
16static int mips_next_event(unsigned long delta, 24static int mips_next_event(unsigned long delta,
17 struct clock_event_device *evt) 25 struct clock_event_device *evt)
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
19 unsigned int cnt; 27 unsigned int cnt;
20 int res; 28 int res;
21 29
22#ifdef CONFIG_MIPS_MT_SMTC
23 {
24 unsigned long flags, vpflags;
25 local_irq_save(flags);
26 vpflags = dvpe();
27#endif
28 cnt = read_c0_count(); 30 cnt = read_c0_count();
29 cnt += delta; 31 cnt += delta;
30 write_c0_compare(cnt); 32 write_c0_compare(cnt);
31 res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; 33 res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
32#ifdef CONFIG_MIPS_MT_SMTC
33 evpe(vpflags);
34 local_irq_restore(flags);
35 }
36#endif
37 return res; 34 return res;
38} 35}
39 36
40static void mips_set_mode(enum clock_event_mode mode, 37#endif /* CONFIG_MIPS_MT_SMTC */
41 struct clock_event_device *evt) 38
39void mips_set_clock_mode(enum clock_event_mode mode,
40 struct clock_event_device *evt)
42{ 41{
43 /* Nothing to do ... */ 42 /* Nothing to do ... */
44} 43}
45 44
46static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); 45DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
47static int cp0_timer_irq_installed; 46int cp0_timer_irq_installed;
48 47
49/* 48#ifndef CONFIG_MIPS_MT_SMTC
50 * Timer ack for an R4k-compatible timer of a known frequency.
51 */
52static void c0_timer_ack(void)
53{
54 write_c0_compare(read_c0_compare());
55}
56 49
57/* 50irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
58 * Possibly handle a performance counter interrupt.
59 * Return true if the timer interrupt should not be checked
60 */
61static inline int handle_perf_irq(int r2)
62{
63 /*
64 * The performance counter overflow interrupt may be shared with the
65 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
66 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
67 * and we can't reliably determine if a counter interrupt has also
68 * happened (!r2) then don't check for a timer interrupt.
69 */
70 return (cp0_perfcount_irq < 0) &&
71 perf_irq() == IRQ_HANDLED &&
72 !r2;
73}
74
75static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
76{ 51{
77 const int r2 = cpu_has_mips_r2; 52 const int r2 = cpu_has_mips_r2;
78 struct clock_event_device *cd; 53 struct clock_event_device *cd;
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
93 * interrupt. Being the paranoiacs we are we check anyway. 68 * interrupt. Being the paranoiacs we are we check anyway.
94 */ 69 */
95 if (!r2 || (read_c0_cause() & (1 << 30))) { 70 if (!r2 || (read_c0_cause() & (1 << 30))) {
96 c0_timer_ack(); 71 /* Clear Count/Compare Interrupt */
97#ifdef CONFIG_MIPS_MT_SMTC 72 write_c0_compare(read_c0_compare());
98 if (cpu_data[cpu].vpe_id)
99 goto out;
100 cpu = 0;
101#endif
102 cd = &per_cpu(mips_clockevent_device, cpu); 73 cd = &per_cpu(mips_clockevent_device, cpu);
103 cd->event_handler(cd); 74 cd->event_handler(cd);
104 } 75 }
@@ -107,65 +78,16 @@ out:
107 return IRQ_HANDLED; 78 return IRQ_HANDLED;
108} 79}
109 80
110static struct irqaction c0_compare_irqaction = { 81#endif /* Not CONFIG_MIPS_MT_SMTC */
82
83struct irqaction c0_compare_irqaction = {
111 .handler = c0_compare_interrupt, 84 .handler = c0_compare_interrupt,
112#ifdef CONFIG_MIPS_MT_SMTC
113 .flags = IRQF_DISABLED,
114#else
115 .flags = IRQF_DISABLED | IRQF_PERCPU, 85 .flags = IRQF_DISABLED | IRQF_PERCPU,
116#endif
117 .name = "timer", 86 .name = "timer",
118}; 87};
119 88
120#ifdef CONFIG_MIPS_MT_SMTC
121DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
122
123static void smtc_set_mode(enum clock_event_mode mode,
124 struct clock_event_device *evt)
125{
126}
127
128static void mips_broadcast(cpumask_t mask)
129{
130 unsigned int cpu;
131
132 for_each_cpu_mask(cpu, mask)
133 smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
134}
135
136static void setup_smtc_dummy_clockevent_device(void)
137{
138 //uint64_t mips_freq = mips_hpt_^frequency;
139 unsigned int cpu = smp_processor_id();
140 struct clock_event_device *cd;
141 89
142 cd = &per_cpu(smtc_dummy_clockevent_device, cpu); 90void mips_event_handler(struct clock_event_device *dev)
143
144 cd->name = "SMTC";
145 cd->features = CLOCK_EVT_FEAT_DUMMY;
146
147 /* Calculate the min / max delta */
148 cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
149 cd->shift = 0; //32;
150 cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
151 cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
152
153 cd->rating = 200;
154 cd->irq = 17; //-1;
155// if (cpu)
156// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
157// else
158 cd->cpumask = cpumask_of_cpu(cpu);
159
160 cd->set_mode = smtc_set_mode;
161
162 cd->broadcast = mips_broadcast;
163
164 clockevents_register_device(cd);
165}
166#endif
167
168static void mips_event_handler(struct clock_event_device *dev)
169{ 91{
170} 92}
171 93
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
177 return (read_c0_cause() >> cp0_compare_irq) & 0x100; 99 return (read_c0_cause() >> cp0_compare_irq) & 0x100;
178} 100}
179 101
180static int c0_compare_int_usable(void) 102/*
103 * Compare interrupt can be routed and latched outside the core,
104 * so a single execution hazard barrier may not be enough to give
105 * it time to clear as seen in the Cause register. 4 times the
106 * pipeline depth seems reasonably conservative, and empirically
107 * works better in configurations with high CPU/bus clock ratios.
108 */
109
110#define compare_change_hazard() \
111 do { \
112 irq_disable_hazard(); \
113 irq_disable_hazard(); \
114 irq_disable_hazard(); \
115 irq_disable_hazard(); \
116 } while (0)
117
118int c0_compare_int_usable(void)
181{ 119{
182 unsigned int delta; 120 unsigned int delta;
183 unsigned int cnt; 121 unsigned int cnt;
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
187 */ 125 */
188 if (c0_compare_int_pending()) { 126 if (c0_compare_int_pending()) {
189 write_c0_compare(read_c0_count()); 127 write_c0_compare(read_c0_count());
190 irq_disable_hazard(); 128 compare_change_hazard();
191 if (c0_compare_int_pending()) 129 if (c0_compare_int_pending())
192 return 0; 130 return 0;
193 } 131 }
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
196 cnt = read_c0_count(); 134 cnt = read_c0_count();
197 cnt += delta; 135 cnt += delta;
198 write_c0_compare(cnt); 136 write_c0_compare(cnt);
199 irq_disable_hazard(); 137 compare_change_hazard();
200 if ((int)(read_c0_count() - cnt) < 0) 138 if ((int)(read_c0_count() - cnt) < 0)
201 break; 139 break;
202 /* increase delta if the timer was already expired */ 140 /* increase delta if the timer was already expired */
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
205 while ((int)(read_c0_count() - cnt) <= 0) 143 while ((int)(read_c0_count() - cnt) <= 0)
206 ; /* Wait for expiry */ 144 ; /* Wait for expiry */
207 145
146 compare_change_hazard();
208 if (!c0_compare_int_pending()) 147 if (!c0_compare_int_pending())
209 return 0; 148 return 0;
210 149
211 write_c0_compare(read_c0_count()); 150 write_c0_compare(read_c0_count());
212 irq_disable_hazard(); 151 compare_change_hazard();
213 if (c0_compare_int_pending()) 152 if (c0_compare_int_pending())
214 return 0; 153 return 0;
215 154
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
219 return 1; 158 return 1;
220} 159}
221 160
161#ifndef CONFIG_MIPS_MT_SMTC
162
222int __cpuinit mips_clockevent_init(void) 163int __cpuinit mips_clockevent_init(void)
223{ 164{
224 uint64_t mips_freq = mips_hpt_frequency; 165 uint64_t mips_freq = mips_hpt_frequency;
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
229 if (!cpu_has_counter || !mips_hpt_frequency) 170 if (!cpu_has_counter || !mips_hpt_frequency)
230 return -ENXIO; 171 return -ENXIO;
231 172
232#ifdef CONFIG_MIPS_MT_SMTC
233 setup_smtc_dummy_clockevent_device();
234
235 /*
236 * On SMTC we only register VPE0's compare interrupt as clockevent
237 * device.
238 */
239 if (cpu)
240 return 0;
241#endif
242
243 if (!c0_compare_int_usable()) 173 if (!c0_compare_int_usable())
244 return -ENXIO; 174 return -ENXIO;
245 175
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
265 195
266 cd->rating = 300; 196 cd->rating = 300;
267 cd->irq = irq; 197 cd->irq = irq;
268#ifdef CONFIG_MIPS_MT_SMTC
269 cd->cpumask = CPU_MASK_ALL;
270#else
271 cd->cpumask = cpumask_of_cpu(cpu); 198 cd->cpumask = cpumask_of_cpu(cpu);
272#endif
273 cd->set_next_event = mips_next_event; 199 cd->set_next_event = mips_next_event;
274 cd->set_mode = mips_set_mode; 200 cd->set_mode = mips_set_clock_mode;
275 cd->event_handler = mips_event_handler; 201 cd->event_handler = mips_event_handler;
276 202
277 clockevents_register_device(cd); 203 clockevents_register_device(cd);
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)
281 207
282 cp0_timer_irq_installed = 1; 208 cp0_timer_irq_installed = 1;
283 209
284#ifdef CONFIG_MIPS_MT_SMTC
285#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
286 setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
287#else
288 setup_irq(irq, &c0_compare_irqaction); 210 setup_irq(irq, &c0_compare_irqaction);
289#endif
290 211
291 return 0; 212 return 0;
292} 213}
214
215#endif /* Not CONFIG_MIPS_MT_SMTC */
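
The "already expired?" test kept in mips_next_event() above relies on viewing the unsigned difference between Count and the programmed Compare value as signed, which stays correct across the 32-bit counter wraparound. A small self-contained C sketch of that test (not kernel code, arbitrary inputs):

#include <stdint.h>
#include <stdio.h>

static int already_expired(uint32_t count, uint32_t compare)
{
        /* The unsigned subtraction wraps modulo 2^32; reinterpreting the
         * difference as signed makes "count is ahead of compare by less
         * than half the counter range" come out positive. */
        return (int32_t)(count - compare) > 0;
}

int main(void)
{
        printf("%d\n", already_expired(0x00000010, 0xfffffff0)); /* 1: count wrapped past compare */
        printf("%d\n", already_expired(0xfffffff0, 0x00000010)); /* 0: compare still ahead */
        return 0;
}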
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
new file mode 100644
index 000000000000..5162fe4b5952
--- /dev/null
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -0,0 +1,321 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2007 MIPS Technologies, Inc.
7 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
8 * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
9 */
10#include <linux/clockchips.h>
11#include <linux/interrupt.h>
12#include <linux/percpu.h>
13
14#include <asm/smtc_ipi.h>
15#include <asm/time.h>
16#include <asm/cevt-r4k.h>
17
18/*
19 * Variant clock event timer support for SMTC on MIPS 34K, 1004K
20 * or other MIPS MT cores.
21 *
22 * Notes on SMTC Support:
23 *
24 * SMTC has multiple microthread TCs pretending to be Linux CPUs.
25 * But there's only one Count/Compare pair per VPE, and Compare
26 * interrupts are taken opportunistically by available TCs
27 * bound to the VPE with the Count register. The new timer
28 * framework provides for global broadcasts, but we really
29 * want VPE-level multicasts for best behavior. So instead
30 * of invoking the high-level clock-event broadcast code,
31 * this version of SMTC support uses the historical SMTC
32 * multicast mechanisms "under the hood", appearing to the
33 * generic clock layer as if the interrupts are per-CPU.
34 *
35 * The approach taken here is to maintain a set of NR_CPUS
36 * virtual timers, and track which "CPU" needs to be alerted
37 * at each event.
38 *
39 * It's unlikely that we'll see a MIPS MT core with more than
40 * 2 VPEs, but we *know* that we won't need to handle more
41 * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements
42 * is always going to be overkill, but always going to be enough.
43 */
44
45unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
46static int smtc_nextinvpe[NR_CPUS];
47
48/*
49 * Timestamps stored are absolute values to be programmed
50 * into Count register. Valid timestamps will never be zero.
51 * If a Zero Count value is actually calculated, it is converted
52 * to be a 1, which will introduce one or two CPU cycles of error
53 * roughly once every four billion events, which at 1000 HZ means
54 * about once every 50 days. If that's actually a problem, one
55 * could alternate squashing 0 to 1 and to -1.
56 */
57
58#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
59#define ISVALID(x) ((x) != 0L)
60
61/*
62 * Time comparison is subtle, as it's really truncated
63 * modular arithmetic.
64 */
65
66#define IS_SOONER(a, b, reference) \
67 (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
68
69/*
70 * CATCHUP_INCREMENT, used when the function falls behind the counter.
71 * Could be an increasing function instead of a constant;
72 */
73
74#define CATCHUP_INCREMENT 64
75
76static int mips_next_event(unsigned long delta,
77 struct clock_event_device *evt)
78{
79 unsigned long flags;
80 unsigned int mtflags;
81 unsigned long timestamp, reference, previous;
82 unsigned long nextcomp = 0L;
83 int vpe = current_cpu_data.vpe_id;
84 int cpu = smp_processor_id();
85 local_irq_save(flags);
86 mtflags = dmt();
87
88 /*
89 * Maintain the per-TC virtual timer
90 * and program the per-VPE shared Count register
91 * as appropriate here...
92 */
93 reference = (unsigned long)read_c0_count();
94 timestamp = MAKEVALID(reference + delta);
95 /*
96 * To really model the clock, we have to catch the case
97 * where the current next-in-VPE timestamp is the old
98 * timestamp for the calling CPU, but the new value is
99 * in fact later. In that case, we have to do a full
100 * scan and discover the new next-in-VPE CPU id and
101 * timestamp.
102 */
103 previous = smtc_nexttime[vpe][cpu];
104 if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
105 && IS_SOONER(previous, timestamp, reference)) {
106 int i;
107 int soonest = cpu;
108
109 /*
110 * Update timestamp array here, so that new
111 * value gets considered along with those of
112 * other virtual CPUs on the VPE.
113 */
114 smtc_nexttime[vpe][cpu] = timestamp;
115 for_each_online_cpu(i) {
116 if (ISVALID(smtc_nexttime[vpe][i])
117 && IS_SOONER(smtc_nexttime[vpe][i],
118 smtc_nexttime[vpe][soonest], reference)) {
119 soonest = i;
120 }
121 }
122 smtc_nextinvpe[vpe] = soonest;
123 nextcomp = smtc_nexttime[vpe][soonest];
124 /*
125 * Otherwise, we don't have to process the whole array rank,
126 * we just have to see if the event horizon has gotten closer.
127 */
128 } else {
129 if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
130 IS_SOONER(timestamp,
131 smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
132 smtc_nextinvpe[vpe] = cpu;
133 nextcomp = timestamp;
134 }
135 /*
136 * Since next-in-VPE may be the same as the executing
137 * virtual CPU, we update the array *after* checking
138 * its value.
139 */
140 smtc_nexttime[vpe][cpu] = timestamp;
141 }
142
143 /*
144 * It may be that, in fact, we don't need to update Compare,
145 * but if we do, we want to make sure we didn't fall into
146 * a crack just behind Count.
147 */
148 if (ISVALID(nextcomp)) {
149 write_c0_compare(nextcomp);
150 ehb();
151 /*
152 * We never return an error, we just make sure
153 * that we trigger the handlers as quickly as
154 * we can if we fell behind.
155 */
156 while ((nextcomp - (unsigned long)read_c0_count())
157 > (unsigned long)LONG_MAX) {
158 nextcomp += CATCHUP_INCREMENT;
159 write_c0_compare(nextcomp);
160 ehb();
161 }
162 }
163 emt(mtflags);
164 local_irq_restore(flags);
165 return 0;
166}
167
168
169void smtc_distribute_timer(int vpe)
170{
171 unsigned long flags;
172 unsigned int mtflags;
173 int cpu;
174 struct clock_event_device *cd;
175 unsigned long nextstamp = 0L;
176 unsigned long reference;
177
178
179repeat:
180 for_each_online_cpu(cpu) {
181 /*
182 * Find virtual CPUs within the current VPE who have
183 * unserviced timer requests whose time is now past.
184 */
185 local_irq_save(flags);
186 mtflags = dmt();
187 if (cpu_data[cpu].vpe_id == vpe &&
188 ISVALID(smtc_nexttime[vpe][cpu])) {
189 reference = (unsigned long)read_c0_count();
190 if ((smtc_nexttime[vpe][cpu] - reference)
191 > (unsigned long)LONG_MAX) {
192 smtc_nexttime[vpe][cpu] = 0L;
193 emt(mtflags);
194 local_irq_restore(flags);
195 /*
196 * We don't send IPIs to ourselves.
197 */
198 if (cpu != smp_processor_id()) {
199 smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
200 } else {
201 cd = &per_cpu(mips_clockevent_device, cpu);
202 cd->event_handler(cd);
203 }
204 } else {
205 /* Local to VPE but Valid Time not yet reached. */
206 if (!ISVALID(nextstamp) ||
207 IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
208 reference)) {
209 smtc_nextinvpe[vpe] = cpu;
210 nextstamp = smtc_nexttime[vpe][cpu];
211 }
212 emt(mtflags);
213 local_irq_restore(flags);
214 }
215 } else {
216 emt(mtflags);
217 local_irq_restore(flags);
218
219 }
220 }
221 /* Reprogram for interrupt at next soonest timestamp for VPE */
222 if (ISVALID(nextstamp)) {
223 write_c0_compare(nextstamp);
224 ehb();
225 if ((nextstamp - (unsigned long)read_c0_count())
226 > (unsigned long)LONG_MAX)
227 goto repeat;
228 }
229}
230
231
232irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
233{
234 int cpu = smp_processor_id();
235
236 /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
237 handle_perf_irq(1);
238
239 if (read_c0_cause() & (1 << 30)) {
240 /* Clear Count/Compare Interrupt */
241 write_c0_compare(read_c0_compare());
242 smtc_distribute_timer(cpu_data[cpu].vpe_id);
243 }
244 return IRQ_HANDLED;
245}
246
247
248int __cpuinit mips_clockevent_init(void)
249{
250 uint64_t mips_freq = mips_hpt_frequency;
251 unsigned int cpu = smp_processor_id();
252 struct clock_event_device *cd;
253 unsigned int irq;
254 int i;
255 int j;
256
257 if (!cpu_has_counter || !mips_hpt_frequency)
258 return -ENXIO;
259 if (cpu == 0) {
260 for (i = 0; i < num_possible_cpus(); i++) {
261 smtc_nextinvpe[i] = 0;
262 for (j = 0; j < num_possible_cpus(); j++)
263 smtc_nexttime[i][j] = 0L;
264 }
265 /*
266 * SMTC also can't have the usability test
267 * run by secondary TCs once Compare is in use.
268 */
269 if (!c0_compare_int_usable())
270 return -ENXIO;
271 }
272
273 /*
274 * With vectored interrupts things are getting platform specific.
275 * get_c0_compare_int is a hook to allow a platform to return the
276 * interrupt number of its liking.
277 */
278 irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
279 if (get_c0_compare_int)
280 irq = get_c0_compare_int();
281
282 cd = &per_cpu(mips_clockevent_device, cpu);
283
284 cd->name = "MIPS";
285 cd->features = CLOCK_EVT_FEAT_ONESHOT;
286
287 /* Calculate the min / max delta */
288 cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
289 cd->shift = 32;
290 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
291 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
292
293 cd->rating = 300;
294 cd->irq = irq;
295 cd->cpumask = cpumask_of_cpu(cpu);
296 cd->set_next_event = mips_next_event;
297 cd->set_mode = mips_set_clock_mode;
298 cd->event_handler = mips_event_handler;
299
300 clockevents_register_device(cd);
301
302 /*
303 * On SMTC we only want to do the data structure
304 * initialization and IRQ setup once.
305 */
306 if (cpu)
307 return 0;
308 /*
309 * And we need the hwmask associated with the c0_compare
310 * vector to be initialized.
311 */
312 irq_hwmask[irq] = (0x100 << cp0_compare_irq);
313 if (cp0_timer_irq_installed)
314 return 0;
315
316 cp0_timer_irq_installed = 1;
317
318 setup_irq(irq, &c0_compare_irqaction);
319
320 return 0;
321}
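
The ordering logic in cevt-smtc.c above rests on the MAKEVALID/ISVALID/IS_SOONER macros: timestamps are absolute Count values, zero is reserved as "no event", and "sooner" is measured forward from the current Count reading, modulo the word size. A stand-alone C sketch with made-up inputs (on the target these are 32-bit unsigned longs; the ordering result here is the same on a 64-bit host):

#include <stdio.h>

#define MAKEVALID(x)  (((x) == 0UL) ? 1UL : (x))
#define ISVALID(x)    ((x) != 0UL)

/* a is sooner than b when, counting forward from reference, a is reached
 * in fewer ticks than b -- even if the counter wraps in between. */
#define IS_SOONER(a, b, reference) \
        (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))

int main(void)
{
        unsigned long ref = 0xfffffff0UL;          /* Count is about to wrap */
        unsigned long a = MAKEVALID(0x00000000UL); /* computed timestamp of 0 squashed to 1 */
        unsigned long b = 0x00001000UL;

        printf("a sooner than b: %d\n", (int)IS_SOONER(a, b, ref)); /* 1 */
        printf("b valid: %d\n", (int)ISVALID(b));                   /* 1 */
        return 0;
}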
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 335a6ae3d594..e621fda8ab37 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -45,18 +45,7 @@ static void r39xx_wait(void)
45 local_irq_enable(); 45 local_irq_enable();
46} 46}
47 47
48/* 48extern void r4k_wait(void);
49 * There is a race when WAIT instruction executed with interrupt
50 * enabled.
51 * But it is implementation-dependent wheter the pipelie restarts when
52 * a non-enabled interrupt is requested.
53 */
54static void r4k_wait(void)
55{
56 __asm__(" .set mips3 \n"
57 " wait \n"
58 " .set mips0 \n");
59}
60 49
61/* 50/*
62 * This variant is preferable as it allows testing need_resched and going to 51 * This variant is preferable as it allows testing need_resched and going to
@@ -65,14 +54,18 @@ static void r4k_wait(void)
65 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes 54 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
66 * using this version a gamble. 55 * using this version a gamble.
67 */ 56 */
68static void r4k_wait_irqoff(void) 57void r4k_wait_irqoff(void)
69{ 58{
70 local_irq_disable(); 59 local_irq_disable();
71 if (!need_resched()) 60 if (!need_resched())
72 __asm__(" .set mips3 \n" 61 __asm__(" .set push \n"
62 " .set mips3 \n"
73 " wait \n" 63 " wait \n"
74 " .set mips0 \n"); 64 " .set pop \n");
75 local_irq_enable(); 65 local_irq_enable();
66 __asm__(" .globl __pastwait \n"
67 "__pastwait: \n");
68 return;
76} 69}
77 70
78/* 71/*
@@ -128,7 +121,7 @@ static int __init wait_disable(char *s)
128 121
129__setup("nowait", wait_disable); 122__setup("nowait", wait_disable);
130 123
131static inline void check_wait(void) 124void __init check_wait(void)
132{ 125{
133 struct cpuinfo_mips *c = &current_cpu_data; 126 struct cpuinfo_mips *c = &current_cpu_data;
134 127
@@ -242,7 +235,6 @@ static inline void check_errata(void)
242 235
243void __init check_bugs32(void) 236void __init check_bugs32(void)
244{ 237{
245 check_wait();
246 check_errata(); 238 check_errata();
247} 239}
248 240
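
The cpu-probe.c change above keeps only r4k_wait_irqoff() in C; the plain r4k_wait now lives in genex.S so the rollback prologue can restart it. The point of the irqoff variant is that the need_resched() check and the wait must not be separated by a window with interrupts enabled. A host-runnable sketch of that check-under-irqs-off pattern (the wait itself is only simulated here):

#include <stdbool.h>
#include <stdio.h>

static volatile bool need_resched_flag;        /* set by a (simulated) interrupt */

static void wait_for_interrupt(void)
{
        /* Stands in for the MIPS "wait" instruction, which still resumes
         * on a pending interrupt even while interrupts are masked. */
        printf("idle: waiting for next interrupt\n");
}

static void r4k_wait_irqoff_sketch(void)
{
        /* local_irq_disable(): nothing can set need_resched_flag between
         * the check below and the wait, so the CPU cannot sleep through
         * a reschedule request. */
        if (!need_resched_flag)
                wait_for_interrupt();
        /* local_irq_enable() */
}

int main(void)
{
        need_resched_flag = false;
        r4k_wait_irqoff_sketch();
        return 0;
}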
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e29598ae939d..ffa331029e08 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -79,11 +79,6 @@ FEXPORT(syscall_exit)
79 79
80FEXPORT(restore_all) # restore full frame 80FEXPORT(restore_all) # restore full frame
81#ifdef CONFIG_MIPS_MT_SMTC 81#ifdef CONFIG_MIPS_MT_SMTC
82/* Detect and execute deferred IPI "interrupts" */
83 LONG_L s0, TI_REGS($28)
84 LONG_S sp, TI_REGS($28)
85 jal deferred_smtc_ipi
86 LONG_S s0, TI_REGS($28)
87#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP 82#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
88/* Re-arm any temporarily masked interrupts not explicitly "acked" */ 83/* Re-arm any temporarily masked interrupts not explicitly "acked" */
89 mfc0 v0, CP0_TCSTATUS 84 mfc0 v0, CP0_TCSTATUS
@@ -112,6 +107,11 @@ FEXPORT(restore_all) # restore full frame
112 xor t0, t0, t3 107 xor t0, t0, t3
113 mtc0 t0, CP0_TCCONTEXT 108 mtc0 t0, CP0_TCCONTEXT
114#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ 109#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
110/* Detect and execute deferred IPI "interrupts" */
111 LONG_L s0, TI_REGS($28)
112 LONG_S sp, TI_REGS($28)
113 jal deferred_smtc_ipi
114 LONG_S s0, TI_REGS($28)
115#endif /* CONFIG_MIPS_MT_SMTC */ 115#endif /* CONFIG_MIPS_MT_SMTC */
116 .set noat 116 .set noat
117 RESTORE_TEMP 117 RESTORE_TEMP
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index c6ada98ee042..01dcbe38fa01 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -20,6 +20,7 @@
20#include <asm/stackframe.h> 20#include <asm/stackframe.h>
21#include <asm/war.h> 21#include <asm/war.h>
22#include <asm/page.h> 22#include <asm/page.h>
23#include <asm/thread_info.h>
23 24
24#define PANIC_PIC(msg) \ 25#define PANIC_PIC(msg) \
25 .set push; \ 26 .set push; \
@@ -126,7 +127,42 @@ handle_vcei:
126 127
127 __FINIT 128 __FINIT
128 129
130 .align 5 /* 32 byte rollback region */
131LEAF(r4k_wait)
132 .set push
133 .set noreorder
134 /* start of rollback region */
135 LONG_L t0, TI_FLAGS($28)
136 nop
137 andi t0, _TIF_NEED_RESCHED
138 bnez t0, 1f
139 nop
140 nop
141 nop
142 .set mips3
143 wait
144 /* end of rollback region (the region size must be power of two) */
145 .set pop
1461:
147 jr ra
148 END(r4k_wait)
149
150 .macro BUILD_ROLLBACK_PROLOGUE handler
151 FEXPORT(rollback_\handler)
152 .set push
153 .set noat
154 MFC0 k0, CP0_EPC
155 PTR_LA k1, r4k_wait
156 ori k0, 0x1f /* 32 byte rollback region */
157 xori k0, 0x1f
158 bne k0, k1, 9f
159 MTC0 k0, CP0_EPC
1609:
161 .set pop
162 .endm
163
129 .align 5 164 .align 5
165BUILD_ROLLBACK_PROLOGUE handle_int
130NESTED(handle_int, PT_SIZE, sp) 166NESTED(handle_int, PT_SIZE, sp)
131#ifdef CONFIG_TRACE_IRQFLAGS 167#ifdef CONFIG_TRACE_IRQFLAGS
132 /* 168 /*
@@ -201,6 +237,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
201 * This prototype is copied to ebase + n*IntCtl.VS and patched 237 * This prototype is copied to ebase + n*IntCtl.VS and patched
202 * to invoke the handler 238 * to invoke the handler
203 */ 239 */
240BUILD_ROLLBACK_PROLOGUE except_vec_vi
204NESTED(except_vec_vi, 0, sp) 241NESTED(except_vec_vi, 0, sp)
205 SAVE_SOME 242 SAVE_SOME
206 SAVE_AT 243 SAVE_AT
@@ -245,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp)
245 and t0, a0, t1 282 and t0, a0, t1
246#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP 283#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
247 mfc0 t2, CP0_TCCONTEXT 284 mfc0 t2, CP0_TCCONTEXT
248 or t0, t0, t2 285 or t2, t0, t2
249 mtc0 t0, CP0_TCCONTEXT 286 mtc0 t2, CP0_TCCONTEXT
250#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ 287#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
251 xor t1, t1, t0 288 xor t1, t1, t0
252 mtc0 t1, CP0_STATUS 289 mtc0 t1, CP0_STATUS
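
BUILD_ROLLBACK_PROLOGUE in the genex.S hunk above decides whether the interrupted PC lies inside the 32-byte r4k_wait region by rounding EPC down with an ori/xori pair. The effect is plain bit masking, sketched here in host C with a made-up, 32-byte-aligned address:

#include <stdint.h>
#include <stdio.h>

static uintptr_t round_down_32(uintptr_t epc)
{
        epc |= 0x1f;    /* ori  k0, 0x1f */
        epc ^= 0x1f;    /* xori k0, 0x1f -- together: epc &= ~0x1f */
        return epc;
}

int main(void)
{
        uintptr_t r4k_wait_addr = 0x80101020;  /* hypothetical region start */
        uintptr_t epc = r4k_wait_addr + 0x14;  /* interrupted inside the region */

        if (round_down_32(epc) == r4k_wait_addr)
                printf("roll EPC back to 0x%lx\n", (unsigned long)r4k_wait_addr);
        return 0;
}

If the rounded EPC matches the start of r4k_wait, the handler rewrites EPC to that start so the interrupted wait sequence re-checks TIF_NEED_RESCHED before waiting again.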
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 361364501d34..492a0a8d70fb 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -22,6 +22,7 @@
22#include <asm/irqflags.h> 22#include <asm/irqflags.h>
23#include <asm/regdef.h> 23#include <asm/regdef.h>
24#include <asm/page.h> 24#include <asm/page.h>
25#include <asm/pgtable-bits.h>
25#include <asm/mipsregs.h> 26#include <asm/mipsregs.h>
26#include <asm/stackframe.h> 27#include <asm/stackframe.h>
27 28
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 8f6d58ede33c..6e152c80cd4a 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -236,8 +236,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
236 236
237 atomic_set(&kgdb_cpu_doing_single_step, -1); 237 atomic_set(&kgdb_cpu_doing_single_step, -1);
238 if (remcom_in_buffer[0] == 's') 238 if (remcom_in_buffer[0] == 's')
239 if (kgdb_contthread) 239 atomic_set(&kgdb_cpu_doing_single_step, cpu);
240 atomic_set(&kgdb_cpu_doing_single_step, cpu);
241 240
242 return 0; 241 return 0;
243 } 242 }
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index df4d3f2f740c..dc9eb72ed9de 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh);
159/* 159/*
160 * FPU Use Factor empirically derived from experiments on 34K 160 * FPU Use Factor empirically derived from experiments on 34K
161 */ 161 */
162#define FPUSEFACTOR 333 162#define FPUSEFACTOR 2000
163 163
164static __init int mt_fp_affinity_init(void) 164static __init int mt_fp_affinity_init(void)
165{ 165{
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index b16facd9ea8e..22fc19bbe87f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -55,7 +55,7 @@ void __noreturn cpu_idle(void)
55 while (1) { 55 while (1) {
56 tick_nohz_stop_sched_tick(1); 56 tick_nohz_stop_sched_tick(1);
57 while (!need_resched()) { 57 while (!need_resched()) {
58#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG 58#ifdef CONFIG_MIPS_MT_SMTC
59 extern void smtc_idle_loop_hook(void); 59 extern void smtc_idle_loop_hook(void);
60 60
61 smtc_idle_loop_hook(); 61 smtc_idle_loop_hook();
@@ -145,17 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
145 */ 145 */
146 p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); 146 p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
147 childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); 147 childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
148 clear_tsk_thread_flag(p, TIF_USEDFPU);
149 148
150#ifdef CONFIG_MIPS_MT_FPAFF 149#ifdef CONFIG_MIPS_MT_SMTC
151 /* 150 /*
152 * FPU affinity support is cleaner if we track the 151 * SMTC restores TCStatus after Status, and the CU bits
153 * user-visible CPU affinity from the very beginning. 152 * are aliased there.
154 * The generic cpus_allowed mask will already have
155 * been copied from the parent before copy_thread
156 * is invoked.
157 */ 153 */
158 p->thread.user_cpus_allowed = p->cpus_allowed; 154 childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
155#endif
156 clear_tsk_thread_flag(p, TIF_USEDFPU);
157
158#ifdef CONFIG_MIPS_MT_FPAFF
159 clear_tsk_thread_flag(p, TIF_FPUBOUND);
159#endif /* CONFIG_MIPS_MT_FPAFF */ 160#endif /* CONFIG_MIPS_MT_FPAFF */
160 161
161 if (clone_flags & CLONE_SETTLS) 162 if (clone_flags & CLONE_SETTLS)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 35234b92b9a5..96ffc9c6d194 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
238 case FPC_EIR: { /* implementation / version register */ 238 case FPC_EIR: { /* implementation / version register */
239 unsigned int flags; 239 unsigned int flags;
240#ifdef CONFIG_MIPS_MT_SMTC 240#ifdef CONFIG_MIPS_MT_SMTC
241 unsigned int irqflags; 241 unsigned long irqflags;
242 unsigned int mtflags; 242 unsigned int mtflags;
243#endif /* CONFIG_MIPS_MT_SMTC */ 243#endif /* CONFIG_MIPS_MT_SMTC */
244 244
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a516286532ab..897fb2b4751c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,4 +1,21 @@
1/* Copyright (C) 2004 Mips Technologies, Inc */ 1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15 *
16 * Copyright (C) 2004 Mips Technologies, Inc
17 * Copyright (C) 2008 Kevin D. Kissell
18 */
2 19
3#include <linux/clockchips.h> 20#include <linux/clockchips.h>
4#include <linux/kernel.h> 21#include <linux/kernel.h>
@@ -21,7 +38,6 @@
21#include <asm/time.h> 38#include <asm/time.h>
22#include <asm/addrspace.h> 39#include <asm/addrspace.h>
23#include <asm/smtc.h> 40#include <asm/smtc.h>
24#include <asm/smtc_ipi.h>
25#include <asm/smtc_proc.h> 41#include <asm/smtc_proc.h>
26 42
27/* 43/*
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS];
58 74
59asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; 75asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
60 76
61/*
62 * Clock interrupt "latch" buffers, per "CPU"
63 */
64
65static atomic_t ipi_timer_latch[NR_CPUS];
66 77
67/* 78/*
68 * Number of InterProcessor Interrupt (IPI) message buffers to allocate 79 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
@@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS];
70 81
71#define IPIBUF_PER_CPU 4 82#define IPIBUF_PER_CPU 4
72 83
73static struct smtc_ipi_q IPIQ[NR_CPUS]; 84struct smtc_ipi_q IPIQ[NR_CPUS];
74static struct smtc_ipi_q freeIPIq; 85static struct smtc_ipi_q freeIPIq;
75 86
76 87
@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void)
282 * phys_cpu_present_map and the logical/physical mappings. 293 * phys_cpu_present_map and the logical/physical mappings.
283 */ 294 */
284 295
285int __init mipsmt_build_cpu_map(int start_cpu_slot) 296int __init smtc_build_cpu_map(int start_cpu_slot)
286{ 297{
287 int i, ntcs; 298 int i, ntcs;
288 299
@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
325 write_tc_c0_tcstatus((read_tc_c0_tcstatus() 336 write_tc_c0_tcstatus((read_tc_c0_tcstatus()
326 & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) 337 & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
327 | TCSTATUS_A); 338 | TCSTATUS_A);
328 write_tc_c0_tccontext(0); 339 /*
340 * TCContext gets an offset from the base of the IPIQ array
341 * to be used in low-level code to detect the presence of
342 * an active IPI queue
343 */
344 write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
329 /* Bind tc to vpe */ 345 /* Bind tc to vpe */
330 write_tc_c0_tcbind(vpe); 346 write_tc_c0_tcbind(vpe);
331 /* In general, all TCs should have the same cpu_data indications */ 347 /* In general, all TCs should have the same cpu_data indications */
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
336 cpu_data[cpu].options &= ~MIPS_CPU_FPU; 352 cpu_data[cpu].options &= ~MIPS_CPU_FPU;
337 cpu_data[cpu].vpe_id = vpe; 353 cpu_data[cpu].vpe_id = vpe;
338 cpu_data[cpu].tc_id = tc; 354 cpu_data[cpu].tc_id = tc;
355 /* Multi-core SMTC hasn't been tested, but be prepared */
356 cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
339} 357}
340 358
359/*
 360 * Tweak to get Count registers in as close a sync as possible.
361 * Value seems good for 34K-class cores.
362 */
363
364#define CP0_SKEW 8
341 365
342void mipsmt_prepare_cpus(void) 366void smtc_prepare_cpus(int cpus)
343{ 367{
344 int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; 368 int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
345 unsigned long flags; 369 unsigned long flags;
@@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void)
363 IPIQ[i].head = IPIQ[i].tail = NULL; 387 IPIQ[i].head = IPIQ[i].tail = NULL;
364 spin_lock_init(&IPIQ[i].lock); 388 spin_lock_init(&IPIQ[i].lock);
365 IPIQ[i].depth = 0; 389 IPIQ[i].depth = 0;
366 atomic_set(&ipi_timer_latch[i], 0);
367 } 390 }
368 391
369 /* cpu_data index starts at zero */ 392 /* cpu_data index starts at zero */
370 cpu = 0; 393 cpu = 0;
371 cpu_data[cpu].vpe_id = 0; 394 cpu_data[cpu].vpe_id = 0;
372 cpu_data[cpu].tc_id = 0; 395 cpu_data[cpu].tc_id = 0;
396 cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
373 cpu++; 397 cpu++;
374 398
375 /* Report on boot-time options */ 399 /* Report on boot-time options */
@@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void)
484 write_vpe_c0_compare(0); 508 write_vpe_c0_compare(0);
485 /* Propagate Config7 */ 509 /* Propagate Config7 */
486 write_vpe_c0_config7(read_c0_config7()); 510 write_vpe_c0_config7(read_c0_config7());
487 write_vpe_c0_count(read_c0_count()); 511 write_vpe_c0_count(read_c0_count() + CP0_SKEW);
512 ehb();
488 } 513 }
489 /* enable multi-threading within VPE */ 514 /* enable multi-threading within VPE */
490 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); 515 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
@@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void)
556void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) 581void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
557{ 582{
558 extern u32 kernelsp[NR_CPUS]; 583 extern u32 kernelsp[NR_CPUS];
559 long flags; 584 unsigned long flags;
560 int mtflags; 585 int mtflags;
561 586
562 LOCK_MT_PRA(); 587 LOCK_MT_PRA();
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
585 610
586void smtc_init_secondary(void) 611void smtc_init_secondary(void)
587{ 612{
588 /*
589 * Start timer on secondary VPEs if necessary.
590 * plat_timer_setup has already have been invoked by init/main
591 * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that
592 * SMTC init code assigns TCs consdecutively and in ascending order
593 * to across available VPEs.
594 */
595 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
596 ((read_c0_tcbind() & TCBIND_CURVPE)
597 != cpu_data[smp_processor_id() - 1].vpe_id)){
598 write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
599 }
600
601 local_irq_enable(); 613 local_irq_enable();
602} 614}
603 615
604void smtc_smp_finish(void) 616void smtc_smp_finish(void)
605{ 617{
618 int cpu = smp_processor_id();
619
620 /*
621 * Lowest-numbered CPU per VPE starts a clock tick.
622 * Like per_cpu_trap_init() hack, this assumes that
 623 * SMTC init code assigns TCs consecutively and
624 * in ascending order across available VPEs.
625 */
626 if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
627 write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
628
606 printk("TC %d going on-line as CPU %d\n", 629 printk("TC %d going on-line as CPU %d\n",
607 cpu_data[smp_processor_id()].tc_id, smp_processor_id()); 630 cpu_data[smp_processor_id()].tc_id, smp_processor_id());
608} 631}
@@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
753{ 776{
754 int tcstatus; 777 int tcstatus;
755 struct smtc_ipi *pipi; 778 struct smtc_ipi *pipi;
756 long flags; 779 unsigned long flags;
757 int mtflags; 780 int mtflags;
781 unsigned long tcrestart;
782 extern void r4k_wait_irqoff(void), __pastwait(void);
758 783
759 if (cpu == smp_processor_id()) { 784 if (cpu == smp_processor_id()) {
760 printk("Cannot Send IPI to self!\n"); 785 printk("Cannot Send IPI to self!\n");
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
771 pipi->arg = (void *)action; 796 pipi->arg = (void *)action;
772 pipi->dest = cpu; 797 pipi->dest = cpu;
773 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { 798 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
774 if (type == SMTC_CLOCK_TICK)
775 atomic_inc(&ipi_timer_latch[cpu]);
776 /* If not on same VPE, enqueue and send cross-VPE interrupt */ 799 /* If not on same VPE, enqueue and send cross-VPE interrupt */
777 smtc_ipi_nq(&IPIQ[cpu], pipi); 800 smtc_ipi_nq(&IPIQ[cpu], pipi);
778 LOCK_CORE_PRA(); 801 LOCK_CORE_PRA();
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
800 823
801 if ((tcstatus & TCSTATUS_IXMT) != 0) { 824 if ((tcstatus & TCSTATUS_IXMT) != 0) {
802 /* 825 /*
 803 * Spin-waiting here can deadlock, 826 * If we're in the irq-off version of the wait
804 * so we queue the message for the target TC. 827 * loop, we need to force exit from the wait and
828 * do a direct post of the IPI.
829 */
830 if (cpu_wait == r4k_wait_irqoff) {
831 tcrestart = read_tc_c0_tcrestart();
832 if (tcrestart >= (unsigned long)r4k_wait_irqoff
833 && tcrestart < (unsigned long)__pastwait) {
834 write_tc_c0_tcrestart(__pastwait);
835 tcstatus &= ~TCSTATUS_IXMT;
836 write_tc_c0_tcstatus(tcstatus);
837 goto postdirect;
838 }
839 }
840 /*
841 * Otherwise we queue the message for the target TC
 842 * to pick up when it does a local_irq_restore()
805 */ 843 */
806 write_tc_c0_tchalt(0); 844 write_tc_c0_tchalt(0);
807 UNLOCK_CORE_PRA(); 845 UNLOCK_CORE_PRA();
808 /* Try to reduce redundant timer interrupt messages */
809 if (type == SMTC_CLOCK_TICK) {
810 if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
811 smtc_ipi_nq(&freeIPIq, pipi);
812 return;
813 }
814 }
815 smtc_ipi_nq(&IPIQ[cpu], pipi); 846 smtc_ipi_nq(&IPIQ[cpu], pipi);
816 } else { 847 } else {
817 if (type == SMTC_CLOCK_TICK) 848postdirect:
818 atomic_inc(&ipi_timer_latch[cpu]);
819 post_direct_ipi(cpu, pipi); 849 post_direct_ipi(cpu, pipi);
820 write_tc_c0_tchalt(0); 850 write_tc_c0_tchalt(0);
821 UNLOCK_CORE_PRA(); 851 UNLOCK_CORE_PRA();
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void)
883 smp_call_function_interrupt(); 913 smp_call_function_interrupt();
884} 914}
885 915
886DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); 916DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
887 917
888void ipi_decode(struct smtc_ipi *pipi) 918void ipi_decode(struct smtc_ipi *pipi)
889{ 919{
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
891 struct clock_event_device *cd; 921 struct clock_event_device *cd;
892 void *arg_copy = pipi->arg; 922 void *arg_copy = pipi->arg;
893 int type_copy = pipi->type; 923 int type_copy = pipi->type;
894 int ticks;
895
896 smtc_ipi_nq(&freeIPIq, pipi); 924 smtc_ipi_nq(&freeIPIq, pipi);
897 switch (type_copy) { 925 switch (type_copy) {
898 case SMTC_CLOCK_TICK: 926 case SMTC_CLOCK_TICK:
899 irq_enter(); 927 irq_enter();
900 kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; 928 kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
901 cd = &per_cpu(smtc_dummy_clockevent_device, cpu); 929 cd = &per_cpu(mips_clockevent_device, cpu);
902 ticks = atomic_read(&ipi_timer_latch[cpu]); 930 cd->event_handler(cd);
903 atomic_sub(ticks, &ipi_timer_latch[cpu]);
904 while (ticks) {
905 cd->event_handler(cd);
906 ticks--;
907 }
908 irq_exit(); 931 irq_exit();
909 break; 932 break;
910 933
@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi)
937 } 960 }
938} 961}
939 962
963/*
964 * Similar to smtc_ipi_replay(), but invoked from context restore,
965 * so it reuses the current exception frame rather than set up a
966 * new one with self_ipi.
967 */
968
940void deferred_smtc_ipi(void) 969void deferred_smtc_ipi(void)
941{ 970{
942 struct smtc_ipi *pipi; 971 int cpu = smp_processor_id();
943 unsigned long flags;
944/* DEBUG */
945 int q = smp_processor_id();
946 972
947 /* 973 /*
948 * Test is not atomic, but much faster than a dequeue, 974 * Test is not atomic, but much faster than a dequeue,
949 * and the vast majority of invocations will have a null queue. 975 * and the vast majority of invocations will have a null queue.
 976 * If interrupts were disabled when this was called, then any IPIs
 977 * queued after the last test will be taken on the next irq_enable/restore.
978 * If interrupts were enabled, then any IPIs added after the
979 * last test will be taken directly.
950 */ 980 */
951 if (IPIQ[q].head != NULL) { 981
952 while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) { 982 while (IPIQ[cpu].head != NULL) {
953 /* ipi_decode() should be called with interrupts off */ 983 struct smtc_ipi_q *q = &IPIQ[cpu];
954 local_irq_save(flags); 984 struct smtc_ipi *pipi;
985 unsigned long flags;
986
987 /*
988 * It may be possible we'll come in with interrupts
989 * already enabled.
990 */
991 local_irq_save(flags);
992
993 spin_lock(&q->lock);
994 pipi = __smtc_ipi_dq(q);
995 spin_unlock(&q->lock);
996 if (pipi != NULL)
955 ipi_decode(pipi); 997 ipi_decode(pipi);
956 local_irq_restore(flags); 998 /*
957 } 999 * The use of the __raw_local restore isn't
1000 * as obviously necessary here as in smtc_ipi_replay(),
1001 * but it's more efficient, given that we're already
1002 * running down the IPI queue.
1003 */
1004 __raw_local_irq_restore(flags);
958 } 1005 }
959} 1006}
960 1007
@@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
975 struct smtc_ipi *pipi; 1022 struct smtc_ipi *pipi;
976 unsigned long tcstatus; 1023 unsigned long tcstatus;
977 int sent; 1024 int sent;
978 long flags; 1025 unsigned long flags;
979 unsigned int mtflags; 1026 unsigned int mtflags;
980 unsigned int vpflags; 1027 unsigned int vpflags;
981 1028
@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
1066 1113
1067/* 1114/*
1068 * SMTC-specific hacks invoked from elsewhere in the kernel. 1115 * SMTC-specific hacks invoked from elsewhere in the kernel.
1069 *
1070 * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
1071 * called with interrupts disabled. We do rely on interrupts being disabled
1072 * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
1073 * result in a recursive call to raw_local_irq_restore().
1074 */ 1116 */
1075 1117
1076static void __smtc_ipi_replay(void) 1118 /*
1119 * smtc_ipi_replay is called from raw_local_irq_restore
1120 */
1121
1122void smtc_ipi_replay(void)
1077{ 1123{
1078 unsigned int cpu = smp_processor_id(); 1124 unsigned int cpu = smp_processor_id();
1079 1125
1080 /* 1126 /*
1081 * To the extent that we've ever turned interrupts off, 1127 * To the extent that we've ever turned interrupts off,
1082 * we may have accumulated deferred IPIs. This is subtle. 1128 * we may have accumulated deferred IPIs. This is subtle.
1083 * If we use the smtc_ipi_qdepth() macro, we'll get an
1084 * exact number - but we'll also disable interrupts
1085 * and create a window of failure where a new IPI gets
1086 * queued after we test the depth but before we re-enable
1087 * interrupts. So long as IXMT never gets set, however,
1088 * we should be OK: If we pick up something and dispatch 1129 * we should be OK: If we pick up something and dispatch
1089 * it here, that's great. If we see nothing, but concurrent 1130 * it here, that's great. If we see nothing, but concurrent
1090 * with this operation, another TC sends us an IPI, IXMT 1131 * with this operation, another TC sends us an IPI, IXMT
1091 * is clear, and we'll handle it as a real pseudo-interrupt 1132 * is clear, and we'll handle it as a real pseudo-interrupt
1092 * and not a pseudo-pseudo interrupt. 1133 * and not a pseudo-pseudo interrupt. The important thing
 1134 * is to do the last check for a queued message *after* the
1135 * re-enabling of interrupts.
1093 */ 1136 */
1094 if (IPIQ[cpu].depth > 0) { 1137 while (IPIQ[cpu].head != NULL) {
1095 while (1) { 1138 struct smtc_ipi_q *q = &IPIQ[cpu];
1096 struct smtc_ipi_q *q = &IPIQ[cpu]; 1139 struct smtc_ipi *pipi;
1097 struct smtc_ipi *pipi; 1140 unsigned long flags;
1098 extern void self_ipi(struct smtc_ipi *); 1141
1099 1142 /*
1100 spin_lock(&q->lock); 1143 * It's just possible we'll come in with interrupts
1101 pipi = __smtc_ipi_dq(q); 1144 * already enabled.
1102 spin_unlock(&q->lock); 1145 */
1103 if (!pipi) 1146 local_irq_save(flags);
1104 break; 1147
1148 spin_lock(&q->lock);
1149 pipi = __smtc_ipi_dq(q);
1150 spin_unlock(&q->lock);
1151 /*
 1152 * But use a raw restore here to avoid recursion.
1153 */
1154 __raw_local_irq_restore(flags);
1105 1155
1156 if (pipi) {
1106 self_ipi(pipi); 1157 self_ipi(pipi);
1107 smtc_cpu_stats[cpu].selfipis++; 1158 smtc_cpu_stats[cpu].selfipis++;
1108 } 1159 }
1109 } 1160 }
1110} 1161}
1111 1162
1112void smtc_ipi_replay(void)
1113{
1114 raw_local_irq_disable();
1115 __smtc_ipi_replay();
1116}
1117
1118EXPORT_SYMBOL(smtc_ipi_replay); 1163EXPORT_SYMBOL(smtc_ipi_replay);
1119 1164
1120void smtc_idle_loop_hook(void) 1165void smtc_idle_loop_hook(void)
@@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void)
1193 } 1238 }
1194 } 1239 }
1195 1240
1196 /*
1197 * Now that we limit outstanding timer IPIs, check for hung TC
1198 */
1199 for (tc = 0; tc < NR_CPUS; tc++) {
1200 /* Don't check ourself - we'll dequeue IPIs just below */
1201 if ((tc != smp_processor_id()) &&
1202 atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
1203 if (clock_hang_reported[tc] == 0) {
1204 pdb_msg += sprintf(pdb_msg,
1205 "TC %d looks hung with timer latch at %d\n",
1206 tc, atomic_read(&ipi_timer_latch[tc]));
1207 clock_hang_reported[tc]++;
1208 }
1209 }
1210 }
1211 emt(mtflags); 1241 emt(mtflags);
1212 local_irq_restore(flags); 1242 local_irq_restore(flags);
1213 if (pdb_msg != &id_ho_db_msg[0]) 1243 if (pdb_msg != &id_ho_db_msg[0])
1214 printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); 1244 printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
1215#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ 1245#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
1216 1246
1217 /* 1247 smtc_ipi_replay();
1218 * Replay any accumulated deferred IPIs. If "Instant Replay"
1219 * is in use, there should never be any.
1220 */
1221#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
1222 {
1223 unsigned long flags;
1224
1225 local_irq_save(flags);
1226 __smtc_ipi_replay();
1227 local_irq_restore(flags);
1228 }
1229#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
1230} 1248}
1231 1249
1232void smtc_soft_dump(void) 1250void smtc_soft_dump(void)
@@ -1242,10 +1260,6 @@ void smtc_soft_dump(void)
1242 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); 1260 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1243 } 1261 }
1244 smtc_ipi_qdump(); 1262 smtc_ipi_qdump();
1245 printk("Timer IPI Backlogs:\n");
1246 for (i=0; i < NR_CPUS; i++) {
1247 printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
1248 }
1249 printk("%d Recoveries of \"stolen\" FPU\n", 1263 printk("%d Recoveries of \"stolen\" FPU\n",
1250 atomic_read(&smtc_fpu_recoveries)); 1264 atomic_read(&smtc_fpu_recoveries));
1251} 1265}
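
The smtc.c rework above drops the per-CPU ipi_timer_latch counters in favour of a single rule: drain the per-CPU IPI queue until its head is genuinely empty, re-testing after every dequeue so that a message posted while interrupts were briefly enabled is still picked up. A minimal, self-contained C sketch of that drain pattern follows; the struct names and helpers are illustrative stand-ins, not the kernel's own types.

        #include <stddef.h>

        struct ipi_msg { struct ipi_msg *next; int type; void *arg; };
        struct ipi_q   { struct ipi_msg *head, *tail; };

        /* Dequeue one message; in the kernel this runs under the queue's spinlock. */
        static struct ipi_msg *ipi_dequeue(struct ipi_q *q)
        {
                struct ipi_msg *m = q->head;

                if (m != NULL) {
                        q->head = m->next;
                        if (q->head == NULL)
                                q->tail = NULL;
                }
                return m;
        }

        /* Drain pattern: keep re-testing the head so that anything queued after
         * a dequeue (e.g. while interrupts were momentarily re-enabled) is
         * handled before we return. */
        static void drain_deferred_ipis(struct ipi_q *q,
                                        void (*handle)(struct ipi_msg *))
        {
                while (q->head != NULL) {
                        struct ipi_msg *m = ipi_dequeue(q);

                        if (m != NULL)
                                handle(m);
                }
        }
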
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 6bee29097a56..b602ac6eb47d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -46,6 +46,9 @@
46#include <asm/types.h> 46#include <asm/types.h>
47#include <asm/stacktrace.h> 47#include <asm/stacktrace.h>
48 48
49extern void check_wait(void);
50extern asmlinkage void r4k_wait(void);
51extern asmlinkage void rollback_handle_int(void);
49extern asmlinkage void handle_int(void); 52extern asmlinkage void handle_int(void);
50extern asmlinkage void handle_tlbm(void); 53extern asmlinkage void handle_tlbm(void);
51extern asmlinkage void handle_tlbl(void); 54extern asmlinkage void handle_tlbl(void);
@@ -822,8 +825,10 @@ static void mt_ase_fp_affinity(void)
822 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { 825 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
823 cpumask_t tmask; 826 cpumask_t tmask;
824 827
825 cpus_and(tmask, current->thread.user_cpus_allowed, 828 current->thread.user_cpus_allowed
826 mt_fpu_cpumask); 829 = current->cpus_allowed;
830 cpus_and(tmask, current->cpus_allowed,
831 mt_fpu_cpumask);
827 set_cpus_allowed(current, tmask); 832 set_cpus_allowed(current, tmask);
828 set_thread_flag(TIF_FPUBOUND); 833 set_thread_flag(TIF_FPUBOUND);
829 } 834 }
@@ -1251,6 +1256,9 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1251 1256
1252 extern char except_vec_vi, except_vec_vi_lui; 1257 extern char except_vec_vi, except_vec_vi_lui;
1253 extern char except_vec_vi_ori, except_vec_vi_end; 1258 extern char except_vec_vi_ori, except_vec_vi_end;
1259 extern char rollback_except_vec_vi;
1260 char *vec_start = (cpu_wait == r4k_wait) ?
1261 &rollback_except_vec_vi : &except_vec_vi;
1254#ifdef CONFIG_MIPS_MT_SMTC 1262#ifdef CONFIG_MIPS_MT_SMTC
1255 /* 1263 /*
1256 * We need to provide the SMTC vectored interrupt handler 1264 * We need to provide the SMTC vectored interrupt handler
@@ -1258,11 +1266,11 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1258 * Status.IM bit to be masked before going there. 1266 * Status.IM bit to be masked before going there.
1259 */ 1267 */
1260 extern char except_vec_vi_mori; 1268 extern char except_vec_vi_mori;
1261 const int mori_offset = &except_vec_vi_mori - &except_vec_vi; 1269 const int mori_offset = &except_vec_vi_mori - vec_start;
1262#endif /* CONFIG_MIPS_MT_SMTC */ 1270#endif /* CONFIG_MIPS_MT_SMTC */
1263 const int handler_len = &except_vec_vi_end - &except_vec_vi; 1271 const int handler_len = &except_vec_vi_end - vec_start;
1264 const int lui_offset = &except_vec_vi_lui - &except_vec_vi; 1272 const int lui_offset = &except_vec_vi_lui - vec_start;
1265 const int ori_offset = &except_vec_vi_ori - &except_vec_vi; 1273 const int ori_offset = &except_vec_vi_ori - vec_start;
1266 1274
1267 if (handler_len > VECTORSPACING) { 1275 if (handler_len > VECTORSPACING) {
1268 /* 1276 /*
@@ -1272,7 +1280,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1272 panic("VECTORSPACING too small"); 1280 panic("VECTORSPACING too small");
1273 } 1281 }
1274 1282
1275 memcpy(b, &except_vec_vi, handler_len); 1283 memcpy(b, vec_start, handler_len);
1276#ifdef CONFIG_MIPS_MT_SMTC 1284#ifdef CONFIG_MIPS_MT_SMTC
1277 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ 1285 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
1278 1286
@@ -1554,6 +1562,10 @@ void __init trap_init(void)
1554 extern char except_vec3_generic, except_vec3_r4000; 1562 extern char except_vec3_generic, except_vec3_r4000;
1555 extern char except_vec4; 1563 extern char except_vec4;
1556 unsigned long i; 1564 unsigned long i;
1565 int rollback;
1566
1567 check_wait();
1568 rollback = (cpu_wait == r4k_wait);
1557 1569
1558#if defined(CONFIG_KGDB) 1570#if defined(CONFIG_KGDB)
1559 if (kgdb_early_setup) 1571 if (kgdb_early_setup)
@@ -1618,7 +1630,7 @@ void __init trap_init(void)
1618 if (board_be_init) 1630 if (board_be_init)
1619 board_be_init(); 1631 board_be_init();
1620 1632
1621 set_except_vector(0, handle_int); 1633 set_except_vector(0, rollback ? rollback_handle_int : handle_int);
1622 set_except_vector(1, handle_tlbm); 1634 set_except_vector(1, handle_tlbm);
1623 set_except_vector(2, handle_tlbl); 1635 set_except_vector(2, handle_tlbl);
1624 set_except_vector(3, handle_tlbs); 1636 set_except_vector(3, handle_tlbs);
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index b5470ceb418b..afb119f35682 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -36,6 +36,7 @@ SECTIONS
36 SCHED_TEXT 36 SCHED_TEXT
37 LOCK_TEXT 37 LOCK_TEXT
38 KPROBES_TEXT 38 KPROBES_TEXT
39 *(.text.*)
39 *(.fixup) 40 *(.fixup)
40 *(.gnu.warning) 41 *(.gnu.warning)
41 } :text = 0 42 } :text = 0
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 8d7784122c14..edac9892c51a 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -39,12 +39,14 @@
39#ifdef USE_DOUBLE 39#ifdef USE_DOUBLE
40 40
41#define LOAD ld 41#define LOAD ld
42#define LOAD32 lwu
42#define ADD daddu 43#define ADD daddu
43#define NBYTES 8 44#define NBYTES 8
44 45
45#else 46#else
46 47
47#define LOAD lw 48#define LOAD lw
49#define LOAD32 lw
48#define ADD addu 50#define ADD addu
49#define NBYTES 4 51#define NBYTES 4
50 52
@@ -60,6 +62,14 @@
60 ADD sum, v1; \ 62 ADD sum, v1; \
61 .set pop 63 .set pop
62 64
65#define ADDC32(sum,reg) \
66 .set push; \
67 .set noat; \
68 addu sum, reg; \
69 sltu v1, sum, reg; \
70 addu sum, v1; \
71 .set pop
72
63#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ 73#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
64 LOAD _t0, (offset + UNIT(0))(src); \ 74 LOAD _t0, (offset + UNIT(0))(src); \
65 LOAD _t1, (offset + UNIT(1))(src); \ 75 LOAD _t1, (offset + UNIT(1))(src); \
@@ -132,7 +142,7 @@ LEAF(csum_partial)
132 beqz t8, .Lqword_align 142 beqz t8, .Lqword_align
133 andi t8, src, 0x8 143 andi t8, src, 0x8
134 144
135 lw t0, 0x00(src) 145 LOAD32 t0, 0x00(src)
136 LONG_SUBU a1, a1, 0x4 146 LONG_SUBU a1, a1, 0x4
137 ADDC(sum, t0) 147 ADDC(sum, t0)
138 PTR_ADDU src, src, 0x4 148 PTR_ADDU src, src, 0x4
@@ -211,7 +221,7 @@ LEAF(csum_partial)
211 LONG_SRL t8, t8, 0x2 221 LONG_SRL t8, t8, 0x2
212 222
213.Lend_words: 223.Lend_words:
214 lw t0, (src) 224 LOAD32 t0, (src)
215 LONG_SUBU t8, t8, 0x1 225 LONG_SUBU t8, t8, 0x1
216 ADDC(sum, t0) 226 ADDC(sum, t0)
217 .set reorder /* DADDI_WAR */ 227 .set reorder /* DADDI_WAR */
@@ -230,6 +240,9 @@ LEAF(csum_partial)
230 /* Still a full word to go */ 240 /* Still a full word to go */
231 ulw t1, (src) 241 ulw t1, (src)
232 PTR_ADDIU src, 4 242 PTR_ADDIU src, 4
243#ifdef USE_DOUBLE
244 dsll t1, t1, 32 /* clear lower 32bit */
245#endif
233 ADDC(sum, t1) 246 ADDC(sum, t1)
234 247
2351: move t1, zero 2481: move t1, zero
@@ -280,7 +293,7 @@ LEAF(csum_partial)
2801: 2931:
281 .set reorder 294 .set reorder
282 /* Add the passed partial csum. */ 295 /* Add the passed partial csum. */
283 ADDC(sum, a2) 296 ADDC32(sum, a2)
284 jr ra 297 jr ra
285 .set noreorder 298 .set noreorder
286 END(csum_partial) 299 END(csum_partial)
@@ -681,7 +694,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
681 .set pop 694 .set pop
6821: 6951:
683 .set reorder 696 .set reorder
684 ADDC(sum, psum) 697 ADDC32(sum, psum)
685 jr ra 698 jr ra
686 .set noreorder 699 .set noreorder
687 700
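
The new ADDC32 macro exists because, under USE_DOUBLE, the running checksum lives in a 64-bit register while the caller-supplied partial checksum in a2 (and psum in the copy-and-checksum path) is a 32-bit quantity: it must be folded in with a 32-bit add and end-around carry rather than a 64-bit daddu. A hedged C equivalent of that fold, for illustration only:

        #include <stdint.h>

        /* 32-bit one's-complement accumulate, mirroring ADDC32's addu/sltu/addu
         * sequence: add, detect the carry out of bit 31, and fold it back in. */
        static uint32_t csum_add32(uint32_t sum, uint32_t addend)
        {
                sum += addend;
                if (sum < addend)       /* unsigned overflow => carry out */
                        sum += 1;       /* end-around carry */
                return sum;
        }
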
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 3b7dd722c32a..cef2db8d2225 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += malta-console.o
15obj-$(CONFIG_PCI) += malta-pci.o 15obj-$(CONFIG_PCI) += malta-pci.o
16 16
17# FIXME FIXME FIXME 17# FIXME FIXME FIXME
18obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o 18obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o
19 19
20EXTRA_CFLAGS += -Werror 20EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 5ea705e49454..f84a46a8ae6e 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -84,12 +84,17 @@ static void msmtc_cpus_done(void)
84 84
85static void __init msmtc_smp_setup(void) 85static void __init msmtc_smp_setup(void)
86{ 86{
87 mipsmt_build_cpu_map(0); 87 /*
88 * we won't get the definitive value until
89 * we've run smtc_prepare_cpus later, but
90 * we would appear to need an upper bound now.
91 */
92 smp_num_siblings = smtc_build_cpu_map(0);
88} 93}
89 94
90static void __init msmtc_prepare_cpus(unsigned int max_cpus) 95static void __init msmtc_prepare_cpus(unsigned int max_cpus)
91{ 96{
92 mipsmt_prepare_cpus(); 97 smtc_prepare_cpus(max_cpus);
93} 98}
94 99
95struct plat_smp_ops msmtc_smp_ops = { 100struct plat_smp_ops msmtc_smp_ops = {
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 15e01aec37fd..c8c32f417b6c 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SOC_TX3927) += ops-tx3927.o
15obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o 15obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o
16obj-$(CONFIG_MARKEINS) += ops-emma2rh.o pci-emma2rh.o fixup-emma2rh.o 16obj-$(CONFIG_MARKEINS) += ops-emma2rh.o pci-emma2rh.o fixup-emma2rh.o
17obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o 17obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o
18obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o
18 19
19# 20#
20# These are still pretty much in the old state, watch, go blind. 21# These are still pretty much in the old state, watch, go blind.
diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c
new file mode 100644
index 000000000000..bea9b6cdfdbf
--- /dev/null
+++ b/arch/mips/pci/pci-bcm47xx.c
@@ -0,0 +1,60 @@
1/*
2 * Copyright (C) 2008 Aurelien Jarno <aurelien@aurel32.net>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
10 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
12 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
13 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
14 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
15 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
16 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
17 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
18 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/ssb/ssb.h>
28
29int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
30{
31 return 0;
32}
33
34int pcibios_plat_dev_init(struct pci_dev *dev)
35{
36 int res;
37 u8 slot, pin;
38
39 res = ssb_pcibios_plat_dev_init(dev);
40 if (res < 0) {
41 printk(KERN_ALERT "PCI: Failed to init device %s\n",
42 pci_name(dev));
43 return res;
44 }
45
46 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
47 slot = PCI_SLOT(dev->devfn);
48 res = ssb_pcibios_map_irq(dev, slot, pin);
49
50 /* IRQ-0 and IRQ-1 are software interrupts. */
51 if (res < 2) {
52 printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n",
53 pci_name(dev));
54 return res;
55 }
56
57 dev->irq = res;
58 return 0;
59}
60
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index bd78368c82bf..f97ab1461012 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -143,25 +143,47 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
143 */ 143 */
144int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 144int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
145{ 145{
146 return 0;
147}
148
149/* Most MIPS systems have straight-forward swizzling needs. */
150static inline u8 bridge_swizzle(u8 pin, u8 slot)
151{
152 return (((pin - 1) + slot) % 4) + 1;
153}
154
155static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev)
156{
157 while (dev->bus->parent) {
158 /* Move up the chain of bridges. */
159 dev = dev->bus->self;
160 }
161
162 return dev;
163}
164
165/* Do platform specific device initialization at pci_enable_device() time */
166int pcibios_plat_dev_init(struct pci_dev *dev)
167{
146 struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); 168 struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
147 int irq = bc->pci_int[slot]; 169 struct pci_dev *rdev = bridge_root_dev(dev);
170 int slot = PCI_SLOT(rdev->devfn);
171 int irq;
148 172
173 irq = bc->pci_int[slot];
149 if (irq == -1) { 174 if (irq == -1) {
150 irq = bc->pci_int[slot] = request_bridge_irq(bc); 175 irq = request_bridge_irq(bc);
151 if (irq < 0) 176 if (irq < 0)
152 panic("Can't allocate interrupt for PCI device %s\n", 177 return irq;
153 pci_name(dev)); 178
179 bc->pci_int[slot] = irq;
154 } 180 }
155 181
156 irq_to_bridge[irq] = bc; 182 irq_to_bridge[irq] = bc;
157 irq_to_slot[irq] = slot; 183 irq_to_slot[irq] = slot;
158 184
159 return irq; 185 dev->irq = irq;
160}
161 186
162/* Do platform specific device initialization at pci_enable_device() time */
163int pcibios_plat_dev_init(struct pci_dev *dev)
164{
165 return 0; 187 return 0;
166} 188}
167 189
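
The new bridge_swizzle() helper encodes the conventional PCI interrupt swizzle: a device's INTx pin is rotated by its slot number at each bridge it sits behind, staying within the 1-4 (INTA-INTD) range. A small stand-alone illustration, assuming nothing beyond standard C:

        #include <stdio.h>

        /* Conventional PCI swizzle: rotate the pin by the slot number, keeping
         * the result in the 1..4 (INTA..INTD) range. */
        static unsigned char pci_swizzle(unsigned char pin, unsigned char slot)
        {
                return (((pin - 1) + slot) % 4) + 1;
        }

        int main(void)
        {
                /* A device in slot 2 raising INTA (1) appears upstream as INTC (3). */
                printf("slot 2, INTA -> INT%c\n", 'A' + pci_swizzle(1, 2) - 1);
                return 0;
        }
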
diff --git a/arch/mips/sibyte/swarm/Makefile b/arch/mips/sibyte/swarm/Makefile
index f18ba9201bbc..7b45f199d92a 100644
--- a/arch/mips/sibyte/swarm/Makefile
+++ b/arch/mips/sibyte/swarm/Makefile
@@ -1,3 +1,4 @@
1obj-y := setup.o rtc_xicor1241.o rtc_m41t81.o 1obj-y := platform.o setup.o rtc_xicor1241.o \
2 rtc_m41t81.o
2 3
3obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o 4obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
new file mode 100644
index 000000000000..54847fe1e564
--- /dev/null
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -0,0 +1,85 @@
1#include <linux/err.h>
2#include <linux/kernel.h>
3#include <linux/init.h>
4#include <linux/io.h>
5#include <linux/platform_device.h>
6#include <linux/ata_platform.h>
7
8#include <asm/sibyte/board.h>
9#include <asm/sibyte/sb1250_genbus.h>
10#include <asm/sibyte/sb1250_regs.h>
11
12#if defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR)
13
14#define DRV_NAME "pata-swarm"
15
16#define SWARM_IDE_SHIFT 5
17#define SWARM_IDE_BASE 0x1f0
18#define SWARM_IDE_CTRL 0x3f6
19
20static struct resource swarm_pata_resource[] = {
21 {
22 .name = "Swarm GenBus IDE",
23 .flags = IORESOURCE_MEM,
24 }, {
25 .name = "Swarm GenBus IDE",
26 .flags = IORESOURCE_MEM,
27 }, {
28 .name = "Swarm GenBus IDE",
29 .flags = IORESOURCE_IRQ,
30 .start = K_INT_GB_IDE,
31 .end = K_INT_GB_IDE,
32 },
33};
34
35static struct pata_platform_info pata_platform_data = {
36 .ioport_shift = SWARM_IDE_SHIFT,
37};
38
39static struct platform_device swarm_pata_device = {
40 .name = "pata_platform",
41 .id = -1,
42 .resource = swarm_pata_resource,
43 .num_resources = ARRAY_SIZE(swarm_pata_resource),
44 .dev = {
45 .platform_data = &pata_platform_data,
46 .coherent_dma_mask = ~0, /* grumble */
47 },
48};
49
50static int __init swarm_pata_init(void)
51{
52 u8 __iomem *base;
53 phys_t offset, size;
54 struct resource *r;
55
56 if (!SIBYTE_HAVE_IDE)
57 return -ENODEV;
58
59 base = ioremap(A_IO_EXT_BASE, 0x800);
60 offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS));
61 size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS));
62 iounmap(base);
63
64 offset = G_IO_START_ADDR(offset) << S_IO_ADDRBASE;
65 size = (G_IO_MULT_SIZE(size) + 1) << S_IO_REGSIZE;
66 if (offset < A_PHYS_GENBUS || offset >= A_PHYS_GENBUS_END) {
67 pr_info(DRV_NAME ": PATA interface at GenBus disabled\n");
68
69 return -EBUSY;
70 }
71
72 pr_info(DRV_NAME ": PATA interface at GenBus slot %i\n", IDE_CS);
73
74 r = swarm_pata_resource;
75 r[0].start = offset + (SWARM_IDE_BASE << SWARM_IDE_SHIFT);
76 r[0].end = offset + ((SWARM_IDE_BASE + 8) << SWARM_IDE_SHIFT) - 1;
77 r[1].start = offset + (SWARM_IDE_CTRL << SWARM_IDE_SHIFT);
78 r[1].end = offset + ((SWARM_IDE_CTRL + 1) << SWARM_IDE_SHIFT) - 1;
79
80 return platform_device_register(&swarm_pata_device);
81}
82
83device_initcall(swarm_pata_init);
84
85#endif /* defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR) */
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index cba36a247e32..92dd1a0ca352 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -72,6 +72,7 @@ static void irq_dispatch(unsigned int irq)
72 cascade = irq_cascade + irq; 72 cascade = irq_cascade + irq;
73 if (cascade->get_irq != NULL) { 73 if (cascade->get_irq != NULL) {
74 unsigned int source_irq = irq; 74 unsigned int source_irq = irq;
75 int ret;
75 desc = irq_desc + source_irq; 76 desc = irq_desc + source_irq;
76 if (desc->chip->mask_ack) 77 if (desc->chip->mask_ack)
77 desc->chip->mask_ack(source_irq); 78 desc->chip->mask_ack(source_irq);
@@ -79,8 +80,9 @@ static void irq_dispatch(unsigned int irq)
79 desc->chip->mask(source_irq); 80 desc->chip->mask(source_irq);
80 desc->chip->ack(source_irq); 81 desc->chip->ack(source_irq);
81 } 82 }
82 irq = cascade->get_irq(irq); 83 ret = cascade->get_irq(irq);
83 if (irq < 0) 84 irq = ret;
85 if (ret < 0)
84 atomic_inc(&irq_err_count); 86 atomic_inc(&irq_err_count);
85 else 87 else
86 irq_dispatch(irq); 88 irq_dispatch(irq);
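
The vr41xx fix addresses a signedness trap: irq is an unsigned int, so assigning a negative error return from cascade->get_irq() to it and then testing irq < 0 can never detect the failure; the patch tests the signed return value first. A hypothetical, stand-alone sketch of the corrected pattern (get_cascaded_irq() is a stand-in, not a kernel API):

        #include <stdio.h>

        /* Stand-in for cascade->get_irq(); returns a negative value on error. */
        static int get_cascaded_irq(unsigned int irq)
        {
                (void)irq;              /* pretend the cascade lookup failed */
                return -1;
        }

        int main(void)
        {
                unsigned int irq = 5;
                int ret;

                /* Fixed pattern: keep the signed result around for the error test. */
                ret = get_cascaded_irq(irq);
                if (ret < 0)
                        puts("cascade lookup failed");
                else {
                        irq = ret;
                        printf("dispatching irq %u\n", irq);
                }

                return 0;
        }
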
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 761c434a2488..56c64ccc9c21 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -20,22 +20,8 @@ EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
20atomic_t irq_err_count; 20atomic_t irq_err_count;
21 21
22/* 22/*
23 * MN10300 INTC controller operations 23 * MN10300 interrupt controller operations
24 */ 24 */
25static void mn10300_cpupic_disable(unsigned int irq)
26{
27 u16 tmp = GxICR(irq);
28 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
29 tmp = GxICR(irq);
30}
31
32static void mn10300_cpupic_enable(unsigned int irq)
33{
34 u16 tmp = GxICR(irq);
35 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
36 tmp = GxICR(irq);
37}
38
39static void mn10300_cpupic_ack(unsigned int irq) 25static void mn10300_cpupic_ack(unsigned int irq)
40{ 26{
41 u16 tmp; 27 u16 tmp;
@@ -60,26 +46,54 @@ static void mn10300_cpupic_mask_ack(unsigned int irq)
60static void mn10300_cpupic_unmask(unsigned int irq) 46static void mn10300_cpupic_unmask(unsigned int irq)
61{ 47{
62 u16 tmp = GxICR(irq); 48 u16 tmp = GxICR(irq);
63 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; 49 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
64 tmp = GxICR(irq); 50 tmp = GxICR(irq);
65} 51}
66 52
67static void mn10300_cpupic_end(unsigned int irq) 53static void mn10300_cpupic_unmask_clear(unsigned int irq)
68{ 54{
55 /* the MN10300 PIC latches its interrupt request bit, even after the
56 * device has ceased to assert its interrupt line and the interrupt
57 * channel has been disabled in the PIC, so for level-triggered
58 * interrupts we need to clear the request bit when we re-enable */
69 u16 tmp = GxICR(irq); 59 u16 tmp = GxICR(irq);
70 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE; 60 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
71 tmp = GxICR(irq); 61 tmp = GxICR(irq);
72} 62}
73 63
74static struct irq_chip mn10300_cpu_pic = { 64/*
75 .name = "cpu", 65 * MN10300 PIC level-triggered IRQ handling.
76 .disable = mn10300_cpupic_disable, 66 *
77 .enable = mn10300_cpupic_enable, 67 * The PIC has no 'ACK' function per se. It is possible to clear individual
68 * channel latches, but each latch relatches whether or not the channel is
69 * masked, so we need to clear the latch when we unmask the channel.
70 *
71 * Also for this reason, we don't supply an ack() op (it's unused anyway if
72 * mask_ack() is provided), and mask_ack() just masks.
73 */
74static struct irq_chip mn10300_cpu_pic_level = {
75 .name = "cpu_l",
76 .disable = mn10300_cpupic_mask,
77 .enable = mn10300_cpupic_unmask_clear,
78 .ack = NULL,
79 .mask = mn10300_cpupic_mask,
80 .mask_ack = mn10300_cpupic_mask,
81 .unmask = mn10300_cpupic_unmask_clear,
82};
83
84/*
85 * MN10300 PIC edge-triggered IRQ handling.
86 *
87 * We use the latch clearing function of the PIC as the 'ACK' function.
88 */
89static struct irq_chip mn10300_cpu_pic_edge = {
90 .name = "cpu_e",
91 .disable = mn10300_cpupic_mask,
92 .enable = mn10300_cpupic_unmask,
78 .ack = mn10300_cpupic_ack, 93 .ack = mn10300_cpupic_ack,
79 .mask = mn10300_cpupic_mask, 94 .mask = mn10300_cpupic_mask,
80 .mask_ack = mn10300_cpupic_mask_ack, 95 .mask_ack = mn10300_cpupic_mask_ack,
81 .unmask = mn10300_cpupic_unmask, 96 .unmask = mn10300_cpupic_unmask,
82 .end = mn10300_cpupic_end,
83}; 97};
84 98
85/* 99/*
@@ -114,7 +128,8 @@ void set_intr_level(int irq, u16 level)
114 */ 128 */
115void set_intr_postackable(int irq) 129void set_intr_postackable(int irq)
116{ 130{
117 set_irq_handler(irq, handle_level_irq); 131 set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
132 handle_level_irq);
118} 133}
119 134
120/* 135/*
@@ -126,8 +141,12 @@ void __init init_IRQ(void)
126 141
127 for (irq = 0; irq < NR_IRQS; irq++) 142 for (irq = 0; irq < NR_IRQS; irq++)
128 if (irq_desc[irq].chip == &no_irq_type) 143 if (irq_desc[irq].chip == &no_irq_type)
129 set_irq_chip_and_handler(irq, &mn10300_cpu_pic, 144 /* due to the PIC latching interrupt requests, even
130 handle_edge_irq); 145 * when the IRQ is disabled, IRQ_PENDING is superfluous
146 * and we can use handle_level_irq() for edge-triggered
147 * interrupts */
148 set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
149 handle_level_irq);
131 unit_init_IRQ(); 150 unit_init_IRQ();
132} 151}
133 152
diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c
index babb7c2ac377..e4606586f94c 100644
--- a/arch/mn10300/kernel/time.c
+++ b/arch/mn10300/kernel/time.c
@@ -1,6 +1,6 @@
1/* MN10300 Low level time management 1/* MN10300 Low level time management
2 * 2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2007-2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from arch/i386/kernel/time.c 5 * - Derived from arch/i386/kernel/time.c
6 * 6 *
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/profile.h> 18#include <linux/profile.h>
19#include <linux/cnt32_to_63.h>
19#include <asm/irq.h> 20#include <asm/irq.h>
20#include <asm/div64.h> 21#include <asm/div64.h>
21#include <asm/processor.h> 22#include <asm/processor.h>
@@ -40,27 +41,54 @@ static struct irqaction timer_irq = {
40 .name = "timer", 41 .name = "timer",
41}; 42};
42 43
44static unsigned long sched_clock_multiplier;
45
43/* 46/*
44 * scheduler clock - returns current time in nanosec units. 47 * scheduler clock - returns current time in nanosec units.
45 */ 48 */
46unsigned long long sched_clock(void) 49unsigned long long sched_clock(void)
47{ 50{
48 union { 51 union {
49 unsigned long long l; 52 unsigned long long ll;
50 u32 w[2]; 53 unsigned l[2];
51 } quot; 54 } tsc64, result;
55 unsigned long tsc, tmp;
56 unsigned product[3]; /* 96-bit intermediate value */
57
58 /* read the TSC value
59 */
60 tsc = 0 - get_cycles(); /* get_cycles() counts down */
52 61
53 quot.w[0] = mn10300_last_tsc - get_cycles(); 62 /* expand to 64-bits.
54 quot.w[1] = 1000000000; 63 * - sched_clock() must be called once a minute or better or the
64 * following will go horribly wrong - see cnt32_to_63()
65 */
66 tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL;
55 67
56 asm("mulu %2,%3,%0,%1" 68 /* scale the 64-bit TSC value to a nanosecond value via a 96-bit
57 : "=r"(quot.w[1]), "=r"(quot.w[0]) 69 * intermediate
58 : "0"(quot.w[1]), "1"(quot.w[0]) 70 */
71 asm("mulu %2,%0,%3,%0 \n" /* LSW * mult -> 0:%3:%0 */
72 "mulu %2,%1,%2,%1 \n" /* MSW * mult -> %2:%1:0 */
73 "add %3,%1 \n"
74 "addc 0,%2 \n" /* result in %2:%1:%0 */
75 : "=r"(product[0]), "=r"(product[1]), "=r"(product[2]), "=r"(tmp)
76 : "0"(tsc64.l[0]), "1"(tsc64.l[1]), "2"(sched_clock_multiplier)
59 : "cc"); 77 : "cc");
60 78
61 do_div(quot.l, MN10300_TSCCLK); 79 result.l[0] = product[1] << 16 | product[0] >> 16;
80 result.l[1] = product[2] << 16 | product[1] >> 16;
62 81
63 return quot.l; 82 return result.ll;
83}
84
85/*
86 * initialise the scheduler clock
87 */
88static void __init mn10300_sched_clock_init(void)
89{
90 sched_clock_multiplier =
91 __muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK);
64} 92}
65 93
66/* 94/*
@@ -128,4 +156,6 @@ void __init time_init(void)
128 /* start the watchdog timer */ 156 /* start the watchdog timer */
129 watchdog_go(); 157 watchdog_go();
130#endif 158#endif
159
160 mn10300_sched_clock_init();
131} 161}
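
The reworked mn10300 sched_clock() extends the 32-bit TSC with cnt32_to_63() and scales it by a 16.16 fixed-point multiplier, NSEC_PER_SEC * 2^16 / MN10300_TSCCLK, computed once in mn10300_sched_clock_init() and applied through a 96-bit intermediate in inline assembly. A rough C model of the same arithmetic, assuming a made-up clock rate and using the compiler's 128-bit type (a GCC/Clang extension) in place of the hand-written multiply:

        #include <stdint.h>

        #define NSEC_PER_SEC   1000000000ULL
        #define EXAMPLE_TSC_HZ 33000000ULL      /* stand-in for MN10300_TSCCLK */

        /* 16.16 fixed-point nanoseconds-per-tick, computed once at init time. */
        static const uint64_t sched_clock_mult =
                (NSEC_PER_SEC << 16) / EXAMPLE_TSC_HZ;

        /* Scale a 63-bit tick count to nanoseconds.  The kernel builds the
         * 64x32 -> 96-bit product with three mulu/add instructions and shifts
         * right by 16; the 128-bit type here just models that arithmetic. */
        static uint64_t ticks_to_ns(uint64_t ticks)
        {
                return (uint64_t)(((unsigned __int128)ticks * sched_clock_mult) >> 16);
        }
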
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 78f092ca0316..33cf25025dac 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -174,7 +174,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
174 * If we're in an interrupt or have no user 174 * If we're in an interrupt or have no user
175 * context, we must not take the fault.. 175 * context, we must not take the fault..
176 */ 176 */
177 if (in_interrupt() || !mm) 177 if (in_atomic() || !mm)
178 goto no_context; 178 goto no_context;
179 179
180 down_read(&mm->mmap_sem); 180 down_read(&mm->mmap_sem);
diff --git a/arch/mn10300/unit-asb2303/unit-init.c b/arch/mn10300/unit-asb2303/unit-init.c
index 14b2c817cff8..70e8cb4ea266 100644
--- a/arch/mn10300/unit-asb2303/unit-init.c
+++ b/arch/mn10300/unit-asb2303/unit-init.c
@@ -51,7 +51,7 @@ void __init unit_init_IRQ(void)
51 switch (GET_XIRQ_TRIGGER(extnum)) { 51 switch (GET_XIRQ_TRIGGER(extnum)) {
52 case XIRQ_TRIGGER_HILEVEL: 52 case XIRQ_TRIGGER_HILEVEL:
53 case XIRQ_TRIGGER_LOWLEVEL: 53 case XIRQ_TRIGGER_LOWLEVEL:
54 set_irq_handler(XIRQ2IRQ(extnum), handle_level_irq); 54 set_intr_postackable(XIRQ2IRQ(extnum));
55 break; 55 break;
56 default: 56 default:
57 break; 57 break;
diff --git a/arch/mn10300/unit-asb2305/unit-init.c b/arch/mn10300/unit-asb2305/unit-init.c
index 6a352414a358..72812a9439ac 100644
--- a/arch/mn10300/unit-asb2305/unit-init.c
+++ b/arch/mn10300/unit-asb2305/unit-init.c
@@ -52,7 +52,7 @@ void __init unit_init_IRQ(void)
52 switch (GET_XIRQ_TRIGGER(extnum)) { 52 switch (GET_XIRQ_TRIGGER(extnum)) {
53 case XIRQ_TRIGGER_HILEVEL: 53 case XIRQ_TRIGGER_HILEVEL:
54 case XIRQ_TRIGGER_LOWLEVEL: 54 case XIRQ_TRIGGER_LOWLEVEL:
55 set_irq_handler(XIRQ2IRQ(extnum), handle_level_irq); 55 set_intr_postackable(XIRQ2IRQ(extnum));
56 break; 56 break;
57 default: 57 default:
58 break; 58 break;
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 717a3bc1352e..65d1a8454d2c 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -195,7 +195,7 @@ image-$(CONFIG_PPC_CELLEB) += zImage.pseries
195image-$(CONFIG_PPC_CHRP) += zImage.chrp 195image-$(CONFIG_PPC_CHRP) += zImage.chrp
196image-$(CONFIG_PPC_EFIKA) += zImage.chrp 196image-$(CONFIG_PPC_EFIKA) += zImage.chrp
197image-$(CONFIG_PPC_PMAC) += zImage.pmac 197image-$(CONFIG_PPC_PMAC) += zImage.pmac
198image-$(CONFIG_PPC_HOLLY) += zImage.holly 198image-$(CONFIG_PPC_HOLLY) += dtbImage.holly
199image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800 199image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800
200image-$(CONFIG_PPC_ISERIES) += zImage.iseries 200image-$(CONFIG_PPC_ISERIES) += zImage.iseries
201image-$(CONFIG_DEFAULT_UIMAGE) += uImage 201image-$(CONFIG_DEFAULT_UIMAGE) += uImage
diff --git a/arch/powerpc/boot/dts/holly.dts b/arch/powerpc/boot/dts/holly.dts
index f87fe7b9ced9..c6e11ebecebb 100644
--- a/arch/powerpc/boot/dts/holly.dts
+++ b/arch/powerpc/boot/dts/holly.dts
@@ -133,61 +133,61 @@
133 reg = <0x00007400 0x00000400>; 133 reg = <0x00007400 0x00000400>;
134 big-endian; 134 big-endian;
135 }; 135 };
136 };
136 137
137 pci@1000 { 138 pci@c0001000 {
138 device_type = "pci"; 139 device_type = "pci";
139 compatible = "tsi109-pci", "tsi108-pci"; 140 compatible = "tsi109-pci", "tsi108-pci";
140 #interrupt-cells = <1>; 141 #interrupt-cells = <1>;
141 #size-cells = <2>; 142 #size-cells = <2>;
142 #address-cells = <3>; 143 #address-cells = <3>;
143 reg = <0x00001000 0x00001000>; 144 reg = <0xc0001000 0x00001000>;
144 bus-range = <0x0 0x0>; 145 bus-range = <0x0 0x0>;
145 /*----------------------------------------------------+ 146 /*----------------------------------------------------+
146 | PCI memory range. 147 | PCI memory range.
147 | 01 denotes I/O space 148 | 01 denotes I/O space
148 | 02 denotes 32-bit memory space 149 | 02 denotes 32-bit memory space
149 +----------------------------------------------------*/ 150 +----------------------------------------------------*/
150 ranges = <0x02000000 0x00000000 0x40000000 0x40000000 0x00000000 0x10000000 151 ranges = <0x02000000 0x00000000 0x40000000 0x40000000 0x00000000 0x10000000
151 0x01000000 0x00000000 0x00000000 0x7e000000 0x00000000 0x00010000>; 152 0x01000000 0x00000000 0x00000000 0x7e000000 0x00000000 0x00010000>;
152 clock-frequency = <133333332>; 153 clock-frequency = <133333332>;
153 interrupt-parent = <&MPIC>; 154 interrupt-parent = <&MPIC>;
155 interrupts = <0x17 0x2>;
156 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
157 /*----------------------------------------------------+
158 | The INTA, INTB, INTC, INTD are shared.
159 +----------------------------------------------------*/
160 interrupt-map = <
161 0x800 0x0 0x0 0x1 &RT0 0x24 0x0
162 0x800 0x0 0x0 0x2 &RT0 0x25 0x0
163 0x800 0x0 0x0 0x3 &RT0 0x26 0x0
164 0x800 0x0 0x0 0x4 &RT0 0x27 0x0
165
166 0x1000 0x0 0x0 0x1 &RT0 0x25 0x0
167 0x1000 0x0 0x0 0x2 &RT0 0x26 0x0
168 0x1000 0x0 0x0 0x3 &RT0 0x27 0x0
169 0x1000 0x0 0x0 0x4 &RT0 0x24 0x0
170
171 0x1800 0x0 0x0 0x1 &RT0 0x26 0x0
172 0x1800 0x0 0x0 0x2 &RT0 0x27 0x0
173 0x1800 0x0 0x0 0x3 &RT0 0x24 0x0
174 0x1800 0x0 0x0 0x4 &RT0 0x25 0x0
175
176 0x2000 0x0 0x0 0x1 &RT0 0x27 0x0
177 0x2000 0x0 0x0 0x2 &RT0 0x24 0x0
178 0x2000 0x0 0x0 0x3 &RT0 0x25 0x0
179 0x2000 0x0 0x0 0x4 &RT0 0x26 0x0
180 >;
181
182 RT0: router@1180 {
183 device_type = "pic-router";
184 interrupt-controller;
185 big-endian;
186 clock-frequency = <0>;
187 #address-cells = <0>;
188 #interrupt-cells = <2>;
154 interrupts = <0x17 0x2>; 189 interrupts = <0x17 0x2>;
155 interrupt-map-mask = <0xf800 0x0 0x0 0x7>; 190 interrupt-parent = <&MPIC>;
156 /*----------------------------------------------------+
157 | The INTA, INTB, INTC, INTD are shared.
158 +----------------------------------------------------*/
159 interrupt-map = <
160 0x800 0x0 0x0 0x1 &RT0 0x24 0x0
161 0x800 0x0 0x0 0x2 &RT0 0x25 0x0
162 0x800 0x0 0x0 0x3 &RT0 0x26 0x0
163 0x800 0x0 0x0 0x4 &RT0 0x27 0x0
164
165 0x1000 0x0 0x0 0x1 &RT0 0x25 0x0
166 0x1000 0x0 0x0 0x2 &RT0 0x26 0x0
167 0x1000 0x0 0x0 0x3 &RT0 0x27 0x0
168 0x1000 0x0 0x0 0x4 &RT0 0x24 0x0
169
170 0x1800 0x0 0x0 0x1 &RT0 0x26 0x0
171 0x1800 0x0 0x0 0x2 &RT0 0x27 0x0
172 0x1800 0x0 0x0 0x3 &RT0 0x24 0x0
173 0x1800 0x0 0x0 0x4 &RT0 0x25 0x0
174
175 0x2000 0x0 0x0 0x1 &RT0 0x27 0x0
176 0x2000 0x0 0x0 0x2 &RT0 0x24 0x0
177 0x2000 0x0 0x0 0x3 &RT0 0x25 0x0
178 0x2000 0x0 0x0 0x4 &RT0 0x26 0x0
179 >;
180
181 RT0: router@1180 {
182 device_type = "pic-router";
183 interrupt-controller;
184 big-endian;
185 clock-frequency = <0>;
186 #address-cells = <0>;
187 #interrupt-cells = <2>;
188 interrupts = <0x17 0x2>;
189 interrupt-parent = <&MPIC>;
190 };
191 }; 191 };
192 }; 192 };
193 193
diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
index 3b3a1062cb25..584a4f184eb2 100644
--- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
@@ -281,7 +281,7 @@
281 cell-index = <0>; 281 cell-index = <0>;
282 reg = <0x0 0x80>; 282 reg = <0x0 0x80>;
283 interrupt-parent = <&mpic>; 283 interrupt-parent = <&mpic>;
284 interrupts = <60 2>; 284 interrupts = <76 2>;
285 }; 285 };
286 dma-channel@1 { 286 dma-channel@1 {
287 compatible = "fsl,mpc8610-dma-channel", 287 compatible = "fsl,mpc8610-dma-channel",
@@ -289,7 +289,7 @@
289 cell-index = <1>; 289 cell-index = <1>;
290 reg = <0x80 0x80>; 290 reg = <0x80 0x80>;
291 interrupt-parent = <&mpic>; 291 interrupt-parent = <&mpic>;
292 interrupts = <61 2>; 292 interrupts = <77 2>;
293 }; 293 };
294 dma-channel@2 { 294 dma-channel@2 {
295 compatible = "fsl,mpc8610-dma-channel", 295 compatible = "fsl,mpc8610-dma-channel",
@@ -297,7 +297,7 @@
297 cell-index = <2>; 297 cell-index = <2>;
298 reg = <0x100 0x80>; 298 reg = <0x100 0x80>;
299 interrupt-parent = <&mpic>; 299 interrupt-parent = <&mpic>;
300 interrupts = <62 2>; 300 interrupts = <78 2>;
301 }; 301 };
302 dma-channel@3 { 302 dma-channel@3 {
303 compatible = "fsl,mpc8610-dma-channel", 303 compatible = "fsl,mpc8610-dma-channel",
@@ -305,7 +305,7 @@
305 cell-index = <3>; 305 cell-index = <3>;
306 reg = <0x180 0x80>; 306 reg = <0x180 0x80>;
307 interrupt-parent = <&mpic>; 307 interrupt-parent = <&mpic>;
308 interrupts = <63 2>; 308 interrupts = <79 2>;
309 }; 309 };
310 }; 310 };
311 311
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 80d1f399ee51..64c6ee22eefd 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -409,6 +409,13 @@ do { \
409/* Keep this the last entry. */ 409/* Keep this the last entry. */
410#define R_PPC64_NUM 107 410#define R_PPC64_NUM 107
411 411
412/* There's actually a third entry here, but it's unused */
413struct ppc64_opd_entry
414{
415 unsigned long funcaddr;
416 unsigned long r2;
417};
418
412#ifdef __KERNEL__ 419#ifdef __KERNEL__
413 420
414#ifdef CONFIG_SPU_BASE 421#ifdef CONFIG_SPU_BASE
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 7710e9e6660f..07956f3e7844 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -2,6 +2,8 @@
2#define _ASM_POWERPC_SECTIONS_H 2#define _ASM_POWERPC_SECTIONS_H
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#include <linux/elf.h>
6#include <linux/uaccess.h>
5#include <asm-generic/sections.h> 7#include <asm-generic/sections.h>
6 8
7#ifdef __powerpc64__ 9#ifdef __powerpc64__
@@ -17,7 +19,15 @@ static inline int in_kernel_text(unsigned long addr)
17} 19}
18 20
19#undef dereference_function_descriptor 21#undef dereference_function_descriptor
20void *dereference_function_descriptor(void *); 22static inline void *dereference_function_descriptor(void *ptr)
23{
24 struct ppc64_opd_entry *desc = ptr;
25 void *p;
26
27 if (!probe_kernel_address(&desc->funcaddr, p))
28 ptr = p;
29 return ptr;
30}
21 31
22#endif 32#endif
23 33
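
Moving dereference_function_descriptor() into sections.h leans on the PPC64 ELFv1 convention that a function pointer addresses an OPD descriptor whose first word is the real entry point. A simplified stand-alone sketch follows; the kernel reads the descriptor through probe_kernel_address() so a bogus pointer cannot fault, which the plain memcpy here does not emulate.

        #include <string.h>

        /* Same two used fields as struct ppc64_opd_entry (a third exists but is
         * unused). */
        struct opd_entry {
                unsigned long funcaddr;         /* real code entry point */
                unsigned long r2;               /* TOC pointer */
        };

        /* Recover the code address hidden behind a PPC64 function descriptor. */
        static void *deref_descriptor(void *ptr)
        {
                struct opd_entry desc;

                memcpy(&desc, ptr, sizeof(desc));   /* kernel: probe_kernel_address() */
                return (void *)desc.funcaddr;
        }
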
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index d308a9f70f1b..31982d05d81a 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -34,11 +34,7 @@
34#include <asm/smp.h> 34#include <asm/smp.h>
35 35
36#ifdef CONFIG_HOTPLUG_CPU 36#ifdef CONFIG_HOTPLUG_CPU
37/* this is used for software suspend, and that shuts down 37#define cpu_should_die() cpu_is_offline(smp_processor_id())
38 * CPUs even while the system is still booting... */
39#define cpu_should_die() (cpu_is_offline(smp_processor_id()) && \
40 (system_state == SYSTEM_RUNNING \
41 || system_state == SYSTEM_BOOTING))
42#else 38#else
43#define cpu_should_die() 0 39#define cpu_should_die() 0
44#endif 40#endif
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index b4fdf2f2743c..fe8f71dd0b3f 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -347,9 +347,8 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
347 linux_regs->msr |= MSR_SE; 347 linux_regs->msr |= MSR_SE;
348#endif 348#endif
349 kgdb_single_step = 1; 349 kgdb_single_step = 1;
350 if (kgdb_contthread) 350 atomic_set(&kgdb_cpu_doing_single_step,
351 atomic_set(&kgdb_cpu_doing_single_step, 351 raw_smp_processor_id());
352 raw_smp_processor_id());
353 } 352 }
354 return 0; 353 return 0;
355 } 354 }
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ad79de272ff3..1af2377e4992 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -21,9 +21,7 @@
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/bug.h> 23#include <linux/bug.h>
24#include <linux/uaccess.h>
25#include <asm/module.h> 24#include <asm/module.h>
26#include <asm/sections.h>
27#include <asm/firmware.h> 25#include <asm/firmware.h>
28#include <asm/code-patching.h> 26#include <asm/code-patching.h>
29#include <linux/sort.h> 27#include <linux/sort.h>
@@ -43,13 +41,6 @@
43#define DEBUGP(fmt , ...) 41#define DEBUGP(fmt , ...)
44#endif 42#endif
45 43
46/* There's actually a third entry here, but it's unused */
47struct ppc64_opd_entry
48{
49 unsigned long funcaddr;
50 unsigned long r2;
51};
52
53/* Like PPC32, we need little trampolines to do > 24-bit jumps (into 44/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
54 the kernel itself). But on PPC64, these need to be used for every 45 the kernel itself). But on PPC64, these need to be used for every
55 jump, actually, to reset r2 (TOC+0x8000). */ 46 jump, actually, to reset r2 (TOC+0x8000). */
@@ -452,13 +443,3 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
452 443
453 return 0; 444 return 0;
454} 445}
455
456void *dereference_function_descriptor(void *ptr)
457{
458 struct ppc64_opd_entry *desc = ptr;
459 void *p;
460
461 if (!probe_kernel_address(&desc->funcaddr, p))
462 ptr = p;
463 return ptr;
464}
diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c
index ef74a0763ec1..8c619963becc 100644
--- a/arch/powerpc/platforms/fsl_uli1575.c
+++ b/arch/powerpc/platforms/fsl_uli1575.c
@@ -219,11 +219,21 @@ static void __devinit quirk_final_uli5249(struct pci_dev *dev)
219 int i; 219 int i;
220 u8 *dummy; 220 u8 *dummy;
221 struct pci_bus *bus = dev->bus; 221 struct pci_bus *bus = dev->bus;
222 resource_size_t end = 0;
223
224 for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCES+3; i++) {
225 unsigned long flags = pci_resource_flags(dev, i);
226 if ((flags & (IORESOURCE_MEM|IORESOURCE_PREFETCH)) == IORESOURCE_MEM)
227 end = pci_resource_end(dev, i);
228 }
222 229
223 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 230 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
224 if ((bus->resource[i]) && 231 if ((bus->resource[i]) &&
225 (bus->resource[i]->flags & IORESOURCE_MEM)) { 232 (bus->resource[i]->flags & IORESOURCE_MEM)) {
226 dummy = ioremap(bus->resource[i]->end - 3, 0x4); 233 if (bus->resource[i]->end == end)
234 dummy = ioremap(bus->resource[i]->start, 0x4);
235 else
236 dummy = ioremap(bus->resource[i]->end - 3, 0x4);
227 if (dummy) { 237 if (dummy) {
228 in_8(dummy); 238 in_8(dummy);
229 iounmap(dummy); 239 iounmap(dummy);
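The uli5249 quirk above now scans the bridge's own forwarded windows first, remembers where the non-prefetchable memory window ends, and remaps from the start of the matching bus resource instead of its last bytes. A self-contained sketch of that first step, with nonprefetch_mem_end() as an illustrative helper name rather than anything taken from the file:

#include <linux/pci.h>
#include <linux/ioport.h>

static resource_size_t nonprefetch_mem_end(struct pci_dev *bridge)
{
        resource_size_t end = 0;
        int i;

        for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCES + 3; i++) {
                unsigned long flags = pci_resource_flags(bridge, i);

                /* a memory window, but not the prefetchable one */
                if ((flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) ==
                    IORESOURCE_MEM)
                        end = pci_resource_end(bridge, i);
        }
        return end;
}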
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index ca114fe46ffb..06acb1a18bbc 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -169,6 +169,8 @@ void init_cpu_timer(void)
169 169
170static void clock_comparator_interrupt(__u16 code) 170static void clock_comparator_interrupt(__u16 code)
171{ 171{
172 if (S390_lowcore.clock_comparator == -1ULL)
173 set_clock_comparator(S390_lowcore.clock_comparator);
172} 174}
173 175
174static void etr_timing_alert(struct etr_irq_parm *); 176static void etr_timing_alert(struct etr_irq_parm *);
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index fc6ab6094df8..0953cee05efc 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -1,14 +1,9 @@
1/* 1/*
2 * arch/s390/lib/delay.c
3 * Precise Delay Loops for S390 2 * Precise Delay Loops for S390
4 * 3 *
5 * S390 version 4 * Copyright IBM Corp. 1999,2008
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 *
9 * Derived from "arch/i386/lib/delay.c"
10 * Copyright (C) 1993 Linus Torvalds
11 * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
12 */ 7 */
13 8
14#include <linux/sched.h> 9#include <linux/sched.h>
@@ -29,30 +24,31 @@ void __delay(unsigned long loops)
29 asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); 24 asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
30} 25}
31 26
32/* 27static void __udelay_disabled(unsigned long usecs)
33 * Waits for 'usecs' microseconds using the TOD clock comparator.
34 */
35void __udelay(unsigned long usecs)
36{ 28{
37 u64 end, time, old_cc = 0; 29 unsigned long mask, cr0, cr0_saved;
38 unsigned long flags, cr0, mask, dummy; 30 u64 clock_saved;
39 int irq_context;
40 31
41 irq_context = in_interrupt(); 32 clock_saved = local_tick_disable();
42 if (!irq_context) 33 set_clock_comparator(get_clock() + ((u64) usecs << 12));
43 local_bh_disable(); 34 __ctl_store(cr0_saved, 0, 0);
44 local_irq_save(flags); 35 cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
45 if (raw_irqs_disabled_flags(flags)) { 36 __ctl_load(cr0 , 0, 0);
46 old_cc = local_tick_disable(); 37 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
47 S390_lowcore.clock_comparator = -1ULL; 38 trace_hardirqs_on();
48 __ctl_store(cr0, 0, 0); 39 __load_psw_mask(mask);
49 dummy = (cr0 & 0xffff00e0) | 0x00000800; 40 local_irq_disable();
50 __ctl_load(dummy , 0, 0); 41 __ctl_load(cr0_saved, 0, 0);
51 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; 42 local_tick_enable(clock_saved);
52 } else 43 set_clock_comparator(S390_lowcore.clock_comparator);
53 mask = psw_kernel_bits | PSW_MASK_WAIT | 44}
54 PSW_MASK_EXT | PSW_MASK_IO;
55 45
46static void __udelay_enabled(unsigned long usecs)
47{
48 unsigned long mask;
49 u64 end, time;
50
51 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO;
56 end = get_clock() + ((u64) usecs << 12); 52 end = get_clock() + ((u64) usecs << 12);
57 do { 53 do {
58 time = end < S390_lowcore.clock_comparator ? 54 time = end < S390_lowcore.clock_comparator ?
@@ -62,13 +58,37 @@ void __udelay(unsigned long usecs)
62 __load_psw_mask(mask); 58 __load_psw_mask(mask);
63 local_irq_disable(); 59 local_irq_disable();
64 } while (get_clock() < end); 60 } while (get_clock() < end);
61 set_clock_comparator(S390_lowcore.clock_comparator);
62}
65 63
66 if (raw_irqs_disabled_flags(flags)) { 64/*
67 __ctl_load(cr0, 0, 0); 65 * Waits for 'usecs' microseconds using the TOD clock comparator.
68 local_tick_enable(old_cc); 66 */
67void __udelay(unsigned long usecs)
68{
69 unsigned long flags;
70
71 preempt_disable();
72 local_irq_save(flags);
73 if (in_irq()) {
74 __udelay_disabled(usecs);
75 goto out;
76 }
77 if (in_softirq()) {
78 if (raw_irqs_disabled_flags(flags))
79 __udelay_disabled(usecs);
80 else
81 __udelay_enabled(usecs);
82 goto out;
69 } 83 }
70 if (!irq_context) 84 if (raw_irqs_disabled_flags(flags)) {
85 local_bh_disable();
86 __udelay_disabled(usecs);
71 _local_bh_enable(); 87 _local_bh_enable();
72 set_clock_comparator(S390_lowcore.clock_comparator); 88 goto out;
89 }
90 __udelay_enabled(usecs);
91out:
73 local_irq_restore(flags); 92 local_irq_restore(flags);
93 preempt_enable();
74} 94}
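Stripped of the s390 PSW details, the reworked __udelay() above is a context dispatcher: it picks an interrupts-off delay variant when it cannot rely on the clock comparator interrupt being delivered normally, and an interrupts-on variant otherwise. A simplified skeleton of that dispatch; delay_irqoff() and delay_irqon() stand in for __udelay_disabled()/__udelay_enabled(), the softirq and bottom-half special cases are folded away, and raw_irqs_disabled_flags() is assumed to be provided by the architecture, as it is on s390:

#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <asm/irqflags.h>

static void delay_dispatch(unsigned long usecs,
                           void (*delay_irqoff)(unsigned long),
                           void (*delay_irqon)(unsigned long))
{
        unsigned long flags;

        preempt_disable();
        local_irq_save(flags);
        if (in_irq() || raw_irqs_disabled_flags(flags))
                delay_irqoff(usecs);    /* must not reopen interrupts */
        else
                delay_irqon(usecs);     /* may wait with interrupts on */
        local_irq_restore(flags);
        preempt_enable();
}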
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index c481d45f97b7..f58c537446a8 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -241,7 +241,7 @@ static int of_bus_sbus_map(u32 *addr, const u32 *range, int na, int ns, int pna)
241 return of_bus_default_map(addr, range, na, ns, pna); 241 return of_bus_default_map(addr, range, na, ns, pna);
242} 242}
243 243
244static unsigned int of_bus_sbus_get_flags(const u32 *addr) 244static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
245{ 245{
246 return IORESOURCE_MEM; 246 return IORESOURCE_MEM;
247} 247}
diff --git a/arch/sparc/kernel/ptrace.c b/arch/sparc/kernel/ptrace.c
index 20699c701412..8ce6285a06d5 100644
--- a/arch/sparc/kernel/ptrace.c
+++ b/arch/sparc/kernel/ptrace.c
@@ -288,7 +288,7 @@ static const struct user_regset sparc32_regsets[] = {
288 */ 288 */
289 [REGSET_GENERAL] = { 289 [REGSET_GENERAL] = {
290 .core_note_type = NT_PRSTATUS, 290 .core_note_type = NT_PRSTATUS,
291 .n = 38 * sizeof(u32), 291 .n = 38,
292 .size = sizeof(u32), .align = sizeof(u32), 292 .size = sizeof(u32), .align = sizeof(u32),
293 .get = genregs32_get, .set = genregs32_set 293 .get = genregs32_get, .set = genregs32_set
294 }, 294 },
@@ -304,7 +304,7 @@ static const struct user_regset sparc32_regsets[] = {
304 */ 304 */
305 [REGSET_FP] = { 305 [REGSET_FP] = {
306 .core_note_type = NT_PRFPREG, 306 .core_note_type = NT_PRFPREG,
307 .n = 99 * sizeof(u32), 307 .n = 99,
308 .size = sizeof(u32), .align = sizeof(u32), 308 .size = sizeof(u32), .align = sizeof(u32),
309 .get = fpregs32_get, .set = fpregs32_set 309 .get = fpregs32_get, .set = fpregs32_set
310 }, 310 },
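Both regset fixes above (and the matching sparc64 ones later in this section) come down to one rule: the regset core sizes a note as .n * .size, so .n is the number of registers, not a byte count; multiplying by sizeof(u32) inflated the dumped notes four-fold. A hedged sketch of a correctly sized 32-bit general-register set, with the .get/.set callbacks omitted:

#include <linux/types.h>
#include <linux/elf.h>
#include <linux/regset.h>

static const struct user_regset example_regset = {
        .core_note_type = NT_PRSTATUS,
        .n              = 38,                  /* register count, not bytes */
        .size           = sizeof(u32),         /* bytes per register */
        .align          = sizeof(u32),
        /* .get and .set callbacks omitted from this sketch */
};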
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 23963882bc18..7495bc774685 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/linkage.h>
10#include <linux/ptrace.h> 11#include <linux/ptrace.h>
11#include <linux/errno.h> 12#include <linux/errno.h>
12#include <linux/kernel_stat.h> 13#include <linux/kernel_stat.h>
@@ -866,7 +867,7 @@ static void kill_prom_timer(void)
866 : "g1", "g2"); 867 : "g1", "g2");
867} 868}
868 869
869void init_irqwork_curcpu(void) 870void notrace init_irqwork_curcpu(void)
870{ 871{
871 int cpu = hard_smp_processor_id(); 872 int cpu = hard_smp_processor_id();
872 873
@@ -897,7 +898,7 @@ static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type
897 } 898 }
898} 899}
899 900
900void __cpuinit sun4v_register_mondo_queues(int this_cpu) 901void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
901{ 902{
902 struct trap_per_cpu *tb = &trap_block[this_cpu]; 903 struct trap_per_cpu *tb = &trap_block[this_cpu];
903 904
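The notrace annotations above (and the one added to init_cur_cpu_trap() further down) keep the compiler's mcount instrumentation out of functions that run before the per-cpu trap state is registered; the new <linux/linkage.h> include is what provides the notrace macro. A minimal sketch of the pattern, with early_percpu_setup() as a made-up example function:

#include <linux/linkage.h>      /* provides notrace */

void notrace early_percpu_setup(void)
{
        /*
         * Runs before ftrace can safely record entries for this CPU,
         * so no mcount call may be emitted for this function.
         */
}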
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index f845f150f565..100ebd527499 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -169,7 +169,7 @@ static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long fla
169 169
170static int of_bus_pci_match(struct device_node *np) 170static int of_bus_pci_match(struct device_node *np)
171{ 171{
172 if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) { 172 if (!strcmp(np->name, "pci")) {
173 const char *model = of_get_property(np, "model", NULL); 173 const char *model = of_get_property(np, "model", NULL);
174 174
175 if (model && !strcmp(model, "SUNW,simba")) 175 if (model && !strcmp(model, "SUNW,simba"))
@@ -200,7 +200,7 @@ static int of_bus_simba_match(struct device_node *np)
200 /* Treat PCI busses lacking ranges property just like 200 /* Treat PCI busses lacking ranges property just like
201 * simba. 201 * simba.
202 */ 202 */
203 if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) { 203 if (!strcmp(np->name, "pci")) {
204 if (!of_find_property(np, "ranges", NULL)) 204 if (!of_find_property(np, "ranges", NULL))
205 return 1; 205 return 1;
206 } 206 }
@@ -429,7 +429,7 @@ static int __init use_1to1_mapping(struct device_node *pp)
429 * it lacks a ranges property, and this will include 429 * it lacks a ranges property, and this will include
430 * cases like Simba. 430 * cases like Simba.
431 */ 431 */
432 if (!strcmp(pp->type, "pci") || !strcmp(pp->type, "pciex")) 432 if (!strcmp(pp->name, "pci"))
433 return 0; 433 return 0;
434 434
435 return 1; 435 return 1;
@@ -714,8 +714,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
714 break; 714 break;
715 } 715 }
716 } else { 716 } else {
717 if (!strcmp(pp->type, "pci") || 717 if (!strcmp(pp->name, "pci")) {
718 !strcmp(pp->type, "pciex")) {
719 unsigned int this_orig_irq = irq; 718 unsigned int this_orig_irq = irq;
720 719
721 irq = pci_irq_swizzle(dp, pp, irq); 720 irq = pci_irq_swizzle(dp, pp, irq);
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 55096195458f..80dad76f8b81 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -425,7 +425,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
425 dev->current_state = 4; /* unknown power state */ 425 dev->current_state = 4; /* unknown power state */
426 dev->error_state = pci_channel_io_normal; 426 dev->error_state = pci_channel_io_normal;
427 427
428 if (!strcmp(type, "pci") || !strcmp(type, "pciex")) { 428 if (!strcmp(node->name, "pci")) {
429 /* a PCI-PCI bridge */ 429 /* a PCI-PCI bridge */
430 dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; 430 dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
431 dev->rom_base_reg = PCI_ROM_ADDRESS1; 431 dev->rom_base_reg = PCI_ROM_ADDRESS1;
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index ef5fe29202c2..f85b6bebb0be 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -575,7 +575,7 @@ static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm
575{ 575{
576 unsigned long csr_reg, csr, csr_error_bits; 576 unsigned long csr_reg, csr, csr_error_bits;
577 irqreturn_t ret = IRQ_NONE; 577 irqreturn_t ret = IRQ_NONE;
578 u16 stat; 578 u16 stat, *addr;
579 579
580 if (is_pbm_a) { 580 if (is_pbm_a) {
581 csr_reg = pbm->controller_regs + PSYCHO_PCIA_CTRL; 581 csr_reg = pbm->controller_regs + PSYCHO_PCIA_CTRL;
@@ -597,7 +597,9 @@ static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm
597 printk("%s: PCI SERR signal asserted.\n", pbm->name); 597 printk("%s: PCI SERR signal asserted.\n", pbm->name);
598 ret = IRQ_HANDLED; 598 ret = IRQ_HANDLED;
599 } 599 }
600 pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat); 600 addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
601 0, PCI_STATUS);
602 pci_config_read16(addr, &stat);
601 if (stat & (PCI_STATUS_PARITY | 603 if (stat & (PCI_STATUS_PARITY |
602 PCI_STATUS_SIG_TARGET_ABORT | 604 PCI_STATUS_SIG_TARGET_ABORT |
603 PCI_STATUS_REC_TARGET_ABORT | 605 PCI_STATUS_REC_TARGET_ABORT |
@@ -605,7 +607,7 @@ static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm
605 PCI_STATUS_SIG_SYSTEM_ERROR)) { 607 PCI_STATUS_SIG_SYSTEM_ERROR)) {
606 printk("%s: PCI bus error, PCI_STATUS[%04x]\n", 608 printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
607 pbm->name, stat); 609 pbm->name, stat);
608 pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff); 610 pci_config_write16(addr, 0xffff);
609 ret = IRQ_HANDLED; 611 ret = IRQ_HANDLED;
610 } 612 }
611 return ret; 613 return ret;
@@ -744,16 +746,16 @@ static void psycho_register_error_handlers(struct pci_pbm_info *pbm)
744 * the second will just error out since we do not pass in 746 * the second will just error out since we do not pass in
745 * IRQF_SHARED. 747 * IRQF_SHARED.
746 */ 748 */
747 err = request_irq(op->irqs[1], psycho_ue_intr, 0, 749 err = request_irq(op->irqs[1], psycho_ue_intr, IRQF_SHARED,
748 "PSYCHO_UE", pbm); 750 "PSYCHO_UE", pbm);
749 err = request_irq(op->irqs[2], psycho_ce_intr, 0, 751 err = request_irq(op->irqs[2], psycho_ce_intr, IRQF_SHARED,
750 "PSYCHO_CE", pbm); 752 "PSYCHO_CE", pbm);
751 753
752 /* This one, however, ought not to fail. We can just warn 754 /* This one, however, ought not to fail. We can just warn
753 * about it since the system can still operate properly even 755 * about it since the system can still operate properly even
754 * if this fails. 756 * if this fails.
755 */ 757 */
756 err = request_irq(op->irqs[0], psycho_pcierr_intr, 0, 758 err = request_irq(op->irqs[0], psycho_pcierr_intr, IRQF_SHARED,
757 "PSYCHO_PCIERR", pbm); 759 "PSYCHO_PCIERR", pbm);
758 if (err) 760 if (err)
759 printk(KERN_WARNING "%s: Could not register PCIERR, " 761 printk(KERN_WARNING "%s: Could not register PCIERR, "
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index 3c048ac4e638..7151513f156e 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -156,55 +156,11 @@ static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
156 return PSYCHO_IMAP_B_SLOT0 + (slot * 8); 156 return PSYCHO_IMAP_B_SLOT0 + (slot * 8);
157} 157}
158 158
159#define PSYCHO_IMAP_SCSI 0x1000UL 159#define PSYCHO_OBIO_IMAP_BASE 0x1000UL
160#define PSYCHO_IMAP_ETH 0x1008UL 160
161#define PSYCHO_IMAP_BPP 0x1010UL
162#define PSYCHO_IMAP_AU_REC 0x1018UL
163#define PSYCHO_IMAP_AU_PLAY 0x1020UL
164#define PSYCHO_IMAP_PFAIL 0x1028UL
165#define PSYCHO_IMAP_KMS 0x1030UL
166#define PSYCHO_IMAP_FLPY 0x1038UL
167#define PSYCHO_IMAP_SHW 0x1040UL
168#define PSYCHO_IMAP_KBD 0x1048UL
169#define PSYCHO_IMAP_MS 0x1050UL
170#define PSYCHO_IMAP_SER 0x1058UL
171#define PSYCHO_IMAP_TIM0 0x1060UL
172#define PSYCHO_IMAP_TIM1 0x1068UL
173#define PSYCHO_IMAP_UE 0x1070UL
174#define PSYCHO_IMAP_CE 0x1078UL
175#define PSYCHO_IMAP_A_ERR 0x1080UL
176#define PSYCHO_IMAP_B_ERR 0x1088UL
177#define PSYCHO_IMAP_PMGMT 0x1090UL
178#define PSYCHO_IMAP_GFX 0x1098UL
179#define PSYCHO_IMAP_EUPA 0x10a0UL
180
181static unsigned long __psycho_onboard_imap_off[] = {
182/*0x20*/ PSYCHO_IMAP_SCSI,
183/*0x21*/ PSYCHO_IMAP_ETH,
184/*0x22*/ PSYCHO_IMAP_BPP,
185/*0x23*/ PSYCHO_IMAP_AU_REC,
186/*0x24*/ PSYCHO_IMAP_AU_PLAY,
187/*0x25*/ PSYCHO_IMAP_PFAIL,
188/*0x26*/ PSYCHO_IMAP_KMS,
189/*0x27*/ PSYCHO_IMAP_FLPY,
190/*0x28*/ PSYCHO_IMAP_SHW,
191/*0x29*/ PSYCHO_IMAP_KBD,
192/*0x2a*/ PSYCHO_IMAP_MS,
193/*0x2b*/ PSYCHO_IMAP_SER,
194/*0x2c*/ PSYCHO_IMAP_TIM0,
195/*0x2d*/ PSYCHO_IMAP_TIM1,
196/*0x2e*/ PSYCHO_IMAP_UE,
197/*0x2f*/ PSYCHO_IMAP_CE,
198/*0x30*/ PSYCHO_IMAP_A_ERR,
199/*0x31*/ PSYCHO_IMAP_B_ERR,
200/*0x32*/ PSYCHO_IMAP_PMGMT,
201/*0x33*/ PSYCHO_IMAP_GFX,
202/*0x34*/ PSYCHO_IMAP_EUPA,
203};
204#define PSYCHO_ONBOARD_IRQ_BASE 0x20 161#define PSYCHO_ONBOARD_IRQ_BASE 0x20
205#define PSYCHO_ONBOARD_IRQ_LAST 0x34
206#define psycho_onboard_imap_offset(__ino) \ 162#define psycho_onboard_imap_offset(__ino) \
207 __psycho_onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE] 163 (PSYCHO_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))
208 164
209#define PSYCHO_ICLR_A_SLOT0 0x1400UL 165#define PSYCHO_ICLR_A_SLOT0 0x1400UL
210#define PSYCHO_ICLR_SCSI 0x1800UL 166#define PSYCHO_ICLR_SCSI 0x1800UL
@@ -228,10 +184,6 @@ static unsigned int psycho_irq_build(struct device_node *dp,
228 imap_off = psycho_pcislot_imap_offset(ino); 184 imap_off = psycho_pcislot_imap_offset(ino);
229 } else { 185 } else {
230 /* Onboard device */ 186 /* Onboard device */
231 if (ino > PSYCHO_ONBOARD_IRQ_LAST) {
232 prom_printf("psycho_irq_build: Wacky INO [%x]\n", ino);
233 prom_halt();
234 }
235 imap_off = psycho_onboard_imap_offset(ino); 187 imap_off = psycho_onboard_imap_offset(ino);
236 } 188 }
237 189
@@ -318,23 +270,6 @@ static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
318 270
319#define SABRE_IMAP_A_SLOT0 0x0c00UL 271#define SABRE_IMAP_A_SLOT0 0x0c00UL
320#define SABRE_IMAP_B_SLOT0 0x0c20UL 272#define SABRE_IMAP_B_SLOT0 0x0c20UL
321#define SABRE_IMAP_SCSI 0x1000UL
322#define SABRE_IMAP_ETH 0x1008UL
323#define SABRE_IMAP_BPP 0x1010UL
324#define SABRE_IMAP_AU_REC 0x1018UL
325#define SABRE_IMAP_AU_PLAY 0x1020UL
326#define SABRE_IMAP_PFAIL 0x1028UL
327#define SABRE_IMAP_KMS 0x1030UL
328#define SABRE_IMAP_FLPY 0x1038UL
329#define SABRE_IMAP_SHW 0x1040UL
330#define SABRE_IMAP_KBD 0x1048UL
331#define SABRE_IMAP_MS 0x1050UL
332#define SABRE_IMAP_SER 0x1058UL
333#define SABRE_IMAP_UE 0x1070UL
334#define SABRE_IMAP_CE 0x1078UL
335#define SABRE_IMAP_PCIERR 0x1080UL
336#define SABRE_IMAP_GFX 0x1098UL
337#define SABRE_IMAP_EUPA 0x10a0UL
338#define SABRE_ICLR_A_SLOT0 0x1400UL 273#define SABRE_ICLR_A_SLOT0 0x1400UL
339#define SABRE_ICLR_B_SLOT0 0x1480UL 274#define SABRE_ICLR_B_SLOT0 0x1480UL
340#define SABRE_ICLR_SCSI 0x1800UL 275#define SABRE_ICLR_SCSI 0x1800UL
@@ -364,33 +299,10 @@ static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
364 return SABRE_IMAP_B_SLOT0 + (slot * 8); 299 return SABRE_IMAP_B_SLOT0 + (slot * 8);
365} 300}
366 301
367static unsigned long __sabre_onboard_imap_off[] = { 302#define SABRE_OBIO_IMAP_BASE 0x1000UL
368/*0x20*/ SABRE_IMAP_SCSI, 303#define SABRE_ONBOARD_IRQ_BASE 0x20
369/*0x21*/ SABRE_IMAP_ETH,
370/*0x22*/ SABRE_IMAP_BPP,
371/*0x23*/ SABRE_IMAP_AU_REC,
372/*0x24*/ SABRE_IMAP_AU_PLAY,
373/*0x25*/ SABRE_IMAP_PFAIL,
374/*0x26*/ SABRE_IMAP_KMS,
375/*0x27*/ SABRE_IMAP_FLPY,
376/*0x28*/ SABRE_IMAP_SHW,
377/*0x29*/ SABRE_IMAP_KBD,
378/*0x2a*/ SABRE_IMAP_MS,
379/*0x2b*/ SABRE_IMAP_SER,
380/*0x2c*/ 0 /* reserved */,
381/*0x2d*/ 0 /* reserved */,
382/*0x2e*/ SABRE_IMAP_UE,
383/*0x2f*/ SABRE_IMAP_CE,
384/*0x30*/ SABRE_IMAP_PCIERR,
385/*0x31*/ 0 /* reserved */,
386/*0x32*/ 0 /* reserved */,
387/*0x33*/ SABRE_IMAP_GFX,
388/*0x34*/ SABRE_IMAP_EUPA,
389};
390#define SABRE_ONBOARD_IRQ_BASE 0x20
391#define SABRE_ONBOARD_IRQ_LAST 0x30
392#define sabre_onboard_imap_offset(__ino) \ 304#define sabre_onboard_imap_offset(__ino) \
393 __sabre_onboard_imap_off[(__ino) - SABRE_ONBOARD_IRQ_BASE] 305 (SABRE_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))
394 306
395#define sabre_iclr_offset(ino) \ 307#define sabre_iclr_offset(ino) \
396 ((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \ 308 ((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
@@ -453,10 +365,6 @@ static unsigned int sabre_irq_build(struct device_node *dp,
453 imap_off = sabre_pcislot_imap_offset(ino); 365 imap_off = sabre_pcislot_imap_offset(ino);
454 } else { 366 } else {
455 /* onboard device */ 367 /* onboard device */
456 if (ino > SABRE_ONBOARD_IRQ_LAST) {
457 prom_printf("sabre_irq_build: Wacky INO [%x]\n", ino);
458 prom_halt();
459 }
460 imap_off = sabre_onboard_imap_offset(ino); 368 imap_off = sabre_onboard_imap_offset(ino);
461 } 369 }
462 370
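The lookup tables deleted above are replaced by plain arithmetic: onboard IMAP registers start at a fixed base and are spaced 8 bytes apart, so the offset is base + ((ino & 0x1f) << 3). A quick userspace check (nothing here is kernel code) that the formula reproduces the old PSYCHO table, 0x20 -> 0x1000 (SCSI), 0x21 -> 0x1008 (ethernet), up to 0x34 -> 0x10a0 (EUPA):

#include <stdio.h>

#define OBIO_IMAP_BASE 0x1000UL

static unsigned long onboard_imap_offset(unsigned long ino)
{
        return OBIO_IMAP_BASE + ((ino & 0x1f) << 3);
}

int main(void)
{
        unsigned long ino;

        for (ino = 0x20; ino <= 0x34; ino++)
                printf("INO %#lx -> IMAP offset %#lx\n",
                       ino, onboard_imap_offset(ino));
        return 0;
}

The slots that were reserved (zero) in the old Sabre table now just compute an offset, presumably because those INOs are never requested, which is also why the separate range check and prom_halt() calls could be dropped.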
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index bd578cc4856d..10306e476e38 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -443,7 +443,7 @@ static const struct user_regset sparc64_regsets[] = {
443 */ 443 */
444 [REGSET_GENERAL] = { 444 [REGSET_GENERAL] = {
445 .core_note_type = NT_PRSTATUS, 445 .core_note_type = NT_PRSTATUS,
446 .n = 36 * sizeof(u64), 446 .n = 36,
447 .size = sizeof(u64), .align = sizeof(u64), 447 .size = sizeof(u64), .align = sizeof(u64),
448 .get = genregs64_get, .set = genregs64_set 448 .get = genregs64_get, .set = genregs64_set
449 }, 449 },
@@ -455,7 +455,7 @@ static const struct user_regset sparc64_regsets[] = {
455 */ 455 */
456 [REGSET_FP] = { 456 [REGSET_FP] = {
457 .core_note_type = NT_PRFPREG, 457 .core_note_type = NT_PRFPREG,
458 .n = 35 * sizeof(u64), 458 .n = 35,
459 .size = sizeof(u64), .align = sizeof(u64), 459 .size = sizeof(u64), .align = sizeof(u64),
460 .get = fpregs64_get, .set = fpregs64_set 460 .get = fpregs64_get, .set = fpregs64_set
461 }, 461 },
@@ -801,7 +801,7 @@ static const struct user_regset sparc32_regsets[] = {
801 */ 801 */
802 [REGSET_GENERAL] = { 802 [REGSET_GENERAL] = {
803 .core_note_type = NT_PRSTATUS, 803 .core_note_type = NT_PRSTATUS,
804 .n = 38 * sizeof(u32), 804 .n = 38,
805 .size = sizeof(u32), .align = sizeof(u32), 805 .size = sizeof(u32), .align = sizeof(u32),
806 .get = genregs32_get, .set = genregs32_set 806 .get = genregs32_get, .set = genregs32_set
807 }, 807 },
@@ -817,7 +817,7 @@ static const struct user_regset sparc32_regsets[] = {
817 */ 817 */
818 [REGSET_FP] = { 818 [REGSET_FP] = {
819 .core_note_type = NT_PRFPREG, 819 .core_note_type = NT_PRFPREG,
820 .n = 99 * sizeof(u32), 820 .n = 99,
821 .size = sizeof(u32), .align = sizeof(u32), 821 .size = sizeof(u32), .align = sizeof(u32),
822 .get = fpregs32_get, .set = fpregs32_set 822 .get = fpregs32_get, .set = fpregs32_set
823 }, 823 },
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 3d924121c796..c824df13f589 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/linkage.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/signal.h> 15#include <linux/signal.h>
15#include <linux/smp.h> 16#include <linux/smp.h>
@@ -2453,7 +2454,7 @@ struct trap_per_cpu trap_block[NR_CPUS];
2453/* This can get invoked before sched_init() so play it super safe 2454/* This can get invoked before sched_init() so play it super safe
2454 * and use hard_smp_processor_id(). 2455 * and use hard_smp_processor_id().
2455 */ 2456 */
2456void init_cur_cpu_trap(struct thread_info *t) 2457void notrace init_cur_cpu_trap(struct thread_info *t)
2457{ 2458{
2458 int cpu = hard_smp_processor_id(); 2459 int cpu = hard_smp_processor_id();
2459 struct trap_per_cpu *p = &trap_block[cpu]; 2460 struct trap_per_cpu *p = &trap_block[cpu];
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 21ef9dd36187..44d4f2130d01 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,6 +29,7 @@ config X86
29 select HAVE_FTRACE 29 select HAVE_FTRACE
30 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) 30 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
31 select HAVE_ARCH_KGDB if !X86_VOYAGER 31 select HAVE_ARCH_KGDB if !X86_VOYAGER
32 select HAVE_ARCH_TRACEHOOK
32 select HAVE_GENERIC_DMA_COHERENT if X86_32 33 select HAVE_GENERIC_DMA_COHERENT if X86_32
33 select HAVE_EFFICIENT_UNALIGNED_ACCESS 34 select HAVE_EFFICIENT_UNALIGNED_ACCESS
34 35
@@ -553,6 +554,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
553config AMD_IOMMU 554config AMD_IOMMU
554 bool "AMD IOMMU support" 555 bool "AMD IOMMU support"
555 select SWIOTLB 556 select SWIOTLB
557 select PCI_MSI
556 depends on X86_64 && PCI && ACPI 558 depends on X86_64 && PCI && ACPI
557 help 559 help
558 With this option you can enable support for AMD IOMMU hardware in 560 With this option you can enable support for AMD IOMMU hardware in
@@ -1020,7 +1022,7 @@ config HAVE_ARCH_ALLOC_REMAP
1020 1022
1021config ARCH_FLATMEM_ENABLE 1023config ARCH_FLATMEM_ENABLE
1022 def_bool y 1024 def_bool y
1023 depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && X86_PC && !NUMA 1025 depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && !NUMA
1024 1026
1025config ARCH_DISCONTIGMEM_ENABLE 1027config ARCH_DISCONTIGMEM_ENABLE
1026 def_bool y 1028 def_bool y
@@ -1036,7 +1038,7 @@ config ARCH_SPARSEMEM_DEFAULT
1036 1038
1037config ARCH_SPARSEMEM_ENABLE 1039config ARCH_SPARSEMEM_ENABLE
1038 def_bool y 1040 def_bool y
1039 depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) 1041 depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_GENERICARCH
1040 select SPARSEMEM_STATIC if X86_32 1042 select SPARSEMEM_STATIC if X86_32
1041 select SPARSEMEM_VMEMMAP_ENABLE if X86_64 1043 select SPARSEMEM_VMEMMAP_ENABLE if X86_64
1042 1044
@@ -1117,10 +1119,10 @@ config MTRR
1117 You can safely say Y even if your machine doesn't have MTRRs, you'll 1119 You can safely say Y even if your machine doesn't have MTRRs, you'll
1118 just add about 9 KB to your kernel. 1120 just add about 9 KB to your kernel.
1119 1121
1120 See <file:Documentation/mtrr.txt> for more information. 1122 See <file:Documentation/x86/mtrr.txt> for more information.
1121 1123
1122config MTRR_SANITIZER 1124config MTRR_SANITIZER
1123 bool 1125 def_bool y
1124 prompt "MTRR cleanup support" 1126 prompt "MTRR cleanup support"
1125 depends on MTRR 1127 depends on MTRR
1126 help 1128 help
@@ -1131,7 +1133,7 @@ config MTRR_SANITIZER
1131 The largest mtrr entry size for a continuous block can be set with 1133
1132 mtrr_chunk_size. 1134 mtrr_chunk_size.
1133 1135
1134 If unsure, say N. 1136 If unsure, say Y.
1135 1137
1136config MTRR_SANITIZER_ENABLE_DEFAULT 1138config MTRR_SANITIZER_ENABLE_DEFAULT
1137 int "MTRR cleanup enable value (0-1)" 1139 int "MTRR cleanup enable value (0-1)"
@@ -1191,7 +1193,6 @@ config IRQBALANCE
1191config SECCOMP 1193config SECCOMP
1192 def_bool y 1194 def_bool y
1193 prompt "Enable seccomp to safely compute untrusted bytecode" 1195 prompt "Enable seccomp to safely compute untrusted bytecode"
1194 depends on PROC_FS
1195 help 1196 help
1196 This kernel feature is useful for number crunching applications 1197 This kernel feature is useful for number crunching applications
1197 that may need to compute untrusted bytecode during their 1198 that may need to compute untrusted bytecode during their
@@ -1199,7 +1200,7 @@ config SECCOMP
1199 the process as file descriptors supporting the read/write 1200 the process as file descriptors supporting the read/write
1200 syscalls, it's possible to isolate those applications in 1201 syscalls, it's possible to isolate those applications in
1201 their own address space using seccomp. Once seccomp is 1202 their own address space using seccomp. Once seccomp is
1202 enabled via /proc/<pid>/seccomp, it cannot be disabled 1203 enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
1203 and the task is only allowed to execute a few safe syscalls 1204 and the task is only allowed to execute a few safe syscalls
1204 defined by each seccomp mode. 1205 defined by each seccomp mode.
1205 1206
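The help text above describes the runtime behaviour: once a task enables strict seccomp through prctl(PR_SET_SECCOMP) it is confined to a handful of syscalls. A hedged userspace sketch of exactly that, assuming a glibc environment; the constants are guard-defined in case the installed headers predate them, and syscall(SYS_exit, ...) is used because the usual exit paths may issue syscalls outside the strict whitelist:

#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>

#ifndef PR_SET_SECCOMP
#define PR_SET_SECCOMP 22
#endif
#ifndef SECCOMP_MODE_STRICT
#define SECCOMP_MODE_STRICT 1
#endif

int main(void)
{
        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0) != 0) {
                perror("prctl(PR_SET_SECCOMP)");
                return 1;
        }
        /* only read/write on already-open fds, exit and sigreturn are
         * allowed from here on; anything else kills the task */
        write(1, "sandboxed\n", 10);
        syscall(SYS_exit, 0);
        return 0;       /* not reached */
}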
@@ -1356,14 +1357,14 @@ config PHYSICAL_ALIGN
1356 Don't change this unless you know what you are doing. 1357 Don't change this unless you know what you are doing.
1357 1358
1358config HOTPLUG_CPU 1359config HOTPLUG_CPU
1359 bool "Support for suspend on SMP and hot-pluggable CPUs (EXPERIMENTAL)" 1360 bool "Support for hot-pluggable CPUs"
1360 depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER 1361 depends on SMP && HOTPLUG && !X86_VOYAGER
1361 ---help--- 1362 ---help---
1362 Say Y here to experiment with turning CPUs off and on, and to 1363 Say Y here to allow turning CPUs off and on. CPUs can be
1363 enable suspend on SMP systems. CPUs can be controlled through 1364 controlled through /sys/devices/system/cpu.
1364 /sys/devices/system/cpu. 1365 ( Note: power management support will enable this option
1365 Say N if you want to disable CPU hotplug and don't need to 1366 automatically on SMP systems. )
1366 suspend. 1367 Say N if you want to disable CPU hotplug.
1367 1368
1368config COMPAT_VDSO 1369config COMPAT_VDSO
1369 def_bool y 1370 def_bool y
@@ -1378,6 +1379,51 @@ config COMPAT_VDSO
1378 1379
1379 If unsure, say Y. 1380 If unsure, say Y.
1380 1381
1382config CMDLINE_BOOL
1383 bool "Built-in kernel command line"
1384 default n
1385 help
1386 Allow for specifying boot arguments to the kernel at
1387 build time. On some systems (e.g. embedded ones), it is
1388 necessary or convenient to provide some or all of the
1389 kernel boot arguments with the kernel itself (that is,
1390 to not rely on the boot loader to provide them.)
1391
1392 To compile command line arguments into the kernel,
1393 set this option to 'Y', then fill in the
1394 the boot arguments in CONFIG_CMDLINE.
1395
1396 Systems with fully functional boot loaders (i.e. non-embedded)
1397 should leave this option set to 'N'.
1398
1399config CMDLINE
1400 string "Built-in kernel command string"
1401 depends on CMDLINE_BOOL
1402 default ""
1403 help
1404 Enter arguments here that should be compiled into the kernel
1405 image and used at boot time. If the boot loader provides a
1406 command line at boot time, it is appended to this string to
1407 form the full kernel command line when the system boots.
1408
1409 However, you can use the CONFIG_CMDLINE_OVERRIDE option to
1410 change this behavior.
1411
1412 In most cases, the command line (whether built-in or provided
1413 by the boot loader) should specify the device for the root
1414 file system.
1415
1416config CMDLINE_OVERRIDE
1417 bool "Built-in command line overrides boot loader arguments"
1418 default n
1419 depends on CMDLINE_BOOL
1420 help
1421 Set this option to 'Y' to have the kernel ignore the boot loader
1422 command line, and use ONLY the built-in command line.
1423
1424 This is used to work around broken boot loaders. This should
1425 be set to 'N' under normal conditions.
1426
1381endmenu 1427endmenu
1382 1428
1383config ARCH_ENABLE_MEMORY_HOTPLUG 1429config ARCH_ENABLE_MEMORY_HOTPLUG
@@ -1781,7 +1827,7 @@ config COMPAT_FOR_U64_ALIGNMENT
1781 1827
1782config SYSVIPC_COMPAT 1828config SYSVIPC_COMPAT
1783 def_bool y 1829 def_bool y
1784 depends on X86_64 && COMPAT && SYSVIPC 1830 depends on COMPAT && SYSVIPC
1785 1831
1786endmenu 1832endmenu
1787 1833
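The CMDLINE_BOOL / CMDLINE / CMDLINE_OVERRIDE options added earlier in this file spell out the policy in their help text: the built-in string comes first, the boot loader's line is appended to it, and CMDLINE_OVERRIDE discards the boot loader's line entirely. A hedged sketch of that merge step; builtin_cmdline, boot_command_line and the buffer size are stand-ins for the real setup-code variables, not copied from arch/x86:

#include <linux/types.h>
#include <linux/string.h>

#define CMDLINE_BUF_SIZE 2048   /* stand-in for COMMAND_LINE_SIZE */

static char builtin_cmdline[CMDLINE_BUF_SIZE] = "console=ttyS0,115200";

static void merge_cmdline(char *boot_command_line, bool override)
{
        if (override) {
                /* CMDLINE_OVERRIDE: ignore what the boot loader passed */
                strlcpy(boot_command_line, builtin_cmdline, CMDLINE_BUF_SIZE);
                return;
        }
        /* append the boot loader's arguments after the built-in ones */
        strlcat(builtin_cmdline, " ", CMDLINE_BUF_SIZE);
        strlcat(builtin_cmdline, boot_command_line, CMDLINE_BUF_SIZE);
        strlcpy(boot_command_line, builtin_cmdline, CMDLINE_BUF_SIZE);
}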
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index ba7736cf2ec7..29c5fbf08392 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -137,14 +137,15 @@ relocated:
137 */ 137 */
138 movl output_len(%ebx), %eax 138 movl output_len(%ebx), %eax
139 pushl %eax 139 pushl %eax
140 # push arguments for decompress_kernel:
140 pushl %ebp # output address 141 pushl %ebp # output address
141 movl input_len(%ebx), %eax 142 movl input_len(%ebx), %eax
142 pushl %eax # input_len 143 pushl %eax # input_len
143 leal input_data(%ebx), %eax 144 leal input_data(%ebx), %eax
144 pushl %eax # input_data 145 pushl %eax # input_data
145 leal boot_heap(%ebx), %eax 146 leal boot_heap(%ebx), %eax
146 pushl %eax # heap area as third argument 147 pushl %eax # heap area
147 pushl %esi # real mode pointer as second arg 148 pushl %esi # real mode pointer
148 call decompress_kernel 149 call decompress_kernel
149 addl $20, %esp 150 addl $20, %esp
150 popl %ecx 151 popl %ecx
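The relabelled pushes above are a plain cdecl call: arguments are pushed right to left, so the last push (the real-mode pointer in %esi) becomes the first parameter of decompress_kernel(), and the very first push (output_len) is not an argument at all, it is reclaimed into %ecx after the addl $20,%esp cleanup. The C side therefore looks roughly like this; the exact parameter types are an assumption read off the pushes, not copied from misc.c:

#include <linux/linkage.h>

/* approximate prototype matching the five argument pushes above */
asmlinkage void decompress_kernel(void *rmode,            /* %esi push */
                                  unsigned long heap,     /* boot_heap */
                                  unsigned char *input_data,
                                  unsigned long input_len,
                                  unsigned char *output); /* %ebp push */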
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index aaf5a2131efc..5780d361105b 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -27,7 +27,7 @@
27#include <linux/linkage.h> 27#include <linux/linkage.h>
28#include <linux/screen_info.h> 28#include <linux/screen_info.h>
29#include <linux/elf.h> 29#include <linux/elf.h>
30#include <asm/io.h> 30#include <linux/io.h>
31#include <asm/page.h> 31#include <asm/page.h>
32#include <asm/boot.h> 32#include <asm/boot.h>
33#include <asm/bootparam.h> 33#include <asm/bootparam.h>
@@ -251,7 +251,7 @@ static void __putstr(int error, const char *s)
251 y--; 251 y--;
252 } 252 }
253 } else { 253 } else {
254 vidmem [(x + cols * y) * 2] = c; 254 vidmem[(x + cols * y) * 2] = c;
255 if (++x >= cols) { 255 if (++x >= cols) {
256 x = 0; 256 x = 0;
257 if (++y >= lines) { 257 if (++y >= lines) {
@@ -277,7 +277,8 @@ static void *memset(void *s, int c, unsigned n)
277 int i; 277 int i;
278 char *ss = s; 278 char *ss = s;
279 279
280 for (i = 0; i < n; i++) ss[i] = c; 280 for (i = 0; i < n; i++)
281 ss[i] = c;
281 return s; 282 return s;
282} 283}
283 284
@@ -287,7 +288,8 @@ static void *memcpy(void *dest, const void *src, unsigned n)
287 const char *s = src; 288 const char *s = src;
288 char *d = dest; 289 char *d = dest;
289 290
290 for (i = 0; i < n; i++) d[i] = s[i]; 291 for (i = 0; i < n; i++)
292 d[i] = s[i];
291 return dest; 293 return dest;
292} 294}
293 295
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index a1310c52fc0c..857e492c571e 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -492,7 +492,7 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
492 continue; 492 continue;
493 } 493 }
494 sh_symtab = sec_symtab->symtab; 494 sh_symtab = sec_symtab->symtab;
495 sym_strtab = sec->link->strtab; 495 sym_strtab = sec_symtab->link->strtab;
496 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { 496 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
497 Elf32_Rel *rel; 497 Elf32_Rel *rel;
498 Elf32_Sym *sym; 498 Elf32_Sym *sym;
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index af86e431acfa..b993062e9a5f 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -30,7 +30,6 @@ SYSSEG = DEF_SYSSEG /* system loaded at 0x10000 (65536) */
30SYSSIZE = DEF_SYSSIZE /* system size: # of 16-byte clicks */ 30SYSSIZE = DEF_SYSSIZE /* system size: # of 16-byte clicks */
31 /* to be loaded */ 31 /* to be loaded */
32ROOT_DEV = 0 /* ROOT_DEV is now written by "build" */ 32ROOT_DEV = 0 /* ROOT_DEV is now written by "build" */
33SWAP_DEV = 0 /* SWAP_DEV is now written by "build" */
34 33
35#ifndef SVGA_MODE 34#ifndef SVGA_MODE
36#define SVGA_MODE ASK_VGA 35#define SVGA_MODE ASK_VGA
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 104275e191a8..ef9a52005ec9 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.27-rc4 3# Linux kernel version: 2.6.27-rc5
4# Mon Aug 25 15:04:00 2008 4# Wed Sep 3 17:23:09 2008
5# 5#
6# CONFIG_64BIT is not set 6# CONFIG_64BIT is not set
7CONFIG_X86_32=y 7CONFIG_X86_32=y
@@ -202,7 +202,7 @@ CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
202# CONFIG_M586 is not set 202# CONFIG_M586 is not set
203# CONFIG_M586TSC is not set 203# CONFIG_M586TSC is not set
204# CONFIG_M586MMX is not set 204# CONFIG_M586MMX is not set
205# CONFIG_M686 is not set 205CONFIG_M686=y
206# CONFIG_MPENTIUMII is not set 206# CONFIG_MPENTIUMII is not set
207# CONFIG_MPENTIUMIII is not set 207# CONFIG_MPENTIUMIII is not set
208# CONFIG_MPENTIUMM is not set 208# CONFIG_MPENTIUMM is not set
@@ -221,13 +221,14 @@ CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
221# CONFIG_MVIAC3_2 is not set 221# CONFIG_MVIAC3_2 is not set
222# CONFIG_MVIAC7 is not set 222# CONFIG_MVIAC7 is not set
223# CONFIG_MPSC is not set 223# CONFIG_MPSC is not set
224CONFIG_MCORE2=y 224# CONFIG_MCORE2 is not set
225# CONFIG_GENERIC_CPU is not set 225# CONFIG_GENERIC_CPU is not set
226CONFIG_X86_GENERIC=y 226CONFIG_X86_GENERIC=y
227CONFIG_X86_CPU=y 227CONFIG_X86_CPU=y
228CONFIG_X86_CMPXCHG=y 228CONFIG_X86_CMPXCHG=y
229CONFIG_X86_L1_CACHE_SHIFT=7 229CONFIG_X86_L1_CACHE_SHIFT=7
230CONFIG_X86_XADD=y 230CONFIG_X86_XADD=y
231# CONFIG_X86_PPRO_FENCE is not set
231CONFIG_X86_WP_WORKS_OK=y 232CONFIG_X86_WP_WORKS_OK=y
232CONFIG_X86_INVLPG=y 233CONFIG_X86_INVLPG=y
233CONFIG_X86_BSWAP=y 234CONFIG_X86_BSWAP=y
@@ -235,14 +236,15 @@ CONFIG_X86_POPAD_OK=y
235CONFIG_X86_INTEL_USERCOPY=y 236CONFIG_X86_INTEL_USERCOPY=y
236CONFIG_X86_USE_PPRO_CHECKSUM=y 237CONFIG_X86_USE_PPRO_CHECKSUM=y
237CONFIG_X86_TSC=y 238CONFIG_X86_TSC=y
239CONFIG_X86_CMOV=y
238CONFIG_X86_MINIMUM_CPU_FAMILY=4 240CONFIG_X86_MINIMUM_CPU_FAMILY=4
239CONFIG_X86_DEBUGCTLMSR=y 241CONFIG_X86_DEBUGCTLMSR=y
240CONFIG_HPET_TIMER=y 242CONFIG_HPET_TIMER=y
241CONFIG_HPET_EMULATE_RTC=y 243CONFIG_HPET_EMULATE_RTC=y
242CONFIG_DMI=y 244CONFIG_DMI=y
243# CONFIG_IOMMU_HELPER is not set 245# CONFIG_IOMMU_HELPER is not set
244CONFIG_NR_CPUS=4 246CONFIG_NR_CPUS=64
245# CONFIG_SCHED_SMT is not set 247CONFIG_SCHED_SMT=y
246CONFIG_SCHED_MC=y 248CONFIG_SCHED_MC=y
247# CONFIG_PREEMPT_NONE is not set 249# CONFIG_PREEMPT_NONE is not set
248CONFIG_PREEMPT_VOLUNTARY=y 250CONFIG_PREEMPT_VOLUNTARY=y
@@ -254,7 +256,8 @@ CONFIG_VM86=y
254# CONFIG_TOSHIBA is not set 256# CONFIG_TOSHIBA is not set
255# CONFIG_I8K is not set 257# CONFIG_I8K is not set
256CONFIG_X86_REBOOTFIXUPS=y 258CONFIG_X86_REBOOTFIXUPS=y
257# CONFIG_MICROCODE is not set 259CONFIG_MICROCODE=y
260CONFIG_MICROCODE_OLD_INTERFACE=y
258CONFIG_X86_MSR=y 261CONFIG_X86_MSR=y
259CONFIG_X86_CPUID=y 262CONFIG_X86_CPUID=y
260# CONFIG_NOHIGHMEM is not set 263# CONFIG_NOHIGHMEM is not set
@@ -2115,7 +2118,7 @@ CONFIG_IO_DELAY_0X80=y
2115CONFIG_DEFAULT_IO_DELAY_TYPE=0 2118CONFIG_DEFAULT_IO_DELAY_TYPE=0
2116CONFIG_DEBUG_BOOT_PARAMS=y 2119CONFIG_DEBUG_BOOT_PARAMS=y
2117# CONFIG_CPA_DEBUG is not set 2120# CONFIG_CPA_DEBUG is not set
2118# CONFIG_OPTIMIZE_INLINING is not set 2121CONFIG_OPTIMIZE_INLINING=y
2119 2122
2120# 2123#
2121# Security options 2124# Security options
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 678c8acefe04..e620ea6e2a7a 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.27-rc4 3# Linux kernel version: 2.6.27-rc5
4# Mon Aug 25 14:40:46 2008 4# Wed Sep 3 17:13:39 2008
5# 5#
6CONFIG_64BIT=y 6CONFIG_64BIT=y
7# CONFIG_X86_32 is not set 7# CONFIG_X86_32 is not set
@@ -218,17 +218,14 @@ CONFIG_X86_PC=y
218# CONFIG_MVIAC3_2 is not set 218# CONFIG_MVIAC3_2 is not set
219# CONFIG_MVIAC7 is not set 219# CONFIG_MVIAC7 is not set
220# CONFIG_MPSC is not set 220# CONFIG_MPSC is not set
221CONFIG_MCORE2=y 221# CONFIG_MCORE2 is not set
222# CONFIG_GENERIC_CPU is not set 222CONFIG_GENERIC_CPU=y
223CONFIG_X86_CPU=y 223CONFIG_X86_CPU=y
224CONFIG_X86_L1_CACHE_BYTES=64 224CONFIG_X86_L1_CACHE_BYTES=128
225CONFIG_X86_INTERNODE_CACHE_BYTES=64 225CONFIG_X86_INTERNODE_CACHE_BYTES=128
226CONFIG_X86_CMPXCHG=y 226CONFIG_X86_CMPXCHG=y
227CONFIG_X86_L1_CACHE_SHIFT=6 227CONFIG_X86_L1_CACHE_SHIFT=7
228CONFIG_X86_WP_WORKS_OK=y 228CONFIG_X86_WP_WORKS_OK=y
229CONFIG_X86_INTEL_USERCOPY=y
230CONFIG_X86_USE_PPRO_CHECKSUM=y
231CONFIG_X86_P6_NOP=y
232CONFIG_X86_TSC=y 229CONFIG_X86_TSC=y
233CONFIG_X86_CMPXCHG64=y 230CONFIG_X86_CMPXCHG64=y
234CONFIG_X86_CMOV=y 231CONFIG_X86_CMOV=y
@@ -243,9 +240,8 @@ CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
243CONFIG_AMD_IOMMU=y 240CONFIG_AMD_IOMMU=y
244CONFIG_SWIOTLB=y 241CONFIG_SWIOTLB=y
245CONFIG_IOMMU_HELPER=y 242CONFIG_IOMMU_HELPER=y
246# CONFIG_MAXSMP is not set 243CONFIG_NR_CPUS=64
247CONFIG_NR_CPUS=4 244CONFIG_SCHED_SMT=y
248# CONFIG_SCHED_SMT is not set
249CONFIG_SCHED_MC=y 245CONFIG_SCHED_MC=y
250# CONFIG_PREEMPT_NONE is not set 246# CONFIG_PREEMPT_NONE is not set
251CONFIG_PREEMPT_VOLUNTARY=y 247CONFIG_PREEMPT_VOLUNTARY=y
@@ -254,7 +250,8 @@ CONFIG_X86_LOCAL_APIC=y
254CONFIG_X86_IO_APIC=y 250CONFIG_X86_IO_APIC=y
255# CONFIG_X86_MCE is not set 251# CONFIG_X86_MCE is not set
256# CONFIG_I8K is not set 252# CONFIG_I8K is not set
257# CONFIG_MICROCODE is not set 253CONFIG_MICROCODE=y
254CONFIG_MICROCODE_OLD_INTERFACE=y
258CONFIG_X86_MSR=y 255CONFIG_X86_MSR=y
259CONFIG_X86_CPUID=y 256CONFIG_X86_CPUID=y
260CONFIG_NUMA=y 257CONFIG_NUMA=y
@@ -290,7 +287,7 @@ CONFIG_BOUNCE=y
290CONFIG_VIRT_TO_BUS=y 287CONFIG_VIRT_TO_BUS=y
291CONFIG_MTRR=y 288CONFIG_MTRR=y
292# CONFIG_MTRR_SANITIZER is not set 289# CONFIG_MTRR_SANITIZER is not set
293# CONFIG_X86_PAT is not set 290CONFIG_X86_PAT=y
294CONFIG_EFI=y 291CONFIG_EFI=y
295CONFIG_SECCOMP=y 292CONFIG_SECCOMP=y
296# CONFIG_HZ_100 is not set 293# CONFIG_HZ_100 is not set
@@ -2089,7 +2086,7 @@ CONFIG_IO_DELAY_0X80=y
2089CONFIG_DEFAULT_IO_DELAY_TYPE=0 2086CONFIG_DEFAULT_IO_DELAY_TYPE=0
2090CONFIG_DEBUG_BOOT_PARAMS=y 2087CONFIG_DEBUG_BOOT_PARAMS=y
2091# CONFIG_CPA_DEBUG is not set 2088# CONFIG_CPA_DEBUG is not set
2092# CONFIG_OPTIMIZE_INLINING is not set 2089CONFIG_OPTIMIZE_INLINING=y
2093 2090
2094# 2091#
2095# Security options 2092# Security options
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index a0e1dbe67dc1..127ec3f07214 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -85,8 +85,10 @@ static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
85 dump->regs.ax = regs->ax; 85 dump->regs.ax = regs->ax;
86 dump->regs.ds = current->thread.ds; 86 dump->regs.ds = current->thread.ds;
87 dump->regs.es = current->thread.es; 87 dump->regs.es = current->thread.es;
88 asm("movl %%fs,%0" : "=r" (fs)); dump->regs.fs = fs; 88 savesegment(fs, fs);
89 asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs; 89 dump->regs.fs = fs;
90 savesegment(gs, gs);
91 dump->regs.gs = gs;
90 dump->regs.orig_ax = regs->orig_ax; 92 dump->regs.orig_ax = regs->orig_ax;
91 dump->regs.ip = regs->ip; 93 dump->regs.ip = regs->ip;
92 dump->regs.cs = regs->cs; 94 dump->regs.cs = regs->cs;
@@ -430,8 +432,9 @@ beyond_if:
430 current->mm->start_stack = 432 current->mm->start_stack =
431 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm); 433 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
432 /* start thread */ 434 /* start thread */
433 asm volatile("movl %0,%%fs" :: "r" (0)); \ 435 loadsegment(fs, 0);
434 asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS)); 436 loadsegment(ds, __USER32_DS);
437 loadsegment(es, __USER32_DS);
435 load_gs_index(0); 438 load_gs_index(0);
436 (regs)->ip = ex.a_entry; 439 (regs)->ip = ex.a_entry;
437 (regs)->sp = current->mm->start_stack; 440 (regs)->sp = current->mm->start_stack;
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index f25a10124005..8d64c1bc8474 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -207,7 +207,7 @@ struct rt_sigframe
207 { unsigned int cur; \ 207 { unsigned int cur; \
208 unsigned short pre; \ 208 unsigned short pre; \
209 err |= __get_user(pre, &sc->seg); \ 209 err |= __get_user(pre, &sc->seg); \
210 asm volatile("movl %%" #seg ",%0" : "=r" (cur)); \ 210 savesegment(seg, cur); \
211 pre |= mask; \ 211 pre |= mask; \
212 if (pre != cur) loadsegment(seg, pre); } 212 if (pre != cur) loadsegment(seg, pre); }
213 213
@@ -236,7 +236,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
236 */ 236 */
237 err |= __get_user(gs, &sc->gs); 237 err |= __get_user(gs, &sc->gs);
238 gs |= 3; 238 gs |= 3;
239 asm("movl %%gs,%0" : "=r" (oldgs)); 239 savesegment(gs, oldgs);
240 if (gs != oldgs) 240 if (gs != oldgs)
241 load_gs_index(gs); 241 load_gs_index(gs);
242 242
@@ -342,14 +342,13 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
342{ 342{
343 int tmp, err = 0; 343 int tmp, err = 0;
344 344
345 tmp = 0; 345 savesegment(gs, tmp);
346 __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
347 err |= __put_user(tmp, (unsigned int __user *)&sc->gs); 346 err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
348 __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp)); 347 savesegment(fs, tmp);
349 err |= __put_user(tmp, (unsigned int __user *)&sc->fs); 348 err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
350 __asm__("movl %%ds,%0" : "=r"(tmp): "0"(tmp)); 349 savesegment(ds, tmp);
351 err |= __put_user(tmp, (unsigned int __user *)&sc->ds); 350 err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
352 __asm__("movl %%es,%0" : "=r"(tmp): "0"(tmp)); 351 savesegment(es, tmp);
353 err |= __put_user(tmp, (unsigned int __user *)&sc->es); 352 err |= __put_user(tmp, (unsigned int __user *)&sc->es);
354 353
355 err |= __put_user((u32)regs->di, &sc->di); 354 err |= __put_user((u32)regs->di, &sc->di);
@@ -491,8 +490,8 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
491 regs->dx = 0; 490 regs->dx = 0;
492 regs->cx = 0; 491 regs->cx = 0;
493 492
494 asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); 493 loadsegment(ds, __USER32_DS);
495 asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); 494 loadsegment(es, __USER32_DS);
496 495
497 regs->cs = __USER32_CS; 496 regs->cs = __USER32_CS;
498 regs->ss = __USER32_DS; 497 regs->ss = __USER32_DS;
@@ -588,8 +587,8 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
588 regs->dx = (unsigned long) &frame->info; 587 regs->dx = (unsigned long) &frame->info;
589 regs->cx = (unsigned long) &frame->uc; 588 regs->cx = (unsigned long) &frame->uc;
590 589
591 asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); 590 loadsegment(ds, __USER32_DS);
592 asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); 591 loadsegment(es, __USER32_DS);
593 592
594 regs->cs = __USER32_CS; 593 regs->cs = __USER32_CS;
595 regs->ss = __USER32_DS; 594 regs->ss = __USER32_DS;
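The two ia32 files above replace open-coded segment moves with the savesegment()/loadsegment() helpers, which keep the asm, its constraints and its clobbers in one place. A small sketch of the resulting pattern; the function names here are illustrative, not taken from those files:

#include <asm/system.h>         /* savesegment(), loadsegment() */
#include <asm/segment.h>        /* __USER32_DS */

static unsigned int snapshot_fs_gs(unsigned int *gs_out)
{
        unsigned int fs, gs;

        savesegment(fs, fs);
        savesegment(gs, gs);
        *gs_out = gs;
        return fs;
}

static void enter_user32_data_segments(void)
{
        loadsegment(ds, __USER32_DS);
        loadsegment(es, __USER32_DS);
}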
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index d3c64088b981..beda4232ce69 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -556,15 +556,6 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
556 return ret; 556 return ret;
557} 557}
558 558
559/* These are here just in case some old ia32 binary calls it. */
560asmlinkage long sys32_pause(void)
561{
562 current->state = TASK_INTERRUPTIBLE;
563 schedule();
564 return -ERESTARTNOHAND;
565}
566
567
568#ifdef CONFIG_SYSCTL_SYSCALL 559#ifdef CONFIG_SYSCTL_SYSCALL
569struct sysctl_ia32 { 560struct sysctl_ia32 {
570 unsigned int name; 561 unsigned int name;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 27ef365e757d..c2ac1b4515a0 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -58,7 +58,6 @@ EXPORT_SYMBOL(acpi_disabled);
58#ifdef CONFIG_X86_64 58#ifdef CONFIG_X86_64
59 59
60#include <asm/proto.h> 60#include <asm/proto.h>
61#include <asm/genapic.h>
62 61
63#else /* X86 */ 62#else /* X86 */
64 63
@@ -97,8 +96,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
97#warning ACPI uses CMPXCHG, i486 and later hardware 96#warning ACPI uses CMPXCHG, i486 and later hardware
98#endif 97#endif
99 98
100static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
101
102/* -------------------------------------------------------------------------- 99/* --------------------------------------------------------------------------
103 Boot-time Configuration 100 Boot-time Configuration
104 -------------------------------------------------------------------------- */ 101 -------------------------------------------------------------------------- */
@@ -160,6 +157,8 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
160struct acpi_mcfg_allocation *pci_mmcfg_config; 157struct acpi_mcfg_allocation *pci_mmcfg_config;
161int pci_mmcfg_config_num; 158int pci_mmcfg_config_num;
162 159
160static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
161
163static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg) 162static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
164{ 163{
165 if (!strcmp(mcfg->header.oem_id, "SGI")) 164 if (!strcmp(mcfg->header.oem_id, "SGI"))
@@ -253,10 +252,8 @@ static void __cpuinit acpi_register_lapic(int id, u8 enabled)
253 return; 252 return;
254 } 253 }
255 254
256#ifdef CONFIG_X86_32
257 if (boot_cpu_physical_apicid != -1U) 255 if (boot_cpu_physical_apicid != -1U)
258 ver = apic_version[boot_cpu_physical_apicid]; 256 ver = apic_version[boot_cpu_physical_apicid];
259#endif
260 257
261 generic_processor_info(id, ver); 258 generic_processor_info(id, ver);
262} 259}
@@ -776,10 +773,8 @@ static void __init acpi_register_lapic_address(unsigned long address)
776 set_fixmap_nocache(FIX_APIC_BASE, address); 773 set_fixmap_nocache(FIX_APIC_BASE, address);
777 if (boot_cpu_physical_apicid == -1U) { 774 if (boot_cpu_physical_apicid == -1U) {
778 boot_cpu_physical_apicid = read_apic_id(); 775 boot_cpu_physical_apicid = read_apic_id();
779#ifdef CONFIG_X86_32
780 apic_version[boot_cpu_physical_apicid] = 776 apic_version[boot_cpu_physical_apicid] =
781 GET_APIC_VERSION(apic_read(APIC_LVR)); 777 GET_APIC_VERSION(apic_read(APIC_LVR));
782#endif
783 } 778 }
784} 779}
785 780
@@ -1607,6 +1602,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
1607 */ 1602 */
1608 { 1603 {
1609 .callback = dmi_ignore_irq0_timer_override, 1604 .callback = dmi_ignore_irq0_timer_override,
1605 .ident = "HP nx6115 laptop",
1606 .matches = {
1607 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1608 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"),
1609 },
1610 },
1611 {
1612 .callback = dmi_ignore_irq0_timer_override,
1610 .ident = "HP NX6125 laptop", 1613 .ident = "HP NX6125 laptop",
1611 .matches = { 1614 .matches = {
1612 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 1615 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -1621,6 +1624,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
1621 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"), 1624 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
1622 }, 1625 },
1623 }, 1626 },
1627 {
1628 .callback = dmi_ignore_irq0_timer_override,
1629 .ident = "HP 6715b laptop",
1630 .matches = {
1631 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1632 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
1633 },
1634 },
1624 {} 1635 {}
1625}; 1636};
1626 1637
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 65a0c1b48696..fb04e49776ba 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -231,25 +231,25 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
231 continue; 231 continue;
232 if (*ptr > text_end) 232 if (*ptr > text_end)
233 continue; 233 continue;
234 text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */ 234 /* turn DS segment override prefix into lock prefix */
235 text_poke(*ptr, ((unsigned char []){0xf0}), 1);
235 }; 236 };
236} 237}
237 238
238static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end) 239static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
239{ 240{
240 u8 **ptr; 241 u8 **ptr;
241 char insn[1];
242 242
243 if (noreplace_smp) 243 if (noreplace_smp)
244 return; 244 return;
245 245
246 add_nops(insn, 1);
247 for (ptr = start; ptr < end; ptr++) { 246 for (ptr = start; ptr < end; ptr++) {
248 if (*ptr < text) 247 if (*ptr < text)
249 continue; 248 continue;
250 if (*ptr > text_end) 249 if (*ptr > text_end)
251 continue; 250 continue;
252 text_poke(*ptr, insn, 1); 251 /* turn lock prefix into DS segment override prefix */
252 text_poke(*ptr, ((unsigned char []){0x3E}), 1);
253 }; 253 };
254} 254}
255 255
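The two alternatives hunks above rely on LOCK (0xf0) and the DS segment override (0x3e) both being single-byte prefixes, so a patch site can be flipped in place with a one-byte text_poke() and instruction lengths never change; the temporary add_nops() buffer goes away because a fixed one-byte prefix is written instead. A compact sketch of that toggle, with set_lock_prefix() as an illustrative wrapper name:

#include <linux/types.h>
#include <asm/alternative.h>

static void set_lock_prefix(u8 *site, bool smp)
{
        static const unsigned char lock_pfx = 0xf0;  /* LOCK prefix */
        static const unsigned char ds_pfx   = 0x3e;  /* DS override, benign */

        text_poke(site, smp ? &lock_pfx : &ds_pfx, 1);
}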
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 69b4d060b21c..34e4d112b1ef 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -33,6 +33,10 @@
33 33
34static DEFINE_RWLOCK(amd_iommu_devtable_lock); 34static DEFINE_RWLOCK(amd_iommu_devtable_lock);
35 35
36/* A list of preallocated protection domains */
37static LIST_HEAD(iommu_pd_list);
38static DEFINE_SPINLOCK(iommu_pd_list_lock);
39
36/* 40/*
37 * general struct to manage commands send to an IOMMU 41 * general struct to manage commands send to an IOMMU
38 */ 42 */
@@ -51,6 +55,102 @@ static int iommu_has_npcache(struct amd_iommu *iommu)
51 55
52/**************************************************************************** 56/****************************************************************************
53 * 57 *
58 * Interrupt handling functions
59 *
60 ****************************************************************************/
61
62static void iommu_print_event(void *__evt)
63{
64 u32 *event = __evt;
65 int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
66 int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
67 int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
68 int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
69 u64 address = (u64)(((u64)event[3]) << 32) | event[2];
70
71 printk(KERN_ERR "AMD IOMMU: Event logged [");
72
73 switch (type) {
74 case EVENT_TYPE_ILL_DEV:
75 printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
76 "address=0x%016llx flags=0x%04x]\n",
77 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
78 address, flags);
79 break;
80 case EVENT_TYPE_IO_FAULT:
81 printk("IO_PAGE_FAULT device=%02x:%02x.%x "
82 "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
83 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
84 domid, address, flags);
85 break;
86 case EVENT_TYPE_DEV_TAB_ERR:
87 printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
88 "address=0x%016llx flags=0x%04x]\n",
89 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
90 address, flags);
91 break;
92 case EVENT_TYPE_PAGE_TAB_ERR:
93 printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
94 "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
95 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
96 domid, address, flags);
97 break;
98 case EVENT_TYPE_ILL_CMD:
99 printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
100 break;
101 case EVENT_TYPE_CMD_HARD_ERR:
102 printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
103 "flags=0x%04x]\n", address, flags);
104 break;
105 case EVENT_TYPE_IOTLB_INV_TO:
106 printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
107 "address=0x%016llx]\n",
108 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
109 address);
110 break;
111 case EVENT_TYPE_INV_DEV_REQ:
112 printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
113 "address=0x%016llx flags=0x%04x]\n",
114 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
115 address, flags);
116 break;
117 default:
118 printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
119 }
120}
121
122static void iommu_poll_events(struct amd_iommu *iommu)
123{
124 u32 head, tail;
125 unsigned long flags;
126
127 spin_lock_irqsave(&iommu->lock, flags);
128
129 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
130 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
131
132 while (head != tail) {
133 iommu_print_event(iommu->evt_buf + head);
134 head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
135 }
136
137 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
138
139 spin_unlock_irqrestore(&iommu->lock, flags);
140}
141
142irqreturn_t amd_iommu_int_handler(int irq, void *data)
143{
144 struct amd_iommu *iommu;
145
146 list_for_each_entry(iommu, &amd_iommu_list, list)
147 iommu_poll_events(iommu);
148
149 return IRQ_HANDLED;
150}
151
152/****************************************************************************
153 *
54 * IOMMU command queuing functions 154 * IOMMU command queuing functions
55 * 155 *
56 ****************************************************************************/ 156 ****************************************************************************/
@@ -101,10 +201,10 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
101 */ 201 */
102static int iommu_completion_wait(struct amd_iommu *iommu) 202static int iommu_completion_wait(struct amd_iommu *iommu)
103{ 203{
104 int ret, ready = 0; 204 int ret = 0, ready = 0;
105 unsigned status = 0; 205 unsigned status = 0;
106 struct iommu_cmd cmd; 206 struct iommu_cmd cmd;
107 unsigned long i = 0; 207 unsigned long flags, i = 0;
108 208
109 memset(&cmd, 0, sizeof(cmd)); 209 memset(&cmd, 0, sizeof(cmd));
110 cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; 210 cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
@@ -112,10 +212,12 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
112 212
113 iommu->need_sync = 0; 213 iommu->need_sync = 0;
114 214
115 ret = iommu_queue_command(iommu, &cmd); 215 spin_lock_irqsave(&iommu->lock, flags);
216
217 ret = __iommu_queue_command(iommu, &cmd);
116 218
117 if (ret) 219 if (ret)
118 return ret; 220 goto out;
119 221
120 while (!ready && (i < EXIT_LOOP_COUNT)) { 222 while (!ready && (i < EXIT_LOOP_COUNT)) {
121 ++i; 223 ++i;
@@ -130,6 +232,8 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
130 232
131 if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit())) 233 if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
132 printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n"); 234 printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
235out:
236 spin_unlock_irqrestore(&iommu->lock, flags);
133 237
134 return 0; 238 return 0;
135} 239}
@@ -140,6 +244,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
140static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) 244static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
141{ 245{
142 struct iommu_cmd cmd; 246 struct iommu_cmd cmd;
247 int ret;
143 248
144 BUG_ON(iommu == NULL); 249 BUG_ON(iommu == NULL);
145 250
@@ -147,9 +252,11 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
147 CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); 252 CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
148 cmd.data[0] = devid; 253 cmd.data[0] = devid;
149 254
255 ret = iommu_queue_command(iommu, &cmd);
256
150 iommu->need_sync = 1; 257 iommu->need_sync = 1;
151 258
152 return iommu_queue_command(iommu, &cmd); 259 return ret;
153} 260}
154 261
155/* 262/*
@@ -159,6 +266,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
159 u64 address, u16 domid, int pde, int s) 266 u64 address, u16 domid, int pde, int s)
160{ 267{
161 struct iommu_cmd cmd; 268 struct iommu_cmd cmd;
269 int ret;
162 270
163 memset(&cmd, 0, sizeof(cmd)); 271 memset(&cmd, 0, sizeof(cmd));
164 address &= PAGE_MASK; 272 address &= PAGE_MASK;
@@ -171,9 +279,11 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
171	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */ 279	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
172 cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; 280 cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
173 281
282 ret = iommu_queue_command(iommu, &cmd);
283
174 iommu->need_sync = 1; 284 iommu->need_sync = 1;
175 285
176 return iommu_queue_command(iommu, &cmd); 286 return ret;
177} 287}
178 288
179/* 289/*
@@ -203,6 +313,14 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
203 return 0; 313 return 0;
204} 314}
205 315
316/* Flush the whole IO/TLB for a given protection domain */
317static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
318{
319 u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
320
321 iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
322}
323
206/**************************************************************************** 324/****************************************************************************
207 * 325 *
208 * The functions below are used to create the page table mappings for 326 * The functions below are used to create the page table mappings for
@@ -362,11 +480,6 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
362 * efficient allocator. 480 * efficient allocator.
363 * 481 *
364 ****************************************************************************/ 482 ****************************************************************************/
365static unsigned long dma_mask_to_pages(unsigned long mask)
366{
367 return (mask >> PAGE_SHIFT) +
368 (PAGE_ALIGN(mask & ~PAGE_MASK) >> PAGE_SHIFT);
369}
370 483
371/* 484/*
372 * The address allocator core function. 485 * The address allocator core function.
@@ -375,25 +488,31 @@ static unsigned long dma_mask_to_pages(unsigned long mask)
375 */ 488 */
376static unsigned long dma_ops_alloc_addresses(struct device *dev, 489static unsigned long dma_ops_alloc_addresses(struct device *dev,
377 struct dma_ops_domain *dom, 490 struct dma_ops_domain *dom,
378 unsigned int pages) 491 unsigned int pages,
492 unsigned long align_mask,
493 u64 dma_mask)
379{ 494{
380 unsigned long limit = dma_mask_to_pages(*dev->dma_mask); 495 unsigned long limit;
381 unsigned long address; 496 unsigned long address;
382 unsigned long size = dom->aperture_size >> PAGE_SHIFT;
383 unsigned long boundary_size; 497 unsigned long boundary_size;
384 498
385 boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 499 boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
386 PAGE_SIZE) >> PAGE_SHIFT; 500 PAGE_SIZE) >> PAGE_SHIFT;
387 limit = limit < size ? limit : size; 501 limit = iommu_device_max_index(dom->aperture_size >> PAGE_SHIFT, 0,
502 dma_mask >> PAGE_SHIFT);
388 503
389 if (dom->next_bit >= limit) 504 if (dom->next_bit >= limit) {
390 dom->next_bit = 0; 505 dom->next_bit = 0;
506 dom->need_flush = true;
507 }
391 508
392 address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages, 509 address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
393 0 , boundary_size, 0); 510 0 , boundary_size, align_mask);
394 if (address == -1) 511 if (address == -1) {
395 address = iommu_area_alloc(dom->bitmap, limit, 0, pages, 512 address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
396 0, boundary_size, 0); 513 0, boundary_size, align_mask);
514 dom->need_flush = true;
515 }
397 516
398 if (likely(address != -1)) { 517 if (likely(address != -1)) {
399 dom->next_bit = address + pages; 518 dom->next_bit = address + pages;
@@ -459,7 +578,7 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
459 if (start_page + pages > last_page) 578 if (start_page + pages > last_page)
460 pages = last_page - start_page; 579 pages = last_page - start_page;
461 580
462 set_bit_string(dom->bitmap, start_page, pages); 581 iommu_area_reserve(dom->bitmap, start_page, pages);
463} 582}
464 583
465static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom) 584static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
@@ -553,6 +672,9 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
553 dma_dom->bitmap[0] = 1; 672 dma_dom->bitmap[0] = 1;
554 dma_dom->next_bit = 0; 673 dma_dom->next_bit = 0;
555 674
675 dma_dom->need_flush = false;
676 dma_dom->target_dev = 0xffff;
677
556	/* Initialize the exclusion range if necessary */ 678	/* Initialize the exclusion range if necessary */
557 if (iommu->exclusion_start && 679 if (iommu->exclusion_start &&
558 iommu->exclusion_start < dma_dom->aperture_size) { 680 iommu->exclusion_start < dma_dom->aperture_size) {
@@ -623,12 +745,13 @@ static void set_device_domain(struct amd_iommu *iommu,
623 745
624 u64 pte_root = virt_to_phys(domain->pt_root); 746 u64 pte_root = virt_to_phys(domain->pt_root);
625 747
626 pte_root |= (domain->mode & 0x07) << 9; 748 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
627 pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | 2; 749 << DEV_ENTRY_MODE_SHIFT;
750 pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
628 751
629 write_lock_irqsave(&amd_iommu_devtable_lock, flags); 752 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
630 amd_iommu_dev_table[devid].data[0] = pte_root; 753 amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
631 amd_iommu_dev_table[devid].data[1] = pte_root >> 32; 754 amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
632 amd_iommu_dev_table[devid].data[2] = domain->id; 755 amd_iommu_dev_table[devid].data[2] = domain->id;
633 756
634 amd_iommu_pd_table[devid] = domain; 757 amd_iommu_pd_table[devid] = domain;
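The device table entry is assembled as a single 64-bit value and then split across two 32-bit slots. A small standalone sketch of that packing, restricted to what this hunk shows (the page-table root plus the 3-bit paging mode at bit 9); the sample root address and mode are made up, and the permission/valid bits are left out:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pt_root = 0x12345000ULL;	/* hypothetical page-table root */
	unsigned int mode = 3;			/* hypothetical paging mode     */
	uint64_t pte_root = pt_root | ((uint64_t)(mode & 0x07) << 9);

	/* open-coded equivalents of lower_32_bits() / upper_32_bits() */
	printf("data[0] = 0x%08x\n", (uint32_t)pte_root);
	printf("data[1] = 0x%08x\n", (uint32_t)(pte_root >> 32));
	/* prints: data[0] = 0x12345600, data[1] = 0x00000000 */
	return 0;
}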
@@ -646,6 +769,45 @@ static void set_device_domain(struct amd_iommu *iommu,
646 *****************************************************************************/ 769 *****************************************************************************/
647 770
648/* 771/*
772 * This function checks if the driver got a valid device from the caller to
773 * avoid dereferencing invalid pointers.
774 */
775static bool check_device(struct device *dev)
776{
777 if (!dev || !dev->dma_mask)
778 return false;
779
780 return true;
781}
782
783/*
784 * In this function the list of preallocated protection domains is traversed to
785 * find the domain for a specific device
786 */
787static struct dma_ops_domain *find_protection_domain(u16 devid)
788{
789 struct dma_ops_domain *entry, *ret = NULL;
790 unsigned long flags;
791
792 if (list_empty(&iommu_pd_list))
793 return NULL;
794
795 spin_lock_irqsave(&iommu_pd_list_lock, flags);
796
797 list_for_each_entry(entry, &iommu_pd_list, list) {
798 if (entry->target_dev == devid) {
799 ret = entry;
800 list_del(&ret->list);
801 break;
802 }
803 }
804
805 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
806
807 return ret;
808}
809
810/*
649 * In the dma_ops path we only have the struct device. This function 811 * In the dma_ops path we only have the struct device. This function
650 * finds the corresponding IOMMU, the protection domain and the 812 * finds the corresponding IOMMU, the protection domain and the
651 * requestor id for a given device. 813 * requestor id for a given device.
@@ -661,27 +823,30 @@ static int get_device_resources(struct device *dev,
661 struct pci_dev *pcidev; 823 struct pci_dev *pcidev;
662 u16 _bdf; 824 u16 _bdf;
663 825
664 BUG_ON(!dev || dev->bus != &pci_bus_type || !dev->dma_mask); 826 *iommu = NULL;
827 *domain = NULL;
828 *bdf = 0xffff;
829
830 if (dev->bus != &pci_bus_type)
831 return 0;
665 832
666 pcidev = to_pci_dev(dev); 833 pcidev = to_pci_dev(dev);
667 _bdf = calc_devid(pcidev->bus->number, pcidev->devfn); 834 _bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
668 835
669 /* device not translated by any IOMMU in the system? */ 836 /* device not translated by any IOMMU in the system? */
670 if (_bdf > amd_iommu_last_bdf) { 837 if (_bdf > amd_iommu_last_bdf)
671 *iommu = NULL;
672 *domain = NULL;
673 *bdf = 0xffff;
674 return 0; 838 return 0;
675 }
676 839
677 *bdf = amd_iommu_alias_table[_bdf]; 840 *bdf = amd_iommu_alias_table[_bdf];
678 841
679 *iommu = amd_iommu_rlookup_table[*bdf]; 842 *iommu = amd_iommu_rlookup_table[*bdf];
680 if (*iommu == NULL) 843 if (*iommu == NULL)
681 return 0; 844 return 0;
682 dma_dom = (*iommu)->default_dom;
683 *domain = domain_for_device(*bdf); 845 *domain = domain_for_device(*bdf);
684 if (*domain == NULL) { 846 if (*domain == NULL) {
847 dma_dom = find_protection_domain(*bdf);
848 if (!dma_dom)
849 dma_dom = (*iommu)->default_dom;
685 *domain = &dma_dom->domain; 850 *domain = &dma_dom->domain;
686 set_device_domain(*iommu, *domain, *bdf); 851 set_device_domain(*iommu, *domain, *bdf);
687 printk(KERN_INFO "AMD IOMMU: Using protection domain %d for " 852 printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
@@ -760,17 +925,24 @@ static dma_addr_t __map_single(struct device *dev,
760 struct dma_ops_domain *dma_dom, 925 struct dma_ops_domain *dma_dom,
761 phys_addr_t paddr, 926 phys_addr_t paddr,
762 size_t size, 927 size_t size,
763 int dir) 928 int dir,
929 bool align,
930 u64 dma_mask)
764{ 931{
765 dma_addr_t offset = paddr & ~PAGE_MASK; 932 dma_addr_t offset = paddr & ~PAGE_MASK;
766 dma_addr_t address, start; 933 dma_addr_t address, start;
767 unsigned int pages; 934 unsigned int pages;
935 unsigned long align_mask = 0;
768 int i; 936 int i;
769 937
770 pages = iommu_num_pages(paddr, size); 938 pages = iommu_num_pages(paddr, size);
771 paddr &= PAGE_MASK; 939 paddr &= PAGE_MASK;
772 940
773 address = dma_ops_alloc_addresses(dev, dma_dom, pages); 941 if (align)
942 align_mask = (1UL << get_order(size)) - 1;
943
944 address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
945 dma_mask);
774 if (unlikely(address == bad_dma_address)) 946 if (unlikely(address == bad_dma_address))
775 goto out; 947 goto out;
776 948
@@ -782,6 +954,12 @@ static dma_addr_t __map_single(struct device *dev,
782 } 954 }
783 address += offset; 955 address += offset;
784 956
957 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
958 iommu_flush_tlb(iommu, dma_dom->domain.id);
959 dma_dom->need_flush = false;
960 } else if (unlikely(iommu_has_npcache(iommu)))
961 iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
962
785out: 963out:
786 return address; 964 return address;
787} 965}
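The align_mask passed down here is only non-zero on the alloc_coherent path (align == true) and forces the returned IO address onto the natural alignment of the allocation. A standalone sketch of how it scales with the request size, using a stand-in for the kernel's get_order() and assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages assumed */

/* stand-in for the kernel's get_order() */
static unsigned long order(unsigned long size)
{
	unsigned long o = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		o++;
		size >>= 1;
	}
	return o;
}

int main(void)
{
	unsigned long sizes[] = { 4096, 8192, 65536 };
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long align_mask = (1UL << order(sizes[i])) - 1;
		printf("size %6lu -> align_mask %2lu -> %lu-page aligned\n",
		       sizes[i], align_mask, align_mask + 1);
	}
	/* prints 1-, 2- and 16-page alignment respectively */
	return 0;
}

With that mask, iommu_area_alloc() only hands out start indices whose low bits are clear, so for example a 64 KiB coherent buffer ends up on a 64 KiB IO-virtual boundary.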
@@ -812,6 +990,9 @@ static void __unmap_single(struct amd_iommu *iommu,
812 } 990 }
813 991
814 dma_ops_free_addresses(dma_dom, dma_addr, pages); 992 dma_ops_free_addresses(dma_dom, dma_addr, pages);
993
994 if (amd_iommu_unmap_flush)
995 iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
815} 996}
816 997
817/* 998/*
@@ -825,6 +1006,12 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
825 struct protection_domain *domain; 1006 struct protection_domain *domain;
826 u16 devid; 1007 u16 devid;
827 dma_addr_t addr; 1008 dma_addr_t addr;
1009 u64 dma_mask;
1010
1011 if (!check_device(dev))
1012 return bad_dma_address;
1013
1014 dma_mask = *dev->dma_mask;
828 1015
829 get_device_resources(dev, &iommu, &domain, &devid); 1016 get_device_resources(dev, &iommu, &domain, &devid);
830 1017
@@ -833,14 +1020,12 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
833 return (dma_addr_t)paddr; 1020 return (dma_addr_t)paddr;
834 1021
835 spin_lock_irqsave(&domain->lock, flags); 1022 spin_lock_irqsave(&domain->lock, flags);
836 addr = __map_single(dev, iommu, domain->priv, paddr, size, dir); 1023 addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
1024 dma_mask);
837 if (addr == bad_dma_address) 1025 if (addr == bad_dma_address)
838 goto out; 1026 goto out;
839 1027
840 if (iommu_has_npcache(iommu)) 1028 if (unlikely(iommu->need_sync))
841 iommu_flush_pages(iommu, domain->id, addr, size);
842
843 if (iommu->need_sync)
844 iommu_completion_wait(iommu); 1029 iommu_completion_wait(iommu);
845 1030
846out: 1031out:
@@ -860,7 +1045,8 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
860 struct protection_domain *domain; 1045 struct protection_domain *domain;
861 u16 devid; 1046 u16 devid;
862 1047
863 if (!get_device_resources(dev, &iommu, &domain, &devid)) 1048 if (!check_device(dev) ||
1049 !get_device_resources(dev, &iommu, &domain, &devid))
864 /* device not handled by any AMD IOMMU */ 1050 /* device not handled by any AMD IOMMU */
865 return; 1051 return;
866 1052
@@ -868,9 +1054,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
868 1054
869 __unmap_single(iommu, domain->priv, dma_addr, size, dir); 1055 __unmap_single(iommu, domain->priv, dma_addr, size, dir);
870 1056
871 iommu_flush_pages(iommu, domain->id, dma_addr, size); 1057 if (unlikely(iommu->need_sync))
872
873 if (iommu->need_sync)
874 iommu_completion_wait(iommu); 1058 iommu_completion_wait(iommu);
875 1059
876 spin_unlock_irqrestore(&domain->lock, flags); 1060 spin_unlock_irqrestore(&domain->lock, flags);
@@ -909,6 +1093,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
909 struct scatterlist *s; 1093 struct scatterlist *s;
910 phys_addr_t paddr; 1094 phys_addr_t paddr;
911 int mapped_elems = 0; 1095 int mapped_elems = 0;
1096 u64 dma_mask;
1097
1098 if (!check_device(dev))
1099 return 0;
1100
1101 dma_mask = *dev->dma_mask;
912 1102
913 get_device_resources(dev, &iommu, &domain, &devid); 1103 get_device_resources(dev, &iommu, &domain, &devid);
914 1104
@@ -921,19 +1111,17 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
921 paddr = sg_phys(s); 1111 paddr = sg_phys(s);
922 1112
923 s->dma_address = __map_single(dev, iommu, domain->priv, 1113 s->dma_address = __map_single(dev, iommu, domain->priv,
924 paddr, s->length, dir); 1114 paddr, s->length, dir, false,
1115 dma_mask);
925 1116
926 if (s->dma_address) { 1117 if (s->dma_address) {
927 s->dma_length = s->length; 1118 s->dma_length = s->length;
928 mapped_elems++; 1119 mapped_elems++;
929 } else 1120 } else
930 goto unmap; 1121 goto unmap;
931 if (iommu_has_npcache(iommu))
932 iommu_flush_pages(iommu, domain->id, s->dma_address,
933 s->dma_length);
934 } 1122 }
935 1123
936 if (iommu->need_sync) 1124 if (unlikely(iommu->need_sync))
937 iommu_completion_wait(iommu); 1125 iommu_completion_wait(iommu);
938 1126
939out: 1127out:
@@ -967,7 +1155,8 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
967 u16 devid; 1155 u16 devid;
968 int i; 1156 int i;
969 1157
970 if (!get_device_resources(dev, &iommu, &domain, &devid)) 1158 if (!check_device(dev) ||
1159 !get_device_resources(dev, &iommu, &domain, &devid))
971 return; 1160 return;
972 1161
973 spin_lock_irqsave(&domain->lock, flags); 1162 spin_lock_irqsave(&domain->lock, flags);
@@ -975,12 +1164,10 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
975 for_each_sg(sglist, s, nelems, i) { 1164 for_each_sg(sglist, s, nelems, i) {
976 __unmap_single(iommu, domain->priv, s->dma_address, 1165 __unmap_single(iommu, domain->priv, s->dma_address,
977 s->dma_length, dir); 1166 s->dma_length, dir);
978 iommu_flush_pages(iommu, domain->id, s->dma_address,
979 s->dma_length);
980 s->dma_address = s->dma_length = 0; 1167 s->dma_address = s->dma_length = 0;
981 } 1168 }
982 1169
983 if (iommu->need_sync) 1170 if (unlikely(iommu->need_sync))
984 iommu_completion_wait(iommu); 1171 iommu_completion_wait(iommu);
985 1172
986 spin_unlock_irqrestore(&domain->lock, flags); 1173 spin_unlock_irqrestore(&domain->lock, flags);
@@ -998,25 +1185,33 @@ static void *alloc_coherent(struct device *dev, size_t size,
998 struct protection_domain *domain; 1185 struct protection_domain *domain;
999 u16 devid; 1186 u16 devid;
1000 phys_addr_t paddr; 1187 phys_addr_t paddr;
1188 u64 dma_mask = dev->coherent_dma_mask;
1189
1190 if (!check_device(dev))
1191 return NULL;
1001 1192
1193 if (!get_device_resources(dev, &iommu, &domain, &devid))
1194 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
1195
1196 flag |= __GFP_ZERO;
1002 virt_addr = (void *)__get_free_pages(flag, get_order(size)); 1197 virt_addr = (void *)__get_free_pages(flag, get_order(size));
1003 if (!virt_addr) 1198 if (!virt_addr)
1004 return 0; 1199 return 0;
1005 1200
1006 memset(virt_addr, 0, size);
1007 paddr = virt_to_phys(virt_addr); 1201 paddr = virt_to_phys(virt_addr);
1008 1202
1009 get_device_resources(dev, &iommu, &domain, &devid);
1010
1011 if (!iommu || !domain) { 1203 if (!iommu || !domain) {
1012 *dma_addr = (dma_addr_t)paddr; 1204 *dma_addr = (dma_addr_t)paddr;
1013 return virt_addr; 1205 return virt_addr;
1014 } 1206 }
1015 1207
1208 if (!dma_mask)
1209 dma_mask = *dev->dma_mask;
1210
1016 spin_lock_irqsave(&domain->lock, flags); 1211 spin_lock_irqsave(&domain->lock, flags);
1017 1212
1018 *dma_addr = __map_single(dev, iommu, domain->priv, paddr, 1213 *dma_addr = __map_single(dev, iommu, domain->priv, paddr,
1019 size, DMA_BIDIRECTIONAL); 1214 size, DMA_BIDIRECTIONAL, true, dma_mask);
1020 1215
1021 if (*dma_addr == bad_dma_address) { 1216 if (*dma_addr == bad_dma_address) {
1022 free_pages((unsigned long)virt_addr, get_order(size)); 1217 free_pages((unsigned long)virt_addr, get_order(size));
@@ -1024,10 +1219,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
1024 goto out; 1219 goto out;
1025 } 1220 }
1026 1221
1027 if (iommu_has_npcache(iommu)) 1222 if (unlikely(iommu->need_sync))
1028 iommu_flush_pages(iommu, domain->id, *dma_addr, size);
1029
1030 if (iommu->need_sync)
1031 iommu_completion_wait(iommu); 1223 iommu_completion_wait(iommu);
1032 1224
1033out: 1225out:
@@ -1038,8 +1230,6 @@ out:
1038 1230
1039/* 1231/*
1040 * The exported free_coherent function for dma_ops. 1232 * The exported free_coherent function for dma_ops.
1041 * FIXME: fix the generic x86 DMA layer so that it actually calls that
1042 * function.
1043 */ 1233 */
1044static void free_coherent(struct device *dev, size_t size, 1234static void free_coherent(struct device *dev, size_t size,
1045 void *virt_addr, dma_addr_t dma_addr) 1235 void *virt_addr, dma_addr_t dma_addr)
@@ -1049,6 +1239,9 @@ static void free_coherent(struct device *dev, size_t size,
1049 struct protection_domain *domain; 1239 struct protection_domain *domain;
1050 u16 devid; 1240 u16 devid;
1051 1241
1242 if (!check_device(dev))
1243 return;
1244
1052 get_device_resources(dev, &iommu, &domain, &devid); 1245 get_device_resources(dev, &iommu, &domain, &devid);
1053 1246
1054 if (!iommu || !domain) 1247 if (!iommu || !domain)
@@ -1057,9 +1250,8 @@ static void free_coherent(struct device *dev, size_t size,
1057 spin_lock_irqsave(&domain->lock, flags); 1250 spin_lock_irqsave(&domain->lock, flags);
1058 1251
1059 __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); 1252 __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
1060 iommu_flush_pages(iommu, domain->id, dma_addr, size);
1061 1253
1062 if (iommu->need_sync) 1254 if (unlikely(iommu->need_sync))
1063 iommu_completion_wait(iommu); 1255 iommu_completion_wait(iommu);
1064 1256
1065 spin_unlock_irqrestore(&domain->lock, flags); 1257 spin_unlock_irqrestore(&domain->lock, flags);
@@ -1069,6 +1261,30 @@ free_mem:
1069} 1261}
1070 1262
1071/* 1263/*
1264 * This function is called by the DMA layer to find out if we can handle a
1265 * particular device. It is part of the dma_ops.
1266 */
1267static int amd_iommu_dma_supported(struct device *dev, u64 mask)
1268{
1269 u16 bdf;
1270 struct pci_dev *pcidev;
1271
1272 /* No device or no PCI device */
1273 if (!dev || dev->bus != &pci_bus_type)
1274 return 0;
1275
1276 pcidev = to_pci_dev(dev);
1277
1278 bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
1279
1280 /* Out of our scope? */
1281 if (bdf > amd_iommu_last_bdf)
1282 return 0;
1283
1284 return 1;
1285}
1286
1287/*
1072 * The function for pre-allocating protection domains. 1288 * The function for pre-allocating protection domains.
1073 * 1289 *
1074 * If the driver core informs the DMA layer when a driver grabs a device 1290 * If the driver core informs the DMA layer when a driver grabs a device
@@ -1097,10 +1313,9 @@ void prealloc_protection_domains(void)
1097 if (!dma_dom) 1313 if (!dma_dom)
1098 continue; 1314 continue;
1099 init_unity_mappings_for_device(dma_dom, devid); 1315 init_unity_mappings_for_device(dma_dom, devid);
1100 set_device_domain(iommu, &dma_dom->domain, devid); 1316 dma_dom->target_dev = devid;
1101 printk(KERN_INFO "AMD IOMMU: Allocated domain %d for device ", 1317
1102 dma_dom->domain.id); 1318 list_add_tail(&dma_dom->list, &iommu_pd_list);
1103 print_devid(devid, 1);
1104 } 1319 }
1105} 1320}
1106 1321
@@ -1111,6 +1326,7 @@ static struct dma_mapping_ops amd_iommu_dma_ops = {
1111 .unmap_single = unmap_single, 1326 .unmap_single = unmap_single,
1112 .map_sg = map_sg, 1327 .map_sg = map_sg,
1113 .unmap_sg = unmap_sg, 1328 .unmap_sg = unmap_sg,
1329 .dma_supported = amd_iommu_dma_supported,
1114}; 1330};
1115 1331
1116/* 1332/*
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index a69cc0f52042..148fcfe22f17 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -22,6 +22,8 @@
22#include <linux/gfp.h> 22#include <linux/gfp.h>
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/sysdev.h> 24#include <linux/sysdev.h>
25#include <linux/interrupt.h>
26#include <linux/msi.h>
25#include <asm/pci-direct.h> 27#include <asm/pci-direct.h>
26#include <asm/amd_iommu_types.h> 28#include <asm/amd_iommu_types.h>
27#include <asm/amd_iommu.h> 29#include <asm/amd_iommu.h>
@@ -30,7 +32,6 @@
30/* 32/*
31 * definitions for the ACPI scanning code 33 * definitions for the ACPI scanning code
32 */ 34 */
33#define PCI_BUS(x) (((x) >> 8) & 0xff)
34#define IVRS_HEADER_LENGTH 48 35#define IVRS_HEADER_LENGTH 48
35 36
36#define ACPI_IVHD_TYPE 0x10 37#define ACPI_IVHD_TYPE 0x10
@@ -121,6 +122,7 @@ LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
121 we find in ACPI */ 122 we find in ACPI */
122unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */ 123unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
123int amd_iommu_isolate; /* if 1, device isolation is enabled */ 124int amd_iommu_isolate; /* if 1, device isolation is enabled */
125bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
124 126
125LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the 127LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
126 system */ 128 system */
@@ -234,7 +236,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
234{ 236{
235 u32 ctrl; 237 u32 ctrl;
236 238
237 ctrl = (u64)readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); 239 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
238 ctrl &= ~(1 << bit); 240 ctrl &= ~(1 << bit);
239 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 241 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
240} 242}
@@ -242,13 +244,23 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
242/* Function to enable the hardware */ 244/* Function to enable the hardware */
243void __init iommu_enable(struct amd_iommu *iommu) 245void __init iommu_enable(struct amd_iommu *iommu)
244{ 246{
245 printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at "); 247 printk(KERN_INFO "AMD IOMMU: Enabling IOMMU "
246 print_devid(iommu->devid, 0); 248 "at %02x:%02x.%x cap 0x%hx\n",
247 printk(" cap 0x%hx\n", iommu->cap_ptr); 249 iommu->dev->bus->number,
250 PCI_SLOT(iommu->dev->devfn),
251 PCI_FUNC(iommu->dev->devfn),
252 iommu->cap_ptr);
248 253
249 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); 254 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
250} 255}
251 256
257/* Function to enable IOMMU event logging and event interrupts */
258void __init iommu_enable_event_logging(struct amd_iommu *iommu)
259{
260 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
261 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
262}
263
252/* 264/*
253 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in 265 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
254 * the system has one. 266 * the system has one.
@@ -286,6 +298,14 @@ static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
286 ****************************************************************************/ 298 ****************************************************************************/
287 299
288/* 300/*
301 * This function calculates the length of a given IVHD entry
302 */
303static inline int ivhd_entry_length(u8 *ivhd)
304{
305 return 0x04 << (*ivhd >> 6);
306}
307
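The encoding behind ivhd_entry_length() is compact: the top two bits of an entry's first (type) byte select one of four fixed entry sizes. A standalone sketch with made-up type bytes, one per size class:

#include <stdio.h>

/* same formula as above, taking the type byte directly */
static int ivhd_entry_length(unsigned char type)
{
	return 0x04 << (type >> 6);
}

int main(void)
{
	unsigned char types[] = { 0x02, 0x42, 0x82, 0xc2 };	/* hypothetical values */
	int i;

	for (i = 0; i < 4; i++)
		printf("type 0x%02x -> %2d bytes\n",
		       types[i], ivhd_entry_length(types[i]));
	/* prints 4, 8, 16 and 32 bytes respectively */
	return 0;
}

Both ACPI parsing passes below step from one IVHD entry to the next with exactly this length.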
308/*
289 * This function reads the last device id the IOMMU has to handle from the PCI 309 * This function reads the last device id the IOMMU has to handle from the PCI
290 * capability header for this IOMMU 310 * capability header for this IOMMU
291 */ 311 */
@@ -329,7 +349,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
329 default: 349 default:
330 break; 350 break;
331 } 351 }
332 p += 0x04 << (*p >> 6); 352 p += ivhd_entry_length(p);
333 } 353 }
334 354
335 WARN_ON(p != end); 355 WARN_ON(p != end);
@@ -414,7 +434,32 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
414 434
415static void __init free_command_buffer(struct amd_iommu *iommu) 435static void __init free_command_buffer(struct amd_iommu *iommu)
416{ 436{
417 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); 437 free_pages((unsigned long)iommu->cmd_buf,
438 get_order(iommu->cmd_buf_size));
439}
440
441/* allocates the memory where the IOMMU will log its events to */
442static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
443{
444 u64 entry;
445 iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
446 get_order(EVT_BUFFER_SIZE));
447
448 if (iommu->evt_buf == NULL)
449 return NULL;
450
451 entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
452 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
453 &entry, sizeof(entry));
454
455 iommu->evt_buf_size = EVT_BUFFER_SIZE;
456
457 return iommu->evt_buf;
458}
459
460static void __init free_event_buffer(struct amd_iommu *iommu)
461{
462 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
418} 463}
419 464
420/* sets a specific bit in the device table entry. */ 465/* sets a specific bit in the device table entry. */
@@ -487,19 +532,21 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
487 */ 532 */
488static void __init init_iommu_from_pci(struct amd_iommu *iommu) 533static void __init init_iommu_from_pci(struct amd_iommu *iommu)
489{ 534{
490 int bus = PCI_BUS(iommu->devid);
491 int dev = PCI_SLOT(iommu->devid);
492 int fn = PCI_FUNC(iommu->devid);
493 int cap_ptr = iommu->cap_ptr; 535 int cap_ptr = iommu->cap_ptr;
494 u32 range; 536 u32 range, misc;
495 537
496 iommu->cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_CAP_HDR_OFFSET); 538 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
539 &iommu->cap);
540 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
541 &range);
542 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
543 &misc);
497 544
498 range = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
499 iommu->first_device = calc_devid(MMIO_GET_BUS(range), 545 iommu->first_device = calc_devid(MMIO_GET_BUS(range),
500 MMIO_GET_FD(range)); 546 MMIO_GET_FD(range));
501 iommu->last_device = calc_devid(MMIO_GET_BUS(range), 547 iommu->last_device = calc_devid(MMIO_GET_BUS(range),
502 MMIO_GET_LD(range)); 548 MMIO_GET_LD(range));
549 iommu->evt_msi_num = MMIO_MSI_NUM(misc);
503} 550}
504 551
505/* 552/*
@@ -604,7 +651,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
604 break; 651 break;
605 } 652 }
606 653
607 p += 0x04 << (e->type >> 6); 654 p += ivhd_entry_length(p);
608 } 655 }
609} 656}
610 657
@@ -622,6 +669,7 @@ static int __init init_iommu_devices(struct amd_iommu *iommu)
622static void __init free_iommu_one(struct amd_iommu *iommu) 669static void __init free_iommu_one(struct amd_iommu *iommu)
623{ 670{
624 free_command_buffer(iommu); 671 free_command_buffer(iommu);
672 free_event_buffer(iommu);
625 iommu_unmap_mmio_space(iommu); 673 iommu_unmap_mmio_space(iommu);
626} 674}
627 675
@@ -649,8 +697,12 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
649 /* 697 /*
650 * Copy data from ACPI table entry to the iommu struct 698 * Copy data from ACPI table entry to the iommu struct
651 */ 699 */
652 iommu->devid = h->devid; 700 iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
701 if (!iommu->dev)
702 return 1;
703
653 iommu->cap_ptr = h->cap_ptr; 704 iommu->cap_ptr = h->cap_ptr;
705 iommu->pci_seg = h->pci_seg;
654 iommu->mmio_phys = h->mmio_phys; 706 iommu->mmio_phys = h->mmio_phys;
655 iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys); 707 iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
656 if (!iommu->mmio_base) 708 if (!iommu->mmio_base)
@@ -661,10 +713,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
661 if (!iommu->cmd_buf) 713 if (!iommu->cmd_buf)
662 return -ENOMEM; 714 return -ENOMEM;
663 715
716 iommu->evt_buf = alloc_event_buffer(iommu);
717 if (!iommu->evt_buf)
718 return -ENOMEM;
719
720 iommu->int_enabled = false;
721
664 init_iommu_from_pci(iommu); 722 init_iommu_from_pci(iommu);
665 init_iommu_from_acpi(iommu, h); 723 init_iommu_from_acpi(iommu, h);
666 init_iommu_devices(iommu); 724 init_iommu_devices(iommu);
667 725
726 pci_enable_device(iommu->dev);
727
668 return 0; 728 return 0;
669} 729}
670 730
@@ -706,6 +766,95 @@ static int __init init_iommu_all(struct acpi_table_header *table)
706 766
707/**************************************************************************** 767/****************************************************************************
708 * 768 *
769 * The following functions initialize the MSI interrupts for all IOMMUs
770 * in the system. It's a bit challenging because there could be multiple
771 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
772 * pci_dev.
773 *
774 ****************************************************************************/
775
776static int __init iommu_setup_msix(struct amd_iommu *iommu)
777{
778 struct amd_iommu *curr;
779 struct msix_entry entries[32]; /* only 32 supported by AMD IOMMU */
780 int nvec = 0, i;
781
782 list_for_each_entry(curr, &amd_iommu_list, list) {
783 if (curr->dev == iommu->dev) {
784 entries[nvec].entry = curr->evt_msi_num;
785 entries[nvec].vector = 0;
786 curr->int_enabled = true;
787 nvec++;
788 }
789 }
790
791 if (pci_enable_msix(iommu->dev, entries, nvec)) {
792 pci_disable_msix(iommu->dev);
793 return 1;
794 }
795
796 for (i = 0; i < nvec; ++i) {
797		int r = request_irq(entries[i].vector, amd_iommu_int_handler,
798 IRQF_SAMPLE_RANDOM,
799 "AMD IOMMU",
800 NULL);
801 if (r)
802 goto out_free;
803 }
804
805 return 0;
806
807out_free:
808 for (i -= 1; i >= 0; --i)
809		free_irq(entries[i].vector, NULL);
810
811 pci_disable_msix(iommu->dev);
812
813 return 1;
814}
815
816static int __init iommu_setup_msi(struct amd_iommu *iommu)
817{
818 int r;
819 struct amd_iommu *curr;
820
821 list_for_each_entry(curr, &amd_iommu_list, list) {
822 if (curr->dev == iommu->dev)
823 curr->int_enabled = true;
824 }
825
826
827 if (pci_enable_msi(iommu->dev))
828 return 1;
829
830 r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
831 IRQF_SAMPLE_RANDOM,
832 "AMD IOMMU",
833 NULL);
834
835 if (r) {
836 pci_disable_msi(iommu->dev);
837 return 1;
838 }
839
840 return 0;
841}
842
843static int __init iommu_init_msi(struct amd_iommu *iommu)
844{
845 if (iommu->int_enabled)
846 return 0;
847
848 if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSIX))
849 return iommu_setup_msix(iommu);
850 else if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
851 return iommu_setup_msi(iommu);
852
853 return 1;
854}
855
856/****************************************************************************
857 *
709 * The next functions belong to the third pass of parsing the ACPI 858 * The next functions belong to the third pass of parsing the ACPI
710 * table. In this last pass the memory mapping requirements are 859 * table. In this last pass the memory mapping requirements are
711 * gathered (like exclusion and unity mapping ranges). 860 * gathered (like exclusion and unity mapping ranges).
@@ -811,7 +960,6 @@ static void init_device_table(void)
811 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { 960 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
812 set_dev_entry_bit(devid, DEV_ENTRY_VALID); 961 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
813 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); 962 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
814 set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT);
815 } 963 }
816} 964}
817 965
@@ -825,6 +973,8 @@ static void __init enable_iommus(void)
825 973
826 list_for_each_entry(iommu, &amd_iommu_list, list) { 974 list_for_each_entry(iommu, &amd_iommu_list, list) {
827 iommu_set_exclusion_range(iommu); 975 iommu_set_exclusion_range(iommu);
976 iommu_init_msi(iommu);
977 iommu_enable_event_logging(iommu);
828 iommu_enable(iommu); 978 iommu_enable(iommu);
829 } 979 }
830} 980}
@@ -995,11 +1145,17 @@ int __init amd_iommu_init(void)
995 else 1145 else
996 printk("disabled\n"); 1146 printk("disabled\n");
997 1147
1148 if (amd_iommu_unmap_flush)
1149 printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n");
1150 else
1151 printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n");
1152
998out: 1153out:
999 return ret; 1154 return ret;
1000 1155
1001free: 1156free:
1002 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 1); 1157 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
1158 get_order(MAX_DOMAIN_ID/8));
1003 1159
1004 free_pages((unsigned long)amd_iommu_pd_table, 1160 free_pages((unsigned long)amd_iommu_pd_table,
1005 get_order(rlookup_table_size)); 1161 get_order(rlookup_table_size));
@@ -1057,8 +1213,10 @@ void __init amd_iommu_detect(void)
1057static int __init parse_amd_iommu_options(char *str) 1213static int __init parse_amd_iommu_options(char *str)
1058{ 1214{
1059 for (; *str; ++str) { 1215 for (; *str; ++str) {
1060 if (strcmp(str, "isolate") == 0) 1216 if (strncmp(str, "isolate", 7) == 0)
1061 amd_iommu_isolate = 1; 1217 amd_iommu_isolate = 1;
1218		if (strncmp(str, "fullflush", 9) == 0)
1219 amd_iommu_unmap_flush = true;
1062 } 1220 }
1063 1221
1064 return 1; 1222 return 1;
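Assuming this parser is registered for the amd_iommu= kernel parameter (the __setup() call itself is outside the hunks shown), the two recognized options would be passed on the kernel command line as:

	amd_iommu=isolate
	amd_iommu=fullflush

where fullflush selects the eager IO/TLB flush on unmap reported by amd_iommu_init() above, and isolate enables device isolation.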
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 44e21826db11..9a32b37ee2ee 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -455,11 +455,11 @@ out:
455 force_iommu || 455 force_iommu ||
456 valid_agp || 456 valid_agp ||
457 fallback_aper_force) { 457 fallback_aper_force) {
458 printk(KERN_ERR 458 printk(KERN_INFO
459 "Your BIOS doesn't leave an aperture memory hole\n"); 459 "Your BIOS doesn't leave an aperture memory hole\n");
460 printk(KERN_ERR 460 printk(KERN_INFO
461 "Please enable the IOMMU option in the BIOS setup\n"); 461 "Please enable the IOMMU option in the BIOS setup\n");
462 printk(KERN_ERR 462 printk(KERN_INFO
463 "This costs you %d MB of RAM\n", 463 "This costs you %d MB of RAM\n",
464 32 << fallback_aper_order); 464 32 << fallback_aper_order);
465 465
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 584272105051..a91c57cb666a 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -60,10 +60,8 @@ unsigned long mp_lapic_addr;
60static int force_enable_local_apic; 60static int force_enable_local_apic;
61int disable_apic; 61int disable_apic;
62 62
63/* Local APIC timer verification ok */
64static int local_apic_timer_verify_ok;
65/* Disable local APIC timer from the kernel commandline or via dmi quirk */ 63/* Disable local APIC timer from the kernel commandline or via dmi quirk */
66static int local_apic_timer_disabled; 64static int disable_apic_timer __cpuinitdata;
67/* Local APIC timer works in C2 */ 65/* Local APIC timer works in C2 */
68int local_apic_timer_c2_ok; 66int local_apic_timer_c2_ok;
69EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); 67EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
@@ -130,7 +128,11 @@ static inline int lapic_get_version(void)
130 */ 128 */
131static inline int lapic_is_integrated(void) 129static inline int lapic_is_integrated(void)
132{ 130{
131#ifdef CONFIG_X86_64
132 return 1;
133#else
133 return APIC_INTEGRATED(lapic_get_version()); 134 return APIC_INTEGRATED(lapic_get_version());
135#endif
134} 136}
135 137
136/* 138/*
@@ -244,8 +246,12 @@ int lapic_get_maxlvt(void)
244 * Local APIC timer 246 * Local APIC timer
245 */ 247 */
246 248
247/* Clock divisor is set to 16 */ 249/* Clock divisor */
250#ifdef CONFIG_X86_64
251#define APIC_DIVISOR 1
252#else
248#define APIC_DIVISOR 16 253#define APIC_DIVISOR 16
254#endif
249 255
250/* 256/*
251 * This function sets up the local APIC timer, with a timeout of 257 * This function sets up the local APIC timer, with a timeout of
@@ -253,6 +259,9 @@ int lapic_get_maxlvt(void)
253 * this function twice on the boot CPU, once with a bogus timeout 259 * this function twice on the boot CPU, once with a bogus timeout
254 * value, second time for real. The other (noncalibrating) CPUs 260 * value, second time for real. The other (noncalibrating) CPUs
255 * call this function only once, with the real, calibrated value. 261 * call this function only once, with the real, calibrated value.
262 *
263 * We do reads before writes even if unnecessary, to get around the
264 * P5 APIC double write bug.
256 */ 265 */
257static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) 266static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
258{ 267{
@@ -274,14 +283,44 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
274 */ 283 */
275 tmp_value = apic_read(APIC_TDCR); 284 tmp_value = apic_read(APIC_TDCR);
276 apic_write(APIC_TDCR, 285 apic_write(APIC_TDCR,
277 (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | 286 (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
278 APIC_TDR_DIV_16); 287 APIC_TDR_DIV_16);
279 288
280 if (!oneshot) 289 if (!oneshot)
281 apic_write(APIC_TMICT, clocks / APIC_DIVISOR); 290 apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
282} 291}
283 292
284/* 293/*
294 * Setup extended LVT, AMD specific (K8, family 10h)
295 *
296 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
297 * MCE interrupts are supported. Thus MCE offset must be set to 0.
298 */
299
300#define APIC_EILVT_LVTOFF_MCE 0
301#define APIC_EILVT_LVTOFF_IBS 1
302
303static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
304{
305 unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
306 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
307
308 apic_write(reg, v);
309}
310
311u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
312{
313 setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
314 return APIC_EILVT_LVTOFF_MCE;
315}
316
317u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
318{
319 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
320 return APIC_EILVT_LVTOFF_IBS;
321}
322
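For a concrete picture of what these helpers write, a standalone sketch for the IBS entry (lvt_off 1); APIC_EILVT0 == 0x500 is assumed here, matching the APIC500 note above, and the vector, message type and mask values are made up:

#include <stdio.h>

#define APIC_EILVT0 0x500	/* assumed, per the "APIC500" note above */

int main(void)
{
	unsigned char lvt_off = 1, vector = 0xf8, msg_type = 0, mask = 0;
	unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
	unsigned int v = (mask << 16) | (msg_type << 8) | vector;

	printf("write 0x%08x to APIC register 0x%03lx\n", v, reg);
	/* prints: write 0x000000f8 to APIC register 0x510 */
	return 0;
}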
323/*
285 * Program the next event, relative to now 324 * Program the next event, relative to now
286 */ 325 */
287static int lapic_next_event(unsigned long delta, 326static int lapic_next_event(unsigned long delta,
@@ -300,8 +339,8 @@ static void lapic_timer_setup(enum clock_event_mode mode,
300 unsigned long flags; 339 unsigned long flags;
301 unsigned int v; 340 unsigned int v;
302 341
303 /* Lapic used for broadcast ? */ 342 /* Lapic used as dummy for broadcast ? */
304 if (!local_apic_timer_verify_ok) 343 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
305 return; 344 return;
306 345
307 local_irq_save(flags); 346 local_irq_save(flags);
@@ -514,7 +553,7 @@ static int __init calibrate_APIC_clock(void)
514 return -1; 553 return -1;
515 } 554 }
516 555
517 local_apic_timer_verify_ok = 1; 556 levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
518 557
519 /* We trust the pm timer based calibration */ 558 /* We trust the pm timer based calibration */
520 if (!pm_referenced) { 559 if (!pm_referenced) {
@@ -548,11 +587,11 @@ static int __init calibrate_APIC_clock(void)
548 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2) 587 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
549 apic_printk(APIC_VERBOSE, "... jiffies result ok\n"); 588 apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
550 else 589 else
551 local_apic_timer_verify_ok = 0; 590 levt->features |= CLOCK_EVT_FEAT_DUMMY;
552 } else 591 } else
553 local_irq_enable(); 592 local_irq_enable();
554 593
555 if (!local_apic_timer_verify_ok) { 594 if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
556 printk(KERN_WARNING 595 printk(KERN_WARNING
557 "APIC timer disabled due to verification failure.\n"); 596 "APIC timer disabled due to verification failure.\n");
558 return -1; 597 return -1;
@@ -574,7 +613,8 @@ void __init setup_boot_APIC_clock(void)
574 * timer as a dummy clock event source on SMP systems, so the 613 * timer as a dummy clock event source on SMP systems, so the
575 * broadcast mechanism is used. On UP systems simply ignore it. 614 * broadcast mechanism is used. On UP systems simply ignore it.
576 */ 615 */
577 if (local_apic_timer_disabled) { 616 if (disable_apic_timer) {
617 printk(KERN_INFO "Disabling APIC timer\n");
578 /* No broadcast on UP ! */ 618 /* No broadcast on UP ! */
579 if (num_possible_cpus() > 1) { 619 if (num_possible_cpus() > 1) {
580 lapic_clockevent.mult = 1; 620 lapic_clockevent.mult = 1;
@@ -643,7 +683,11 @@ static void local_apic_timer_interrupt(void)
643 /* 683 /*
644 * the NMI deadlock-detector uses this. 684 * the NMI deadlock-detector uses this.
645 */ 685 */
686#ifdef CONFIG_X86_64
687 add_pda(apic_timer_irqs, 1);
688#else
646 per_cpu(irq_stat, cpu).apic_timer_irqs++; 689 per_cpu(irq_stat, cpu).apic_timer_irqs++;
690#endif
647 691
648 evt->event_handler(evt); 692 evt->event_handler(evt);
649} 693}
@@ -683,35 +727,6 @@ int setup_profiling_timer(unsigned int multiplier)
683} 727}
684 728
685/* 729/*
686 * Setup extended LVT, AMD specific (K8, family 10h)
687 *
688 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
689 * MCE interrupts are supported. Thus MCE offset must be set to 0.
690 */
691
692#define APIC_EILVT_LVTOFF_MCE 0
693#define APIC_EILVT_LVTOFF_IBS 1
694
695static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
696{
697 unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
698 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
699 apic_write(reg, v);
700}
701
702u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
703{
704 setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
705 return APIC_EILVT_LVTOFF_MCE;
706}
707
708u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
709{
710 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
711 return APIC_EILVT_LVTOFF_IBS;
712}
713
714/*
715 * Local APIC start and shutdown 730 * Local APIC start and shutdown
716 */ 731 */
717 732
@@ -756,7 +771,7 @@ void clear_local_APIC(void)
756 } 771 }
757 772
758	/* let's not touch this if we didn't frob it */ 773	/* let's not touch this if we didn't frob it */
759#ifdef CONFIG_X86_MCE_P4THERMAL 774#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
760 if (maxlvt >= 5) { 775 if (maxlvt >= 5) {
761 v = apic_read(APIC_LVTTHMR); 776 v = apic_read(APIC_LVTTHMR);
762 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); 777 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
@@ -773,10 +788,6 @@ void clear_local_APIC(void)
773 if (maxlvt >= 4) 788 if (maxlvt >= 4)
774 apic_write(APIC_LVTPC, APIC_LVT_MASKED); 789 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
775 790
776#ifdef CONFIG_X86_MCE_P4THERMAL
777 if (maxlvt >= 5)
778 apic_write(APIC_LVTTHMR, APIC_LVT_MASKED);
779#endif
780 /* Integrated APIC (!82489DX) ? */ 791 /* Integrated APIC (!82489DX) ? */
781 if (lapic_is_integrated()) { 792 if (lapic_is_integrated()) {
782 if (maxlvt > 3) 793 if (maxlvt > 3)
@@ -791,7 +802,7 @@ void clear_local_APIC(void)
791 */ 802 */
792void disable_local_APIC(void) 803void disable_local_APIC(void)
793{ 804{
794 unsigned long value; 805 unsigned int value;
795 806
796 clear_local_APIC(); 807 clear_local_APIC();
797 808
@@ -803,6 +814,7 @@ void disable_local_APIC(void)
803 value &= ~APIC_SPIV_APIC_ENABLED; 814 value &= ~APIC_SPIV_APIC_ENABLED;
804 apic_write(APIC_SPIV, value); 815 apic_write(APIC_SPIV, value);
805 816
817#ifdef CONFIG_X86_32
806 /* 818 /*
807 * When LAPIC was disabled by the BIOS and enabled by the kernel, 819 * When LAPIC was disabled by the BIOS and enabled by the kernel,
808 * restore the disabled state. 820 * restore the disabled state.
@@ -814,6 +826,7 @@ void disable_local_APIC(void)
814 l &= ~MSR_IA32_APICBASE_ENABLE; 826 l &= ~MSR_IA32_APICBASE_ENABLE;
815 wrmsr(MSR_IA32_APICBASE, l, h); 827 wrmsr(MSR_IA32_APICBASE, l, h);
816 } 828 }
829#endif
817} 830}
818 831
819/* 832/*
@@ -830,11 +843,15 @@ void lapic_shutdown(void)
830 return; 843 return;
831 844
832 local_irq_save(flags); 845 local_irq_save(flags);
833 clear_local_APIC();
834 846
835 if (enabled_via_apicbase) 847#ifdef CONFIG_X86_32
848 if (!enabled_via_apicbase)
849 clear_local_APIC();
850 else
851#endif
836 disable_local_APIC(); 852 disable_local_APIC();
837 853
854
838 local_irq_restore(flags); 855 local_irq_restore(flags);
839} 856}
840 857
@@ -879,6 +896,12 @@ int __init verify_local_APIC(void)
879 */ 896 */
880 reg0 = apic_read(APIC_ID); 897 reg0 = apic_read(APIC_ID);
881 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); 898 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
899 apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
900 reg1 = apic_read(APIC_ID);
901 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
902 apic_write(APIC_ID, reg0);
903 if (reg1 != (reg0 ^ APIC_ID_MASK))
904 return 0;
882 905
883 /* 906 /*
884 * The next two are just to see if we have sane values. 907 * The next two are just to see if we have sane values.
@@ -904,14 +927,15 @@ void __init sync_Arb_IDs(void)
904 */ 927 */
905 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD) 928 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
906 return; 929 return;
930
907 /* 931 /*
908 * Wait for idle. 932 * Wait for idle.
909 */ 933 */
910 apic_wait_icr_idle(); 934 apic_wait_icr_idle();
911 935
912 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n"); 936 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
913 apic_write(APIC_ICR, 937 apic_write(APIC_ICR, APIC_DEST_ALLINC |
914 APIC_DEST_ALLINC | APIC_INT_LEVELTRIG | APIC_DM_INIT); 938 APIC_INT_LEVELTRIG | APIC_DM_INIT);
915} 939}
916 940
917/* 941/*
@@ -919,7 +943,7 @@ void __init sync_Arb_IDs(void)
919 */ 943 */
920void __init init_bsp_APIC(void) 944void __init init_bsp_APIC(void)
921{ 945{
922 unsigned long value; 946 unsigned int value;
923 947
924 /* 948 /*
925 * Don't do the setup now if we have a SMP BIOS as the 949 * Don't do the setup now if we have a SMP BIOS as the
@@ -940,11 +964,13 @@ void __init init_bsp_APIC(void)
940 value &= ~APIC_VECTOR_MASK; 964 value &= ~APIC_VECTOR_MASK;
941 value |= APIC_SPIV_APIC_ENABLED; 965 value |= APIC_SPIV_APIC_ENABLED;
942 966
967#ifdef CONFIG_X86_32
943 /* This bit is reserved on P4/Xeon and should be cleared */ 968 /* This bit is reserved on P4/Xeon and should be cleared */
944 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 969 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
945 (boot_cpu_data.x86 == 15)) 970 (boot_cpu_data.x86 == 15))
946 value &= ~APIC_SPIV_FOCUS_DISABLED; 971 value &= ~APIC_SPIV_FOCUS_DISABLED;
947 else 972 else
973#endif
948 value |= APIC_SPIV_FOCUS_DISABLED; 974 value |= APIC_SPIV_FOCUS_DISABLED;
949 value |= SPURIOUS_APIC_VECTOR; 975 value |= SPURIOUS_APIC_VECTOR;
950 apic_write(APIC_SPIV, value); 976 apic_write(APIC_SPIV, value);
@@ -963,6 +989,16 @@ static void __cpuinit lapic_setup_esr(void)
963{ 989{
964 unsigned long oldvalue, value, maxlvt; 990 unsigned long oldvalue, value, maxlvt;
965 if (lapic_is_integrated() && !esr_disable) { 991 if (lapic_is_integrated() && !esr_disable) {
992 if (esr_disable) {
993 /*
994 * Something untraceable is creating bad interrupts on
995 * secondary quads ... for the moment, just leave the
996 * ESR disabled - we can't do anything useful with the
997 * errors anyway - mbligh
998 */
999 printk(KERN_INFO "Leaving ESR disabled.\n");
1000 return;
1001 }
966 /* !82489DX */ 1002 /* !82489DX */
967 maxlvt = lapic_get_maxlvt(); 1003 maxlvt = lapic_get_maxlvt();
968 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 1004 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
@@ -983,16 +1019,7 @@ static void __cpuinit lapic_setup_esr(void)
983 "vector: 0x%08lx after: 0x%08lx\n", 1019 "vector: 0x%08lx after: 0x%08lx\n",
984 oldvalue, value); 1020 oldvalue, value);
985 } else { 1021 } else {
986 if (esr_disable) 1022 printk(KERN_INFO "No ESR for 82489DX.\n");
987 /*
988 * Something untraceable is creating bad interrupts on
989 * secondary quads ... for the moment, just leave the
990 * ESR disabled - we can't do anything useful with the
991 * errors anyway - mbligh
992 */
993 printk(KERN_INFO "Leaving ESR disabled.\n");
994 else
995 printk(KERN_INFO "No ESR for 82489DX.\n");
996 } 1023 }
997} 1024}
998 1025
@@ -1130,13 +1157,17 @@ void __cpuinit setup_local_APIC(void)
1130 1157
1131void __cpuinit end_local_APIC_setup(void) 1158void __cpuinit end_local_APIC_setup(void)
1132{ 1159{
1133 unsigned long value;
1134
1135 lapic_setup_esr(); 1160 lapic_setup_esr();
1136 /* Disable the local apic timer */ 1161
1137 value = apic_read(APIC_LVTT); 1162#ifdef CONFIG_X86_32
1138 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); 1163 {
1139 apic_write(APIC_LVTT, value); 1164 unsigned int value;
1165 /* Disable the local apic timer */
1166 value = apic_read(APIC_LVTT);
1167 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1168 apic_write(APIC_LVTT, value);
1169 }
1170#endif
1140 1171
1141 setup_apic_nmi_watchdog(NULL); 1172 setup_apic_nmi_watchdog(NULL);
1142 apic_pm_activate(); 1173 apic_pm_activate();
@@ -1367,6 +1398,7 @@ void smp_error_interrupt(struct pt_regs *regs)
1367 */ 1398 */
1368void __init connect_bsp_APIC(void) 1399void __init connect_bsp_APIC(void)
1369{ 1400{
1401#ifdef CONFIG_X86_32
1370 if (pic_mode) { 1402 if (pic_mode) {
1371 /* 1403 /*
1372 * Do not trust the local APIC being empty at bootup. 1404 * Do not trust the local APIC being empty at bootup.
@@ -1381,6 +1413,7 @@ void __init connect_bsp_APIC(void)
1381 outb(0x70, 0x22); 1413 outb(0x70, 0x22);
1382 outb(0x01, 0x23); 1414 outb(0x01, 0x23);
1383 } 1415 }
1416#endif
1384 enable_apic_mode(); 1417 enable_apic_mode();
1385} 1418}
1386 1419
@@ -1393,6 +1426,9 @@ void __init connect_bsp_APIC(void)
1393 */ 1426 */
1394void disconnect_bsp_APIC(int virt_wire_setup) 1427void disconnect_bsp_APIC(int virt_wire_setup)
1395{ 1428{
1429 unsigned int value;
1430
1431#ifdef CONFIG_X86_32
1396 if (pic_mode) { 1432 if (pic_mode) {
1397 /* 1433 /*
1398 * Put the board back into PIC mode (has an effect only on 1434 * Put the board back into PIC mode (has an effect only on
@@ -1404,54 +1440,53 @@ void disconnect_bsp_APIC(int virt_wire_setup)
1404 "entering PIC mode.\n"); 1440 "entering PIC mode.\n");
1405 outb(0x70, 0x22); 1441 outb(0x70, 0x22);
1406 outb(0x00, 0x23); 1442 outb(0x00, 0x23);
1407 } else { 1443 return;
1408 /* Go back to Virtual Wire compatibility mode */ 1444 }
1409 unsigned long value; 1445#endif
1410 1446
1411 /* For the spurious interrupt use vector F, and enable it */ 1447 /* Go back to Virtual Wire compatibility mode */
1412 value = apic_read(APIC_SPIV);
1413 value &= ~APIC_VECTOR_MASK;
1414 value |= APIC_SPIV_APIC_ENABLED;
1415 value |= 0xf;
1416 apic_write(APIC_SPIV, value);
1417 1448
1418 if (!virt_wire_setup) { 1449 /* For the spurious interrupt use vector F, and enable it */
1419 /* 1450 value = apic_read(APIC_SPIV);
1420 * For LVT0 make it edge triggered, active high, 1451 value &= ~APIC_VECTOR_MASK;
1421 * external and enabled 1452 value |= APIC_SPIV_APIC_ENABLED;
1422 */ 1453 value |= 0xf;
1423 value = apic_read(APIC_LVT0); 1454 apic_write(APIC_SPIV, value);
1424 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1425 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1426 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1427 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1428 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
1429 apic_write(APIC_LVT0, value);
1430 } else {
1431 /* Disable LVT0 */
1432 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1433 }
1434 1455
1456 if (!virt_wire_setup) {
1435 /* 1457 /*
1436 * For LVT1 make it edge triggered, active high, nmi and 1458 * For LVT0 make it edge triggered, active high,
1437 * enabled 1459 * external and enabled
1438 */ 1460 */
1439 value = apic_read(APIC_LVT1); 1461 value = apic_read(APIC_LVT0);
1440 value &= ~( 1462 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1441 APIC_MODE_MASK | APIC_SEND_PENDING |
1442 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | 1463 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1443 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); 1464 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1444 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; 1465 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1445 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); 1466 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
1446 apic_write(APIC_LVT1, value); 1467 apic_write(APIC_LVT0, value);
1468 } else {
1469 /* Disable LVT0 */
1470 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1447 } 1471 }
1472
1473 /*
1474 * For LVT1 make it edge triggered, active high,
1475 * nmi and enabled
1476 */
1477 value = apic_read(APIC_LVT1);
1478 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1479 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1480 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1481 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1482 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
1483 apic_write(APIC_LVT1, value);
1448} 1484}
1449 1485
1450void __cpuinit generic_processor_info(int apicid, int version) 1486void __cpuinit generic_processor_info(int apicid, int version)
1451{ 1487{
1452 int cpu; 1488 int cpu;
1453 cpumask_t tmp_map; 1489 cpumask_t tmp_map;
1454 physid_mask_t phys_cpu;
1455 1490
1456 /* 1491 /*
1457 * Validate version 1492 * Validate version
@@ -1464,9 +1499,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
1464 } 1499 }
1465 apic_version[apicid] = version; 1500 apic_version[apicid] = version;
1466 1501
1467 phys_cpu = apicid_to_cpu_present(apicid);
1468 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
1469
1470 if (num_processors >= NR_CPUS) { 1502 if (num_processors >= NR_CPUS) {
1471 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." 1503 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
1472 " Processor ignored.\n", NR_CPUS); 1504 " Processor ignored.\n", NR_CPUS);
@@ -1477,17 +1509,19 @@ void __cpuinit generic_processor_info(int apicid, int version)
1477 cpus_complement(tmp_map, cpu_present_map); 1509 cpus_complement(tmp_map, cpu_present_map);
1478 cpu = first_cpu(tmp_map); 1510 cpu = first_cpu(tmp_map);
1479 1511
1480 if (apicid == boot_cpu_physical_apicid) 1512 physid_set(apicid, phys_cpu_present_map);
1513 if (apicid == boot_cpu_physical_apicid) {
1481 /* 1514 /*
1482 * x86_bios_cpu_apicid is required to have processors listed 1515 * x86_bios_cpu_apicid is required to have processors listed
1483 * in same order as logical cpu numbers. Hence the first 1516 * in same order as logical cpu numbers. Hence the first
1484 * entry is BSP, and so on. 1517 * entry is BSP, and so on.
1485 */ 1518 */
1486 cpu = 0; 1519 cpu = 0;
1487 1520 }
1488 if (apicid > max_physical_apicid) 1521 if (apicid > max_physical_apicid)
1489 max_physical_apicid = apicid; 1522 max_physical_apicid = apicid;
1490 1523
1524#ifdef CONFIG_X86_32
1491 /* 1525 /*
1492 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y 1526 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
1493 * but we need to work other dependencies like SMP_SUSPEND etc 1527 * but we need to work other dependencies like SMP_SUSPEND etc
@@ -1507,7 +1541,9 @@ void __cpuinit generic_processor_info(int apicid, int version)
1507 def_to_bigsmp = 1; 1541 def_to_bigsmp = 1;
1508 } 1542 }
1509 } 1543 }
1510#ifdef CONFIG_SMP 1544#endif
1545
1546#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
1511 /* are we being called early in kernel startup? */ 1547 /* are we being called early in kernel startup? */
1512 if (early_per_cpu_ptr(x86_cpu_to_apicid)) { 1548 if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
1513 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); 1549 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
@@ -1520,6 +1556,7 @@ void __cpuinit generic_processor_info(int apicid, int version)
1520 per_cpu(x86_bios_cpu_apicid, cpu) = apicid; 1556 per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1521 } 1557 }
1522#endif 1558#endif
1559
1523 cpu_set(cpu, cpu_possible_map); 1560 cpu_set(cpu, cpu_possible_map);
1524 cpu_set(cpu, cpu_present_map); 1561 cpu_set(cpu, cpu_present_map);
1525} 1562}
@@ -1530,6 +1567,11 @@ void __cpuinit generic_processor_info(int apicid, int version)
1530#ifdef CONFIG_PM 1567#ifdef CONFIG_PM
1531 1568
1532static struct { 1569static struct {
1570 /*
1571 * 'active' is true if the local APIC was enabled by us and
1572 * not the BIOS; this signifies that we are also responsible
1573 * for disabling it before entering apm/acpi suspend
1574 */
1533 int active; 1575 int active;
1534 /* r/w apic fields */ 1576 /* r/w apic fields */
1535 unsigned int apic_id; 1577 unsigned int apic_id;
@@ -1570,7 +1612,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
1570 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); 1612 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
1571 apic_pm_state.apic_tmict = apic_read(APIC_TMICT); 1613 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
1572 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); 1614 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
1573#ifdef CONFIG_X86_MCE_P4THERMAL 1615#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
1574 if (maxlvt >= 5) 1616 if (maxlvt >= 5)
1575 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); 1617 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
1576#endif 1618#endif
@@ -1594,16 +1636,23 @@ static int lapic_resume(struct sys_device *dev)
1594 1636
1595 local_irq_save(flags); 1637 local_irq_save(flags);
1596 1638
1597 /* 1639#ifdef CONFIG_X86_64
1598 * Make sure the APICBASE points to the right address 1640 if (x2apic)
1599 * 1641 enable_x2apic();
1600 * FIXME! This will be wrong if we ever support suspend on 1642 else
1601 * SMP! We'll need to do this as part of the CPU restore! 1643#endif
1602 */ 1644 {
1603 rdmsr(MSR_IA32_APICBASE, l, h); 1645 /*
1604 l &= ~MSR_IA32_APICBASE_BASE; 1646 * Make sure the APICBASE points to the right address
1605 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; 1647 *
1606 wrmsr(MSR_IA32_APICBASE, l, h); 1648 * FIXME! This will be wrong if we ever support suspend on
1649 * SMP! We'll need to do this as part of the CPU restore!
1650 */
1651 rdmsr(MSR_IA32_APICBASE, l, h);
1652 l &= ~MSR_IA32_APICBASE_BASE;
1653 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
1654 wrmsr(MSR_IA32_APICBASE, l, h);
1655 }
1607 1656
1608 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); 1657 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
1609 apic_write(APIC_ID, apic_pm_state.apic_id); 1658 apic_write(APIC_ID, apic_pm_state.apic_id);
@@ -1613,7 +1662,7 @@ static int lapic_resume(struct sys_device *dev)
1613 apic_write(APIC_SPIV, apic_pm_state.apic_spiv); 1662 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
1614 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); 1663 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
1615 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); 1664 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
1616#ifdef CONFIG_X86_MCE_P4THERMAL 1665#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
1617 if (maxlvt >= 5) 1666 if (maxlvt >= 5)
1618 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); 1667 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
1619#endif 1668#endif
@@ -1627,7 +1676,9 @@ static int lapic_resume(struct sys_device *dev)
1627 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr); 1676 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
1628 apic_write(APIC_ESR, 0); 1677 apic_write(APIC_ESR, 0);
1629 apic_read(APIC_ESR); 1678 apic_read(APIC_ESR);
1679
1630 local_irq_restore(flags); 1680 local_irq_restore(flags);
1681
1631 return 0; 1682 return 0;
1632} 1683}
1633 1684
@@ -1683,20 +1734,20 @@ static int __init parse_lapic(char *arg)
1683} 1734}
1684early_param("lapic", parse_lapic); 1735early_param("lapic", parse_lapic);
1685 1736
1686static int __init parse_nolapic(char *arg) 1737static int __init setup_disableapic(char *arg)
1687{ 1738{
1688 disable_apic = 1; 1739 disable_apic = 1;
1689 setup_clear_cpu_cap(X86_FEATURE_APIC); 1740 setup_clear_cpu_cap(X86_FEATURE_APIC);
1690 return 0; 1741 return 0;
1691} 1742}
1692early_param("nolapic", parse_nolapic); 1743early_param("disableapic", setup_disableapic);
1693 1744
1694static int __init parse_disable_lapic_timer(char *arg) 1745/* same as disableapic, for compatibility */
1746static int __init setup_nolapic(char *arg)
1695{ 1747{
1696 local_apic_timer_disabled = 1; 1748 return setup_disableapic(arg);
1697 return 0;
1698} 1749}
1699early_param("nolapic_timer", parse_disable_lapic_timer); 1750early_param("nolapic", setup_nolapic);
1700 1751
1701static int __init parse_lapic_timer_c2_ok(char *arg) 1752static int __init parse_lapic_timer_c2_ok(char *arg)
1702{ 1753{
@@ -1705,15 +1756,40 @@ static int __init parse_lapic_timer_c2_ok(char *arg)
1705} 1756}
1706early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); 1757early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1707 1758
1759static int __init parse_disable_apic_timer(char *arg)
1760{
1761 disable_apic_timer = 1;
1762 return 0;
1763}
1764early_param("noapictimer", parse_disable_apic_timer);
1765
1766static int __init parse_nolapic_timer(char *arg)
1767{
1768 disable_apic_timer = 1;
1769 return 0;
1770}
1771early_param("nolapic_timer", parse_nolapic_timer);
1772
1708static int __init apic_set_verbosity(char *arg) 1773static int __init apic_set_verbosity(char *arg)
1709{ 1774{
1710 if (!arg) 1775 if (!arg) {
1776#ifdef CONFIG_X86_64
1777 skip_ioapic_setup = 0;
1778 ioapic_force = 1;
1779 return 0;
1780#endif
1711 return -EINVAL; 1781 return -EINVAL;
1782 }
1712 1783
1713 if (strcmp(arg, "debug") == 0) 1784 if (strcmp("debug", arg) == 0)
1714 apic_verbosity = APIC_DEBUG; 1785 apic_verbosity = APIC_DEBUG;
1715 else if (strcmp(arg, "verbose") == 0) 1786 else if (strcmp("verbose", arg) == 0)
1716 apic_verbosity = APIC_VERBOSE; 1787 apic_verbosity = APIC_VERBOSE;
1788 else {
 1789 printk(KERN_WARNING "APIC Verbosity level %s not recognised,"
1790 " use apic=verbose or apic=debug\n", arg);
1791 return -EINVAL;
1792 }
1717 1793
1718 return 0; 1794 return 0;
1719} 1795}
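Judging from the parsing above, the practical effect of the unified handler is: booting with apic=debug or apic=verbose raises the verbosity used by apic_printk() in these files, any other value is rejected with the warning shown, and a bare "apic" with no value is only meaningful on 64-bit builds, where it re-enables IO-APIC setup (skip_ioapic_setup = 0, ioapic_force = 1).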
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 1a6011855af3..53898b65a6ae 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -45,6 +45,7 @@
45#include <mach_ipi.h> 45#include <mach_ipi.h>
46#include <mach_apic.h> 46#include <mach_apic.h>
47 47
48/* Disable local APIC timer from the kernel commandline or via dmi quirk */
48static int disable_apic_timer __cpuinitdata; 49static int disable_apic_timer __cpuinitdata;
49static int apic_calibrate_pmtmr __initdata; 50static int apic_calibrate_pmtmr __initdata;
50int disable_apic; 51int disable_apic;
@@ -80,6 +81,9 @@ static void lapic_timer_setup(enum clock_event_mode mode,
80static void lapic_timer_broadcast(cpumask_t mask); 81static void lapic_timer_broadcast(cpumask_t mask);
81static void apic_pm_activate(void); 82static void apic_pm_activate(void);
82 83
84/*
85 * The local apic timer can be used for any function which is CPU local.
86 */
83static struct clock_event_device lapic_clockevent = { 87static struct clock_event_device lapic_clockevent = {
84 .name = "lapic", 88 .name = "lapic",
85 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT 89 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
@@ -106,11 +110,15 @@ static inline int lapic_get_version(void)
106} 110}
107 111
108/* 112/*
109 * Check, if the APIC is integrated or a seperate chip 113 * Check, if the APIC is integrated or a separate chip
110 */ 114 */
111static inline int lapic_is_integrated(void) 115static inline int lapic_is_integrated(void)
112{ 116{
117#ifdef CONFIG_X86_64
113 return 1; 118 return 1;
119#else
120 return APIC_INTEGRATED(lapic_get_version());
121#endif
114} 122}
115 123
116/* 124/*
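For reference, the 32-bit leg of lapic_is_integrated() keys off the local APIC version register: integrated APICs (Pentium and later) report a version of the form 0x1X, while the external 82489DX reports 0x0X. A small user-space sketch of that test, assuming the usual GET_APIC_VERSION()/APIC_INTEGRATED() values from <asm/apicdef.h> (they are not part of this hunk):

#include <assert.h>

#define GET_APIC_VERSION(x)  ((x) & 0xFF)
#define APIC_INTEGRATED(x)   ((x) & 0xF0)   /* high nibble set => integrated */

/* lvr is the raw contents of the APIC_LVR (version) register. */
static int is_integrated(unsigned int lvr)
{
        return APIC_INTEGRATED(GET_APIC_VERSION(lvr)) != 0;
}

int main(void)
{
        assert(is_integrated(0x00040011));   /* typical integrated local APIC */
        assert(!is_integrated(0x00000004));  /* 82489DX-style 0x0X version    */
        return 0;
}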
@@ -125,6 +133,11 @@ static int modern_apic(void)
125 return lapic_get_version() >= 0x14; 133 return lapic_get_version() >= 0x14;
126} 134}
127 135
136/*
 137 * Paravirt kernels may also be using the ops below, so we still
 138 * use the generic apic_read()/apic_write(), which may point to different
 139 * ops in the PARAVIRT case.
140 */
128void xapic_wait_icr_idle(void) 141void xapic_wait_icr_idle(void)
129{ 142{
130 while (apic_read(APIC_ICR) & APIC_ICR_BUSY) 143 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
@@ -149,7 +162,7 @@ u32 safe_xapic_wait_icr_idle(void)
149 162
150void xapic_icr_write(u32 low, u32 id) 163void xapic_icr_write(u32 low, u32 id)
151{ 164{
152 apic_write(APIC_ICR2, id << 24); 165 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
153 apic_write(APIC_ICR, low); 166 apic_write(APIC_ICR, low);
154} 167}
155 168
@@ -160,7 +173,7 @@ u64 xapic_icr_read(void)
160 icr2 = apic_read(APIC_ICR2); 173 icr2 = apic_read(APIC_ICR2);
161 icr1 = apic_read(APIC_ICR); 174 icr1 = apic_read(APIC_ICR);
162 175
163 return (icr1 | ((u64)icr2 << 32)); 176 return icr1 | ((u64)icr2 << 32);
164} 177}
165 178
166static struct apic_ops xapic_ops = { 179static struct apic_ops xapic_ops = {
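The accessors above reflect the xAPIC layout of the interrupt command register: a logically 64-bit ICR split across two 32-bit MMIO registers, with the destination APIC ID in bits 24-31 of the high half (APIC_ICR2). SET_APIC_DEST_FIELD() is, going by the usual apicdef.h definition, simply (id) << 24 spelled more readably. A pure-function sketch of the packing (macro values assumed, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

#define SET_APIC_DEST_FIELD(x)  ((x) << 24)           /* bits 24-31 of ICR2 */
#define GET_APIC_DEST_FIELD(x)  (((x) >> 24) & 0xFF)

/* Mirror of xapic_icr_read(): combine the two 32-bit halves. */
static uint64_t icr_combine(uint32_t icr_low, uint32_t icr2)
{
        return icr_low | ((uint64_t)icr2 << 32);
}

int main(void)
{
        uint32_t icr2 = SET_APIC_DEST_FIELD(3);        /* target APIC ID 3   */
        uint64_t icr  = icr_combine(0x00004500, icr2); /* e.g. an INIT IPI   */

        printf("dest=%u icr=0x%016llx\n",
               GET_APIC_DEST_FIELD(icr2), (unsigned long long)icr);
        return 0;
}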
@@ -173,7 +186,6 @@ static struct apic_ops xapic_ops = {
173}; 186};
174 187
175struct apic_ops __read_mostly *apic_ops = &xapic_ops; 188struct apic_ops __read_mostly *apic_ops = &xapic_ops;
176
177EXPORT_SYMBOL_GPL(apic_ops); 189EXPORT_SYMBOL_GPL(apic_ops);
178 190
179static void x2apic_wait_icr_idle(void) 191static void x2apic_wait_icr_idle(void)
@@ -243,6 +255,17 @@ int lapic_get_maxlvt(void)
243} 255}
244 256
245/* 257/*
258 * Local APIC timer
259 */
260
261/* Clock divisor */
 262#ifdef CONFIG_X86_64
263#define APIC_DIVISOR 1
264#else
265#define APIC_DIVISOR 16
266#endif
267
268/*
246 * This function sets up the local APIC timer, with a timeout of 269 * This function sets up the local APIC timer, with a timeout of
247 * 'clocks' APIC bus clock. During calibration we actually call 270 * 'clocks' APIC bus clock. During calibration we actually call
248 * this function twice on the boot CPU, once with a bogus timeout 271 * this function twice on the boot CPU, once with a bogus timeout
@@ -252,7 +275,6 @@ int lapic_get_maxlvt(void)
252 * We do reads before writes even if unnecessary, to get around the 275 * We do reads before writes even if unnecessary, to get around the
253 * P5 APIC double write bug. 276 * P5 APIC double write bug.
254 */ 277 */
255
256static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) 278static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
257{ 279{
258 unsigned int lvtt_value, tmp_value; 280 unsigned int lvtt_value, tmp_value;
@@ -260,6 +282,9 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
260 lvtt_value = LOCAL_TIMER_VECTOR; 282 lvtt_value = LOCAL_TIMER_VECTOR;
261 if (!oneshot) 283 if (!oneshot)
262 lvtt_value |= APIC_LVT_TIMER_PERIODIC; 284 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
285 if (!lapic_is_integrated())
286 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
287
263 if (!irqen) 288 if (!irqen)
264 lvtt_value |= APIC_LVT_MASKED; 289 lvtt_value |= APIC_LVT_MASKED;
265 290
@@ -269,12 +294,12 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
269 * Divide PICLK by 16 294 * Divide PICLK by 16
270 */ 295 */
271 tmp_value = apic_read(APIC_TDCR); 296 tmp_value = apic_read(APIC_TDCR);
272 apic_write(APIC_TDCR, (tmp_value 297 apic_write(APIC_TDCR,
273 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) 298 (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
274 | APIC_TDR_DIV_16); 299 APIC_TDR_DIV_16);
275 300
276 if (!oneshot) 301 if (!oneshot)
277 apic_write(APIC_TMICT, clocks); 302 apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
278} 303}
279 304
280/* 305/*
@@ -444,7 +469,7 @@ static int __init calibrate_APIC_clock(void)
444 lapic_clockevent.min_delta_ns = 469 lapic_clockevent.min_delta_ns =
445 clockevent_delta2ns(0xF, &lapic_clockevent); 470 clockevent_delta2ns(0xF, &lapic_clockevent);
446 471
447 calibration_result = result / HZ; 472 calibration_result = (result * APIC_DIVISOR) / HZ;
448 473
449 /* 474 /*
450 * Do a sanity check on the APIC calibration result 475 * Do a sanity check on the APIC calibration result
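The APIC_DIVISOR handling added above is easiest to see as a pair of scalings that cancel: __setup_APIC_LVTT() divides the requested count by APIC_DIVISOR before writing APIC_TMICT, and calibrate_APIC_clock() multiplies its measured result by APIC_DIVISOR, so callers keep working in the same units whether the divisor is 1 (the 64-bit value) or 16 (the 32-bit value). A stand-alone sketch of that arithmetic, with made-up numbers and an assumed HZ:

#include <stdio.h>

#define HZ            1000         /* assumed tick rate for the example  */
#define APIC_DIVISOR  16           /* 16 on 32-bit, 1 on 64-bit          */

int main(void)
{
        /* counts seen by the calibration loop over one second, at the
         * divide rate the timer was actually programmed with */
        unsigned long result = 12500000UL;

        /* what calibrate_APIC_clock() stores ... */
        unsigned long calibration_result = (result * APIC_DIVISOR) / HZ;

        /* ... and what __setup_APIC_LVTT() writes back into APIC_TMICT */
        unsigned long tmict = calibration_result / APIC_DIVISOR;

        /* independent of APIC_DIVISOR: tmict == result / HZ */
        printf("calibration_result=%lu tmict=%lu\n", calibration_result, tmict);
        return 0;
}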
@@ -466,10 +491,10 @@ static int __init calibrate_APIC_clock(void)
466void __init setup_boot_APIC_clock(void) 491void __init setup_boot_APIC_clock(void)
467{ 492{
468 /* 493 /*
469 * The local apic timer can be disabled via the kernel commandline. 494 * The local apic timer can be disabled via the kernel
470 * Register the lapic timer as a dummy clock event source on SMP 495 * commandline or from the CPU detection code. Register the lapic
471 * systems, so the broadcast mechanism is used. On UP systems simply 496 * timer as a dummy clock event source on SMP systems, so the
472 * ignore it. 497 * broadcast mechanism is used. On UP systems simply ignore it.
473 */ 498 */
474 if (disable_apic_timer) { 499 if (disable_apic_timer) {
475 printk(KERN_INFO "Disabling APIC timer\n"); 500 printk(KERN_INFO "Disabling APIC timer\n");
@@ -481,7 +506,9 @@ void __init setup_boot_APIC_clock(void)
481 return; 506 return;
482 } 507 }
483 508
484 printk(KERN_INFO "Using local APIC timer interrupts.\n"); 509 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
510 "calibrating APIC timer ...\n");
511
485 if (calibrate_APIC_clock()) { 512 if (calibrate_APIC_clock()) {
486 /* No broadcast on UP ! */ 513 /* No broadcast on UP ! */
487 if (num_possible_cpus() > 1) 514 if (num_possible_cpus() > 1)
@@ -500,6 +527,7 @@ void __init setup_boot_APIC_clock(void)
500 printk(KERN_WARNING "APIC timer registered as dummy," 527 printk(KERN_WARNING "APIC timer registered as dummy,"
501 " due to nmi_watchdog=%d!\n", nmi_watchdog); 528 " due to nmi_watchdog=%d!\n", nmi_watchdog);
502 529
530 /* Setup the lapic or request the broadcast */
503 setup_APIC_timer(); 531 setup_APIC_timer();
504} 532}
505 533
@@ -538,7 +566,11 @@ static void local_apic_timer_interrupt(void)
538 /* 566 /*
539 * the NMI deadlock-detector uses this. 567 * the NMI deadlock-detector uses this.
540 */ 568 */
569#ifdef CONFIG_X86_64
541 add_pda(apic_timer_irqs, 1); 570 add_pda(apic_timer_irqs, 1);
571#else
572 per_cpu(irq_stat, cpu).apic_timer_irqs++;
573#endif
542 574
543 evt->event_handler(evt); 575 evt->event_handler(evt);
544} 576}
@@ -569,6 +601,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
569 irq_enter(); 601 irq_enter();
570 local_apic_timer_interrupt(); 602 local_apic_timer_interrupt();
571 irq_exit(); 603 irq_exit();
604
572 set_irq_regs(old_regs); 605 set_irq_regs(old_regs);
573} 606}
574 607
@@ -622,6 +655,13 @@ void clear_local_APIC(void)
622 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED); 655 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
623 } 656 }
624 657
 658 /* let's not touch this if we didn't frob it */
 659#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
660 if (maxlvt >= 5) {
661 v = apic_read(APIC_LVTTHMR);
662 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
663 }
664#endif
625 /* 665 /*
626 * Clean APIC state for other OSs: 666 * Clean APIC state for other OSs:
627 */ 667 */
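The maxlvt tests in this function derive from the local APIC version register: bits 16-23 give the index of the highest implemented LVT entry, so maxlvt >= 4 means a performance-counter LVT exists and maxlvt >= 5 a thermal LVT, which is why the added block only masks APIC_LVTTHMR in the latter case. A one-assert sketch of the extraction, assuming the conventional GET_APIC_MAXLVT() definition:

#include <assert.h>

#define GET_APIC_MAXLVT(x)  (((x) >> 16) & 0xFF)   /* bits 16-23 of APIC_LVR */

int main(void)
{
        unsigned int lvr = 0x00050014;       /* hypothetical version register */

        assert(GET_APIC_MAXLVT(lvr) == 5);   /* thermal LVT (LVTTHMR) present */
        return 0;
}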
@@ -632,8 +672,14 @@ void clear_local_APIC(void)
632 apic_write(APIC_LVTERR, APIC_LVT_MASKED); 672 apic_write(APIC_LVTERR, APIC_LVT_MASKED);
633 if (maxlvt >= 4) 673 if (maxlvt >= 4)
634 apic_write(APIC_LVTPC, APIC_LVT_MASKED); 674 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
635 apic_write(APIC_ESR, 0); 675
636 apic_read(APIC_ESR); 676 /* Integrated APIC (!82489DX) ? */
677 if (lapic_is_integrated()) {
678 if (maxlvt > 3)
679 /* Clear ESR due to Pentium errata 3AP and 11AP */
680 apic_write(APIC_ESR, 0);
681 apic_read(APIC_ESR);
682 }
637} 683}
638 684
639/** 685/**
@@ -652,8 +698,28 @@ void disable_local_APIC(void)
652 value = apic_read(APIC_SPIV); 698 value = apic_read(APIC_SPIV);
653 value &= ~APIC_SPIV_APIC_ENABLED; 699 value &= ~APIC_SPIV_APIC_ENABLED;
654 apic_write(APIC_SPIV, value); 700 apic_write(APIC_SPIV, value);
701
702#ifdef CONFIG_X86_32
703 /*
704 * When LAPIC was disabled by the BIOS and enabled by the kernel,
705 * restore the disabled state.
706 */
707 if (enabled_via_apicbase) {
708 unsigned int l, h;
709
710 rdmsr(MSR_IA32_APICBASE, l, h);
711 l &= ~MSR_IA32_APICBASE_ENABLE;
712 wrmsr(MSR_IA32_APICBASE, l, h);
713 }
714#endif
655} 715}
656 716
717/*
 718 * If Linux enabled the LAPIC against the BIOS default, disable it again
 719 * before re-entering the BIOS on shutdown. Otherwise the BIOS may get
 720 * confused and not power off. Additionally, clear all LVT entries before
 721 * disable_local_APIC() for the case where Linux didn't enable the LAPIC.
722 */
657void lapic_shutdown(void) 723void lapic_shutdown(void)
658{ 724{
659 unsigned long flags; 725 unsigned long flags;
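enabled_via_apicbase, referenced only under CONFIG_X86_32 above, records that the kernel itself switched the APIC on through the IA32_APIC_BASE MSR, so the same enable bit has to be cleared again to restore the BIOS state. A pure-function sketch of that bit handling, with the usual constant value assumed (bit 11 is the global xAPIC enable):

#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_APICBASE_ENABLE  (1u << 11)   /* global xAPIC enable bit */

/* What the #ifdef CONFIG_X86_32 block above does to the MSR low word. */
static uint32_t apicbase_disable(uint32_t l)
{
        return l & ~MSR_IA32_APICBASE_ENABLE;
}

int main(void)
{
        uint32_t l = 0xfee00000u | MSR_IA32_APICBASE_ENABLE;

        printf("before=0x%08x after=0x%08x\n", l, apicbase_disable(l));
        return 0;
}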
@@ -663,7 +729,13 @@ void lapic_shutdown(void)
663 729
664 local_irq_save(flags); 730 local_irq_save(flags);
665 731
666 disable_local_APIC(); 732#ifdef CONFIG_X86_32
733 if (!enabled_via_apicbase)
734 clear_local_APIC();
735 else
736#endif
737 disable_local_APIC();
738
667 739
668 local_irq_restore(flags); 740 local_irq_restore(flags);
669} 741}
@@ -734,8 +806,11 @@ int __init verify_local_APIC(void)
734 */ 806 */
735void __init sync_Arb_IDs(void) 807void __init sync_Arb_IDs(void)
736{ 808{
737 /* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */ 809 /*
 738 if (modern_apic()) 810 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1. And not
811 * needed on AMD.
812 */
813 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
739 return; 814 return;
740 815
741 /* 816 /*
@@ -744,8 +819,8 @@ void __init sync_Arb_IDs(void)
744 apic_wait_icr_idle(); 819 apic_wait_icr_idle();
745 820
746 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n"); 821 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
747 apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG 822 apic_write(APIC_ICR, APIC_DEST_ALLINC |
748 | APIC_DM_INIT); 823 APIC_INT_LEVELTRIG | APIC_DM_INIT);
749} 824}
750 825
751/* 826/*
@@ -762,8 +837,6 @@ void __init init_bsp_APIC(void)
762 if (smp_found_config || !cpu_has_apic) 837 if (smp_found_config || !cpu_has_apic)
763 return; 838 return;
764 839
765 value = apic_read(APIC_LVR);
766
767 /* 840 /*
768 * Do not trust the local APIC being empty at bootup. 841 * Do not trust the local APIC being empty at bootup.
769 */ 842 */
@@ -775,7 +848,15 @@ void __init init_bsp_APIC(void)
775 value = apic_read(APIC_SPIV); 848 value = apic_read(APIC_SPIV);
776 value &= ~APIC_VECTOR_MASK; 849 value &= ~APIC_VECTOR_MASK;
777 value |= APIC_SPIV_APIC_ENABLED; 850 value |= APIC_SPIV_APIC_ENABLED;
778 value |= APIC_SPIV_FOCUS_DISABLED; 851
852#ifdef CONFIG_X86_32
853 /* This bit is reserved on P4/Xeon and should be cleared */
854 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
855 (boot_cpu_data.x86 == 15))
856 value &= ~APIC_SPIV_FOCUS_DISABLED;
857 else
858#endif
859 value |= APIC_SPIV_FOCUS_DISABLED;
779 value |= SPURIOUS_APIC_VECTOR; 860 value |= SPURIOUS_APIC_VECTOR;
780 apic_write(APIC_SPIV, value); 861 apic_write(APIC_SPIV, value);
781 862
@@ -784,9 +865,50 @@ void __init init_bsp_APIC(void)
784 */ 865 */
785 apic_write(APIC_LVT0, APIC_DM_EXTINT); 866 apic_write(APIC_LVT0, APIC_DM_EXTINT);
786 value = APIC_DM_NMI; 867 value = APIC_DM_NMI;
868 if (!lapic_is_integrated()) /* 82489DX */
869 value |= APIC_LVT_LEVEL_TRIGGER;
787 apic_write(APIC_LVT1, value); 870 apic_write(APIC_LVT1, value);
788} 871}
789 872
873static void __cpuinit lapic_setup_esr(void)
874{
875 unsigned long oldvalue, value, maxlvt;
876 if (lapic_is_integrated() && !esr_disable) {
877 if (esr_disable) {
878 /*
879 * Something untraceable is creating bad interrupts on
880 * secondary quads ... for the moment, just leave the
881 * ESR disabled - we can't do anything useful with the
882 * errors anyway - mbligh
883 */
884 printk(KERN_INFO "Leaving ESR disabled.\n");
885 return;
886 }
887 /* !82489DX */
888 maxlvt = lapic_get_maxlvt();
889 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
890 apic_write(APIC_ESR, 0);
891 oldvalue = apic_read(APIC_ESR);
892
893 /* enables sending errors */
894 value = ERROR_APIC_VECTOR;
895 apic_write(APIC_LVTERR, value);
896 /*
897 * spec says clear errors after enabling vector.
898 */
899 if (maxlvt > 3)
900 apic_write(APIC_ESR, 0);
901 value = apic_read(APIC_ESR);
902 if (value != oldvalue)
903 apic_printk(APIC_VERBOSE, "ESR value before enabling "
904 "vector: 0x%08lx after: 0x%08lx\n",
905 oldvalue, value);
906 } else {
907 printk(KERN_INFO "No ESR for 82489DX.\n");
908 }
909}
910
911
790/** 912/**
791 * setup_local_APIC - setup the local APIC 913 * setup_local_APIC - setup the local APIC
792 */ 914 */
@@ -892,21 +1014,20 @@ void __cpuinit setup_local_APIC(void)
892 preempt_enable(); 1014 preempt_enable();
893} 1015}
894 1016
895static void __cpuinit lapic_setup_esr(void)
896{
897 unsigned maxlvt = lapic_get_maxlvt();
898
899 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR);
900 /*
901 * spec says clear errors after enabling vector.
902 */
903 if (maxlvt > 3)
904 apic_write(APIC_ESR, 0);
905}
906
907void __cpuinit end_local_APIC_setup(void) 1017void __cpuinit end_local_APIC_setup(void)
908{ 1018{
909 lapic_setup_esr(); 1019 lapic_setup_esr();
1020
1021#ifdef CONFIG_X86_32
1022 {
1023 unsigned int value;
1024 /* Disable the local apic timer */
1025 value = apic_read(APIC_LVTT);
1026 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1027 apic_write(APIC_LVTT, value);
1028 }
1029#endif
1030
910 setup_apic_nmi_watchdog(NULL); 1031 setup_apic_nmi_watchdog(NULL);
911 apic_pm_activate(); 1032 apic_pm_activate();
912} 1033}
@@ -1108,6 +1229,8 @@ void __init init_apic_mappings(void)
1108 * This initializes the IO-APIC and APIC hardware if this is 1229 * This initializes the IO-APIC and APIC hardware if this is
1109 * a UP kernel. 1230 * a UP kernel.
1110 */ 1231 */
1232int apic_version[MAX_APICS];
1233
1111int __init APIC_init_uniprocessor(void) 1234int __init APIC_init_uniprocessor(void)
1112{ 1235{
1113 if (disable_apic) { 1236 if (disable_apic) {
@@ -1209,17 +1332,57 @@ asmlinkage void smp_error_interrupt(void)
1209} 1332}
1210 1333
1211/** 1334/**
1212 * * connect_bsp_APIC - attach the APIC to the interrupt system 1335 * connect_bsp_APIC - attach the APIC to the interrupt system
1213 * */ 1336 */
1214void __init connect_bsp_APIC(void) 1337void __init connect_bsp_APIC(void)
1215{ 1338{
1339#ifdef CONFIG_X86_32
1340 if (pic_mode) {
1341 /*
1342 * Do not trust the local APIC being empty at bootup.
1343 */
1344 clear_local_APIC();
1345 /*
1346 * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
1347 * local APIC to INT and NMI lines.
1348 */
1349 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
1350 "enabling APIC mode.\n");
1351 outb(0x70, 0x22);
1352 outb(0x01, 0x23);
1353 }
1354#endif
1216 enable_apic_mode(); 1355 enable_apic_mode();
1217} 1356}
1218 1357
1358/**
1359 * disconnect_bsp_APIC - detach the APIC from the interrupt system
 1360 * @virt_wire_setup: indicates whether virtual wire mode is selected
1361 *
1362 * Virtual wire mode is necessary to deliver legacy interrupts even when the
1363 * APIC is disabled.
1364 */
1219void disconnect_bsp_APIC(int virt_wire_setup) 1365void disconnect_bsp_APIC(int virt_wire_setup)
1220{ 1366{
1367 unsigned int value;
1368
1369#ifdef CONFIG_X86_32
1370 if (pic_mode) {
1371 /*
1372 * Put the board back into PIC mode (has an effect only on
1373 * certain older boards). Note that APIC interrupts, including
1374 * IPIs, won't work beyond this point! The only exception are
1375 * INIT IPIs.
1376 */
1377 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
1378 "entering PIC mode.\n");
1379 outb(0x70, 0x22);
1380 outb(0x00, 0x23);
1381 return;
1382 }
1383#endif
1384
1221 /* Go back to Virtual Wire compatibility mode */ 1385 /* Go back to Virtual Wire compatibility mode */
1222 unsigned long value;
1223 1386
1224 /* For the spurious interrupt use vector F, and enable it */ 1387 /* For the spurious interrupt use vector F, and enable it */
1225 value = apic_read(APIC_SPIV); 1388 value = apic_read(APIC_SPIV);
@@ -1245,7 +1408,10 @@ void disconnect_bsp_APIC(int virt_wire_setup)
1245 apic_write(APIC_LVT0, APIC_LVT_MASKED); 1408 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1246 } 1409 }
1247 1410
1248 /* For LVT1 make it edge triggered, active high, nmi and enabled */ 1411 /*
1412 * For LVT1 make it edge triggered, active high,
1413 * nmi and enabled
1414 */
1249 value = apic_read(APIC_LVT1); 1415 value = apic_read(APIC_LVT1);
1250 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | 1416 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1251 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | 1417 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
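For context on this hunk: leaving LVT0 programmed as ExtINT and LVT1 as NMI is what the MultiProcessor spec calls virtual wire mode; the 8259A PIC output is delivered through LINT0 and the external NMI line through LINT1, so legacy interrupts keep working as if the local APIC were transparent once the kernel hands the machine back.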
@@ -1260,9 +1426,20 @@ void __cpuinit generic_processor_info(int apicid, int version)
1260 int cpu; 1426 int cpu;
1261 cpumask_t tmp_map; 1427 cpumask_t tmp_map;
1262 1428
1429 /*
1430 * Validate version
1431 */
1432 if (version == 0x0) {
1433 printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
1434 "fixing up to 0x10. (tell your hw vendor)\n",
1435 version);
1436 version = 0x10;
1437 }
1438 apic_version[apicid] = version;
1439
1263 if (num_processors >= NR_CPUS) { 1440 if (num_processors >= NR_CPUS) {
1264 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." 1441 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
1265 " Processor ignored.\n", NR_CPUS); 1442 " Processor ignored.\n", NR_CPUS);
1266 return; 1443 return;
1267 } 1444 }
1268 1445
@@ -1282,6 +1459,29 @@ void __cpuinit generic_processor_info(int apicid, int version)
1282 if (apicid > max_physical_apicid) 1459 if (apicid > max_physical_apicid)
1283 max_physical_apicid = apicid; 1460 max_physical_apicid = apicid;
1284 1461
1462#ifdef CONFIG_X86_32
1463 /*
1464 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
1465 * but we need to work other dependencies like SMP_SUSPEND etc
1466 * before this can be done without some confusion.
1467 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
1468 * - Ashok Raj <ashok.raj@intel.com>
1469 */
1470 if (max_physical_apicid >= 8) {
1471 switch (boot_cpu_data.x86_vendor) {
1472 case X86_VENDOR_INTEL:
1473 if (!APIC_XAPIC(version)) {
1474 def_to_bigsmp = 0;
1475 break;
1476 }
1477 /* If P4 and above fall through */
1478 case X86_VENDOR_AMD:
1479 def_to_bigsmp = 1;
1480 }
1481 }
1482#endif
1483
1484#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
1285 /* are we being called early in kernel startup? */ 1485 /* are we being called early in kernel startup? */
1286 if (early_per_cpu_ptr(x86_cpu_to_apicid)) { 1486 if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
1287 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); 1487 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
@@ -1293,6 +1493,7 @@ void __cpuinit generic_processor_info(int apicid, int version)
1293 per_cpu(x86_cpu_to_apicid, cpu) = apicid; 1493 per_cpu(x86_cpu_to_apicid, cpu) = apicid;
1294 per_cpu(x86_bios_cpu_apicid, cpu) = apicid; 1494 per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1295 } 1495 }
1496#endif
1296 1497
1297 cpu_set(cpu, cpu_possible_map); 1498 cpu_set(cpu, cpu_possible_map);
1298 cpu_set(cpu, cpu_present_map); 1499 cpu_set(cpu, cpu_present_map);
@@ -1309,9 +1510,11 @@ int hard_smp_processor_id(void)
1309#ifdef CONFIG_PM 1510#ifdef CONFIG_PM
1310 1511
1311static struct { 1512static struct {
1312 /* 'active' is true if the local APIC was enabled by us and 1513 /*
1313 not the BIOS; this signifies that we are also responsible 1514 * 'active' is true if the local APIC was enabled by us and
1314 for disabling it before entering apm/acpi suspend */ 1515 * not the BIOS; this signifies that we are also responsible
1516 * for disabling it before entering apm/acpi suspend
1517 */
1315 int active; 1518 int active;
1316 /* r/w apic fields */ 1519 /* r/w apic fields */
1317 unsigned int apic_id; 1520 unsigned int apic_id;
@@ -1352,10 +1555,11 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
1352 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); 1555 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
1353 apic_pm_state.apic_tmict = apic_read(APIC_TMICT); 1556 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
1354 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); 1557 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
1355#ifdef CONFIG_X86_MCE_INTEL 1558#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
1356 if (maxlvt >= 5) 1559 if (maxlvt >= 5)
1357 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); 1560 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
1358#endif 1561#endif
1562
1359 local_irq_save(flags); 1563 local_irq_save(flags);
1360 disable_local_APIC(); 1564 disable_local_APIC();
1361 local_irq_restore(flags); 1565 local_irq_restore(flags);
@@ -1374,13 +1578,24 @@ static int lapic_resume(struct sys_device *dev)
1374 maxlvt = lapic_get_maxlvt(); 1578 maxlvt = lapic_get_maxlvt();
1375 1579
1376 local_irq_save(flags); 1580 local_irq_save(flags);
1377 if (!x2apic) { 1581
1582#ifdef CONFIG_X86_64
1583 if (x2apic)
1584 enable_x2apic();
1585 else
1586#endif
1587 {
1588 /*
1589 * Make sure the APICBASE points to the right address
1590 *
1591 * FIXME! This will be wrong if we ever support suspend on
1592 * SMP! We'll need to do this as part of the CPU restore!
1593 */
1378 rdmsr(MSR_IA32_APICBASE, l, h); 1594 rdmsr(MSR_IA32_APICBASE, l, h);
1379 l &= ~MSR_IA32_APICBASE_BASE; 1595 l &= ~MSR_IA32_APICBASE_BASE;
1380 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; 1596 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
1381 wrmsr(MSR_IA32_APICBASE, l, h); 1597 wrmsr(MSR_IA32_APICBASE, l, h);
1382 } else 1598 }
1383 enable_x2apic();
1384 1599
1385 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); 1600 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
1386 apic_write(APIC_ID, apic_pm_state.apic_id); 1601 apic_write(APIC_ID, apic_pm_state.apic_id);
@@ -1390,7 +1605,7 @@ static int lapic_resume(struct sys_device *dev)
1390 apic_write(APIC_SPIV, apic_pm_state.apic_spiv); 1605 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
1391 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); 1606 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
1392 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); 1607 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
1393#ifdef CONFIG_X86_MCE_INTEL 1608#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
1394 if (maxlvt >= 5) 1609 if (maxlvt >= 5)
1395 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); 1610 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
1396#endif 1611#endif
@@ -1404,10 +1619,17 @@ static int lapic_resume(struct sys_device *dev)
1404 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr); 1619 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
1405 apic_write(APIC_ESR, 0); 1620 apic_write(APIC_ESR, 0);
1406 apic_read(APIC_ESR); 1621 apic_read(APIC_ESR);
1622
1407 local_irq_restore(flags); 1623 local_irq_restore(flags);
1624
1408 return 0; 1625 return 0;
1409} 1626}
1410 1627
1628/*
1629 * This device has no shutdown method - fully functioning local APICs
1630 * are needed on every CPU up until machine_halt/restart/poweroff.
1631 */
1632
1411static struct sysdev_class lapic_sysclass = { 1633static struct sysdev_class lapic_sysclass = {
1412 .name = "lapic", 1634 .name = "lapic",
1413 .resume = lapic_resume, 1635 .resume = lapic_resume,
@@ -1533,28 +1755,7 @@ early_param("nox2apic", setup_nox2apic);
1533/* 1755/*
1534 * APIC command line parameters 1756 * APIC command line parameters
1535 */ 1757 */
1536static int __init apic_set_verbosity(char *str) 1758static int __init setup_disableapic(char *arg)
1537{
1538 if (str == NULL) {
1539 skip_ioapic_setup = 0;
1540 ioapic_force = 1;
1541 return 0;
1542 }
1543 if (strcmp("debug", str) == 0)
1544 apic_verbosity = APIC_DEBUG;
1545 else if (strcmp("verbose", str) == 0)
1546 apic_verbosity = APIC_VERBOSE;
1547 else {
1548 printk(KERN_WARNING "APIC Verbosity level %s not recognised"
1549 " use apic=verbose or apic=debug\n", str);
1550 return -EINVAL;
1551 }
1552
1553 return 0;
1554}
1555early_param("apic", apic_set_verbosity);
1556
1557static __init int setup_disableapic(char *str)
1558{ 1759{
1559 disable_apic = 1; 1760 disable_apic = 1;
1560 setup_clear_cpu_cap(X86_FEATURE_APIC); 1761 setup_clear_cpu_cap(X86_FEATURE_APIC);
@@ -1563,9 +1764,9 @@ static __init int setup_disableapic(char *str)
1563early_param("disableapic", setup_disableapic); 1764early_param("disableapic", setup_disableapic);
1564 1765
1565/* same as disableapic, for compatibility */ 1766/* same as disableapic, for compatibility */
1566static __init int setup_nolapic(char *str) 1767static int __init setup_nolapic(char *arg)
1567{ 1768{
1568 return setup_disableapic(str); 1769 return setup_disableapic(arg);
1569} 1770}
1570early_param("nolapic", setup_nolapic); 1771early_param("nolapic", setup_nolapic);
1571 1772
@@ -1576,14 +1777,19 @@ static int __init parse_lapic_timer_c2_ok(char *arg)
1576} 1777}
1577early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); 1778early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1578 1779
1579static __init int setup_noapictimer(char *str) 1780static int __init parse_disable_apic_timer(char *arg)
1580{ 1781{
1581 if (str[0] != ' ' && str[0] != 0)
1582 return 0;
1583 disable_apic_timer = 1; 1782 disable_apic_timer = 1;
1584 return 1; 1783 return 0;
1784}
1785early_param("noapictimer", parse_disable_apic_timer);
1786
1787static int __init parse_nolapic_timer(char *arg)
1788{
1789 disable_apic_timer = 1;
1790 return 0;
1585} 1791}
1586__setup("noapictimer", setup_noapictimer); 1792early_param("nolapic_timer", parse_nolapic_timer);
1587 1793
1588static __init int setup_apicpmtimer(char *s) 1794static __init int setup_apicpmtimer(char *s)
1589{ 1795{
@@ -1593,6 +1799,31 @@ static __init int setup_apicpmtimer(char *s)
1593} 1799}
1594__setup("apicpmtimer", setup_apicpmtimer); 1800__setup("apicpmtimer", setup_apicpmtimer);
1595 1801
1802static int __init apic_set_verbosity(char *arg)
1803{
1804 if (!arg) {
1805#ifdef CONFIG_X86_64
1806 skip_ioapic_setup = 0;
1807 ioapic_force = 1;
1808 return 0;
1809#endif
1810 return -EINVAL;
1811 }
1812
1813 if (strcmp("debug", arg) == 0)
1814 apic_verbosity = APIC_DEBUG;
1815 else if (strcmp("verbose", arg) == 0)
1816 apic_verbosity = APIC_VERBOSE;
1817 else {
 1818 printk(KERN_WARNING "APIC Verbosity level %s not recognised,"
1819 " use apic=verbose or apic=debug\n", arg);
1820 return -EINVAL;
1821 }
1822
1823 return 0;
1824}
1825early_param("apic", apic_set_verbosity);
1826
1596static int __init lapic_insert_resource(void) 1827static int __init lapic_insert_resource(void)
1597{ 1828{
1598 if (!apic_phys) 1829 if (!apic_phys)
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 9ee24e6bc4b0..5145a6e72bbb 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -228,12 +228,12 @@
228#include <linux/suspend.h> 228#include <linux/suspend.h>
229#include <linux/kthread.h> 229#include <linux/kthread.h>
230#include <linux/jiffies.h> 230#include <linux/jiffies.h>
231#include <linux/smp_lock.h>
232 231
233#include <asm/system.h> 232#include <asm/system.h>
234#include <asm/uaccess.h> 233#include <asm/uaccess.h>
235#include <asm/desc.h> 234#include <asm/desc.h>
236#include <asm/i8253.h> 235#include <asm/i8253.h>
236#include <asm/olpc.h>
237#include <asm/paravirt.h> 237#include <asm/paravirt.h>
238#include <asm/reboot.h> 238#include <asm/reboot.h>
239 239
@@ -2217,7 +2217,7 @@ static int __init apm_init(void)
2217 2217
2218 dmi_check_system(apm_dmi_table); 2218 dmi_check_system(apm_dmi_table);
2219 2219
2220 if (apm_info.bios.version == 0 || paravirt_enabled()) { 2220 if (apm_info.bios.version == 0 || paravirt_enabled() || machine_is_olpc()) {
2221 printk(KERN_INFO "apm: BIOS not found.\n"); 2221 printk(KERN_INFO "apm: BIOS not found.\n");
2222 return -ENODEV; 2222 return -ENODEV;
2223 } 2223 }
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index c639bd55391c..fdd585f9c53d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -25,11 +25,11 @@ x86_bios_strerror(long status)
25{ 25{
26 const char *str; 26 const char *str;
27 switch (status) { 27 switch (status) {
28 case 0: str = "Call completed without error"; break; 28 case 0: str = "Call completed without error"; break;
29 case -1: str = "Not implemented"; break; 29 case -1: str = "Not implemented"; break;
30 case -2: str = "Invalid argument"; break; 30 case -2: str = "Invalid argument"; break;
31 case -3: str = "Call completed with error"; break; 31 case -3: str = "Call completed with error"; break;
32 default: str = "Unknown BIOS status code"; break; 32 default: str = "Unknown BIOS status code"; break;
33 } 33 }
34 return str; 34 return str;
35} 35}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 84c4040efa80..7581b62df184 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -572,35 +572,15 @@ void __init early_cpu_init(void)
572 572
573/* 573/*
574 * The NOPL instruction is supposed to exist on all CPUs with 574 * The NOPL instruction is supposed to exist on all CPUs with
575 * family >= 6, unfortunately, that's not true in practice because 575 * family >= 6; unfortunately, that's not true in practice because
576 * of early VIA chips and (more importantly) broken virtualizers that 576 * of early VIA chips and (more importantly) broken virtualizers that
577 * are not easy to detect. Hence, probe for it based on first 577 * are not easy to detect. In the latter case it doesn't even *fail*
578 * principles. 578 * reliably, so probing for it doesn't even work. Disable it completely
579 * 579 * unless we can find a reliable way to detect all the broken cases.
580 * Note: no 64-bit chip is known to lack these, but put the code here
581 * for consistency with 32 bits, and to make it utterly trivial to
582 * diagnose the problem should it ever surface.
583 */ 580 */
584static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) 581static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
585{ 582{
586 const u32 nopl_signature = 0x888c53b1; /* Random number */
587 u32 has_nopl = nopl_signature;
588
589 clear_cpu_cap(c, X86_FEATURE_NOPL); 583 clear_cpu_cap(c, X86_FEATURE_NOPL);
590 if (c->x86 >= 6) {
591 asm volatile("\n"
592 "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
593 "2:\n"
594 " .section .fixup,\"ax\"\n"
595 "3: xor %0,%0\n"
596 " jmp 2b\n"
597 " .previous\n"
598 _ASM_EXTABLE(1b,3b)
599 : "+a" (has_nopl));
600
601 if (has_nopl == nopl_signature)
602 set_cpu_cap(c, X86_FEATURE_NOPL);
603 }
604} 584}
605 585
606static void __cpuinit generic_identify(struct cpuinfo_x86 *c) 586static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index f1685fb91fbd..b8e05ee4f736 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -171,7 +171,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
171 } 171 }
172 172
173 if (c->x86 != 0xF) { 173 if (c->x86 != 0xF) {
174 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <cpufreq@lists.linux.org.uk>\n"); 174 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <cpufreq@vger.kernel.org>\n");
175 return 0; 175 return 0;
176 } 176 }
177 177
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 15e13c01cc36..3b5f06423e77 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -26,7 +26,7 @@
26#include <asm/cpufeature.h> 26#include <asm/cpufeature.h>
27 27
28#define PFX "speedstep-centrino: " 28#define PFX "speedstep-centrino: "
29#define MAINTAINER "cpufreq@lists.linux.org.uk" 29#define MAINTAINER "cpufreq@vger.kernel.org"
30 30
31#define dprintk(msg...) \ 31#define dprintk(msg...) \
32 cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) 32 cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index cb7d3b6a80eb..4e8d77f01eeb 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -401,12 +401,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
401 tmp |= ~((1<<(hi - 1)) - 1); 401 tmp |= ~((1<<(hi - 1)) - 1);
402 402
403 if (tmp != mask_lo) { 403 if (tmp != mask_lo) {
404 static int once = 1; 404 WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
405
406 if (once) {
407 printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
408 once = 0;
409 }
410 mask_lo = tmp; 405 mask_lo = tmp;
411 } 406 }
412 } 407 }
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 84c480bb3715..4c4214690dd1 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -405,9 +405,9 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
405 } 405 }
406 /* RED-PEN: base can be > 32bit */ 406 /* RED-PEN: base can be > 32bit */
407 len += seq_printf(seq, 407 len += seq_printf(seq,
408 "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n", 408 "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n",
409 i, base, base >> (20 - PAGE_SHIFT), size, factor, 409 i, base, base >> (20 - PAGE_SHIFT), size, factor,
410 mtrr_attrib_to_str(type), mtrr_usage_table[i]); 410 mtrr_usage_table[i], mtrr_attrib_to_str(type));
411 } 411 }
412 } 412 }
413 return 0; 413 return 0;
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 58ac5d3d4361..c78c04821ea1 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -759,7 +759,8 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
759 /* take out UC ranges */ 759 /* take out UC ranges */
760 for (i = 0; i < num_var_ranges; i++) { 760 for (i = 0; i < num_var_ranges; i++) {
761 type = range_state[i].type; 761 type = range_state[i].type;
762 if (type != MTRR_TYPE_UNCACHABLE) 762 if (type != MTRR_TYPE_UNCACHABLE &&
763 type != MTRR_TYPE_WRPROT)
763 continue; 764 continue;
764 size = range_state[i].size_pfn; 765 size = range_state[i].size_pfn;
765 if (!size) 766 if (!size)
@@ -834,7 +835,14 @@ static int __init enable_mtrr_cleanup_setup(char *str)
834 enable_mtrr_cleanup = 1; 835 enable_mtrr_cleanup = 1;
835 return 0; 836 return 0;
836} 837}
837early_param("enble_mtrr_cleanup", enable_mtrr_cleanup_setup); 838early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
839
840static int __init mtrr_cleanup_debug_setup(char *str)
841{
842 debug_print = 1;
843 return 0;
844}
845early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
838 846
839struct var_mtrr_state { 847struct var_mtrr_state {
840 unsigned long range_startk; 848 unsigned long range_startk;
@@ -898,6 +906,27 @@ set_var_mtrr_all(unsigned int address_bits)
898 } 906 }
899} 907}
900 908
909static unsigned long to_size_factor(unsigned long sizek, char *factorp)
910{
911 char factor;
912 unsigned long base = sizek;
913
914 if (base & ((1<<10) - 1)) {
915 /* not MB alignment */
916 factor = 'K';
917 } else if (base & ((1<<20) - 1)){
918 factor = 'M';
919 base >>= 10;
920 } else {
921 factor = 'G';
922 base >>= 20;
923 }
924
925 *factorp = factor;
926
927 return base;
928}
929
901static unsigned int __init 930static unsigned int __init
902range_to_mtrr(unsigned int reg, unsigned long range_startk, 931range_to_mtrr(unsigned int reg, unsigned long range_startk,
903 unsigned long range_sizek, unsigned char type) 932 unsigned long range_sizek, unsigned char type)
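to_size_factor() above only rescales for printing: it returns the size expressed in the largest unit (K, M or G) that divides it exactly and reports that unit through *factorp. A stand-alone harness around a copy of the helper, to show the expected outputs:

#include <stdio.h>

/* Copy of the helper above, for a user-space check. */
static unsigned long to_size_factor(unsigned long sizek, char *factorp)
{
        char factor;
        unsigned long base = sizek;

        if (base & ((1<<10) - 1)) {
                /* not a whole number of MB: print in KB */
                factor = 'K';
        } else if (base & ((1<<20) - 1)) {
                factor = 'M';
                base >>= 10;
        } else {
                factor = 'G';
                base >>= 20;
        }

        *factorp = factor;
        return base;
}

int main(void)
{
        char f;
        unsigned long v;

        v = to_size_factor(64, &f);       printf("%lu%cB\n", v, f); /* 64KB */
        v = to_size_factor(2048, &f);     printf("%lu%cB\n", v, f); /* 2MB  */
        v = to_size_factor(1048576, &f);  printf("%lu%cB\n", v, f); /* 1GB  */
        return 0;
}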
@@ -919,13 +948,21 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
919 align = max_align; 948 align = max_align;
920 949
921 sizek = 1 << align; 950 sizek = 1 << align;
922 if (debug_print) 951 if (debug_print) {
952 char start_factor = 'K', size_factor = 'K';
953 unsigned long start_base, size_base;
954
955 start_base = to_size_factor(range_startk, &start_factor),
956 size_base = to_size_factor(sizek, &size_factor),
957
923 printk(KERN_DEBUG "Setting variable MTRR %d, " 958 printk(KERN_DEBUG "Setting variable MTRR %d, "
924 "base: %ldMB, range: %ldMB, type %s\n", 959 "base: %ld%cB, range: %ld%cB, type %s\n",
925 reg, range_startk >> 10, sizek >> 10, 960 reg, start_base, start_factor,
961 size_base, size_factor,
926 (type == MTRR_TYPE_UNCACHABLE)?"UC": 962 (type == MTRR_TYPE_UNCACHABLE)?"UC":
927 ((type == MTRR_TYPE_WRBACK)?"WB":"Other") 963 ((type == MTRR_TYPE_WRBACK)?"WB":"Other")
928 ); 964 );
965 }
929 save_var_mtrr(reg++, range_startk, sizek, type); 966 save_var_mtrr(reg++, range_startk, sizek, type);
930 range_startk += sizek; 967 range_startk += sizek;
931 range_sizek -= sizek; 968 range_sizek -= sizek;
@@ -970,6 +1007,8 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
970 /* try to append some small hole */ 1007 /* try to append some small hole */
971 range0_basek = state->range_startk; 1008 range0_basek = state->range_startk;
972 range0_sizek = ALIGN(state->range_sizek, chunk_sizek); 1009 range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
1010
1011 /* no increase */
973 if (range0_sizek == state->range_sizek) { 1012 if (range0_sizek == state->range_sizek) {
974 if (debug_print) 1013 if (debug_print)
975 printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", 1014 printk(KERN_DEBUG "rangeX: %016lx - %016lx\n",
@@ -980,13 +1019,40 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
980 return 0; 1019 return 0;
981 } 1020 }
982 1021
 983 range0_sizek -= chunk_sizek; 1022 /* only cut back when it is not the last */
984 if (range0_sizek && sizek) { 1023 if (sizek) {
985 while (range0_basek + range0_sizek > (basek + sizek)) { 1024 while (range0_basek + range0_sizek > (basek + sizek)) {
986 range0_sizek -= chunk_sizek; 1025 if (range0_sizek >= chunk_sizek)
987 if (!range0_sizek) 1026 range0_sizek -= chunk_sizek;
988 break; 1027 else
989 } 1028 range0_sizek = 0;
1029
1030 if (!range0_sizek)
1031 break;
1032 }
1033 }
1034
1035second_try:
1036 range_basek = range0_basek + range0_sizek;
1037
1038 /* one hole in the middle */
1039 if (range_basek > basek && range_basek <= (basek + sizek))
1040 second_sizek = range_basek - basek;
1041
1042 if (range0_sizek > state->range_sizek) {
1043
1044 /* one hole in middle or at end */
1045 hole_sizek = range0_sizek - state->range_sizek - second_sizek;
1046
1047 /* hole size should be less than half of range0 size */
1048 if (hole_sizek >= (range0_sizek >> 1) &&
1049 range0_sizek >= chunk_sizek) {
1050 range0_sizek -= chunk_sizek;
1051 second_sizek = 0;
1052 hole_sizek = 0;
1053
1054 goto second_try;
1055 }
990 } 1056 }
991 1057
992 if (range0_sizek) { 1058 if (range0_sizek) {
@@ -996,50 +1062,28 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
996 (range0_basek + range0_sizek)<<10); 1062 (range0_basek + range0_sizek)<<10);
997 state->reg = range_to_mtrr(state->reg, range0_basek, 1063 state->reg = range_to_mtrr(state->reg, range0_basek,
998 range0_sizek, MTRR_TYPE_WRBACK); 1064 range0_sizek, MTRR_TYPE_WRBACK);
999
1000 }
1001
1002 range_basek = range0_basek + range0_sizek;
1003 range_sizek = chunk_sizek;
1004
1005 if (range_basek + range_sizek > basek &&
1006 range_basek + range_sizek <= (basek + sizek)) {
1007 /* one hole */
1008 second_basek = basek;
1009 second_sizek = range_basek + range_sizek - basek;
1010 } 1065 }
1011 1066
1012 /* if last piece, only could one hole near end */ 1067 if (range0_sizek < state->range_sizek) {
1013 if ((second_basek || !basek) && 1068 /* need to handle left over */
1014 range_sizek - (state->range_sizek - range0_sizek) - second_sizek <
1015 (chunk_sizek >> 1)) {
1016 /*
1017 * one hole in middle (second_sizek is 0) or at end
1018 * (second_sizek is 0 )
1019 */
1020 hole_sizek = range_sizek - (state->range_sizek - range0_sizek)
1021 - second_sizek;
1022 hole_basek = range_basek + range_sizek - hole_sizek
1023 - second_sizek;
1024 } else {
1025 /* fallback for big hole, or several holes */
1026 range_sizek = state->range_sizek - range0_sizek; 1069 range_sizek = state->range_sizek - range0_sizek;
1027 second_basek = 0; 1070
1028 second_sizek = 0; 1071 if (debug_print)
1072 printk(KERN_DEBUG "range: %016lx - %016lx\n",
1073 range_basek<<10,
1074 (range_basek + range_sizek)<<10);
1075 state->reg = range_to_mtrr(state->reg, range_basek,
1076 range_sizek, MTRR_TYPE_WRBACK);
1029 } 1077 }
1030 1078
1031 if (debug_print)
1032 printk(KERN_DEBUG "range: %016lx - %016lx\n", range_basek<<10,
1033 (range_basek + range_sizek)<<10);
1034 state->reg = range_to_mtrr(state->reg, range_basek, range_sizek,
1035 MTRR_TYPE_WRBACK);
1036 if (hole_sizek) { 1079 if (hole_sizek) {
1080 hole_basek = range_basek - hole_sizek - second_sizek;
1037 if (debug_print) 1081 if (debug_print)
1038 printk(KERN_DEBUG "hole: %016lx - %016lx\n", 1082 printk(KERN_DEBUG "hole: %016lx - %016lx\n",
1039 hole_basek<<10, (hole_basek + hole_sizek)<<10); 1083 hole_basek<<10,
1040 state->reg = range_to_mtrr(state->reg, hole_basek, hole_sizek, 1084 (hole_basek + hole_sizek)<<10);
1041 MTRR_TYPE_UNCACHABLE); 1085 state->reg = range_to_mtrr(state->reg, hole_basek,
1042 1086 hole_sizek, MTRR_TYPE_UNCACHABLE);
1043 } 1087 }
1044 1088
1045 return second_sizek; 1089 return second_sizek;
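Reading the rewritten flow end to end: range0 is state->range rounded up to a multiple of chunk_sizek (and, when a following window [basek, basek+sizek) is given, trimmed so it does not run past it); if the rounding would create a hole of at least half of range0's size, the second_try path gives one chunk back and tries again; whatever the rounded range still leaves uncovered gets its own WRBACK register, an interior hole is punched out with an UNCACHABLE register, and second_sizek tells the caller how much of its window was already covered here.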
@@ -1154,11 +1198,11 @@ struct mtrr_cleanup_result {
1154}; 1198};
1155 1199
1156/* 1200/*
1157 * gran_size: 1M, 2M, ..., 2G 1201 * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G
1158 * chunk size: gran_size, ..., 4G 1202 * chunk size: gran_size, ..., 2G
1159 * so we need (2+13)*6 1203 * so we need (1+16)*8
1160 */ 1204 */
1161#define NUM_RESULT 90 1205#define NUM_RESULT 136
1162#define PSHIFT (PAGE_SHIFT - 10) 1206#define PSHIFT (PAGE_SHIFT - 10)
1163 1207
1164static struct mtrr_cleanup_result __initdata result[NUM_RESULT]; 1208static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
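The new NUM_RESULT bound follows directly from the comment: gran_size takes the 16 power-of-two values from 64K to 2G, and the i-th of them allows 16 - i chunk sizes (gran_size up to 2G), for 16 + 15 + ... + 1 = 16*17/2 = 136 combinations, which is what the (1+16)*8 shorthand works out to. A two-loop check mirroring the bounds used later in mtrr_cleanup():

#include <stdio.h>

int main(void)
{
        unsigned long long gran, chunk;
        int n = 0;

        for (gran = 1ULL << 16; gran < (1ULL << 32); gran <<= 1)
                for (chunk = gran; chunk < (1ULL << 32); chunk <<= 1)
                        n++;

        printf("%d\n", n);   /* prints 136, matching NUM_RESULT */
        return 0;
}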
@@ -1168,13 +1212,14 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM];
1168static int __init mtrr_cleanup(unsigned address_bits) 1212static int __init mtrr_cleanup(unsigned address_bits)
1169{ 1213{
1170 unsigned long extra_remove_base, extra_remove_size; 1214 unsigned long extra_remove_base, extra_remove_size;
1171 unsigned long i, base, size, def, dummy; 1215 unsigned long base, size, def, dummy;
1172 mtrr_type type; 1216 mtrr_type type;
1173 int nr_range, nr_range_new; 1217 int nr_range, nr_range_new;
1174 u64 chunk_size, gran_size; 1218 u64 chunk_size, gran_size;
1175 unsigned long range_sums, range_sums_new; 1219 unsigned long range_sums, range_sums_new;
1176 int index_good; 1220 int index_good;
1177 int num_reg_good; 1221 int num_reg_good;
1222 int i;
1178 1223
1179 /* extra one for all 0 */ 1224 /* extra one for all 0 */
1180 int num[MTRR_NUM_TYPES + 1]; 1225 int num[MTRR_NUM_TYPES + 1];
@@ -1204,6 +1249,8 @@ static int __init mtrr_cleanup(unsigned address_bits)
1204 continue; 1249 continue;
1205 if (!size) 1250 if (!size)
1206 type = MTRR_NUM_TYPES; 1251 type = MTRR_NUM_TYPES;
1252 if (type == MTRR_TYPE_WRPROT)
1253 type = MTRR_TYPE_UNCACHABLE;
1207 num[type]++; 1254 num[type]++;
1208 } 1255 }
1209 1256
@@ -1216,23 +1263,57 @@ static int __init mtrr_cleanup(unsigned address_bits)
1216 num_var_ranges - num[MTRR_NUM_TYPES]) 1263 num_var_ranges - num[MTRR_NUM_TYPES])
1217 return 0; 1264 return 0;
1218 1265
1266 /* print original var MTRRs at first, for debugging: */
1267 printk(KERN_DEBUG "original variable MTRRs\n");
1268 for (i = 0; i < num_var_ranges; i++) {
1269 char start_factor = 'K', size_factor = 'K';
1270 unsigned long start_base, size_base;
1271
1272 size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
1273 if (!size_base)
1274 continue;
1275
1276 size_base = to_size_factor(size_base, &size_factor),
1277 start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
1278 start_base = to_size_factor(start_base, &start_factor),
1279 type = range_state[i].type;
1280
1281 printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
1282 i, start_base, start_factor,
1283 size_base, size_factor,
1284 (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
1285 ((type == MTRR_TYPE_WRPROT) ? "WP" :
1286 ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
1287 );
1288 }
1289
1219 memset(range, 0, sizeof(range)); 1290 memset(range, 0, sizeof(range));
1220 extra_remove_size = 0; 1291 extra_remove_size = 0;
1221 if (mtrr_tom2) { 1292 extra_remove_base = 1 << (32 - PAGE_SHIFT);
1222 extra_remove_base = 1 << (32 - PAGE_SHIFT); 1293 if (mtrr_tom2)
1223 extra_remove_size = 1294 extra_remove_size =
1224 (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; 1295 (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
1225 }
1226 nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, 1296 nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
1227 extra_remove_size); 1297 extra_remove_size);
1298 /*
 1299 * [0, 1M) should always be covered by var mtrr with WB
 1300 * and fixed mtrrs should take effect before var mtrr for it
1301 */
1302 nr_range = add_range_with_merge(range, nr_range, 0,
1303 (1ULL<<(20 - PAGE_SHIFT)) - 1);
1304 /* sort the ranges */
1305 sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
1306
1228 range_sums = sum_ranges(range, nr_range); 1307 range_sums = sum_ranges(range, nr_range);
 1229 printk(KERN_INFO "total RAM covered: %ldM\n", 1308
1230 range_sums >> (20 - PAGE_SHIFT)); 1309 range_sums >> (20 - PAGE_SHIFT));
1231 1310
1232 if (mtrr_chunk_size && mtrr_gran_size) { 1311 if (mtrr_chunk_size && mtrr_gran_size) {
1233 int num_reg; 1312 int num_reg;
1313 char gran_factor, chunk_factor, lose_factor;
1314 unsigned long gran_base, chunk_base, lose_base;
1234 1315
1235 debug_print = 1; 1316 debug_print++;
1236 /* convert ranges to var ranges state */ 1317 /* convert ranges to var ranges state */
1237 num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size, 1318 num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size,
1238 mtrr_gran_size); 1319 mtrr_gran_size);
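
The debug output in the hunks above and below switches from a fixed %ldM format to a to_size_factor() helper so sizes print in the largest whole unit. The helper itself is defined elsewhere in the mtrr cleanup code and is not part of this diff, so the standalone sketch below is only an inference from the call sites (input in KiB, result reduced to K/M/G plus a factor character); treat the body as an assumption, not the kernel's implementation.

#include <stdio.h>

/*
 * Userspace model of the size formatting the new debug output relies on.
 * Take a size in KiB, reduce it to the largest whole unit, return the
 * reduced value and report the unit character through *factorp.
 */
static unsigned long to_size_factor(unsigned long sizek, char *factorp)
{
    unsigned long base = sizek;
    char factor;

    if (base & ((1 << 10) - 1)) {
        factor = 'K';                   /* not a whole number of MiB */
    } else if (base & ((1 << 20) - 1)) {
        factor = 'M';
        base >>= 10;
    } else {
        factor = 'G';
        base >>= 20;
    }

    *factorp = factor;
    return base;
}

int main(void)
{
    unsigned long sizes[] = { 640, 2048, 3UL * 1024 * 1024 };
    char factor;

    for (int i = 0; i < 3; i++) {
        unsigned long base = to_size_factor(sizes[i], &factor);
        printf("%lu KiB -> %lu%cB\n", sizes[i], base, factor);
    }
    return 0;
}
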
@@ -1256,34 +1337,48 @@ static int __init mtrr_cleanup(unsigned address_bits)
1256 result[i].lose_cover_sizek = 1337 result[i].lose_cover_sizek =
1257 (range_sums - range_sums_new) << PSHIFT; 1338 (range_sums - range_sums_new) << PSHIFT;
1258 1339
1259 printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t", 1340 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1260 result[i].bad?"*BAD*":" ", result[i].gran_sizek >> 10, 1341 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1261 result[i].chunk_sizek >> 10); 1342 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1262 printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ldM \n", 1343 printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
1344 result[i].bad?"*BAD*":" ",
1345 gran_base, gran_factor, chunk_base, chunk_factor);
1346 printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
1263 result[i].num_reg, result[i].bad?"-":"", 1347 result[i].num_reg, result[i].bad?"-":"",
1264 result[i].lose_cover_sizek >> 10); 1348 lose_base, lose_factor);
1265 if (!result[i].bad) { 1349 if (!result[i].bad) {
1266 set_var_mtrr_all(address_bits); 1350 set_var_mtrr_all(address_bits);
1267 return 1; 1351 return 1;
1268 } 1352 }
1269 printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, " 1353 printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
1270 "will find optimal one\n"); 1354 "will find optimal one\n");
1271 debug_print = 0; 1355 debug_print--;
1272 memset(result, 0, sizeof(result[0])); 1356 memset(result, 0, sizeof(result[0]));
1273 } 1357 }
1274 1358
1275 i = 0; 1359 i = 0;
1276 memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn)); 1360 memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
1277 memset(result, 0, sizeof(result)); 1361 memset(result, 0, sizeof(result));
1278 for (gran_size = (1ULL<<20); gran_size < (1ULL<<32); gran_size <<= 1) { 1362 for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {
1279 for (chunk_size = gran_size; chunk_size < (1ULL<<33); 1363 char gran_factor;
1364 unsigned long gran_base;
1365
1366 if (debug_print)
1367 gran_base = to_size_factor(gran_size >> 10, &gran_factor);
1368
1369 for (chunk_size = gran_size; chunk_size < (1ULL<<32);
1280 chunk_size <<= 1) { 1370 chunk_size <<= 1) {
1281 int num_reg; 1371 int num_reg;
1282 1372
1283 if (debug_print) 1373 if (debug_print) {
1284 printk(KERN_INFO 1374 char chunk_factor;
1285 "\ngran_size: %lldM chunk_size_size: %lldM\n", 1375 unsigned long chunk_base;
1286 gran_size >> 20, chunk_size >> 20); 1376
1377 chunk_base = to_size_factor(chunk_size>>10, &chunk_factor),
1378 printk(KERN_INFO "\n");
1379 printk(KERN_INFO "gran_size: %ld%c chunk_size: %ld%c \n",
1380 gran_base, gran_factor, chunk_base, chunk_factor);
1381 }
1287 if (i >= NUM_RESULT) 1382 if (i >= NUM_RESULT)
1288 continue; 1383 continue;
1289 1384
@@ -1326,12 +1421,18 @@ static int __init mtrr_cleanup(unsigned address_bits)
1326 1421
1327 /* print out all */ 1422 /* print out all */
1328 for (i = 0; i < NUM_RESULT; i++) { 1423 for (i = 0; i < NUM_RESULT; i++) {
1329 printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t", 1424 char gran_factor, chunk_factor, lose_factor;
1330 result[i].bad?"*BAD* ":" ", result[i].gran_sizek >> 10, 1425 unsigned long gran_base, chunk_base, lose_base;
1331 result[i].chunk_sizek >> 10); 1426
1332 printk(KERN_CONT "num_reg: %d \tlose RAM: %s%ldM\n", 1427 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1333 result[i].num_reg, result[i].bad?"-":"", 1428 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1334 result[i].lose_cover_sizek >> 10); 1429 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1430 printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
1431 result[i].bad?"*BAD*":" ",
1432 gran_base, gran_factor, chunk_base, chunk_factor);
1433 printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
1434 result[i].num_reg, result[i].bad?"-":"",
1435 lose_base, lose_factor);
1335 } 1436 }
1336 1437
1337 /* try to find the optimal index */ 1438 /* try to find the optimal index */
@@ -1339,10 +1440,8 @@ static int __init mtrr_cleanup(unsigned address_bits)
1339 nr_mtrr_spare_reg = num_var_ranges - 1; 1440 nr_mtrr_spare_reg = num_var_ranges - 1;
1340 num_reg_good = -1; 1441 num_reg_good = -1;
1341 for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { 1442 for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
1342 if (!min_loss_pfn[i]) { 1443 if (!min_loss_pfn[i])
1343 num_reg_good = i; 1444 num_reg_good = i;
1344 break;
1345 }
1346 } 1445 }
1347 1446
1348 index_good = -1; 1447 index_good = -1;
@@ -1358,21 +1457,26 @@ static int __init mtrr_cleanup(unsigned address_bits)
1358 } 1457 }
1359 1458
1360 if (index_good != -1) { 1459 if (index_good != -1) {
1460 char gran_factor, chunk_factor, lose_factor;
1461 unsigned long gran_base, chunk_base, lose_base;
1462
1361 printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); 1463 printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
1362 i = index_good; 1464 i = index_good;
1363 printk(KERN_INFO "gran_size: %ldM \tchunk_size: %ldM \t", 1465 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1364 result[i].gran_sizek >> 10, 1466 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1365 result[i].chunk_sizek >> 10); 1467 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1366 printk(KERN_CONT "num_reg: %d \tlose RAM: %ldM\n", 1468 printk(KERN_INFO "gran_size: %ld%c \tchunk_size: %ld%c \t",
1367 result[i].num_reg, 1469 gran_base, gran_factor, chunk_base, chunk_factor);
1368 result[i].lose_cover_sizek >> 10); 1470 printk(KERN_CONT "num_reg: %d \tlose RAM: %ld%c\n",
1471 result[i].num_reg, lose_base, lose_factor);
1369 /* convert ranges to var ranges state */ 1472 /* convert ranges to var ranges state */
1370 chunk_size = result[i].chunk_sizek; 1473 chunk_size = result[i].chunk_sizek;
1371 chunk_size <<= 10; 1474 chunk_size <<= 10;
1372 gran_size = result[i].gran_sizek; 1475 gran_size = result[i].gran_sizek;
1373 gran_size <<= 10; 1476 gran_size <<= 10;
1374 debug_print = 1; 1477 debug_print++;
1375 x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); 1478 x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
1479 debug_print--;
1376 set_var_mtrr_all(address_bits); 1480 set_var_mtrr_all(address_bits);
1377 return 1; 1481 return 1;
1378 } 1482 }
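
A subtle behavioural change in the hunks above is the removal of the break in the num_reg_good loop: walking i downward and overwriting on every zero-loss hit now leaves the smallest register count with zero coverage loss, where the old code stopped at the largest. The toy below, with invented loss values, only illustrates that selection; min_loss_pfn, NUM_VAR_RANGES and nr_mtrr_spare_reg here are stand-ins, not the kernel's data.

#include <stdio.h>

#define NUM_VAR_RANGES 8

int main(void)
{
    /* invented per-register-count coverage loss, index = number of MTRRs used */
    unsigned long min_loss_pfn[NUM_VAR_RANGES] = {
        ~0UL, ~0UL, ~0UL, 0, 0, 0, 0, 0
    };
    int nr_mtrr_spare_reg = 1;
    int num_reg_good = -1;
    int i;

    for (i = NUM_VAR_RANGES - nr_mtrr_spare_reg; i > 0; i--) {
        if (!min_loss_pfn[i])
            num_reg_good = i;   /* no break: keep the smallest zero-loss i */
    }

    printf("num_reg_good = %d\n", num_reg_good);   /* prints 3 here */
    return 0;
}
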
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 05cc22dbd4ff..6bff382094f5 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -295,13 +295,19 @@ static int setup_k7_watchdog(unsigned nmi_hz)
295 /* setup the timer */ 295 /* setup the timer */
296 wrmsr(evntsel_msr, evntsel, 0); 296 wrmsr(evntsel_msr, evntsel, 0);
297 write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); 297 write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz);
298 apic_write(APIC_LVTPC, APIC_DM_NMI);
299 evntsel |= K7_EVNTSEL_ENABLE;
300 wrmsr(evntsel_msr, evntsel, 0);
301 298
299 /* initialize the wd struct before enabling */
302 wd->perfctr_msr = perfctr_msr; 300 wd->perfctr_msr = perfctr_msr;
303 wd->evntsel_msr = evntsel_msr; 301 wd->evntsel_msr = evntsel_msr;
304 wd->cccr_msr = 0; /* unused */ 302 wd->cccr_msr = 0; /* unused */
303
304 /* ok, everything is initialized, announce that we're set */
305 cpu_nmi_set_wd_enabled();
306
307 apic_write(APIC_LVTPC, APIC_DM_NMI);
308 evntsel |= K7_EVNTSEL_ENABLE;
309 wrmsr(evntsel_msr, evntsel, 0);
310
305 return 1; 311 return 1;
306} 312}
307 313
@@ -379,13 +385,19 @@ static int setup_p6_watchdog(unsigned nmi_hz)
379 wrmsr(evntsel_msr, evntsel, 0); 385 wrmsr(evntsel_msr, evntsel, 0);
380 nmi_hz = adjust_for_32bit_ctr(nmi_hz); 386 nmi_hz = adjust_for_32bit_ctr(nmi_hz);
381 write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); 387 write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz);
382 apic_write(APIC_LVTPC, APIC_DM_NMI);
383 evntsel |= P6_EVNTSEL0_ENABLE;
384 wrmsr(evntsel_msr, evntsel, 0);
385 388
389 /* initialize the wd struct before enabling */
386 wd->perfctr_msr = perfctr_msr; 390 wd->perfctr_msr = perfctr_msr;
387 wd->evntsel_msr = evntsel_msr; 391 wd->evntsel_msr = evntsel_msr;
388 wd->cccr_msr = 0; /* unused */ 392 wd->cccr_msr = 0; /* unused */
393
394 /* ok, everything is initialized, announce that we're set */
395 cpu_nmi_set_wd_enabled();
396
397 apic_write(APIC_LVTPC, APIC_DM_NMI);
398 evntsel |= P6_EVNTSEL0_ENABLE;
399 wrmsr(evntsel_msr, evntsel, 0);
400
389 return 1; 401 return 1;
390} 402}
391 403
@@ -432,6 +444,27 @@ static const struct wd_ops p6_wd_ops = {
432#define P4_CCCR_ENABLE (1 << 12) 444#define P4_CCCR_ENABLE (1 << 12)
433#define P4_CCCR_OVF (1 << 31) 445#define P4_CCCR_OVF (1 << 31)
434 446
447#define P4_CONTROLS 18
448static unsigned int p4_controls[18] = {
449 MSR_P4_BPU_CCCR0,
450 MSR_P4_BPU_CCCR1,
451 MSR_P4_BPU_CCCR2,
452 MSR_P4_BPU_CCCR3,
453 MSR_P4_MS_CCCR0,
454 MSR_P4_MS_CCCR1,
455 MSR_P4_MS_CCCR2,
456 MSR_P4_MS_CCCR3,
457 MSR_P4_FLAME_CCCR0,
458 MSR_P4_FLAME_CCCR1,
459 MSR_P4_FLAME_CCCR2,
460 MSR_P4_FLAME_CCCR3,
461 MSR_P4_IQ_CCCR0,
462 MSR_P4_IQ_CCCR1,
463 MSR_P4_IQ_CCCR2,
464 MSR_P4_IQ_CCCR3,
465 MSR_P4_IQ_CCCR4,
466 MSR_P4_IQ_CCCR5,
467};
435/* 468/*
436 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter 469 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
437 * CRU_ESCR0 (with any non-null event selector) through a complemented 470 * CRU_ESCR0 (with any non-null event selector) through a complemented
@@ -473,6 +506,26 @@ static int setup_p4_watchdog(unsigned nmi_hz)
473 evntsel_msr = MSR_P4_CRU_ESCR0; 506 evntsel_msr = MSR_P4_CRU_ESCR0;
474 cccr_msr = MSR_P4_IQ_CCCR0; 507 cccr_msr = MSR_P4_IQ_CCCR0;
475 cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); 508 cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
509
510 /*
511 * If we're on the kdump kernel or in some other situation, we may
512 * still have other performance counter registers set to
513 * interrupt and they'll keep interrupting forever because
514 * of the P4_CCCR_OVF quirk. So we need to ACK all the
515 * pending interrupts and disable all the registers here,
516 * before reenabling the NMI delivery. Refer to p4_rearm()
517 * about the P4_CCCR_OVF quirk.
518 */
519 if (reset_devices) {
520 unsigned int low, high;
521 int i;
522
523 for (i = 0; i < P4_CONTROLS; i++) {
524 rdmsr(p4_controls[i], low, high);
525 low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF);
526 wrmsr(p4_controls[i], low, high);
527 }
528 }
476 } else { 529 } else {
477 /* logical cpu 1 */ 530 /* logical cpu 1 */
478 perfctr_msr = MSR_P4_IQ_PERFCTR1; 531 perfctr_msr = MSR_P4_IQ_PERFCTR1;
@@ -499,12 +552,17 @@ static int setup_p4_watchdog(unsigned nmi_hz)
499 wrmsr(evntsel_msr, evntsel, 0); 552 wrmsr(evntsel_msr, evntsel, 0);
500 wrmsr(cccr_msr, cccr_val, 0); 553 wrmsr(cccr_msr, cccr_val, 0);
501 write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz); 554 write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
502 apic_write(APIC_LVTPC, APIC_DM_NMI); 555
503 cccr_val |= P4_CCCR_ENABLE;
504 wrmsr(cccr_msr, cccr_val, 0);
505 wd->perfctr_msr = perfctr_msr; 556 wd->perfctr_msr = perfctr_msr;
506 wd->evntsel_msr = evntsel_msr; 557 wd->evntsel_msr = evntsel_msr;
507 wd->cccr_msr = cccr_msr; 558 wd->cccr_msr = cccr_msr;
559
560 /* ok, everything is initialized, announce that we're set */
561 cpu_nmi_set_wd_enabled();
562
563 apic_write(APIC_LVTPC, APIC_DM_NMI);
564 cccr_val |= P4_CCCR_ENABLE;
565 wrmsr(cccr_msr, cccr_val, 0);
508 return 1; 566 return 1;
509} 567}
510 568
@@ -620,13 +678,17 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
620 wrmsr(evntsel_msr, evntsel, 0); 678 wrmsr(evntsel_msr, evntsel, 0);
621 nmi_hz = adjust_for_32bit_ctr(nmi_hz); 679 nmi_hz = adjust_for_32bit_ctr(nmi_hz);
622 write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz); 680 write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
623 apic_write(APIC_LVTPC, APIC_DM_NMI);
624 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
625 wrmsr(evntsel_msr, evntsel, 0);
626 681
627 wd->perfctr_msr = perfctr_msr; 682 wd->perfctr_msr = perfctr_msr;
628 wd->evntsel_msr = evntsel_msr; 683 wd->evntsel_msr = evntsel_msr;
629 wd->cccr_msr = 0; /* unused */ 684 wd->cccr_msr = 0; /* unused */
685
686 /* ok, everything is initialized, announce that we're set */
687 cpu_nmi_set_wd_enabled();
688
689 apic_write(APIC_LVTPC, APIC_DM_NMI);
690 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
691 wrmsr(evntsel_msr, evntsel, 0);
630 intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); 692 intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
631 return 1; 693 return 1;
632} 694}
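
Every setup_*_watchdog() variant in this file is reordered the same way: fill in the wd struct, call cpu_nmi_set_wd_enabled(), and only then program LVTPC for NMI delivery and set the enable bit, so an NMI that fires immediately after enabling finds fully initialized state. Below is a minimal userspace sketch of that publish-after-init idea, using a C11 atomic flag as a stand-in for the per-CPU wd_enabled variable; the struct fields, handler and MSR numbers are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct wd_state {
    unsigned long perfctr_msr;
    unsigned long evntsel_msr;
};

static struct wd_state wd;
static atomic_bool wd_enabled;

static void fake_nmi_handler(void)
{
    /* acquire pairs with the release in setup_watchdog() */
    if (!atomic_load_explicit(&wd_enabled, memory_order_acquire))
        return;                         /* not ours yet: ignore */
    printf("watchdog NMI, perfctr=%#lx\n", wd.perfctr_msr);
}

static void setup_watchdog(void)
{
    wd.perfctr_msr = 0xc1;              /* made-up MSR numbers */
    wd.evntsel_msr = 0x186;

    /* announce only after the state above is complete */
    atomic_store_explicit(&wd_enabled, true, memory_order_release);

    /* ...the real code would now program LVTPC and set the enable bit... */
    fake_nmi_handler();                 /* pretend an NMI fires right away */
}

int main(void)
{
    setup_watchdog();
    return 0;
}
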
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 8e9cd6a8ec12..6a44d6465991 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -36,7 +36,6 @@
36#include <linux/smp_lock.h> 36#include <linux/smp_lock.h>
37#include <linux/major.h> 37#include <linux/major.h>
38#include <linux/fs.h> 38#include <linux/fs.h>
39#include <linux/smp_lock.h>
40#include <linux/device.h> 39#include <linux/device.h>
41#include <linux/cpu.h> 40#include <linux/cpu.h>
42#include <linux/notifier.h> 41#include <linux/notifier.h>
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 15e6c6bc4a46..e90a60ef10c2 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -7,9 +7,8 @@
7 7
8#include <linux/errno.h> 8#include <linux/errno.h>
9#include <linux/crash_dump.h> 9#include <linux/crash_dump.h>
10 10#include <linux/uaccess.h>
11#include <asm/uaccess.h> 11#include <linux/io.h>
12#include <asm/io.h>
13 12
14/** 13/**
15 * copy_oldmem_page - copy one page from "oldmem" 14 * copy_oldmem_page - copy one page from "oldmem"
@@ -25,7 +24,7 @@
25 * in the current kernel. We stitch up a pte, similar to kmap_atomic. 24 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
26 */ 25 */
27ssize_t copy_oldmem_page(unsigned long pfn, char *buf, 26ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
28 size_t csize, unsigned long offset, int userbuf) 27 size_t csize, unsigned long offset, int userbuf)
29{ 28{
30 void *vaddr; 29 void *vaddr;
31 30
@@ -33,14 +32,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
33 return 0; 32 return 0;
34 33
35 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); 34 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
35 if (!vaddr)
36 return -ENOMEM;
36 37
37 if (userbuf) { 38 if (userbuf) {
38 if (copy_to_user(buf, (vaddr + offset), csize)) { 39 if (copy_to_user(buf, vaddr + offset, csize)) {
39 iounmap(vaddr); 40 iounmap(vaddr);
40 return -EFAULT; 41 return -EFAULT;
41 } 42 }
42 } else 43 } else
43 memcpy(buf, (vaddr + offset), csize); 44 memcpy(buf, vaddr + offset, csize);
44 45
45 iounmap(vaddr); 46 iounmap(vaddr);
46 return csize; 47 return csize;
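
The copy_oldmem_page() change above adds the missing failure check on ioremap() and keeps the rule that the mapping is torn down on every exit path. A userspace sketch of that shape, with map_page()/unmap_page() standing in for ioremap()/iounmap() and a plain memcpy() standing in for the userbuf-driven choice between copy_to_user() and memcpy():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* map_page()/unmap_page() stand in for ioremap()/iounmap() */
static void *map_page(unsigned long pfn)
{
    (void)pfn;
    return calloc(1, 4096);             /* may return NULL, like ioremap() */
}

static void unmap_page(void *vaddr)
{
    free(vaddr);
}

static long copy_oldmem_chunk(unsigned long pfn, char *buf,
                              size_t csize, unsigned long offset)
{
    void *vaddr = map_page(pfn);

    if (!vaddr)
        return -ENOMEM;                 /* the check added in the hunk above */

    /* the real code picks copy_to_user() or memcpy() based on userbuf */
    memcpy(buf, (char *)vaddr + offset, csize);

    unmap_page(vaddr);                  /* always undo the mapping */
    return (long)csize;
}

int main(void)
{
    char buf[64];

    printf("copied %ld bytes\n", copy_oldmem_chunk(0x1234, buf, sizeof(buf), 0));
    return 0;
}
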
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index ab21c270bfa4..2b69994fd3a8 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -479,12 +479,12 @@ static int ds_release(struct task_struct *task, enum ds_qualifier qual)
479 goto out; 479 goto out;
480 480
481 kfree(context->buffer[qual]); 481 kfree(context->buffer[qual]);
482 context->buffer[qual] = 0; 482 context->buffer[qual] = NULL;
483 483
484 current->mm->total_vm -= context->pages[qual]; 484 current->mm->total_vm -= context->pages[qual];
485 current->mm->locked_vm -= context->pages[qual]; 485 current->mm->locked_vm -= context->pages[qual];
486 context->pages[qual] = 0; 486 context->pages[qual] = 0;
487 context->owner[qual] = 0; 487 context->owner[qual] = NULL;
488 488
489 /* 489 /*
490 * we put the context twice: 490 * we put the context twice:
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 4353cf5e6fac..24bb5faf5efa 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -95,6 +95,20 @@ static void __init nvidia_bugs(int num, int slot, int func)
95 95
96} 96}
97 97
98#ifdef CONFIG_DMAR
99static void __init intel_g33_dmar(int num, int slot, int func)
100{
101 struct acpi_table_header *dmar_tbl;
102 acpi_status status;
103
104 status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
105 if (ACPI_SUCCESS(status)) {
106 printk(KERN_INFO "BIOS BUG: DMAR advertised on Intel G31/G33 chipset -- ignoring\n");
107 dmar_disabled = 1;
108 }
109}
110#endif
111
98#define QFLAG_APPLY_ONCE 0x1 112#define QFLAG_APPLY_ONCE 0x1
99#define QFLAG_APPLIED 0x2 113#define QFLAG_APPLIED 0x2
100#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) 114#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -114,6 +128,10 @@ static struct chipset early_qrk[] __initdata = {
114 PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, via_bugs }, 128 PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, via_bugs },
115 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB, 129 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
116 PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, fix_hypertransport_config }, 130 PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, fix_hypertransport_config },
131#ifdef CONFIG_DMAR
132 { PCI_VENDOR_ID_INTEL, 0x29c0,
133 PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, intel_g33_dmar },
134#endif
117 {} 135 {}
118}; 136};
119 137
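
The new table entry wires intel_g33_dmar() up by PCI vendor 0x8086, device 0x29c0 and host-bridge class; the walker that matches these fields and honours QFLAG_APPLY_ONCE lives elsewhere in early-quirks.c and is not shown, so the sketch below is only an assumed shape of that matching loop, with simplified field widths and an invented walker name.

#include <stdio.h>

#define QFLAG_APPLY_ONCE 0x1
#define QFLAG_APPLIED    0x2
#define QFLAG_DONE       (QFLAG_APPLY_ONCE | QFLAG_APPLIED)
#define PCI_ANY_ID       0xffffffffu

struct chipset {
    unsigned vendor, device, class, class_mask, flags;
    void (*f)(int num, int slot, int func);
};

static void intel_g33_dmar(int num, int slot, int func)
{
    printf("would disable DMAR for %02x:%02x.%x\n", num, slot, func);
}

static struct chipset early_qrk[] = {
    { 0x8086, 0x29c0, 0x0600, 0xffff, 0, intel_g33_dmar },
    { 0 }
};

static void check_dev_quirk(int num, int slot, int func,
                            unsigned vendor, unsigned device, unsigned class)
{
    for (struct chipset *q = early_qrk; q->f; q++) {
        if (q->vendor != PCI_ANY_ID && q->vendor != vendor)
            continue;
        if (q->device != PCI_ANY_ID && q->device != device)
            continue;
        if ((q->class ^ class) & q->class_mask)
            continue;
        if ((q->flags & QFLAG_DONE) == QFLAG_DONE)
            continue;                   /* already applied once */
        q->flags |= QFLAG_APPLIED;
        q->f(num, slot, func);
    }
}

int main(void)
{
    /* pretend we found the G33 host bridge at 00:00.0 */
    check_dev_quirk(0, 0, 0, 0x8086, 0x29c0, 0x0600);
    return 0;
}
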
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 06cc8d4254b1..945a31cdd81f 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -414,9 +414,11 @@ void __init efi_init(void)
414 if (memmap.map == NULL) 414 if (memmap.map == NULL)
415 printk(KERN_ERR "Could not map the EFI memory map!\n"); 415 printk(KERN_ERR "Could not map the EFI memory map!\n");
416 memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size); 416 memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
417
417 if (memmap.desc_size != sizeof(efi_memory_desc_t)) 418 if (memmap.desc_size != sizeof(efi_memory_desc_t))
418 printk(KERN_WARNING "Kernel-defined memdesc" 419 printk(KERN_WARNING
419 "doesn't match the one from EFI!\n"); 420 "Kernel-defined memdesc doesn't match the one from EFI!\n");
421
420 if (add_efi_memmap) 422 if (add_efi_memmap)
421 do_add_efi_memmap(); 423 do_add_efi_memmap();
422 424
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 9bfc4d72fb2e..d16084f90649 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -108,12 +108,11 @@ void __init x86_64_start_kernel(char * real_mode_data)
108 } 108 }
109 load_idt((const struct desc_ptr *)&idt_descr); 109 load_idt((const struct desc_ptr *)&idt_descr);
110 110
111 early_printk("Kernel alive\n"); 111 if (console_loglevel == 10)
112 early_printk("Kernel alive\n");
112 113
113 x86_64_init_pda(); 114 x86_64_init_pda();
114 115
115 early_printk("Kernel really alive\n");
116
117 x86_64_start_reservations(real_mode_data); 116 x86_64_start_reservations(real_mode_data);
118} 117}
119 118
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index a7010c3a377a..e835b4eea70b 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -172,10 +172,6 @@ num_subarch_entries = (. - subarch_entries) / 4
172 * 172 *
173 * Note that the stack is not yet set up! 173 * Note that the stack is not yet set up!
174 */ 174 */
175#define PTE_ATTR 0x007 /* PRESENT+RW+USER */
176#define PDE_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
177#define PGD_ATTR 0x001 /* PRESENT (no other attributes) */
178
179default_entry: 175default_entry:
180#ifdef CONFIG_X86_PAE 176#ifdef CONFIG_X86_PAE
181 177
@@ -196,9 +192,9 @@ default_entry:
196 movl $pa(pg0), %edi 192 movl $pa(pg0), %edi
197 movl %edi, pa(init_pg_tables_start) 193 movl %edi, pa(init_pg_tables_start)
198 movl $pa(swapper_pg_pmd), %edx 194 movl $pa(swapper_pg_pmd), %edx
199 movl $PTE_ATTR, %eax 195 movl $PTE_IDENT_ATTR, %eax
20010: 19610:
201 leal PDE_ATTR(%edi),%ecx /* Create PMD entry */ 197 leal PDE_IDENT_ATTR(%edi),%ecx /* Create PMD entry */
202 movl %ecx,(%edx) /* Store PMD entry */ 198 movl %ecx,(%edx) /* Store PMD entry */
203 /* Upper half already zero */ 199 /* Upper half already zero */
204 addl $8,%edx 200 addl $8,%edx
@@ -215,7 +211,7 @@ default_entry:
215 * End condition: we must map up to and including INIT_MAP_BEYOND_END 211 * End condition: we must map up to and including INIT_MAP_BEYOND_END
216 * bytes beyond the end of our own page tables. 212 * bytes beyond the end of our own page tables.
217 */ 213 */
218 leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp 214 leal (INIT_MAP_BEYOND_END+PTE_IDENT_ATTR)(%edi),%ebp
219 cmpl %ebp,%eax 215 cmpl %ebp,%eax
220 jb 10b 216 jb 10b
2211: 2171:
@@ -224,7 +220,7 @@ default_entry:
224 movl %eax, pa(max_pfn_mapped) 220 movl %eax, pa(max_pfn_mapped)
225 221
226 /* Do early initialization of the fixmap area */ 222 /* Do early initialization of the fixmap area */
227 movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax 223 movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
228 movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8) 224 movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
229#else /* Not PAE */ 225#else /* Not PAE */
230 226
@@ -233,9 +229,9 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
233 movl $pa(pg0), %edi 229 movl $pa(pg0), %edi
234 movl %edi, pa(init_pg_tables_start) 230 movl %edi, pa(init_pg_tables_start)
235 movl $pa(swapper_pg_dir), %edx 231 movl $pa(swapper_pg_dir), %edx
236 movl $PTE_ATTR, %eax 232 movl $PTE_IDENT_ATTR, %eax
23710: 23310:
238 leal PDE_ATTR(%edi),%ecx /* Create PDE entry */ 234 leal PDE_IDENT_ATTR(%edi),%ecx /* Create PDE entry */
239 movl %ecx,(%edx) /* Store identity PDE entry */ 235 movl %ecx,(%edx) /* Store identity PDE entry */
240 movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */ 236 movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */
241 addl $4,%edx 237 addl $4,%edx
@@ -249,7 +245,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
249 * bytes beyond the end of our own page tables; the +0x007 is 245 * bytes beyond the end of our own page tables; the +0x007 is
250 * the attribute bits 246 * the attribute bits
251 */ 247 */
252 leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp 248 leal (INIT_MAP_BEYOND_END+PTE_IDENT_ATTR)(%edi),%ebp
253 cmpl %ebp,%eax 249 cmpl %ebp,%eax
254 jb 10b 250 jb 10b
255 movl %edi,pa(init_pg_tables_end) 251 movl %edi,pa(init_pg_tables_end)
@@ -257,7 +253,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
257 movl %eax, pa(max_pfn_mapped) 253 movl %eax, pa(max_pfn_mapped)
258 254
259 /* Do early initialization of the fixmap area */ 255 /* Do early initialization of the fixmap area */
260 movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax 256 movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
261 movl %eax,pa(swapper_pg_dir+0xffc) 257 movl %eax,pa(swapper_pg_dir+0xffc)
262#endif 258#endif
263 jmp 3f 259 jmp 3f
@@ -634,19 +630,19 @@ ENTRY(empty_zero_page)
634 /* Page-aligned for the benefit of paravirt? */ 630 /* Page-aligned for the benefit of paravirt? */
635 .align PAGE_SIZE_asm 631 .align PAGE_SIZE_asm
636ENTRY(swapper_pg_dir) 632ENTRY(swapper_pg_dir)
637 .long pa(swapper_pg_pmd+PGD_ATTR),0 /* low identity map */ 633 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
638# if KPMDS == 3 634# if KPMDS == 3
639 .long pa(swapper_pg_pmd+PGD_ATTR),0 635 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
640 .long pa(swapper_pg_pmd+PGD_ATTR+0x1000),0 636 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0
641 .long pa(swapper_pg_pmd+PGD_ATTR+0x2000),0 637 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x2000),0
642# elif KPMDS == 2 638# elif KPMDS == 2
643 .long 0,0 639 .long 0,0
644 .long pa(swapper_pg_pmd+PGD_ATTR),0 640 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
645 .long pa(swapper_pg_pmd+PGD_ATTR+0x1000),0 641 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0
646# elif KPMDS == 1 642# elif KPMDS == 1
647 .long 0,0 643 .long 0,0
648 .long 0,0 644 .long 0,0
649 .long pa(swapper_pg_pmd+PGD_ATTR),0 645 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
650# else 646# else
651# error "Kernel PMDs should be 1, 2 or 3" 647# error "Kernel PMDs should be 1, 2 or 3"
652# endif 648# endif
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index db3280afe886..26cfdc1d7c7f 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -110,7 +110,7 @@ startup_64:
110 movq %rdi, %rax 110 movq %rdi, %rax
111 shrq $PMD_SHIFT, %rax 111 shrq $PMD_SHIFT, %rax
112 andq $(PTRS_PER_PMD - 1), %rax 112 andq $(PTRS_PER_PMD - 1), %rax
113 leaq __PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx 113 leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
114 leaq level2_spare_pgt(%rip), %rbx 114 leaq level2_spare_pgt(%rip), %rbx
115 movq %rdx, 0(%rbx, %rax, 8) 115 movq %rdx, 0(%rbx, %rax, 8)
116ident_complete: 116ident_complete:
@@ -374,7 +374,7 @@ NEXT_PAGE(level2_ident_pgt)
374 /* Since I easily can, map the first 1G. 374 /* Since I easily can, map the first 1G.
375 * Don't set NX because code runs from these pages. 375 * Don't set NX because code runs from these pages.
376 */ 376 */
377 PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD) 377 PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
378 378
379NEXT_PAGE(level2_kernel_pgt) 379NEXT_PAGE(level2_kernel_pgt)
380 /* 380 /*
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 1cf8c1fcc088..b71e02d42f4f 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -325,7 +325,7 @@ skip:
325 for_each_online_cpu(j) 325 for_each_online_cpu(j)
326 seq_printf(p, "%10u ", 326 seq_printf(p, "%10u ",
327 per_cpu(irq_stat,j).irq_call_count); 327 per_cpu(irq_stat,j).irq_call_count);
328 seq_printf(p, " function call interrupts\n"); 328 seq_printf(p, " Function call interrupts\n");
329 seq_printf(p, "TLB: "); 329 seq_printf(p, "TLB: ");
330 for_each_online_cpu(j) 330 for_each_online_cpu(j)
331 seq_printf(p, "%10u ", 331 seq_printf(p, "%10u ",
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 1f78b238d8d2..f065fe9071b9 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -129,7 +129,7 @@ skip:
129 seq_printf(p, "CAL: "); 129 seq_printf(p, "CAL: ");
130 for_each_online_cpu(j) 130 for_each_online_cpu(j)
131 seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count); 131 seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
132 seq_printf(p, " function call interrupts\n"); 132 seq_printf(p, " Function call interrupts\n");
133 seq_printf(p, "TLB: "); 133 seq_printf(p, "TLB: ");
134 for_each_online_cpu(j) 134 for_each_online_cpu(j)
135 seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count); 135 seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index 7377ccb21335..304d8bad6559 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -16,8 +16,9 @@ EXPORT_SYMBOL(num_k8_northbridges);
16static u32 *flush_words; 16static u32 *flush_words;
17 17
18struct pci_device_id k8_nb_ids[] = { 18struct pci_device_id k8_nb_ids[] = {
19 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, 19 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
20 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, 20 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
21 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
21 {} 22 {}
22}; 23};
23EXPORT_SYMBOL(k8_nb_ids); 24EXPORT_SYMBOL(k8_nb_ids);
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index f2d43bc75514..ff7d3b0124f1 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -139,6 +139,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
139 if (PageHighMem(pg)) { 139 if (PageHighMem(pg)) {
140 data = ioremap_cache(pa_data, sizeof(*data)); 140 data = ioremap_cache(pa_data, sizeof(*data));
141 if (!data) { 141 if (!data) {
142 kfree(node);
142 error = -ENXIO; 143 error = -ENXIO;
143 goto err_dir; 144 goto err_dir;
144 } 145 }
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index f47f0eb886b8..10435a120d22 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -69,6 +69,9 @@ static int gdb_x86vector = -1;
69 */ 69 */
70void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) 70void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
71{ 71{
72#ifndef CONFIG_X86_32
73 u32 *gdb_regs32 = (u32 *)gdb_regs;
74#endif
72 gdb_regs[GDB_AX] = regs->ax; 75 gdb_regs[GDB_AX] = regs->ax;
73 gdb_regs[GDB_BX] = regs->bx; 76 gdb_regs[GDB_BX] = regs->bx;
74 gdb_regs[GDB_CX] = regs->cx; 77 gdb_regs[GDB_CX] = regs->cx;
@@ -76,9 +79,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
76 gdb_regs[GDB_SI] = regs->si; 79 gdb_regs[GDB_SI] = regs->si;
77 gdb_regs[GDB_DI] = regs->di; 80 gdb_regs[GDB_DI] = regs->di;
78 gdb_regs[GDB_BP] = regs->bp; 81 gdb_regs[GDB_BP] = regs->bp;
79 gdb_regs[GDB_PS] = regs->flags;
80 gdb_regs[GDB_PC] = regs->ip; 82 gdb_regs[GDB_PC] = regs->ip;
81#ifdef CONFIG_X86_32 83#ifdef CONFIG_X86_32
84 gdb_regs[GDB_PS] = regs->flags;
82 gdb_regs[GDB_DS] = regs->ds; 85 gdb_regs[GDB_DS] = regs->ds;
83 gdb_regs[GDB_ES] = regs->es; 86 gdb_regs[GDB_ES] = regs->es;
84 gdb_regs[GDB_CS] = regs->cs; 87 gdb_regs[GDB_CS] = regs->cs;
@@ -94,6 +97,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
94 gdb_regs[GDB_R13] = regs->r13; 97 gdb_regs[GDB_R13] = regs->r13;
95 gdb_regs[GDB_R14] = regs->r14; 98 gdb_regs[GDB_R14] = regs->r14;
96 gdb_regs[GDB_R15] = regs->r15; 99 gdb_regs[GDB_R15] = regs->r15;
100 gdb_regs32[GDB_PS] = regs->flags;
101 gdb_regs32[GDB_CS] = regs->cs;
102 gdb_regs32[GDB_SS] = regs->ss;
97#endif 103#endif
98 gdb_regs[GDB_SP] = regs->sp; 104 gdb_regs[GDB_SP] = regs->sp;
99} 105}
@@ -112,6 +118,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
112 */ 118 */
113void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) 119void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
114{ 120{
121#ifndef CONFIG_X86_32
122 u32 *gdb_regs32 = (u32 *)gdb_regs;
123#endif
115 gdb_regs[GDB_AX] = 0; 124 gdb_regs[GDB_AX] = 0;
116 gdb_regs[GDB_BX] = 0; 125 gdb_regs[GDB_BX] = 0;
117 gdb_regs[GDB_CX] = 0; 126 gdb_regs[GDB_CX] = 0;
@@ -129,8 +138,10 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
129 gdb_regs[GDB_FS] = 0xFFFF; 138 gdb_regs[GDB_FS] = 0xFFFF;
130 gdb_regs[GDB_GS] = 0xFFFF; 139 gdb_regs[GDB_GS] = 0xFFFF;
131#else 140#else
132 gdb_regs[GDB_PS] = *(unsigned long *)(p->thread.sp + 8); 141 gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8);
133 gdb_regs[GDB_PC] = 0; 142 gdb_regs32[GDB_CS] = __KERNEL_CS;
143 gdb_regs32[GDB_SS] = __KERNEL_DS;
144 gdb_regs[GDB_PC] = p->thread.ip;
134 gdb_regs[GDB_R8] = 0; 145 gdb_regs[GDB_R8] = 0;
135 gdb_regs[GDB_R9] = 0; 146 gdb_regs[GDB_R9] = 0;
136 gdb_regs[GDB_R10] = 0; 147 gdb_regs[GDB_R10] = 0;
@@ -153,6 +164,9 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
153 */ 164 */
154void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) 165void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
155{ 166{
167#ifndef CONFIG_X86_32
168 u32 *gdb_regs32 = (u32 *)gdb_regs;
169#endif
156 regs->ax = gdb_regs[GDB_AX]; 170 regs->ax = gdb_regs[GDB_AX];
157 regs->bx = gdb_regs[GDB_BX]; 171 regs->bx = gdb_regs[GDB_BX];
158 regs->cx = gdb_regs[GDB_CX]; 172 regs->cx = gdb_regs[GDB_CX];
@@ -160,9 +174,9 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
160 regs->si = gdb_regs[GDB_SI]; 174 regs->si = gdb_regs[GDB_SI];
161 regs->di = gdb_regs[GDB_DI]; 175 regs->di = gdb_regs[GDB_DI];
162 regs->bp = gdb_regs[GDB_BP]; 176 regs->bp = gdb_regs[GDB_BP];
163 regs->flags = gdb_regs[GDB_PS];
164 regs->ip = gdb_regs[GDB_PC]; 177 regs->ip = gdb_regs[GDB_PC];
165#ifdef CONFIG_X86_32 178#ifdef CONFIG_X86_32
179 regs->flags = gdb_regs[GDB_PS];
166 regs->ds = gdb_regs[GDB_DS]; 180 regs->ds = gdb_regs[GDB_DS];
167 regs->es = gdb_regs[GDB_ES]; 181 regs->es = gdb_regs[GDB_ES];
168 regs->cs = gdb_regs[GDB_CS]; 182 regs->cs = gdb_regs[GDB_CS];
@@ -175,6 +189,9 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
175 regs->r13 = gdb_regs[GDB_R13]; 189 regs->r13 = gdb_regs[GDB_R13];
176 regs->r14 = gdb_regs[GDB_R14]; 190 regs->r14 = gdb_regs[GDB_R14];
177 regs->r15 = gdb_regs[GDB_R15]; 191 regs->r15 = gdb_regs[GDB_R15];
192 regs->flags = gdb_regs32[GDB_PS];
193 regs->cs = gdb_regs32[GDB_CS];
194 regs->ss = gdb_regs32[GDB_SS];
178#endif 195#endif
179} 196}
180 197
@@ -378,10 +395,8 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
378 if (remcomInBuffer[0] == 's') { 395 if (remcomInBuffer[0] == 's') {
379 linux_regs->flags |= X86_EFLAGS_TF; 396 linux_regs->flags |= X86_EFLAGS_TF;
380 kgdb_single_step = 1; 397 kgdb_single_step = 1;
381 if (kgdb_contthread) { 398 atomic_set(&kgdb_cpu_doing_single_step,
382 atomic_set(&kgdb_cpu_doing_single_step, 399 raw_smp_processor_id());
383 raw_smp_processor_id());
384 }
385 } 400 }
386 401
387 get_debugreg(dr6, 6); 402 get_debugreg(dr6, 6);
@@ -440,12 +455,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
440 return NOTIFY_DONE; 455 return NOTIFY_DONE;
441 456
442 case DIE_NMI_IPI: 457 case DIE_NMI_IPI:
443 if (atomic_read(&kgdb_active) != -1) { 458 /* Just ignore, we will handle the roundup on DIE_NMI. */
444 /* KGDB CPU roundup */
445 kgdb_nmicallback(raw_smp_processor_id(), regs);
446 was_in_debug_nmi[raw_smp_processor_id()] = 1;
447 touch_nmi_watchdog();
448 }
449 return NOTIFY_DONE; 459 return NOTIFY_DONE;
450 460
451 case DIE_NMIUNKNOWN: 461 case DIE_NMIUNKNOWN:
@@ -466,9 +476,15 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
466 476
467 case DIE_DEBUG: 477 case DIE_DEBUG:
468 if (atomic_read(&kgdb_cpu_doing_single_step) == 478 if (atomic_read(&kgdb_cpu_doing_single_step) ==
469 raw_smp_processor_id() && 479 raw_smp_processor_id()) {
470 user_mode(regs)) 480 if (user_mode(regs))
471 return single_step_cont(regs, args); 481 return single_step_cont(regs, args);
482 break;
483 } else if (test_thread_flag(TIF_SINGLESTEP))
484 /* This means a user thread is single stepping
485 * a system call which should be ignored
486 */
487 return NOTIFY_DONE;
472 /* fall through */ 488 /* fall through */
473 default: 489 default:
474 if (user_mode(regs)) 490 if (user_mode(regs))
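
On x86-64 the kgdb changes above stop treating flags, CS and SS as full 64-bit slots and instead write them through a u32 view of the same buffer (gdb_regs32), and a sleeping thread now reports its real thread.ip instead of 0. The toy below only demonstrates the two-width view of one buffer; the slot indices are invented and do not reflect GDB's actual remote register layout. (The kernel builds with -fno-strict-aliasing, so the aliasing cast mirrors what the real code does.)

#include <stdint.h>
#include <stdio.h>

#define NR_SLOTS 16
#define DEMO_PC   0     /* a 64-bit slot (invented index) */
#define DEMO_PS  12     /* 32-bit slots viewed through gdb_regs32 (invented) */
#define DEMO_CS  13

int main(void)
{
    uint64_t gdb_regs[NR_SLOTS] = { 0 };
    uint32_t *gdb_regs32 = (uint32_t *)gdb_regs;

    gdb_regs[DEMO_PC]   = 0xffffffff81000000ULL;   /* full 64-bit value */
    gdb_regs32[DEMO_PS] = 0x246;                   /* packed 32-bit slots */
    gdb_regs32[DEMO_CS] = 0x10;

    printf("pc=%#llx ps=%#x cs=%#x\n",
           (unsigned long long)gdb_regs[DEMO_PC],
           (unsigned)gdb_regs32[DEMO_PS],
           (unsigned)gdb_regs32[DEMO_CS]);
    return 0;
}
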
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 8b7a3cf37d2b..478bca986eca 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -178,7 +178,7 @@ static void kvm_flush_tlb(void)
178 kvm_deferred_mmu_op(&ftlb, sizeof ftlb); 178 kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
179} 179}
180 180
181static void kvm_release_pt(u32 pfn) 181static void kvm_release_pt(unsigned long pfn)
182{ 182{
183 struct kvm_mmu_op_release_pt rpt = { 183 struct kvm_mmu_op_release_pt rpt = {
184 .header.op = KVM_MMU_OP_RELEASE_PT, 184 .header.op = KVM_MMU_OP_RELEASE_PT,
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index abb78a2cc4ad..2c97f07f1c2c 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -299,6 +299,15 @@ void acpi_nmi_disable(void)
299 on_each_cpu(__acpi_nmi_disable, NULL, 1); 299 on_each_cpu(__acpi_nmi_disable, NULL, 1);
300} 300}
301 301
302/*
303 * This function is called as soon as the LAPIC NMI watchdog driver has everything
304 * in place and it's ready to check if the NMIs belong to the NMI watchdog
305 */
306void cpu_nmi_set_wd_enabled(void)
307{
308 __get_cpu_var(wd_enabled) = 1;
309}
310
302void setup_apic_nmi_watchdog(void *unused) 311void setup_apic_nmi_watchdog(void *unused)
303{ 312{
304 if (__get_cpu_var(wd_enabled)) 313 if (__get_cpu_var(wd_enabled))
@@ -311,8 +320,6 @@ void setup_apic_nmi_watchdog(void *unused)
311 320
312 switch (nmi_watchdog) { 321 switch (nmi_watchdog) {
313 case NMI_LOCAL_APIC: 322 case NMI_LOCAL_APIC:
314 /* enable it before to avoid race with handler */
315 __get_cpu_var(wd_enabled) = 1;
316 if (lapic_watchdog_init(nmi_hz) < 0) { 323 if (lapic_watchdog_init(nmi_hz) < 0) {
317 __get_cpu_var(wd_enabled) = 0; 324 __get_cpu_var(wd_enabled) = 0;
318 return; 325 return;
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
index 3e6672274807..7a13fac63a1f 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -190,12 +190,12 @@ EXPORT_SYMBOL_GPL(olpc_ec_cmd);
190static void __init platform_detect(void) 190static void __init platform_detect(void)
191{ 191{
192 size_t propsize; 192 size_t propsize;
193 u32 rev; 193 __be32 rev;
194 194
195 if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4, 195 if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4,
196 &propsize) || propsize != 4) { 196 &propsize) || propsize != 4) {
197 printk(KERN_ERR "ofw: getprop call failed!\n"); 197 printk(KERN_ERR "ofw: getprop call failed!\n");
198 rev = 0; 198 rev = cpu_to_be32(0);
199 } 199 }
200 olpc_platform_info.boardrev = be32_to_cpu(rev); 200 olpc_platform_info.boardrev = be32_to_cpu(rev);
201} 201}
@@ -203,7 +203,7 @@ static void __init platform_detect(void)
203static void __init platform_detect(void) 203static void __init platform_detect(void)
204{ 204{
205 /* stopgap until OFW support is added to the kernel */ 205 /* stopgap until OFW support is added to the kernel */
206 olpc_platform_info.boardrev = be32_to_cpu(0xc2); 206 olpc_platform_info.boardrev = 0xc2;
207} 207}
208#endif 208#endif
209 209
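
The OLPC fix above gives the firmware-returned revision an explicit __be32 type so be32_to_cpu() is applied to a genuinely big-endian value, and drops the bogus conversion around the native 0xc2 constant. A userspace analogue of the same idea, with htonl()/ntohl() standing in for cpu_to_be32()/be32_to_cpu() and a fake getprop in place of the ofw() call:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static int fake_getprop(uint32_t *rev_be)
{
    *rev_be = htonl(0xc2);   /* firmware hands the value back big-endian */
    return 0;                /* 0 = success */
}

int main(void)
{
    uint32_t rev_be;         /* stays big-endian until explicitly converted */
    uint32_t boardrev;

    if (fake_getprop(&rev_be))
        rev_be = htonl(0);   /* error path mirrors rev = cpu_to_be32(0) */

    boardrev = ntohl(rev_be);
    printf("boardrev = 0x%x\n", boardrev);   /* 0xc2 on any host endianness */
    return 0;
}
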
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 58262218781b..9fe644f4861d 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -23,7 +23,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
23 start = start_##ops##_##x; \ 23 start = start_##ops##_##x; \
24 end = end_##ops##_##x; \ 24 end = end_##ops##_##x; \
25 goto patch_site 25 goto patch_site
26 switch(type) { 26 switch (type) {
27 PATCH_SITE(pv_irq_ops, irq_disable); 27 PATCH_SITE(pv_irq_ops, irq_disable);
28 PATCH_SITE(pv_irq_ops, irq_enable); 28 PATCH_SITE(pv_irq_ops, irq_enable);
29 PATCH_SITE(pv_irq_ops, restore_fl); 29 PATCH_SITE(pv_irq_ops, restore_fl);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index dcdac6c826e9..080d1d27f37a 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -261,7 +261,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
261 badbit, tbl, start_addr, npages); 261 badbit, tbl, start_addr, npages);
262 } 262 }
263 263
264 set_bit_string(tbl->it_map, index, npages); 264 iommu_area_reserve(tbl->it_map, index, npages);
265 265
266 spin_unlock_irqrestore(&tbl->it_lock, flags); 266 spin_unlock_irqrestore(&tbl->it_lock, flags);
267} 267}
@@ -491,6 +491,8 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
491 npages = size >> PAGE_SHIFT; 491 npages = size >> PAGE_SHIFT;
492 order = get_order(size); 492 order = get_order(size);
493 493
494 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
495
494 /* alloc enough pages (and possibly more) */ 496 /* alloc enough pages (and possibly more) */
495 ret = (void *)__get_free_pages(flag, order); 497 ret = (void *)__get_free_pages(flag, order);
496 if (!ret) 498 if (!ret)
@@ -510,8 +512,22 @@ error:
510 return ret; 512 return ret;
511} 513}
512 514
515static void calgary_free_coherent(struct device *dev, size_t size,
516 void *vaddr, dma_addr_t dma_handle)
517{
518 unsigned int npages;
519 struct iommu_table *tbl = find_iommu_table(dev);
520
521 size = PAGE_ALIGN(size);
522 npages = size >> PAGE_SHIFT;
523
524 iommu_free(tbl, dma_handle, npages);
525 free_pages((unsigned long)vaddr, get_order(size));
526}
527
513static struct dma_mapping_ops calgary_dma_ops = { 528static struct dma_mapping_ops calgary_dma_ops = {
514 .alloc_coherent = calgary_alloc_coherent, 529 .alloc_coherent = calgary_alloc_coherent,
530 .free_coherent = calgary_free_coherent,
515 .map_single = calgary_map_single, 531 .map_single = calgary_map_single,
516 .unmap_single = calgary_unmap_single, 532 .unmap_single = calgary_unmap_single,
517 .map_sg = calgary_map_sg, 533 .map_sg = calgary_map_sg,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 87d4d6964ec2..0a3824e837b4 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -41,11 +41,12 @@ EXPORT_SYMBOL(bad_dma_address);
41/* Dummy device used for NULL arguments (normally ISA). Better would 41/* Dummy device used for NULL arguments (normally ISA). Better would
42 be probably a smaller DMA mask, but this is bug-to-bug compatible 42 be probably a smaller DMA mask, but this is bug-to-bug compatible
43 to older i386. */ 43 to older i386. */
44struct device fallback_dev = { 44struct device x86_dma_fallback_dev = {
45 .bus_id = "fallback device", 45 .bus_id = "fallback device",
46 .coherent_dma_mask = DMA_32BIT_MASK, 46 .coherent_dma_mask = DMA_32BIT_MASK,
47 .dma_mask = &fallback_dev.coherent_dma_mask, 47 .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
48}; 48};
49EXPORT_SYMBOL(x86_dma_fallback_dev);
49 50
50int dma_set_mask(struct device *dev, u64 mask) 51int dma_set_mask(struct device *dev, u64 mask)
51{ 52{
@@ -82,7 +83,7 @@ void __init dma32_reserve_bootmem(void)
82 * using 512M as goal 83 * using 512M as goal
83 */ 84 */
84 align = 64ULL<<20; 85 align = 64ULL<<20;
85 size = round_up(dma32_bootmem_size, align); 86 size = roundup(dma32_bootmem_size, align);
86 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, 87 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
87 512ULL<<20); 88 512ULL<<20);
88 if (dma32_bootmem_ptr) 89 if (dma32_bootmem_ptr)
@@ -133,6 +134,37 @@ unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
133EXPORT_SYMBOL(iommu_num_pages); 134EXPORT_SYMBOL(iommu_num_pages);
134#endif 135#endif
135 136
137void *dma_generic_alloc_coherent(struct device *dev, size_t size,
138 dma_addr_t *dma_addr, gfp_t flag)
139{
140 unsigned long dma_mask;
141 struct page *page;
142 dma_addr_t addr;
143
144 dma_mask = dma_alloc_coherent_mask(dev, flag);
145
146 flag |= __GFP_ZERO;
147again:
148 page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
149 if (!page)
150 return NULL;
151
152 addr = page_to_phys(page);
153 if (!is_buffer_dma_capable(dma_mask, addr, size)) {
154 __free_pages(page, get_order(size));
155
156 if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
157 flag = (flag & ~GFP_DMA32) | GFP_DMA;
158 goto again;
159 }
160
161 return NULL;
162 }
163
164 *dma_addr = addr;
165 return page_address(page);
166}
167
136/* 168/*
137 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter 169 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
138 * documentation. 170 * documentation.
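
dma_generic_alloc_coherent() above allocates zeroed pages near the device, checks the resulting bus address against the device's coherent mask, and on failure frees the pages and retries once with GFP_DMA in place of GFP_DMA32. A small userspace model of that allocate/check/retry loop; the zone constants and fake_alloc() addresses are invented, and buffer_dma_capable() only mimics the shape of is_buffer_dma_capable().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ZONE_DMA32 0
#define ZONE_DMA   1

static uint64_t fake_alloc(int zone)
{
    /* pretend ZONE_DMA32 pages land just above 4GB for this device,
     * while ZONE_DMA pages land below 16MB */
    return zone == ZONE_DMA ? 0x00800000ULL : 0x100000000ULL;
}

static bool buffer_dma_capable(uint64_t mask, uint64_t addr, size_t size)
{
    return addr + size - 1 <= mask;
}

static uint64_t alloc_coherent(uint64_t dma_mask, size_t size)
{
    int zone = ZONE_DMA32;
    uint64_t addr;

again:
    addr = fake_alloc(zone);
    if (!buffer_dma_capable(dma_mask, addr, size)) {
        /* free the unusable buffer, then retry once from the low zone */
        if (zone != ZONE_DMA) {
            zone = ZONE_DMA;
            goto again;
        }
        return 0;                       /* give up */
    }
    return addr;
}

int main(void)
{
    uint64_t addr = alloc_coherent(0xffffffffULL, 4096);

    printf("coherent buffer at %#llx\n", (unsigned long long)addr);
    return 0;
}
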
@@ -241,147 +273,6 @@ int dma_supported(struct device *dev, u64 mask)
241} 273}
242EXPORT_SYMBOL(dma_supported); 274EXPORT_SYMBOL(dma_supported);
243 275
244/* Allocate DMA memory on node near device */
245static noinline struct page *
246dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
247{
248 int node;
249
250 node = dev_to_node(dev);
251
252 return alloc_pages_node(node, gfp, order);
253}
254
255/*
256 * Allocate memory for a coherent mapping.
257 */
258void *
259dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
260 gfp_t gfp)
261{
262 struct dma_mapping_ops *ops = get_dma_ops(dev);
263 void *memory = NULL;
264 struct page *page;
265 unsigned long dma_mask = 0;
266 dma_addr_t bus;
267 int noretry = 0;
268
269 /* ignore region specifiers */
270 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
271
272 if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
273 return memory;
274
275 if (!dev) {
276 dev = &fallback_dev;
277 gfp |= GFP_DMA;
278 }
279 dma_mask = dev->coherent_dma_mask;
280 if (dma_mask == 0)
281 dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
282
283 /* Device not DMA able */
284 if (dev->dma_mask == NULL)
285 return NULL;
286
287 /* Don't invoke OOM killer or retry in lower 16MB DMA zone */
288 if (gfp & __GFP_DMA)
289 noretry = 1;
290
291#ifdef CONFIG_X86_64
292 /* Why <=? Even when the mask is smaller than 4GB it is often
293 larger than 16MB and in this case we have a chance of
294 finding fitting memory in the next higher zone first. If
295 not retry with true GFP_DMA. -AK */
296 if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
297 gfp |= GFP_DMA32;
298 if (dma_mask < DMA_32BIT_MASK)
299 noretry = 1;
300 }
301#endif
302
303 again:
304 page = dma_alloc_pages(dev,
305 noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
306 if (page == NULL)
307 return NULL;
308
309 {
310 int high, mmu;
311 bus = page_to_phys(page);
312 memory = page_address(page);
313 high = (bus + size) >= dma_mask;
314 mmu = high;
315 if (force_iommu && !(gfp & GFP_DMA))
316 mmu = 1;
317 else if (high) {
318 free_pages((unsigned long)memory,
319 get_order(size));
320
321 /* Don't use the 16MB ZONE_DMA unless absolutely
322 needed. It's better to use remapping first. */
323 if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
324 gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
325 goto again;
326 }
327
328 /* Let low level make its own zone decisions */
329 gfp &= ~(GFP_DMA32|GFP_DMA);
330
331 if (ops->alloc_coherent)
332 return ops->alloc_coherent(dev, size,
333 dma_handle, gfp);
334 return NULL;
335 }
336
337 memset(memory, 0, size);
338 if (!mmu) {
339 *dma_handle = bus;
340 return memory;
341 }
342 }
343
344 if (ops->alloc_coherent) {
345 free_pages((unsigned long)memory, get_order(size));
346 gfp &= ~(GFP_DMA|GFP_DMA32);
347 return ops->alloc_coherent(dev, size, dma_handle, gfp);
348 }
349
350 if (ops->map_simple) {
351 *dma_handle = ops->map_simple(dev, virt_to_phys(memory),
352 size,
353 PCI_DMA_BIDIRECTIONAL);
354 if (*dma_handle != bad_dma_address)
355 return memory;
356 }
357
358 if (panic_on_overflow)
359 panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
360 (unsigned long)size);
361 free_pages((unsigned long)memory, get_order(size));
362 return NULL;
363}
364EXPORT_SYMBOL(dma_alloc_coherent);
365
366/*
367 * Unmap coherent memory.
368 * The caller must ensure that the device has finished accessing the mapping.
369 */
370void dma_free_coherent(struct device *dev, size_t size,
371 void *vaddr, dma_addr_t bus)
372{
373 struct dma_mapping_ops *ops = get_dma_ops(dev);
374
375 int order = get_order(size);
376 WARN_ON(irqs_disabled()); /* for portability */
377 if (dma_release_from_coherent(dev, order, vaddr))
378 return;
379 if (ops->unmap_single)
380 ops->unmap_single(dev, bus, size, 0);
381 free_pages((unsigned long)vaddr, order);
382}
383EXPORT_SYMBOL(dma_free_coherent);
384
385static int __init pci_iommu_init(void) 276static int __init pci_iommu_init(void)
386{ 277{
387 calgary_iommu_init(); 278 calgary_iommu_init();
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 49285f8fd4d5..145f1c83369f 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -27,8 +27,8 @@
27#include <linux/scatterlist.h> 27#include <linux/scatterlist.h>
28#include <linux/iommu-helper.h> 28#include <linux/iommu-helper.h>
29#include <linux/sysdev.h> 29#include <linux/sysdev.h>
30#include <linux/io.h>
30#include <asm/atomic.h> 31#include <asm/atomic.h>
31#include <asm/io.h>
32#include <asm/mtrr.h> 32#include <asm/mtrr.h>
33#include <asm/pgtable.h> 33#include <asm/pgtable.h>
34#include <asm/proto.h> 34#include <asm/proto.h>
@@ -80,9 +80,10 @@ AGPEXTERN int agp_memory_reserved;
80AGPEXTERN __u32 *agp_gatt_table; 80AGPEXTERN __u32 *agp_gatt_table;
81 81
82static unsigned long next_bit; /* protected by iommu_bitmap_lock */ 82static unsigned long next_bit; /* protected by iommu_bitmap_lock */
83static int need_flush; /* global flush state. set for each gart wrap */ 83static bool need_flush; /* global flush state. set for each gart wrap */
84 84
85static unsigned long alloc_iommu(struct device *dev, int size) 85static unsigned long alloc_iommu(struct device *dev, int size,
86 unsigned long align_mask)
86{ 87{
87 unsigned long offset, flags; 88 unsigned long offset, flags;
88 unsigned long boundary_size; 89 unsigned long boundary_size;
@@ -90,26 +91,27 @@ static unsigned long alloc_iommu(struct device *dev, int size)
90 91
91 base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev), 92 base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
92 PAGE_SIZE) >> PAGE_SHIFT; 93 PAGE_SIZE) >> PAGE_SHIFT;
93 boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 94 boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
94 PAGE_SIZE) >> PAGE_SHIFT; 95 PAGE_SIZE) >> PAGE_SHIFT;
95 96
96 spin_lock_irqsave(&iommu_bitmap_lock, flags); 97 spin_lock_irqsave(&iommu_bitmap_lock, flags);
97 offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit, 98 offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
98 size, base_index, boundary_size, 0); 99 size, base_index, boundary_size, align_mask);
99 if (offset == -1) { 100 if (offset == -1) {
100 need_flush = 1; 101 need_flush = true;
101 offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0, 102 offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
102 size, base_index, boundary_size, 0); 103 size, base_index, boundary_size,
104 align_mask);
103 } 105 }
104 if (offset != -1) { 106 if (offset != -1) {
105 next_bit = offset+size; 107 next_bit = offset+size;
106 if (next_bit >= iommu_pages) { 108 if (next_bit >= iommu_pages) {
107 next_bit = 0; 109 next_bit = 0;
108 need_flush = 1; 110 need_flush = true;
109 } 111 }
110 } 112 }
111 if (iommu_fullflush) 113 if (iommu_fullflush)
112 need_flush = 1; 114 need_flush = true;
113 spin_unlock_irqrestore(&iommu_bitmap_lock, flags); 115 spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
114 116
115 return offset; 117 return offset;
@@ -134,7 +136,7 @@ static void flush_gart(void)
134 spin_lock_irqsave(&iommu_bitmap_lock, flags); 136 spin_lock_irqsave(&iommu_bitmap_lock, flags);
135 if (need_flush) { 137 if (need_flush) {
136 k8_flush_garts(); 138 k8_flush_garts();
137 need_flush = 0; 139 need_flush = false;
138 } 140 }
139 spin_unlock_irqrestore(&iommu_bitmap_lock, flags); 141 spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
140} 142}
@@ -173,7 +175,8 @@ static void dump_leak(void)
173 iommu_leak_pages); 175 iommu_leak_pages);
174 for (i = 0; i < iommu_leak_pages; i += 2) { 176 for (i = 0; i < iommu_leak_pages; i += 2) {
175 printk(KERN_DEBUG "%lu: ", iommu_pages-i); 177 printk(KERN_DEBUG "%lu: ", iommu_pages-i);
176 printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0); 178 printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
179 0);
177 printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' '); 180 printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
178 } 181 }
179 printk(KERN_DEBUG "\n"); 182 printk(KERN_DEBUG "\n");
@@ -212,34 +215,24 @@ static void iommu_full(struct device *dev, size_t size, int dir)
212static inline int 215static inline int
213need_iommu(struct device *dev, unsigned long addr, size_t size) 216need_iommu(struct device *dev, unsigned long addr, size_t size)
214{ 217{
215 u64 mask = *dev->dma_mask; 218 return force_iommu ||
216 int high = addr + size > mask; 219 !is_buffer_dma_capable(*dev->dma_mask, addr, size);
217 int mmu = high;
218
219 if (force_iommu)
220 mmu = 1;
221
222 return mmu;
223} 220}
224 221
225static inline int 222static inline int
226nonforced_iommu(struct device *dev, unsigned long addr, size_t size) 223nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
227{ 224{
228 u64 mask = *dev->dma_mask; 225 return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
229 int high = addr + size > mask;
230 int mmu = high;
231
232 return mmu;
233} 226}
234 227
235/* Map a single continuous physical area into the IOMMU. 228/* Map a single continuous physical area into the IOMMU.
236 * Caller needs to check if the iommu is needed and flush. 229 * Caller needs to check if the iommu is needed and flush.
237 */ 230 */
238static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, 231static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
239 size_t size, int dir) 232 size_t size, int dir, unsigned long align_mask)
240{ 233{
241 unsigned long npages = iommu_num_pages(phys_mem, size); 234 unsigned long npages = iommu_num_pages(phys_mem, size);
242 unsigned long iommu_page = alloc_iommu(dev, npages); 235 unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
243 int i; 236 int i;
244 237
245 if (iommu_page == -1) { 238 if (iommu_page == -1) {
@@ -259,16 +252,6 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
259 return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK); 252 return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
260} 253}
261 254
262static dma_addr_t
263gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
264{
265 dma_addr_t map = dma_map_area(dev, paddr, size, dir);
266
267 flush_gart();
268
269 return map;
270}
271
272/* Map a single area into the IOMMU */ 255/* Map a single area into the IOMMU */
273static dma_addr_t 256static dma_addr_t
274gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir) 257gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
@@ -276,12 +259,13 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
276 unsigned long bus; 259 unsigned long bus;
277 260
278 if (!dev) 261 if (!dev)
279 dev = &fallback_dev; 262 dev = &x86_dma_fallback_dev;
280 263
281 if (!need_iommu(dev, paddr, size)) 264 if (!need_iommu(dev, paddr, size))
282 return paddr; 265 return paddr;
283 266
284 bus = gart_map_simple(dev, paddr, size, dir); 267 bus = dma_map_area(dev, paddr, size, dir, 0);
268 flush_gart();
285 269
286 return bus; 270 return bus;
287} 271}
@@ -340,7 +324,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
340 unsigned long addr = sg_phys(s); 324 unsigned long addr = sg_phys(s);
341 325
342 if (nonforced_iommu(dev, addr, s->length)) { 326 if (nonforced_iommu(dev, addr, s->length)) {
343 addr = dma_map_area(dev, addr, s->length, dir); 327 addr = dma_map_area(dev, addr, s->length, dir, 0);
344 if (addr == bad_dma_address) { 328 if (addr == bad_dma_address) {
345 if (i > 0) 329 if (i > 0)
346 gart_unmap_sg(dev, sg, i, dir); 330 gart_unmap_sg(dev, sg, i, dir);
@@ -362,7 +346,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
362 int nelems, struct scatterlist *sout, 346 int nelems, struct scatterlist *sout,
363 unsigned long pages) 347 unsigned long pages)
364{ 348{
365 unsigned long iommu_start = alloc_iommu(dev, pages); 349 unsigned long iommu_start = alloc_iommu(dev, pages, 0);
366 unsigned long iommu_page = iommu_start; 350 unsigned long iommu_page = iommu_start;
367 struct scatterlist *s; 351 struct scatterlist *s;
368 int i; 352 int i;
@@ -427,7 +411,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
427 return 0; 411 return 0;
428 412
429 if (!dev) 413 if (!dev)
430 dev = &fallback_dev; 414 dev = &x86_dma_fallback_dev;
431 415
432 out = 0; 416 out = 0;
433 start = 0; 417 start = 0;
@@ -499,6 +483,46 @@ error:
499 return 0; 483 return 0;
500} 484}
501 485
486/* allocate and map a coherent mapping */
487static void *
488gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
489 gfp_t flag)
490{
491 dma_addr_t paddr;
492 unsigned long align_mask;
493 struct page *page;
494
495 if (force_iommu && !(flag & GFP_DMA)) {
496 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
497 page = alloc_pages(flag | __GFP_ZERO, get_order(size));
498 if (!page)
499 return NULL;
500
501 align_mask = (1UL << get_order(size)) - 1;
502 paddr = dma_map_area(dev, page_to_phys(page), size,
503 DMA_BIDIRECTIONAL, align_mask);
504
505 flush_gart();
506 if (paddr != bad_dma_address) {
507 *dma_addr = paddr;
508 return page_address(page);
509 }
510 __free_pages(page, get_order(size));
511 } else
512 return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
513
514 return NULL;
515}
516
517/* free a coherent mapping */
518static void
519gart_free_coherent(struct device *dev, size_t size, void *vaddr,
520 dma_addr_t dma_addr)
521{
522 gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
523 free_pages((unsigned long)vaddr, get_order(size));
524}
525
502static int no_agp; 526static int no_agp;
503 527
504static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) 528static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
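
gart_alloc_coherent() above passes align_mask = (1UL << get_order(size)) - 1 to dma_map_area() so a forced IOMMU mapping comes back naturally aligned to the allocation's order, which is what dma_alloc_coherent() callers expect. The quick model below shows what that mask does to a candidate slot; get_order() here is a plain reimplementation for the demo, not the kernel macro.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int get_order(unsigned long size)      /* model of the kernel macro */
{
    int order = 0;

    size = (size - 1) >> PAGE_SHIFT;
    while (size) {
        order++;
        size >>= 1;
    }
    return order;
}

int main(void)
{
    unsigned long size = 3 * PAGE_SIZE + 100;  /* needs order 2 (4 pages) */
    unsigned long align_mask = (1UL << get_order(size)) - 1;

    /* a candidate slot index from the IOMMU bitmap */
    unsigned long slot = 13;
    unsigned long aligned_slot = (slot + align_mask) & ~align_mask;

    printf("order=%d align_mask=%#lx slot %lu -> %lu\n",
           get_order(size), align_mask, slot, aligned_slot);
    return 0;
}
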
@@ -626,7 +650,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
626 struct pci_dev *dev; 650 struct pci_dev *dev;
627 void *gatt; 651 void *gatt;
628 int i, error; 652 int i, error;
629 unsigned long start_pfn, end_pfn;
630 653
631 printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); 654 printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
632 aper_size = aper_base = info->aper_size = 0; 655 aper_size = aper_base = info->aper_size = 0;
@@ -650,13 +673,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
650 info->aper_size = aper_size >> 20; 673 info->aper_size = aper_size >> 20;
651 674
652 gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32); 675 gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
653 gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size)); 676 gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
677 get_order(gatt_size));
654 if (!gatt) 678 if (!gatt)
655 panic("Cannot allocate GATT table"); 679 panic("Cannot allocate GATT table");
656 if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT)) 680 if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
657 panic("Could not set GART PTEs to uncacheable pages"); 681 panic("Could not set GART PTEs to uncacheable pages");
658 682
659 memset(gatt, 0, gatt_size);
660 agp_gatt_table = gatt; 683 agp_gatt_table = gatt;
661 684
662 enable_gart_translations(); 685 enable_gart_translations();
@@ -665,19 +688,14 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
665 if (!error) 688 if (!error)
666 error = sysdev_register(&device_gart); 689 error = sysdev_register(&device_gart);
667 if (error) 690 if (error)
668 panic("Could not register gart_sysdev -- would corrupt data on next suspend"); 691 panic("Could not register gart_sysdev -- "
692 "would corrupt data on next suspend");
669 693
670 flush_gart(); 694 flush_gart();
671 695
672 printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n", 696 printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
673 aper_base, aper_size>>10); 697 aper_base, aper_size>>10);
674 698
675 /* need to map that range */
676 end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
677 if (end_pfn > max_low_pfn_mapped) {
678 start_pfn = (aper_base>>PAGE_SHIFT);
679 init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
680 }
681 return 0; 699 return 0;
682 700
683 nommu: 701 nommu:
@@ -687,20 +705,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
687 return -1; 705 return -1;
688} 706}
689 707
690extern int agp_amd64_init(void);
691
692static struct dma_mapping_ops gart_dma_ops = { 708static struct dma_mapping_ops gart_dma_ops = {
693 .map_single = gart_map_single, 709 .map_single = gart_map_single,
694 .map_simple = gart_map_simple,
695 .unmap_single = gart_unmap_single, 710 .unmap_single = gart_unmap_single,
696 .sync_single_for_cpu = NULL,
697 .sync_single_for_device = NULL,
698 .sync_single_range_for_cpu = NULL,
699 .sync_single_range_for_device = NULL,
700 .sync_sg_for_cpu = NULL,
701 .sync_sg_for_device = NULL,
702 .map_sg = gart_map_sg, 711 .map_sg = gart_map_sg,
703 .unmap_sg = gart_unmap_sg, 712 .unmap_sg = gart_unmap_sg,
713 .alloc_coherent = gart_alloc_coherent,
714 .free_coherent = gart_free_coherent,
704}; 715};
705 716
706void gart_iommu_shutdown(void) 717void gart_iommu_shutdown(void)
@@ -727,7 +738,8 @@ void __init gart_iommu_init(void)
727{ 738{
728 struct agp_kern_info info; 739 struct agp_kern_info info;
729 unsigned long iommu_start; 740 unsigned long iommu_start;
730 unsigned long aper_size; 741 unsigned long aper_base, aper_size;
742 unsigned long start_pfn, end_pfn;
731 unsigned long scratch; 743 unsigned long scratch;
732 long i; 744 long i;
733 745
@@ -759,30 +771,35 @@ void __init gart_iommu_init(void)
759 (no_agp && init_k8_gatt(&info) < 0)) { 771 (no_agp && init_k8_gatt(&info) < 0)) {
760 if (max_pfn > MAX_DMA32_PFN) { 772 if (max_pfn > MAX_DMA32_PFN) {
761 printk(KERN_WARNING "More than 4GB of memory " 773 printk(KERN_WARNING "More than 4GB of memory "
762 "but GART IOMMU not available.\n" 774 "but GART IOMMU not available.\n");
763 KERN_WARNING "falling back to iommu=soft.\n"); 775 printk(KERN_WARNING "falling back to iommu=soft.\n");
764 } 776 }
765 return; 777 return;
766 } 778 }
767 779
780 /* need to map that range */
781 aper_size = info.aper_size << 20;
782 aper_base = info.aper_base;
783 end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
784 if (end_pfn > max_low_pfn_mapped) {
785 start_pfn = (aper_base>>PAGE_SHIFT);
786 init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
787 }
788
768 printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); 789 printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
769 aper_size = info.aper_size * 1024 * 1024;
770 iommu_size = check_iommu_size(info.aper_base, aper_size); 790 iommu_size = check_iommu_size(info.aper_base, aper_size);
771 iommu_pages = iommu_size >> PAGE_SHIFT; 791 iommu_pages = iommu_size >> PAGE_SHIFT;
772 792
773 iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL, 793 iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
774 get_order(iommu_pages/8)); 794 get_order(iommu_pages/8));
775 if (!iommu_gart_bitmap) 795 if (!iommu_gart_bitmap)
776 panic("Cannot allocate iommu bitmap\n"); 796 panic("Cannot allocate iommu bitmap\n");
777 memset(iommu_gart_bitmap, 0, iommu_pages/8);
778 797
779#ifdef CONFIG_IOMMU_LEAK 798#ifdef CONFIG_IOMMU_LEAK
780 if (leak_trace) { 799 if (leak_trace) {
781 iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL, 800 iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
782 get_order(iommu_pages*sizeof(void *))); 801 get_order(iommu_pages*sizeof(void *)));
783 if (iommu_leak_tab) 802 if (!iommu_leak_tab)
784 memset(iommu_leak_tab, 0, iommu_pages * 8);
785 else
786 printk(KERN_DEBUG 803 printk(KERN_DEBUG
787 "PCI-DMA: Cannot allocate leak trace area\n"); 804 "PCI-DMA: Cannot allocate leak trace area\n");
788 } 805 }
@@ -792,7 +809,7 @@ void __init gart_iommu_init(void)
792 * Out of IOMMU space handling. 809 * Out of IOMMU space handling.
793 * Reserve some invalid pages at the beginning of the GART. 810 * Reserve some invalid pages at the beginning of the GART.
794 */ 811 */
795 set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES); 812 iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
796 813
797 agp_memory_reserved = iommu_size; 814 agp_memory_reserved = iommu_size;
798 printk(KERN_INFO 815 printk(KERN_INFO
@@ -850,7 +867,8 @@ void __init gart_parse_options(char *p)
850 if (!strncmp(p, "leak", 4)) { 867 if (!strncmp(p, "leak", 4)) {
851 leak_trace = 1; 868 leak_trace = 1;
852 p += 4; 869 p += 4;
853 if (*p == '=') ++p; 870 if (*p == '=')
871 ++p;
854 if (isdigit(*p) && get_option(&p, &arg)) 872 if (isdigit(*p) && get_option(&p, &arg))
855 iommu_leak_pages = arg; 873 iommu_leak_pages = arg;
856 } 874 }
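
Aside on the coherent-mapping hunk above: gart_alloc_coherent() passes dma_map_area() an alignment mask of (1UL << get_order(size)) - 1, so the coherent buffer's IOMMU pages end up aligned to the buffer's own page order. The standalone userspace sketch below (not kernel code; PAGE_SHIFT and the local get_order() helper are assumptions modelling a 4 KiB-page system) shows how that mask falls out of the allocation size.

/*
 * Standalone sketch: page-order alignment mask for a given allocation
 * size, mirroring the expression used in the coherent GART path above.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order n such that (PAGE_SIZE << n) covers size bytes. */
static unsigned int get_order(size_t size)
{
        unsigned int order = 0;
        size_t pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        size_t sizes[] = { 4096, 8192, 65536, 100000 };

        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                unsigned int order = get_order(sizes[i]);
                unsigned long align_mask = (1UL << order) - 1;

                /* An IOMMU page index with these low bits clear is
                 * aligned to the allocation's own size in pages. */
                printf("size %zu -> order %u, align_mask 0x%lx\n",
                       sizes[i], order, align_mask);
        }
        return 0;
}
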
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 3f91f71cdc3e..c70ab5a5d4c8 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -14,7 +14,7 @@
14static int 14static int
15check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) 15check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
16{ 16{
17 if (hwdev && bus + size > *hwdev->dma_mask) { 17 if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) {
18 if (*hwdev->dma_mask >= DMA_32BIT_MASK) 18 if (*hwdev->dma_mask >= DMA_32BIT_MASK)
19 printk(KERN_ERR 19 printk(KERN_ERR
20 "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", 20 "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
@@ -72,7 +72,15 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
72 return nents; 72 return nents;
73} 73}
74 74
75static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
76 dma_addr_t dma_addr)
77{
78 free_pages((unsigned long)vaddr, get_order(size));
79}
80
75struct dma_mapping_ops nommu_dma_ops = { 81struct dma_mapping_ops nommu_dma_ops = {
82 .alloc_coherent = dma_generic_alloc_coherent,
83 .free_coherent = nommu_free_coherent,
76 .map_single = nommu_map_single, 84 .map_single = nommu_map_single,
77 .map_sg = nommu_map_sg, 85 .map_sg = nommu_map_sg,
78 .is_phys = 1, 86 .is_phys = 1,
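
Aside on the check_addr() hunk above: the open-coded overflow test `bus + size > *hwdev->dma_mask` is replaced by a named helper. The standalone sketch below mirrors the test being replaced; the local buffer_dma_capable() is a stand-in, and the real helper's exact definition is not shown in this diff.

/*
 * Standalone sketch: does a bus address range fit under a device's
 * DMA mask? This mirrors the open-coded check the hunk removes.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

static bool buffer_dma_capable(uint64_t mask, uint64_t bus, size_t size)
{
        /* Same shape as the replaced test: reject once bus + size
         * would exceed what the mask can address. */
        return bus + size <= mask;
}

int main(void)
{
        const uint64_t dma32_mask = 0xffffffffULL;

        /* 64 KiB buffer ending below 4 GiB: accepted. */
        printf("%d\n", buffer_dma_capable(dma32_mask, 0xfffe0000ULL, 0x10000));
        /* 64 KiB buffer crossing the 4 GiB boundary: rejected. */
        printf("%d\n", buffer_dma_capable(dma32_mask, 0xffff0000ULL, 0x10000));
        return 0;
}
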
diff --git a/arch/x86/kernel/pcspeaker.c b/arch/x86/kernel/pcspeaker.c
index bc1f2d3ea277..a311ffcaad16 100644
--- a/arch/x86/kernel/pcspeaker.c
+++ b/arch/x86/kernel/pcspeaker.c
@@ -1,20 +1,13 @@
1#include <linux/platform_device.h> 1#include <linux/platform_device.h>
2#include <linux/errno.h> 2#include <linux/err.h>
3#include <linux/init.h> 3#include <linux/init.h>
4 4
5static __init int add_pcspkr(void) 5static __init int add_pcspkr(void)
6{ 6{
7 struct platform_device *pd; 7 struct platform_device *pd;
8 int ret;
9 8
10 pd = platform_device_alloc("pcspkr", -1); 9 pd = platform_device_register_simple("pcspkr", -1, NULL, 0);
11 if (!pd)
12 return -ENOMEM;
13 10
14 ret = platform_device_add(pd); 11 return IS_ERR(pd) ? PTR_ERR(pd) : 0;
15 if (ret)
16 platform_device_put(pd);
17
18 return ret;
19} 12}
20device_initcall(add_pcspkr); 13device_initcall(add_pcspkr);
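
Aside on the pcspeaker.c hunk above: platform_device_register_simple() reports failure through its return pointer, which is why the new code only needs the IS_ERR()/PTR_ERR() pair. The sketch below is a self-contained imitation of that pointer-encoded-error convention; err_ptr(), is_err() and ptr_err() are local stand-ins, not the kernel macros.

/*
 * Standalone sketch: returning either a valid pointer or a small
 * negative errno encoded in the pointer value itself.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *err_ptr(long error)        /* local stand-in for ERR_PTR() */
{
        return (void *)error;
}

static int is_err(const void *ptr)      /* local stand-in for IS_ERR() */
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err(const void *ptr)    /* local stand-in for PTR_ERR() */
{
        return (long)ptr;
}

/* A registration helper that reports failure through its return pointer. */
static void *register_simple(int should_fail)
{
        static int dummy_device;

        return should_fail ? err_ptr(-ENOMEM) : (void *)&dummy_device;
}

int main(void)
{
        void *pd;

        pd = register_simple(0);
        printf("success path -> %d\n", is_err(pd) ? (int)ptr_err(pd) : 0);

        pd = register_simple(1);
        printf("failure path -> %d\n", is_err(pd) ? (int)ptr_err(pd) : 0);
        return 0;
}
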
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9f94bb1c8117..c622772744d8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -184,7 +184,8 @@ static void mwait_idle(void)
184static void poll_idle(void) 184static void poll_idle(void)
185{ 185{
186 local_irq_enable(); 186 local_irq_enable();
187 cpu_relax(); 187 while (!need_resched())
188 cpu_relax();
188} 189}
189 190
190/* 191/*
@@ -245,6 +246,14 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
245 return 1; 246 return 1;
246} 247}
247 248
249static cpumask_t c1e_mask = CPU_MASK_NONE;
250static int c1e_detected;
251
252void c1e_remove_cpu(int cpu)
253{
254 cpu_clear(cpu, c1e_mask);
255}
256
248/* 257/*
249 * C1E aware idle routine. We check for C1E active in the interrupt 258 * C1E aware idle routine. We check for C1E active in the interrupt
250 * pending message MSR. If we detect C1E, then we handle it the same 259 * pending message MSR. If we detect C1E, then we handle it the same
@@ -252,9 +261,6 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
252 */ 261 */
253static void c1e_idle(void) 262static void c1e_idle(void)
254{ 263{
255 static cpumask_t c1e_mask = CPU_MASK_NONE;
256 static int c1e_detected;
257
258 if (need_resched()) 264 if (need_resched())
259 return; 265 return;
260 266
@@ -264,8 +270,10 @@ static void c1e_idle(void)
264 rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); 270 rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
265 if (lo & K8_INTP_C1E_ACTIVE_MASK) { 271 if (lo & K8_INTP_C1E_ACTIVE_MASK) {
266 c1e_detected = 1; 272 c1e_detected = 1;
267 mark_tsc_unstable("TSC halt in C1E"); 273 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
268 printk(KERN_INFO "System has C1E enabled\n"); 274 mark_tsc_unstable("TSC halt in AMD C1E");
275 printk(KERN_INFO "System has AMD C1E enabled\n");
276 set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
269 } 277 }
270 } 278 }
271 279
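
Aside on the poll_idle() hunk above: a single cpu_relax() returns to the caller immediately, so the fix keeps spinning until need_resched() reports pending work. The standalone sketch below shows the same loop shape in userspace; the alarm-driven flag standing in for need_resched() is purely an illustration.

/*
 * Standalone sketch: spin with a CPU relax hint until work is pending,
 * rather than relaxing once and returning.
 */
#include <stdio.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t work_pending;

static void on_alarm(int sig)
{
        (void)sig;
        work_pending = 1;       /* plays the role of need_resched() */
}

static void cpu_relax(void)
{
#if defined(__x86_64__) || defined(__i386__)
        __asm__ __volatile__("pause");  /* be polite to the sibling thread */
#endif
}

int main(void)
{
        signal(SIGALRM, on_alarm);
        alarm(1);

        while (!work_pending)   /* keep polling until work shows up */
                cpu_relax();

        printf("woke up: work is pending\n");
        return 0;
}
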
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 1962d27c6b82..205188db9626 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -37,6 +37,7 @@
37#include <linux/tick.h> 37#include <linux/tick.h>
38#include <linux/percpu.h> 38#include <linux/percpu.h>
39#include <linux/prctl.h> 39#include <linux/prctl.h>
40#include <linux/dmi.h>
40 41
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
42#include <asm/pgtable.h> 43#include <asm/pgtable.h>
@@ -55,6 +56,7 @@
55#include <asm/tlbflush.h> 56#include <asm/tlbflush.h>
56#include <asm/cpu.h> 57#include <asm/cpu.h>
57#include <asm/kdebug.h> 58#include <asm/kdebug.h>
59#include <asm/idle.h>
58#include <asm/syscalls.h> 60#include <asm/syscalls.h>
59#include <asm/smp.h> 61#include <asm/smp.h>
60 62
@@ -90,6 +92,7 @@ static void cpu_exit_clear(void)
90 cpu_clear(cpu, cpu_callin_map); 92 cpu_clear(cpu, cpu_callin_map);
91 93
92 numa_remove_cpu(cpu); 94 numa_remove_cpu(cpu);
95 c1e_remove_cpu(cpu);
93} 96}
94 97
95/* We don't actually take CPU down, just spin without interrupts. */ 98/* We don't actually take CPU down, just spin without interrupts. */
@@ -161,6 +164,7 @@ void __show_registers(struct pt_regs *regs, int all)
161 unsigned long d0, d1, d2, d3, d6, d7; 164 unsigned long d0, d1, d2, d3, d6, d7;
162 unsigned long sp; 165 unsigned long sp;
163 unsigned short ss, gs; 166 unsigned short ss, gs;
167 const char *board;
164 168
165 if (user_mode_vm(regs)) { 169 if (user_mode_vm(regs)) {
166 sp = regs->sp; 170 sp = regs->sp;
@@ -173,11 +177,15 @@ void __show_registers(struct pt_regs *regs, int all)
173 } 177 }
174 178
175 printk("\n"); 179 printk("\n");
176 printk("Pid: %d, comm: %s %s (%s %.*s)\n", 180
181 board = dmi_get_system_info(DMI_PRODUCT_NAME);
182 if (!board)
183 board = "";
184 printk("Pid: %d, comm: %s %s (%s %.*s) %s\n",
177 task_pid_nr(current), current->comm, 185 task_pid_nr(current), current->comm,
178 print_tainted(), init_utsname()->release, 186 print_tainted(), init_utsname()->release,
179 (int)strcspn(init_utsname()->version, " "), 187 (int)strcspn(init_utsname()->version, " "),
180 init_utsname()->version); 188 init_utsname()->version, board);
181 189
182 printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", 190 printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
183 (u16)regs->cs, regs->ip, regs->flags, 191 (u16)regs->cs, regs->ip, regs->flags,
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 08a9df08adba..2a8ccb9238b4 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -37,11 +37,11 @@
37#include <linux/kdebug.h> 37#include <linux/kdebug.h>
38#include <linux/tick.h> 38#include <linux/tick.h>
39#include <linux/prctl.h> 39#include <linux/prctl.h>
40#include <linux/uaccess.h>
41#include <linux/io.h>
40 42
41#include <asm/uaccess.h>
42#include <asm/pgtable.h> 43#include <asm/pgtable.h>
43#include <asm/system.h> 44#include <asm/system.h>
44#include <asm/io.h>
45#include <asm/processor.h> 45#include <asm/processor.h>
46#include <asm/i387.h> 46#include <asm/i387.h>
47#include <asm/mmu_context.h> 47#include <asm/mmu_context.h>
@@ -89,11 +89,13 @@ void exit_idle(void)
89#ifdef CONFIG_HOTPLUG_CPU 89#ifdef CONFIG_HOTPLUG_CPU
90DECLARE_PER_CPU(int, cpu_state); 90DECLARE_PER_CPU(int, cpu_state);
91 91
92#include <asm/nmi.h> 92#include <linux/nmi.h>
93/* We halt the CPU with physical CPU hotplug */ 93/* We halt the CPU with physical CPU hotplug */
94static inline void play_dead(void) 94static inline void play_dead(void)
95{ 95{
96 idle_task_exit(); 96 idle_task_exit();
97 c1e_remove_cpu(raw_smp_processor_id());
98
97 mb(); 99 mb();
98 /* Ack it */ 100 /* Ack it */
99 __get_cpu_var(cpu_state) = CPU_DEAD; 101 __get_cpu_var(cpu_state) = CPU_DEAD;
@@ -152,7 +154,7 @@ void cpu_idle(void)
152} 154}
153 155
154/* Prints also some state that isn't saved in the pt_regs */ 156/* Prints also some state that isn't saved in the pt_regs */
155void __show_regs(struct pt_regs * regs) 157void __show_regs(struct pt_regs *regs)
156{ 158{
157 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; 159 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
158 unsigned long d0, d1, d2, d3, d6, d7; 160 unsigned long d0, d1, d2, d3, d6, d7;
@@ -161,59 +163,61 @@ void __show_regs(struct pt_regs * regs)
161 163
162 printk("\n"); 164 printk("\n");
163 print_modules(); 165 print_modules();
164 printk("Pid: %d, comm: %.20s %s %s %.*s\n", 166 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
165 current->pid, current->comm, print_tainted(), 167 current->pid, current->comm, print_tainted(),
166 init_utsname()->release, 168 init_utsname()->release,
167 (int)strcspn(init_utsname()->version, " "), 169 (int)strcspn(init_utsname()->version, " "),
168 init_utsname()->version); 170 init_utsname()->version);
169 printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); 171 printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
170 printk_address(regs->ip, 1); 172 printk_address(regs->ip, 1);
171 printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp, 173 printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
172 regs->flags); 174 regs->sp, regs->flags);
173 printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", 175 printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
174 regs->ax, regs->bx, regs->cx); 176 regs->ax, regs->bx, regs->cx);
175 printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", 177 printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
176 regs->dx, regs->si, regs->di); 178 regs->dx, regs->si, regs->di);
177 printk("RBP: %016lx R08: %016lx R09: %016lx\n", 179 printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
178 regs->bp, regs->r8, regs->r9); 180 regs->bp, regs->r8, regs->r9);
179 printk("R10: %016lx R11: %016lx R12: %016lx\n", 181 printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
180 regs->r10, regs->r11, regs->r12); 182 regs->r10, regs->r11, regs->r12);
181 printk("R13: %016lx R14: %016lx R15: %016lx\n", 183 printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
182 regs->r13, regs->r14, regs->r15); 184 regs->r13, regs->r14, regs->r15);
183 185
184 asm("movl %%ds,%0" : "=r" (ds)); 186 asm("movl %%ds,%0" : "=r" (ds));
185 asm("movl %%cs,%0" : "=r" (cs)); 187 asm("movl %%cs,%0" : "=r" (cs));
186 asm("movl %%es,%0" : "=r" (es)); 188 asm("movl %%es,%0" : "=r" (es));
187 asm("movl %%fs,%0" : "=r" (fsindex)); 189 asm("movl %%fs,%0" : "=r" (fsindex));
188 asm("movl %%gs,%0" : "=r" (gsindex)); 190 asm("movl %%gs,%0" : "=r" (gsindex));
189 191
190 rdmsrl(MSR_FS_BASE, fs); 192 rdmsrl(MSR_FS_BASE, fs);
191 rdmsrl(MSR_GS_BASE, gs); 193 rdmsrl(MSR_GS_BASE, gs);
192 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 194 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
193 195
194 cr0 = read_cr0(); 196 cr0 = read_cr0();
195 cr2 = read_cr2(); 197 cr2 = read_cr2();
196 cr3 = read_cr3(); 198 cr3 = read_cr3();
197 cr4 = read_cr4(); 199 cr4 = read_cr4();
198 200
199 printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 201 printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
200 fs,fsindex,gs,gsindex,shadowgs); 202 fs, fsindex, gs, gsindex, shadowgs);
201 printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); 203 printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
202 printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); 204 es, cr0);
205 printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
206 cr4);
203 207
204 get_debugreg(d0, 0); 208 get_debugreg(d0, 0);
205 get_debugreg(d1, 1); 209 get_debugreg(d1, 1);
206 get_debugreg(d2, 2); 210 get_debugreg(d2, 2);
207 printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); 211 printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
208 get_debugreg(d3, 3); 212 get_debugreg(d3, 3);
209 get_debugreg(d6, 6); 213 get_debugreg(d6, 6);
210 get_debugreg(d7, 7); 214 get_debugreg(d7, 7);
211 printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); 215 printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
212} 216}
213 217
214void show_regs(struct pt_regs *regs) 218void show_regs(struct pt_regs *regs)
215{ 219{
216 printk("CPU %d:", smp_processor_id()); 220 printk(KERN_INFO "CPU %d:", smp_processor_id());
217 __show_regs(regs); 221 __show_regs(regs);
218 show_trace(NULL, regs, (void *)(regs + 1), regs->bp); 222 show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
219} 223}
@@ -322,10 +326,10 @@ void prepare_to_copy(struct task_struct *tsk)
322 326
323int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, 327int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
324 unsigned long unused, 328 unsigned long unused,
325 struct task_struct * p, struct pt_regs * regs) 329 struct task_struct *p, struct pt_regs *regs)
326{ 330{
327 int err; 331 int err;
328 struct pt_regs * childregs; 332 struct pt_regs *childregs;
329 struct task_struct *me = current; 333 struct task_struct *me = current;
330 334
331 childregs = ((struct pt_regs *) 335 childregs = ((struct pt_regs *)
@@ -370,10 +374,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
370 if (test_thread_flag(TIF_IA32)) 374 if (test_thread_flag(TIF_IA32))
371 err = do_set_thread_area(p, -1, 375 err = do_set_thread_area(p, -1,
372 (struct user_desc __user *)childregs->si, 0); 376 (struct user_desc __user *)childregs->si, 0);
373 else 377 else
374#endif 378#endif
375 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 379 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
376 if (err) 380 if (err)
377 goto out; 381 goto out;
378 } 382 }
379 err = 0; 383 err = 0;
@@ -566,7 +570,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
566 unsigned fsindex, gsindex; 570 unsigned fsindex, gsindex;
567 571
568 /* we're going to use this soon, after a few expensive things */ 572 /* we're going to use this soon, after a few expensive things */
569 if (next_p->fpu_counter>5) 573 if (next_p->fpu_counter > 5)
570 prefetch(next->xstate); 574 prefetch(next->xstate);
571 575
572 /* 576 /*
@@ -574,13 +578,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
574 */ 578 */
575 load_sp0(tss, next); 579 load_sp0(tss, next);
576 580
577 /* 581 /*
578 * Switch DS and ES. 582 * Switch DS and ES.
579 * This won't pick up thread selector changes, but I guess that is ok. 583 * This won't pick up thread selector changes, but I guess that is ok.
580 */ 584 */
581 savesegment(es, prev->es); 585 savesegment(es, prev->es);
582 if (unlikely(next->es | prev->es)) 586 if (unlikely(next->es | prev->es))
583 loadsegment(es, next->es); 587 loadsegment(es, next->es);
584 588
585 savesegment(ds, prev->ds); 589 savesegment(ds, prev->ds);
586 if (unlikely(next->ds | prev->ds)) 590 if (unlikely(next->ds | prev->ds))
@@ -606,7 +610,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
606 */ 610 */
607 arch_leave_lazy_cpu_mode(); 611 arch_leave_lazy_cpu_mode();
608 612
609 /* 613 /*
610 * Switch FS and GS. 614 * Switch FS and GS.
611 * 615 *
612 * Segment register != 0 always requires a reload. Also 616 * Segment register != 0 always requires a reload. Also
@@ -615,13 +619,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
615 */ 619 */
616 if (unlikely(fsindex | next->fsindex | prev->fs)) { 620 if (unlikely(fsindex | next->fsindex | prev->fs)) {
617 loadsegment(fs, next->fsindex); 621 loadsegment(fs, next->fsindex);
618 /* 622 /*
619 * Check if the user used a selector != 0; if yes 623 * Check if the user used a selector != 0; if yes
620 * clear 64bit base, since overloaded base is always 624 * clear 64bit base, since overloaded base is always
621 * mapped to the Null selector 625 * mapped to the Null selector
622 */ 626 */
623 if (fsindex) 627 if (fsindex)
624 prev->fs = 0; 628 prev->fs = 0;
625 } 629 }
626 /* when next process has a 64bit base use it */ 630 /* when next process has a 64bit base use it */
627 if (next->fs) 631 if (next->fs)
@@ -631,7 +635,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
631 if (unlikely(gsindex | next->gsindex | prev->gs)) { 635 if (unlikely(gsindex | next->gsindex | prev->gs)) {
632 load_gs_index(next->gsindex); 636 load_gs_index(next->gsindex);
633 if (gsindex) 637 if (gsindex)
634 prev->gs = 0; 638 prev->gs = 0;
635 } 639 }
636 if (next->gs) 640 if (next->gs)
637 wrmsrl(MSR_KERNEL_GS_BASE, next->gs); 641 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
@@ -640,12 +644,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
640 /* Must be after DS reload */ 644 /* Must be after DS reload */
641 unlazy_fpu(prev_p); 645 unlazy_fpu(prev_p);
642 646
643 /* 647 /*
644 * Switch the PDA and FPU contexts. 648 * Switch the PDA and FPU contexts.
645 */ 649 */
646 prev->usersp = read_pda(oldrsp); 650 prev->usersp = read_pda(oldrsp);
647 write_pda(oldrsp, next->usersp); 651 write_pda(oldrsp, next->usersp);
648 write_pda(pcurrent, next_p); 652 write_pda(pcurrent, next_p);
649 653
650 write_pda(kernelstack, 654 write_pda(kernelstack,
651 (unsigned long)task_stack_page(next_p) + 655 (unsigned long)task_stack_page(next_p) +
@@ -686,7 +690,7 @@ long sys_execve(char __user *name, char __user * __user *argv,
686 char __user * __user *envp, struct pt_regs *regs) 690 char __user * __user *envp, struct pt_regs *regs)
687{ 691{
688 long error; 692 long error;
689 char * filename; 693 char *filename;
690 694
691 filename = getname(name); 695 filename = getname(name);
692 error = PTR_ERR(filename); 696 error = PTR_ERR(filename);
@@ -744,55 +748,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs)
744unsigned long get_wchan(struct task_struct *p) 748unsigned long get_wchan(struct task_struct *p)
745{ 749{
746 unsigned long stack; 750 unsigned long stack;
747 u64 fp,ip; 751 u64 fp, ip;
748 int count = 0; 752 int count = 0;
749 753
750 if (!p || p == current || p->state==TASK_RUNNING) 754 if (!p || p == current || p->state == TASK_RUNNING)
751 return 0; 755 return 0;
752 stack = (unsigned long)task_stack_page(p); 756 stack = (unsigned long)task_stack_page(p);
753 if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) 757 if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
754 return 0; 758 return 0;
755 fp = *(u64 *)(p->thread.sp); 759 fp = *(u64 *)(p->thread.sp);
756 do { 760 do {
757 if (fp < (unsigned long)stack || 761 if (fp < (unsigned long)stack ||
758 fp > (unsigned long)stack+THREAD_SIZE) 762 fp > (unsigned long)stack+THREAD_SIZE)
759 return 0; 763 return 0;
760 ip = *(u64 *)(fp+8); 764 ip = *(u64 *)(fp+8);
761 if (!in_sched_functions(ip)) 765 if (!in_sched_functions(ip))
762 return ip; 766 return ip;
763 fp = *(u64 *)fp; 767 fp = *(u64 *)fp;
764 } while (count++ < 16); 768 } while (count++ < 16);
765 return 0; 769 return 0;
766} 770}
767 771
768long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) 772long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
769{ 773{
770 int ret = 0; 774 int ret = 0;
771 int doit = task == current; 775 int doit = task == current;
772 int cpu; 776 int cpu;
773 777
774 switch (code) { 778 switch (code) {
775 case ARCH_SET_GS: 779 case ARCH_SET_GS:
776 if (addr >= TASK_SIZE_OF(task)) 780 if (addr >= TASK_SIZE_OF(task))
777 return -EPERM; 781 return -EPERM;
778 cpu = get_cpu(); 782 cpu = get_cpu();
779 /* handle small bases via the GDT because that's faster to 783 /* handle small bases via the GDT because that's faster to
780 switch. */ 784 switch. */
781 if (addr <= 0xffffffff) { 785 if (addr <= 0xffffffff) {
782 set_32bit_tls(task, GS_TLS, addr); 786 set_32bit_tls(task, GS_TLS, addr);
783 if (doit) { 787 if (doit) {
784 load_TLS(&task->thread, cpu); 788 load_TLS(&task->thread, cpu);
785 load_gs_index(GS_TLS_SEL); 789 load_gs_index(GS_TLS_SEL);
786 } 790 }
787 task->thread.gsindex = GS_TLS_SEL; 791 task->thread.gsindex = GS_TLS_SEL;
788 task->thread.gs = 0; 792 task->thread.gs = 0;
789 } else { 793 } else {
790 task->thread.gsindex = 0; 794 task->thread.gsindex = 0;
791 task->thread.gs = addr; 795 task->thread.gs = addr;
792 if (doit) { 796 if (doit) {
793 load_gs_index(0); 797 load_gs_index(0);
794 ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); 798 ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
795 } 799 }
796 } 800 }
797 put_cpu(); 801 put_cpu();
798 break; 802 break;
@@ -846,8 +850,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
846 rdmsrl(MSR_KERNEL_GS_BASE, base); 850 rdmsrl(MSR_KERNEL_GS_BASE, base);
847 else 851 else
848 base = task->thread.gs; 852 base = task->thread.gs;
849 } 853 } else
850 else
851 base = task->thread.gs; 854 base = task->thread.gs;
852 ret = put_user(base, (unsigned long __user *)addr); 855 ret = put_user(base, (unsigned long __user *)addr);
853 break; 856 break;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 58ce4b50211b..e375b658efc3 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -14,6 +14,7 @@
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/ptrace.h> 15#include <linux/ptrace.h>
16#include <linux/regset.h> 16#include <linux/regset.h>
17#include <linux/tracehook.h>
17#include <linux/user.h> 18#include <linux/user.h>
18#include <linux/elf.h> 19#include <linux/elf.h>
19#include <linux/security.h> 20#include <linux/security.h>
@@ -734,7 +735,7 @@ static int ptrace_bts_config(struct task_struct *child,
734 goto errout; 735 goto errout;
735 736
736 if (cfg.flags & PTRACE_BTS_O_ALLOC) { 737 if (cfg.flags & PTRACE_BTS_O_ALLOC) {
737 ds_ovfl_callback_t ovfl = 0; 738 ds_ovfl_callback_t ovfl = NULL;
738 unsigned int sig = 0; 739 unsigned int sig = 0;
739 740
740 /* we ignore the error in case we were not tracing child */ 741 /* we ignore the error in case we were not tracing child */
@@ -748,7 +749,7 @@ static int ptrace_bts_config(struct task_struct *child,
748 ovfl = ptrace_bts_ovfl; 749 ovfl = ptrace_bts_ovfl;
749 } 750 }
750 751
751 error = ds_request_bts(child, /* base = */ 0, cfg.size, ovfl); 752 error = ds_request_bts(child, /* base = */ NULL, cfg.size, ovfl);
752 if (error < 0) 753 if (error < 0)
753 goto errout; 754 goto errout;
754 755
@@ -1086,7 +1087,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
1086 break; 1087 break;
1087 1088
1088 case PTRACE_BTS_SIZE: 1089 case PTRACE_BTS_SIZE:
1089 ret = ds_get_bts_index(child, /* pos = */ 0); 1090 ret = ds_get_bts_index(child, /* pos = */ NULL);
1090 break; 1091 break;
1091 1092
1092 case PTRACE_BTS_GET: 1093 case PTRACE_BTS_GET:
@@ -1469,30 +1470,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
1469 force_sig_info(SIGTRAP, &info, tsk); 1470 force_sig_info(SIGTRAP, &info, tsk);
1470} 1471}
1471 1472
1472static void syscall_trace(struct pt_regs *regs)
1473{
1474 if (!(current->ptrace & PT_PTRACED))
1475 return;
1476
1477#if 0
1478 printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
1479 current->comm,
1480 regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
1481 current_thread_info()->flags, current->ptrace);
1482#endif
1483
1484 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
1485 ? 0x80 : 0));
1486 /*
1487 * this isn't the same as continuing with a signal, but it will do
1488 * for normal use. strace only continues with a signal if the
1489 * stopping signal is not SIGTRAP. -brl
1490 */
1491 if (current->exit_code) {
1492 send_sig(current->exit_code, current, 1);
1493 current->exit_code = 0;
1494 }
1495}
1496 1473
1497#ifdef CONFIG_X86_32 1474#ifdef CONFIG_X86_32
1498# define IS_IA32 1 1475# define IS_IA32 1
@@ -1526,8 +1503,9 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
1526 if (unlikely(test_thread_flag(TIF_SYSCALL_EMU))) 1503 if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
1527 ret = -1L; 1504 ret = -1L;
1528 1505
1529 if (ret || test_thread_flag(TIF_SYSCALL_TRACE)) 1506 if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
1530 syscall_trace(regs); 1507 tracehook_report_syscall_entry(regs))
1508 ret = -1L;
1531 1509
1532 if (unlikely(current->audit_context)) { 1510 if (unlikely(current->audit_context)) {
1533 if (IS_IA32) 1511 if (IS_IA32)
@@ -1553,7 +1531,7 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
1553 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); 1531 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1554 1532
1555 if (test_thread_flag(TIF_SYSCALL_TRACE)) 1533 if (test_thread_flag(TIF_SYSCALL_TRACE))
1556 syscall_trace(regs); 1534 tracehook_report_syscall_exit(regs, 0);
1557 1535
1558 /* 1536 /*
1559 * If TIF_SYSCALL_EMU is set, we only get here because of 1537 * If TIF_SYSCALL_EMU is set, we only get here because of
@@ -1569,6 +1547,6 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
1569 * system call instruction. 1547 * system call instruction.
1570 */ 1548 */
1571 if (test_thread_flag(TIF_SINGLESTEP) && 1549 if (test_thread_flag(TIF_SINGLESTEP) &&
1572 (current->ptrace & PT_PTRACED)) 1550 tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL))
1573 send_sigtrap(current, regs, 0); 1551 send_sigtrap(current, regs, 0);
1574} 1552}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 724adfc63cb9..f4c93f1cfc19 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -29,7 +29,11 @@ EXPORT_SYMBOL(pm_power_off);
29 29
30static const struct desc_ptr no_idt = {}; 30static const struct desc_ptr no_idt = {};
31static int reboot_mode; 31static int reboot_mode;
32enum reboot_type reboot_type = BOOT_KBD; 32/*
33 * Keyboard reset and triple fault may result in INIT, not RESET, which
34 * doesn't work when we're in vmx root mode. Try ACPI first.
35 */
36enum reboot_type reboot_type = BOOT_ACPI;
33int reboot_force; 37int reboot_force;
34 38
35#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) 39#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 673f12cf6eb0..46c98efbbf8d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -223,6 +223,9 @@ unsigned long saved_video_mode;
223#define RAMDISK_LOAD_FLAG 0x4000 223#define RAMDISK_LOAD_FLAG 0x4000
224 224
225static char __initdata command_line[COMMAND_LINE_SIZE]; 225static char __initdata command_line[COMMAND_LINE_SIZE];
226#ifdef CONFIG_CMDLINE_BOOL
227static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
228#endif
226 229
227#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) 230#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
228struct edd edd; 231struct edd edd;
@@ -665,11 +668,28 @@ void __init setup_arch(char **cmdline_p)
665 bss_resource.start = virt_to_phys(&__bss_start); 668 bss_resource.start = virt_to_phys(&__bss_start);
666 bss_resource.end = virt_to_phys(&__bss_stop)-1; 669 bss_resource.end = virt_to_phys(&__bss_stop)-1;
667 670
671#ifdef CONFIG_CMDLINE_BOOL
672#ifdef CONFIG_CMDLINE_OVERRIDE
673 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
674#else
675 if (builtin_cmdline[0]) {
676 /* append boot loader cmdline to builtin */
677 strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
678 strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
679 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
680 }
681#endif
682#endif
683
668 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); 684 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
669 *cmdline_p = command_line; 685 *cmdline_p = command_line;
670 686
671 parse_early_param(); 687 parse_early_param();
672 688
689#ifdef CONFIG_X86_64
690 check_efer();
691#endif
692
673#if defined(CONFIG_VMI) && defined(CONFIG_X86_32) 693#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
674 /* 694 /*
675 * Must be before kernel pagetables are setup 695 * Must be before kernel pagetables are setup
@@ -738,7 +758,6 @@ void __init setup_arch(char **cmdline_p)
738#else 758#else
739 num_physpages = max_pfn; 759 num_physpages = max_pfn;
740 760
741 check_efer();
742 if (cpu_has_x2apic) 761 if (cpu_has_x2apic)
743 check_x2apic(); 762 check_x2apic();
744 763
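
Aside on the setup.c hunk above: with a built-in command line configured, the boot loader's string is either discarded entirely (CONFIG_CMDLINE_OVERRIDE) or appended after the built-in one before parsing. The sketch below reproduces that ordering with plain snprintf() in place of the kernel's strlcat()/strlcpy(); the buffer size and function names are local assumptions.

/*
 * Standalone sketch: merging a built-in command line with the boot
 * loader's command line, built-in first.
 */
#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 256

static void merge_cmdline(char *boot, const char *builtin, size_t size,
                          int override)
{
        char merged[COMMAND_LINE_SIZE];

        if (override) {
                /* built-in command line replaces the boot loader's */
                snprintf(boot, size, "%s", builtin);
                return;
        }
        if (builtin[0] != '\0') {
                /* built-in first, boot loader arguments appended after */
                snprintf(merged, sizeof(merged), "%s %s", builtin, boot);
                snprintf(boot, size, "%s", merged);
        }
}

int main(void)
{
        char boot1[COMMAND_LINE_SIZE] = "root=/dev/sda1 quiet";
        char boot2[COMMAND_LINE_SIZE] = "root=/dev/sda1 quiet";

        merge_cmdline(boot1, "console=ttyS0,115200", sizeof(boot1), 0);
        printf("appended : %s\n", boot1);

        merge_cmdline(boot2, "console=ttyS0,115200", sizeof(boot2), 1);
        printf("override : %s\n", boot2);
        return 0;
}
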
diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h
index 6dd7e2b70a4b..cc673aa55ce4 100644
--- a/arch/x86/kernel/sigframe.h
+++ b/arch/x86/kernel/sigframe.h
@@ -34,4 +34,9 @@ struct rt_sigframe {
34 struct siginfo info; 34 struct siginfo info;
35 /* fp state follows here */ 35 /* fp state follows here */
36}; 36};
37
38int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
39 sigset_t *set, struct pt_regs *regs);
40int ia32_setup_frame(int sig, struct k_sigaction *ka,
41 sigset_t *set, struct pt_regs *regs);
37#endif 42#endif
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 8d380b699c0c..b21070ea33a4 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/wait.h> 19#include <linux/wait.h>
20#include <linux/tracehook.h>
20#include <linux/elf.h> 21#include <linux/elf.h>
21#include <linux/smp.h> 22#include <linux/smp.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
@@ -556,8 +557,6 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
556 * handler too. 557 * handler too.
557 */ 558 */
558 regs->flags &= ~X86_EFLAGS_TF; 559 regs->flags &= ~X86_EFLAGS_TF;
559 if (test_thread_flag(TIF_SINGLESTEP))
560 ptrace_notify(SIGTRAP);
561 560
562 spin_lock_irq(&current->sighand->siglock); 561 spin_lock_irq(&current->sighand->siglock);
563 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 562 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
@@ -566,6 +565,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
566 recalc_sigpending(); 565 recalc_sigpending();
567 spin_unlock_irq(&current->sighand->siglock); 566 spin_unlock_irq(&current->sighand->siglock);
568 567
568 tracehook_signal_handler(sig, info, ka, regs,
569 test_thread_flag(TIF_SINGLESTEP));
570
569 return 0; 571 return 0;
570} 572}
571 573
@@ -659,5 +661,10 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
659 if (thread_info_flags & _TIF_SIGPENDING) 661 if (thread_info_flags & _TIF_SIGPENDING)
660 do_signal(regs); 662 do_signal(regs);
661 663
664 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
665 clear_thread_flag(TIF_NOTIFY_RESUME);
666 tracehook_notify_resume(regs);
667 }
668
662 clear_thread_flag(TIF_IRET); 669 clear_thread_flag(TIF_IRET);
663} 670}
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 4665b598a376..823a55bf8c39 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -15,17 +15,20 @@
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/wait.h> 16#include <linux/wait.h>
17#include <linux/ptrace.h> 17#include <linux/ptrace.h>
18#include <linux/tracehook.h>
18#include <linux/unistd.h> 19#include <linux/unistd.h>
19#include <linux/stddef.h> 20#include <linux/stddef.h>
20#include <linux/personality.h> 21#include <linux/personality.h>
21#include <linux/compiler.h> 22#include <linux/compiler.h>
23#include <linux/uaccess.h>
24
22#include <asm/processor.h> 25#include <asm/processor.h>
23#include <asm/ucontext.h> 26#include <asm/ucontext.h>
24#include <asm/uaccess.h>
25#include <asm/i387.h> 27#include <asm/i387.h>
26#include <asm/proto.h> 28#include <asm/proto.h>
27#include <asm/ia32_unistd.h> 29#include <asm/ia32_unistd.h>
28#include <asm/mce.h> 30#include <asm/mce.h>
31#include <asm/syscall.h>
29#include <asm/syscalls.h> 32#include <asm/syscalls.h>
30#include "sigframe.h" 33#include "sigframe.h"
31 34
@@ -42,11 +45,6 @@
42# define FIX_EFLAGS __FIX_EFLAGS 45# define FIX_EFLAGS __FIX_EFLAGS
43#endif 46#endif
44 47
45int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
46 sigset_t *set, struct pt_regs * regs);
47int ia32_setup_frame(int sig, struct k_sigaction *ka,
48 sigset_t *set, struct pt_regs * regs);
49
50asmlinkage long 48asmlinkage long
51sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 49sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
52 struct pt_regs *regs) 50 struct pt_regs *regs)
@@ -66,7 +64,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
66 /* Always make any pending restarted system calls return -EINTR */ 64 /* Always make any pending restarted system calls return -EINTR */
67 current_thread_info()->restart_block.fn = do_no_restart_syscall; 65 current_thread_info()->restart_block.fn = do_no_restart_syscall;
68 66
69#define COPY(x) err |= __get_user(regs->x, &sc->x) 67#define COPY(x) (err |= __get_user(regs->x, &sc->x))
70 68
71 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); 69 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
72 COPY(dx); COPY(cx); COPY(ip); 70 COPY(dx); COPY(cx); COPY(ip);
@@ -96,7 +94,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
96 } 94 }
97 95
98 { 96 {
99 struct _fpstate __user * buf; 97 struct _fpstate __user *buf;
100 err |= __get_user(buf, &sc->fpstate); 98 err |= __get_user(buf, &sc->fpstate);
101 err |= restore_i387_xstate(buf); 99 err |= restore_i387_xstate(buf);
102 } 100 }
@@ -122,7 +120,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
122 current->blocked = set; 120 current->blocked = set;
123 recalc_sigpending(); 121 recalc_sigpending();
124 spin_unlock_irq(&current->sighand->siglock); 122 spin_unlock_irq(&current->sighand->siglock);
125 123
126 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) 124 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
127 goto badframe; 125 goto badframe;
128 126
@@ -132,16 +130,17 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
132 return ax; 130 return ax;
133 131
134badframe: 132badframe:
135 signal_fault(regs,frame,"sigreturn"); 133 signal_fault(regs, frame, "sigreturn");
136 return 0; 134 return 0;
137} 135}
138 136
139/* 137/*
140 * Set up a signal frame. 138 * Set up a signal frame.
141 */ 139 */
142 140
143static inline int 141static inline int
144setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me) 142setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
143 unsigned long mask, struct task_struct *me)
145{ 144{
146 int err = 0; 145 int err = 0;
147 146
@@ -197,7 +196,7 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
197} 196}
198 197
199static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 198static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
200 sigset_t *set, struct pt_regs * regs) 199 sigset_t *set, struct pt_regs *regs)
201{ 200{
202 struct rt_sigframe __user *frame; 201 struct rt_sigframe __user *frame;
203 void __user *fp = NULL; 202 void __user *fp = NULL;
@@ -210,19 +209,19 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
210 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; 209 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
211 210
212 if (save_i387_xstate(fp) < 0) 211 if (save_i387_xstate(fp) < 0)
213 err |= -1; 212 err |= -1;
214 } else 213 } else
215 frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; 214 frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
216 215
217 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 216 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
218 goto give_sigsegv; 217 goto give_sigsegv;
219 218
220 if (ka->sa.sa_flags & SA_SIGINFO) { 219 if (ka->sa.sa_flags & SA_SIGINFO) {
221 err |= copy_siginfo_to_user(&frame->info, info); 220 err |= copy_siginfo_to_user(&frame->info, info);
222 if (err) 221 if (err)
223 goto give_sigsegv; 222 goto give_sigsegv;
224 } 223 }
225 224
226 /* Create the ucontext. */ 225 /* Create the ucontext. */
227 if (cpu_has_xsave) 226 if (cpu_has_xsave)
228 err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); 227 err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
@@ -235,9 +234,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
235 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); 234 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
236 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); 235 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
237 err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); 236 err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
238 if (sizeof(*set) == 16) { 237 if (sizeof(*set) == 16) {
239 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); 238 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
240 __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); 239 __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
241 } else 240 } else
242 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 241 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
243 242
@@ -248,7 +247,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
248 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); 247 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
249 } else { 248 } else {
250 /* could use a vstub here */ 249 /* could use a vstub here */
251 goto give_sigsegv; 250 goto give_sigsegv;
252 } 251 }
253 252
254 if (err) 253 if (err)
@@ -256,7 +255,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
256 255
257 /* Set up registers for signal handler */ 256 /* Set up registers for signal handler */
258 regs->di = sig; 257 regs->di = sig;
259 /* In case the signal handler was declared without prototypes */ 258 /* In case the signal handler was declared without prototypes */
260 regs->ax = 0; 259 regs->ax = 0;
261 260
262 /* This also works for non SA_SIGINFO handlers because they expect the 261 /* This also works for non SA_SIGINFO handlers because they expect the
@@ -279,37 +278,8 @@ give_sigsegv:
279} 278}
280 279
281/* 280/*
282 * Return -1L or the syscall number that @regs is executing.
283 */
284static long current_syscall(struct pt_regs *regs)
285{
286 /*
287 * We always sign-extend a -1 value being set here,
288 * so this is always either -1L or a syscall number.
289 */
290 return regs->orig_ax;
291}
292
293/*
294 * Return a value that is -EFOO if the system call in @regs->orig_ax
295 * returned an error. This only works for @regs from @current.
296 */
297static long current_syscall_ret(struct pt_regs *regs)
298{
299#ifdef CONFIG_IA32_EMULATION
300 if (test_thread_flag(TIF_IA32))
301 /*
302 * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
303 * and will match correctly in comparisons.
304 */
305 return (int) regs->ax;
306#endif
307 return regs->ax;
308}
309
310/*
311 * OK, we're invoking a handler 281 * OK, we're invoking a handler
312 */ 282 */
313 283
314static int 284static int
315handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 285handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
@@ -318,9 +288,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
318 int ret; 288 int ret;
319 289
320 /* Are we from a system call? */ 290 /* Are we from a system call? */
321 if (current_syscall(regs) >= 0) { 291 if (syscall_get_nr(current, regs) >= 0) {
322 /* If so, check system call restarting.. */ 292 /* If so, check system call restarting.. */
323 switch (current_syscall_ret(regs)) { 293 switch (syscall_get_error(current, regs)) {
324 case -ERESTART_RESTARTBLOCK: 294 case -ERESTART_RESTARTBLOCK:
325 case -ERESTARTNOHAND: 295 case -ERESTARTNOHAND:
326 regs->ax = -EINTR; 296 regs->ax = -EINTR;
@@ -353,7 +323,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
353 ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs); 323 ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs);
354 else 324 else
355 ret = ia32_setup_frame(sig, ka, oldset, regs); 325 ret = ia32_setup_frame(sig, ka, oldset, regs);
356 } else 326 } else
357#endif 327#endif
358 ret = setup_rt_frame(sig, ka, info, oldset, regs); 328 ret = setup_rt_frame(sig, ka, info, oldset, regs);
359 329
@@ -377,15 +347,16 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
377 * handler too. 347 * handler too.
378 */ 348 */
379 regs->flags &= ~X86_EFLAGS_TF; 349 regs->flags &= ~X86_EFLAGS_TF;
380 if (test_thread_flag(TIF_SINGLESTEP))
381 ptrace_notify(SIGTRAP);
382 350
383 spin_lock_irq(&current->sighand->siglock); 351 spin_lock_irq(&current->sighand->siglock);
384 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 352 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
385 if (!(ka->sa.sa_flags & SA_NODEFER)) 353 if (!(ka->sa.sa_flags & SA_NODEFER))
386 sigaddset(&current->blocked,sig); 354 sigaddset(&current->blocked, sig);
387 recalc_sigpending(); 355 recalc_sigpending();
388 spin_unlock_irq(&current->sighand->siglock); 356 spin_unlock_irq(&current->sighand->siglock);
357
358 tracehook_signal_handler(sig, info, ka, regs,
359 test_thread_flag(TIF_SINGLESTEP));
389 } 360 }
390 361
391 return ret; 362 return ret;
@@ -442,9 +413,9 @@ static void do_signal(struct pt_regs *regs)
442 } 413 }
443 414
444 /* Did we come from a system call? */ 415 /* Did we come from a system call? */
445 if (current_syscall(regs) >= 0) { 416 if (syscall_get_nr(current, regs) >= 0) {
446 /* Restart the system call - no handlers present */ 417 /* Restart the system call - no handlers present */
447 switch (current_syscall_ret(regs)) { 418 switch (syscall_get_error(current, regs)) {
448 case -ERESTARTNOHAND: 419 case -ERESTARTNOHAND:
449 case -ERESTARTSYS: 420 case -ERESTARTSYS:
450 case -ERESTARTNOINTR: 421 case -ERESTARTNOINTR:
@@ -482,17 +453,23 @@ void do_notify_resume(struct pt_regs *regs, void *unused,
482 /* deal with pending signal delivery */ 453 /* deal with pending signal delivery */
483 if (thread_info_flags & _TIF_SIGPENDING) 454 if (thread_info_flags & _TIF_SIGPENDING)
484 do_signal(regs); 455 do_signal(regs);
456
457 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
458 clear_thread_flag(TIF_NOTIFY_RESUME);
459 tracehook_notify_resume(regs);
460 }
485} 461}
486 462
487void signal_fault(struct pt_regs *regs, void __user *frame, char *where) 463void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
488{ 464{
489 struct task_struct *me = current; 465 struct task_struct *me = current;
490 if (show_unhandled_signals && printk_ratelimit()) { 466 if (show_unhandled_signals && printk_ratelimit()) {
491 printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", 467 printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
492 me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax); 468 me->comm, me->pid, where, frame, regs->ip,
469 regs->sp, regs->orig_ax);
493 print_vma_addr(" in ", regs->ip); 470 print_vma_addr(" in ", regs->ip);
494 printk("\n"); 471 printk("\n");
495 } 472 }
496 473
497 force_sig(SIGSEGV, me); 474 force_sig(SIGSEGV, me);
498} 475}
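
Aside on the COPY() macro hunk above: wrapping the expansion in parentheses keeps the whole `err |= __get_user(...)` as a single operand if the macro is ever used inside a larger expression. The standalone sketch below demonstrates the difference with local stand-in macros (COPY_BARE and COPY_PAREN); __get_user() is replaced by a plain assignment for illustration only.

/*
 * Standalone sketch: an unparenthesised statement-like macro can bind
 * to neighbouring operators and silently change what gets accumulated.
 */
#include <stdio.h>

#define COPY_BARE(dst, src)   err |= (dst = (src))
#define COPY_PAREN(dst, src) (err |= (dst = (src)))

int main(void)
{
        int err, a, ok;

        /* Bare form: '== 0' binds to the copied value, not to the whole
         * macro, so the error accumulator never records the result. */
        err = 0; a = 0;
        ok = COPY_BARE(a, 5) == 0;
        printf("bare         : err=%d a=%d ok=%d\n", err, a, ok);

        /* Parenthesised form: the macro stays one operand, as intended,
         * and the copied value lands in the error accumulator. */
        err = 0; a = 0;
        ok = COPY_PAREN(a, 5) == 0;
        printf("parenthesised: err=%d a=%d ok=%d\n", err, a, ok);

        return 0;
}
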
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index aa804c64b167..2ff0bbcd5bd1 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1313,16 +1313,13 @@ __init void prefill_possible_map(void)
1313 if (!num_processors) 1313 if (!num_processors)
1314 num_processors = 1; 1314 num_processors = 1;
1315 1315
1316#ifdef CONFIG_HOTPLUG_CPU
1317 if (additional_cpus == -1) { 1316 if (additional_cpus == -1) {
1318 if (disabled_cpus > 0) 1317 if (disabled_cpus > 0)
1319 additional_cpus = disabled_cpus; 1318 additional_cpus = disabled_cpus;
1320 else 1319 else
1321 additional_cpus = 0; 1320 additional_cpus = 0;
1322 } 1321 }
1323#else 1322
1324 additional_cpus = 0;
1325#endif
1326 possible = num_processors + additional_cpus; 1323 possible = num_processors + additional_cpus;
1327 if (possible > NR_CPUS) 1324 if (possible > NR_CPUS)
1328 possible = NR_CPUS; 1325 possible = NR_CPUS;
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index c9288c883e20..6bc211accf08 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -13,16 +13,17 @@
13#include <linux/utsname.h> 13#include <linux/utsname.h>
14#include <linux/personality.h> 14#include <linux/personality.h>
15#include <linux/random.h> 15#include <linux/random.h>
16#include <linux/uaccess.h>
16 17
17#include <asm/uaccess.h>
18#include <asm/ia32.h> 18#include <asm/ia32.h>
19#include <asm/syscalls.h> 19#include <asm/syscalls.h>
20 20
21asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, 21asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
22 unsigned long fd, unsigned long off) 22 unsigned long prot, unsigned long flags,
23 unsigned long fd, unsigned long off)
23{ 24{
24 long error; 25 long error;
25 struct file * file; 26 struct file *file;
26 27
27 error = -EINVAL; 28 error = -EINVAL;
28 if (off & ~PAGE_MASK) 29 if (off & ~PAGE_MASK)
@@ -57,9 +58,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
57 unmapped base down for this case. This can give 58 unmapped base down for this case. This can give
58 conflicts with the heap, but we assume that glibc 59 conflicts with the heap, but we assume that glibc
59 malloc knows how to fall back to mmap. Give it 1GB 60 malloc knows how to fall back to mmap. Give it 1GB
60 of playground for now. -AK */ 61 of playground for now. -AK */
61 *begin = 0x40000000; 62 *begin = 0x40000000;
62 *end = 0x80000000; 63 *end = 0x80000000;
63 if (current->flags & PF_RANDOMIZE) { 64 if (current->flags & PF_RANDOMIZE) {
64 new_begin = randomize_range(*begin, *begin + 0x02000000, 0); 65 new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
65 if (new_begin) 66 if (new_begin)
@@ -67,9 +68,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
67 } 68 }
68 } else { 69 } else {
69 *begin = TASK_UNMAPPED_BASE; 70 *begin = TASK_UNMAPPED_BASE;
70 *end = TASK_SIZE; 71 *end = TASK_SIZE;
71 } 72 }
72} 73}
73 74
74unsigned long 75unsigned long
75arch_get_unmapped_area(struct file *filp, unsigned long addr, 76arch_get_unmapped_area(struct file *filp, unsigned long addr,
@@ -79,11 +80,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
79 struct vm_area_struct *vma; 80 struct vm_area_struct *vma;
80 unsigned long start_addr; 81 unsigned long start_addr;
81 unsigned long begin, end; 82 unsigned long begin, end;
82 83
83 if (flags & MAP_FIXED) 84 if (flags & MAP_FIXED)
84 return addr; 85 return addr;
85 86
86 find_start_end(flags, &begin, &end); 87 find_start_end(flags, &begin, &end);
87 88
88 if (len > end) 89 if (len > end)
89 return -ENOMEM; 90 return -ENOMEM;
@@ -97,12 +98,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97 } 98 }
98 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) 99 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
99 && len <= mm->cached_hole_size) { 100 && len <= mm->cached_hole_size) {
100 mm->cached_hole_size = 0; 101 mm->cached_hole_size = 0;
101 mm->free_area_cache = begin; 102 mm->free_area_cache = begin;
102 } 103 }
103 addr = mm->free_area_cache; 104 addr = mm->free_area_cache;
104 if (addr < begin) 105 if (addr < begin)
105 addr = begin; 106 addr = begin;
106 start_addr = addr; 107 start_addr = addr;
107 108
108full_search: 109full_search:
@@ -128,7 +129,7 @@ full_search:
128 return addr; 129 return addr;
129 } 130 }
130 if (addr + mm->cached_hole_size < vma->vm_start) 131 if (addr + mm->cached_hole_size < vma->vm_start)
131 mm->cached_hole_size = vma->vm_start - addr; 132 mm->cached_hole_size = vma->vm_start - addr;
132 133
133 addr = vma->vm_end; 134 addr = vma->vm_end;
134 } 135 }
@@ -178,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
178 vma = find_vma(mm, addr-len); 179 vma = find_vma(mm, addr-len);
179 if (!vma || addr <= vma->vm_start) 180 if (!vma || addr <= vma->vm_start)
180 /* remember the address as a hint for next time */ 181 /* remember the address as a hint for next time */
181 return (mm->free_area_cache = addr-len); 182 return mm->free_area_cache = addr-len;
182 } 183 }
183 184
184 if (mm->mmap_base < len) 185 if (mm->mmap_base < len)
@@ -195,7 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
195 vma = find_vma(mm, addr); 196 vma = find_vma(mm, addr);
196 if (!vma || addr+len <= vma->vm_start) 197 if (!vma || addr+len <= vma->vm_start)
197 /* remember the address as a hint for next time */ 198 /* remember the address as a hint for next time */
198 return (mm->free_area_cache = addr); 199 return mm->free_area_cache = addr;
199 200
200 /* remember the largest hole we saw so far */ 201 /* remember the largest hole we saw so far */
201 if (addr + mm->cached_hole_size < vma->vm_start) 202 if (addr + mm->cached_hole_size < vma->vm_start)
@@ -225,13 +226,13 @@ bottomup:
225} 226}
226 227
227 228
228asmlinkage long sys_uname(struct new_utsname __user * name) 229asmlinkage long sys_uname(struct new_utsname __user *name)
229{ 230{
230 int err; 231 int err;
231 down_read(&uts_sem); 232 down_read(&uts_sem);
232 err = copy_to_user(name, utsname(), sizeof (*name)); 233 err = copy_to_user(name, utsname(), sizeof(*name));
233 up_read(&uts_sem); 234 up_read(&uts_sem);
234 if (personality(current->personality) == PER_LINUX32) 235 if (personality(current->personality) == PER_LINUX32)
235 err |= copy_to_user(&name->machine, "i686", 5); 236 err |= copy_to_user(&name->machine, "i686", 5);
236 return err ? -EFAULT : 0; 237 return err ? -EFAULT : 0;
237} 238}
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index b42068fb7b76..2887a789e38f 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -32,6 +32,8 @@
32#include <linux/bug.h> 32#include <linux/bug.h>
33#include <linux/nmi.h> 33#include <linux/nmi.h>
34#include <linux/mm.h> 34#include <linux/mm.h>
35#include <linux/smp.h>
36#include <linux/io.h>
35 37
36#if defined(CONFIG_EDAC) 38#if defined(CONFIG_EDAC)
37#include <linux/edac.h> 39#include <linux/edac.h>
@@ -45,9 +47,6 @@
45#include <asm/unwind.h> 47#include <asm/unwind.h>
46#include <asm/desc.h> 48#include <asm/desc.h>
47#include <asm/i387.h> 49#include <asm/i387.h>
48#include <asm/nmi.h>
49#include <asm/smp.h>
50#include <asm/io.h>
51#include <asm/pgalloc.h> 50#include <asm/pgalloc.h>
52#include <asm/proto.h> 51#include <asm/proto.h>
53#include <asm/pda.h> 52#include <asm/pda.h>
@@ -85,7 +84,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
85 84
86void printk_address(unsigned long address, int reliable) 85void printk_address(unsigned long address, int reliable)
87{ 86{
88 printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address); 87 printk(" [<%016lx>] %s%pS\n",
88 address, reliable ? "" : "? ", (void *) address);
89} 89}
90 90
91static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, 91static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
@@ -98,7 +98,8 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
98 [STACKFAULT_STACK - 1] = "#SS", 98 [STACKFAULT_STACK - 1] = "#SS",
99 [MCE_STACK - 1] = "#MC", 99 [MCE_STACK - 1] = "#MC",
100#if DEBUG_STKSZ > EXCEPTION_STKSZ 100#if DEBUG_STKSZ > EXCEPTION_STKSZ
101 [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]" 101 [N_EXCEPTION_STACKS ...
102 N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
102#endif 103#endif
103 }; 104 };
104 unsigned k; 105 unsigned k;
@@ -163,7 +164,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
163} 164}
164 165
165/* 166/*
166 * x86-64 can have up to three kernel stacks: 167 * x86-64 can have up to three kernel stacks:
167 * process stack 168 * process stack
168 * interrupt stack 169 * interrupt stack
169 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack 170 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
219 const struct stacktrace_ops *ops, void *data) 220 const struct stacktrace_ops *ops, void *data)
220{ 221{
221 const unsigned cpu = get_cpu(); 222 const unsigned cpu = get_cpu();
222 unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; 223 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
223 unsigned used = 0; 224 unsigned used = 0;
224 struct thread_info *tinfo; 225 struct thread_info *tinfo;
225 226
@@ -237,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
237 if (!bp) { 238 if (!bp) {
238 if (task == current) { 239 if (task == current) {
239 /* Grab bp right from our regs */ 240 /* Grab bp right from our regs */
240 asm("movq %%rbp, %0" : "=r" (bp) :); 241 asm("movq %%rbp, %0" : "=r" (bp) : );
241 } else { 242 } else {
242 /* bp is the last reg pushed by switch_to */ 243 /* bp is the last reg pushed by switch_to */
243 bp = *(unsigned long *) task->thread.sp; 244 bp = *(unsigned long *) task->thread.sp;
@@ -356,11 +357,15 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
356 unsigned long *stack; 357 unsigned long *stack;
357 int i; 358 int i;
358 const int cpu = smp_processor_id(); 359 const int cpu = smp_processor_id();
359 unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr); 360 unsigned long *irqstack_end =
360 unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); 361 (unsigned long *) (cpu_pda(cpu)->irqstackptr);
362 unsigned long *irqstack =
363 (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
361 364
362 // debugging aid: "show_stack(NULL, NULL);" prints the 365 /*
363 // back trace for this cpu. 366 * debugging aid: "show_stack(NULL, NULL);" prints the
367 * back trace for this cpu.
368 */
364 369
365 if (sp == NULL) { 370 if (sp == NULL) {
366 if (task) 371 if (task)
@@ -404,7 +409,7 @@ void dump_stack(void)
404 409
405#ifdef CONFIG_FRAME_POINTER 410#ifdef CONFIG_FRAME_POINTER
406 if (!bp) 411 if (!bp)
407 asm("movq %%rbp, %0" : "=r" (bp):); 412 asm("movq %%rbp, %0" : "=r" (bp) : );
408#endif 413#endif
409 414
410 printk("Pid: %d, comm: %.20s %s %s %.*s\n", 415 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
@@ -414,7 +419,6 @@ void dump_stack(void)
414 init_utsname()->version); 419 init_utsname()->version);
415 show_trace(NULL, NULL, &stack, bp); 420 show_trace(NULL, NULL, &stack, bp);
416} 421}
417
418EXPORT_SYMBOL(dump_stack); 422EXPORT_SYMBOL(dump_stack);
419 423
420void show_registers(struct pt_regs *regs) 424void show_registers(struct pt_regs *regs)
@@ -492,7 +496,7 @@ unsigned __kprobes long oops_begin(void)
492 raw_local_irq_save(flags); 496 raw_local_irq_save(flags);
493 cpu = smp_processor_id(); 497 cpu = smp_processor_id();
494 if (!__raw_spin_trylock(&die_lock)) { 498 if (!__raw_spin_trylock(&die_lock)) {
495 if (cpu == die_owner) 499 if (cpu == die_owner)
496 /* nested oops. should stop eventually */; 500 /* nested oops. should stop eventually */;
497 else 501 else
498 __raw_spin_lock(&die_lock); 502 __raw_spin_lock(&die_lock);
@@ -637,7 +641,7 @@ kernel_trap:
637} 641}
638 642
639#define DO_ERROR(trapnr, signr, str, name) \ 643#define DO_ERROR(trapnr, signr, str, name) \
640asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ 644asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
641{ \ 645{ \
642 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 646 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
643 == NOTIFY_STOP) \ 647 == NOTIFY_STOP) \
@@ -647,7 +651,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
647} 651}
648 652
649#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ 653#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
650asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ 654asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
651{ \ 655{ \
652 siginfo_t info; \ 656 siginfo_t info; \
653 info.si_signo = signr; \ 657 info.si_signo = signr; \
@@ -682,7 +686,7 @@ asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
682 preempt_conditional_cli(regs); 686 preempt_conditional_cli(regs);
683} 687}
684 688
685asmlinkage void do_double_fault(struct pt_regs * regs, long error_code) 689asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
686{ 690{
687 static const char str[] = "double fault"; 691 static const char str[] = "double fault";
688 struct task_struct *tsk = current; 692 struct task_struct *tsk = current;
@@ -777,9 +781,10 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
777} 781}
778 782
779static notrace __kprobes void 783static notrace __kprobes void
780unknown_nmi_error(unsigned char reason, struct pt_regs * regs) 784unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
781{ 785{
782 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) 786 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
787 NOTIFY_STOP)
783 return; 788 return;
784 printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", 789 printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
785 reason); 790 reason);
@@ -881,7 +886,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
881 else if (user_mode(eregs)) 886 else if (user_mode(eregs))
882 regs = task_pt_regs(current); 887 regs = task_pt_regs(current);
883 /* Exception from kernel and interrupts are enabled. Move to 888 /* Exception from kernel and interrupts are enabled. Move to
884 kernel process stack. */ 889 kernel process stack. */
885 else if (eregs->flags & X86_EFLAGS_IF) 890 else if (eregs->flags & X86_EFLAGS_IF)
886 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); 891 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
887 if (eregs != regs) 892 if (eregs != regs)
@@ -890,7 +895,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
890} 895}
891 896
892/* runs on IST stack. */ 897/* runs on IST stack. */
893asmlinkage void __kprobes do_debug(struct pt_regs * regs, 898asmlinkage void __kprobes do_debug(struct pt_regs *regs,
894 unsigned long error_code) 899 unsigned long error_code)
895{ 900{
896 struct task_struct *tsk = current; 901 struct task_struct *tsk = current;
@@ -1034,7 +1039,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
1034 1039
1035asmlinkage void bad_intr(void) 1040asmlinkage void bad_intr(void)
1036{ 1041{
1037 printk("bad interrupt"); 1042 printk("bad interrupt");
1038} 1043}
1039 1044
1040asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) 1045asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
@@ -1046,7 +1051,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1046 1051
1047 conditional_sti(regs); 1052 conditional_sti(regs);
1048 if (!user_mode(regs) && 1053 if (!user_mode(regs) &&
1049 kernel_math_error(regs, "kernel simd math error", 19)) 1054 kernel_math_error(regs, "kernel simd math error", 19))
1050 return; 1055 return;
1051 1056
1052 /* 1057 /*
@@ -1091,7 +1096,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1091 force_sig_info(SIGFPE, &info, task); 1096 force_sig_info(SIGFPE, &info, task);
1092} 1097}
1093 1098
1094asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs) 1099asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
1095{ 1100{
1096} 1101}
1097 1102
@@ -1148,8 +1153,10 @@ void __init trap_init(void)
1148 set_intr_gate(0, &divide_error); 1153 set_intr_gate(0, &divide_error);
1149 set_intr_gate_ist(1, &debug, DEBUG_STACK); 1154 set_intr_gate_ist(1, &debug, DEBUG_STACK);
1150 set_intr_gate_ist(2, &nmi, NMI_STACK); 1155 set_intr_gate_ist(2, &nmi, NMI_STACK);
1151 set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */ 1156 /* int3 can be called from all */
1152 set_system_gate(4, &overflow); /* int4 can be called from all */ 1157 set_system_gate_ist(3, &int3, DEBUG_STACK);
1158 /* int4 can be called from all */
1159 set_system_gate(4, &overflow);
1153 set_intr_gate(5, &bounds); 1160 set_intr_gate(5, &bounds);
1154 set_intr_gate(6, &invalid_op); 1161 set_intr_gate(6, &invalid_op);
1155 set_intr_gate(7, &device_not_available); 1162 set_intr_gate(7, &device_not_available);
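
Most of the traps_64.c hunk is whitespace and comment cleanup, but the DO_ERROR/DO_ERROR_INFO macros it reformats are a pattern in their own right: one macro invocation stamps out a complete trap handler that differs from its siblings only in trap number, signal and message. A hedged, stand-alone illustration of that macro-generation idea (report() and the handler names here are invented for the example):

#include <stdio.h>

static void report(int nr, const char *str) { printf("trap %d: %s\n", nr, str); }

/* Generate a family of handlers that differ only in trap number and message. */
#define DEFINE_HANDLER(nr, str, name)			\
static void do_##name(long error_code)			\
{							\
	(void)error_code;				\
	report(nr, str);				\
}

DEFINE_HANDLER(0, "divide error", divide_error)
DEFINE_HANDLER(6, "invalid opcode", invalid_op)

int main(void)
{
	do_divide_error(0);
	do_invalid_op(0);
	return 0;
}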
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 8f98e9de1b82..161bb850fc47 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -104,7 +104,7 @@ __setup("notsc", notsc_setup);
104/* 104/*
105 * Read TSC and the reference counters. Take care of SMI disturbance 105 * Read TSC and the reference counters. Take care of SMI disturbance
106 */ 106 */
107static u64 tsc_read_refs(u64 *pm, u64 *hpet) 107static u64 tsc_read_refs(u64 *p, int hpet)
108{ 108{
109 u64 t1, t2; 109 u64 t1, t2;
110 int i; 110 int i;
@@ -112,9 +112,9 @@ static u64 tsc_read_refs(u64 *pm, u64 *hpet)
112 for (i = 0; i < MAX_RETRIES; i++) { 112 for (i = 0; i < MAX_RETRIES; i++) {
113 t1 = get_cycles(); 113 t1 = get_cycles();
114 if (hpet) 114 if (hpet)
115 *hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF; 115 *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
116 else 116 else
117 *pm = acpi_pm_read_early(); 117 *p = acpi_pm_read_early();
118 t2 = get_cycles(); 118 t2 = get_cycles();
119 if ((t2 - t1) < SMI_TRESHOLD) 119 if ((t2 - t1) < SMI_TRESHOLD)
120 return t2; 120 return t2;
@@ -123,13 +123,59 @@ static u64 tsc_read_refs(u64 *pm, u64 *hpet)
123} 123}
124 124
125/* 125/*
126 * Calculate the TSC frequency from HPET reference
127 */
128static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
129{
130 u64 tmp;
131
132 if (hpet2 < hpet1)
133 hpet2 += 0x100000000ULL;
134 hpet2 -= hpet1;
135 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
136 do_div(tmp, 1000000);
137 do_div(deltatsc, tmp);
138
139 return (unsigned long) deltatsc;
140}
141
142/*
143 * Calculate the TSC frequency from PMTimer reference
144 */
145static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
146{
147 u64 tmp;
148
149 if (!pm1 && !pm2)
150 return ULONG_MAX;
151
152 if (pm2 < pm1)
153 pm2 += (u64)ACPI_PM_OVRRUN;
154 pm2 -= pm1;
155 tmp = pm2 * 1000000000LL;
156 do_div(tmp, PMTMR_TICKS_PER_SEC);
157 do_div(deltatsc, tmp);
158
159 return (unsigned long) deltatsc;
160}
161
162#define CAL_MS 10
163#define CAL_LATCH (CLOCK_TICK_RATE / (1000 / CAL_MS))
164#define CAL_PIT_LOOPS 1000
165
166#define CAL2_MS 50
167#define CAL2_LATCH (CLOCK_TICK_RATE / (1000 / CAL2_MS))
168#define CAL2_PIT_LOOPS 5000
169
170
171/*
126 * Try to calibrate the TSC against the Programmable 172 * Try to calibrate the TSC against the Programmable
127 * Interrupt Timer and return the frequency of the TSC 173 * Interrupt Timer and return the frequency of the TSC
128 * in kHz. 174 * in kHz.
129 * 175 *
130 * Return ULONG_MAX on failure to calibrate. 176 * Return ULONG_MAX on failure to calibrate.
131 */ 177 */
132static unsigned long pit_calibrate_tsc(void) 178static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
133{ 179{
134 u64 tsc, t1, t2, delta; 180 u64 tsc, t1, t2, delta;
135 unsigned long tscmin, tscmax; 181 unsigned long tscmin, tscmax;
@@ -144,8 +190,8 @@ static unsigned long pit_calibrate_tsc(void)
144 * (LSB then MSB) to begin countdown. 190 * (LSB then MSB) to begin countdown.
145 */ 191 */
146 outb(0xb0, 0x43); 192 outb(0xb0, 0x43);
147 outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42); 193 outb(latch & 0xff, 0x42);
148 outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42); 194 outb(latch >> 8, 0x42);
149 195
150 tsc = t1 = t2 = get_cycles(); 196 tsc = t1 = t2 = get_cycles();
151 197
@@ -166,31 +212,154 @@ static unsigned long pit_calibrate_tsc(void)
166 /* 212 /*
167 * Sanity checks: 213 * Sanity checks:
168 * 214 *
169 * If we were not able to read the PIT more than 5000 215 * If we were not able to read the PIT more than loopmin
170 * times, then we have been hit by a massive SMI 216 * times, then we have been hit by a massive SMI
171 * 217 *
172 * If the maximum is 10 times larger than the minimum, 218 * If the maximum is 10 times larger than the minimum,
173 * then we got hit by an SMI as well. 219 * then we got hit by an SMI as well.
174 */ 220 */
175 if (pitcnt < 5000 || tscmax > 10 * tscmin) 221 if (pitcnt < loopmin || tscmax > 10 * tscmin)
176 return ULONG_MAX; 222 return ULONG_MAX;
177 223
178 /* Calculate the PIT value */ 224 /* Calculate the PIT value */
179 delta = t2 - t1; 225 delta = t2 - t1;
180 do_div(delta, 50); 226 do_div(delta, ms);
181 return delta; 227 return delta;
182} 228}
183 229
230/*
231 * This reads the current MSB of the PIT counter, and
232 * checks if we are running on sufficiently fast and
233 * non-virtualized hardware.
234 *
235 * Our expectations are:
236 *
237 * - the PIT is running at roughly 1.19MHz
238 *
239 * - each IO is going to take about 1us on real hardware,
240 * but we allow it to be much faster (by a factor of 10) or
241 * _slightly_ slower (ie we allow up to a 2us read+counter
 242 *  update) - anything else implies an unacceptably slow CPU
243 * or PIT for the fast calibration to work.
244 *
245 * - with 256 PIT ticks to read the value, we have 214us to
246 * see the same MSB (and overhead like doing a single TSC
247 * read per MSB value etc).
248 *
249 * - We're doing 2 reads per loop (LSB, MSB), and we expect
250 * them each to take about a microsecond on real hardware.
251 * So we expect a count value of around 100. But we'll be
252 * generous, and accept anything over 50.
253 *
254 * - if the PIT is stuck, and we see *many* more reads, we
255 * return early (and the next caller of pit_expect_msb()
256 * then consider it a failure when they don't see the
257 * next expected value).
258 *
259 * These expectations mean that we know that we have seen the
260 * transition from one expected value to another with a fairly
261 * high accuracy, and we didn't miss any events. We can thus
262 * use the TSC value at the transitions to calculate a pretty
 263 * good value for the TSC frequency.
264 */
265static inline int pit_expect_msb(unsigned char val)
266{
267 int count = 0;
268
269 for (count = 0; count < 50000; count++) {
270 /* Ignore LSB */
271 inb(0x42);
272 if (inb(0x42) != val)
273 break;
274 }
275 return count > 50;
276}
277
278/*
279 * How many MSB values do we want to see? We aim for a
280 * 15ms calibration, which assuming a 2us counter read
281 * error should give us roughly 150 ppm precision for
282 * the calibration.
283 */
284#define QUICK_PIT_MS 15
285#define QUICK_PIT_ITERATIONS (QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
286
287static unsigned long quick_pit_calibrate(void)
288{
289 /* Set the Gate high, disable speaker */
290 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
291
292 /*
293 * Counter 2, mode 0 (one-shot), binary count
294 *
295 * NOTE! Mode 2 decrements by two (and then the
296 * output is flipped each time, giving the same
297 * final output frequency as a decrement-by-one),
298 * so mode 0 is much better when looking at the
299 * individual counts.
300 */
301 outb(0xb0, 0x43);
302
303 /* Start at 0xffff */
304 outb(0xff, 0x42);
305 outb(0xff, 0x42);
306
307 if (pit_expect_msb(0xff)) {
308 int i;
309 u64 t1, t2, delta;
310 unsigned char expect = 0xfe;
311
312 t1 = get_cycles();
313 for (i = 0; i < QUICK_PIT_ITERATIONS; i++, expect--) {
314 if (!pit_expect_msb(expect))
315 goto failed;
316 }
317 t2 = get_cycles();
318
319 /*
320 * Make sure we can rely on the second TSC timestamp:
321 */
322 if (!pit_expect_msb(expect))
323 goto failed;
324
325 /*
326 * Ok, if we get here, then we've seen the
327 * MSB of the PIT decrement QUICK_PIT_ITERATIONS
328 * times, and each MSB had many hits, so we never
329 * had any sudden jumps.
330 *
331 * As a result, we can depend on there not being
332 * any odd delays anywhere, and the TSC reads are
333 * reliable.
334 *
335 * kHz = ticks / time-in-seconds / 1000;
336 * kHz = (t2 - t1) / (QPI * 256 / PIT_TICK_RATE) / 1000
337 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (QPI * 256 * 1000)
338 */
339 delta = (t2 - t1)*PIT_TICK_RATE;
340 do_div(delta, QUICK_PIT_ITERATIONS*256*1000);
341 printk("Fast TSC calibration using PIT\n");
342 return delta;
343 }
344failed:
345 return 0;
346}
184 347
185/** 348/**
186 * native_calibrate_tsc - calibrate the tsc on boot 349 * native_calibrate_tsc - calibrate the tsc on boot
187 */ 350 */
188unsigned long native_calibrate_tsc(void) 351unsigned long native_calibrate_tsc(void)
189{ 352{
190 u64 tsc1, tsc2, delta, pm1, pm2, hpet1, hpet2; 353 u64 tsc1, tsc2, delta, ref1, ref2;
191 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX; 354 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
192 unsigned long flags; 355 unsigned long flags, latch, ms, fast_calibrate;
193 int hpet = is_hpet_enabled(), i; 356 int hpet = is_hpet_enabled(), i, loopmin;
357
358 local_irq_save(flags);
359 fast_calibrate = quick_pit_calibrate();
360 local_irq_restore(flags);
361 if (fast_calibrate)
362 return fast_calibrate;
194 363
195 /* 364 /*
196 * Run 5 calibration loops to get the lowest frequency value 365 * Run 5 calibration loops to get the lowest frequency value
@@ -216,7 +385,13 @@ unsigned long native_calibrate_tsc(void)
216 * calibration delay loop as we have to wait for a certain 385 * calibration delay loop as we have to wait for a certain
217 * amount of time anyway. 386 * amount of time anyway.
218 */ 387 */
219 for (i = 0; i < 5; i++) { 388
389 /* Preset PIT loop values */
390 latch = CAL_LATCH;
391 ms = CAL_MS;
392 loopmin = CAL_PIT_LOOPS;
393
394 for (i = 0; i < 3; i++) {
220 unsigned long tsc_pit_khz; 395 unsigned long tsc_pit_khz;
221 396
222 /* 397 /*
@@ -226,16 +401,16 @@ unsigned long native_calibrate_tsc(void)
226 * read the end value. 401 * read the end value.
227 */ 402 */
228 local_irq_save(flags); 403 local_irq_save(flags);
229 tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL); 404 tsc1 = tsc_read_refs(&ref1, hpet);
230 tsc_pit_khz = pit_calibrate_tsc(); 405 tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
231 tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL); 406 tsc2 = tsc_read_refs(&ref2, hpet);
232 local_irq_restore(flags); 407 local_irq_restore(flags);
233 408
234 /* Pick the lowest PIT TSC calibration so far */ 409 /* Pick the lowest PIT TSC calibration so far */
235 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz); 410 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
236 411
237 /* hpet or pmtimer available ? */ 412 /* hpet or pmtimer available ? */
238 if (!hpet && !pm1 && !pm2) 413 if (!hpet && !ref1 && !ref2)
239 continue; 414 continue;
240 415
241 /* Check, whether the sampling was disturbed by an SMI */ 416 /* Check, whether the sampling was disturbed by an SMI */
@@ -243,23 +418,41 @@ unsigned long native_calibrate_tsc(void)
243 continue; 418 continue;
244 419
245 tsc2 = (tsc2 - tsc1) * 1000000LL; 420 tsc2 = (tsc2 - tsc1) * 1000000LL;
421 if (hpet)
422 tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
423 else
424 tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
246 425
247 if (hpet) { 426 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
248 if (hpet2 < hpet1) 427
249 hpet2 += 0x100000000ULL; 428 /* Check the reference deviation */
250 hpet2 -= hpet1; 429 delta = ((u64) tsc_pit_min) * 100;
251 tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); 430 do_div(delta, tsc_ref_min);
252 do_div(tsc1, 1000000); 431
253 } else { 432 /*
254 if (pm2 < pm1) 433 * If both calibration results are inside a 10% window
255 pm2 += (u64)ACPI_PM_OVRRUN; 434 * then we can be sure, that the calibration
256 pm2 -= pm1; 435 * succeeded. We break out of the loop right away. We
257 tsc1 = pm2 * 1000000000LL; 436 * use the reference value, as it is more precise.
258 do_div(tsc1, PMTMR_TICKS_PER_SEC); 437 */
438 if (delta >= 90 && delta <= 110) {
439 printk(KERN_INFO
440 "TSC: PIT calibration matches %s. %d loops\n",
441 hpet ? "HPET" : "PMTIMER", i + 1);
442 return tsc_ref_min;
259 } 443 }
260 444
261 do_div(tsc2, tsc1); 445 /*
262 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2); 446 * Check whether PIT failed more than once. This
447 * happens in virtualized environments. We need to
448 * give the virtual PC a slightly longer timeframe for
449 * the HPET/PMTIMER to make the result precise.
450 */
451 if (i == 1 && tsc_pit_min == ULONG_MAX) {
452 latch = CAL2_LATCH;
453 ms = CAL2_MS;
454 loopmin = CAL2_PIT_LOOPS;
455 }
263 } 456 }
264 457
265 /* 458 /*
@@ -270,7 +463,7 @@ unsigned long native_calibrate_tsc(void)
270 printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n"); 463 printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");
271 464
272 /* We don't have an alternative source, disable TSC */ 465 /* We don't have an alternative source, disable TSC */
273 if (!hpet && !pm1 && !pm2) { 466 if (!hpet && !ref1 && !ref2) {
274 printk("TSC: No reference (HPET/PMTIMER) available\n"); 467 printk("TSC: No reference (HPET/PMTIMER) available\n");
275 return 0; 468 return 0;
276 } 469 }
@@ -278,7 +471,7 @@ unsigned long native_calibrate_tsc(void)
278 /* The alternative source failed as well, disable TSC */ 471 /* The alternative source failed as well, disable TSC */
279 if (tsc_ref_min == ULONG_MAX) { 472 if (tsc_ref_min == ULONG_MAX) {
280 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration " 473 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
281 "failed due to SMI disturbance.\n"); 474 "failed.\n");
282 return 0; 475 return 0;
283 } 476 }
284 477
@@ -290,44 +483,25 @@ unsigned long native_calibrate_tsc(void)
290 } 483 }
291 484
292 /* We don't have an alternative source, use the PIT calibration value */ 485 /* We don't have an alternative source, use the PIT calibration value */
293 if (!hpet && !pm1 && !pm2) { 486 if (!hpet && !ref1 && !ref2) {
294 printk(KERN_INFO "TSC: Using PIT calibration value\n"); 487 printk(KERN_INFO "TSC: Using PIT calibration value\n");
295 return tsc_pit_min; 488 return tsc_pit_min;
296 } 489 }
297 490
298 /* The alternative source failed, use the PIT calibration value */ 491 /* The alternative source failed, use the PIT calibration value */
299 if (tsc_ref_min == ULONG_MAX) { 492 if (tsc_ref_min == ULONG_MAX) {
300 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed due " 493 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. "
301 "to SMI disturbance. Using PIT calibration\n"); 494 "Using PIT calibration\n");
302 return tsc_pit_min; 495 return tsc_pit_min;
303 } 496 }
304 497
305 /* Check the reference deviation */
306 delta = ((u64) tsc_pit_min) * 100;
307 do_div(delta, tsc_ref_min);
308
309 /*
310 * If both calibration results are inside a 5% window, the we
311 * use the lower frequency of those as it is probably the
312 * closest estimate.
313 */
314 if (delta >= 95 && delta <= 105) {
315 printk(KERN_INFO "TSC: PIT calibration confirmed by %s.\n",
316 hpet ? "HPET" : "PMTIMER");
317 printk(KERN_INFO "TSC: using %s calibration value\n",
318 tsc_pit_min <= tsc_ref_min ? "PIT" :
319 hpet ? "HPET" : "PMTIMER");
320 return tsc_pit_min <= tsc_ref_min ? tsc_pit_min : tsc_ref_min;
321 }
322
323 printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
324 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
325
326 /* 498 /*
327 * The calibration values differ too much. In doubt, we use 499 * The calibration values differ too much. In doubt, we use
328 * the PIT value as we know that there are PMTIMERs around 500 * the PIT value as we know that there are PMTIMERs around
329 * running at double speed. 501 * running at double speed. At least we let the user know:
330 */ 502 */
503 printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
504 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
331 printk(KERN_INFO "TSC: Using PIT calibration value\n"); 505 printk(KERN_INFO "TSC: Using PIT calibration value\n");
332 return tsc_pit_min; 506 return tsc_pit_min;
333} 507}
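
The tsc.c rework above boils down to a little integer arithmetic: the new fast path derives kHz as (t2 - t1) * PIT_TICK_RATE / (QUICK_PIT_ITERATIONS * 256 * 1000), and the slow path now trusts the HPET/PMTIMER reference only when the PIT result lands within roughly 10% of it. A stand-alone check of both calculations, with invented cycle counts standing in for real TSC reads:

#include <stdio.h>
#include <stdint.h>

#define PIT_TICK_RATE 1193182u		/* PIT input clock, Hz */
#define QUICK_PIT_MS 15
#define QUICK_PIT_ITERATIONS (QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

int main(void)
{
	/* Fast path: TSC delta measured across QUICK_PIT_ITERATIONS * 256 PIT ticks. */
	uint64_t t1 = 0, t2 = 36000000;	/* invented cycle counts for the example */
	uint64_t khz = (t2 - t1) * PIT_TICK_RATE /
		       ((uint64_t)QUICK_PIT_ITERATIONS * 256 * 1000);

	printf("fast PIT calibration: ~%llu kHz\n", (unsigned long long)khz);

	/* Slow path cross-check: PIT result must land within about 10 percent
	 * of the HPET/PMTIMER reference before the reference value is used. */
	uint64_t tsc_pit_min = 2394000, tsc_ref_min = 2400000;	/* kHz, invented */
	uint64_t delta = tsc_pit_min * 100 / tsc_ref_min;

	if (delta >= 90 && delta <= 110)
		printf("PIT calibration matches reference (%llu%%)\n",
		       (unsigned long long)delta);
	return 0;
}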
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 594ef47f0a63..61a97e616f70 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -25,45 +25,31 @@
25#include <asm/visws/cobalt.h> 25#include <asm/visws/cobalt.h>
26#include <asm/visws/piix4.h> 26#include <asm/visws/piix4.h>
27#include <asm/arch_hooks.h> 27#include <asm/arch_hooks.h>
28#include <asm/io_apic.h>
28#include <asm/fixmap.h> 29#include <asm/fixmap.h>
29#include <asm/reboot.h> 30#include <asm/reboot.h>
30#include <asm/setup.h> 31#include <asm/setup.h>
31#include <asm/e820.h> 32#include <asm/e820.h>
32#include <asm/smp.h>
33#include <asm/io.h> 33#include <asm/io.h>
34 34
35#include <mach_ipi.h> 35#include <mach_ipi.h>
36 36
37#include "mach_apic.h" 37#include "mach_apic.h"
38 38
39#include <linux/init.h>
40#include <linux/smp.h>
41
42#include <linux/kernel_stat.h> 39#include <linux/kernel_stat.h>
43#include <linux/interrupt.h>
44#include <linux/init.h>
45 40
46#include <asm/io.h>
47#include <asm/apic.h>
48#include <asm/i8259.h> 41#include <asm/i8259.h>
49#include <asm/irq_vectors.h> 42#include <asm/irq_vectors.h>
50#include <asm/visws/cobalt.h>
51#include <asm/visws/lithium.h> 43#include <asm/visws/lithium.h>
52#include <asm/visws/piix4.h>
53 44
54#include <linux/sched.h> 45#include <linux/sched.h>
55#include <linux/kernel.h> 46#include <linux/kernel.h>
56#include <linux/init.h>
57#include <linux/pci.h> 47#include <linux/pci.h>
58#include <linux/pci_ids.h> 48#include <linux/pci_ids.h>
59 49
60extern int no_broadcast; 50extern int no_broadcast;
61 51
62#include <asm/io.h>
63#include <asm/apic.h> 52#include <asm/apic.h>
64#include <asm/arch_hooks.h>
65#include <asm/visws/cobalt.h>
66#include <asm/visws/lithium.h>
67 53
68char visws_board_type = -1; 54char visws_board_type = -1;
69char visws_board_rev = -1; 55char visws_board_rev = -1;
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 61531d5c9507..8b6c393ab9fd 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -235,7 +235,7 @@ static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
235 const void *desc) 235 const void *desc)
236{ 236{
237 u32 *ldt_entry = (u32 *)desc; 237 u32 *ldt_entry = (u32 *)desc;
238 vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[1]); 238 vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
239} 239}
240 240
241static void vmi_load_sp0(struct tss_struct *tss, 241static void vmi_load_sp0(struct tss_struct *tss,
@@ -393,13 +393,13 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
393} 393}
394#endif 394#endif
395 395
396static void vmi_allocate_pte(struct mm_struct *mm, u32 pfn) 396static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
397{ 397{
398 vmi_set_page_type(pfn, VMI_PAGE_L1); 398 vmi_set_page_type(pfn, VMI_PAGE_L1);
399 vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); 399 vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
400} 400}
401 401
402static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn) 402static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
403{ 403{
404 /* 404 /*
405 * This call comes in very early, before mem_map is setup. 405 * This call comes in very early, before mem_map is setup.
@@ -410,20 +410,20 @@ static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn)
410 vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); 410 vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
411} 411}
412 412
413static void vmi_allocate_pmd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count) 413static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
414{ 414{
415 vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE); 415 vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
416 vmi_check_page_type(clonepfn, VMI_PAGE_L2); 416 vmi_check_page_type(clonepfn, VMI_PAGE_L2);
417 vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); 417 vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
418} 418}
419 419
420static void vmi_release_pte(u32 pfn) 420static void vmi_release_pte(unsigned long pfn)
421{ 421{
422 vmi_ops.release_page(pfn, VMI_PAGE_L1); 422 vmi_ops.release_page(pfn, VMI_PAGE_L1);
423 vmi_set_page_type(pfn, VMI_PAGE_NORMAL); 423 vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
424} 424}
425 425
426static void vmi_release_pmd(u32 pfn) 426static void vmi_release_pmd(unsigned long pfn)
427{ 427{
428 vmi_ops.release_page(pfn, VMI_PAGE_L2); 428 vmi_ops.release_page(pfn, VMI_PAGE_L2);
429 vmi_set_page_type(pfn, VMI_PAGE_NORMAL); 429 vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 0c029e8959c7..7766d36983fc 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -61,7 +61,7 @@ static void vsmp_irq_enable(void)
61 native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); 61 native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
62} 62}
63 63
64static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf, 64static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
65 unsigned long addr, unsigned len) 65 unsigned long addr, unsigned len)
66{ 66{
67 switch (type) { 67 switch (type) {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0bfe2bd305eb..3da2508eb22a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -711,6 +711,10 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
711 u64 *spte; 711 u64 *spte;
712 int young = 0; 712 int young = 0;
713 713
714 /* always return old for EPT */
715 if (!shadow_accessed_mask)
716 return 0;
717
714 spte = rmap_next(kvm, rmapp, NULL); 718 spte = rmap_next(kvm, rmapp, NULL);
715 while (spte) { 719 while (spte) {
716 int _young; 720 int _young;
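
The mmu.c guard above makes the page-aging callback report pages as old whenever the hardware offers no accessed bit to consult (the EPT case here, where vmx.c stops pretending to have accessed/dirty bits later in this diff). A small sketch of that guard over an array of shadow PTEs; accessed_mask, sptes and the helper name are invented for illustration:

#include <stdio.h>

/*
 * Return 1 if any shadow PTE has its accessed bit set (and clear it),
 * 0 otherwise.  Without an accessed bit we can only ever report "old".
 */
static int age_sptes(unsigned long long *sptes, int n,
		     unsigned long long accessed_mask)
{
	int young = 0;

	if (!accessed_mask)		/* e.g. EPT with no A bit: always old */
		return 0;

	for (int i = 0; i < n; i++) {
		if (sptes[i] & accessed_mask) {
			young = 1;
			sptes[i] &= ~accessed_mask;
		}
	}
	return young;
}

int main(void)
{
	unsigned long long sptes[2] = { 0x1001, 0x0001 };	/* bit 12 as a fake A bit */

	printf("young=%d\n", age_sptes(sptes, 2, 1ULL << 12));
	printf("young=%d\n", age_sptes(sptes, 2, 0));		/* no A bit available */
	return 0;
}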
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e2ee264740c7..8233b86c778c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -62,6 +62,7 @@ static int npt = 1;
62module_param(npt, int, S_IRUGO); 62module_param(npt, int, S_IRUGO);
63 63
64static void kvm_reput_irq(struct vcpu_svm *svm); 64static void kvm_reput_irq(struct vcpu_svm *svm);
65static void svm_flush_tlb(struct kvm_vcpu *vcpu);
65 66
66static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) 67static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
67{ 68{
@@ -878,6 +879,10 @@ set:
878static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 879static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
879{ 880{
880 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE; 881 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
882 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
883
884 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
885 force_new_asid(vcpu);
881 886
882 vcpu->arch.cr4 = cr4; 887 vcpu->arch.cr4 = cr4;
883 if (!npt_enabled) 888 if (!npt_enabled)
@@ -1027,6 +1032,13 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1027 KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code, 1032 KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
1028 (u32)fault_address, (u32)(fault_address >> 32), 1033 (u32)fault_address, (u32)(fault_address >> 32),
1029 handler); 1034 handler);
1035 /*
 1036 * FIXME: This shouldn't be necessary here, but there is a flush
1037 * missing in the MMU code. Until we find this bug, flush the
1038 * complete TLB here on an NPF
1039 */
1040 if (npt_enabled)
1041 svm_flush_tlb(&svm->vcpu);
1030 1042
1031 if (event_injection) 1043 if (event_injection)
1032 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); 1044 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
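
The svm.c change above forces a fresh ASID, and with it a full guest TLB flush, whenever the guest toggles CR4.PGE with nested paging enabled, because flipping the global-page enable invalidates global translations. A minimal sketch of that change detection; X86_CR4_PGE is the architectural bit, while flush_guest_tlb() is a placeholder for force_new_asid():

#include <stdio.h>

#define X86_CR4_PGE (1ul << 7)		/* global-page enable bit in CR4 */

static void flush_guest_tlb(void) { puts("flushing guest TLB (new ASID)"); }

static void set_cr4(unsigned long *cr4, unsigned long new_cr4)
{
	/* Toggling PGE changes how global translations are cached, so any
	 * change of that bit must be paired with a full TLB flush. */
	if ((*cr4 ^ new_cr4) & X86_CR4_PGE)
		flush_guest_tlb();
	*cr4 = new_cr4;
}

int main(void)
{
	unsigned long cr4 = 0;

	set_cr4(&cr4, X86_CR4_PGE);	/* enables PGE -> flush */
	set_cr4(&cr4, X86_CR4_PGE);	/* no change -> no flush */
	return 0;
}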
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2a69773e3b26..7041cc52b562 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3301,8 +3301,7 @@ static int __init vmx_init(void)
3301 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | 3301 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
3302 VMX_EPT_WRITABLE_MASK | 3302 VMX_EPT_WRITABLE_MASK |
3303 VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); 3303 VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
3304 kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK, 3304 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
3305 VMX_EPT_FAKE_DIRTY_MASK, 0ull,
3306 VMX_EPT_EXECUTABLE_MASK); 3305 VMX_EPT_EXECUTABLE_MASK);
3307 kvm_enable_tdp(); 3306 kvm_enable_tdp();
3308 } else 3307 } else
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index b32d4e5b123d..17e25995b65b 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -355,8 +355,6 @@ enum vmcs_field {
355#define VMX_EPT_READABLE_MASK 0x1ull 355#define VMX_EPT_READABLE_MASK 0x1ull
356#define VMX_EPT_WRITABLE_MASK 0x2ull 356#define VMX_EPT_WRITABLE_MASK 0x2ull
357#define VMX_EPT_EXECUTABLE_MASK 0x4ull 357#define VMX_EPT_EXECUTABLE_MASK 0x4ull
358#define VMX_EPT_FAKE_ACCESSED_MASK (1ull << 62)
359#define VMX_EPT_FAKE_DIRTY_MASK (1ull << 63)
360 358
361#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul 359#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
362 360
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
index 01b868ba82f8..321cf720dbb6 100644
--- a/arch/x86/lib/msr-on-cpu.c
+++ b/arch/x86/lib/msr-on-cpu.c
@@ -16,37 +16,46 @@ static void __rdmsr_on_cpu(void *info)
16 rdmsr(rv->msr_no, rv->l, rv->h); 16 rdmsr(rv->msr_no, rv->l, rv->h);
17} 17}
18 18
19static void __rdmsr_safe_on_cpu(void *info) 19static void __wrmsr_on_cpu(void *info)
20{ 20{
21 struct msr_info *rv = info; 21 struct msr_info *rv = info;
22 22
23 rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h); 23 wrmsr(rv->msr_no, rv->l, rv->h);
24} 24}
25 25
26static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) 26int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
27{ 27{
28 int err = 0; 28 int err;
29 struct msr_info rv; 29 struct msr_info rv;
30 30
31 rv.msr_no = msr_no; 31 rv.msr_no = msr_no;
32 if (safe) { 32 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
33 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
34 &rv, 1);
35 err = err ? err : rv.err;
36 } else {
37 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
38 }
39 *l = rv.l; 33 *l = rv.l;
40 *h = rv.h; 34 *h = rv.h;
41 35
42 return err; 36 return err;
43} 37}
44 38
45static void __wrmsr_on_cpu(void *info) 39int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
40{
41 int err;
42 struct msr_info rv;
43
44 rv.msr_no = msr_no;
45 rv.l = l;
46 rv.h = h;
47 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
48
49 return err;
50}
51
52/* These "safe" variants are slower and should be used when the target MSR
53 may not actually exist. */
54static void __rdmsr_safe_on_cpu(void *info)
46{ 55{
47 struct msr_info *rv = info; 56 struct msr_info *rv = info;
48 57
49 wrmsr(rv->msr_no, rv->l, rv->h); 58 rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
50} 59}
51 60
52static void __wrmsr_safe_on_cpu(void *info) 61static void __wrmsr_safe_on_cpu(void *info)
@@ -56,45 +65,30 @@ static void __wrmsr_safe_on_cpu(void *info)
56 rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h); 65 rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h);
57} 66}
58 67
59static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) 68int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
60{ 69{
61 int err = 0; 70 int err;
62 struct msr_info rv; 71 struct msr_info rv;
63 72
64 rv.msr_no = msr_no; 73 rv.msr_no = msr_no;
65 rv.l = l; 74 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
66 rv.h = h; 75 *l = rv.l;
67 if (safe) { 76 *h = rv.h;
68 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
69 &rv, 1);
70 err = err ? err : rv.err;
71 } else {
72 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
73 }
74
75 return err;
76}
77 77
78int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 78 return err ? err : rv.err;
79{
80 return _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
81} 79}
82 80
83int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
84{
85 return _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
86}
87
88/* These "safe" variants are slower and should be used when the target MSR
89 may not actually exist. */
90int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 81int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
91{ 82{
92 return _wrmsr_on_cpu(cpu, msr_no, l, h, 1); 83 int err;
93} 84 struct msr_info rv;
94 85
95int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 86 rv.msr_no = msr_no;
96{ 87 rv.l = l;
97 return _rdmsr_on_cpu(cpu, msr_no, l, h, 1); 88 rv.h = h;
89 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
90
91 return err ? err : rv.err;
98} 92}
99 93
100EXPORT_SYMBOL(rdmsr_on_cpu); 94EXPORT_SYMBOL(rdmsr_on_cpu);
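
The msr-on-cpu.c rework drops the shared _rdmsr_on_cpu()/_wrmsr_on_cpu() helpers and open-codes the four wrappers, with the safe variants folding the MSR access result into the return value alongside the cross-call error. A stand-alone sketch of that pattern; run_on_cpu() stands in for smp_call_function_single() and the MSR access itself is faked:

#include <stdio.h>
#include <stdint.h>

struct msr_info {
	uint32_t msr_no;
	uint32_t l, h;
	int err;			/* result of the (possibly faulting) access */
};

/* Stand-in for smp_call_function_single(): run fn(info) "on" the target CPU. */
static int run_on_cpu(unsigned int cpu, void (*fn)(void *), void *info)
{
	(void)cpu;
	fn(info);
	return 0;			/* 0 = the cross-call itself succeeded */
}

static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->l = 0x12345678;		/* fake MSR contents for the example */
	rv->h = 0;
	rv->err = (rv->msr_no == 0xdead) ? -5 : 0;	/* pretend 0xdead faults */
}

static int rdmsr_safe_on_cpu(unsigned int cpu, uint32_t msr_no,
			     uint32_t *l, uint32_t *h)
{
	struct msr_info rv = { .msr_no = msr_no };
	int err = run_on_cpu(cpu, __rdmsr_safe_on_cpu, &rv);

	*l = rv.l;
	*h = rv.h;
	return err ? err : rv.err;	/* cross-call error first, then MSR error */
}

int main(void)
{
	uint32_t l, h;

	printf("msr 0x10:   err=%d l=%#x\n", rdmsr_safe_on_cpu(0, 0x10, &l, &h), l);
	printf("msr 0xdead: err=%d\n", rdmsr_safe_on_cpu(0, 0xdead, &l, &h));
	return 0;
}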
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index 94972e7c094d..82004d2bf05e 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -22,7 +22,7 @@ char *strcpy(char *dest, const char *src)
22 "testb %%al,%%al\n\t" 22 "testb %%al,%%al\n\t"
23 "jne 1b" 23 "jne 1b"
24 : "=&S" (d0), "=&D" (d1), "=&a" (d2) 24 : "=&S" (d0), "=&D" (d1), "=&a" (d2)
25 :"0" (src), "1" (dest) : "memory"); 25 : "0" (src), "1" (dest) : "memory");
26 return dest; 26 return dest;
27} 27}
28EXPORT_SYMBOL(strcpy); 28EXPORT_SYMBOL(strcpy);
@@ -42,7 +42,7 @@ char *strncpy(char *dest, const char *src, size_t count)
42 "stosb\n" 42 "stosb\n"
43 "2:" 43 "2:"
44 : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) 44 : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
45 :"0" (src), "1" (dest), "2" (count) : "memory"); 45 : "0" (src), "1" (dest), "2" (count) : "memory");
46 return dest; 46 return dest;
47} 47}
48EXPORT_SYMBOL(strncpy); 48EXPORT_SYMBOL(strncpy);
@@ -60,7 +60,7 @@ char *strcat(char *dest, const char *src)
60 "testb %%al,%%al\n\t" 60 "testb %%al,%%al\n\t"
61 "jne 1b" 61 "jne 1b"
62 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) 62 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
63 : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu): "memory"); 63 : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu) : "memory");
64 return dest; 64 return dest;
65} 65}
66EXPORT_SYMBOL(strcat); 66EXPORT_SYMBOL(strcat);
@@ -105,9 +105,9 @@ int strcmp(const char *cs, const char *ct)
105 "2:\tsbbl %%eax,%%eax\n\t" 105 "2:\tsbbl %%eax,%%eax\n\t"
106 "orb $1,%%al\n" 106 "orb $1,%%al\n"
107 "3:" 107 "3:"
108 :"=a" (res), "=&S" (d0), "=&D" (d1) 108 : "=a" (res), "=&S" (d0), "=&D" (d1)
109 :"1" (cs), "2" (ct) 109 : "1" (cs), "2" (ct)
110 :"memory"); 110 : "memory");
111 return res; 111 return res;
112} 112}
113EXPORT_SYMBOL(strcmp); 113EXPORT_SYMBOL(strcmp);
@@ -130,9 +130,9 @@ int strncmp(const char *cs, const char *ct, size_t count)
130 "3:\tsbbl %%eax,%%eax\n\t" 130 "3:\tsbbl %%eax,%%eax\n\t"
131 "orb $1,%%al\n" 131 "orb $1,%%al\n"
132 "4:" 132 "4:"
133 :"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) 133 : "=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
134 :"1" (cs), "2" (ct), "3" (count) 134 : "1" (cs), "2" (ct), "3" (count)
135 :"memory"); 135 : "memory");
136 return res; 136 return res;
137} 137}
138EXPORT_SYMBOL(strncmp); 138EXPORT_SYMBOL(strncmp);
@@ -152,9 +152,9 @@ char *strchr(const char *s, int c)
152 "movl $1,%1\n" 152 "movl $1,%1\n"
153 "2:\tmovl %1,%0\n\t" 153 "2:\tmovl %1,%0\n\t"
154 "decl %0" 154 "decl %0"
155 :"=a" (res), "=&S" (d0) 155 : "=a" (res), "=&S" (d0)
156 :"1" (s), "0" (c) 156 : "1" (s), "0" (c)
157 :"memory"); 157 : "memory");
158 return res; 158 return res;
159} 159}
160EXPORT_SYMBOL(strchr); 160EXPORT_SYMBOL(strchr);
@@ -169,9 +169,9 @@ size_t strlen(const char *s)
169 "scasb\n\t" 169 "scasb\n\t"
170 "notl %0\n\t" 170 "notl %0\n\t"
171 "decl %0" 171 "decl %0"
172 :"=c" (res), "=&D" (d0) 172 : "=c" (res), "=&D" (d0)
173 :"1" (s), "a" (0), "0" (0xffffffffu) 173 : "1" (s), "a" (0), "0" (0xffffffffu)
174 :"memory"); 174 : "memory");
175 return res; 175 return res;
176} 176}
177EXPORT_SYMBOL(strlen); 177EXPORT_SYMBOL(strlen);
@@ -189,9 +189,9 @@ void *memchr(const void *cs, int c, size_t count)
189 "je 1f\n\t" 189 "je 1f\n\t"
190 "movl $1,%0\n" 190 "movl $1,%0\n"
191 "1:\tdecl %0" 191 "1:\tdecl %0"
192 :"=D" (res), "=&c" (d0) 192 : "=D" (res), "=&c" (d0)
193 :"a" (c), "0" (cs), "1" (count) 193 : "a" (c), "0" (cs), "1" (count)
194 :"memory"); 194 : "memory");
195 return res; 195 return res;
196} 196}
197EXPORT_SYMBOL(memchr); 197EXPORT_SYMBOL(memchr);
@@ -228,9 +228,9 @@ size_t strnlen(const char *s, size_t count)
228 "cmpl $-1,%1\n\t" 228 "cmpl $-1,%1\n\t"
229 "jne 1b\n" 229 "jne 1b\n"
230 "3:\tsubl %2,%0" 230 "3:\tsubl %2,%0"
231 :"=a" (res), "=&d" (d0) 231 : "=a" (res), "=&d" (d0)
232 :"c" (s), "1" (count) 232 : "c" (s), "1" (count)
233 :"memory"); 233 : "memory");
234 return res; 234 return res;
235} 235}
236EXPORT_SYMBOL(strnlen); 236EXPORT_SYMBOL(strnlen);
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c
index 42e8a50303f3..8e2d55f754bf 100644
--- a/arch/x86/lib/strstr_32.c
+++ b/arch/x86/lib/strstr_32.c
@@ -23,9 +23,9 @@ __asm__ __volatile__(
23 "jne 1b\n\t" 23 "jne 1b\n\t"
24 "xorl %%eax,%%eax\n\t" 24 "xorl %%eax,%%eax\n\t"
25 "2:" 25 "2:"
26 :"=a" (__res), "=&c" (d0), "=&S" (d1) 26 : "=a" (__res), "=&c" (d0), "=&S" (d1)
27 :"0" (0), "1" (0xffffffff), "2" (cs), "g" (ct) 27 : "0" (0), "1" (0xffffffff), "2" (cs), "g" (ct)
28 :"dx", "di"); 28 : "dx", "di");
29return __res; 29return __res;
30} 30}
31 31
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 62fa440678d8..847c164725f4 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -328,7 +328,7 @@ void __init initmem_init(unsigned long start_pfn,
328 328
329 get_memcfg_numa(); 329 get_memcfg_numa();
330 330
331 kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE); 331 kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
332 332
333 kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); 333 kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
334 do { 334 do {
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a20d1fa64b4e..e7277cbcfb40 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -148,8 +148,8 @@ static void note_page(struct seq_file *m, struct pg_state *st,
148 * we have now. "break" is either changing perms, levels or 148 * we have now. "break" is either changing perms, levels or
149 * address space marker. 149 * address space marker.
150 */ 150 */
151 prot = pgprot_val(new_prot) & ~(PTE_PFN_MASK); 151 prot = pgprot_val(new_prot) & PTE_FLAGS_MASK;
152 cur = pgprot_val(st->current_prot) & ~(PTE_PFN_MASK); 152 cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK;
153 153
154 if (!st->level) { 154 if (!st->level) {
155 /* First entry */ 155 /* First entry */
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4974e97dedfe..c3789bb19308 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -195,11 +195,30 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
195 pgd_t *pgd; 195 pgd_t *pgd;
196 pmd_t *pmd; 196 pmd_t *pmd;
197 pte_t *pte; 197 pte_t *pte;
198 unsigned pages_2m = 0, pages_4k = 0; 198 unsigned pages_2m, pages_4k;
199 int mapping_iter;
200
201 /*
202 * First iteration will setup identity mapping using large/small pages
203 * based on use_pse, with other attributes same as set by
204 * the early code in head_32.S
205 *
206 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
207 * as desired for the kernel identity mapping.
208 *
209 * This two pass mechanism conforms to the TLB app note which says:
210 *
211 * "Software should not write to a paging-structure entry in a way
212 * that would change, for any linear address, both the page size
213 * and either the page frame or attributes."
214 */
215 mapping_iter = 1;
199 216
200 if (!cpu_has_pse) 217 if (!cpu_has_pse)
201 use_pse = 0; 218 use_pse = 0;
202 219
220repeat:
221 pages_2m = pages_4k = 0;
203 pfn = start_pfn; 222 pfn = start_pfn;
204 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); 223 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
205 pgd = pgd_base + pgd_idx; 224 pgd = pgd_base + pgd_idx;
@@ -225,6 +244,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
225 if (use_pse) { 244 if (use_pse) {
226 unsigned int addr2; 245 unsigned int addr2;
227 pgprot_t prot = PAGE_KERNEL_LARGE; 246 pgprot_t prot = PAGE_KERNEL_LARGE;
247 /*
248 * first pass will use the same initial
249 * identity mapping attribute + _PAGE_PSE.
250 */
251 pgprot_t init_prot =
252 __pgprot(PTE_IDENT_ATTR |
253 _PAGE_PSE);
228 254
229 addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + 255 addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
230 PAGE_OFFSET + PAGE_SIZE-1; 256 PAGE_OFFSET + PAGE_SIZE-1;
@@ -234,7 +260,10 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
234 prot = PAGE_KERNEL_LARGE_EXEC; 260 prot = PAGE_KERNEL_LARGE_EXEC;
235 261
236 pages_2m++; 262 pages_2m++;
237 set_pmd(pmd, pfn_pmd(pfn, prot)); 263 if (mapping_iter == 1)
264 set_pmd(pmd, pfn_pmd(pfn, init_prot));
265 else
266 set_pmd(pmd, pfn_pmd(pfn, prot));
238 267
239 pfn += PTRS_PER_PTE; 268 pfn += PTRS_PER_PTE;
240 continue; 269 continue;
@@ -246,17 +275,43 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
246 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn; 275 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
247 pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { 276 pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
248 pgprot_t prot = PAGE_KERNEL; 277 pgprot_t prot = PAGE_KERNEL;
278 /*
279 * first pass will use the same initial
280 * identity mapping attribute.
281 */
282 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
249 283
250 if (is_kernel_text(addr)) 284 if (is_kernel_text(addr))
251 prot = PAGE_KERNEL_EXEC; 285 prot = PAGE_KERNEL_EXEC;
252 286
253 pages_4k++; 287 pages_4k++;
254 set_pte(pte, pfn_pte(pfn, prot)); 288 if (mapping_iter == 1)
289 set_pte(pte, pfn_pte(pfn, init_prot));
290 else
291 set_pte(pte, pfn_pte(pfn, prot));
255 } 292 }
256 } 293 }
257 } 294 }
258 update_page_count(PG_LEVEL_2M, pages_2m); 295 if (mapping_iter == 1) {
259 update_page_count(PG_LEVEL_4K, pages_4k); 296 /*
297 * update direct mapping page count only in the first
298 * iteration.
299 */
300 update_page_count(PG_LEVEL_2M, pages_2m);
301 update_page_count(PG_LEVEL_4K, pages_4k);
302
303 /*
304 * local global flush tlb, which will flush the previous
305 * mappings present in both small and large page TLB's.
306 */
307 __flush_tlb_all();
308
309 /*
310 * Second iteration will set the actual desired PTE attributes.
311 */
312 mapping_iter = 2;
313 goto repeat;
314 }
260} 315}
261 316
262/* 317/*
@@ -459,11 +514,7 @@ static void __init pagetable_init(void)
459{ 514{
460 pgd_t *pgd_base = swapper_pg_dir; 515 pgd_t *pgd_base = swapper_pg_dir;
461 516
462 paravirt_pagetable_setup_start(pgd_base);
463
464 permanent_kmaps_init(pgd_base); 517 permanent_kmaps_init(pgd_base);
465
466 paravirt_pagetable_setup_done(pgd_base);
467} 518}
468 519
469#ifdef CONFIG_ACPI_SLEEP 520#ifdef CONFIG_ACPI_SLEEP
@@ -723,7 +774,7 @@ void __init setup_bootmem_allocator(void)
723 after_init_bootmem = 1; 774 after_init_bootmem = 1;
724} 775}
725 776
726static void __init find_early_table_space(unsigned long end) 777static void __init find_early_table_space(unsigned long end, int use_pse)
727{ 778{
728 unsigned long puds, pmds, ptes, tables, start; 779 unsigned long puds, pmds, ptes, tables, start;
729 780
@@ -733,7 +784,7 @@ static void __init find_early_table_space(unsigned long end)
733 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; 784 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
734 tables += PAGE_ALIGN(pmds * sizeof(pmd_t)); 785 tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
735 786
736 if (cpu_has_pse) { 787 if (use_pse) {
737 unsigned long extra; 788 unsigned long extra;
738 789
739 extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT); 790 extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
@@ -773,12 +824,22 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
773 pgd_t *pgd_base = swapper_pg_dir; 824 pgd_t *pgd_base = swapper_pg_dir;
774 unsigned long start_pfn, end_pfn; 825 unsigned long start_pfn, end_pfn;
775 unsigned long big_page_start; 826 unsigned long big_page_start;
827#ifdef CONFIG_DEBUG_PAGEALLOC
828 /*
829 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
830 * This will simplify cpa(), which otherwise needs to support splitting
831 * large pages into small in interrupt context, etc.
832 */
833 int use_pse = 0;
834#else
835 int use_pse = cpu_has_pse;
836#endif
776 837
777 /* 838 /*
778 * Find space for the kernel direct mapping tables. 839 * Find space for the kernel direct mapping tables.
779 */ 840 */
780 if (!after_init_bootmem) 841 if (!after_init_bootmem)
781 find_early_table_space(end); 842 find_early_table_space(end, use_pse);
782 843
783#ifdef CONFIG_X86_PAE 844#ifdef CONFIG_X86_PAE
784 set_nx(); 845 set_nx();
@@ -824,7 +885,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
824 end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); 885 end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
825 if (start_pfn < end_pfn) 886 if (start_pfn < end_pfn)
826 kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 887 kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
827 cpu_has_pse); 888 use_pse);
828 889
829 /* tail is not big page alignment ? */ 890 /* tail is not big page alignment ? */
830 start_pfn = end_pfn; 891 start_pfn = end_pfn;
@@ -987,7 +1048,6 @@ void __init mem_init(void)
987 if (boot_cpu_data.wp_works_ok < 0) 1048 if (boot_cpu_data.wp_works_ok < 0)
988 test_wp_bit(); 1049 test_wp_bit();
989 1050
990 cpa_init();
991 save_pg_dir(); 1051 save_pg_dir();
992 zap_low_mappings(); 1052 zap_low_mappings();
993} 1053}
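
The init_32.c change above builds the kernel identity mapping in two passes so that no page-table entry ever changes page size and attributes in a single write: pass one installs everything with the early boot attributes, a full TLB flush follows, and pass two rewrites the entries with the final protections. A compact model of that repeat loop over a handful of fake PTEs; the attribute values are invented for the example:

#include <stdio.h>

#define INIT_ATTR  0x003	/* invented: early "present + writable" bits */
#define FINAL_ATTR 0x163	/* invented: final attribute bits */

static void flush_tlb_all(void) { puts("flush TLB between passes"); }

int main(void)
{
	unsigned long pte[4] = { 0 };
	int pass = 1;

repeat:
	for (int i = 0; i < 4; i++)
		pte[i] = ((unsigned long)(i + 1) << 12) |
			 (pass == 1 ? INIT_ATTR : FINAL_ATTR);

	if (pass == 1) {
		/* Never change size and attributes in one write: settle the
		 * first mapping, flush, then rewrite with the final bits. */
		flush_tlb_all();
		pass = 2;
		goto repeat;
	}

	for (int i = 0; i < 4; i++)
		printf("pte[%d] = %#lx\n", i, pte[i]);
	return 0;
}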
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 1f6806b62eb8..83e13f2d53d2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -281,7 +281,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
281void __init cleanup_highmap(void) 281void __init cleanup_highmap(void)
282{ 282{
283 unsigned long vaddr = __START_KERNEL_map; 283 unsigned long vaddr = __START_KERNEL_map;
284 unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1; 284 unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
285 pmd_t *pmd = level2_kernel_pgt; 285 pmd_t *pmd = level2_kernel_pgt;
286 pmd_t *last_pmd = pmd + PTRS_PER_PMD; 286 pmd_t *last_pmd = pmd + PTRS_PER_PMD;
287 287
@@ -327,7 +327,8 @@ static __ref void unmap_low_page(void *adr)
327} 327}
328 328
329static unsigned long __meminit 329static unsigned long __meminit
330phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end) 330phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
331 pgprot_t prot)
331{ 332{
332 unsigned pages = 0; 333 unsigned pages = 0;
333 unsigned long last_map_addr = end; 334 unsigned long last_map_addr = end;
@@ -345,36 +346,43 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
345 break; 346 break;
346 } 347 }
347 348
349 /*
350 * We will re-use the existing mapping.
351 * Xen for example has some special requirements, like mapping
352 * pagetable pages as RO. So assume someone who pre-setup
353 * these mappings are more intelligent.
354 */
348 if (pte_val(*pte)) 355 if (pte_val(*pte))
349 continue; 356 continue;
350 357
351 if (0) 358 if (0)
352 printk(" pte=%p addr=%lx pte=%016lx\n", 359 printk(" pte=%p addr=%lx pte=%016lx\n",
353 pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte); 360 pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
354 set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL));
355 last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
356 pages++; 361 pages++;
362 set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
363 last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
357 } 364 }
365
358 update_page_count(PG_LEVEL_4K, pages); 366 update_page_count(PG_LEVEL_4K, pages);
359 367
360 return last_map_addr; 368 return last_map_addr;
361} 369}
362 370
363static unsigned long __meminit 371static unsigned long __meminit
364phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end) 372phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end,
373 pgprot_t prot)
365{ 374{
366 pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd); 375 pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
367 376
368 return phys_pte_init(pte, address, end); 377 return phys_pte_init(pte, address, end, prot);
369} 378}
370 379
371static unsigned long __meminit 380static unsigned long __meminit
372phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, 381phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
373 unsigned long page_size_mask) 382 unsigned long page_size_mask, pgprot_t prot)
374{ 383{
375 unsigned long pages = 0; 384 unsigned long pages = 0;
376 unsigned long last_map_addr = end; 385 unsigned long last_map_addr = end;
377 unsigned long start = address;
378 386
379 int i = pmd_index(address); 387 int i = pmd_index(address);
380 388
@@ -382,6 +390,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
382 unsigned long pte_phys; 390 unsigned long pte_phys;
383 pmd_t *pmd = pmd_page + pmd_index(address); 391 pmd_t *pmd = pmd_page + pmd_index(address);
384 pte_t *pte; 392 pte_t *pte;
393 pgprot_t new_prot = prot;
385 394
386 if (address >= end) { 395 if (address >= end) {
387 if (!after_bootmem) { 396 if (!after_bootmem) {
@@ -395,27 +404,40 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
395 if (!pmd_large(*pmd)) { 404 if (!pmd_large(*pmd)) {
396 spin_lock(&init_mm.page_table_lock); 405 spin_lock(&init_mm.page_table_lock);
397 last_map_addr = phys_pte_update(pmd, address, 406 last_map_addr = phys_pte_update(pmd, address,
398 end); 407 end, prot);
399 spin_unlock(&init_mm.page_table_lock); 408 spin_unlock(&init_mm.page_table_lock);
409 continue;
400 } 410 }
401 /* Count entries we're using from level2_ident_pgt */ 411 /*
402 if (start == 0) 412 * If we are ok with PG_LEVEL_2M mapping, then we will
403 pages++; 413 * use the existing mapping,
404 continue; 414 *
415 * Otherwise, we will split the large page mapping but
416 * use the same existing protection bits except for
417 * large page, so that we don't violate Intel's TLB
418 * Application note (317080) which says, while changing
419 * the page sizes, new and old translations should
420 * not differ with respect to page frame and
421 * attributes.
422 */
423 if (page_size_mask & (1 << PG_LEVEL_2M))
424 continue;
425 new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
405 } 426 }
406 427
407 if (page_size_mask & (1<<PG_LEVEL_2M)) { 428 if (page_size_mask & (1<<PG_LEVEL_2M)) {
408 pages++; 429 pages++;
409 spin_lock(&init_mm.page_table_lock); 430 spin_lock(&init_mm.page_table_lock);
410 set_pte((pte_t *)pmd, 431 set_pte((pte_t *)pmd,
411 pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); 432 pfn_pte(address >> PAGE_SHIFT,
433 __pgprot(pgprot_val(prot) | _PAGE_PSE)));
412 spin_unlock(&init_mm.page_table_lock); 434 spin_unlock(&init_mm.page_table_lock);
413 last_map_addr = (address & PMD_MASK) + PMD_SIZE; 435 last_map_addr = (address & PMD_MASK) + PMD_SIZE;
414 continue; 436 continue;
415 } 437 }
416 438
417 pte = alloc_low_page(&pte_phys); 439 pte = alloc_low_page(&pte_phys);
418 last_map_addr = phys_pte_init(pte, address, end); 440 last_map_addr = phys_pte_init(pte, address, end, new_prot);
419 unmap_low_page(pte); 441 unmap_low_page(pte);
420 442
421 spin_lock(&init_mm.page_table_lock); 443 spin_lock(&init_mm.page_table_lock);
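The split path above rebuilds new_prot from the existing large-page entry with pte_pgprot(pte_clrhuge(*pmd)), so the 4k PTEs created by a split keep the old attributes and only drop the PSE (large page) bit, as Intel's application note 317080 requires. A minimal userspace sketch of that bit manipulation follows; the flag values mirror the x86 layout, but the helper name and the sample entry are made up for illustration, not taken from this patch.

#include <stdio.h>
#include <stdint.h>

/* Illustrative x86-style PTE flag bits; values mirror the hardware layout. */
#define PTE_PRESENT (1ULL << 0)
#define PTE_RW      (1ULL << 1)
#define PTE_PSE     (1ULL << 7)   /* large-page bit */
#define PTE_NX      (1ULL << 63)
#define ADDR_MASK   0x000ffffffffff000ULL

/* Model of pte_pgprot(pte_clrhuge(x)): keep every attribute except PSE. */
static uint64_t prot_from_large_pte(uint64_t large_pte)
{
    return (large_pte & ~ADDR_MASK) & ~PTE_PSE;
}

int main(void)
{
    uint64_t pmd_entry = 0x200000ULL | PTE_PRESENT | PTE_RW | PTE_PSE | PTE_NX;
    uint64_t new_prot  = prot_from_large_pte(pmd_entry);

    /* The 4k entries built from new_prot differ only in the PSE bit. */
    printf("large entry: %#llx\n", (unsigned long long)pmd_entry);
    printf("new_prot   : %#llx\n", (unsigned long long)new_prot);
    return 0;
}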
@@ -428,12 +450,12 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
428 450
429static unsigned long __meminit 451static unsigned long __meminit
430phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end, 452phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
431 unsigned long page_size_mask) 453 unsigned long page_size_mask, pgprot_t prot)
432{ 454{
433 pmd_t *pmd = pmd_offset(pud, 0); 455 pmd_t *pmd = pmd_offset(pud, 0);
434 unsigned long last_map_addr; 456 unsigned long last_map_addr;
435 457
436 last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask); 458 last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot);
437 __flush_tlb_all(); 459 __flush_tlb_all();
438 return last_map_addr; 460 return last_map_addr;
439} 461}
@@ -450,6 +472,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
450 unsigned long pmd_phys; 472 unsigned long pmd_phys;
451 pud_t *pud = pud_page + pud_index(addr); 473 pud_t *pud = pud_page + pud_index(addr);
452 pmd_t *pmd; 474 pmd_t *pmd;
475 pgprot_t prot = PAGE_KERNEL;
453 476
454 if (addr >= end) 477 if (addr >= end)
455 break; 478 break;
@@ -461,10 +484,26 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
461 } 484 }
462 485
463 if (pud_val(*pud)) { 486 if (pud_val(*pud)) {
464 if (!pud_large(*pud)) 487 if (!pud_large(*pud)) {
465 last_map_addr = phys_pmd_update(pud, addr, end, 488 last_map_addr = phys_pmd_update(pud, addr, end,
466 page_size_mask); 489 page_size_mask, prot);
467 continue; 490 continue;
491 }
492 /*
493 * If we are ok with PG_LEVEL_1G mapping, then we will
494 * use the existing mapping.
495 *
496 * Otherwise, we will split the gbpage mapping but use
497 * the same existing protection bits except for large
498 * page, so that we don't violate Intel's TLB
499 * Application note (317080) which says, while changing
500 * the page sizes, new and old translations should
501 * not differ with respect to page frame and
502 * attributes.
503 */
504 if (page_size_mask & (1 << PG_LEVEL_1G))
505 continue;
506 prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
468 } 507 }
469 508
470 if (page_size_mask & (1<<PG_LEVEL_1G)) { 509 if (page_size_mask & (1<<PG_LEVEL_1G)) {
@@ -478,7 +517,8 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
478 } 517 }
479 518
480 pmd = alloc_low_page(&pmd_phys); 519 pmd = alloc_low_page(&pmd_phys);
481 last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask); 520 last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
521 prot);
482 unmap_low_page(pmd); 522 unmap_low_page(pmd);
483 523
484 spin_lock(&init_mm.page_table_lock); 524 spin_lock(&init_mm.page_table_lock);
@@ -486,6 +526,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
486 spin_unlock(&init_mm.page_table_lock); 526 spin_unlock(&init_mm.page_table_lock);
487 } 527 }
488 __flush_tlb_all(); 528 __flush_tlb_all();
529
489 update_page_count(PG_LEVEL_1G, pages); 530 update_page_count(PG_LEVEL_1G, pages);
490 531
491 return last_map_addr; 532 return last_map_addr;
@@ -502,27 +543,28 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
502 return phys_pud_init(pud, addr, end, page_size_mask); 543 return phys_pud_init(pud, addr, end, page_size_mask);
503} 544}
504 545
505static void __init find_early_table_space(unsigned long end) 546static void __init find_early_table_space(unsigned long end, int use_pse,
547 int use_gbpages)
506{ 548{
507 unsigned long puds, pmds, ptes, tables, start; 549 unsigned long puds, pmds, ptes, tables, start;
508 550
509 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; 551 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
510 tables = round_up(puds * sizeof(pud_t), PAGE_SIZE); 552 tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
511 if (direct_gbpages) { 553 if (use_gbpages) {
512 unsigned long extra; 554 unsigned long extra;
513 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); 555 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
514 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; 556 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
515 } else 557 } else
516 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; 558 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
517 tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE); 559 tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
518 560
519 if (cpu_has_pse) { 561 if (use_pse) {
520 unsigned long extra; 562 unsigned long extra;
521 extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT); 563 extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
522 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; 564 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
523 } else 565 } else
524 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; 566 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
525 tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE); 567 tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
526 568
527 /* 569 /*
528 * RED-PEN putting page tables only on node 0 could 570 * RED-PEN putting page tables only on node 0 could
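find_early_table_space() above sizes the early page-table allocation: one 8-byte entry per PUD, PMD and PTE that will be needed, where PMDs are counted only for the tail left uncovered by gbpages and PTEs only for the tail left uncovered by 2M pages, and each table array is rounded up to a page. A standalone sketch of the same arithmetic, assuming the usual x86-64 shift constants and an 8-byte entry size:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PUD_SHIFT  30
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PMD_SIZE   (1ULL << PMD_SHIFT)
#define PUD_SIZE   (1ULL << PUD_SHIFT)

typedef unsigned long long u64;

static u64 roundup_to(u64 x, u64 align)
{
    return (x + align - 1) / align * align;
}

/* Same counting as find_early_table_space(): 8-byte entries, page-sized tables. */
static u64 table_space(u64 end, int use_pse, int use_gbpages)
{
    u64 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
    u64 tables = roundup_to(puds * 8, PAGE_SIZE);
    u64 pmds, ptes;

    if (use_gbpages) {
        u64 extra = end - ((end >> PUD_SHIFT) << PUD_SHIFT);
        pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
    } else {
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
    }
    tables += roundup_to(pmds * 8, PAGE_SIZE);

    if (use_pse) {
        u64 extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
        ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
    } else {
        ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }
    tables += roundup_to(ptes * 8, PAGE_SIZE);

    return tables;
}

int main(void)
{
    u64 end = 4ULL << 30;   /* map the first 4 GB of physical memory */

    printf("4k only       : %llu KB of page tables\n", table_space(end, 0, 0) >> 10);
    printf("with 2M pages : %llu KB of page tables\n", table_space(end, 1, 0) >> 10);
    printf("with 1G pages : %llu KB of page tables\n", table_space(end, 1, 1) >> 10);
    return 0;
}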
@@ -584,6 +626,7 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
584 pgd_populate(&init_mm, pgd, __va(pud_phys)); 626 pgd_populate(&init_mm, pgd, __va(pud_phys));
585 spin_unlock(&init_mm.page_table_lock); 627 spin_unlock(&init_mm.page_table_lock);
586 } 628 }
629 __flush_tlb_all();
587 630
588 return last_map_addr; 631 return last_map_addr;
589} 632}
@@ -627,6 +670,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
627 670
628 struct map_range mr[NR_RANGE_MR]; 671 struct map_range mr[NR_RANGE_MR];
629 int nr_range, i; 672 int nr_range, i;
673 int use_pse, use_gbpages;
630 674
631 printk(KERN_INFO "init_memory_mapping\n"); 675 printk(KERN_INFO "init_memory_mapping\n");
632 676
@@ -640,9 +684,21 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
640 if (!after_bootmem) 684 if (!after_bootmem)
641 init_gbpages(); 685 init_gbpages();
642 686
643 if (direct_gbpages) 687#ifdef CONFIG_DEBUG_PAGEALLOC
688 /*
689 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
690 * This will simplify cpa(), which otherwise needs to support splitting
691 * large pages into small ones in interrupt context, etc.
692 */
693 use_pse = use_gbpages = 0;
694#else
695 use_pse = cpu_has_pse;
696 use_gbpages = direct_gbpages;
697#endif
698
699 if (use_gbpages)
644 page_size_mask |= 1 << PG_LEVEL_1G; 700 page_size_mask |= 1 << PG_LEVEL_1G;
645 if (cpu_has_pse) 701 if (use_pse)
646 page_size_mask |= 1 << PG_LEVEL_2M; 702 page_size_mask |= 1 << PG_LEVEL_2M;
647 703
648 memset(mr, 0, sizeof(mr)); 704 memset(mr, 0, sizeof(mr));
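The page_size_mask assembled in the hunk above is a plain bitmask indexed by page-table level; the mapping code later tests (1 << PG_LEVEL_2M) and (1 << PG_LEVEL_1G) to pick the largest allowed entry, and the CONFIG_DEBUG_PAGEALLOC branch simply clears both choices. A toy model of that selection (the enum values are stand-ins, not the kernel's):

#include <stdio.h>

enum { PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

static const char *pick_mapping(unsigned long page_size_mask)
{
    if (page_size_mask & (1 << PG_LEVEL_1G))
        return "1G pages";
    if (page_size_mask & (1 << PG_LEVEL_2M))
        return "2M pages";
    return "4k pages";
}

int main(void)
{
    int use_pse = 1, use_gbpages = 0;     /* e.g. CPU has PSE, no gbpages */
    unsigned long mask = 0;

#ifdef DEBUG_PAGEALLOC_MODEL
    use_pse = use_gbpages = 0;            /* force 4k, as the patch does */
#endif
    if (use_gbpages)
        mask |= 1 << PG_LEVEL_1G;
    if (use_pse)
        mask |= 1 << PG_LEVEL_2M;

    printf("identity mapping uses %s\n", pick_mapping(mask));
    return 0;
}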
@@ -703,7 +759,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
703 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k")); 759 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
704 760
705 if (!after_bootmem) 761 if (!after_bootmem)
706 find_early_table_space(end); 762 find_early_table_space(end, use_pse, use_gbpages);
707 763
708 for (i = 0; i < nr_range; i++) 764 for (i = 0; i < nr_range; i++)
709 last_map_addr = kernel_physical_mapping_init( 765 last_map_addr = kernel_physical_mapping_init(
@@ -862,8 +918,6 @@ void __init mem_init(void)
862 reservedpages << (PAGE_SHIFT-10), 918 reservedpages << (PAGE_SHIFT-10),
863 datasize >> 10, 919 datasize >> 10,
864 initsize >> 10); 920 initsize >> 10);
865
866 cpa_init();
867} 921}
868 922
869void free_init_pages(char *what, unsigned long begin, unsigned long end) 923void free_init_pages(char *what, unsigned long begin, unsigned long end)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index cac6da54203b..6ab3196d12b4 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -83,6 +83,25 @@ int page_is_ram(unsigned long pagenr)
83 return 0; 83 return 0;
84} 84}
85 85
86int pagerange_is_ram(unsigned long start, unsigned long end)
87{
88 int ram_page = 0, not_rampage = 0;
89 unsigned long page_nr;
90
91 for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
92 ++page_nr) {
93 if (page_is_ram(page_nr))
94 ram_page = 1;
95 else
96 not_rampage = 1;
97
98 if (ram_page == not_rampage)
99 return -1;
100 }
101
102 return ram_page;
103}
104
86/* 105/*
87 * Fix up the linear direct mapping of the kernel to avoid cache attribute 106 * Fix up the linear direct mapping of the kernel to avoid cache attribute
88 * conflicts. 107 * conflicts.
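pagerange_is_ram(), added above, reduces a physical range to a tri-state answer: 1 when every page is RAM, 0 when none is, and -1 for a mixed range, which the PAT code then refuses to track. A self-contained model of the walk, with page_is_ram() replaced by a stub predicate (the real e820-backed lookup is assumed, not reimplemented):

#include <stdio.h>

#define PAGE_SHIFT 12

/* Stand-in for page_is_ram(): pretend RAM covers pfns [0x100, 0x200). */
static int page_is_ram_stub(unsigned long pfn)
{
    return pfn >= 0x100 && pfn < 0x200;
}

static int pagerange_is_ram(unsigned long start, unsigned long end)
{
    int ram_page = 0, not_ram_page = 0;
    unsigned long pfn;

    for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); ++pfn) {
        if (page_is_ram_stub(pfn))
            ram_page = 1;
        else
            not_ram_page = 1;

        if (ram_page == not_ram_page)   /* both seen: mixed range */
            return -1;
    }
    return ram_page;
}

int main(void)
{
    printf("all RAM : %d\n", pagerange_is_ram(0x100000, 0x110000));
    printf("no RAM  : %d\n", pagerange_is_ram(0x300000, 0x310000));
    printf("mixed   : %d\n", pagerange_is_ram(0x1f0000, 0x210000));
    return 0;
}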
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index a4dd793d6003..cebcbf152d46 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -79,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void)
79 return 0; 79 return 0;
80 80
81 addr = 0x8000; 81 addr = 0x8000;
82 nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); 82 nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
83 nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT, 83 nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
84 nodemap_size, L1_CACHE_BYTES); 84 nodemap_size, L1_CACHE_BYTES);
85 if (nodemap_addr == -1UL) { 85 if (nodemap_addr == -1UL) {
@@ -176,10 +176,10 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
176 unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size; 176 unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
177 unsigned long bootmap_start, nodedata_phys; 177 unsigned long bootmap_start, nodedata_phys;
178 void *bootmap; 178 void *bootmap;
179 const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE); 179 const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
180 int nid; 180 int nid;
181 181
182 start = round_up(start, ZONE_ALIGN); 182 start = roundup(start, ZONE_ALIGN);
183 183
184 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, 184 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
185 start, end); 185 start, end);
@@ -210,9 +210,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
210 bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn); 210 bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
211 nid = phys_to_nid(nodedata_phys); 211 nid = phys_to_nid(nodedata_phys);
212 if (nid == nodeid) 212 if (nid == nodeid)
213 bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE); 213 bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
214 else 214 else
215 bootmap_start = round_up(start, PAGE_SIZE); 215 bootmap_start = roundup(start, PAGE_SIZE);
216 /* 216 /*
217 * SMP_CACHE_BYTES could be enough, but init_bootmem_node like 217 * SMP_CACHE_BYTES could be enough, but init_bootmem_node like
218 * to use that to align to PAGE_SIZE 218 * to use that to align to PAGE_SIZE
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index d4aa503caaa2..e1d106909218 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -32,7 +32,7 @@ enum {
32 GPS = (1<<30) 32 GPS = (1<<30)
33}; 33};
34 34
35#define PAGE_TESTBIT __pgprot(_PAGE_UNUSED1) 35#define PAGE_CPA_TEST __pgprot(_PAGE_CPA_TEST)
36 36
37static int pte_testbit(pte_t pte) 37static int pte_testbit(pte_t pte)
38{ 38{
@@ -118,6 +118,7 @@ static int pageattr_test(void)
118 unsigned int level; 118 unsigned int level;
119 int i, k; 119 int i, k;
120 int err; 120 int err;
121 unsigned long test_addr;
121 122
122 if (print) 123 if (print)
123 printk(KERN_INFO "CPA self-test:\n"); 124 printk(KERN_INFO "CPA self-test:\n");
@@ -172,7 +173,8 @@ static int pageattr_test(void)
172 continue; 173 continue;
173 } 174 }
174 175
175 err = change_page_attr_set(addr[i], len[i], PAGE_TESTBIT); 176 test_addr = addr[i];
177 err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0);
176 if (err < 0) { 178 if (err < 0) {
177 printk(KERN_ERR "CPA %d failed %d\n", i, err); 179 printk(KERN_ERR "CPA %d failed %d\n", i, err);
178 failed++; 180 failed++;
@@ -204,7 +206,8 @@ static int pageattr_test(void)
204 failed++; 206 failed++;
205 continue; 207 continue;
206 } 208 }
207 err = change_page_attr_clear(addr[i], len[i], PAGE_TESTBIT); 209 test_addr = addr[i];
210 err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0);
208 if (err < 0) { 211 if (err < 0) {
209 printk(KERN_ERR "CPA reverting failed: %d\n", err); 212 printk(KERN_ERR "CPA reverting failed: %d\n", err);
210 failed++; 213 failed++;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 43e2f8483e4f..a9ec89c3fbca 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -25,15 +25,27 @@
25 * The current flushing context - we pass it instead of 5 arguments: 25 * The current flushing context - we pass it instead of 5 arguments:
26 */ 26 */
27struct cpa_data { 27struct cpa_data {
28 unsigned long vaddr; 28 unsigned long *vaddr;
29 pgprot_t mask_set; 29 pgprot_t mask_set;
30 pgprot_t mask_clr; 30 pgprot_t mask_clr;
31 int numpages; 31 int numpages;
32 int flushtlb; 32 int flags;
33 unsigned long pfn; 33 unsigned long pfn;
34 unsigned force_split : 1; 34 unsigned force_split : 1;
35 int curpage;
35}; 36};
36 37
38/*
39 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
40 * using cpa_lock, so that we don't allow any other cpu, with stale large tlb
41 * entries, to change a page attribute in parallel with another cpu that is
42 * splitting a large page entry and changing the attribute.
43 */
44static DEFINE_SPINLOCK(cpa_lock);
45
46#define CPA_FLUSHTLB 1
47#define CPA_ARRAY 2
48
37#ifdef CONFIG_PROC_FS 49#ifdef CONFIG_PROC_FS
38static unsigned long direct_pages_count[PG_LEVEL_NUM]; 50static unsigned long direct_pages_count[PG_LEVEL_NUM];
39 51
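The new cpa_lock is only taken when debug_pagealloc is off: in that case the identity map may contain large pages and the lock keeps one CPU from changing a page attribute while another CPU splits the large entry covering it; with debug_pagealloc the identity map is 4k-only, there is nothing to split, and the later hunks wrap every spin_lock(&cpa_lock) in if (!debug_pagealloc). A minimal sketch of that conditional-locking pattern, using a pthread mutex purely as a stand-in for the kernel spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cpa_lock = PTHREAD_MUTEX_INITIALIZER;
static int debug_pagealloc;     /* 0: large pages may exist, lock; 1: 4k only, skip */

static void change_one_page_attr(unsigned long addr)
{
    /* Serialize with large-page splits only when large pages can exist. */
    if (!debug_pagealloc)
        pthread_mutex_lock(&cpa_lock);

    printf("changing attribute of page at %#lx\n", addr);

    if (!debug_pagealloc)
        pthread_mutex_unlock(&cpa_lock);
}

int main(void)
{
    change_one_page_attr(0xb0000UL);
    debug_pagealloc = 1;
    change_one_page_attr(0xc0000UL);    /* same call, lock skipped */
    return 0;
}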
@@ -84,7 +96,7 @@ static inline unsigned long highmap_start_pfn(void)
84 96
85static inline unsigned long highmap_end_pfn(void) 97static inline unsigned long highmap_end_pfn(void)
86{ 98{
87 return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT; 99 return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
88} 100}
89 101
90#endif 102#endif
@@ -190,6 +202,41 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
190 } 202 }
191} 203}
192 204
205static void cpa_flush_array(unsigned long *start, int numpages, int cache)
206{
207 unsigned int i, level;
208 unsigned long *addr;
209
210 BUG_ON(irqs_disabled());
211
212 on_each_cpu(__cpa_flush_range, NULL, 1);
213
214 if (!cache)
215 return;
216
217 /* 4M threshold */
218 if (numpages >= 1024) {
219 if (boot_cpu_data.x86_model >= 4)
220 wbinvd();
221 return;
222 }
223 /*
224 * We only need to flush on one CPU:
225 * clflush is a MESI-coherent instruction that
226 * will cause all other CPUs to flush the same
227 * cachelines:
228 */
229 for (i = 0, addr = start; i < numpages; i++, addr++) {
230 pte_t *pte = lookup_address(*addr, &level);
231
232 /*
233 * Only flush present addresses:
234 */
235 if (pte && (pte_val(*pte) & _PAGE_PRESENT))
236 clflush_cache_range((void *) *addr, PAGE_SIZE);
237 }
238}
239
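cpa_flush_array() above chooses between two cache-flush strategies once the TLBs have been flushed: past a 1024-page (~4 MB) threshold a single wbinvd is cheaper, below it only the pages that are actually present get a clflush. A userspace model of just that decision; the threshold mirrors the code, while wbinvd and clflush themselves are privileged instructions and are only named here:

#include <stdio.h>

#define WBINVD_THRESHOLD_PAGES 1024   /* ~4 MB of 4k pages, as in the patch */

enum flush_kind { FLUSH_NONE, FLUSH_WBINVD, FLUSH_CLFLUSH_EACH };

static enum flush_kind pick_flush(int numpages, int cache)
{
    if (!cache)
        return FLUSH_NONE;                 /* the TLB flush alone is enough */
    if (numpages >= WBINVD_THRESHOLD_PAGES)
        return FLUSH_WBINVD;               /* one global cache flush */
    return FLUSH_CLFLUSH_EACH;             /* clflush present pages one by one */
}

int main(void)
{
    printf("64 pages, cached attr change   -> %d\n", pick_flush(64, 1));
    printf("4096 pages, cached attr change -> %d\n", pick_flush(4096, 1));
    printf("64 pages, no cache change      -> %d\n", pick_flush(64, 0));
    return 0;
}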
193/* 240/*
194 * Certain areas of memory on x86 require very specific protection flags, 241 * Certain areas of memory on x86 require very specific protection flags,
195 * for example the BIOS area or kernel text. Callers don't always get this 242 * for example the BIOS area or kernel text. Callers don't always get this
@@ -398,7 +445,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
398 */ 445 */
399 new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot)); 446 new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
400 __set_pmd_pte(kpte, address, new_pte); 447 __set_pmd_pte(kpte, address, new_pte);
401 cpa->flushtlb = 1; 448 cpa->flags |= CPA_FLUSHTLB;
402 do_split = 0; 449 do_split = 0;
403 } 450 }
404 451
@@ -408,84 +455,6 @@ out_unlock:
408 return do_split; 455 return do_split;
409} 456}
410 457
411static LIST_HEAD(page_pool);
412static unsigned long pool_size, pool_pages, pool_low;
413static unsigned long pool_used, pool_failed;
414
415static void cpa_fill_pool(struct page **ret)
416{
417 gfp_t gfp = GFP_KERNEL;
418 unsigned long flags;
419 struct page *p;
420
421 /*
422 * Avoid recursion (on debug-pagealloc) and also signal
423 * our priority to get to these pagetables:
424 */
425 if (current->flags & PF_MEMALLOC)
426 return;
427 current->flags |= PF_MEMALLOC;
428
429 /*
430 * Allocate atomically from atomic contexts:
431 */
432 if (in_atomic() || irqs_disabled() || debug_pagealloc)
433 gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
434
435 while (pool_pages < pool_size || (ret && !*ret)) {
436 p = alloc_pages(gfp, 0);
437 if (!p) {
438 pool_failed++;
439 break;
440 }
441 /*
442 * If the call site needs a page right now, provide it:
443 */
444 if (ret && !*ret) {
445 *ret = p;
446 continue;
447 }
448 spin_lock_irqsave(&pgd_lock, flags);
449 list_add(&p->lru, &page_pool);
450 pool_pages++;
451 spin_unlock_irqrestore(&pgd_lock, flags);
452 }
453
454 current->flags &= ~PF_MEMALLOC;
455}
456
457#define SHIFT_MB (20 - PAGE_SHIFT)
458#define ROUND_MB_GB ((1 << 10) - 1)
459#define SHIFT_MB_GB 10
460#define POOL_PAGES_PER_GB 16
461
462void __init cpa_init(void)
463{
464 struct sysinfo si;
465 unsigned long gb;
466
467 si_meminfo(&si);
468 /*
469 * Calculate the number of pool pages:
470 *
471 * Convert totalram (nr of pages) to MiB and round to the next
472 * GiB. Shift MiB to Gib and multiply the result by
473 * POOL_PAGES_PER_GB:
474 */
475 if (debug_pagealloc) {
476 gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
477 pool_size = POOL_PAGES_PER_GB * gb;
478 } else {
479 pool_size = 1;
480 }
481 pool_low = pool_size;
482
483 cpa_fill_pool(NULL);
484 printk(KERN_DEBUG
485 "CPA: page pool initialized %lu of %lu pages preallocated\n",
486 pool_pages, pool_size);
487}
488
489static int split_large_page(pte_t *kpte, unsigned long address) 458static int split_large_page(pte_t *kpte, unsigned long address)
490{ 459{
491 unsigned long flags, pfn, pfninc = 1; 460 unsigned long flags, pfn, pfninc = 1;
@@ -494,28 +463,15 @@ static int split_large_page(pte_t *kpte, unsigned long address)
494 pgprot_t ref_prot; 463 pgprot_t ref_prot;
495 struct page *base; 464 struct page *base;
496 465
497 /* 466 if (!debug_pagealloc)
498 * Get a page from the pool. The pool list is protected by the 467 spin_unlock(&cpa_lock);
499 * pgd_lock, which we have to take anyway for the split 468 base = alloc_pages(GFP_KERNEL, 0);
500 * operation: 469 if (!debug_pagealloc)
501 */ 470 spin_lock(&cpa_lock);
502 spin_lock_irqsave(&pgd_lock, flags); 471 if (!base)
503 if (list_empty(&page_pool)) { 472 return -ENOMEM;
504 spin_unlock_irqrestore(&pgd_lock, flags);
505 base = NULL;
506 cpa_fill_pool(&base);
507 if (!base)
508 return -ENOMEM;
509 spin_lock_irqsave(&pgd_lock, flags);
510 } else {
511 base = list_first_entry(&page_pool, struct page, lru);
512 list_del(&base->lru);
513 pool_pages--;
514
515 if (pool_pages < pool_low)
516 pool_low = pool_pages;
517 }
518 473
474 spin_lock_irqsave(&pgd_lock, flags);
519 /* 475 /*
520 * Check for races, another CPU might have split this page 476 * Check for races, another CPU might have split this page
521 * up for us already: 477 * up for us already:
@@ -572,11 +528,8 @@ out_unlock:
572 * If we dropped out via the lookup_address check under 528 * If we dropped out via the lookup_address check under
573 * pgd_lock then stick the page back into the pool: 529 * pgd_lock then stick the page back into the pool:
574 */ 530 */
575 if (base) { 531 if (base)
576 list_add(&base->lru, &page_pool); 532 __free_page(base);
577 pool_pages++;
578 } else
579 pool_used++;
580 spin_unlock_irqrestore(&pgd_lock, flags); 533 spin_unlock_irqrestore(&pgd_lock, flags);
581 534
582 return 0; 535 return 0;
@@ -584,11 +537,16 @@ out_unlock:
584 537
585static int __change_page_attr(struct cpa_data *cpa, int primary) 538static int __change_page_attr(struct cpa_data *cpa, int primary)
586{ 539{
587 unsigned long address = cpa->vaddr; 540 unsigned long address;
588 int do_split, err; 541 int do_split, err;
589 unsigned int level; 542 unsigned int level;
590 pte_t *kpte, old_pte; 543 pte_t *kpte, old_pte;
591 544
545 if (cpa->flags & CPA_ARRAY)
546 address = cpa->vaddr[cpa->curpage];
547 else
548 address = *cpa->vaddr;
549
592repeat: 550repeat:
593 kpte = lookup_address(address, &level); 551 kpte = lookup_address(address, &level);
594 if (!kpte) 552 if (!kpte)
@@ -600,7 +558,7 @@ repeat:
600 return 0; 558 return 0;
601 WARN(1, KERN_WARNING "CPA: called for zero pte. " 559 WARN(1, KERN_WARNING "CPA: called for zero pte. "
602 "vaddr = %lx cpa->vaddr = %lx\n", address, 560 "vaddr = %lx cpa->vaddr = %lx\n", address,
603 cpa->vaddr); 561 *cpa->vaddr);
604 return -EINVAL; 562 return -EINVAL;
605 } 563 }
606 564
@@ -626,7 +584,7 @@ repeat:
626 */ 584 */
627 if (pte_val(old_pte) != pte_val(new_pte)) { 585 if (pte_val(old_pte) != pte_val(new_pte)) {
628 set_pte_atomic(kpte, new_pte); 586 set_pte_atomic(kpte, new_pte);
629 cpa->flushtlb = 1; 587 cpa->flags |= CPA_FLUSHTLB;
630 } 588 }
631 cpa->numpages = 1; 589 cpa->numpages = 1;
632 return 0; 590 return 0;
@@ -650,7 +608,25 @@ repeat:
650 */ 608 */
651 err = split_large_page(kpte, address); 609 err = split_large_page(kpte, address);
652 if (!err) { 610 if (!err) {
653 cpa->flushtlb = 1; 611 /*
612 * Do a global flush tlb after splitting the large page
613 * and before we do the actual change page attribute in the PTE.
614 *
615 * Without this, we violate the TLB application note, which says
616 * "The TLBs may contain both ordinary and large-page
617 * translations for a 4-KByte range of linear addresses. This
618 * may occur if software modifies the paging structures so that
619 * the page size used for the address range changes. If the two
620 * translations differ with respect to page frame or attributes
621 * (e.g., permissions), processor behavior is undefined and may
622 * be implementation-specific."
623 *
624 * We do this global tlb flush inside the cpa_lock, so that we
625 * don't allow any other cpu, with stale tlb entries, to change
626 * the attribute of a page that falls into the just-split large
627 * page entry, in parallel.
628 */
629 flush_tlb_all();
654 goto repeat; 630 goto repeat;
655 } 631 }
656 632
@@ -663,6 +639,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
663{ 639{
664 struct cpa_data alias_cpa; 640 struct cpa_data alias_cpa;
665 int ret = 0; 641 int ret = 0;
642 unsigned long temp_cpa_vaddr, vaddr;
666 643
667 if (cpa->pfn >= max_pfn_mapped) 644 if (cpa->pfn >= max_pfn_mapped)
668 return 0; 645 return 0;
@@ -675,16 +652,24 @@ static int cpa_process_alias(struct cpa_data *cpa)
675 * No need to redo, when the primary call touched the direct 652 * No need to redo, when the primary call touched the direct
676 * mapping already: 653 * mapping already:
677 */ 654 */
678 if (!(within(cpa->vaddr, PAGE_OFFSET, 655 if (cpa->flags & CPA_ARRAY)
656 vaddr = cpa->vaddr[cpa->curpage];
657 else
658 vaddr = *cpa->vaddr;
659
660 if (!(within(vaddr, PAGE_OFFSET,
679 PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT)) 661 PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
680#ifdef CONFIG_X86_64 662#ifdef CONFIG_X86_64
681 || within(cpa->vaddr, PAGE_OFFSET + (1UL<<32), 663 || within(vaddr, PAGE_OFFSET + (1UL<<32),
682 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)) 664 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))
683#endif 665#endif
684 )) { 666 )) {
685 667
686 alias_cpa = *cpa; 668 alias_cpa = *cpa;
687 alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT); 669 temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
670 alias_cpa.vaddr = &temp_cpa_vaddr;
671 alias_cpa.flags &= ~CPA_ARRAY;
672
688 673
689 ret = __change_page_attr_set_clr(&alias_cpa, 0); 674 ret = __change_page_attr_set_clr(&alias_cpa, 0);
690 } 675 }
@@ -696,7 +681,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
696 * No need to redo, when the primary call touched the high 681 * No need to redo, when the primary call touched the high
697 * mapping already: 682 * mapping already:
698 */ 683 */
699 if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end)) 684 if (within(vaddr, (unsigned long) _text, (unsigned long) _end))
700 return 0; 685 return 0;
701 686
702 /* 687 /*
@@ -707,8 +692,9 @@ static int cpa_process_alias(struct cpa_data *cpa)
707 return 0; 692 return 0;
708 693
709 alias_cpa = *cpa; 694 alias_cpa = *cpa;
710 alias_cpa.vaddr = 695 temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
711 (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base; 696 alias_cpa.vaddr = &temp_cpa_vaddr;
697 alias_cpa.flags &= ~CPA_ARRAY;
712 698
713 /* 699 /*
714 * The high mapping range is imprecise, so ignore the return value. 700 * The high mapping range is imprecise, so ignore the return value.
@@ -728,8 +714,15 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
728 * preservation check. 714 * preservation check.
729 */ 715 */
730 cpa->numpages = numpages; 716 cpa->numpages = numpages;
717 /* for array changes, we can't use large pages */
718 if (cpa->flags & CPA_ARRAY)
719 cpa->numpages = 1;
731 720
721 if (!debug_pagealloc)
722 spin_lock(&cpa_lock);
732 ret = __change_page_attr(cpa, checkalias); 723 ret = __change_page_attr(cpa, checkalias);
724 if (!debug_pagealloc)
725 spin_unlock(&cpa_lock);
733 if (ret) 726 if (ret)
734 return ret; 727 return ret;
735 728
@@ -746,7 +739,11 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
746 */ 739 */
747 BUG_ON(cpa->numpages > numpages); 740 BUG_ON(cpa->numpages > numpages);
748 numpages -= cpa->numpages; 741 numpages -= cpa->numpages;
749 cpa->vaddr += cpa->numpages * PAGE_SIZE; 742 if (cpa->flags & CPA_ARRAY)
743 cpa->curpage++;
744 else
745 *cpa->vaddr += cpa->numpages * PAGE_SIZE;
746
750 } 747 }
751 return 0; 748 return 0;
752} 749}
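In array mode (CPA_ARRAY) the loop above handles one page per pass and advances an index into the caller's array, since consecutive entries need not be contiguous and large pages cannot be used; in the single-range case it keeps bumping one virtual address by numpages * PAGE_SIZE. A simplified model of the two iteration modes; the struct below is a stand-in for struct cpa_data, not the real thing:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define CPA_ARRAY 2

struct cpa_model {
    unsigned long *vaddr;   /* single address, or base of an array */
    int numpages;
    int curpage;
    int flags;
};

/* Return the address the current step should operate on. */
static unsigned long current_addr(struct cpa_model *cpa)
{
    if (cpa->flags & CPA_ARRAY)
        return cpa->vaddr[cpa->curpage];
    return *cpa->vaddr;
}

/* Advance by the number of pages the step actually handled. */
static void advance(struct cpa_model *cpa, int handled)
{
    if (cpa->flags & CPA_ARRAY)
        cpa->curpage++;                       /* one array slot per step */
    else
        *cpa->vaddr += handled * PAGE_SIZE;   /* contiguous range */
}

int main(void)
{
    unsigned long array[] = { 0x10000, 0x30000, 0x80000 };
    struct cpa_model cpa = { array, 3, 0, CPA_ARRAY };
    int left;

    for (left = cpa.numpages; left; left--) {
        printf("array mode: touch %#lx\n", current_addr(&cpa));
        advance(&cpa, 1);
    }
    return 0;
}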
@@ -757,9 +754,9 @@ static inline int cache_attr(pgprot_t attr)
757 (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD); 754 (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
758} 755}
759 756
760static int change_page_attr_set_clr(unsigned long addr, int numpages, 757static int change_page_attr_set_clr(unsigned long *addr, int numpages,
761 pgprot_t mask_set, pgprot_t mask_clr, 758 pgprot_t mask_set, pgprot_t mask_clr,
762 int force_split) 759 int force_split, int array)
763{ 760{
764 struct cpa_data cpa; 761 struct cpa_data cpa;
765 int ret, cache, checkalias; 762 int ret, cache, checkalias;
@@ -774,21 +771,38 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
774 return 0; 771 return 0;
775 772
776 /* Ensure we are PAGE_SIZE aligned */ 773 /* Ensure we are PAGE_SIZE aligned */
777 if (addr & ~PAGE_MASK) { 774 if (!array) {
778 addr &= PAGE_MASK; 775 if (*addr & ~PAGE_MASK) {
779 /* 776 *addr &= PAGE_MASK;
780 * People should not be passing in unaligned addresses: 777 /*
781 */ 778 * People should not be passing in unaligned addresses:
782 WARN_ON_ONCE(1); 779 */
780 WARN_ON_ONCE(1);
781 }
782 } else {
783 int i;
784 for (i = 0; i < numpages; i++) {
785 if (addr[i] & ~PAGE_MASK) {
786 addr[i] &= PAGE_MASK;
787 WARN_ON_ONCE(1);
788 }
789 }
783 } 790 }
784 791
792 /* Must avoid aliasing mappings in the highmem code */
793 kmap_flush_unused();
794
785 cpa.vaddr = addr; 795 cpa.vaddr = addr;
786 cpa.numpages = numpages; 796 cpa.numpages = numpages;
787 cpa.mask_set = mask_set; 797 cpa.mask_set = mask_set;
788 cpa.mask_clr = mask_clr; 798 cpa.mask_clr = mask_clr;
789 cpa.flushtlb = 0; 799 cpa.flags = 0;
800 cpa.curpage = 0;
790 cpa.force_split = force_split; 801 cpa.force_split = force_split;
791 802
803 if (array)
804 cpa.flags |= CPA_ARRAY;
805
792 /* No alias checking for _NX bit modifications */ 806 /* No alias checking for _NX bit modifications */
793 checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX; 807 checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
794 808
@@ -797,7 +811,7 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
797 /* 811 /*
798 * Check whether we really changed something: 812 * Check whether we really changed something:
799 */ 813 */
800 if (!cpa.flushtlb) 814 if (!(cpa.flags & CPA_FLUSHTLB))
801 goto out; 815 goto out;
802 816
803 /* 817 /*
@@ -812,27 +826,30 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
812 * error case we fall back to cpa_flush_all (which uses 826 * error case we fall back to cpa_flush_all (which uses
813 * wbinvd): 827 * wbinvd):
814 */ 828 */
815 if (!ret && cpu_has_clflush) 829 if (!ret && cpu_has_clflush) {
816 cpa_flush_range(addr, numpages, cache); 830 if (cpa.flags & CPA_ARRAY)
817 else 831 cpa_flush_array(addr, numpages, cache);
832 else
833 cpa_flush_range(*addr, numpages, cache);
834 } else
818 cpa_flush_all(cache); 835 cpa_flush_all(cache);
819 836
820out: 837out:
821 cpa_fill_pool(NULL);
822
823 return ret; 838 return ret;
824} 839}
825 840
826static inline int change_page_attr_set(unsigned long addr, int numpages, 841static inline int change_page_attr_set(unsigned long *addr, int numpages,
827 pgprot_t mask) 842 pgprot_t mask, int array)
828{ 843{
829 return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0); 844 return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
845 array);
830} 846}
831 847
832static inline int change_page_attr_clear(unsigned long addr, int numpages, 848static inline int change_page_attr_clear(unsigned long *addr, int numpages,
833 pgprot_t mask) 849 pgprot_t mask, int array)
834{ 850{
835 return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0); 851 return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
852 array);
836} 853}
837 854
838int _set_memory_uc(unsigned long addr, int numpages) 855int _set_memory_uc(unsigned long addr, int numpages)
@@ -840,8 +857,8 @@ int _set_memory_uc(unsigned long addr, int numpages)
840 /* 857 /*
841 * for now UC MINUS. see comments in ioremap_nocache() 858 * for now UC MINUS. see comments in ioremap_nocache()
842 */ 859 */
843 return change_page_attr_set(addr, numpages, 860 return change_page_attr_set(&addr, numpages,
844 __pgprot(_PAGE_CACHE_UC_MINUS)); 861 __pgprot(_PAGE_CACHE_UC_MINUS), 0);
845} 862}
846 863
847int set_memory_uc(unsigned long addr, int numpages) 864int set_memory_uc(unsigned long addr, int numpages)
@@ -857,10 +874,48 @@ int set_memory_uc(unsigned long addr, int numpages)
857} 874}
858EXPORT_SYMBOL(set_memory_uc); 875EXPORT_SYMBOL(set_memory_uc);
859 876
877int set_memory_array_uc(unsigned long *addr, int addrinarray)
878{
879 unsigned long start;
880 unsigned long end;
881 int i;
882 /*
883 * for now UC MINUS. see comments in ioremap_nocache()
884 */
885 for (i = 0; i < addrinarray; i++) {
886 start = __pa(addr[i]);
887 for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
888 if (end != __pa(addr[i + 1]))
889 break;
890 i++;
891 }
892 if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
893 goto out;
894 }
895
896 return change_page_attr_set(addr, addrinarray,
897 __pgprot(_PAGE_CACHE_UC_MINUS), 1);
898out:
899 for (i = 0; i < addrinarray; i++) {
900 unsigned long tmp = __pa(addr[i]);
901
902 if (tmp == start)
903 break;
904 for (end = tmp + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
905 if (end != __pa(addr[i + 1]))
906 break;
907 i++;
908 }
909 free_memtype(tmp, end);
910 }
911 return -EINVAL;
912}
913EXPORT_SYMBOL(set_memory_array_uc);
914
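set_memory_array_uc() above coalesces physically consecutive entries of the address array into one reserve_memtype() call, so a contiguous run costs a single memtype record, and on failure it re-walks the array to free what it had already reserved. A standalone sketch of the coalescing walk, with __pa() modeled as the identity and reserve_memtype() stubbed out:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stand-ins for __pa() and reserve_memtype(). */
static unsigned long to_phys(unsigned long vaddr) { return vaddr; }

static int reserve_range(unsigned long start, unsigned long end)
{
    printf("reserve [%#lx, %#lx)\n", start, end);
    return 0;
}

static void coalesce_and_reserve(const unsigned long *addr, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        unsigned long start = to_phys(addr[i]);
        unsigned long end = start + PAGE_SIZE;

        /* Extend the range while the next entry is physically adjacent. */
        while (i < n - 1 && end == to_phys(addr[i + 1])) {
            end += PAGE_SIZE;
            i++;
        }
        reserve_range(start, end);
    }
}

int main(void)
{
    /* Three contiguous pages, then a gap, then one more page. */
    unsigned long pages[] = { 0x10000, 0x11000, 0x12000, 0x20000 };

    coalesce_and_reserve(pages, 4);   /* expect two reserved ranges */
    return 0;
}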
860int _set_memory_wc(unsigned long addr, int numpages) 915int _set_memory_wc(unsigned long addr, int numpages)
861{ 916{
862 return change_page_attr_set(addr, numpages, 917 return change_page_attr_set(&addr, numpages,
863 __pgprot(_PAGE_CACHE_WC)); 918 __pgprot(_PAGE_CACHE_WC), 0);
864} 919}
865 920
866int set_memory_wc(unsigned long addr, int numpages) 921int set_memory_wc(unsigned long addr, int numpages)
@@ -878,8 +933,8 @@ EXPORT_SYMBOL(set_memory_wc);
878 933
879int _set_memory_wb(unsigned long addr, int numpages) 934int _set_memory_wb(unsigned long addr, int numpages)
880{ 935{
881 return change_page_attr_clear(addr, numpages, 936 return change_page_attr_clear(&addr, numpages,
882 __pgprot(_PAGE_CACHE_MASK)); 937 __pgprot(_PAGE_CACHE_MASK), 0);
883} 938}
884 939
885int set_memory_wb(unsigned long addr, int numpages) 940int set_memory_wb(unsigned long addr, int numpages)
@@ -890,37 +945,59 @@ int set_memory_wb(unsigned long addr, int numpages)
890} 945}
891EXPORT_SYMBOL(set_memory_wb); 946EXPORT_SYMBOL(set_memory_wb);
892 947
948int set_memory_array_wb(unsigned long *addr, int addrinarray)
949{
950 int i;
951
952 for (i = 0; i < addrinarray; i++) {
953 unsigned long start = __pa(addr[i]);
954 unsigned long end;
955
956 for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
957 if (end != __pa(addr[i + 1]))
958 break;
959 i++;
960 }
961 free_memtype(start, end);
962 }
963 return change_page_attr_clear(addr, addrinarray,
964 __pgprot(_PAGE_CACHE_MASK), 1);
965}
966EXPORT_SYMBOL(set_memory_array_wb);
967
893int set_memory_x(unsigned long addr, int numpages) 968int set_memory_x(unsigned long addr, int numpages)
894{ 969{
895 return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX)); 970 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
896} 971}
897EXPORT_SYMBOL(set_memory_x); 972EXPORT_SYMBOL(set_memory_x);
898 973
899int set_memory_nx(unsigned long addr, int numpages) 974int set_memory_nx(unsigned long addr, int numpages)
900{ 975{
901 return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX)); 976 return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
902} 977}
903EXPORT_SYMBOL(set_memory_nx); 978EXPORT_SYMBOL(set_memory_nx);
904 979
905int set_memory_ro(unsigned long addr, int numpages) 980int set_memory_ro(unsigned long addr, int numpages)
906{ 981{
907 return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW)); 982 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
908} 983}
984EXPORT_SYMBOL_GPL(set_memory_ro);
909 985
910int set_memory_rw(unsigned long addr, int numpages) 986int set_memory_rw(unsigned long addr, int numpages)
911{ 987{
912 return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW)); 988 return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
913} 989}
990EXPORT_SYMBOL_GPL(set_memory_rw);
914 991
915int set_memory_np(unsigned long addr, int numpages) 992int set_memory_np(unsigned long addr, int numpages)
916{ 993{
917 return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT)); 994 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
918} 995}
919 996
920int set_memory_4k(unsigned long addr, int numpages) 997int set_memory_4k(unsigned long addr, int numpages)
921{ 998{
922 return change_page_attr_set_clr(addr, numpages, __pgprot(0), 999 return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
923 __pgprot(0), 1); 1000 __pgprot(0), 1, 0);
924} 1001}
925 1002
926int set_pages_uc(struct page *page, int numpages) 1003int set_pages_uc(struct page *page, int numpages)
@@ -973,22 +1050,38 @@ int set_pages_rw(struct page *page, int numpages)
973 1050
974static int __set_pages_p(struct page *page, int numpages) 1051static int __set_pages_p(struct page *page, int numpages)
975{ 1052{
976 struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page), 1053 unsigned long tempaddr = (unsigned long) page_address(page);
1054 struct cpa_data cpa = { .vaddr = &tempaddr,
977 .numpages = numpages, 1055 .numpages = numpages,
978 .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), 1056 .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
979 .mask_clr = __pgprot(0)}; 1057 .mask_clr = __pgprot(0),
1058 .flags = 0};
980 1059
981 return __change_page_attr_set_clr(&cpa, 1); 1060 /*
1061 * No alias checking is needed for setting the present flag. Otherwise,
1062 * we may need to break large pages for 64-bit kernel text
1063 * mappings (this adds to complexity if we want to do this from
1064 * atomic context especially). Let's keep it simple!
1065 */
1066 return __change_page_attr_set_clr(&cpa, 0);
982} 1067}
983 1068
984static int __set_pages_np(struct page *page, int numpages) 1069static int __set_pages_np(struct page *page, int numpages)
985{ 1070{
986 struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page), 1071 unsigned long tempaddr = (unsigned long) page_address(page);
1072 struct cpa_data cpa = { .vaddr = &tempaddr,
987 .numpages = numpages, 1073 .numpages = numpages,
988 .mask_set = __pgprot(0), 1074 .mask_set = __pgprot(0),
989 .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)}; 1075 .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
1076 .flags = 0};
990 1077
991 return __change_page_attr_set_clr(&cpa, 1); 1078 /*
1079 * No alias checking is needed for clearing the present flag. Otherwise,
1080 * we may need to break large pages for 64-bit kernel text
1081 * mappings (this adds to complexity if we want to do this from
1082 * atomic context especially). Let's keep it simple!
1083 */
1084 return __change_page_attr_set_clr(&cpa, 0);
992} 1085}
993 1086
994void kernel_map_pages(struct page *page, int numpages, int enable) 1087void kernel_map_pages(struct page *page, int numpages, int enable)
@@ -1008,11 +1101,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
1008 1101
1009 /* 1102 /*
1010 * The return value is ignored as the calls cannot fail. 1103 * The return value is ignored as the calls cannot fail.
1011 * Large pages are kept enabled at boot time, and are 1104 * Large pages for identity mappings are not used at boot time
1012 * split up quickly with DEBUG_PAGEALLOC. If a splitup 1105 * and hence no memory allocations during large page split.
1013 * fails here (due to temporary memory shortage) no damage
1014 * is done because we just keep the largepage intact up
1015 * to the next attempt when it will likely be split up:
1016 */ 1106 */
1017 if (enable) 1107 if (enable)
1018 __set_pages_p(page, numpages); 1108 __set_pages_p(page, numpages);
@@ -1024,53 +1114,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
1024 * but that can deadlock->flush only current cpu: 1114 * but that can deadlock->flush only current cpu:
1025 */ 1115 */
1026 __flush_tlb_all(); 1116 __flush_tlb_all();
1027
1028 /*
1029 * Try to refill the page pool here. We can do this only after
1030 * the tlb flush.
1031 */
1032 cpa_fill_pool(NULL);
1033} 1117}
1034 1118
1035#ifdef CONFIG_DEBUG_FS
1036static int dpa_show(struct seq_file *m, void *v)
1037{
1038 seq_puts(m, "DEBUG_PAGEALLOC\n");
1039 seq_printf(m, "pool_size : %lu\n", pool_size);
1040 seq_printf(m, "pool_pages : %lu\n", pool_pages);
1041 seq_printf(m, "pool_low : %lu\n", pool_low);
1042 seq_printf(m, "pool_used : %lu\n", pool_used);
1043 seq_printf(m, "pool_failed : %lu\n", pool_failed);
1044
1045 return 0;
1046}
1047
1048static int dpa_open(struct inode *inode, struct file *filp)
1049{
1050 return single_open(filp, dpa_show, NULL);
1051}
1052
1053static const struct file_operations dpa_fops = {
1054 .open = dpa_open,
1055 .read = seq_read,
1056 .llseek = seq_lseek,
1057 .release = single_release,
1058};
1059
1060static int __init debug_pagealloc_proc_init(void)
1061{
1062 struct dentry *de;
1063
1064 de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL,
1065 &dpa_fops);
1066 if (!de)
1067 return -ENOMEM;
1068
1069 return 0;
1070}
1071__initcall(debug_pagealloc_proc_init);
1072#endif
1073
1074#ifdef CONFIG_HIBERNATION 1119#ifdef CONFIG_HIBERNATION
1075 1120
1076bool kernel_page_present(struct page *page) 1121bool kernel_page_present(struct page *page)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2a50e0fa64a5..738fd0f24958 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -7,24 +7,24 @@
7 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen. 7 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
8 */ 8 */
9 9
10#include <linux/mm.h> 10#include <linux/seq_file.h>
11#include <linux/bootmem.h>
12#include <linux/debugfs.h>
11#include <linux/kernel.h> 13#include <linux/kernel.h>
12#include <linux/gfp.h> 14#include <linux/gfp.h>
15#include <linux/mm.h>
13#include <linux/fs.h> 16#include <linux/fs.h>
14#include <linux/bootmem.h>
15#include <linux/debugfs.h>
16#include <linux/seq_file.h>
17 17
18#include <asm/msr.h> 18#include <asm/cacheflush.h>
19#include <asm/tlbflush.h>
20#include <asm/processor.h> 19#include <asm/processor.h>
21#include <asm/page.h> 20#include <asm/tlbflush.h>
22#include <asm/pgtable.h> 21#include <asm/pgtable.h>
23#include <asm/pat.h>
24#include <asm/e820.h>
25#include <asm/cacheflush.h>
26#include <asm/fcntl.h> 22#include <asm/fcntl.h>
23#include <asm/e820.h>
27#include <asm/mtrr.h> 24#include <asm/mtrr.h>
25#include <asm/page.h>
26#include <asm/msr.h>
27#include <asm/pat.h>
28#include <asm/io.h> 28#include <asm/io.h>
29 29
30#ifdef CONFIG_X86_PAT 30#ifdef CONFIG_X86_PAT
@@ -46,6 +46,7 @@ early_param("nopat", nopat);
46 46
47 47
48static int debug_enable; 48static int debug_enable;
49
49static int __init pat_debug_setup(char *str) 50static int __init pat_debug_setup(char *str)
50{ 51{
51 debug_enable = 1; 52 debug_enable = 1;
@@ -145,14 +146,14 @@ static char *cattr_name(unsigned long flags)
145 */ 146 */
146 147
147struct memtype { 148struct memtype {
148 u64 start; 149 u64 start;
149 u64 end; 150 u64 end;
150 unsigned long type; 151 unsigned long type;
151 struct list_head nd; 152 struct list_head nd;
152}; 153};
153 154
154static LIST_HEAD(memtype_list); 155static LIST_HEAD(memtype_list);
155static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */ 156static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */
156 157
157/* 158/*
158 * Does intersection of PAT memory type and MTRR memory type and returns 159 * Does intersection of PAT memory type and MTRR memory type and returns
@@ -180,8 +181,8 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
180 return req_type; 181 return req_type;
181} 182}
182 183
183static int chk_conflict(struct memtype *new, struct memtype *entry, 184static int
184 unsigned long *type) 185chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
185{ 186{
186 if (new->type != entry->type) { 187 if (new->type != entry->type) {
187 if (type) { 188 if (type) {
@@ -211,6 +212,66 @@ static struct memtype *cached_entry;
211static u64 cached_start; 212static u64 cached_start;
212 213
213/* 214/*
215 * For RAM pages, mark the pages as non WB memory type using
216 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
217 * set_memory_wc() on a RAM page at a time before marking it as WB again.
218 * This is ok, because only one driver will own the page and
219 * issue the set_memory_*() calls.
220 *
221 * For now, we use PageNonWB to track that the RAM page is being mapped
222 * as non WB. In the future, we will have to use one more flag
223 * (or some other mechanism in page_struct) to distinguish between
224 * UC and WC mapping.
225 */
226static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
227 unsigned long *new_type)
228{
229 struct page *page;
230 u64 pfn, end_pfn;
231
232 for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
233 page = pfn_to_page(pfn);
234 if (page_mapped(page) || PageNonWB(page))
235 goto out;
236
237 SetPageNonWB(page);
238 }
239 return 0;
240
241out:
242 end_pfn = pfn;
243 for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
244 page = pfn_to_page(pfn);
245 ClearPageNonWB(page);
246 }
247
248 return -EINVAL;
249}
250
251static int free_ram_pages_type(u64 start, u64 end)
252{
253 struct page *page;
254 u64 pfn, end_pfn;
255
256 for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
257 page = pfn_to_page(pfn);
258 if (page_mapped(page) || !PageNonWB(page))
259 goto out;
260
261 ClearPageNonWB(page);
262 }
263 return 0;
264
265out:
266 end_pfn = pfn;
267 for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
268 page = pfn_to_page(pfn);
269 SetPageNonWB(page);
270 }
271 return -EINVAL;
272}
273
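reserve_ram_pages_type() and free_ram_pages_type() above use the PageNonWB flag as a per-page "currently mapped non-WB" marker: reserve sets it across the range and, if it meets a page that is mapped or already marked, undoes the partial work and fails; free clears it symmetrically. A userspace model of that set-with-rollback pattern, with a small bitmap standing in for the page flag:

#include <stdio.h>
#include <string.h>

#define NPAGES 64
static unsigned char non_wb[NPAGES];   /* models the PageNonWB bit per pfn */

static int reserve_ram_pages(unsigned long start_pfn, unsigned long end_pfn)
{
    unsigned long pfn, done;

    for (pfn = start_pfn; pfn < end_pfn; pfn++) {
        if (non_wb[pfn])               /* already claimed: refuse */
            goto rollback;
        non_wb[pfn] = 1;
    }
    return 0;

rollback:
    done = pfn;
    for (pfn = start_pfn; pfn < done; pfn++)
        non_wb[pfn] = 0;               /* undo the pages we marked */
    return -1;
}

static void free_ram_pages(unsigned long start_pfn, unsigned long end_pfn)
{
    memset(non_wb + start_pfn, 0, end_pfn - start_pfn);
}

int main(void)
{
    printf("first reserve : %d\n", reserve_ram_pages(4, 12));   /* ok */
    printf("overlapping   : %d\n", reserve_ram_pages(10, 16));  /* fails, rolled back */
    free_ram_pages(4, 12);
    printf("after free    : %d\n", reserve_ram_pages(10, 16));  /* ok now */
    return 0;
}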
274/*
214 * req_type typically has one of the: 275 * req_type typically has one of the:
215 * - _PAGE_CACHE_WB 276 * - _PAGE_CACHE_WB
216 * - _PAGE_CACHE_WC 277 * - _PAGE_CACHE_WC
@@ -226,14 +287,15 @@ static u64 cached_start;
226 * it will return a negative return value. 287 * it will return a negative return value.
227 */ 288 */
228int reserve_memtype(u64 start, u64 end, unsigned long req_type, 289int reserve_memtype(u64 start, u64 end, unsigned long req_type,
229 unsigned long *new_type) 290 unsigned long *new_type)
230{ 291{
231 struct memtype *new, *entry; 292 struct memtype *new, *entry;
232 unsigned long actual_type; 293 unsigned long actual_type;
233 struct list_head *where; 294 struct list_head *where;
295 int is_range_ram;
234 int err = 0; 296 int err = 0;
235 297
236 BUG_ON(start >= end); /* end is exclusive */ 298 BUG_ON(start >= end); /* end is exclusive */
237 299
238 if (!pat_enabled) { 300 if (!pat_enabled) {
239 /* This is identical to page table setting without PAT */ 301 /* This is identical to page table setting without PAT */
@@ -266,17 +328,24 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
266 actual_type = _PAGE_CACHE_WB; 328 actual_type = _PAGE_CACHE_WB;
267 else 329 else
268 actual_type = _PAGE_CACHE_UC_MINUS; 330 actual_type = _PAGE_CACHE_UC_MINUS;
269 } else 331 } else {
270 actual_type = pat_x_mtrr_type(start, end, 332 actual_type = pat_x_mtrr_type(start, end,
271 req_type & _PAGE_CACHE_MASK); 333 req_type & _PAGE_CACHE_MASK);
334 }
335
336 is_range_ram = pagerange_is_ram(start, end);
337 if (is_range_ram == 1)
338 return reserve_ram_pages_type(start, end, req_type, new_type);
339 else if (is_range_ram < 0)
340 return -EINVAL;
272 341
273 new = kmalloc(sizeof(struct memtype), GFP_KERNEL); 342 new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
274 if (!new) 343 if (!new)
275 return -ENOMEM; 344 return -ENOMEM;
276 345
277 new->start = start; 346 new->start = start;
278 new->end = end; 347 new->end = end;
279 new->type = actual_type; 348 new->type = actual_type;
280 349
281 if (new_type) 350 if (new_type)
282 *new_type = actual_type; 351 *new_type = actual_type;
@@ -335,6 +404,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
335 start, end, cattr_name(new->type), cattr_name(req_type)); 404 start, end, cattr_name(new->type), cattr_name(req_type));
336 kfree(new); 405 kfree(new);
337 spin_unlock(&memtype_lock); 406 spin_unlock(&memtype_lock);
407
338 return err; 408 return err;
339 } 409 }
340 410
@@ -358,6 +428,7 @@ int free_memtype(u64 start, u64 end)
358{ 428{
359 struct memtype *entry; 429 struct memtype *entry;
360 int err = -EINVAL; 430 int err = -EINVAL;
431 int is_range_ram;
361 432
362 if (!pat_enabled) 433 if (!pat_enabled)
363 return 0; 434 return 0;
@@ -366,6 +437,12 @@ int free_memtype(u64 start, u64 end)
366 if (is_ISA_range(start, end - 1)) 437 if (is_ISA_range(start, end - 1))
367 return 0; 438 return 0;
368 439
440 is_range_ram = pagerange_is_ram(start, end);
441 if (is_range_ram == 1)
442 return free_ram_pages_type(start, end);
443 else if (is_range_ram < 0)
444 return -EINVAL;
445
369 spin_lock(&memtype_lock); 446 spin_lock(&memtype_lock);
370 list_for_each_entry(entry, &memtype_list, nd) { 447 list_for_each_entry(entry, &memtype_list, nd) {
371 if (entry->start == start && entry->end == end) { 448 if (entry->start == start && entry->end == end) {
@@ -386,6 +463,7 @@ int free_memtype(u64 start, u64 end)
386 } 463 }
387 464
388 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end); 465 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
466
389 return err; 467 return err;
390} 468}
391 469
@@ -492,9 +570,9 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
492 570
493void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot) 571void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
494{ 572{
573 unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
495 u64 addr = (u64)pfn << PAGE_SHIFT; 574 u64 addr = (u64)pfn << PAGE_SHIFT;
496 unsigned long flags; 575 unsigned long flags;
497 unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
498 576
499 reserve_memtype(addr, addr + size, want_flags, &flags); 577 reserve_memtype(addr, addr + size, want_flags, &flags);
500 if (flags != want_flags) { 578 if (flags != want_flags) {
@@ -514,7 +592,7 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
514 free_memtype(addr, addr + size); 592 free_memtype(addr, addr + size);
515} 593}
516 594
517#if defined(CONFIG_DEBUG_FS) 595#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
518 596
519/* get Nth element of the linked list */ 597/* get Nth element of the linked list */
520static struct memtype *memtype_get_idx(loff_t pos) 598static struct memtype *memtype_get_idx(loff_t pos)
@@ -537,6 +615,7 @@ static struct memtype *memtype_get_idx(loff_t pos)
537 } 615 }
538 spin_unlock(&memtype_lock); 616 spin_unlock(&memtype_lock);
539 kfree(print_entry); 617 kfree(print_entry);
618
540 return NULL; 619 return NULL;
541} 620}
542 621
@@ -567,6 +646,7 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
567 seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type), 646 seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
568 print_entry->start, print_entry->end); 647 print_entry->start, print_entry->end);
569 kfree(print_entry); 648 kfree(print_entry);
649
570 return 0; 650 return 0;
571} 651}
572 652
@@ -598,4 +678,4 @@ static int __init pat_memtype_list_init(void)
598 678
599late_initcall(pat_memtype_list_init); 679late_initcall(pat_memtype_list_init);
600 680
601#endif /* CONFIG_DEBUG_FS */ 681#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index d50302774fe2..86f2ffc43c3d 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -63,10 +63,8 @@ static inline void pgd_list_del(pgd_t *pgd)
63#define UNSHARED_PTRS_PER_PGD \ 63#define UNSHARED_PTRS_PER_PGD \
64 (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) 64 (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
65 65
66static void pgd_ctor(void *p) 66static void pgd_ctor(pgd_t *pgd)
67{ 67{
68 pgd_t *pgd = p;
69
70 /* If the pgd points to a shared pagetable level (either the 68 /* If the pgd points to a shared pagetable level (either the
71 ptes in non-PAE, or shared PMD in PAE), then just copy the 69 ptes in non-PAE, or shared PMD in PAE), then just copy the
72 references from swapper_pg_dir. */ 70 references from swapper_pg_dir. */
@@ -87,7 +85,7 @@ static void pgd_ctor(void *p)
87 pgd_list_add(pgd); 85 pgd_list_add(pgd);
88} 86}
89 87
90static void pgd_dtor(void *pgd) 88static void pgd_dtor(pgd_t *pgd)
91{ 89{
92 unsigned long flags; /* can be called from interrupt context */ 90 unsigned long flags; /* can be called from interrupt context */
93 91
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index cab0abbd1ebe..0951db9ee519 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -123,7 +123,8 @@ static int __init parse_vmalloc(char *arg)
123 if (!arg) 123 if (!arg)
124 return -EINVAL; 124 return -EINVAL;
125 125
126 __VMALLOC_RESERVE = memparse(arg, &arg); 126 /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole */
127 __VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
127 return 0; 128 return 0;
128} 129}
129early_param("vmalloc", parse_vmalloc); 130early_param("vmalloc", parse_vmalloc);
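The one-line pgtable_32.c change above makes "vmalloc=SIZE" mean SIZE of usable vmalloc space by adding VMALLOC_OFFSET, the guard hole below the vmalloc area, to the reserve. A tiny model of the adjustment; the 8 MB offset is the usual i386 default and is assumed here, not read from this patch:

#include <stdio.h>

#define MB(x) ((unsigned long)(x) << 20)
#define VMALLOC_OFFSET MB(8)    /* guard hole below the vmalloc area (assumed) */

int main(void)
{
    unsigned long requested = MB(128);                 /* "vmalloc=128M" */
    unsigned long reserve = requested + VMALLOC_OFFSET;

    printf("usable vmalloc space    : %lu MB\n", requested >> 20);
    printf("address space set aside : %lu MB\n", reserve >> 20);
    return 0;
}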
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 0227694f7dab..8a5f1614a3d5 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -295,10 +295,12 @@ static void nmi_cpu_shutdown(void *dummy)
295 295
296static void nmi_shutdown(void) 296static void nmi_shutdown(void)
297{ 297{
298 struct op_msrs *msrs = &get_cpu_var(cpu_msrs); 298 struct op_msrs *msrs;
299
299 nmi_enabled = 0; 300 nmi_enabled = 0;
300 on_each_cpu(nmi_cpu_shutdown, NULL, 1); 301 on_each_cpu(nmi_cpu_shutdown, NULL, 1);
301 unregister_die_notifier(&profile_exceptions_nb); 302 unregister_die_notifier(&profile_exceptions_nb);
303 msrs = &get_cpu_var(cpu_msrs);
302 model->shutdown(msrs); 304 model->shutdown(msrs);
303 free_msrs(); 305 free_msrs();
304 put_cpu_var(cpu_msrs); 306 put_cpu_var(cpu_msrs);
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 56b4757a1f47..43ac5af338d8 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -10,11 +10,12 @@
10 10
11#include <linux/oprofile.h> 11#include <linux/oprofile.h>
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <linux/ptrace.h>
14#include <linux/nmi.h>
13#include <asm/msr.h> 15#include <asm/msr.h>
14#include <asm/ptrace.h>
15#include <asm/fixmap.h> 16#include <asm/fixmap.h>
16#include <asm/apic.h> 17#include <asm/apic.h>
17#include <asm/nmi.h> 18
18 19
19#include "op_x86_model.h" 20#include "op_x86_model.h"
20#include "op_counter.h" 21#include "op_counter.h"
@@ -40,7 +41,7 @@ static unsigned int num_controls = NUM_CONTROLS_NON_HT;
40static inline void setup_num_counters(void) 41static inline void setup_num_counters(void)
41{ 42{
42#ifdef CONFIG_SMP 43#ifdef CONFIG_SMP
43 if (smp_num_siblings == 2){ 44 if (smp_num_siblings == 2) {
44 num_counters = NUM_COUNTERS_HT2; 45 num_counters = NUM_COUNTERS_HT2;
45 num_controls = NUM_CONTROLS_HT2; 46 num_controls = NUM_CONTROLS_HT2;
46 } 47 }
@@ -86,7 +87,7 @@ struct p4_event_binding {
86#define CTR_FLAME_2 (1 << 6) 87#define CTR_FLAME_2 (1 << 6)
87#define CTR_IQ_5 (1 << 7) 88#define CTR_IQ_5 (1 << 7)
88 89
89static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = { 90static struct p4_counter_binding p4_counters[NUM_COUNTERS_NON_HT] = {
90 { CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0 }, 91 { CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0 },
91 { CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0 }, 92 { CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0 },
92 { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 }, 93 { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 },
@@ -97,32 +98,32 @@ static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = {
97 { CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5 } 98 { CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5 }
98}; 99};
99 100
100#define NUM_UNUSED_CCCRS NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT 101#define NUM_UNUSED_CCCRS (NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT)
101 102
102/* p4 event codes in libop/op_event.h are indices into this table. */ 103/* p4 event codes in libop/op_event.h are indices into this table. */
103 104
104static struct p4_event_binding p4_events[NUM_EVENTS] = { 105static struct p4_event_binding p4_events[NUM_EVENTS] = {
105 106
106 { /* BRANCH_RETIRED */ 107 { /* BRANCH_RETIRED */
107 0x05, 0x06, 108 0x05, 0x06,
108 { {CTR_IQ_4, MSR_P4_CRU_ESCR2}, 109 { {CTR_IQ_4, MSR_P4_CRU_ESCR2},
109 {CTR_IQ_5, MSR_P4_CRU_ESCR3} } 110 {CTR_IQ_5, MSR_P4_CRU_ESCR3} }
110 }, 111 },
111 112
112 { /* MISPRED_BRANCH_RETIRED */ 113 { /* MISPRED_BRANCH_RETIRED */
113 0x04, 0x03, 114 0x04, 0x03,
114 { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, 115 { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
115 { CTR_IQ_5, MSR_P4_CRU_ESCR1} } 116 { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
116 }, 117 },
117 118
118 { /* TC_DELIVER_MODE */ 119 { /* TC_DELIVER_MODE */
119 0x01, 0x01, 120 0x01, 0x01,
120 { { CTR_MS_0, MSR_P4_TC_ESCR0}, 121 { { CTR_MS_0, MSR_P4_TC_ESCR0},
121 { CTR_MS_2, MSR_P4_TC_ESCR1} } 122 { CTR_MS_2, MSR_P4_TC_ESCR1} }
122 }, 123 },
123 124
124 { /* BPU_FETCH_REQUEST */ 125 { /* BPU_FETCH_REQUEST */
125 0x00, 0x03, 126 0x00, 0x03,
126 { { CTR_BPU_0, MSR_P4_BPU_ESCR0}, 127 { { CTR_BPU_0, MSR_P4_BPU_ESCR0},
127 { CTR_BPU_2, MSR_P4_BPU_ESCR1} } 128 { CTR_BPU_2, MSR_P4_BPU_ESCR1} }
128 }, 129 },
@@ -146,7 +147,7 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
146 }, 147 },
147 148
148 { /* LOAD_PORT_REPLAY */ 149 { /* LOAD_PORT_REPLAY */
149 0x02, 0x04, 150 0x02, 0x04,
150 { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, 151 { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
151 { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} } 152 { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
152 }, 153 },
@@ -170,43 +171,43 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
170 }, 171 },
171 172
172 { /* BSQ_CACHE_REFERENCE */ 173 { /* BSQ_CACHE_REFERENCE */
173 0x07, 0x0c, 174 0x07, 0x0c,
174 { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, 175 { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
175 { CTR_BPU_2, MSR_P4_BSU_ESCR1} } 176 { CTR_BPU_2, MSR_P4_BSU_ESCR1} }
176 }, 177 },
177 178
178 { /* IOQ_ALLOCATION */ 179 { /* IOQ_ALLOCATION */
179 0x06, 0x03, 180 0x06, 0x03,
180 { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, 181 { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
181 { 0, 0 } } 182 { 0, 0 } }
182 }, 183 },
183 184
184 { /* IOQ_ACTIVE_ENTRIES */ 185 { /* IOQ_ACTIVE_ENTRIES */
185 0x06, 0x1a, 186 0x06, 0x1a,
186 { { CTR_BPU_2, MSR_P4_FSB_ESCR1}, 187 { { CTR_BPU_2, MSR_P4_FSB_ESCR1},
187 { 0, 0 } } 188 { 0, 0 } }
188 }, 189 },
189 190
190 { /* FSB_DATA_ACTIVITY */ 191 { /* FSB_DATA_ACTIVITY */
191 0x06, 0x17, 192 0x06, 0x17,
192 { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, 193 { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
193 { CTR_BPU_2, MSR_P4_FSB_ESCR1} } 194 { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
194 }, 195 },
195 196
196 { /* BSQ_ALLOCATION */ 197 { /* BSQ_ALLOCATION */
197 0x07, 0x05, 198 0x07, 0x05,
198 { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, 199 { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
199 { 0, 0 } } 200 { 0, 0 } }
200 }, 201 },
201 202
202 { /* BSQ_ACTIVE_ENTRIES */ 203 { /* BSQ_ACTIVE_ENTRIES */
203 0x07, 0x06, 204 0x07, 0x06,
204 { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */}, 205 { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */},
205 { 0, 0 } } 206 { 0, 0 } }
206 }, 207 },
207 208
208 { /* X87_ASSIST */ 209 { /* X87_ASSIST */
209 0x05, 0x03, 210 0x05, 0x03,
210 { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, 211 { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
211 { CTR_IQ_5, MSR_P4_CRU_ESCR3} } 212 { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
212 }, 213 },
@@ -216,21 +217,21 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
216 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 217 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
217 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 218 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
218 }, 219 },
219 220
220 { /* PACKED_SP_UOP */ 221 { /* PACKED_SP_UOP */
221 0x01, 0x08, 222 0x01, 0x08,
222 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 223 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
223 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 224 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
224 }, 225 },
225 226
226 { /* PACKED_DP_UOP */ 227 { /* PACKED_DP_UOP */
227 0x01, 0x0c, 228 0x01, 0x0c,
228 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 229 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
229 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 230 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
230 }, 231 },
231 232
232 { /* SCALAR_SP_UOP */ 233 { /* SCALAR_SP_UOP */
233 0x01, 0x0a, 234 0x01, 0x0a,
234 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 235 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
235 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 236 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
236 }, 237 },
@@ -242,31 +243,31 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
242 }, 243 },
243 244
244 { /* 64BIT_MMX_UOP */ 245 { /* 64BIT_MMX_UOP */
245 0x01, 0x02, 246 0x01, 0x02,
246 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 247 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
247 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 248 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
248 }, 249 },
249 250
250 { /* 128BIT_MMX_UOP */ 251 { /* 128BIT_MMX_UOP */
251 0x01, 0x1a, 252 0x01, 0x1a,
252 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 253 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
253 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 254 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
254 }, 255 },
255 256
256 { /* X87_FP_UOP */ 257 { /* X87_FP_UOP */
257 0x01, 0x04, 258 0x01, 0x04,
258 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 259 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
259 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 260 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
260 }, 261 },
261 262
262 { /* X87_SIMD_MOVES_UOP */ 263 { /* X87_SIMD_MOVES_UOP */
263 0x01, 0x2e, 264 0x01, 0x2e,
264 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 265 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
265 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 266 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
266 }, 267 },
267 268
268 { /* MACHINE_CLEAR */ 269 { /* MACHINE_CLEAR */
269 0x05, 0x02, 270 0x05, 0x02,
270 { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, 271 { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
271 { CTR_IQ_5, MSR_P4_CRU_ESCR3} } 272 { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
272 }, 273 },
@@ -276,9 +277,9 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
276 { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, 277 { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
277 { CTR_BPU_2, MSR_P4_FSB_ESCR1} } 278 { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
278 }, 279 },
279 280
280 { /* TC_MS_XFER */ 281 { /* TC_MS_XFER */
281 0x00, 0x05, 282 0x00, 0x05,
282 { { CTR_MS_0, MSR_P4_MS_ESCR0}, 283 { { CTR_MS_0, MSR_P4_MS_ESCR0},
283 { CTR_MS_2, MSR_P4_MS_ESCR1} } 284 { CTR_MS_2, MSR_P4_MS_ESCR1} }
284 }, 285 },
@@ -308,7 +309,7 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
308 }, 309 },
309 310
310 { /* INSTR_RETIRED */ 311 { /* INSTR_RETIRED */
311 0x04, 0x02, 312 0x04, 0x02,
312 { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, 313 { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
313 { CTR_IQ_5, MSR_P4_CRU_ESCR1} } 314 { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
314 }, 315 },
@@ -319,14 +320,14 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
319 { CTR_IQ_5, MSR_P4_CRU_ESCR1} } 320 { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
320 }, 321 },
321 322
322 { /* UOP_TYPE */ 323 { /* UOP_TYPE */
323 0x02, 0x02, 324 0x02, 0x02,
324 { { CTR_IQ_4, MSR_P4_RAT_ESCR0}, 325 { { CTR_IQ_4, MSR_P4_RAT_ESCR0},
325 { CTR_IQ_5, MSR_P4_RAT_ESCR1} } 326 { CTR_IQ_5, MSR_P4_RAT_ESCR1} }
326 }, 327 },
327 328
328 { /* RETIRED_MISPRED_BRANCH_TYPE */ 329 { /* RETIRED_MISPRED_BRANCH_TYPE */
329 0x02, 0x05, 330 0x02, 0x05,
330 { { CTR_MS_0, MSR_P4_TBPU_ESCR0}, 331 { { CTR_MS_0, MSR_P4_TBPU_ESCR0},
331 { CTR_MS_2, MSR_P4_TBPU_ESCR1} } 332 { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
332 }, 333 },
@@ -349,8 +350,8 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
349#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) 350#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
350#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) 351#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
351#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) 352#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
352#define ESCR_READ(escr,high,ev,i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) 353#define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
353#define ESCR_WRITE(escr,high,ev,i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) 354#define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
354 355
355#define CCCR_RESERVED_BITS 0x38030FFF 356#define CCCR_RESERVED_BITS 0x38030FFF
356#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) 357#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
@@ -360,15 +361,15 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
360#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) 361#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
361#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) 362#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
362#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) 363#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
363#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) 364#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
364#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) 365#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
365#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) 366#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
366#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) 367#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
367 368
368#define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0) 369#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
369#define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0) 370#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
370#define CTR_READ(l,h,i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0) 371#define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0)
371#define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0) 372#define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0)
372#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) 373#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
373 374
374 375
@@ -380,7 +381,7 @@ static unsigned int get_stagger(void)
380#ifdef CONFIG_SMP 381#ifdef CONFIG_SMP
381 int cpu = smp_processor_id(); 382 int cpu = smp_processor_id();
382 return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu))); 383 return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
383#endif 384#endif
384 return 0; 385 return 0;
385} 386}
386 387
@@ -395,25 +396,23 @@ static unsigned long reset_value[NUM_COUNTERS_NON_HT];
395 396
396static void p4_fill_in_addresses(struct op_msrs * const msrs) 397static void p4_fill_in_addresses(struct op_msrs * const msrs)
397{ 398{
398 unsigned int i; 399 unsigned int i;
399 unsigned int addr, cccraddr, stag; 400 unsigned int addr, cccraddr, stag;
400 401
401 setup_num_counters(); 402 setup_num_counters();
402 stag = get_stagger(); 403 stag = get_stagger();
403 404
404 /* initialize some registers */ 405 /* initialize some registers */
405 for (i = 0; i < num_counters; ++i) { 406 for (i = 0; i < num_counters; ++i)
406 msrs->counters[i].addr = 0; 407 msrs->counters[i].addr = 0;
407 } 408 for (i = 0; i < num_controls; ++i)
408 for (i = 0; i < num_controls; ++i) {
409 msrs->controls[i].addr = 0; 409 msrs->controls[i].addr = 0;
410 } 410
411
412 /* the counter & cccr registers we pay attention to */ 411 /* the counter & cccr registers we pay attention to */
413 for (i = 0; i < num_counters; ++i) { 412 for (i = 0; i < num_counters; ++i) {
414 addr = p4_counters[VIRT_CTR(stag, i)].counter_address; 413 addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
415 cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address; 414 cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address;
416 if (reserve_perfctr_nmi(addr)){ 415 if (reserve_perfctr_nmi(addr)) {
417 msrs->counters[i].addr = addr; 416 msrs->counters[i].addr = addr;
418 msrs->controls[i].addr = cccraddr; 417 msrs->controls[i].addr = cccraddr;
419 } 418 }
@@ -447,22 +446,22 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
447 if (reserve_evntsel_nmi(addr)) 446 if (reserve_evntsel_nmi(addr))
448 msrs->controls[i].addr = addr; 447 msrs->controls[i].addr = addr;
449 } 448 }
450 449
451 for (addr = MSR_P4_MS_ESCR0 + stag; 450 for (addr = MSR_P4_MS_ESCR0 + stag;
452 addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) { 451 addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
453 if (reserve_evntsel_nmi(addr)) 452 if (reserve_evntsel_nmi(addr))
454 msrs->controls[i].addr = addr; 453 msrs->controls[i].addr = addr;
455 } 454 }
456 455
457 for (addr = MSR_P4_IX_ESCR0 + stag; 456 for (addr = MSR_P4_IX_ESCR0 + stag;
458 addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) { 457 addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
459 if (reserve_evntsel_nmi(addr)) 458 if (reserve_evntsel_nmi(addr))
460 msrs->controls[i].addr = addr; 459 msrs->controls[i].addr = addr;
461 } 460 }
462 461
463 /* there are 2 remaining non-contiguously located ESCRs */ 462 /* there are 2 remaining non-contiguously located ESCRs */
464 463
465 if (num_counters == NUM_COUNTERS_NON_HT) { 464 if (num_counters == NUM_COUNTERS_NON_HT) {
466 /* standard non-HT CPUs handle both remaining ESCRs*/ 465 /* standard non-HT CPUs handle both remaining ESCRs*/
467 if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) 466 if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5))
468 msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; 467 msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
@@ -498,20 +497,20 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
498 unsigned int stag; 497 unsigned int stag;
499 498
500 stag = get_stagger(); 499 stag = get_stagger();
501 500
502 /* convert from counter *number* to counter *bit* */ 501 /* convert from counter *number* to counter *bit* */
503 counter_bit = 1 << VIRT_CTR(stag, ctr); 502 counter_bit = 1 << VIRT_CTR(stag, ctr);
504 503
505 /* find our event binding structure. */ 504 /* find our event binding structure. */
506 if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) { 505 if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) {
507 printk(KERN_ERR 506 printk(KERN_ERR
508 "oprofile: P4 event code 0x%lx out of range\n", 507 "oprofile: P4 event code 0x%lx out of range\n",
509 counter_config[ctr].event); 508 counter_config[ctr].event);
510 return; 509 return;
511 } 510 }
512 511
513 ev = &(p4_events[counter_config[ctr].event - 1]); 512 ev = &(p4_events[counter_config[ctr].event - 1]);
514 513
515 for (i = 0; i < maxbind; i++) { 514 for (i = 0; i < maxbind; i++) {
516 if (ev->bindings[i].virt_counter & counter_bit) { 515 if (ev->bindings[i].virt_counter & counter_bit) {
517 516
@@ -526,25 +525,24 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
526 ESCR_SET_OS_1(escr, counter_config[ctr].kernel); 525 ESCR_SET_OS_1(escr, counter_config[ctr].kernel);
527 } 526 }
528 ESCR_SET_EVENT_SELECT(escr, ev->event_select); 527 ESCR_SET_EVENT_SELECT(escr, ev->event_select);
529 ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); 528 ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
530 ESCR_WRITE(escr, high, ev, i); 529 ESCR_WRITE(escr, high, ev, i);
531 530
532 /* modify CCCR */ 531 /* modify CCCR */
533 CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); 532 CCCR_READ(cccr, high, VIRT_CTR(stag, ctr));
534 CCCR_CLEAR(cccr); 533 CCCR_CLEAR(cccr);
535 CCCR_SET_REQUIRED_BITS(cccr); 534 CCCR_SET_REQUIRED_BITS(cccr);
536 CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); 535 CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
537 if (stag == 0) { 536 if (stag == 0)
538 CCCR_SET_PMI_OVF_0(cccr); 537 CCCR_SET_PMI_OVF_0(cccr);
539 } else { 538 else
540 CCCR_SET_PMI_OVF_1(cccr); 539 CCCR_SET_PMI_OVF_1(cccr);
541 }
542 CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); 540 CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr));
543 return; 541 return;
544 } 542 }
545 } 543 }
546 544
547 printk(KERN_ERR 545 printk(KERN_ERR
548 "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n", 546 "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n",
549 counter_config[ctr].event, stag, ctr); 547 counter_config[ctr].event, stag, ctr);
550} 548}
@@ -559,14 +557,14 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
559 stag = get_stagger(); 557 stag = get_stagger();
560 558
561 rdmsr(MSR_IA32_MISC_ENABLE, low, high); 559 rdmsr(MSR_IA32_MISC_ENABLE, low, high);
562 if (! MISC_PMC_ENABLED_P(low)) { 560 if (!MISC_PMC_ENABLED_P(low)) {
563 printk(KERN_ERR "oprofile: P4 PMC not available\n"); 561 printk(KERN_ERR "oprofile: P4 PMC not available\n");
564 return; 562 return;
565 } 563 }
566 564
567 /* clear the cccrs we will use */ 565 /* clear the cccrs we will use */
568 for (i = 0 ; i < num_counters ; i++) { 566 for (i = 0 ; i < num_counters ; i++) {
569 if (unlikely(!CTRL_IS_RESERVED(msrs,i))) 567 if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
570 continue; 568 continue;
571 rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); 569 rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
572 CCCR_CLEAR(low); 570 CCCR_CLEAR(low);
@@ -576,14 +574,14 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
576 574
577 /* clear all escrs (including those outside our concern) */ 575 /* clear all escrs (including those outside our concern) */
578 for (i = num_counters; i < num_controls; i++) { 576 for (i = num_counters; i < num_controls; i++) {
579 if (unlikely(!CTRL_IS_RESERVED(msrs,i))) 577 if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
580 continue; 578 continue;
581 wrmsr(msrs->controls[i].addr, 0, 0); 579 wrmsr(msrs->controls[i].addr, 0, 0);
582 } 580 }
583 581
584 /* setup all counters */ 582 /* setup all counters */
585 for (i = 0 ; i < num_counters ; ++i) { 583 for (i = 0 ; i < num_counters ; ++i) {
586 if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs,i))) { 584 if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) {
587 reset_value[i] = counter_config[i].count; 585 reset_value[i] = counter_config[i].count;
588 pmc_setup_one_p4_counter(i); 586 pmc_setup_one_p4_counter(i);
589 CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); 587 CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
@@ -603,11 +601,11 @@ static int p4_check_ctrs(struct pt_regs * const regs,
603 stag = get_stagger(); 601 stag = get_stagger();
604 602
605 for (i = 0; i < num_counters; ++i) { 603 for (i = 0; i < num_counters; ++i) {
606 604
607 if (!reset_value[i]) 605 if (!reset_value[i])
608 continue; 606 continue;
609 607
610 /* 608 /*
611 * there is some eccentricity in the hardware which 609 * there is some eccentricity in the hardware which
612 * requires that we perform 2 extra corrections: 610 * requires that we perform 2 extra corrections:
613 * 611 *
@@ -616,24 +614,24 @@ static int p4_check_ctrs(struct pt_regs * const regs,
616 * 614 *
617 * - write the counter back twice to ensure it gets 615 * - write the counter back twice to ensure it gets
618 * updated properly. 616 * updated properly.
619 * 617 *
620 * the former seems to be related to extra NMIs happening 618 * the former seems to be related to extra NMIs happening
621 * during the current NMI; the latter is reported as errata 619 * during the current NMI; the latter is reported as errata
622 * N15 in intel doc 249199-029, pentium 4 specification 620 * N15 in intel doc 249199-029, pentium 4 specification
623 * update, though their suggested work-around does not 621 * update, though their suggested work-around does not
624 * appear to solve the problem. 622 * appear to solve the problem.
625 */ 623 */
626 624
627 real = VIRT_CTR(stag, i); 625 real = VIRT_CTR(stag, i);
628 626
629 CCCR_READ(low, high, real); 627 CCCR_READ(low, high, real);
630 CTR_READ(ctr, high, real); 628 CTR_READ(ctr, high, real);
631 if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { 629 if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
632 oprofile_add_sample(regs, i); 630 oprofile_add_sample(regs, i);
633 CTR_WRITE(reset_value[i], real); 631 CTR_WRITE(reset_value[i], real);
634 CCCR_CLEAR_OVF(low); 632 CCCR_CLEAR_OVF(low);
635 CCCR_WRITE(low, high, real); 633 CCCR_WRITE(low, high, real);
636 CTR_WRITE(reset_value[i], real); 634 CTR_WRITE(reset_value[i], real);
637 } 635 }
638 } 636 }
639 637
@@ -683,15 +681,16 @@ static void p4_shutdown(struct op_msrs const * const msrs)
683 int i; 681 int i;
684 682
685 for (i = 0 ; i < num_counters ; ++i) { 683 for (i = 0 ; i < num_counters ; ++i) {
686 if (CTR_IS_RESERVED(msrs,i)) 684 if (CTR_IS_RESERVED(msrs, i))
687 release_perfctr_nmi(msrs->counters[i].addr); 685 release_perfctr_nmi(msrs->counters[i].addr);
688 } 686 }
689 /* some of the control registers are specially reserved in 687 /*
688 * some of the control registers are specially reserved in
690 * conjunction with the counter registers (hence the starting offset). 689 * conjunction with the counter registers (hence the starting offset).
691 * This saves a few bits. 690 * This saves a few bits.
692 */ 691 */
693 for (i = num_counters ; i < num_controls ; ++i) { 692 for (i = num_counters ; i < num_controls ; ++i) {
694 if (CTRL_IS_RESERVED(msrs,i)) 693 if (CTRL_IS_RESERVED(msrs, i))
695 release_evntsel_nmi(msrs->controls[i].addr); 694 release_evntsel_nmi(msrs->controls[i].addr);
696 } 695 }
697} 696}
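
The op_model_p4.c hunks above are checkpatch-style cleanups, but one of them is more than cosmetic: wrapping the NUM_UNUSED_CCCRS body in parentheses. An unparenthesized macro body is pasted verbatim into the caller's expression and picks up whatever operator precedence surrounds it. A minimal, self-contained sketch of the failure mode; the numeric values below are illustrative stand-ins, not the real op_model_p4.c constants:

#include <stdio.h>

/* Illustrative stand-in values, not the kernel's definitions. */
#define NUM_CCCRS_NON_HT	18
#define NUM_COUNTERS_NON_HT	8

/* Old form: the body is substituted as-is, so neighbouring operators bind into it. */
#define NUM_UNUSED_CCCRS_OLD	NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT
/* New form from the patch: parentheses make the macro one self-contained expression. */
#define NUM_UNUSED_CCCRS_NEW	(NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT)

int main(void)
{
	/* 2 * (18 - 8) should be 20 ... */
	printf("old: %d\n", 2 * NUM_UNUSED_CCCRS_OLD);	/* expands to 2*18-8, prints 28 */
	printf("new: %d\n", 2 * NUM_UNUSED_CCCRS_NEW);	/* prints 20, as intended */
	return 0;
}

The do { ... } while (0) wrappers around the rdmsr/wrmsr helpers, which the patch only reformats, exist for the same reason: they keep a macro behaving like a single statement when it sits in an unbraced if/else body.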
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 6a0fca78c362..22e057665e55 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -580,7 +580,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
580 unsigned long action, void *hcpu) 580 unsigned long action, void *hcpu)
581{ 581{
582 int cpu = (long)hcpu; 582 int cpu = (long)hcpu;
583 switch(action) { 583 switch (action) {
584 case CPU_ONLINE: 584 case CPU_ONLINE:
585 case CPU_ONLINE_FROZEN: 585 case CPU_ONLINE_FROZEN:
586 smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0); 586 smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 8e077185e185..006599db0dc7 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1043,35 +1043,44 @@ static void __init pcibios_fixup_irqs(void)
1043 if (io_apic_assign_pci_irqs) { 1043 if (io_apic_assign_pci_irqs) {
1044 int irq; 1044 int irq;
1045 1045
1046 if (pin) { 1046 if (!pin)
1047 /* 1047 continue;
1048 * interrupt pins are numbered starting 1048
1049 * from 1 1049 /*
1050 */ 1050 * interrupt pins are numbered starting from 1
1051 pin--; 1051 */
1052 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, 1052 pin--;
1053 PCI_SLOT(dev->devfn), pin); 1053 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
1054 /* 1054 PCI_SLOT(dev->devfn), pin);
1055 * Busses behind bridges are typically not listed in the MP-table. 1055 /*
1056 * In this case we have to look up the IRQ based on the parent bus, 1056 * Busses behind bridges are typically not listed in the
1057 * parent slot, and pin number. The SMP code detects such bridged 1057 * MP-table. In this case we have to look up the IRQ
1058 * busses itself so we should get into this branch reliably. 1058 * based on the parent bus, parent slot, and pin number.
1059 */ 1059 * The SMP code detects such bridged busses itself so we
1060 if (irq < 0 && dev->bus->parent) { /* go back to the bridge */ 1060 * should get into this branch reliably.
1061 struct pci_dev *bridge = dev->bus->self; 1061 */
1062 1062 if (irq < 0 && dev->bus->parent) {
1063 pin = (pin + PCI_SLOT(dev->devfn)) % 4; 1063 /* go back to the bridge */
1064 irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 1064 struct pci_dev *bridge = dev->bus->self;
1065 PCI_SLOT(bridge->devfn), pin); 1065 int bus;
1066 if (irq >= 0) 1066
1067 dev_warn(&dev->dev, "using bridge %s INT %c to get IRQ %d\n", 1067 pin = (pin + PCI_SLOT(dev->devfn)) % 4;
1068 pci_name(bridge), 1068 bus = bridge->bus->number;
1069 'A' + pin, irq); 1069 irq = IO_APIC_get_PCI_irq_vector(bus,
1070 } 1070 PCI_SLOT(bridge->devfn), pin);
1071 if (irq >= 0) { 1071 if (irq >= 0)
1072 dev_info(&dev->dev, "PCI->APIC IRQ transform: INT %c -> IRQ %d\n", 'A' + pin, irq); 1072 dev_warn(&dev->dev,
1073 dev->irq = irq; 1073 "using bridge %s INT %c to "
1074 } 1074 "get IRQ %d\n",
1075 pci_name(bridge),
1076 'A' + pin, irq);
1077 }
1078 if (irq >= 0) {
1079 dev_info(&dev->dev,
1080 "PCI->APIC IRQ transform: INT %c "
1081 "-> IRQ %d\n",
1082 'A' + pin, irq);
1083 dev->irq = irq;
1075 } 1084 }
1076 } 1085 }
1077#endif 1086#endif
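
The pcibios_fixup_irqs() rework above is behaviourally neutral: the `if (pin) { ... }` wrapper around roughly thirty lines becomes an early `continue`, dropping one indentation level so the IO_APIC_get_PCI_irq_vector() calls and the dev_warn()/dev_info() strings fit the kernel's line-length rules. A generic sketch of that invert-and-continue pattern; the struct and names here are hypothetical, not the kernel's PCI types:

#include <stdio.h>

struct fake_dev {			/* hypothetical, not struct pci_dev */
	int pin;			/* 0 = no interrupt pin */
	const char *name;
};

static void scan(const struct fake_dev *dev, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		int pin;

		if (!dev[i].pin)	/* reject the uninteresting case up front ... */
			continue;	/* ... instead of nesting everything in if (pin) { } */

		pin = dev[i].pin - 1;	/* interrupt pins are numbered starting from 1 */
		printf("%s: INT %c\n", dev[i].name, 'A' + pin);
	}
}

int main(void)
{
	const struct fake_dev devs[] = {
		{ 1, "00:01.0" }, { 0, "00:02.0" }, { 3, "00:1f.2" },
	};

	scan(devs, 3);			/* prints lines for 00:01.0 (INT A) and 00:1f.2 (INT C) */
	return 0;
}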
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 4fc7e872c85e..d1e9b53f9d33 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -1,5 +1,3 @@
1.text
2
3/* 1/*
4 * This may not use any stack, nor any variable that is not "NoSave": 2 * This may not use any stack, nor any variable that is not "NoSave":
5 * 3 *
@@ -12,17 +10,18 @@
12#include <asm/segment.h> 10#include <asm/segment.h>
13#include <asm/page.h> 11#include <asm/page.h>
14#include <asm/asm-offsets.h> 12#include <asm/asm-offsets.h>
13#include <asm/processor-flags.h>
15 14
16 .text 15.text
17 16
18ENTRY(swsusp_arch_suspend) 17ENTRY(swsusp_arch_suspend)
19
20 movl %esp, saved_context_esp 18 movl %esp, saved_context_esp
21 movl %ebx, saved_context_ebx 19 movl %ebx, saved_context_ebx
22 movl %ebp, saved_context_ebp 20 movl %ebp, saved_context_ebp
23 movl %esi, saved_context_esi 21 movl %esi, saved_context_esi
24 movl %edi, saved_context_edi 22 movl %edi, saved_context_edi
25 pushfl ; popl saved_context_eflags 23 pushfl
24 popl saved_context_eflags
26 25
27 call swsusp_save 26 call swsusp_save
28 ret 27 ret
@@ -59,7 +58,7 @@ done:
59 movl mmu_cr4_features, %ecx 58 movl mmu_cr4_features, %ecx
60 jecxz 1f # cr4 Pentium and higher, skip if zero 59 jecxz 1f # cr4 Pentium and higher, skip if zero
61 movl %ecx, %edx 60 movl %ecx, %edx
62 andl $~(1<<7), %edx; # PGE 61 andl $~(X86_CR4_PGE), %edx
63 movl %edx, %cr4; # turn off PGE 62 movl %edx, %cr4; # turn off PGE
641: 631:
65 movl %cr3, %eax; # flush TLB 64 movl %cr3, %eax; # flush TLB
@@ -74,7 +73,8 @@ done:
74 movl saved_context_esi, %esi 73 movl saved_context_esi, %esi
75 movl saved_context_edi, %edi 74 movl saved_context_edi, %edi
76 75
77 pushl saved_context_eflags ; popfl 76 pushl saved_context_eflags
77 popfl
78 78
79 xorl %eax, %eax 79 xorl %eax, %eax
80 80
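
Besides splitting the `pushfl ; popl` pairs onto separate lines, the hibernation change replaces the bare `1<<7` mask with X86_CR4_PGE from <asm/processor-flags.h>. The mask value is unchanged; only the intent becomes readable. A user-space C sketch of that identity (the CR4 image below is made up; 0x80 is the architectural PGE bit):

#include <stdio.h>

#define X86_CR4_PGE	0x00000080	/* CR4 bit 7: page global enable */

int main(void)
{
	unsigned long cr4 = 0x000006f0;	/* illustrative CR4 image, PGE currently set */

	/* Before: the reader has to remember that bit 7 means PGE. */
	unsigned long old_style = cr4 & ~(1UL << 7);

	/* After: the name carries the meaning, the resulting mask is identical. */
	unsigned long new_style = cr4 & ~(unsigned long)X86_CR4_PGE;

	printf("%#lx %#lx identical=%d\n", old_style, new_style,
	       old_style == new_style);
	return 0;
}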
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 7b3508952b9c..a27d562a9744 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -844,7 +844,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
844 844
845/* Early in boot, while setting up the initial pagetable, assume 845/* Early in boot, while setting up the initial pagetable, assume
846 everything is pinned. */ 846 everything is pinned. */
847static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn) 847static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
848{ 848{
849#ifdef CONFIG_FLATMEM 849#ifdef CONFIG_FLATMEM
850 BUG_ON(mem_map); /* should only be used early */ 850 BUG_ON(mem_map); /* should only be used early */
@@ -854,7 +854,7 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn)
854 854
855/* Early release_pte assumes that all pts are pinned, since there's 855/* Early release_pte assumes that all pts are pinned, since there's
856 only init_mm and anything attached to that is pinned. */ 856 only init_mm and anything attached to that is pinned. */
857static void xen_release_pte_init(u32 pfn) 857static void xen_release_pte_init(unsigned long pfn)
858{ 858{
859 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 859 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
860} 860}
@@ -870,7 +870,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
870 870
871/* This needs to make sure the new pte page is pinned iff its being 871/* This needs to make sure the new pte page is pinned iff its being
872 attached to a pinned pagetable. */ 872 attached to a pinned pagetable. */
873static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level) 873static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
874{ 874{
875 struct page *page = pfn_to_page(pfn); 875 struct page *page = pfn_to_page(pfn);
876 876
@@ -888,12 +888,12 @@ static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
888 } 888 }
889} 889}
890 890
891static void xen_alloc_pte(struct mm_struct *mm, u32 pfn) 891static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
892{ 892{
893 xen_alloc_ptpage(mm, pfn, PT_PTE); 893 xen_alloc_ptpage(mm, pfn, PT_PTE);
894} 894}
895 895
896static void xen_alloc_pmd(struct mm_struct *mm, u32 pfn) 896static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
897{ 897{
898 xen_alloc_ptpage(mm, pfn, PT_PMD); 898 xen_alloc_ptpage(mm, pfn, PT_PMD);
899} 899}
@@ -941,7 +941,7 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
941} 941}
942 942
943/* This should never happen until we're OK to use struct page */ 943/* This should never happen until we're OK to use struct page */
944static void xen_release_ptpage(u32 pfn, unsigned level) 944static void xen_release_ptpage(unsigned long pfn, unsigned level)
945{ 945{
946 struct page *page = pfn_to_page(pfn); 946 struct page *page = pfn_to_page(pfn);
947 947
@@ -955,23 +955,23 @@ static void xen_release_ptpage(u32 pfn, unsigned level)
955 } 955 }
956} 956}
957 957
958static void xen_release_pte(u32 pfn) 958static void xen_release_pte(unsigned long pfn)
959{ 959{
960 xen_release_ptpage(pfn, PT_PTE); 960 xen_release_ptpage(pfn, PT_PTE);
961} 961}
962 962
963static void xen_release_pmd(u32 pfn) 963static void xen_release_pmd(unsigned long pfn)
964{ 964{
965 xen_release_ptpage(pfn, PT_PMD); 965 xen_release_ptpage(pfn, PT_PMD);
966} 966}
967 967
968#if PAGETABLE_LEVELS == 4 968#if PAGETABLE_LEVELS == 4
969static void xen_alloc_pud(struct mm_struct *mm, u32 pfn) 969static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
970{ 970{
971 xen_alloc_ptpage(mm, pfn, PT_PUD); 971 xen_alloc_ptpage(mm, pfn, PT_PUD);
972} 972}
973 973
974static void xen_release_pud(u32 pfn) 974static void xen_release_pud(unsigned long pfn)
975{ 975{
976 xen_release_ptpage(pfn, PT_PUD); 976 xen_release_ptpage(pfn, PT_PUD);
977} 977}
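
The Xen hunks above widen the pfn arguments from u32 to unsigned long, and the setup.c hunk that follows casts max_pfn to u64 before PFN_PHYS() shifts it into a physical address; the common theme is keeping page-frame arithmetic from being silently truncated. A left shift carried out in a type that is too narrow drops the high bits without warning. A small sketch with a made-up frame number (PAGE_SHIFT matches x86, everything else is illustrative):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned long max_pfn = 0x140000UL;	/* frame count for roughly 5 GiB of RAM */

	/* Shift done in a 32-bit type: the top bit of the result is lost. */
	uint32_t truncated = (uint32_t)max_pfn << PAGE_SHIFT;

	/* Widen first, as PFN_PHYS((u64)max_pfn) does in the patch. */
	uint64_t widened = (uint64_t)max_pfn << PAGE_SHIFT;

	printf("truncated: %#x\n", (unsigned int)truncated);		/* 0x40000000  (1 GiB) */
	printf("widened:   %#llx\n", (unsigned long long)widened);	/* 0x140000000 (5 GiB) */
	return 0;
}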
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b6acc3a0af46..d67901083888 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -42,7 +42,7 @@ char * __init xen_memory_setup(void)
42 42
43 e820.nr_map = 0; 43 e820.nr_map = 0;
44 44
45 e820_add_region(0, PFN_PHYS(max_pfn), E820_RAM); 45 e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);
46 46
47 /* 47 /*
48 * Even though this is normal, usable memory under Xen, reserve 48 * Even though this is normal, usable memory under Xen, reserve