author    Ingo Molnar <mingo@elte.hu>  2008-09-22 07:08:57 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-09-22 07:08:57 -0400
commit    0b88641f1bafdbd087d5e63987a30cc0eadd63b9 (patch)
tree      81dcf756db373444140bb2623584710c628e3048 /arch/x86
parent    fbdbf709938d155c719c76b9894d28342632c797 (diff)
parent    72d31053f62c4bc464c2783974926969614a8649 (diff)
Merge commit 'v2.6.27-rc7' into x86/debug
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 72
-rw-r--r--  arch/x86/Kconfig.cpu | 13
-rw-r--r--  arch/x86/Makefile | 5
-rw-r--r--  arch/x86/boot/boot.h | 10
-rw-r--r--  arch/x86/boot/compressed/misc.c | 39
-rw-r--r--  arch/x86/boot/cpu.c | 3
-rw-r--r--  arch/x86/boot/cpucheck.c | 18
-rw-r--r--  arch/x86/boot/main.c | 5
-rw-r--r--  arch/x86/boot/memory.c | 1
-rw-r--r--  arch/x86/configs/i386_defconfig | 303
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 258
-rw-r--r--  arch/x86/ia32/ia32_aout.c | 6
-rw-r--r--  arch/x86/ia32/ia32entry.S | 12
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 2
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 16
-rw-r--r--  arch/x86/kernel/acpi/cstate.c | 3
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 8
-rw-r--r--  arch/x86/kernel/alternative.c | 36
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 38
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 28
-rw-r--r--  arch/x86/kernel/apic_32.c | 22
-rw-r--r--  arch/x86/kernel/apic_64.c | 7
-rw-r--r--  arch/x86/kernel/apm_32.c | 1
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c | 17
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 9
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 6
-rw-r--r--  arch/x86/kernel/cpu/centaur.c | 11
-rw-r--r--  arch/x86/kernel/cpu/common.c | 18
-rw-r--r--  arch/x86/kernel/cpu/common_64.c | 74
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 10
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/elanfreq.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 18
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 12
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 3
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c | 50
-rw-r--r--  arch/x86/kernel/cpu/feature_names.c | 3
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c | 5
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 18
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 20
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c | 5
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 8
-rw-r--r--  arch/x86/kernel/cpuid.c | 15
-rw-r--r--  arch/x86/kernel/e820.c | 2
-rw-r--r--  arch/x86/kernel/efi_32.c | 4
-rw-r--r--  arch/x86/kernel/genapic_64.c | 1
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c | 10
-rw-r--r--  arch/x86/kernel/head64.c | 1
-rw-r--r--  arch/x86/kernel/head_32.S | 8
-rw-r--r--  arch/x86/kernel/hpet.c | 53
-rw-r--r--  arch/x86/kernel/io_apic_32.c | 6
-rw-r--r--  arch/x86/kernel/io_apic_64.c | 25
-rw-r--r--  arch/x86/kernel/io_delay.c | 8
-rw-r--r--  arch/x86/kernel/irqinit_64.c | 5
-rw-r--r--  arch/x86/kernel/kdebugfs.c | 1
-rw-r--r--  arch/x86/kernel/kprobes.c | 6
-rw-r--r--  arch/x86/kernel/ldt.c | 6
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c | 57
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c | 2
-rw-r--r--  arch/x86/kernel/mfgpt_32.c | 52
-rw-r--r--  arch/x86/kernel/microcode.c | 17
-rw-r--r--  arch/x86/kernel/mmconf-fam10h_64.c | 2
-rw-r--r--  arch/x86/kernel/module_64.c | 1
-rw-r--r--  arch/x86/kernel/mpparse.c | 17
-rw-r--r--  arch/x86/kernel/msr.c | 40
-rw-r--r--  arch/x86/kernel/nmi.c | 28
-rw-r--r--  arch/x86/kernel/numaq_32.c | 2
-rw-r--r--  arch/x86/kernel/paravirt.c | 2
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 172
-rw-r--r--  arch/x86/kernel/pci-dma.c | 157
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 14
-rw-r--r--  arch/x86/kernel/pci-nommu.c | 14
-rw-r--r--  arch/x86/kernel/pci-swiotlb_64.c | 2
-rw-r--r--  arch/x86/kernel/process_32.c | 7
-rw-r--r--  arch/x86/kernel/process_64.c | 7
-rw-r--r--  arch/x86/kernel/reboot.c | 11
-rw-r--r--  arch/x86/kernel/relocate_kernel_32.S | 178
-rw-r--r--  arch/x86/kernel/setup.c | 47
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 21
-rw-r--r--  arch/x86/kernel/signal_64.c | 62
-rw-r--r--  arch/x86/kernel/smpboot.c | 68
-rw-r--r--  arch/x86/kernel/smpcommon.c | 17
-rw-r--r--  arch/x86/kernel/syscall_table_32.S | 6
-rw-r--r--  arch/x86/kernel/tlb_uv.c | 3
-rw-r--r--  arch/x86/kernel/traps_64.c | 9
-rw-r--r--  arch/x86/kernel/tsc.c | 242
-rw-r--r--  arch/x86/kernel/tsc_sync.c | 6
-rw-r--r--  arch/x86/kernel/visws_quirks.c | 6
-rw-r--r--  arch/x86/kernel/vmi_32.c | 3
-rw-r--r--  arch/x86/kernel/vmlinux_32.lds.S | 8
-rw-r--r--  arch/x86/kvm/Kconfig | 1
-rw-r--r--  arch/x86/kvm/mmu.c | 111
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 14
-rw-r--r--  arch/x86/kvm/svm.c | 22
-rw-r--r--  arch/x86/kvm/vmx.c | 21
-rw-r--r--  arch/x86/kvm/vmx.h | 2
-rw-r--r--  arch/x86/kvm/x86.c | 133
-rw-r--r--  arch/x86/lguest/boot.c | 3
-rw-r--r--  arch/x86/lib/copy_user_64.S | 2
-rw-r--r--  arch/x86/lib/copy_user_nocache_64.S | 3
-rw-r--r--  arch/x86/lib/msr-on-cpu.c | 22
-rw-r--r--  arch/x86/mach-rdc321x/platform.c | 1
-rw-r--r--  arch/x86/mm/Makefile | 2
-rw-r--r--  arch/x86/mm/discontig_32.c | 3
-rw-r--r--  arch/x86/mm/gup.c | 298
-rw-r--r--  arch/x86/mm/hugetlbpage.c | 78
-rw-r--r--  arch/x86/mm/init_32.c | 4
-rw-r--r--  arch/x86/mm/init_64.c | 85
-rw-r--r--  arch/x86/mm/ioremap.c | 18
-rw-r--r--  arch/x86/mm/mmio-mod.c | 4
-rw-r--r--  arch/x86/mm/numa_64.c | 4
-rw-r--r--  arch/x86/mm/pageattr-test.c | 3
-rw-r--r--  arch/x86/mm/pageattr.c | 27
-rw-r--r--  arch/x86/mm/pat.c | 50
-rw-r--r--  arch/x86/mm/pgtable.c | 3
-rw-r--r--  arch/x86/mm/pgtable_32.c | 47
-rw-r--r--  arch/x86/mm/srat_32.c | 12
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 75
-rw-r--r--  arch/x86/pci/amd_bus.c | 52
-rw-r--r--  arch/x86/pci/fixup.c | 3
-rw-r--r--  arch/x86/pci/i386.c | 24
-rw-r--r--  arch/x86/pci/irq.c | 108
-rw-r--r--  arch/x86/pci/legacy.c | 2
-rw-r--r--  arch/x86/pci/mmconfig-shared.c | 67
-rw-r--r--  arch/x86/pci/numaq_32.c | 5
-rw-r--r--  arch/x86/power/cpu_32.c | 6
-rw-r--r--  arch/x86/power/hibernate_asm_32.S | 26
-rw-r--r--  arch/x86/xen/enlighten.c | 2
-rw-r--r--  arch/x86/xen/setup.c | 2
-rw-r--r--  arch/x86/xen/xen-asm_64.S | 2
131 files changed, 2563 insertions, 1417 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 03980cb04291..ed92864d1325 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,12 +21,16 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
+	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_KRETPROBES
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
+	select HAVE_GENERIC_DMA_COHERENT if X86_32
+	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 
 config ARCH_DEFCONFIG
 	string
@@ -329,20 +333,6 @@ config X86_BIGSMP
 
 endif
 
-config X86_RDC321X
-	bool "RDC R-321x SoC"
-	depends on X86_32
-	select M486
-	select X86_REBOOTFIXUPS
-	select GENERIC_GPIO
-	select LEDS_CLASS
-	select LEDS_GPIO
-	select NEW_LEDS
-	help
-	  This option is needed for RDC R-321x system-on-chip, also known
-	  as R-8610-(G).
-	  If you don't have one of these chips, you should say N here.
-
 config X86_VSMP
 	bool "Support for ScaleMP vSMP"
 	select PARAVIRT
@@ -366,6 +356,16 @@ config X86_VISWS
 	  A kernel compiled for the Visual Workstation will run on general
 	  PCs as well. See <file:Documentation/sgi-visws.txt> for details.
 
+config X86_RDC321X
+	bool "RDC R-321x SoC"
+	depends on X86_32
+	select M486
+	select X86_REBOOTFIXUPS
+	help
+	  This option is needed for RDC R-321x system-on-chip, also known
+	  as R-8610-(G).
+	  If you don't have one of these chips, you should say N here.
+
 config SCHED_NO_NO_OMIT_FRAME_POINTER
 	def_bool y
 	prompt "Single-depth WCHAN output"
@@ -577,35 +577,29 @@ config SWIOTLB
 
 config IOMMU_HELPER
 	def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
+
 config MAXSMP
 	bool "Configure Maximum number of SMP Processors and NUMA Nodes"
-	depends on X86_64 && SMP
+	depends on X86_64 && SMP && BROKEN
 	default n
 	help
 	  Configure maximum number of CPUS and NUMA Nodes for this architecture.
 	  If unsure, say N.
 
-if MAXSMP
 config NR_CPUS
-	int
-	default "4096"
-endif
-
-if !MAXSMP
-config NR_CPUS
-	int "Maximum number of CPUs (2-4096)"
-	range 2 4096
+	int "Maximum number of CPUs (2-512)" if !MAXSMP
+	range 2 512
 	depends on SMP
+	default "4096" if MAXSMP
 	default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
 	default "8"
 	help
 	  This allows you to specify the maximum number of CPUs which this
-	  kernel will support. The maximum supported value is 4096 and the
+	  kernel will support. The maximum supported value is 512 and the
 	  minimum value which makes sense is 2.
 
 	  This is purely to save memory - each supported CPU adds
 	  approximately eight kilobytes to the kernel image.
-endif
 
 config SCHED_SMT
 	bool "SMT (Hyperthreading) scheduler support"
@@ -951,9 +945,9 @@ config NUMA
 	  local memory controller of the CPU and add some more
 	  NUMA awareness to the kernel.
 
-	  For i386 this is currently highly experimental and should be only
+	  For 32-bit this is currently highly experimental and should be only
 	  used for kernel development. It might also cause boot failures.
-	  For x86_64 this is recommended on all multiprocessor Opteron systems.
+	  For 64-bit this is recommended on all multiprocessor Opteron systems.
 	  If the system is EM64T, you should say N unless your system is
 	  EM64T NUMA.
 
@@ -996,17 +990,10 @@ config NUMA_EMU
 	  into virtual nodes when booted with "numa=fake=N", where N is the
 	  number of nodes. This is only useful for debugging.
 
-if MAXSMP
-
-config NODES_SHIFT
-	int
-	default "9"
-endif
-
-if !MAXSMP
 config NODES_SHIFT
-	int "Maximum NUMA Nodes (as a power of 2)"
+	int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP
 	range 1 9 if X86_64
+	default "9" if MAXSMP
 	default "6" if X86_64
 	default "4" if X86_NUMAQ
 	default "3"
@@ -1014,7 +1001,6 @@ config NODES_SHIFT
 	help
 	  Specify the maximum number of NUMA Nodes available on the target
 	  system. Increases memory reserved to accomodate various tables.
-endif
 
 config HAVE_ARCH_BOOTMEM_NODE
 	def_bool y
@@ -1263,7 +1249,7 @@ config KEXEC
 	  strongly in flux, so no good recommendation can be made.
 
 config CRASH_DUMP
-	bool "kernel crash dumps (EXPERIMENTAL)"
+	bool "kernel crash dumps"
 	depends on X86_64 || (X86_32 && HIGHMEM)
 	help
 	  Generate crash dump after being started by kexec.
@@ -1276,6 +1262,14 @@ config CRASH_DUMP
 	  (CONFIG_RELOCATABLE=y).
 	  For more details see Documentation/kdump/kdump.txt
 
+config KEXEC_JUMP
+	bool "kexec jump (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on KEXEC && HIBERNATION && X86_32
+	help
+	  Jump between original kernel and kexeced kernel and invoke
+	  code in physical address mode via KEXEC
+
 config PHYSICAL_START
 	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
 	default "0x1000000" if X86_NUMAQ
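Taken together, the Kconfig hunks above collapse the old "if MAXSMP" / "if !MAXSMP" twin definitions of NR_CPUS and NODES_SHIFT into single options whose interactive prompts are simply hidden when MAXSMP is set, and additionally gate MAXSMP itself on BROKEN. As a rough, back-of-the-envelope reading of the help text quoted above (about eight kilobytes of kernel image per possible CPU): a MAXSMP build with NR_CPUS=4096 reserves on the order of 4096 x 8 KB, roughly 32 MB, whereas the ordinary defaults of 32 or 8 CPUs cost only a few hundred kilobytes at most.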
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 2c518fbc52ec..b225219c448c 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -382,14 +382,17 @@ config X86_OOSTORE
 # P6_NOPs are a relatively minor optimization that require a family >=
 # 6 processor, except that it is broken on certain VIA chips.
 # Furthermore, AMD chips prefer a totally different sequence of NOPs
-# (which work on all CPUs). As a result, disallow these if we're
-# compiling X86_GENERIC but not X86_64 (these NOPs do work on all
-# x86-64 capable chips); the list of processors in the right-hand clause
-# are the cores that benefit from this optimization.
+# (which work on all CPUs). In addition, it looks like Virtual PC
+# does not understand them.
+#
+# As a result, disallow these if we're not compiling for X86_64 (these
+# NOPs do work on all x86-64 capable chips); the list of processors in
+# the right-hand clause are the cores that benefit from this optimization.
 #
 config X86_P6_NOP
 	def_bool y
-	depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MPSC)
+	depends on X86_64
+	depends on (MCORE2 || MPENTIUM4 || MPSC)
 
 config X86_TSC
 	def_bool y
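For readers who have not run into the NOP families the comment above discusses, the sketch below shows the two encodings in question. The byte sequences are standard x86; the helper names are purely illustrative and not part of the kernel's asm/nops.h interface.

    /* Roughly the distinction behind CONFIG_X86_P6_NOP (illustrative only). */
    static inline void generic_nop3(void)
    {
            /* 3-byte no-op every 386-class CPU accepts: lea 0x0(%esi),%esi */
            asm volatile(".byte 0x8d, 0x76, 0x00");
    }

    static inline void p6_nop3(void)
    {
            /* P6-style 3-byte NOP, "nopl (%eax)"; rejected by some VIA parts
               and, per the comment above, by Virtual PC */
            asm volatile(".byte 0x0f, 0x1f, 0x00");
    }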
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 919ce21ea654..f5631da585b6 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -118,11 +118,6 @@ mflags-$(CONFIG_X86_GENERICARCH):= -Iinclude/asm-x86/mach-generic
 fcore-$(CONFIG_X86_GENERICARCH) += arch/x86/mach-generic/
 mcore-$(CONFIG_X86_GENERICARCH) := arch/x86/mach-default/
 
-# RDC R-321x subarch support
-mflags-$(CONFIG_X86_RDC321X) := -Iinclude/asm-x86/mach-rdc321x
-mcore-$(CONFIG_X86_RDC321X) := arch/x86/mach-default/
-core-$(CONFIG_X86_RDC321X) += arch/x86/mach-rdc321x/
-
 # default subarch .h files
 mflags-y += -Iinclude/asm-x86/mach-default
 
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index a34b9982c7cb..cc0ef13fba7a 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -24,10 +24,14 @@
 #include <linux/edd.h>
 #include <asm/boot.h>
 #include <asm/setup.h>
+#include "bitops.h"
+#include <asm/cpufeature.h>
 
 /* Useful macros */
 #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
 
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
+
 extern struct setup_header hdr;
 extern struct boot_params boot_params;
 
@@ -242,6 +246,12 @@ int cmdline_find_option(const char *option, char *buffer, int bufsize);
 int cmdline_find_option_bool(const char *option);
 
 /* cpu.c, cpucheck.c */
+struct cpu_features {
+	int level;		/* Family, or 64 for x86-64 */
+	int model;
+	u32 flags[NCAPINTS];
+};
+extern struct cpu_features cpu;
 int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr);
 int validate_cpu(void);
 
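The struct cpu_features / extern declaration added above turns the CPU descriptor that cpucheck.c previously kept to itself into state any boot-stage file can consult. A minimal, self-contained sketch of that pattern follows; the NCAPINTS value and the helper name are assumptions for illustration, not the kernel's own definitions.

    #include <stdint.h>

    #define NCAPINTS 8                      /* number of x86 capability words (assumed) */

    struct cpu_features {
            int level;                      /* family, or 64 for x86-64 */
            int model;
            uint32_t flags[NCAPINTS];
    };

    struct cpu_features cpu;                /* one definition, e.g. in the CPU-check code */

    static int speedstep_era_cpu(void)
    {
            return cpu.level >= 6;          /* the same test query_ist() gains below */
    }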
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index bc5553b496f7..9fea73706479 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -182,8 +182,6 @@ static unsigned outcnt;
 static int fill_inbuf(void);
 static void flush_window(void);
 static void error(char *m);
-static void gzip_mark(void **);
-static void gzip_release(void **);
 
 /*
  * This is set up by the setup-routine at boot-time
@@ -196,9 +194,6 @@ extern int input_len;
 
 static long bytes_out;
 
-static void *malloc(int size);
-static void free(void *where);
-
 static void *memset(void *s, int c, unsigned n);
 static void *memcpy(void *dest, const void *src, unsigned n);
 
@@ -220,40 +215,6 @@ static int lines, cols;
 
 #include "../../../../lib/inflate.c"
 
-static void *malloc(int size)
-{
-	void *p;
-
-	if (size < 0)
-		error("Malloc error");
-	if (free_mem_ptr <= 0)
-		error("Memory error");
-
-	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
-
-	p = (void *)free_mem_ptr;
-	free_mem_ptr += size;
-
-	if (free_mem_ptr >= free_mem_end_ptr)
-		error("Out of memory");
-
-	return p;
-}
-
-static void free(void *where)
-{	/* Don't care */
-}
-
-static void gzip_mark(void **ptr)
-{
-	*ptr = (void *) free_mem_ptr;
-}
-
-static void gzip_release(void **ptr)
-{
-	free_mem_ptr = (memptr) *ptr;
-}
-
 static void scroll(void)
 {
 	int i;
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c
index 92d6fd73dc7d..75298fe2edca 100644
--- a/arch/x86/boot/cpu.c
+++ b/arch/x86/boot/cpu.c
@@ -16,9 +16,6 @@
  */
 
 #include "boot.h"
-#include "bitops.h"
-#include <asm/cpufeature.h>
-
 #include "cpustr.h"
 
 static char *cpu_name(int level)
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 7804389ee005..4d3ff037201f 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -22,21 +22,13 @@
 
 #ifdef _SETUP
 # include "boot.h"
-# include "bitops.h"
 #endif
 #include <linux/types.h>
-#include <asm/cpufeature.h>
 #include <asm/processor-flags.h>
 #include <asm/required-features.h>
 #include <asm/msr-index.h>
 
-struct cpu_features {
-	int level;		/* Family, or 64 for x86-64 */
-	int model;
-	u32 flags[NCAPINTS];
-};
-
-static struct cpu_features cpu;
+struct cpu_features cpu;
 static u32 cpu_vendor[3];
 static u32 err_flags[NCAPINTS];
 
@@ -46,12 +38,12 @@ static const u32 req_flags[NCAPINTS] =
 {
 	REQUIRED_MASK0,
 	REQUIRED_MASK1,
-	REQUIRED_MASK2,
-	REQUIRED_MASK3,
+	0, /* REQUIRED_MASK2 not implemented in this file */
+	0, /* REQUIRED_MASK3 not implemented in this file */
 	REQUIRED_MASK4,
-	REQUIRED_MASK5,
+	0, /* REQUIRED_MASK5 not implemented in this file */
 	REQUIRED_MASK6,
-	REQUIRED_MASK7,
+	0, /* REQUIRED_MASK7 not implemented in this file */
 };
 
 #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
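The zeroed REQUIRED_MASK slots above simply mean those capability words are never enforced by this file. For context, a required-feature table of this shape is normally consumed by masking each reported CPUID word against it and collecting whatever is missing; the sketch below shows that shape in a simplified, stand-alone form and is not the kernel's exact check_cpu() path.

    #include <stdint.h>

    #define NCAPINTS 8

    static uint32_t req_flags[NCAPINTS];    /* required-feature masks, as above */
    static uint32_t cpu_flags[NCAPINTS];    /* what CPUID actually reported */
    static uint32_t err_flags[NCAPINTS];    /* required bits that are absent */

    static int check_flags(void)
    {
            uint32_t missing = 0;
            int i;

            for (i = 0; i < NCAPINTS; i++) {
                    err_flags[i] = req_flags[i] & ~cpu_flags[i];
                    missing |= err_flags[i];
            }
            return missing != 0;            /* non-zero: a required feature is missing */
    }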
diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
index 2296164b54d2..197421db1af1 100644
--- a/arch/x86/boot/main.c
+++ b/arch/x86/boot/main.c
@@ -73,6 +73,11 @@ static void keyboard_set_repeat(void)
  */
 static void query_ist(void)
 {
+	/* Some older BIOSes apparently crash on this call, so filter
+	   it from machines too old to have SpeedStep at all. */
+	if (cpu.level < 6)
+		return;
+
 	asm("int $0x15"
 	    : "=a" (boot_params.ist_info.signature),
 	      "=b" (boot_params.ist_info.command),
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
index 53165c97336b..8c3c25f35578 100644
--- a/arch/x86/boot/memory.c
+++ b/arch/x86/boot/memory.c
@@ -13,7 +13,6 @@
  */
 
 #include "boot.h"
-#include <linux/kernel.h>
 
 #define SMAP	0x534d4150	/* ASCII "SMAP" */
 
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 4d73f53287b6..104275e191a8 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,13 +1,13 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc1 3# Linux kernel version: 2.6.27-rc4
4# Sun May 4 19:59:02 2008 4# Mon Aug 25 15:04:00 2008
5# 5#
6# CONFIG_64BIT is not set 6# CONFIG_64BIT is not set
7CONFIG_X86_32=y 7CONFIG_X86_32=y
8# CONFIG_X86_64 is not set 8# CONFIG_X86_64 is not set
9CONFIG_X86=y 9CONFIG_X86=y
10CONFIG_DEFCONFIG_LIST="arch/x86/configs/i386_defconfig" 10CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
11# CONFIG_GENERIC_LOCKBREAK is not set 11# CONFIG_GENERIC_LOCKBREAK is not set
12CONFIG_GENERIC_TIME=y 12CONFIG_GENERIC_TIME=y
13CONFIG_GENERIC_CMOS_UPDATE=y 13CONFIG_GENERIC_CMOS_UPDATE=y
@@ -53,6 +53,7 @@ CONFIG_X86_HT=y
53CONFIG_X86_BIOS_REBOOT=y 53CONFIG_X86_BIOS_REBOOT=y
54CONFIG_X86_TRAMPOLINE=y 54CONFIG_X86_TRAMPOLINE=y
55CONFIG_KTIME_SCALAR=y 55CONFIG_KTIME_SCALAR=y
56CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
56 57
57# 58#
58# General setup 59# General setup
@@ -82,6 +83,7 @@ CONFIG_CGROUPS=y
82CONFIG_CGROUP_NS=y 83CONFIG_CGROUP_NS=y
83# CONFIG_CGROUP_DEVICE is not set 84# CONFIG_CGROUP_DEVICE is not set
84CONFIG_CPUSETS=y 85CONFIG_CPUSETS=y
86CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
85CONFIG_GROUP_SCHED=y 87CONFIG_GROUP_SCHED=y
86CONFIG_FAIR_GROUP_SCHED=y 88CONFIG_FAIR_GROUP_SCHED=y
87# CONFIG_RT_GROUP_SCHED is not set 89# CONFIG_RT_GROUP_SCHED is not set
@@ -105,7 +107,6 @@ CONFIG_SYSCTL=y
105# CONFIG_EMBEDDED is not set 107# CONFIG_EMBEDDED is not set
106CONFIG_UID16=y 108CONFIG_UID16=y
107CONFIG_SYSCTL_SYSCALL=y 109CONFIG_SYSCTL_SYSCALL=y
108CONFIG_SYSCTL_SYSCALL_CHECK=y
109CONFIG_KALLSYMS=y 110CONFIG_KALLSYMS=y
110CONFIG_KALLSYMS_ALL=y 111CONFIG_KALLSYMS_ALL=y
111CONFIG_KALLSYMS_EXTRA_PASS=y 112CONFIG_KALLSYMS_EXTRA_PASS=y
@@ -113,6 +114,7 @@ CONFIG_HOTPLUG=y
113CONFIG_PRINTK=y 114CONFIG_PRINTK=y
114CONFIG_BUG=y 115CONFIG_BUG=y
115CONFIG_ELF_CORE=y 116CONFIG_ELF_CORE=y
117CONFIG_PCSPKR_PLATFORM=y
116# CONFIG_COMPAT_BRK is not set 118# CONFIG_COMPAT_BRK is not set
117CONFIG_BASE_FULL=y 119CONFIG_BASE_FULL=y
118CONFIG_FUTEX=y 120CONFIG_FUTEX=y
@@ -132,27 +134,35 @@ CONFIG_MARKERS=y
132# CONFIG_OPROFILE is not set 134# CONFIG_OPROFILE is not set
133CONFIG_HAVE_OPROFILE=y 135CONFIG_HAVE_OPROFILE=y
134CONFIG_KPROBES=y 136CONFIG_KPROBES=y
137CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
135CONFIG_KRETPROBES=y 138CONFIG_KRETPROBES=y
139CONFIG_HAVE_IOREMAP_PROT=y
136CONFIG_HAVE_KPROBES=y 140CONFIG_HAVE_KPROBES=y
137CONFIG_HAVE_KRETPROBES=y 141CONFIG_HAVE_KRETPROBES=y
142# CONFIG_HAVE_ARCH_TRACEHOOK is not set
138# CONFIG_HAVE_DMA_ATTRS is not set 143# CONFIG_HAVE_DMA_ATTRS is not set
144CONFIG_USE_GENERIC_SMP_HELPERS=y
145# CONFIG_HAVE_CLK is not set
139CONFIG_PROC_PAGE_MONITOR=y 146CONFIG_PROC_PAGE_MONITOR=y
147CONFIG_HAVE_GENERIC_DMA_COHERENT=y
140CONFIG_SLABINFO=y 148CONFIG_SLABINFO=y
141CONFIG_RT_MUTEXES=y 149CONFIG_RT_MUTEXES=y
142# CONFIG_TINY_SHMEM is not set 150# CONFIG_TINY_SHMEM is not set
143CONFIG_BASE_SMALL=0 151CONFIG_BASE_SMALL=0
144CONFIG_MODULES=y 152CONFIG_MODULES=y
153# CONFIG_MODULE_FORCE_LOAD is not set
145CONFIG_MODULE_UNLOAD=y 154CONFIG_MODULE_UNLOAD=y
146CONFIG_MODULE_FORCE_UNLOAD=y 155CONFIG_MODULE_FORCE_UNLOAD=y
147# CONFIG_MODVERSIONS is not set 156# CONFIG_MODVERSIONS is not set
148# CONFIG_MODULE_SRCVERSION_ALL is not set 157# CONFIG_MODULE_SRCVERSION_ALL is not set
149# CONFIG_KMOD is not set 158CONFIG_KMOD=y
150CONFIG_STOP_MACHINE=y 159CONFIG_STOP_MACHINE=y
151CONFIG_BLOCK=y 160CONFIG_BLOCK=y
152# CONFIG_LBD is not set 161# CONFIG_LBD is not set
153CONFIG_BLK_DEV_IO_TRACE=y 162CONFIG_BLK_DEV_IO_TRACE=y
154# CONFIG_LSF is not set 163# CONFIG_LSF is not set
155CONFIG_BLK_DEV_BSG=y 164CONFIG_BLK_DEV_BSG=y
165# CONFIG_BLK_DEV_INTEGRITY is not set
156 166
157# 167#
158# IO Schedulers 168# IO Schedulers
@@ -176,19 +186,17 @@ CONFIG_NO_HZ=y
176CONFIG_HIGH_RES_TIMERS=y 186CONFIG_HIGH_RES_TIMERS=y
177CONFIG_GENERIC_CLOCKEVENTS_BUILD=y 187CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
178CONFIG_SMP=y 188CONFIG_SMP=y
189CONFIG_X86_FIND_SMP_CONFIG=y
190CONFIG_X86_MPPARSE=y
179CONFIG_X86_PC=y 191CONFIG_X86_PC=y
180# CONFIG_X86_ELAN is not set 192# CONFIG_X86_ELAN is not set
181# CONFIG_X86_VOYAGER is not set 193# CONFIG_X86_VOYAGER is not set
182# CONFIG_X86_NUMAQ is not set
183# CONFIG_X86_SUMMIT is not set
184# CONFIG_X86_BIGSMP is not set
185# CONFIG_X86_VISWS is not set
186# CONFIG_X86_GENERICARCH is not set 194# CONFIG_X86_GENERICARCH is not set
187# CONFIG_X86_ES7000 is not set
188# CONFIG_X86_RDC321X is not set
189# CONFIG_X86_VSMP is not set 195# CONFIG_X86_VSMP is not set
196# CONFIG_X86_RDC321X is not set
190CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 197CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
191# CONFIG_PARAVIRT_GUEST is not set 198# CONFIG_PARAVIRT_GUEST is not set
199# CONFIG_MEMTEST is not set
192# CONFIG_M386 is not set 200# CONFIG_M386 is not set
193# CONFIG_M486 is not set 201# CONFIG_M486 is not set
194# CONFIG_M586 is not set 202# CONFIG_M586 is not set
@@ -215,21 +223,19 @@ CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
215# CONFIG_MPSC is not set 223# CONFIG_MPSC is not set
216CONFIG_MCORE2=y 224CONFIG_MCORE2=y
217# CONFIG_GENERIC_CPU is not set 225# CONFIG_GENERIC_CPU is not set
218# CONFIG_X86_GENERIC is not set 226CONFIG_X86_GENERIC=y
219CONFIG_X86_CPU=y 227CONFIG_X86_CPU=y
220CONFIG_X86_CMPXCHG=y 228CONFIG_X86_CMPXCHG=y
221CONFIG_X86_L1_CACHE_SHIFT=6 229CONFIG_X86_L1_CACHE_SHIFT=7
222CONFIG_X86_XADD=y 230CONFIG_X86_XADD=y
223CONFIG_X86_WP_WORKS_OK=y 231CONFIG_X86_WP_WORKS_OK=y
224CONFIG_X86_INVLPG=y 232CONFIG_X86_INVLPG=y
225CONFIG_X86_BSWAP=y 233CONFIG_X86_BSWAP=y
226CONFIG_X86_POPAD_OK=y 234CONFIG_X86_POPAD_OK=y
227CONFIG_X86_GOOD_APIC=y
228CONFIG_X86_INTEL_USERCOPY=y 235CONFIG_X86_INTEL_USERCOPY=y
229CONFIG_X86_USE_PPRO_CHECKSUM=y 236CONFIG_X86_USE_PPRO_CHECKSUM=y
230CONFIG_X86_P6_NOP=y
231CONFIG_X86_TSC=y 237CONFIG_X86_TSC=y
232CONFIG_X86_MINIMUM_CPU_FAMILY=6 238CONFIG_X86_MINIMUM_CPU_FAMILY=4
233CONFIG_X86_DEBUGCTLMSR=y 239CONFIG_X86_DEBUGCTLMSR=y
234CONFIG_HPET_TIMER=y 240CONFIG_HPET_TIMER=y
235CONFIG_HPET_EMULATE_RTC=y 241CONFIG_HPET_EMULATE_RTC=y
@@ -247,7 +253,7 @@ CONFIG_X86_IO_APIC=y
247CONFIG_VM86=y 253CONFIG_VM86=y
248# CONFIG_TOSHIBA is not set 254# CONFIG_TOSHIBA is not set
249# CONFIG_I8K is not set 255# CONFIG_I8K is not set
250# CONFIG_X86_REBOOTFIXUPS is not set 256CONFIG_X86_REBOOTFIXUPS=y
251# CONFIG_MICROCODE is not set 257# CONFIG_MICROCODE is not set
252CONFIG_X86_MSR=y 258CONFIG_X86_MSR=y
253CONFIG_X86_CPUID=y 259CONFIG_X86_CPUID=y
@@ -256,32 +262,28 @@ CONFIG_HIGHMEM4G=y
256# CONFIG_HIGHMEM64G is not set 262# CONFIG_HIGHMEM64G is not set
257CONFIG_PAGE_OFFSET=0xC0000000 263CONFIG_PAGE_OFFSET=0xC0000000
258CONFIG_HIGHMEM=y 264CONFIG_HIGHMEM=y
259CONFIG_NEED_NODE_MEMMAP_SIZE=y
260CONFIG_ARCH_FLATMEM_ENABLE=y 265CONFIG_ARCH_FLATMEM_ENABLE=y
261CONFIG_ARCH_SPARSEMEM_ENABLE=y 266CONFIG_ARCH_SPARSEMEM_ENABLE=y
262CONFIG_ARCH_SELECT_MEMORY_MODEL=y 267CONFIG_ARCH_SELECT_MEMORY_MODEL=y
263CONFIG_SELECT_MEMORY_MODEL=y 268CONFIG_SELECT_MEMORY_MODEL=y
264# CONFIG_FLATMEM_MANUAL is not set 269CONFIG_FLATMEM_MANUAL=y
265# CONFIG_DISCONTIGMEM_MANUAL is not set 270# CONFIG_DISCONTIGMEM_MANUAL is not set
266CONFIG_SPARSEMEM_MANUAL=y 271# CONFIG_SPARSEMEM_MANUAL is not set
267CONFIG_SPARSEMEM=y 272CONFIG_FLATMEM=y
268CONFIG_HAVE_MEMORY_PRESENT=y 273CONFIG_FLAT_NODE_MEM_MAP=y
269CONFIG_SPARSEMEM_STATIC=y 274CONFIG_SPARSEMEM_STATIC=y
270# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set 275# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
271
272#
273# Memory hotplug is currently incompatible with Software Suspend
274#
275CONFIG_PAGEFLAGS_EXTENDED=y 276CONFIG_PAGEFLAGS_EXTENDED=y
276CONFIG_SPLIT_PTLOCK_CPUS=4 277CONFIG_SPLIT_PTLOCK_CPUS=4
277CONFIG_RESOURCES_64BIT=y 278CONFIG_RESOURCES_64BIT=y
278CONFIG_ZONE_DMA_FLAG=1 279CONFIG_ZONE_DMA_FLAG=1
279CONFIG_BOUNCE=y 280CONFIG_BOUNCE=y
280CONFIG_VIRT_TO_BUS=y 281CONFIG_VIRT_TO_BUS=y
281# CONFIG_HIGHPTE is not set 282CONFIG_HIGHPTE=y
282# CONFIG_MATH_EMULATION is not set 283# CONFIG_MATH_EMULATION is not set
283CONFIG_MTRR=y 284CONFIG_MTRR=y
284# CONFIG_X86_PAT is not set 285# CONFIG_MTRR_SANITIZER is not set
286CONFIG_X86_PAT=y
285CONFIG_EFI=y 287CONFIG_EFI=y
286# CONFIG_IRQBALANCE is not set 288# CONFIG_IRQBALANCE is not set
287CONFIG_SECCOMP=y 289CONFIG_SECCOMP=y
@@ -293,6 +295,7 @@ CONFIG_HZ=1000
293CONFIG_SCHED_HRTICK=y 295CONFIG_SCHED_HRTICK=y
294CONFIG_KEXEC=y 296CONFIG_KEXEC=y
295CONFIG_CRASH_DUMP=y 297CONFIG_CRASH_DUMP=y
298# CONFIG_KEXEC_JUMP is not set
296CONFIG_PHYSICAL_START=0x1000000 299CONFIG_PHYSICAL_START=0x1000000
297CONFIG_RELOCATABLE=y 300CONFIG_RELOCATABLE=y
298CONFIG_PHYSICAL_ALIGN=0x200000 301CONFIG_PHYSICAL_ALIGN=0x200000
@@ -312,6 +315,7 @@ CONFIG_PM_TRACE_RTC=y
312CONFIG_PM_SLEEP_SMP=y 315CONFIG_PM_SLEEP_SMP=y
313CONFIG_PM_SLEEP=y 316CONFIG_PM_SLEEP=y
314CONFIG_SUSPEND=y 317CONFIG_SUSPEND=y
318# CONFIG_PM_TEST_SUSPEND is not set
315CONFIG_SUSPEND_FREEZER=y 319CONFIG_SUSPEND_FREEZER=y
316CONFIG_HIBERNATION=y 320CONFIG_HIBERNATION=y
317CONFIG_PM_STD_PARTITION="" 321CONFIG_PM_STD_PARTITION=""
@@ -337,6 +341,7 @@ CONFIG_ACPI_THERMAL=y
337CONFIG_ACPI_BLACKLIST_YEAR=0 341CONFIG_ACPI_BLACKLIST_YEAR=0
338# CONFIG_ACPI_DEBUG is not set 342# CONFIG_ACPI_DEBUG is not set
339CONFIG_ACPI_EC=y 343CONFIG_ACPI_EC=y
344# CONFIG_ACPI_PCI_SLOT is not set
340CONFIG_ACPI_POWER=y 345CONFIG_ACPI_POWER=y
341CONFIG_ACPI_SYSTEM=y 346CONFIG_ACPI_SYSTEM=y
342CONFIG_X86_PM_TIMER=y 347CONFIG_X86_PM_TIMER=y
@@ -395,8 +400,8 @@ CONFIG_PCI=y
395# CONFIG_PCI_GOBIOS is not set 400# CONFIG_PCI_GOBIOS is not set
396# CONFIG_PCI_GOMMCONFIG is not set 401# CONFIG_PCI_GOMMCONFIG is not set
397# CONFIG_PCI_GODIRECT is not set 402# CONFIG_PCI_GODIRECT is not set
398CONFIG_PCI_GOANY=y
399# CONFIG_PCI_GOOLPC is not set 403# CONFIG_PCI_GOOLPC is not set
404CONFIG_PCI_GOANY=y
400CONFIG_PCI_BIOS=y 405CONFIG_PCI_BIOS=y
401CONFIG_PCI_DIRECT=y 406CONFIG_PCI_DIRECT=y
402CONFIG_PCI_MMCONFIG=y 407CONFIG_PCI_MMCONFIG=y
@@ -448,10 +453,6 @@ CONFIG_HOTPLUG_PCI=y
448CONFIG_BINFMT_ELF=y 453CONFIG_BINFMT_ELF=y
449# CONFIG_BINFMT_AOUT is not set 454# CONFIG_BINFMT_AOUT is not set
450CONFIG_BINFMT_MISC=y 455CONFIG_BINFMT_MISC=y
451
452#
453# Networking
454#
455CONFIG_NET=y 456CONFIG_NET=y
456 457
457# 458#
@@ -475,7 +476,10 @@ CONFIG_IP_FIB_HASH=y
475CONFIG_IP_MULTIPLE_TABLES=y 476CONFIG_IP_MULTIPLE_TABLES=y
476CONFIG_IP_ROUTE_MULTIPATH=y 477CONFIG_IP_ROUTE_MULTIPATH=y
477CONFIG_IP_ROUTE_VERBOSE=y 478CONFIG_IP_ROUTE_VERBOSE=y
478# CONFIG_IP_PNP is not set 479CONFIG_IP_PNP=y
480CONFIG_IP_PNP_DHCP=y
481CONFIG_IP_PNP_BOOTP=y
482CONFIG_IP_PNP_RARP=y
479# CONFIG_NET_IPIP is not set 483# CONFIG_NET_IPIP is not set
480# CONFIG_NET_IPGRE is not set 484# CONFIG_NET_IPGRE is not set
481CONFIG_IP_MROUTE=y 485CONFIG_IP_MROUTE=y
@@ -618,7 +622,6 @@ CONFIG_NET_SCHED=y
618# CONFIG_NET_SCH_HTB is not set 622# CONFIG_NET_SCH_HTB is not set
619# CONFIG_NET_SCH_HFSC is not set 623# CONFIG_NET_SCH_HFSC is not set
620# CONFIG_NET_SCH_PRIO is not set 624# CONFIG_NET_SCH_PRIO is not set
621# CONFIG_NET_SCH_RR is not set
622# CONFIG_NET_SCH_RED is not set 625# CONFIG_NET_SCH_RED is not set
623# CONFIG_NET_SCH_SFQ is not set 626# CONFIG_NET_SCH_SFQ is not set
624# CONFIG_NET_SCH_TEQL is not set 627# CONFIG_NET_SCH_TEQL is not set
@@ -680,28 +683,19 @@ CONFIG_FIB_RULES=y
680CONFIG_CFG80211=y 683CONFIG_CFG80211=y
681CONFIG_NL80211=y 684CONFIG_NL80211=y
682CONFIG_WIRELESS_EXT=y 685CONFIG_WIRELESS_EXT=y
686CONFIG_WIRELESS_EXT_SYSFS=y
683CONFIG_MAC80211=y 687CONFIG_MAC80211=y
684 688
685# 689#
686# Rate control algorithm selection 690# Rate control algorithm selection
687# 691#
692CONFIG_MAC80211_RC_PID=y
688CONFIG_MAC80211_RC_DEFAULT_PID=y 693CONFIG_MAC80211_RC_DEFAULT_PID=y
689# CONFIG_MAC80211_RC_DEFAULT_NONE is not set
690
691#
692# Selecting 'y' for an algorithm will
693#
694
695#
696# build the algorithm into mac80211.
697#
698CONFIG_MAC80211_RC_DEFAULT="pid" 694CONFIG_MAC80211_RC_DEFAULT="pid"
699CONFIG_MAC80211_RC_PID=y
700# CONFIG_MAC80211_MESH is not set 695# CONFIG_MAC80211_MESH is not set
701CONFIG_MAC80211_LEDS=y 696CONFIG_MAC80211_LEDS=y
702# CONFIG_MAC80211_DEBUGFS is not set 697# CONFIG_MAC80211_DEBUGFS is not set
703# CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT is not set 698# CONFIG_MAC80211_DEBUG_MENU is not set
704# CONFIG_MAC80211_DEBUG is not set
705# CONFIG_IEEE80211 is not set 699# CONFIG_IEEE80211 is not set
706# CONFIG_RFKILL is not set 700# CONFIG_RFKILL is not set
707# CONFIG_NET_9P is not set 701# CONFIG_NET_9P is not set
@@ -717,6 +711,8 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
717CONFIG_STANDALONE=y 711CONFIG_STANDALONE=y
718CONFIG_PREVENT_FIRMWARE_BUILD=y 712CONFIG_PREVENT_FIRMWARE_BUILD=y
719CONFIG_FW_LOADER=y 713CONFIG_FW_LOADER=y
714CONFIG_FIRMWARE_IN_KERNEL=y
715CONFIG_EXTRA_FIRMWARE=""
720# CONFIG_DEBUG_DRIVER is not set 716# CONFIG_DEBUG_DRIVER is not set
721CONFIG_DEBUG_DEVRES=y 717CONFIG_DEBUG_DEVRES=y
722# CONFIG_SYS_HYPERVISOR is not set 718# CONFIG_SYS_HYPERVISOR is not set
@@ -749,6 +745,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
749# CONFIG_BLK_DEV_XIP is not set 745# CONFIG_BLK_DEV_XIP is not set
750# CONFIG_CDROM_PKTCDVD is not set 746# CONFIG_CDROM_PKTCDVD is not set
751# CONFIG_ATA_OVER_ETH is not set 747# CONFIG_ATA_OVER_ETH is not set
748# CONFIG_BLK_DEV_HD is not set
752CONFIG_MISC_DEVICES=y 749CONFIG_MISC_DEVICES=y
753# CONFIG_IBM_ASM is not set 750# CONFIG_IBM_ASM is not set
754# CONFIG_PHANTOM is not set 751# CONFIG_PHANTOM is not set
@@ -760,10 +757,12 @@ CONFIG_MISC_DEVICES=y
760# CONFIG_FUJITSU_LAPTOP is not set 757# CONFIG_FUJITSU_LAPTOP is not set
761# CONFIG_TC1100_WMI is not set 758# CONFIG_TC1100_WMI is not set
762# CONFIG_MSI_LAPTOP is not set 759# CONFIG_MSI_LAPTOP is not set
760# CONFIG_COMPAL_LAPTOP is not set
763# CONFIG_SONY_LAPTOP is not set 761# CONFIG_SONY_LAPTOP is not set
764# CONFIG_THINKPAD_ACPI is not set 762# CONFIG_THINKPAD_ACPI is not set
765# CONFIG_INTEL_MENLOW is not set 763# CONFIG_INTEL_MENLOW is not set
766# CONFIG_ENCLOSURE_SERVICES is not set 764# CONFIG_ENCLOSURE_SERVICES is not set
765# CONFIG_HP_ILO is not set
767CONFIG_HAVE_IDE=y 766CONFIG_HAVE_IDE=y
768# CONFIG_IDE is not set 767# CONFIG_IDE is not set
769 768
@@ -802,12 +801,13 @@ CONFIG_SCSI_WAIT_SCAN=m
802# 801#
803CONFIG_SCSI_SPI_ATTRS=y 802CONFIG_SCSI_SPI_ATTRS=y
804# CONFIG_SCSI_FC_ATTRS is not set 803# CONFIG_SCSI_FC_ATTRS is not set
805# CONFIG_SCSI_ISCSI_ATTRS is not set 804CONFIG_SCSI_ISCSI_ATTRS=y
806# CONFIG_SCSI_SAS_ATTRS is not set 805# CONFIG_SCSI_SAS_ATTRS is not set
807# CONFIG_SCSI_SAS_LIBSAS is not set 806# CONFIG_SCSI_SAS_LIBSAS is not set
808# CONFIG_SCSI_SRP_ATTRS is not set 807# CONFIG_SCSI_SRP_ATTRS is not set
809# CONFIG_SCSI_LOWLEVEL is not set 808# CONFIG_SCSI_LOWLEVEL is not set
810# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set 809# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
810# CONFIG_SCSI_DH is not set
811CONFIG_ATA=y 811CONFIG_ATA=y
812# CONFIG_ATA_NONSTANDARD is not set 812# CONFIG_ATA_NONSTANDARD is not set
813CONFIG_ATA_ACPI=y 813CONFIG_ATA_ACPI=y
@@ -842,7 +842,7 @@ CONFIG_PATA_AMD=y
842# CONFIG_PATA_CS5536 is not set 842# CONFIG_PATA_CS5536 is not set
843# CONFIG_PATA_CYPRESS is not set 843# CONFIG_PATA_CYPRESS is not set
844# CONFIG_PATA_EFAR is not set 844# CONFIG_PATA_EFAR is not set
845# CONFIG_ATA_GENERIC is not set 845CONFIG_ATA_GENERIC=y
846# CONFIG_PATA_HPT366 is not set 846# CONFIG_PATA_HPT366 is not set
847# CONFIG_PATA_HPT37X is not set 847# CONFIG_PATA_HPT37X is not set
848# CONFIG_PATA_HPT3X2N is not set 848# CONFIG_PATA_HPT3X2N is not set
@@ -852,7 +852,7 @@ CONFIG_PATA_AMD=y
852# CONFIG_PATA_JMICRON is not set 852# CONFIG_PATA_JMICRON is not set
853# CONFIG_PATA_TRIFLEX is not set 853# CONFIG_PATA_TRIFLEX is not set
854# CONFIG_PATA_MARVELL is not set 854# CONFIG_PATA_MARVELL is not set
855# CONFIG_PATA_MPIIX is not set 855CONFIG_PATA_MPIIX=y
856CONFIG_PATA_OLDPIIX=y 856CONFIG_PATA_OLDPIIX=y
857# CONFIG_PATA_NETCELL is not set 857# CONFIG_PATA_NETCELL is not set
858# CONFIG_PATA_NINJA32 is not set 858# CONFIG_PATA_NINJA32 is not set
@@ -871,6 +871,7 @@ CONFIG_PATA_OLDPIIX=y
871# CONFIG_PATA_SIS is not set 871# CONFIG_PATA_SIS is not set
872# CONFIG_PATA_VIA is not set 872# CONFIG_PATA_VIA is not set
873# CONFIG_PATA_WINBOND is not set 873# CONFIG_PATA_WINBOND is not set
874CONFIG_PATA_SCH=y
874CONFIG_MD=y 875CONFIG_MD=y
875CONFIG_BLK_DEV_MD=y 876CONFIG_BLK_DEV_MD=y
876# CONFIG_MD_LINEAR is not set 877# CONFIG_MD_LINEAR is not set
@@ -894,13 +895,16 @@ CONFIG_DM_ZERO=y
894# 895#
895# IEEE 1394 (FireWire) support 896# IEEE 1394 (FireWire) support
896# 897#
898
899#
900# Enable only one of the two stacks, unless you know what you are doing
901#
897# CONFIG_FIREWIRE is not set 902# CONFIG_FIREWIRE is not set
898# CONFIG_IEEE1394 is not set 903# CONFIG_IEEE1394 is not set
899# CONFIG_I2O is not set 904# CONFIG_I2O is not set
900CONFIG_MACINTOSH_DRIVERS=y 905CONFIG_MACINTOSH_DRIVERS=y
901CONFIG_MAC_EMUMOUSEBTN=y 906CONFIG_MAC_EMUMOUSEBTN=y
902CONFIG_NETDEVICES=y 907CONFIG_NETDEVICES=y
903# CONFIG_NETDEVICES_MULTIQUEUE is not set
904# CONFIG_IFB is not set 908# CONFIG_IFB is not set
905# CONFIG_DUMMY is not set 909# CONFIG_DUMMY is not set
906# CONFIG_BONDING is not set 910# CONFIG_BONDING is not set
@@ -910,7 +914,23 @@ CONFIG_NETDEVICES=y
910# CONFIG_VETH is not set 914# CONFIG_VETH is not set
911# CONFIG_NET_SB1000 is not set 915# CONFIG_NET_SB1000 is not set
912# CONFIG_ARCNET is not set 916# CONFIG_ARCNET is not set
913# CONFIG_PHYLIB is not set 917CONFIG_PHYLIB=y
918
919#
920# MII PHY device drivers
921#
922# CONFIG_MARVELL_PHY is not set
923# CONFIG_DAVICOM_PHY is not set
924# CONFIG_QSEMI_PHY is not set
925# CONFIG_LXT_PHY is not set
926# CONFIG_CICADA_PHY is not set
927# CONFIG_VITESSE_PHY is not set
928# CONFIG_SMSC_PHY is not set
929# CONFIG_BROADCOM_PHY is not set
930# CONFIG_ICPLUS_PHY is not set
931# CONFIG_REALTEK_PHY is not set
932# CONFIG_FIXED_PHY is not set
933# CONFIG_MDIO_BITBANG is not set
914CONFIG_NET_ETHERNET=y 934CONFIG_NET_ETHERNET=y
915CONFIG_MII=y 935CONFIG_MII=y
916# CONFIG_HAPPYMEAL is not set 936# CONFIG_HAPPYMEAL is not set
@@ -943,10 +963,10 @@ CONFIG_FORCEDETH=y
943CONFIG_E100=y 963CONFIG_E100=y
944# CONFIG_FEALNX is not set 964# CONFIG_FEALNX is not set
945# CONFIG_NATSEMI is not set 965# CONFIG_NATSEMI is not set
946# CONFIG_NE2K_PCI is not set 966CONFIG_NE2K_PCI=y
947# CONFIG_8139CP is not set 967# CONFIG_8139CP is not set
948CONFIG_8139TOO=y 968CONFIG_8139TOO=y
949CONFIG_8139TOO_PIO=y 969# CONFIG_8139TOO_PIO is not set
950# CONFIG_8139TOO_TUNE_TWISTER is not set 970# CONFIG_8139TOO_TUNE_TWISTER is not set
951# CONFIG_8139TOO_8129 is not set 971# CONFIG_8139TOO_8129 is not set
952# CONFIG_8139_OLD_RX_RESET is not set 972# CONFIG_8139_OLD_RX_RESET is not set
@@ -961,25 +981,24 @@ CONFIG_NETDEV_1000=y
961# CONFIG_ACENIC is not set 981# CONFIG_ACENIC is not set
962# CONFIG_DL2K is not set 982# CONFIG_DL2K is not set
963CONFIG_E1000=y 983CONFIG_E1000=y
964# CONFIG_E1000_NAPI is not set
965# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set 984# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
966# CONFIG_E1000E is not set 985CONFIG_E1000E=y
967# CONFIG_E1000E_ENABLED is not set
968# CONFIG_IP1000 is not set 986# CONFIG_IP1000 is not set
969# CONFIG_IGB is not set 987# CONFIG_IGB is not set
970# CONFIG_NS83820 is not set 988# CONFIG_NS83820 is not set
971# CONFIG_HAMACHI is not set 989# CONFIG_HAMACHI is not set
972# CONFIG_YELLOWFIN is not set 990# CONFIG_YELLOWFIN is not set
973# CONFIG_R8169 is not set 991CONFIG_R8169=y
974# CONFIG_SIS190 is not set 992# CONFIG_SIS190 is not set
975# CONFIG_SKGE is not set 993# CONFIG_SKGE is not set
976CONFIG_SKY2=y 994CONFIG_SKY2=y
977# CONFIG_SKY2_DEBUG is not set 995# CONFIG_SKY2_DEBUG is not set
978# CONFIG_VIA_VELOCITY is not set 996# CONFIG_VIA_VELOCITY is not set
979CONFIG_TIGON3=y 997CONFIG_TIGON3=y
980# CONFIG_BNX2 is not set 998CONFIG_BNX2=y
981# CONFIG_QLA3XXX is not set 999# CONFIG_QLA3XXX is not set
982# CONFIG_ATL1 is not set 1000# CONFIG_ATL1 is not set
1001# CONFIG_ATL1E is not set
983CONFIG_NETDEV_10000=y 1002CONFIG_NETDEV_10000=y
984# CONFIG_CHELSIO_T1 is not set 1003# CONFIG_CHELSIO_T1 is not set
985# CONFIG_CHELSIO_T3 is not set 1004# CONFIG_CHELSIO_T3 is not set
@@ -1019,13 +1038,14 @@ CONFIG_WLAN_80211=y
1019# CONFIG_RTL8180 is not set 1038# CONFIG_RTL8180 is not set
1020# CONFIG_RTL8187 is not set 1039# CONFIG_RTL8187 is not set
1021# CONFIG_ADM8211 is not set 1040# CONFIG_ADM8211 is not set
1041# CONFIG_MAC80211_HWSIM is not set
1022# CONFIG_P54_COMMON is not set 1042# CONFIG_P54_COMMON is not set
1023CONFIG_ATH5K=y 1043CONFIG_ATH5K=y
1024# CONFIG_ATH5K_DEBUG is not set 1044# CONFIG_ATH5K_DEBUG is not set
1025# CONFIG_IWLWIFI is not set 1045# CONFIG_ATH9K is not set
1026# CONFIG_IWLCORE is not set 1046# CONFIG_IWLCORE is not set
1027# CONFIG_IWLWIFI_LEDS is not set 1047# CONFIG_IWLWIFI_LEDS is not set
1028# CONFIG_IWL4965 is not set 1048# CONFIG_IWLAGN is not set
1029# CONFIG_IWL3945 is not set 1049# CONFIG_IWL3945 is not set
1030# CONFIG_HOSTAP is not set 1050# CONFIG_HOSTAP is not set
1031# CONFIG_B43 is not set 1051# CONFIG_B43 is not set
@@ -1105,6 +1125,7 @@ CONFIG_MOUSE_PS2_TRACKPOINT=y
1105# CONFIG_MOUSE_PS2_TOUCHKIT is not set 1125# CONFIG_MOUSE_PS2_TOUCHKIT is not set
1106# CONFIG_MOUSE_SERIAL is not set 1126# CONFIG_MOUSE_SERIAL is not set
1107# CONFIG_MOUSE_APPLETOUCH is not set 1127# CONFIG_MOUSE_APPLETOUCH is not set
1128# CONFIG_MOUSE_BCM5974 is not set
1108# CONFIG_MOUSE_VSXXXAA is not set 1129# CONFIG_MOUSE_VSXXXAA is not set
1109CONFIG_INPUT_JOYSTICK=y 1130CONFIG_INPUT_JOYSTICK=y
1110# CONFIG_JOYSTICK_ANALOG is not set 1131# CONFIG_JOYSTICK_ANALOG is not set
@@ -1139,12 +1160,14 @@ CONFIG_INPUT_TOUCHSCREEN=y
1139# CONFIG_TOUCHSCREEN_GUNZE is not set 1160# CONFIG_TOUCHSCREEN_GUNZE is not set
1140# CONFIG_TOUCHSCREEN_ELO is not set 1161# CONFIG_TOUCHSCREEN_ELO is not set
1141# CONFIG_TOUCHSCREEN_MTOUCH is not set 1162# CONFIG_TOUCHSCREEN_MTOUCH is not set
1163# CONFIG_TOUCHSCREEN_INEXIO is not set
1142# CONFIG_TOUCHSCREEN_MK712 is not set 1164# CONFIG_TOUCHSCREEN_MK712 is not set
1143# CONFIG_TOUCHSCREEN_PENMOUNT is not set 1165# CONFIG_TOUCHSCREEN_PENMOUNT is not set
1144# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set 1166# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
1145# CONFIG_TOUCHSCREEN_TOUCHWIN is not set 1167# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
1146# CONFIG_TOUCHSCREEN_UCB1400 is not set 1168# CONFIG_TOUCHSCREEN_UCB1400 is not set
1147# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set 1169# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
1170# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
1148CONFIG_INPUT_MISC=y 1171CONFIG_INPUT_MISC=y
1149# CONFIG_INPUT_PCSPKR is not set 1172# CONFIG_INPUT_PCSPKR is not set
1150# CONFIG_INPUT_APANEL is not set 1173# CONFIG_INPUT_APANEL is not set
@@ -1173,6 +1196,7 @@ CONFIG_SERIO_LIBPS2=y
1173# Character devices 1196# Character devices
1174# 1197#
1175CONFIG_VT=y 1198CONFIG_VT=y
1199CONFIG_CONSOLE_TRANSLATIONS=y
1176CONFIG_VT_CONSOLE=y 1200CONFIG_VT_CONSOLE=y
1177CONFIG_HW_CONSOLE=y 1201CONFIG_HW_CONSOLE=y
1178CONFIG_VT_HW_CONSOLE_BINDING=y 1202CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -1223,8 +1247,8 @@ CONFIG_UNIX98_PTYS=y
1223# CONFIG_LEGACY_PTYS is not set 1247# CONFIG_LEGACY_PTYS is not set
1224# CONFIG_IPMI_HANDLER is not set 1248# CONFIG_IPMI_HANDLER is not set
1225CONFIG_HW_RANDOM=y 1249CONFIG_HW_RANDOM=y
1226# CONFIG_HW_RANDOM_INTEL is not set 1250CONFIG_HW_RANDOM_INTEL=y
1227# CONFIG_HW_RANDOM_AMD is not set 1251CONFIG_HW_RANDOM_AMD=y
1228CONFIG_HW_RANDOM_GEODE=y 1252CONFIG_HW_RANDOM_GEODE=y
1229CONFIG_HW_RANDOM_VIA=y 1253CONFIG_HW_RANDOM_VIA=y
1230CONFIG_NVRAM=y 1254CONFIG_NVRAM=y
@@ -1245,7 +1269,6 @@ CONFIG_NVRAM=y
1245# CONFIG_CS5535_GPIO is not set 1269# CONFIG_CS5535_GPIO is not set
1246# CONFIG_RAW_DRIVER is not set 1270# CONFIG_RAW_DRIVER is not set
1247CONFIG_HPET=y 1271CONFIG_HPET=y
1248# CONFIG_HPET_RTC_IRQ is not set
1249# CONFIG_HPET_MMAP is not set 1272# CONFIG_HPET_MMAP is not set
1250# CONFIG_HANGCHECK_TIMER is not set 1273# CONFIG_HANGCHECK_TIMER is not set
1251# CONFIG_TCG_TPM is not set 1274# CONFIG_TCG_TPM is not set
@@ -1254,43 +1277,64 @@ CONFIG_DEVPORT=y
1254CONFIG_I2C=y 1277CONFIG_I2C=y
1255CONFIG_I2C_BOARDINFO=y 1278CONFIG_I2C_BOARDINFO=y
1256# CONFIG_I2C_CHARDEV is not set 1279# CONFIG_I2C_CHARDEV is not set
1280CONFIG_I2C_HELPER_AUTO=y
1257 1281
1258# 1282#
1259# I2C Hardware Bus support 1283# I2C Hardware Bus support
1260# 1284#
1285
1286#
1287# PC SMBus host controller drivers
1288#
1261# CONFIG_I2C_ALI1535 is not set 1289# CONFIG_I2C_ALI1535 is not set
1262# CONFIG_I2C_ALI1563 is not set 1290# CONFIG_I2C_ALI1563 is not set
1263# CONFIG_I2C_ALI15X3 is not set 1291# CONFIG_I2C_ALI15X3 is not set
1264# CONFIG_I2C_AMD756 is not set 1292# CONFIG_I2C_AMD756 is not set
1265# CONFIG_I2C_AMD8111 is not set 1293# CONFIG_I2C_AMD8111 is not set
1266CONFIG_I2C_I801=y 1294CONFIG_I2C_I801=y
1267# CONFIG_I2C_I810 is not set 1295# CONFIG_I2C_ISCH is not set
1268# CONFIG_I2C_PIIX4 is not set 1296# CONFIG_I2C_PIIX4 is not set
1269# CONFIG_I2C_NFORCE2 is not set 1297# CONFIG_I2C_NFORCE2 is not set
1270# CONFIG_I2C_OCORES is not set
1271# CONFIG_I2C_PARPORT_LIGHT is not set
1272# CONFIG_I2C_PROSAVAGE is not set
1273# CONFIG_I2C_SAVAGE4 is not set
1274# CONFIG_I2C_SIMTEC is not set
1275# CONFIG_SCx200_ACB is not set
1276# CONFIG_I2C_SIS5595 is not set 1298# CONFIG_I2C_SIS5595 is not set
1277# CONFIG_I2C_SIS630 is not set 1299# CONFIG_I2C_SIS630 is not set
1278# CONFIG_I2C_SIS96X is not set 1300# CONFIG_I2C_SIS96X is not set
1279# CONFIG_I2C_TAOS_EVM is not set
1280# CONFIG_I2C_STUB is not set
1281# CONFIG_I2C_TINY_USB is not set
1282# CONFIG_I2C_VIA is not set 1301# CONFIG_I2C_VIA is not set
1283# CONFIG_I2C_VIAPRO is not set 1302# CONFIG_I2C_VIAPRO is not set
1303
1304#
1305# I2C system bus drivers (mostly embedded / system-on-chip)
1306#
1307# CONFIG_I2C_OCORES is not set
1308# CONFIG_I2C_SIMTEC is not set
1309
1310#
1311# External I2C/SMBus adapter drivers
1312#
1313# CONFIG_I2C_PARPORT_LIGHT is not set
1314# CONFIG_I2C_TAOS_EVM is not set
1315# CONFIG_I2C_TINY_USB is not set
1316
1317#
1318# Graphics adapter I2C/DDC channel drivers
1319#
1284# CONFIG_I2C_VOODOO3 is not set 1320# CONFIG_I2C_VOODOO3 is not set
1321
1322#
1323# Other I2C/SMBus bus drivers
1324#
1285# CONFIG_I2C_PCA_PLATFORM is not set 1325# CONFIG_I2C_PCA_PLATFORM is not set
1326# CONFIG_I2C_STUB is not set
1327# CONFIG_SCx200_ACB is not set
1286 1328
1287# 1329#
1288# Miscellaneous I2C Chip support 1330# Miscellaneous I2C Chip support
1289# 1331#
1290# CONFIG_DS1682 is not set 1332# CONFIG_DS1682 is not set
1333# CONFIG_AT24 is not set
1291# CONFIG_SENSORS_EEPROM is not set 1334# CONFIG_SENSORS_EEPROM is not set
1292# CONFIG_SENSORS_PCF8574 is not set 1335# CONFIG_SENSORS_PCF8574 is not set
1293# CONFIG_PCF8575 is not set 1336# CONFIG_PCF8575 is not set
1337# CONFIG_SENSORS_PCA9539 is not set
1294# CONFIG_SENSORS_PCF8591 is not set 1338# CONFIG_SENSORS_PCF8591 is not set
1295# CONFIG_SENSORS_MAX6875 is not set 1339# CONFIG_SENSORS_MAX6875 is not set
1296# CONFIG_SENSORS_TSL2550 is not set 1340# CONFIG_SENSORS_TSL2550 is not set
@@ -1299,6 +1343,8 @@ CONFIG_I2C_I801=y
1299# CONFIG_I2C_DEBUG_BUS is not set 1343# CONFIG_I2C_DEBUG_BUS is not set
1300# CONFIG_I2C_DEBUG_CHIP is not set 1344# CONFIG_I2C_DEBUG_CHIP is not set
1301# CONFIG_SPI is not set 1345# CONFIG_SPI is not set
1346CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
1347# CONFIG_GPIOLIB is not set
1302# CONFIG_W1 is not set 1348# CONFIG_W1 is not set
1303CONFIG_POWER_SUPPLY=y 1349CONFIG_POWER_SUPPLY=y
1304# CONFIG_POWER_SUPPLY_DEBUG is not set 1350# CONFIG_POWER_SUPPLY_DEBUG is not set
@@ -1360,8 +1406,10 @@ CONFIG_SSB_POSSIBLE=y
1360# 1406#
1361# Multifunction device drivers 1407# Multifunction device drivers
1362# 1408#
1409# CONFIG_MFD_CORE is not set
1363# CONFIG_MFD_SM501 is not set 1410# CONFIG_MFD_SM501 is not set
1364# CONFIG_HTC_PASIC3 is not set 1411# CONFIG_HTC_PASIC3 is not set
1412# CONFIG_MFD_TMIO is not set
1365 1413
1366# 1414#
1367# Multimedia devices 1415# Multimedia devices
@@ -1372,6 +1420,7 @@ CONFIG_SSB_POSSIBLE=y
1372# 1420#
1373# CONFIG_VIDEO_DEV is not set 1421# CONFIG_VIDEO_DEV is not set
1374# CONFIG_DVB_CORE is not set 1422# CONFIG_DVB_CORE is not set
1423# CONFIG_VIDEO_MEDIA is not set
1375 1424
1376# 1425#
1377# Multimedia drivers 1426# Multimedia drivers
@@ -1418,7 +1467,6 @@ CONFIG_FB_CFB_IMAGEBLIT=y
1418# CONFIG_FB_SYS_IMAGEBLIT is not set 1467# CONFIG_FB_SYS_IMAGEBLIT is not set
1419# CONFIG_FB_FOREIGN_ENDIAN is not set 1468# CONFIG_FB_FOREIGN_ENDIAN is not set
1420# CONFIG_FB_SYS_FOPS is not set 1469# CONFIG_FB_SYS_FOPS is not set
1421CONFIG_FB_DEFERRED_IO=y
1422# CONFIG_FB_SVGALIB is not set 1470# CONFIG_FB_SVGALIB is not set
1423# CONFIG_FB_MACMODES is not set 1471# CONFIG_FB_MACMODES is not set
1424# CONFIG_FB_BACKLIGHT is not set 1472# CONFIG_FB_BACKLIGHT is not set
@@ -1463,6 +1511,7 @@ CONFIG_FB_EFI=y
1463# CONFIG_FB_TRIDENT is not set 1511# CONFIG_FB_TRIDENT is not set
1464# CONFIG_FB_ARK is not set 1512# CONFIG_FB_ARK is not set
1465# CONFIG_FB_PM3 is not set 1513# CONFIG_FB_PM3 is not set
1514# CONFIG_FB_CARMINE is not set
1466# CONFIG_FB_GEODE is not set 1515# CONFIG_FB_GEODE is not set
1467# CONFIG_FB_VIRTUAL is not set 1516# CONFIG_FB_VIRTUAL is not set
1468CONFIG_BACKLIGHT_LCD_SUPPORT=y 1517CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -1470,6 +1519,7 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y
1470CONFIG_BACKLIGHT_CLASS_DEVICE=y 1519CONFIG_BACKLIGHT_CLASS_DEVICE=y
1471# CONFIG_BACKLIGHT_CORGI is not set 1520# CONFIG_BACKLIGHT_CORGI is not set
1472# CONFIG_BACKLIGHT_PROGEAR is not set 1521# CONFIG_BACKLIGHT_PROGEAR is not set
1522# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
1473 1523
1474# 1524#
1475# Display device support 1525# Display device support
@@ -1489,15 +1539,7 @@ CONFIG_LOGO=y
1489# CONFIG_LOGO_LINUX_MONO is not set 1539# CONFIG_LOGO_LINUX_MONO is not set
1490# CONFIG_LOGO_LINUX_VGA16 is not set 1540# CONFIG_LOGO_LINUX_VGA16 is not set
1491CONFIG_LOGO_LINUX_CLUT224=y 1541CONFIG_LOGO_LINUX_CLUT224=y
1492
1493#
1494# Sound
1495#
1496CONFIG_SOUND=y 1542CONFIG_SOUND=y
1497
1498#
1499# Advanced Linux Sound Architecture
1500#
1501CONFIG_SND=y 1543CONFIG_SND=y
1502CONFIG_SND_TIMER=y 1544CONFIG_SND_TIMER=y
1503CONFIG_SND_PCM=y 1545CONFIG_SND_PCM=y
@@ -1515,20 +1557,14 @@ CONFIG_SND_VERBOSE_PROCFS=y
1515# CONFIG_SND_VERBOSE_PRINTK is not set 1557# CONFIG_SND_VERBOSE_PRINTK is not set
1516# CONFIG_SND_DEBUG is not set 1558# CONFIG_SND_DEBUG is not set
1517CONFIG_SND_VMASTER=y 1559CONFIG_SND_VMASTER=y
1518 1560CONFIG_SND_DRIVERS=y
1519#
1520# Generic devices
1521#
1522# CONFIG_SND_PCSP is not set 1561# CONFIG_SND_PCSP is not set
1523# CONFIG_SND_DUMMY is not set 1562# CONFIG_SND_DUMMY is not set
1524# CONFIG_SND_VIRMIDI is not set 1563# CONFIG_SND_VIRMIDI is not set
1525# CONFIG_SND_MTPAV is not set 1564# CONFIG_SND_MTPAV is not set
1526# CONFIG_SND_SERIAL_U16550 is not set 1565# CONFIG_SND_SERIAL_U16550 is not set
1527# CONFIG_SND_MPU401 is not set 1566# CONFIG_SND_MPU401 is not set
1528 1567CONFIG_SND_PCI=y
1529#
1530# PCI devices
1531#
1532# CONFIG_SND_AD1889 is not set 1568# CONFIG_SND_AD1889 is not set
1533# CONFIG_SND_ALS300 is not set 1569# CONFIG_SND_ALS300 is not set
1534# CONFIG_SND_ALS4000 is not set 1570# CONFIG_SND_ALS4000 is not set
@@ -1603,36 +1639,14 @@ CONFIG_SND_HDA_GENERIC=y
1603# CONFIG_SND_VIRTUOSO is not set 1639# CONFIG_SND_VIRTUOSO is not set
1604# CONFIG_SND_VX222 is not set 1640# CONFIG_SND_VX222 is not set
1605# CONFIG_SND_YMFPCI is not set 1641# CONFIG_SND_YMFPCI is not set
1606 1642CONFIG_SND_USB=y
1607#
1608# USB devices
1609#
1610# CONFIG_SND_USB_AUDIO is not set 1643# CONFIG_SND_USB_AUDIO is not set
1611# CONFIG_SND_USB_USX2Y is not set 1644# CONFIG_SND_USB_USX2Y is not set
1612# CONFIG_SND_USB_CAIAQ is not set 1645# CONFIG_SND_USB_CAIAQ is not set
1613 1646CONFIG_SND_PCMCIA=y
1614#
1615# PCMCIA devices
1616#
1617# CONFIG_SND_VXPOCKET is not set 1647# CONFIG_SND_VXPOCKET is not set
1618# CONFIG_SND_PDAUDIOCF is not set 1648# CONFIG_SND_PDAUDIOCF is not set
1619
1620#
1621# System on Chip audio support
1622#
1623# CONFIG_SND_SOC is not set 1649# CONFIG_SND_SOC is not set
1624
1625#
1626# ALSA SoC audio for Freescale SOCs
1627#
1628
1629#
1630# SoC Audio for the Texas Instruments OMAP
1631#
1632
1633#
1634# Open Sound System
1635#
1636# CONFIG_SOUND_PRIME is not set 1650# CONFIG_SOUND_PRIME is not set
1637CONFIG_HID_SUPPORT=y 1651CONFIG_HID_SUPPORT=y
1638CONFIG_HID=y 1652CONFIG_HID=y
@@ -1668,6 +1682,7 @@ CONFIG_USB_DEVICEFS=y
1668# CONFIG_USB_DYNAMIC_MINORS is not set 1682# CONFIG_USB_DYNAMIC_MINORS is not set
1669CONFIG_USB_SUSPEND=y 1683CONFIG_USB_SUSPEND=y
1670# CONFIG_USB_OTG is not set 1684# CONFIG_USB_OTG is not set
1685CONFIG_USB_MON=y
1671 1686
1672# 1687#
1673# USB Host Controller Drivers 1688# USB Host Controller Drivers
@@ -1691,6 +1706,7 @@ CONFIG_USB_UHCI_HCD=y
1691# 1706#
1692# CONFIG_USB_ACM is not set 1707# CONFIG_USB_ACM is not set
1693CONFIG_USB_PRINTER=y 1708CONFIG_USB_PRINTER=y
1709# CONFIG_USB_WDM is not set
1694 1710
1695# 1711#
1696# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 1712# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
@@ -1712,6 +1728,7 @@ CONFIG_USB_STORAGE=y
1712# CONFIG_USB_STORAGE_ALAUDA is not set 1728# CONFIG_USB_STORAGE_ALAUDA is not set
1713# CONFIG_USB_STORAGE_ONETOUCH is not set 1729# CONFIG_USB_STORAGE_ONETOUCH is not set
1714# CONFIG_USB_STORAGE_KARMA is not set 1730# CONFIG_USB_STORAGE_KARMA is not set
1731# CONFIG_USB_STORAGE_SIERRA is not set
1715# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set 1732# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
1716CONFIG_USB_LIBUSUAL=y 1733CONFIG_USB_LIBUSUAL=y
1717 1734
@@ -1720,7 +1737,6 @@ CONFIG_USB_LIBUSUAL=y
1720# 1737#
1721# CONFIG_USB_MDC800 is not set 1738# CONFIG_USB_MDC800 is not set
1722# CONFIG_USB_MICROTEK is not set 1739# CONFIG_USB_MICROTEK is not set
1723CONFIG_USB_MON=y
1724 1740
1725# 1741#
1726# USB port drivers 1742# USB port drivers
@@ -1733,7 +1749,6 @@ CONFIG_USB_MON=y
1733# CONFIG_USB_EMI62 is not set 1749# CONFIG_USB_EMI62 is not set
1734# CONFIG_USB_EMI26 is not set 1750# CONFIG_USB_EMI26 is not set
1735# CONFIG_USB_ADUTUX is not set 1751# CONFIG_USB_ADUTUX is not set
1736# CONFIG_USB_AUERSWALD is not set
1737# CONFIG_USB_RIO500 is not set 1752# CONFIG_USB_RIO500 is not set
1738# CONFIG_USB_LEGOTOWER is not set 1753# CONFIG_USB_LEGOTOWER is not set
1739# CONFIG_USB_LCD is not set 1754# CONFIG_USB_LCD is not set
@@ -1750,6 +1765,7 @@ CONFIG_USB_MON=y
1750# CONFIG_USB_TRANCEVIBRATOR is not set 1765# CONFIG_USB_TRANCEVIBRATOR is not set
1751# CONFIG_USB_IOWARRIOR is not set 1766# CONFIG_USB_IOWARRIOR is not set
1752# CONFIG_USB_TEST is not set 1767# CONFIG_USB_TEST is not set
1768# CONFIG_USB_ISIGHTFW is not set
1753# CONFIG_USB_GADGET is not set 1769# CONFIG_USB_GADGET is not set
1754# CONFIG_MMC is not set 1770# CONFIG_MMC is not set
1755# CONFIG_MEMSTICK is not set 1771# CONFIG_MEMSTICK is not set
@@ -1759,7 +1775,9 @@ CONFIG_LEDS_CLASS=y
1759# 1775#
1760# LED drivers 1776# LED drivers
1761# 1777#
1778# CONFIG_LEDS_PCA9532 is not set
1762# CONFIG_LEDS_CLEVO_MAIL is not set 1779# CONFIG_LEDS_CLEVO_MAIL is not set
1780# CONFIG_LEDS_PCA955X is not set
1763 1781
1764# 1782#
1765# LED Triggers 1783# LED Triggers
@@ -1805,6 +1823,7 @@ CONFIG_RTC_INTF_DEV=y
1805# CONFIG_RTC_DRV_PCF8583 is not set 1823# CONFIG_RTC_DRV_PCF8583 is not set
1806# CONFIG_RTC_DRV_M41T80 is not set 1824# CONFIG_RTC_DRV_M41T80 is not set
1807# CONFIG_RTC_DRV_S35390A is not set 1825# CONFIG_RTC_DRV_S35390A is not set
1826# CONFIG_RTC_DRV_FM3130 is not set
1808 1827
1809# 1828#
1810# SPI RTC drivers 1829# SPI RTC drivers
@@ -1837,11 +1856,13 @@ CONFIG_DMADEVICES=y
1837# Firmware Drivers 1856# Firmware Drivers
1838# 1857#
1839# CONFIG_EDD is not set 1858# CONFIG_EDD is not set
1859CONFIG_FIRMWARE_MEMMAP=y
1840CONFIG_EFI_VARS=y 1860CONFIG_EFI_VARS=y
1841# CONFIG_DELL_RBU is not set 1861# CONFIG_DELL_RBU is not set
1842# CONFIG_DCDBAS is not set 1862# CONFIG_DCDBAS is not set
1843CONFIG_DMIID=y 1863CONFIG_DMIID=y
1844# CONFIG_ISCSI_IBFT_FIND is not set 1864CONFIG_ISCSI_IBFT_FIND=y
1865CONFIG_ISCSI_IBFT=y
1845 1866
1846# 1867#
1847# File systems 1868# File systems
@@ -1920,14 +1941,27 @@ CONFIG_HUGETLB_PAGE=y
1920# CONFIG_CRAMFS is not set 1941# CONFIG_CRAMFS is not set
1921# CONFIG_VXFS_FS is not set 1942# CONFIG_VXFS_FS is not set
1922# CONFIG_MINIX_FS is not set 1943# CONFIG_MINIX_FS is not set
1944# CONFIG_OMFS_FS is not set
1923# CONFIG_HPFS_FS is not set 1945# CONFIG_HPFS_FS is not set
1924# CONFIG_QNX4FS_FS is not set 1946# CONFIG_QNX4FS_FS is not set
1925# CONFIG_ROMFS_FS is not set 1947# CONFIG_ROMFS_FS is not set
1926# CONFIG_SYSV_FS is not set 1948# CONFIG_SYSV_FS is not set
1927# CONFIG_UFS_FS is not set 1949# CONFIG_UFS_FS is not set
1928CONFIG_NETWORK_FILESYSTEMS=y 1950CONFIG_NETWORK_FILESYSTEMS=y
1929# CONFIG_NFS_FS is not set 1951CONFIG_NFS_FS=y
1952CONFIG_NFS_V3=y
1953CONFIG_NFS_V3_ACL=y
1954CONFIG_NFS_V4=y
1955CONFIG_ROOT_NFS=y
1930# CONFIG_NFSD is not set 1956# CONFIG_NFSD is not set
1957CONFIG_LOCKD=y
1958CONFIG_LOCKD_V4=y
1959CONFIG_NFS_ACL_SUPPORT=y
1960CONFIG_NFS_COMMON=y
1961CONFIG_SUNRPC=y
1962CONFIG_SUNRPC_GSS=y
1963CONFIG_RPCSEC_GSS_KRB5=y
1964# CONFIG_RPCSEC_GSS_SPKM3 is not set
1931# CONFIG_SMB_FS is not set 1965# CONFIG_SMB_FS is not set
1932# CONFIG_CIFS is not set 1966# CONFIG_CIFS is not set
1933# CONFIG_NCP_FS is not set 1967# CONFIG_NCP_FS is not set
@@ -2001,9 +2035,9 @@ CONFIG_NLS_UTF8=y
2001# Kernel hacking 2035# Kernel hacking
2002# 2036#
2003CONFIG_TRACE_IRQFLAGS_SUPPORT=y 2037CONFIG_TRACE_IRQFLAGS_SUPPORT=y
2004# CONFIG_PRINTK_TIME is not set 2038CONFIG_PRINTK_TIME=y
2005# CONFIG_ENABLE_WARN_DEPRECATED is not set 2039CONFIG_ENABLE_WARN_DEPRECATED=y
2006# CONFIG_ENABLE_MUST_CHECK is not set 2040CONFIG_ENABLE_MUST_CHECK=y
2007CONFIG_FRAME_WARN=2048 2041CONFIG_FRAME_WARN=2048
2008CONFIG_MAGIC_SYSRQ=y 2042CONFIG_MAGIC_SYSRQ=y
2009# CONFIG_UNUSED_SYMBOLS is not set 2043# CONFIG_UNUSED_SYMBOLS is not set
@@ -2033,6 +2067,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
2033# CONFIG_DEBUG_INFO is not set 2067# CONFIG_DEBUG_INFO is not set
2034# CONFIG_DEBUG_VM is not set 2068# CONFIG_DEBUG_VM is not set
2035# CONFIG_DEBUG_WRITECOUNT is not set 2069# CONFIG_DEBUG_WRITECOUNT is not set
2070CONFIG_DEBUG_MEMORY_INIT=y
2036# CONFIG_DEBUG_LIST is not set 2071# CONFIG_DEBUG_LIST is not set
2037# CONFIG_DEBUG_SG is not set 2072# CONFIG_DEBUG_SG is not set
2038CONFIG_FRAME_POINTER=y 2073CONFIG_FRAME_POINTER=y
@@ -2043,23 +2078,32 @@ CONFIG_FRAME_POINTER=y
2043# CONFIG_LKDTM is not set 2078# CONFIG_LKDTM is not set
2044# CONFIG_FAULT_INJECTION is not set 2079# CONFIG_FAULT_INJECTION is not set
2045# CONFIG_LATENCYTOP is not set 2080# CONFIG_LATENCYTOP is not set
2081CONFIG_SYSCTL_SYSCALL_CHECK=y
2082CONFIG_HAVE_FTRACE=y
2083CONFIG_HAVE_DYNAMIC_FTRACE=y
2084# CONFIG_FTRACE is not set
2085# CONFIG_IRQSOFF_TRACER is not set
2086# CONFIG_SYSPROF_TRACER is not set
2087# CONFIG_SCHED_TRACER is not set
2088# CONFIG_CONTEXT_SWITCH_TRACER is not set
2046CONFIG_PROVIDE_OHCI1394_DMA_INIT=y 2089CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
2047# CONFIG_SAMPLES is not set 2090# CONFIG_SAMPLES is not set
2048# CONFIG_KGDB is not set
2049CONFIG_HAVE_ARCH_KGDB=y 2091CONFIG_HAVE_ARCH_KGDB=y
2092# CONFIG_KGDB is not set
2050# CONFIG_STRICT_DEVMEM is not set 2093# CONFIG_STRICT_DEVMEM is not set
2094CONFIG_X86_VERBOSE_BOOTUP=y
2051CONFIG_EARLY_PRINTK=y 2095CONFIG_EARLY_PRINTK=y
2052CONFIG_DEBUG_STACKOVERFLOW=y 2096CONFIG_DEBUG_STACKOVERFLOW=y
2053CONFIG_DEBUG_STACK_USAGE=y 2097CONFIG_DEBUG_STACK_USAGE=y
2054# CONFIG_DEBUG_PAGEALLOC is not set 2098# CONFIG_DEBUG_PAGEALLOC is not set
2099# CONFIG_DEBUG_PER_CPU_MAPS is not set
2055# CONFIG_X86_PTDUMP is not set 2100# CONFIG_X86_PTDUMP is not set
2056CONFIG_DEBUG_RODATA=y 2101CONFIG_DEBUG_RODATA=y
2057# CONFIG_DEBUG_RODATA_TEST is not set 2102# CONFIG_DEBUG_RODATA_TEST is not set
2058CONFIG_DEBUG_NX_TEST=m 2103CONFIG_DEBUG_NX_TEST=m
2059# CONFIG_4KSTACKS is not set 2104# CONFIG_4KSTACKS is not set
2060CONFIG_X86_FIND_SMP_CONFIG=y
2061CONFIG_X86_MPPARSE=y
2062CONFIG_DOUBLEFAULT=y 2105CONFIG_DOUBLEFAULT=y
2106# CONFIG_MMIOTRACE is not set
2063CONFIG_IO_DELAY_TYPE_0X80=0 2107CONFIG_IO_DELAY_TYPE_0X80=0
2064CONFIG_IO_DELAY_TYPE_0XED=1 2108CONFIG_IO_DELAY_TYPE_0XED=1
2065CONFIG_IO_DELAY_TYPE_UDELAY=2 2109CONFIG_IO_DELAY_TYPE_UDELAY=2
@@ -2071,6 +2115,7 @@ CONFIG_IO_DELAY_0X80=y
2071CONFIG_DEFAULT_IO_DELAY_TYPE=0 2115CONFIG_DEFAULT_IO_DELAY_TYPE=0
2072CONFIG_DEBUG_BOOT_PARAMS=y 2116CONFIG_DEBUG_BOOT_PARAMS=y
2073# CONFIG_CPA_DEBUG is not set 2117# CONFIG_CPA_DEBUG is not set
2118# CONFIG_OPTIMIZE_INLINING is not set
2074 2119
2075# 2120#
2076# Security options 2121# Security options
@@ -2080,7 +2125,6 @@ CONFIG_KEYS_DEBUG_PROC_KEYS=y
2080CONFIG_SECURITY=y 2125CONFIG_SECURITY=y
2081CONFIG_SECURITY_NETWORK=y 2126CONFIG_SECURITY_NETWORK=y
2082# CONFIG_SECURITY_NETWORK_XFRM is not set 2127# CONFIG_SECURITY_NETWORK_XFRM is not set
2083CONFIG_SECURITY_CAPABILITIES=y
2084CONFIG_SECURITY_FILE_CAPABILITIES=y 2128CONFIG_SECURITY_FILE_CAPABILITIES=y
2085# CONFIG_SECURITY_ROOTPLUG is not set 2129# CONFIG_SECURITY_ROOTPLUG is not set
2086CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536 2130CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536
@@ -2141,6 +2185,10 @@ CONFIG_CRYPTO_HMAC=y
2141# CONFIG_CRYPTO_MD4 is not set 2185# CONFIG_CRYPTO_MD4 is not set
2142CONFIG_CRYPTO_MD5=y 2186CONFIG_CRYPTO_MD5=y
2143# CONFIG_CRYPTO_MICHAEL_MIC is not set 2187# CONFIG_CRYPTO_MICHAEL_MIC is not set
2188# CONFIG_CRYPTO_RMD128 is not set
2189# CONFIG_CRYPTO_RMD160 is not set
2190# CONFIG_CRYPTO_RMD256 is not set
2191# CONFIG_CRYPTO_RMD320 is not set
2144CONFIG_CRYPTO_SHA1=y 2192CONFIG_CRYPTO_SHA1=y
2145# CONFIG_CRYPTO_SHA256 is not set 2193# CONFIG_CRYPTO_SHA256 is not set
2146# CONFIG_CRYPTO_SHA512 is not set 2194# CONFIG_CRYPTO_SHA512 is not set
@@ -2151,7 +2199,7 @@ CONFIG_CRYPTO_SHA1=y
2151# Ciphers 2199# Ciphers
2152# 2200#
2153CONFIG_CRYPTO_AES=y 2201CONFIG_CRYPTO_AES=y
2154# CONFIG_CRYPTO_AES_586 is not set 2202CONFIG_CRYPTO_AES_586=y
2155# CONFIG_CRYPTO_ANUBIS is not set 2203# CONFIG_CRYPTO_ANUBIS is not set
2156CONFIG_CRYPTO_ARC4=y 2204CONFIG_CRYPTO_ARC4=y
2157# CONFIG_CRYPTO_BLOWFISH is not set 2205# CONFIG_CRYPTO_BLOWFISH is not set
@@ -2193,6 +2241,7 @@ CONFIG_GENERIC_FIND_FIRST_BIT=y
2193CONFIG_GENERIC_FIND_NEXT_BIT=y 2241CONFIG_GENERIC_FIND_NEXT_BIT=y
2194# CONFIG_CRC_CCITT is not set 2242# CONFIG_CRC_CCITT is not set
2195# CONFIG_CRC16 is not set 2243# CONFIG_CRC16 is not set
2244CONFIG_CRC_T10DIF=y
2196# CONFIG_CRC_ITU_T is not set 2245# CONFIG_CRC_ITU_T is not set
2197CONFIG_CRC32=y 2246CONFIG_CRC32=y
2198# CONFIG_CRC7 is not set 2247# CONFIG_CRC7 is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index a40452429625..678c8acefe04 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,13 +1,13 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc1 3# Linux kernel version: 2.6.27-rc4
4# Sun May 4 19:59:57 2008 4# Mon Aug 25 14:40:46 2008
5# 5#
6CONFIG_64BIT=y 6CONFIG_64BIT=y
7# CONFIG_X86_32 is not set 7# CONFIG_X86_32 is not set
8CONFIG_X86_64=y 8CONFIG_X86_64=y
9CONFIG_X86=y 9CONFIG_X86=y
10CONFIG_DEFCONFIG_LIST="arch/x86/configs/x86_64_defconfig" 10CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig"
11# CONFIG_GENERIC_LOCKBREAK is not set 11# CONFIG_GENERIC_LOCKBREAK is not set
12CONFIG_GENERIC_TIME=y 12CONFIG_GENERIC_TIME=y
13CONFIG_GENERIC_CMOS_UPDATE=y 13CONFIG_GENERIC_CMOS_UPDATE=y
@@ -53,6 +53,7 @@ CONFIG_X86_HT=y
53CONFIG_X86_BIOS_REBOOT=y 53CONFIG_X86_BIOS_REBOOT=y
54CONFIG_X86_TRAMPOLINE=y 54CONFIG_X86_TRAMPOLINE=y
55# CONFIG_KTIME_SCALAR is not set 55# CONFIG_KTIME_SCALAR is not set
56CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
56 57
57# 58#
58# General setup 59# General setup
@@ -82,6 +83,7 @@ CONFIG_CGROUPS=y
82CONFIG_CGROUP_NS=y 83CONFIG_CGROUP_NS=y
83# CONFIG_CGROUP_DEVICE is not set 84# CONFIG_CGROUP_DEVICE is not set
84CONFIG_CPUSETS=y 85CONFIG_CPUSETS=y
86CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
85CONFIG_GROUP_SCHED=y 87CONFIG_GROUP_SCHED=y
86CONFIG_FAIR_GROUP_SCHED=y 88CONFIG_FAIR_GROUP_SCHED=y
87# CONFIG_RT_GROUP_SCHED is not set 89# CONFIG_RT_GROUP_SCHED is not set
@@ -105,7 +107,6 @@ CONFIG_SYSCTL=y
105# CONFIG_EMBEDDED is not set 107# CONFIG_EMBEDDED is not set
106CONFIG_UID16=y 108CONFIG_UID16=y
107CONFIG_SYSCTL_SYSCALL=y 109CONFIG_SYSCTL_SYSCALL=y
108CONFIG_SYSCTL_SYSCALL_CHECK=y
109CONFIG_KALLSYMS=y 110CONFIG_KALLSYMS=y
110CONFIG_KALLSYMS_ALL=y 111CONFIG_KALLSYMS_ALL=y
111CONFIG_KALLSYMS_EXTRA_PASS=y 112CONFIG_KALLSYMS_EXTRA_PASS=y
@@ -113,6 +114,7 @@ CONFIG_HOTPLUG=y
113CONFIG_PRINTK=y 114CONFIG_PRINTK=y
114CONFIG_BUG=y 115CONFIG_BUG=y
115CONFIG_ELF_CORE=y 116CONFIG_ELF_CORE=y
117CONFIG_PCSPKR_PLATFORM=y
116# CONFIG_COMPAT_BRK is not set 118# CONFIG_COMPAT_BRK is not set
117CONFIG_BASE_FULL=y 119CONFIG_BASE_FULL=y
118CONFIG_FUTEX=y 120CONFIG_FUTEX=y
@@ -132,25 +134,33 @@ CONFIG_MARKERS=y
132# CONFIG_OPROFILE is not set 134# CONFIG_OPROFILE is not set
133CONFIG_HAVE_OPROFILE=y 135CONFIG_HAVE_OPROFILE=y
134CONFIG_KPROBES=y 136CONFIG_KPROBES=y
137CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
135CONFIG_KRETPROBES=y 138CONFIG_KRETPROBES=y
139CONFIG_HAVE_IOREMAP_PROT=y
136CONFIG_HAVE_KPROBES=y 140CONFIG_HAVE_KPROBES=y
137CONFIG_HAVE_KRETPROBES=y 141CONFIG_HAVE_KRETPROBES=y
142# CONFIG_HAVE_ARCH_TRACEHOOK is not set
138# CONFIG_HAVE_DMA_ATTRS is not set 143# CONFIG_HAVE_DMA_ATTRS is not set
144CONFIG_USE_GENERIC_SMP_HELPERS=y
145# CONFIG_HAVE_CLK is not set
139CONFIG_PROC_PAGE_MONITOR=y 146CONFIG_PROC_PAGE_MONITOR=y
147# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
140CONFIG_SLABINFO=y 148CONFIG_SLABINFO=y
141CONFIG_RT_MUTEXES=y 149CONFIG_RT_MUTEXES=y
142# CONFIG_TINY_SHMEM is not set 150# CONFIG_TINY_SHMEM is not set
143CONFIG_BASE_SMALL=0 151CONFIG_BASE_SMALL=0
144CONFIG_MODULES=y 152CONFIG_MODULES=y
153# CONFIG_MODULE_FORCE_LOAD is not set
145CONFIG_MODULE_UNLOAD=y 154CONFIG_MODULE_UNLOAD=y
146CONFIG_MODULE_FORCE_UNLOAD=y 155CONFIG_MODULE_FORCE_UNLOAD=y
147# CONFIG_MODVERSIONS is not set 156# CONFIG_MODVERSIONS is not set
148# CONFIG_MODULE_SRCVERSION_ALL is not set 157# CONFIG_MODULE_SRCVERSION_ALL is not set
149# CONFIG_KMOD is not set 158CONFIG_KMOD=y
150CONFIG_STOP_MACHINE=y 159CONFIG_STOP_MACHINE=y
151CONFIG_BLOCK=y 160CONFIG_BLOCK=y
152CONFIG_BLK_DEV_IO_TRACE=y 161CONFIG_BLK_DEV_IO_TRACE=y
153CONFIG_BLK_DEV_BSG=y 162CONFIG_BLK_DEV_BSG=y
163# CONFIG_BLK_DEV_INTEGRITY is not set
154CONFIG_BLOCK_COMPAT=y 164CONFIG_BLOCK_COMPAT=y
155 165
156# 166#
@@ -175,20 +185,15 @@ CONFIG_NO_HZ=y
175CONFIG_HIGH_RES_TIMERS=y 185CONFIG_HIGH_RES_TIMERS=y
176CONFIG_GENERIC_CLOCKEVENTS_BUILD=y 186CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
177CONFIG_SMP=y 187CONFIG_SMP=y
188CONFIG_X86_FIND_SMP_CONFIG=y
189CONFIG_X86_MPPARSE=y
178CONFIG_X86_PC=y 190CONFIG_X86_PC=y
179# CONFIG_X86_ELAN is not set 191# CONFIG_X86_ELAN is not set
180# CONFIG_X86_VOYAGER is not set 192# CONFIG_X86_VOYAGER is not set
181# CONFIG_X86_NUMAQ is not set
182# CONFIG_X86_SUMMIT is not set
183# CONFIG_X86_BIGSMP is not set
184# CONFIG_X86_VISWS is not set
185# CONFIG_X86_GENERICARCH is not set 193# CONFIG_X86_GENERICARCH is not set
186# CONFIG_X86_ES7000 is not set
187# CONFIG_X86_RDC321X is not set
188# CONFIG_X86_VSMP is not set 194# CONFIG_X86_VSMP is not set
189# CONFIG_PARAVIRT_GUEST is not set 195# CONFIG_PARAVIRT_GUEST is not set
190CONFIG_MEMTEST_BOOTPARAM=y 196# CONFIG_MEMTEST is not set
191CONFIG_MEMTEST_BOOTPARAM_VALUE=0
192# CONFIG_M386 is not set 197# CONFIG_M386 is not set
193# CONFIG_M486 is not set 198# CONFIG_M486 is not set
194# CONFIG_M586 is not set 199# CONFIG_M586 is not set
@@ -220,11 +225,12 @@ CONFIG_X86_L1_CACHE_BYTES=64
220CONFIG_X86_INTERNODE_CACHE_BYTES=64 225CONFIG_X86_INTERNODE_CACHE_BYTES=64
221CONFIG_X86_CMPXCHG=y 226CONFIG_X86_CMPXCHG=y
222CONFIG_X86_L1_CACHE_SHIFT=6 227CONFIG_X86_L1_CACHE_SHIFT=6
223CONFIG_X86_GOOD_APIC=y 228CONFIG_X86_WP_WORKS_OK=y
224CONFIG_X86_INTEL_USERCOPY=y 229CONFIG_X86_INTEL_USERCOPY=y
225CONFIG_X86_USE_PPRO_CHECKSUM=y 230CONFIG_X86_USE_PPRO_CHECKSUM=y
226CONFIG_X86_P6_NOP=y 231CONFIG_X86_P6_NOP=y
227CONFIG_X86_TSC=y 232CONFIG_X86_TSC=y
233CONFIG_X86_CMPXCHG64=y
228CONFIG_X86_CMOV=y 234CONFIG_X86_CMOV=y
229CONFIG_X86_MINIMUM_CPU_FAMILY=64 235CONFIG_X86_MINIMUM_CPU_FAMILY=64
230CONFIG_X86_DEBUGCTLMSR=y 236CONFIG_X86_DEBUGCTLMSR=y
@@ -234,8 +240,10 @@ CONFIG_DMI=y
234CONFIG_GART_IOMMU=y 240CONFIG_GART_IOMMU=y
235CONFIG_CALGARY_IOMMU=y 241CONFIG_CALGARY_IOMMU=y
236CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y 242CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
243CONFIG_AMD_IOMMU=y
237CONFIG_SWIOTLB=y 244CONFIG_SWIOTLB=y
238CONFIG_IOMMU_HELPER=y 245CONFIG_IOMMU_HELPER=y
246# CONFIG_MAXSMP is not set
239CONFIG_NR_CPUS=4 247CONFIG_NR_CPUS=4
240# CONFIG_SCHED_SMT is not set 248# CONFIG_SCHED_SMT is not set
241CONFIG_SCHED_MC=y 249CONFIG_SCHED_MC=y
@@ -281,6 +289,7 @@ CONFIG_ZONE_DMA_FLAG=1
281CONFIG_BOUNCE=y 289CONFIG_BOUNCE=y
282CONFIG_VIRT_TO_BUS=y 290CONFIG_VIRT_TO_BUS=y
283CONFIG_MTRR=y 291CONFIG_MTRR=y
292# CONFIG_MTRR_SANITIZER is not set
284# CONFIG_X86_PAT is not set 293# CONFIG_X86_PAT is not set
285CONFIG_EFI=y 294CONFIG_EFI=y
286CONFIG_SECCOMP=y 295CONFIG_SECCOMP=y
@@ -313,6 +322,7 @@ CONFIG_PM_TRACE_RTC=y
313CONFIG_PM_SLEEP_SMP=y 322CONFIG_PM_SLEEP_SMP=y
314CONFIG_PM_SLEEP=y 323CONFIG_PM_SLEEP=y
315CONFIG_SUSPEND=y 324CONFIG_SUSPEND=y
325# CONFIG_PM_TEST_SUSPEND is not set
316CONFIG_SUSPEND_FREEZER=y 326CONFIG_SUSPEND_FREEZER=y
317CONFIG_HIBERNATION=y 327CONFIG_HIBERNATION=y
318CONFIG_PM_STD_PARTITION="" 328CONFIG_PM_STD_PARTITION=""
@@ -339,6 +349,7 @@ CONFIG_ACPI_NUMA=y
339CONFIG_ACPI_BLACKLIST_YEAR=0 349CONFIG_ACPI_BLACKLIST_YEAR=0
340# CONFIG_ACPI_DEBUG is not set 350# CONFIG_ACPI_DEBUG is not set
341CONFIG_ACPI_EC=y 351CONFIG_ACPI_EC=y
352# CONFIG_ACPI_PCI_SLOT is not set
342CONFIG_ACPI_POWER=y 353CONFIG_ACPI_POWER=y
343CONFIG_ACPI_SYSTEM=y 354CONFIG_ACPI_SYSTEM=y
344CONFIG_X86_PM_TIMER=y 355CONFIG_X86_PM_TIMER=y
@@ -437,10 +448,6 @@ CONFIG_IA32_EMULATION=y
437CONFIG_COMPAT=y 448CONFIG_COMPAT=y
438CONFIG_COMPAT_FOR_U64_ALIGNMENT=y 449CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
439CONFIG_SYSVIPC_COMPAT=y 450CONFIG_SYSVIPC_COMPAT=y
440
441#
442# Networking
443#
444CONFIG_NET=y 451CONFIG_NET=y
445 452
446# 453#
@@ -464,7 +471,10 @@ CONFIG_IP_FIB_HASH=y
464CONFIG_IP_MULTIPLE_TABLES=y 471CONFIG_IP_MULTIPLE_TABLES=y
465CONFIG_IP_ROUTE_MULTIPATH=y 472CONFIG_IP_ROUTE_MULTIPATH=y
466CONFIG_IP_ROUTE_VERBOSE=y 473CONFIG_IP_ROUTE_VERBOSE=y
467# CONFIG_IP_PNP is not set 474CONFIG_IP_PNP=y
475CONFIG_IP_PNP_DHCP=y
476CONFIG_IP_PNP_BOOTP=y
477CONFIG_IP_PNP_RARP=y
468# CONFIG_NET_IPIP is not set 478# CONFIG_NET_IPIP is not set
469# CONFIG_NET_IPGRE is not set 479# CONFIG_NET_IPGRE is not set
470CONFIG_IP_MROUTE=y 480CONFIG_IP_MROUTE=y
@@ -607,7 +617,6 @@ CONFIG_NET_SCHED=y
607# CONFIG_NET_SCH_HTB is not set 617# CONFIG_NET_SCH_HTB is not set
608# CONFIG_NET_SCH_HFSC is not set 618# CONFIG_NET_SCH_HFSC is not set
609# CONFIG_NET_SCH_PRIO is not set 619# CONFIG_NET_SCH_PRIO is not set
610# CONFIG_NET_SCH_RR is not set
611# CONFIG_NET_SCH_RED is not set 620# CONFIG_NET_SCH_RED is not set
612# CONFIG_NET_SCH_SFQ is not set 621# CONFIG_NET_SCH_SFQ is not set
613# CONFIG_NET_SCH_TEQL is not set 622# CONFIG_NET_SCH_TEQL is not set
@@ -669,28 +678,19 @@ CONFIG_FIB_RULES=y
669CONFIG_CFG80211=y 678CONFIG_CFG80211=y
670CONFIG_NL80211=y 679CONFIG_NL80211=y
671CONFIG_WIRELESS_EXT=y 680CONFIG_WIRELESS_EXT=y
681CONFIG_WIRELESS_EXT_SYSFS=y
672CONFIG_MAC80211=y 682CONFIG_MAC80211=y
673 683
674# 684#
675# Rate control algorithm selection 685# Rate control algorithm selection
676# 686#
687CONFIG_MAC80211_RC_PID=y
677CONFIG_MAC80211_RC_DEFAULT_PID=y 688CONFIG_MAC80211_RC_DEFAULT_PID=y
678# CONFIG_MAC80211_RC_DEFAULT_NONE is not set
679
680#
681# Selecting 'y' for an algorithm will
682#
683
684#
685# build the algorithm into mac80211.
686#
687CONFIG_MAC80211_RC_DEFAULT="pid" 689CONFIG_MAC80211_RC_DEFAULT="pid"
688CONFIG_MAC80211_RC_PID=y
689# CONFIG_MAC80211_MESH is not set 690# CONFIG_MAC80211_MESH is not set
690CONFIG_MAC80211_LEDS=y 691CONFIG_MAC80211_LEDS=y
691# CONFIG_MAC80211_DEBUGFS is not set 692# CONFIG_MAC80211_DEBUGFS is not set
692# CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT is not set 693# CONFIG_MAC80211_DEBUG_MENU is not set
693# CONFIG_MAC80211_DEBUG is not set
694# CONFIG_IEEE80211 is not set 694# CONFIG_IEEE80211 is not set
695# CONFIG_RFKILL is not set 695# CONFIG_RFKILL is not set
696# CONFIG_NET_9P is not set 696# CONFIG_NET_9P is not set
@@ -706,6 +706,8 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
706CONFIG_STANDALONE=y 706CONFIG_STANDALONE=y
707CONFIG_PREVENT_FIRMWARE_BUILD=y 707CONFIG_PREVENT_FIRMWARE_BUILD=y
708CONFIG_FW_LOADER=y 708CONFIG_FW_LOADER=y
709CONFIG_FIRMWARE_IN_KERNEL=y
710CONFIG_EXTRA_FIRMWARE=""
709# CONFIG_DEBUG_DRIVER is not set 711# CONFIG_DEBUG_DRIVER is not set
710CONFIG_DEBUG_DEVRES=y 712CONFIG_DEBUG_DEVRES=y
711# CONFIG_SYS_HYPERVISOR is not set 713# CONFIG_SYS_HYPERVISOR is not set
@@ -738,6 +740,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
738# CONFIG_BLK_DEV_XIP is not set 740# CONFIG_BLK_DEV_XIP is not set
739# CONFIG_CDROM_PKTCDVD is not set 741# CONFIG_CDROM_PKTCDVD is not set
740# CONFIG_ATA_OVER_ETH is not set 742# CONFIG_ATA_OVER_ETH is not set
743# CONFIG_BLK_DEV_HD is not set
741CONFIG_MISC_DEVICES=y 744CONFIG_MISC_DEVICES=y
742# CONFIG_IBM_ASM is not set 745# CONFIG_IBM_ASM is not set
743# CONFIG_PHANTOM is not set 746# CONFIG_PHANTOM is not set
@@ -748,10 +751,14 @@ CONFIG_MISC_DEVICES=y
748# CONFIG_ASUS_LAPTOP is not set 751# CONFIG_ASUS_LAPTOP is not set
749# CONFIG_FUJITSU_LAPTOP is not set 752# CONFIG_FUJITSU_LAPTOP is not set
750# CONFIG_MSI_LAPTOP is not set 753# CONFIG_MSI_LAPTOP is not set
754# CONFIG_COMPAL_LAPTOP is not set
751# CONFIG_SONY_LAPTOP is not set 755# CONFIG_SONY_LAPTOP is not set
752# CONFIG_THINKPAD_ACPI is not set 756# CONFIG_THINKPAD_ACPI is not set
753# CONFIG_INTEL_MENLOW is not set 757# CONFIG_INTEL_MENLOW is not set
754# CONFIG_ENCLOSURE_SERVICES is not set 758# CONFIG_ENCLOSURE_SERVICES is not set
759# CONFIG_SGI_XP is not set
760# CONFIG_HP_ILO is not set
761# CONFIG_SGI_GRU is not set
755CONFIG_HAVE_IDE=y 762CONFIG_HAVE_IDE=y
756# CONFIG_IDE is not set 763# CONFIG_IDE is not set
757 764
@@ -790,12 +797,13 @@ CONFIG_SCSI_WAIT_SCAN=m
790# 797#
791CONFIG_SCSI_SPI_ATTRS=y 798CONFIG_SCSI_SPI_ATTRS=y
792# CONFIG_SCSI_FC_ATTRS is not set 799# CONFIG_SCSI_FC_ATTRS is not set
793# CONFIG_SCSI_ISCSI_ATTRS is not set 800CONFIG_SCSI_ISCSI_ATTRS=y
794# CONFIG_SCSI_SAS_ATTRS is not set 801# CONFIG_SCSI_SAS_ATTRS is not set
795# CONFIG_SCSI_SAS_LIBSAS is not set 802# CONFIG_SCSI_SAS_LIBSAS is not set
796# CONFIG_SCSI_SRP_ATTRS is not set 803# CONFIG_SCSI_SRP_ATTRS is not set
797# CONFIG_SCSI_LOWLEVEL is not set 804# CONFIG_SCSI_LOWLEVEL is not set
798# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set 805# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
806# CONFIG_SCSI_DH is not set
799CONFIG_ATA=y 807CONFIG_ATA=y
800# CONFIG_ATA_NONSTANDARD is not set 808# CONFIG_ATA_NONSTANDARD is not set
801CONFIG_ATA_ACPI=y 809CONFIG_ATA_ACPI=y
@@ -857,6 +865,7 @@ CONFIG_PATA_OLDPIIX=y
857# CONFIG_PATA_SIS is not set 865# CONFIG_PATA_SIS is not set
858# CONFIG_PATA_VIA is not set 866# CONFIG_PATA_VIA is not set
859# CONFIG_PATA_WINBOND is not set 867# CONFIG_PATA_WINBOND is not set
868CONFIG_PATA_SCH=y
860CONFIG_MD=y 869CONFIG_MD=y
861CONFIG_BLK_DEV_MD=y 870CONFIG_BLK_DEV_MD=y
862# CONFIG_MD_LINEAR is not set 871# CONFIG_MD_LINEAR is not set
@@ -880,13 +889,16 @@ CONFIG_DM_ZERO=y
880# 889#
881# IEEE 1394 (FireWire) support 890# IEEE 1394 (FireWire) support
882# 891#
892
893#
894# Enable only one of the two stacks, unless you know what you are doing
895#
883# CONFIG_FIREWIRE is not set 896# CONFIG_FIREWIRE is not set
884# CONFIG_IEEE1394 is not set 897# CONFIG_IEEE1394 is not set
885# CONFIG_I2O is not set 898# CONFIG_I2O is not set
886CONFIG_MACINTOSH_DRIVERS=y 899CONFIG_MACINTOSH_DRIVERS=y
887CONFIG_MAC_EMUMOUSEBTN=y 900CONFIG_MAC_EMUMOUSEBTN=y
888CONFIG_NETDEVICES=y 901CONFIG_NETDEVICES=y
889# CONFIG_NETDEVICES_MULTIQUEUE is not set
890# CONFIG_IFB is not set 902# CONFIG_IFB is not set
891# CONFIG_DUMMY is not set 903# CONFIG_DUMMY is not set
892# CONFIG_BONDING is not set 904# CONFIG_BONDING is not set
@@ -896,7 +908,23 @@ CONFIG_NETDEVICES=y
896# CONFIG_VETH is not set 908# CONFIG_VETH is not set
897# CONFIG_NET_SB1000 is not set 909# CONFIG_NET_SB1000 is not set
898# CONFIG_ARCNET is not set 910# CONFIG_ARCNET is not set
899# CONFIG_PHYLIB is not set 911CONFIG_PHYLIB=y
912
913#
914# MII PHY device drivers
915#
916# CONFIG_MARVELL_PHY is not set
917# CONFIG_DAVICOM_PHY is not set
918# CONFIG_QSEMI_PHY is not set
919# CONFIG_LXT_PHY is not set
920# CONFIG_CICADA_PHY is not set
921# CONFIG_VITESSE_PHY is not set
922# CONFIG_SMSC_PHY is not set
923# CONFIG_BROADCOM_PHY is not set
924# CONFIG_ICPLUS_PHY is not set
925# CONFIG_REALTEK_PHY is not set
926# CONFIG_FIXED_PHY is not set
927# CONFIG_MDIO_BITBANG is not set
900CONFIG_NET_ETHERNET=y 928CONFIG_NET_ETHERNET=y
901CONFIG_MII=y 929CONFIG_MII=y
902# CONFIG_HAPPYMEAL is not set 930# CONFIG_HAPPYMEAL is not set
@@ -940,16 +968,15 @@ CONFIG_8139TOO_PIO=y
940# CONFIG_SIS900 is not set 968# CONFIG_SIS900 is not set
941# CONFIG_EPIC100 is not set 969# CONFIG_EPIC100 is not set
942# CONFIG_SUNDANCE is not set 970# CONFIG_SUNDANCE is not set
971# CONFIG_TLAN is not set
943# CONFIG_VIA_RHINE is not set 972# CONFIG_VIA_RHINE is not set
944# CONFIG_SC92031 is not set 973# CONFIG_SC92031 is not set
945CONFIG_NETDEV_1000=y 974CONFIG_NETDEV_1000=y
946# CONFIG_ACENIC is not set 975# CONFIG_ACENIC is not set
947# CONFIG_DL2K is not set 976# CONFIG_DL2K is not set
948CONFIG_E1000=y 977CONFIG_E1000=y
949# CONFIG_E1000_NAPI is not set
950# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set 978# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
951# CONFIG_E1000E is not set 979# CONFIG_E1000E is not set
952# CONFIG_E1000E_ENABLED is not set
953# CONFIG_IP1000 is not set 980# CONFIG_IP1000 is not set
954# CONFIG_IGB is not set 981# CONFIG_IGB is not set
955# CONFIG_NS83820 is not set 982# CONFIG_NS83820 is not set
@@ -965,6 +992,7 @@ CONFIG_TIGON3=y
965# CONFIG_BNX2 is not set 992# CONFIG_BNX2 is not set
966# CONFIG_QLA3XXX is not set 993# CONFIG_QLA3XXX is not set
967# CONFIG_ATL1 is not set 994# CONFIG_ATL1 is not set
995# CONFIG_ATL1E is not set
968CONFIG_NETDEV_10000=y 996CONFIG_NETDEV_10000=y
969# CONFIG_CHELSIO_T1 is not set 997# CONFIG_CHELSIO_T1 is not set
970# CONFIG_CHELSIO_T3 is not set 998# CONFIG_CHELSIO_T3 is not set
@@ -1003,13 +1031,14 @@ CONFIG_WLAN_80211=y
1003# CONFIG_RTL8180 is not set 1031# CONFIG_RTL8180 is not set
1004# CONFIG_RTL8187 is not set 1032# CONFIG_RTL8187 is not set
1005# CONFIG_ADM8211 is not set 1033# CONFIG_ADM8211 is not set
1034# CONFIG_MAC80211_HWSIM is not set
1006# CONFIG_P54_COMMON is not set 1035# CONFIG_P54_COMMON is not set
1007CONFIG_ATH5K=y 1036CONFIG_ATH5K=y
1008# CONFIG_ATH5K_DEBUG is not set 1037# CONFIG_ATH5K_DEBUG is not set
1009# CONFIG_IWLWIFI is not set 1038# CONFIG_ATH9K is not set
1010# CONFIG_IWLCORE is not set 1039# CONFIG_IWLCORE is not set
1011# CONFIG_IWLWIFI_LEDS is not set 1040# CONFIG_IWLWIFI_LEDS is not set
1012# CONFIG_IWL4965 is not set 1041# CONFIG_IWLAGN is not set
1013# CONFIG_IWL3945 is not set 1042# CONFIG_IWL3945 is not set
1014# CONFIG_HOSTAP is not set 1043# CONFIG_HOSTAP is not set
1015# CONFIG_B43 is not set 1044# CONFIG_B43 is not set
@@ -1088,6 +1117,7 @@ CONFIG_MOUSE_PS2_TRACKPOINT=y
1088# CONFIG_MOUSE_PS2_TOUCHKIT is not set 1117# CONFIG_MOUSE_PS2_TOUCHKIT is not set
1089# CONFIG_MOUSE_SERIAL is not set 1118# CONFIG_MOUSE_SERIAL is not set
1090# CONFIG_MOUSE_APPLETOUCH is not set 1119# CONFIG_MOUSE_APPLETOUCH is not set
1120# CONFIG_MOUSE_BCM5974 is not set
1091# CONFIG_MOUSE_VSXXXAA is not set 1121# CONFIG_MOUSE_VSXXXAA is not set
1092CONFIG_INPUT_JOYSTICK=y 1122CONFIG_INPUT_JOYSTICK=y
1093# CONFIG_JOYSTICK_ANALOG is not set 1123# CONFIG_JOYSTICK_ANALOG is not set
@@ -1122,12 +1152,14 @@ CONFIG_INPUT_TOUCHSCREEN=y
1122# CONFIG_TOUCHSCREEN_GUNZE is not set 1152# CONFIG_TOUCHSCREEN_GUNZE is not set
1123# CONFIG_TOUCHSCREEN_ELO is not set 1153# CONFIG_TOUCHSCREEN_ELO is not set
1124# CONFIG_TOUCHSCREEN_MTOUCH is not set 1154# CONFIG_TOUCHSCREEN_MTOUCH is not set
1155# CONFIG_TOUCHSCREEN_INEXIO is not set
1125# CONFIG_TOUCHSCREEN_MK712 is not set 1156# CONFIG_TOUCHSCREEN_MK712 is not set
1126# CONFIG_TOUCHSCREEN_PENMOUNT is not set 1157# CONFIG_TOUCHSCREEN_PENMOUNT is not set
1127# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set 1158# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
1128# CONFIG_TOUCHSCREEN_TOUCHWIN is not set 1159# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
1129# CONFIG_TOUCHSCREEN_UCB1400 is not set 1160# CONFIG_TOUCHSCREEN_UCB1400 is not set
1130# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set 1161# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
1162# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
1131CONFIG_INPUT_MISC=y 1163CONFIG_INPUT_MISC=y
1132# CONFIG_INPUT_PCSPKR is not set 1164# CONFIG_INPUT_PCSPKR is not set
1133# CONFIG_INPUT_APANEL is not set 1165# CONFIG_INPUT_APANEL is not set
@@ -1155,6 +1187,7 @@ CONFIG_SERIO_LIBPS2=y
1155# Character devices 1187# Character devices
1156# 1188#
1157CONFIG_VT=y 1189CONFIG_VT=y
1190CONFIG_CONSOLE_TRANSLATIONS=y
1158CONFIG_VT_CONSOLE=y 1191CONFIG_VT_CONSOLE=y
1159CONFIG_HW_CONSOLE=y 1192CONFIG_HW_CONSOLE=y
1160CONFIG_VT_HW_CONSOLE_BINDING=y 1193CONFIG_VT_HW_CONSOLE_BINDING=y
@@ -1222,7 +1255,6 @@ CONFIG_NVRAM=y
1222# CONFIG_PC8736x_GPIO is not set 1255# CONFIG_PC8736x_GPIO is not set
1223# CONFIG_RAW_DRIVER is not set 1256# CONFIG_RAW_DRIVER is not set
1224CONFIG_HPET=y 1257CONFIG_HPET=y
1225# CONFIG_HPET_RTC_IRQ is not set
1226# CONFIG_HPET_MMAP is not set 1258# CONFIG_HPET_MMAP is not set
1227# CONFIG_HANGCHECK_TIMER is not set 1259# CONFIG_HANGCHECK_TIMER is not set
1228# CONFIG_TCG_TPM is not set 1260# CONFIG_TCG_TPM is not set
@@ -1231,42 +1263,63 @@ CONFIG_DEVPORT=y
1231CONFIG_I2C=y 1263CONFIG_I2C=y
1232CONFIG_I2C_BOARDINFO=y 1264CONFIG_I2C_BOARDINFO=y
1233# CONFIG_I2C_CHARDEV is not set 1265# CONFIG_I2C_CHARDEV is not set
1266CONFIG_I2C_HELPER_AUTO=y
1234 1267
1235# 1268#
1236# I2C Hardware Bus support 1269# I2C Hardware Bus support
1237# 1270#
1271
1272#
1273# PC SMBus host controller drivers
1274#
1238# CONFIG_I2C_ALI1535 is not set 1275# CONFIG_I2C_ALI1535 is not set
1239# CONFIG_I2C_ALI1563 is not set 1276# CONFIG_I2C_ALI1563 is not set
1240# CONFIG_I2C_ALI15X3 is not set 1277# CONFIG_I2C_ALI15X3 is not set
1241# CONFIG_I2C_AMD756 is not set 1278# CONFIG_I2C_AMD756 is not set
1242# CONFIG_I2C_AMD8111 is not set 1279# CONFIG_I2C_AMD8111 is not set
1243CONFIG_I2C_I801=y 1280CONFIG_I2C_I801=y
1244# CONFIG_I2C_I810 is not set 1281# CONFIG_I2C_ISCH is not set
1245# CONFIG_I2C_PIIX4 is not set 1282# CONFIG_I2C_PIIX4 is not set
1246# CONFIG_I2C_NFORCE2 is not set 1283# CONFIG_I2C_NFORCE2 is not set
1247# CONFIG_I2C_OCORES is not set
1248# CONFIG_I2C_PARPORT_LIGHT is not set
1249# CONFIG_I2C_PROSAVAGE is not set
1250# CONFIG_I2C_SAVAGE4 is not set
1251# CONFIG_I2C_SIMTEC is not set
1252# CONFIG_I2C_SIS5595 is not set 1284# CONFIG_I2C_SIS5595 is not set
1253# CONFIG_I2C_SIS630 is not set 1285# CONFIG_I2C_SIS630 is not set
1254# CONFIG_I2C_SIS96X is not set 1286# CONFIG_I2C_SIS96X is not set
1255# CONFIG_I2C_TAOS_EVM is not set
1256# CONFIG_I2C_STUB is not set
1257# CONFIG_I2C_TINY_USB is not set
1258# CONFIG_I2C_VIA is not set 1287# CONFIG_I2C_VIA is not set
1259# CONFIG_I2C_VIAPRO is not set 1288# CONFIG_I2C_VIAPRO is not set
1289
1290#
1291# I2C system bus drivers (mostly embedded / system-on-chip)
1292#
1293# CONFIG_I2C_OCORES is not set
1294# CONFIG_I2C_SIMTEC is not set
1295
1296#
1297# External I2C/SMBus adapter drivers
1298#
1299# CONFIG_I2C_PARPORT_LIGHT is not set
1300# CONFIG_I2C_TAOS_EVM is not set
1301# CONFIG_I2C_TINY_USB is not set
1302
1303#
1304# Graphics adapter I2C/DDC channel drivers
1305#
1260# CONFIG_I2C_VOODOO3 is not set 1306# CONFIG_I2C_VOODOO3 is not set
1307
1308#
1309# Other I2C/SMBus bus drivers
1310#
1261# CONFIG_I2C_PCA_PLATFORM is not set 1311# CONFIG_I2C_PCA_PLATFORM is not set
1312# CONFIG_I2C_STUB is not set
1262 1313
1263# 1314#
1264# Miscellaneous I2C Chip support 1315# Miscellaneous I2C Chip support
1265# 1316#
1266# CONFIG_DS1682 is not set 1317# CONFIG_DS1682 is not set
1318# CONFIG_AT24 is not set
1267# CONFIG_SENSORS_EEPROM is not set 1319# CONFIG_SENSORS_EEPROM is not set
1268# CONFIG_SENSORS_PCF8574 is not set 1320# CONFIG_SENSORS_PCF8574 is not set
1269# CONFIG_PCF8575 is not set 1321# CONFIG_PCF8575 is not set
1322# CONFIG_SENSORS_PCA9539 is not set
1270# CONFIG_SENSORS_PCF8591 is not set 1323# CONFIG_SENSORS_PCF8591 is not set
1271# CONFIG_SENSORS_MAX6875 is not set 1324# CONFIG_SENSORS_MAX6875 is not set
1272# CONFIG_SENSORS_TSL2550 is not set 1325# CONFIG_SENSORS_TSL2550 is not set
@@ -1275,6 +1328,8 @@ CONFIG_I2C_I801=y
1275# CONFIG_I2C_DEBUG_BUS is not set 1328# CONFIG_I2C_DEBUG_BUS is not set
1276# CONFIG_I2C_DEBUG_CHIP is not set 1329# CONFIG_I2C_DEBUG_CHIP is not set
1277# CONFIG_SPI is not set 1330# CONFIG_SPI is not set
1331CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
1332# CONFIG_GPIOLIB is not set
1278# CONFIG_W1 is not set 1333# CONFIG_W1 is not set
1279CONFIG_POWER_SUPPLY=y 1334CONFIG_POWER_SUPPLY=y
1280# CONFIG_POWER_SUPPLY_DEBUG is not set 1335# CONFIG_POWER_SUPPLY_DEBUG is not set
@@ -1335,8 +1390,10 @@ CONFIG_SSB_POSSIBLE=y
1335# 1390#
1336# Multifunction device drivers 1391# Multifunction device drivers
1337# 1392#
1393# CONFIG_MFD_CORE is not set
1338# CONFIG_MFD_SM501 is not set 1394# CONFIG_MFD_SM501 is not set
1339# CONFIG_HTC_PASIC3 is not set 1395# CONFIG_HTC_PASIC3 is not set
1396# CONFIG_MFD_TMIO is not set
1340 1397
1341# 1398#
1342# Multimedia devices 1399# Multimedia devices
@@ -1347,6 +1404,7 @@ CONFIG_SSB_POSSIBLE=y
1347# 1404#
1348# CONFIG_VIDEO_DEV is not set 1405# CONFIG_VIDEO_DEV is not set
1349# CONFIG_DVB_CORE is not set 1406# CONFIG_DVB_CORE is not set
1407# CONFIG_VIDEO_MEDIA is not set
1350 1408
1351# 1409#
1352# Multimedia drivers 1410# Multimedia drivers
@@ -1387,7 +1445,6 @@ CONFIG_FB_CFB_IMAGEBLIT=y
1387# CONFIG_FB_SYS_IMAGEBLIT is not set 1445# CONFIG_FB_SYS_IMAGEBLIT is not set
1388# CONFIG_FB_FOREIGN_ENDIAN is not set 1446# CONFIG_FB_FOREIGN_ENDIAN is not set
1389# CONFIG_FB_SYS_FOPS is not set 1447# CONFIG_FB_SYS_FOPS is not set
1390CONFIG_FB_DEFERRED_IO=y
1391# CONFIG_FB_SVGALIB is not set 1448# CONFIG_FB_SVGALIB is not set
1392# CONFIG_FB_MACMODES is not set 1449# CONFIG_FB_MACMODES is not set
1393# CONFIG_FB_BACKLIGHT is not set 1450# CONFIG_FB_BACKLIGHT is not set
@@ -1430,6 +1487,7 @@ CONFIG_FB_EFI=y
1430# CONFIG_FB_TRIDENT is not set 1487# CONFIG_FB_TRIDENT is not set
1431# CONFIG_FB_ARK is not set 1488# CONFIG_FB_ARK is not set
1432# CONFIG_FB_PM3 is not set 1489# CONFIG_FB_PM3 is not set
1490# CONFIG_FB_CARMINE is not set
1433# CONFIG_FB_GEODE is not set 1491# CONFIG_FB_GEODE is not set
1434# CONFIG_FB_VIRTUAL is not set 1492# CONFIG_FB_VIRTUAL is not set
1435CONFIG_BACKLIGHT_LCD_SUPPORT=y 1493CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -1437,6 +1495,7 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y
1437CONFIG_BACKLIGHT_CLASS_DEVICE=y 1495CONFIG_BACKLIGHT_CLASS_DEVICE=y
1438# CONFIG_BACKLIGHT_CORGI is not set 1496# CONFIG_BACKLIGHT_CORGI is not set
1439# CONFIG_BACKLIGHT_PROGEAR is not set 1497# CONFIG_BACKLIGHT_PROGEAR is not set
1498# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
1440 1499
1441# 1500#
1442# Display device support 1501# Display device support
@@ -1456,15 +1515,7 @@ CONFIG_LOGO=y
1456# CONFIG_LOGO_LINUX_MONO is not set 1515# CONFIG_LOGO_LINUX_MONO is not set
1457# CONFIG_LOGO_LINUX_VGA16 is not set 1516# CONFIG_LOGO_LINUX_VGA16 is not set
1458CONFIG_LOGO_LINUX_CLUT224=y 1517CONFIG_LOGO_LINUX_CLUT224=y
1459
1460#
1461# Sound
1462#
1463CONFIG_SOUND=y 1518CONFIG_SOUND=y
1464
1465#
1466# Advanced Linux Sound Architecture
1467#
1468CONFIG_SND=y 1519CONFIG_SND=y
1469CONFIG_SND_TIMER=y 1520CONFIG_SND_TIMER=y
1470CONFIG_SND_PCM=y 1521CONFIG_SND_PCM=y
@@ -1482,20 +1533,14 @@ CONFIG_SND_VERBOSE_PROCFS=y
1482# CONFIG_SND_VERBOSE_PRINTK is not set 1533# CONFIG_SND_VERBOSE_PRINTK is not set
1483# CONFIG_SND_DEBUG is not set 1534# CONFIG_SND_DEBUG is not set
1484CONFIG_SND_VMASTER=y 1535CONFIG_SND_VMASTER=y
1485 1536CONFIG_SND_DRIVERS=y
1486#
1487# Generic devices
1488#
1489# CONFIG_SND_PCSP is not set 1537# CONFIG_SND_PCSP is not set
1490# CONFIG_SND_DUMMY is not set 1538# CONFIG_SND_DUMMY is not set
1491# CONFIG_SND_VIRMIDI is not set 1539# CONFIG_SND_VIRMIDI is not set
1492# CONFIG_SND_MTPAV is not set 1540# CONFIG_SND_MTPAV is not set
1493# CONFIG_SND_SERIAL_U16550 is not set 1541# CONFIG_SND_SERIAL_U16550 is not set
1494# CONFIG_SND_MPU401 is not set 1542# CONFIG_SND_MPU401 is not set
1495 1543CONFIG_SND_PCI=y
1496#
1497# PCI devices
1498#
1499# CONFIG_SND_AD1889 is not set 1544# CONFIG_SND_AD1889 is not set
1500# CONFIG_SND_ALS300 is not set 1545# CONFIG_SND_ALS300 is not set
1501# CONFIG_SND_ALS4000 is not set 1546# CONFIG_SND_ALS4000 is not set
@@ -1568,36 +1613,14 @@ CONFIG_SND_HDA_GENERIC=y
1568# CONFIG_SND_VIRTUOSO is not set 1613# CONFIG_SND_VIRTUOSO is not set
1569# CONFIG_SND_VX222 is not set 1614# CONFIG_SND_VX222 is not set
1570# CONFIG_SND_YMFPCI is not set 1615# CONFIG_SND_YMFPCI is not set
1571 1616CONFIG_SND_USB=y
1572#
1573# USB devices
1574#
1575# CONFIG_SND_USB_AUDIO is not set 1617# CONFIG_SND_USB_AUDIO is not set
1576# CONFIG_SND_USB_USX2Y is not set 1618# CONFIG_SND_USB_USX2Y is not set
1577# CONFIG_SND_USB_CAIAQ is not set 1619# CONFIG_SND_USB_CAIAQ is not set
1578 1620CONFIG_SND_PCMCIA=y
1579#
1580# PCMCIA devices
1581#
1582# CONFIG_SND_VXPOCKET is not set 1621# CONFIG_SND_VXPOCKET is not set
1583# CONFIG_SND_PDAUDIOCF is not set 1622# CONFIG_SND_PDAUDIOCF is not set
1584
1585#
1586# System on Chip audio support
1587#
1588# CONFIG_SND_SOC is not set 1623# CONFIG_SND_SOC is not set
1589
1590#
1591# ALSA SoC audio for Freescale SOCs
1592#
1593
1594#
1595# SoC Audio for the Texas Instruments OMAP
1596#
1597
1598#
1599# Open Sound System
1600#
1601# CONFIG_SOUND_PRIME is not set 1624# CONFIG_SOUND_PRIME is not set
1602CONFIG_HID_SUPPORT=y 1625CONFIG_HID_SUPPORT=y
1603CONFIG_HID=y 1626CONFIG_HID=y
@@ -1633,6 +1656,7 @@ CONFIG_USB_DEVICEFS=y
1633# CONFIG_USB_DYNAMIC_MINORS is not set 1656# CONFIG_USB_DYNAMIC_MINORS is not set
1634CONFIG_USB_SUSPEND=y 1657CONFIG_USB_SUSPEND=y
1635# CONFIG_USB_OTG is not set 1658# CONFIG_USB_OTG is not set
1659CONFIG_USB_MON=y
1636 1660
1637# 1661#
1638# USB Host Controller Drivers 1662# USB Host Controller Drivers
@@ -1656,6 +1680,7 @@ CONFIG_USB_UHCI_HCD=y
1656# 1680#
1657# CONFIG_USB_ACM is not set 1681# CONFIG_USB_ACM is not set
1658CONFIG_USB_PRINTER=y 1682CONFIG_USB_PRINTER=y
1683# CONFIG_USB_WDM is not set
1659 1684
1660# 1685#
1661# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 1686# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
@@ -1677,6 +1702,7 @@ CONFIG_USB_STORAGE=y
1677# CONFIG_USB_STORAGE_ALAUDA is not set 1702# CONFIG_USB_STORAGE_ALAUDA is not set
1678# CONFIG_USB_STORAGE_ONETOUCH is not set 1703# CONFIG_USB_STORAGE_ONETOUCH is not set
1679# CONFIG_USB_STORAGE_KARMA is not set 1704# CONFIG_USB_STORAGE_KARMA is not set
1705# CONFIG_USB_STORAGE_SIERRA is not set
1680# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set 1706# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
1681CONFIG_USB_LIBUSUAL=y 1707CONFIG_USB_LIBUSUAL=y
1682 1708
@@ -1685,7 +1711,6 @@ CONFIG_USB_LIBUSUAL=y
1685# 1711#
1686# CONFIG_USB_MDC800 is not set 1712# CONFIG_USB_MDC800 is not set
1687# CONFIG_USB_MICROTEK is not set 1713# CONFIG_USB_MICROTEK is not set
1688CONFIG_USB_MON=y
1689 1714
1690# 1715#
1691# USB port drivers 1716# USB port drivers
@@ -1698,7 +1723,6 @@ CONFIG_USB_MON=y
1698# CONFIG_USB_EMI62 is not set 1723# CONFIG_USB_EMI62 is not set
1699# CONFIG_USB_EMI26 is not set 1724# CONFIG_USB_EMI26 is not set
1700# CONFIG_USB_ADUTUX is not set 1725# CONFIG_USB_ADUTUX is not set
1701# CONFIG_USB_AUERSWALD is not set
1702# CONFIG_USB_RIO500 is not set 1726# CONFIG_USB_RIO500 is not set
1703# CONFIG_USB_LEGOTOWER is not set 1727# CONFIG_USB_LEGOTOWER is not set
1704# CONFIG_USB_LCD is not set 1728# CONFIG_USB_LCD is not set
@@ -1715,6 +1739,7 @@ CONFIG_USB_MON=y
1715# CONFIG_USB_TRANCEVIBRATOR is not set 1739# CONFIG_USB_TRANCEVIBRATOR is not set
1716# CONFIG_USB_IOWARRIOR is not set 1740# CONFIG_USB_IOWARRIOR is not set
1717# CONFIG_USB_TEST is not set 1741# CONFIG_USB_TEST is not set
1742# CONFIG_USB_ISIGHTFW is not set
1718# CONFIG_USB_GADGET is not set 1743# CONFIG_USB_GADGET is not set
1719# CONFIG_MMC is not set 1744# CONFIG_MMC is not set
1720# CONFIG_MEMSTICK is not set 1745# CONFIG_MEMSTICK is not set
@@ -1724,7 +1749,9 @@ CONFIG_LEDS_CLASS=y
1724# 1749#
1725# LED drivers 1750# LED drivers
1726# 1751#
1752# CONFIG_LEDS_PCA9532 is not set
1727# CONFIG_LEDS_CLEVO_MAIL is not set 1753# CONFIG_LEDS_CLEVO_MAIL is not set
1754# CONFIG_LEDS_PCA955X is not set
1728 1755
1729# 1756#
1730# LED Triggers 1757# LED Triggers
@@ -1770,6 +1797,7 @@ CONFIG_RTC_INTF_DEV=y
1770# CONFIG_RTC_DRV_PCF8583 is not set 1797# CONFIG_RTC_DRV_PCF8583 is not set
1771# CONFIG_RTC_DRV_M41T80 is not set 1798# CONFIG_RTC_DRV_M41T80 is not set
1772# CONFIG_RTC_DRV_S35390A is not set 1799# CONFIG_RTC_DRV_S35390A is not set
1800# CONFIG_RTC_DRV_FM3130 is not set
1773 1801
1774# 1802#
1775# SPI RTC drivers 1803# SPI RTC drivers
@@ -1802,11 +1830,13 @@ CONFIG_DMADEVICES=y
1802# Firmware Drivers 1830# Firmware Drivers
1803# 1831#
1804# CONFIG_EDD is not set 1832# CONFIG_EDD is not set
1833CONFIG_FIRMWARE_MEMMAP=y
1805CONFIG_EFI_VARS=y 1834CONFIG_EFI_VARS=y
1806# CONFIG_DELL_RBU is not set 1835# CONFIG_DELL_RBU is not set
1807# CONFIG_DCDBAS is not set 1836# CONFIG_DCDBAS is not set
1808CONFIG_DMIID=y 1837CONFIG_DMIID=y
1809# CONFIG_ISCSI_IBFT_FIND is not set 1838CONFIG_ISCSI_IBFT_FIND=y
1839CONFIG_ISCSI_IBFT=y
1810 1840
1811# 1841#
1812# File systems 1842# File systems
@@ -1886,14 +1916,27 @@ CONFIG_HUGETLB_PAGE=y
1886# CONFIG_CRAMFS is not set 1916# CONFIG_CRAMFS is not set
1887# CONFIG_VXFS_FS is not set 1917# CONFIG_VXFS_FS is not set
1888# CONFIG_MINIX_FS is not set 1918# CONFIG_MINIX_FS is not set
1919# CONFIG_OMFS_FS is not set
1889# CONFIG_HPFS_FS is not set 1920# CONFIG_HPFS_FS is not set
1890# CONFIG_QNX4FS_FS is not set 1921# CONFIG_QNX4FS_FS is not set
1891# CONFIG_ROMFS_FS is not set 1922# CONFIG_ROMFS_FS is not set
1892# CONFIG_SYSV_FS is not set 1923# CONFIG_SYSV_FS is not set
1893# CONFIG_UFS_FS is not set 1924# CONFIG_UFS_FS is not set
1894CONFIG_NETWORK_FILESYSTEMS=y 1925CONFIG_NETWORK_FILESYSTEMS=y
1895# CONFIG_NFS_FS is not set 1926CONFIG_NFS_FS=y
1927CONFIG_NFS_V3=y
1928CONFIG_NFS_V3_ACL=y
1929CONFIG_NFS_V4=y
1930CONFIG_ROOT_NFS=y
1896# CONFIG_NFSD is not set 1931# CONFIG_NFSD is not set
1932CONFIG_LOCKD=y
1933CONFIG_LOCKD_V4=y
1934CONFIG_NFS_ACL_SUPPORT=y
1935CONFIG_NFS_COMMON=y
1936CONFIG_SUNRPC=y
1937CONFIG_SUNRPC_GSS=y
1938CONFIG_RPCSEC_GSS_KRB5=y
1939# CONFIG_RPCSEC_GSS_SPKM3 is not set
1897# CONFIG_SMB_FS is not set 1940# CONFIG_SMB_FS is not set
1898# CONFIG_CIFS is not set 1941# CONFIG_CIFS is not set
1899# CONFIG_NCP_FS is not set 1942# CONFIG_NCP_FS is not set
@@ -1967,9 +2010,9 @@ CONFIG_NLS_UTF8=y
1967# Kernel hacking 2010# Kernel hacking
1968# 2011#
1969CONFIG_TRACE_IRQFLAGS_SUPPORT=y 2012CONFIG_TRACE_IRQFLAGS_SUPPORT=y
1970# CONFIG_PRINTK_TIME is not set 2013CONFIG_PRINTK_TIME=y
1971# CONFIG_ENABLE_WARN_DEPRECATED is not set 2014CONFIG_ENABLE_WARN_DEPRECATED=y
1972# CONFIG_ENABLE_MUST_CHECK is not set 2015CONFIG_ENABLE_MUST_CHECK=y
1973CONFIG_FRAME_WARN=2048 2016CONFIG_FRAME_WARN=2048
1974CONFIG_MAGIC_SYSRQ=y 2017CONFIG_MAGIC_SYSRQ=y
1975# CONFIG_UNUSED_SYMBOLS is not set 2018# CONFIG_UNUSED_SYMBOLS is not set
@@ -1998,6 +2041,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
1998# CONFIG_DEBUG_INFO is not set 2041# CONFIG_DEBUG_INFO is not set
1999# CONFIG_DEBUG_VM is not set 2042# CONFIG_DEBUG_VM is not set
2000# CONFIG_DEBUG_WRITECOUNT is not set 2043# CONFIG_DEBUG_WRITECOUNT is not set
2044CONFIG_DEBUG_MEMORY_INIT=y
2001# CONFIG_DEBUG_LIST is not set 2045# CONFIG_DEBUG_LIST is not set
2002# CONFIG_DEBUG_SG is not set 2046# CONFIG_DEBUG_SG is not set
2003CONFIG_FRAME_POINTER=y 2047CONFIG_FRAME_POINTER=y
@@ -2008,11 +2052,20 @@ CONFIG_FRAME_POINTER=y
2008# CONFIG_LKDTM is not set 2052# CONFIG_LKDTM is not set
2009# CONFIG_FAULT_INJECTION is not set 2053# CONFIG_FAULT_INJECTION is not set
2010# CONFIG_LATENCYTOP is not set 2054# CONFIG_LATENCYTOP is not set
2055CONFIG_SYSCTL_SYSCALL_CHECK=y
2056CONFIG_HAVE_FTRACE=y
2057CONFIG_HAVE_DYNAMIC_FTRACE=y
2058# CONFIG_FTRACE is not set
2059# CONFIG_IRQSOFF_TRACER is not set
2060# CONFIG_SYSPROF_TRACER is not set
2061# CONFIG_SCHED_TRACER is not set
2062# CONFIG_CONTEXT_SWITCH_TRACER is not set
2011CONFIG_PROVIDE_OHCI1394_DMA_INIT=y 2063CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
2012# CONFIG_SAMPLES is not set 2064# CONFIG_SAMPLES is not set
2013# CONFIG_KGDB is not set
2014CONFIG_HAVE_ARCH_KGDB=y 2065CONFIG_HAVE_ARCH_KGDB=y
2066# CONFIG_KGDB is not set
2015# CONFIG_STRICT_DEVMEM is not set 2067# CONFIG_STRICT_DEVMEM is not set
2068CONFIG_X86_VERBOSE_BOOTUP=y
2016CONFIG_EARLY_PRINTK=y 2069CONFIG_EARLY_PRINTK=y
2017CONFIG_DEBUG_STACKOVERFLOW=y 2070CONFIG_DEBUG_STACKOVERFLOW=y
2018CONFIG_DEBUG_STACK_USAGE=y 2071CONFIG_DEBUG_STACK_USAGE=y
@@ -2023,8 +2076,8 @@ CONFIG_DEBUG_RODATA=y
2023# CONFIG_DIRECT_GBPAGES is not set 2076# CONFIG_DIRECT_GBPAGES is not set
2024# CONFIG_DEBUG_RODATA_TEST is not set 2077# CONFIG_DEBUG_RODATA_TEST is not set
2025CONFIG_DEBUG_NX_TEST=m 2078CONFIG_DEBUG_NX_TEST=m
2026CONFIG_X86_MPPARSE=y
2027# CONFIG_IOMMU_DEBUG is not set 2079# CONFIG_IOMMU_DEBUG is not set
2080# CONFIG_MMIOTRACE is not set
2028CONFIG_IO_DELAY_TYPE_0X80=0 2081CONFIG_IO_DELAY_TYPE_0X80=0
2029CONFIG_IO_DELAY_TYPE_0XED=1 2082CONFIG_IO_DELAY_TYPE_0XED=1
2030CONFIG_IO_DELAY_TYPE_UDELAY=2 2083CONFIG_IO_DELAY_TYPE_UDELAY=2
@@ -2036,6 +2089,7 @@ CONFIG_IO_DELAY_0X80=y
2036CONFIG_DEFAULT_IO_DELAY_TYPE=0 2089CONFIG_DEFAULT_IO_DELAY_TYPE=0
2037CONFIG_DEBUG_BOOT_PARAMS=y 2090CONFIG_DEBUG_BOOT_PARAMS=y
2038# CONFIG_CPA_DEBUG is not set 2091# CONFIG_CPA_DEBUG is not set
2092# CONFIG_OPTIMIZE_INLINING is not set
2039 2093
2040# 2094#
2041# Security options 2095# Security options
@@ -2045,7 +2099,6 @@ CONFIG_KEYS_DEBUG_PROC_KEYS=y
2045CONFIG_SECURITY=y 2099CONFIG_SECURITY=y
2046CONFIG_SECURITY_NETWORK=y 2100CONFIG_SECURITY_NETWORK=y
2047# CONFIG_SECURITY_NETWORK_XFRM is not set 2101# CONFIG_SECURITY_NETWORK_XFRM is not set
2048CONFIG_SECURITY_CAPABILITIES=y
2049CONFIG_SECURITY_FILE_CAPABILITIES=y 2102CONFIG_SECURITY_FILE_CAPABILITIES=y
2050# CONFIG_SECURITY_ROOTPLUG is not set 2103# CONFIG_SECURITY_ROOTPLUG is not set
2051CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536 2104CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536
@@ -2106,6 +2159,10 @@ CONFIG_CRYPTO_HMAC=y
2106# CONFIG_CRYPTO_MD4 is not set 2159# CONFIG_CRYPTO_MD4 is not set
2107CONFIG_CRYPTO_MD5=y 2160CONFIG_CRYPTO_MD5=y
2108# CONFIG_CRYPTO_MICHAEL_MIC is not set 2161# CONFIG_CRYPTO_MICHAEL_MIC is not set
2162# CONFIG_CRYPTO_RMD128 is not set
2163# CONFIG_CRYPTO_RMD160 is not set
2164# CONFIG_CRYPTO_RMD256 is not set
2165# CONFIG_CRYPTO_RMD320 is not set
2109CONFIG_CRYPTO_SHA1=y 2166CONFIG_CRYPTO_SHA1=y
2110# CONFIG_CRYPTO_SHA256 is not set 2167# CONFIG_CRYPTO_SHA256 is not set
2111# CONFIG_CRYPTO_SHA512 is not set 2168# CONFIG_CRYPTO_SHA512 is not set
@@ -2155,6 +2212,7 @@ CONFIG_GENERIC_FIND_FIRST_BIT=y
2155CONFIG_GENERIC_FIND_NEXT_BIT=y 2212CONFIG_GENERIC_FIND_NEXT_BIT=y
2156# CONFIG_CRC_CCITT is not set 2213# CONFIG_CRC_CCITT is not set
2157# CONFIG_CRC16 is not set 2214# CONFIG_CRC16 is not set
2215CONFIG_CRC_T10DIF=y
2158# CONFIG_CRC_ITU_T is not set 2216# CONFIG_CRC_ITU_T is not set
2159CONFIG_CRC32=y 2217CONFIG_CRC32=y
2160# CONFIG_CRC7 is not set 2218# CONFIG_CRC7 is not set
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 58cccb6483b0..a0e1dbe67dc1 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -441,12 +441,6 @@ beyond_if:
441 regs->r8 = regs->r9 = regs->r10 = regs->r11 = 441 regs->r8 = regs->r9 = regs->r10 = regs->r11 =
442 regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0; 442 regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
443 set_fs(USER_DS); 443 set_fs(USER_DS);
444 if (unlikely(current->ptrace & PT_PTRACED)) {
445 if (current->ptrace & PT_TRACE_EXEC)
446 ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
447 else
448 send_sig(SIGTRAP, current, 0);
449 }
450 return 0; 444 return 0;
451} 445}
452 446
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 021d71bc69b5..ffc1bb4fed7d 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -201,7 +201,7 @@ sysexit_from_sys_call:
201 movl RDI-ARGOFFSET(%rsp),%r8d /* reload 5th syscall arg */ 201 movl RDI-ARGOFFSET(%rsp),%r8d /* reload 5th syscall arg */
202 .endm 202 .endm
203 203
204 .macro auditsys_exit exit 204 .macro auditsys_exit exit,ebpsave=RBP
205 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) 205 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
206 jnz int_ret_from_sys_call 206 jnz int_ret_from_sys_call
207 TRACE_IRQS_ON 207 TRACE_IRQS_ON
@@ -214,7 +214,7 @@ sysexit_from_sys_call:
214 call audit_syscall_exit 214 call audit_syscall_exit
215 GET_THREAD_INFO(%r10) 215 GET_THREAD_INFO(%r10)
216 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */ 216 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
217 movl RBP-ARGOFFSET(%rsp),%ebp /* reload user register value */ 217 movl \ebpsave-ARGOFFSET(%rsp),%ebp /* reload user register value */
218 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi 218 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
219 cli 219 cli
220 TRACE_IRQS_OFF 220 TRACE_IRQS_OFF
@@ -347,7 +347,7 @@ cstar_auditsys:
347 jmp cstar_dispatch 347 jmp cstar_dispatch
348 348
349sysretl_audit: 349sysretl_audit:
350 auditsys_exit sysretl_from_sys_call 350 auditsys_exit sysretl_from_sys_call, RCX /* user %ebp in RCX slot */
351#endif 351#endif
352 352
353cstar_tracesys: 353cstar_tracesys:
@@ -826,4 +826,10 @@ ia32_sys_call_table:
826 .quad sys32_fallocate 826 .quad sys32_fallocate
827 .quad compat_sys_timerfd_settime /* 325 */ 827 .quad compat_sys_timerfd_settime /* 325 */
828 .quad compat_sys_timerfd_gettime 828 .quad compat_sys_timerfd_gettime
829 .quad compat_sys_signalfd4
830 .quad sys_eventfd2
831 .quad sys_epoll_create1
832 .quad sys_dup3 /* 330 */
833 .quad sys_pipe2
834 .quad sys_inotify_init1
829ia32_syscall_end: 835ia32_syscall_end:
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index f00afdf61e67..d3c64088b981 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -238,7 +238,7 @@ asmlinkage long sys32_pipe(int __user *fd)
238 int retval; 238 int retval;
239 int fds[2]; 239 int fds[2];
240 240
241 retval = do_pipe(fds); 241 retval = do_pipe_flags(fds, 0);
242 if (retval) 242 if (retval)
243 goto out; 243 goto out;
244 if (copy_to_user(fd, fds, sizeof(fds))) 244 if (copy_to_user(fd, fds, sizeof(fds)))
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index fa88a1d71290..bfd10fd211cd 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -97,6 +97,8 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
97#warning ACPI uses CMPXCHG, i486 and later hardware 97#warning ACPI uses CMPXCHG, i486 and later hardware
98#endif 98#endif
99 99
100static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
101
100/* -------------------------------------------------------------------------- 102/* --------------------------------------------------------------------------
101 Boot-time Configuration 103 Boot-time Configuration
102 -------------------------------------------------------------------------- */ 104 -------------------------------------------------------------------------- */
@@ -158,6 +160,14 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
158struct acpi_mcfg_allocation *pci_mmcfg_config; 160struct acpi_mcfg_allocation *pci_mmcfg_config;
159int pci_mmcfg_config_num; 161int pci_mmcfg_config_num;
160 162
163static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
164{
165 if (!strcmp(mcfg->header.oem_id, "SGI"))
166 acpi_mcfg_64bit_base_addr = TRUE;
167
168 return 0;
169}
170
161int __init acpi_parse_mcfg(struct acpi_table_header *header) 171int __init acpi_parse_mcfg(struct acpi_table_header *header)
162{ 172{
163 struct acpi_table_mcfg *mcfg; 173 struct acpi_table_mcfg *mcfg;
@@ -190,8 +200,12 @@ int __init acpi_parse_mcfg(struct acpi_table_header *header)
190 } 200 }
191 201
192 memcpy(pci_mmcfg_config, &mcfg[1], config_size); 202 memcpy(pci_mmcfg_config, &mcfg[1], config_size);
203
204 acpi_mcfg_oem_check(mcfg);
205
193 for (i = 0; i < pci_mmcfg_config_num; ++i) { 206 for (i = 0; i < pci_mmcfg_config_num; ++i) {
194 if (pci_mmcfg_config[i].address > 0xFFFFFFFF) { 207 if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) &&
208 !acpi_mcfg_64bit_base_addr) {
195 printk(KERN_ERR PREFIX 209 printk(KERN_ERR PREFIX
196 "MMCONFIG not in low 4GB of memory\n"); 210 "MMCONFIG not in low 4GB of memory\n");
197 kfree(pci_mmcfg_config); 211 kfree(pci_mmcfg_config);
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 9220cf46aa10..c2502eb9aa83 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -73,7 +73,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
73 struct cpuinfo_x86 *c = &cpu_data(cpu); 73 struct cpuinfo_x86 *c = &cpu_data(cpu);
74 74
75 cpumask_t saved_mask; 75 cpumask_t saved_mask;
76 cpumask_of_cpu_ptr(new_mask, cpu);
77 int retval; 76 int retval;
78 unsigned int eax, ebx, ecx, edx; 77 unsigned int eax, ebx, ecx, edx;
79 unsigned int edx_part; 78 unsigned int edx_part;
@@ -92,7 +91,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
92 91
93 /* Make sure we are running on right CPU */ 92 /* Make sure we are running on right CPU */
94 saved_mask = current->cpus_allowed; 93 saved_mask = current->cpus_allowed;
95 retval = set_cpus_allowed_ptr(current, new_mask); 94 retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
96 if (retval) 95 if (retval)
97 return -1; 96 return -1;
98 97
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index a3ddad18aaa3..426e5d91b63a 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -20,7 +20,7 @@ unsigned long acpi_realmode_flags;
20/* address in low memory of the wakeup routine. */ 20/* address in low memory of the wakeup routine. */
21static unsigned long acpi_realmode; 21static unsigned long acpi_realmode;
22 22
23#ifdef CONFIG_64BIT 23#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
24static char temp_stack[10240]; 24static char temp_stack[10240];
25#endif 25#endif
26 26
@@ -86,7 +86,7 @@ int acpi_save_state_mem(void)
86#endif /* !CONFIG_64BIT */ 86#endif /* !CONFIG_64BIT */
87 87
88 header->pmode_cr0 = read_cr0(); 88 header->pmode_cr0 = read_cr0();
89 header->pmode_cr4 = read_cr4(); 89 header->pmode_cr4 = read_cr4_safe();
90 header->realmode_flags = acpi_realmode_flags; 90 header->realmode_flags = acpi_realmode_flags;
91 header->real_magic = 0x12345678; 91 header->real_magic = 0x12345678;
92 92
@@ -150,6 +150,10 @@ static int __init acpi_sleep_setup(char *str)
150 acpi_realmode_flags |= 2; 150 acpi_realmode_flags |= 2;
151 if (strncmp(str, "s3_beep", 7) == 0) 151 if (strncmp(str, "s3_beep", 7) == 0)
152 acpi_realmode_flags |= 4; 152 acpi_realmode_flags |= 4;
153#ifdef CONFIG_HIBERNATION
154 if (strncmp(str, "s4_nohwsig", 10) == 0)
155 acpi_no_s4_hw_signature();
156#endif
153 if (strncmp(str, "old_ordering", 12) == 0) 157 if (strncmp(str, "old_ordering", 12) == 0)
154 acpi_old_suspend_ordering(); 158 acpi_old_suspend_ordering();
155 str = strchr(str, ','); 159 str = strchr(str, ',');
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 2763cb37b553..65a0c1b48696 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -145,35 +145,25 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
145extern char __vsyscall_0; 145extern char __vsyscall_0;
146const unsigned char *const *find_nop_table(void) 146const unsigned char *const *find_nop_table(void)
147{ 147{
148 return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || 148 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
149 boot_cpu_data.x86 < 6 ? k8_nops : p6_nops; 149 boot_cpu_has(X86_FEATURE_NOPL))
150 return p6_nops;
151 else
152 return k8_nops;
150} 153}
151 154
152#else /* CONFIG_X86_64 */ 155#else /* CONFIG_X86_64 */
153 156
154static const struct nop {
155 int cpuid;
156 const unsigned char *const *noptable;
157} noptypes[] = {
158 { X86_FEATURE_K8, k8_nops },
159 { X86_FEATURE_K7, k7_nops },
160 { X86_FEATURE_P4, p6_nops },
161 { X86_FEATURE_P3, p6_nops },
162 { -1, NULL }
163};
164
165const unsigned char *const *find_nop_table(void) 157const unsigned char *const *find_nop_table(void)
166{ 158{
167 const unsigned char *const *noptable = intel_nops; 159 if (boot_cpu_has(X86_FEATURE_K8))
168 int i; 160 return k8_nops;
169 161 else if (boot_cpu_has(X86_FEATURE_K7))
170 for (i = 0; noptypes[i].cpuid >= 0; i++) { 162 return k7_nops;
171 if (boot_cpu_has(noptypes[i].cpuid)) { 163 else if (boot_cpu_has(X86_FEATURE_NOPL))
172 noptable = noptypes[i].noptable; 164 return p6_nops;
173 break; 165 else
174 } 166 return intel_nops;
175 }
176 return noptable;
177} 167}
178 168
179#endif /* CONFIG_X86_64 */ 169#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index c25210e6ac88..69b4d060b21c 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -29,9 +29,6 @@
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
-#define to_pages(addr, size) \
-	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
-
 #define EXIT_LOOP_COUNT 10000000
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
@@ -68,7 +65,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 	u8 *target;
 
 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-	target = (iommu->cmd_buf + tail);
+	target = iommu->cmd_buf + tail;
 	memcpy_toio(target, cmd, sizeof(*cmd));
 	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
 	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
@@ -104,16 +101,13 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
  */
 static int iommu_completion_wait(struct amd_iommu *iommu)
 {
-	int ret;
+	int ret, ready = 0;
+	unsigned status = 0;
 	struct iommu_cmd cmd;
-	volatile u64 ready = 0;
-	unsigned long ready_phys = virt_to_phys(&ready);
 	unsigned long i = 0;
 
 	memset(&cmd, 0, sizeof(cmd));
-	cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK;
-	cmd.data[1] = upper_32_bits(ready_phys);
-	cmd.data[2] = 1; /* value written to 'ready' */
+	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
 	iommu->need_sync = 0;
@@ -125,9 +119,15 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
 	while (!ready && (i < EXIT_LOOP_COUNT)) {
 		++i;
-		cpu_relax();
+		/* wait for the bit to become one */
+		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+		ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
 	}
 
+	/* set bit back to zero */
+	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
+	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
 	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
 		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
 
@@ -164,7 +164,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 	address &= PAGE_MASK;
 	CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
 	cmd.data[1] |= domid;
-	cmd.data[2] = LOW_U32(address);
+	cmd.data[2] = lower_32_bits(address);
 	cmd.data[3] = upper_32_bits(address);
 	if (s) /* size bit - we flush more than one 4kb page */
 		cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
@@ -185,7 +185,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
 			     u64 address, size_t size)
 {
 	int s = 0;
-	unsigned pages = to_pages(address, size);
+	unsigned pages = iommu_num_pages(address, size);
 
 	address &= PAGE_MASK;
 
@@ -557,8 +557,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	if (iommu->exclusion_start &&
 	    iommu->exclusion_start < dma_dom->aperture_size) {
 		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
-		int pages = to_pages(iommu->exclusion_start,
-				     iommu->exclusion_length);
+		int pages = iommu_num_pages(iommu->exclusion_start,
+					    iommu->exclusion_length);
 		dma_ops_reserve_addresses(dma_dom, startpage, pages);
 	}
 
@@ -667,7 +667,7 @@ static int get_device_resources(struct device *dev,
 	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
 
 	/* device not translated by any IOMMU in the system? */
-	if (_bdf >= amd_iommu_last_bdf) {
+	if (_bdf > amd_iommu_last_bdf) {
 		*iommu = NULL;
 		*domain = NULL;
 		*bdf = 0xffff;
@@ -767,7 +767,7 @@ static dma_addr_t __map_single(struct device *dev,
 	unsigned int pages;
 	int i;
 
-	pages = to_pages(paddr, size);
+	pages = iommu_num_pages(paddr, size);
 	paddr &= PAGE_MASK;
 
 	address = dma_ops_alloc_addresses(dev, dma_dom, pages);
@@ -802,7 +802,7 @@ static void __unmap_single(struct amd_iommu *iommu,
 	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
 		return;
 
-	pages = to_pages(dma_addr, size);
+	pages = iommu_num_pages(dma_addr, size);
 	dma_addr &= PAGE_MASK;
 	start = dma_addr;
 
@@ -1085,7 +1085,7 @@ void prealloc_protection_domains(void)
 
 	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
 		devid = (dev->bus->number << 8) | dev->devfn;
-		if (devid >= amd_iommu_last_bdf)
+		if (devid > amd_iommu_last_bdf)
 			continue;
 		devid = amd_iommu_alias_table[devid];
 		if (domain_for_device(devid))
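The open-coded to_pages() macro is replaced throughout by the generic iommu_num_pages() helper; both are assumed to compute how many 4k pages a byte range touches once the offset within the first page is taken into account. A standalone sketch of that calculation:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/*
 * Sketch of what the removed to_pages() macro computed: round the byte
 * range [addr, addr + size) up to whole 4k pages, so a partial first
 * or last page counts as a full page.
 */
static unsigned long num_pages(unsigned long addr, unsigned long size)
{
	unsigned long offset = addr & ~PAGE_MASK;

	return (offset + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	/* 0x1ff0 + 0x20 spans two pages even though size < PAGE_SIZE */
	printf("%lu\n", num_pages(0x1ff0, 0x20));	/* prints 2 */
	return 0;
}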
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index c9d8ff2eb130..a69cc0f52042 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -732,7 +732,7 @@ static int __init init_exclusion_range(struct ivmd_header *m)
 		set_device_exclusion_range(m->devid, m);
 		break;
 	case ACPI_IVMD_TYPE_ALL:
-		for (i = 0; i < amd_iommu_last_bdf; ++i)
+		for (i = 0; i <= amd_iommu_last_bdf; ++i)
 			set_device_exclusion_range(i, m);
 		break;
 	case ACPI_IVMD_TYPE_RANGE:
@@ -801,6 +801,21 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
 }
 
 /*
+ * Init the device table to not allow DMA access for devices and
+ * suppress all page faults
+ */
+static void init_device_table(void)
+{
+	u16 devid;
+
+	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
+		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
+		set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT);
+	}
+}
+
+/*
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
  */
@@ -931,10 +946,13 @@ int __init amd_iommu_init(void)
 	if (amd_iommu_pd_alloc_bitmap == NULL)
 		goto free;
 
+	/* init the device table */
+	init_device_table();
+
 	/*
 	 * let all alias entries point to itself
 	 */
-	for (i = 0; i < amd_iommu_last_bdf; ++i)
+	for (i = 0; i <= amd_iommu_last_bdf; ++i)
 		amd_iommu_alias_table[i] = i;
 
 	/*
@@ -954,15 +972,15 @@ int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;
 
-	ret = amd_iommu_init_dma_ops();
+	ret = sysdev_class_register(&amd_iommu_sysdev_class);
 	if (ret)
 		goto free;
 
-	ret = sysdev_class_register(&amd_iommu_sysdev_class);
+	ret = sysdev_register(&device_amd_iommu);
 	if (ret)
 		goto free;
 
-	ret = sysdev_register(&device_amd_iommu);
+	ret = amd_iommu_init_dma_ops();
 	if (ret)
 		goto free;
 
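The new init_device_table() puts every device entry into a deny-by-default state before the IOMMUs are enabled. A simplified sketch of the same idea, with hypothetical bit names and a flat byte array standing in for the real device table:

#include <stdint.h>

#define MAX_DEVID		0xffff	/* 16-bit PCI bus/dev/fn */

/* Hypothetical per-device control bits, packed into one byte per device. */
#define DEV_VALID		(1 << 0)
#define DEV_TRANSLATE		(1 << 1)
#define DEV_NO_PAGE_FAULT	(1 << 2)

static uint8_t dev_table[MAX_DEVID + 1];

/*
 * Sketch of the deny-by-default setup above: every device id up to and
 * including last_bdf is marked valid with translation enabled and page
 * faults suppressed, so DMA is effectively blocked until a protection
 * domain is attached. Note the inclusive <= bound; several off-by-one
 * fixes in this merge are about exactly that.
 */
static void init_device_table(uint32_t last_bdf)
{
	uint32_t devid;

	for (devid = 0; devid <= last_bdf; ++devid)
		dev_table[devid] = DEV_VALID | DEV_TRANSLATE | DEV_NO_PAGE_FAULT;
}

int main(void)
{
	init_device_table(0xff);
	return dev_table[0] == (DEV_VALID | DEV_TRANSLATE | DEV_NO_PAGE_FAULT) ? 0 : 1;
}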
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index d6c898358371..f88bd0d982b0 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1454,8 +1454,6 @@ void disconnect_bsp_APIC(int virt_wire_setup)
 	}
 }
 
-unsigned int __cpuinitdata maxcpus = NR_CPUS;
-
 void __cpuinit generic_processor_info(int apicid, int version)
 {
 	int cpu;
@@ -1482,12 +1480,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
 		return;
 	}
 
-	if (num_processors >= maxcpus) {
-		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
-			" Processor ignored.\n", maxcpus);
-		return;
-	}
-
 	num_processors++;
 	cpus_complement(tmp_map, cpu_present_map);
 	cpu = first_cpu(tmp_map);
@@ -1720,15 +1712,19 @@ static int __init parse_lapic_timer_c2_ok(char *arg)
 }
 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
 
-static int __init apic_set_verbosity(char *str)
+static int __init apic_set_verbosity(char *arg)
 {
-	if (strcmp("debug", str) == 0)
+	if (!arg)
+		return -EINVAL;
+
+	if (strcmp(arg, "debug") == 0)
 		apic_verbosity = APIC_DEBUG;
-	else if (strcmp("verbose", str) == 0)
+	else if (strcmp(arg, "verbose") == 0)
 		apic_verbosity = APIC_VERBOSE;
-	return 1;
+
+	return 0;
 }
-__setup("apic=", apic_set_verbosity);
+early_param("apic", apic_set_verbosity);
 
 static int __init lapic_insert_resource(void)
 {
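apic_set_verbosity() moves from __setup() to early_param(), which hands the handler only the text after "apic=" (possibly NULL) and expects 0 on success. A minimal user-space sketch of that parsing contract:

#include <stdio.h>
#include <string.h>

#define EINVAL 22

enum { APIC_QUIET, APIC_VERBOSE, APIC_DEBUG };
static int apic_verbosity = APIC_QUIET;

/*
 * Sketch of the early_param()-style handler: unlike the old __setup()
 * variant it must tolerate a missing argument ("apic" with no '=') and
 * signal success with 0 rather than 1.
 */
static int apic_set_verbosity(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (strcmp(arg, "debug") == 0)
		apic_verbosity = APIC_DEBUG;
	else if (strcmp(arg, "verbose") == 0)
		apic_verbosity = APIC_VERBOSE;

	return 0;
}

int main(void)
{
	apic_set_verbosity("debug");
	printf("%d\n", apic_verbosity);		/* prints 2 */
	return 0;
}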
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 7f1f030da7ee..446c062e831c 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -90,7 +90,6 @@ static unsigned long apic_phys;
 
 unsigned long mp_lapic_addr;
 
-unsigned int __cpuinitdata maxcpus = NR_CPUS;
 /*
  * Get the LAPIC version
  */
@@ -1062,12 +1061,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
 		return;
 	}
 
-	if (num_processors >= maxcpus) {
-		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
-			" Processor ignored.\n", maxcpus);
-		return;
-	}
-
 	num_processors++;
 	cpus_complement(tmp_map, cpu_present_map);
 	cpu = first_cpu(tmp_map);
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index bf9b441331e9..9ee24e6bc4b0 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -219,7 +219,6 @@
 #include <linux/time.h>
 #include <linux/sched.h>
 #include <linux/pm.h>
-#include <linux/pm_legacy.h>
 #include <linux/capability.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 84a8220a6072..a6ef672adbba 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -56,9 +56,22 @@ void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
 
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
-		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+		/*
+		 * There is a known erratum on Pentium III and Core Solo
+		 * and Core Duo CPUs.
+		 * " Page with PAT set to WC while associated MTRR is UC
+		 * may consolidate to UC "
+		 * Because of this erratum, it is better to stick with
+		 * setting WC in MTRR rather than using PAT on these CPUs.
+		 *
+		 * Enable PAT WC only on P4, Core 2 or later CPUs.
+		 */
+		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
 			return;
-		break;
+
+		pat_disable("PAT WC disabled due to known CPU erratum.");
+		return;
+
 	case X86_VENDOR_AMD:
 	case X86_VENDOR_CENTAUR:
 	case X86_VENDOR_TRANSMETA:
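The rewritten Intel branch gates PAT write-combining on family/model instead of silently falling through. A small sketch of that gate, assuming the usual CPUID family/model encoding:

#include <stdio.h>

/*
 * Sketch of the family/model check added above: PAT write-combining is
 * only trusted on Intel family 0xF (P4) and family 6 models >= 15
 * (Core 2 and later); older family-6 parts hit the WC/UC consolidation
 * erratum and stay on MTRR-based WC.
 */
static int intel_pat_wc_ok(unsigned family, unsigned model)
{
	return family > 6 || (family == 6 && model >= 15);
}

int main(void)
{
	printf("%d\n", intel_pat_wc_ok(6, 14));	/* Core Duo: 0 */
	printf("%d\n", intel_pat_wc_ok(6, 15));	/* Core 2:   1 */
	printf("%d\n", intel_pat_wc_ok(15, 4));	/* P4:       1 */
	return 0;
}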
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index cae9cabc3031..18514ed26104 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -31,6 +31,11 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 		if (c->x86_power & (1<<8))
 			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
+
+	/* Set MTRR capability flag if appropriate */
+	if (c->x86_model == 13 || c->x86_model == 9 ||
+	    (c->x86_model == 8 && c->x86_mask >= 8))
+		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 				mbytes);
 		}
 
-		/* Set MTRR capability flag if appropriate */
-		if (c->x86_model == 13 || c->x86_model == 9 ||
-		    (c->x86_model == 8 && c->x86_mask >= 8))
-			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 		break;
 	}
 
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c9b58a806e85..c8e315f1aa83 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -50,6 +50,8 @@ static double __initdata y = 3145727.0;
  */
 static void __init check_fpu(void)
 {
+	s32 fdiv_bug;
+
 	if (!boot_cpu_data.hard_math) {
 #ifndef CONFIG_MATH_EMULATION
 		printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
@@ -74,8 +76,10 @@ static void __init check_fpu(void)
 		"fistpl %0\n\t"
 		"fwait\n\t"
 		"fninit"
-		: "=m" (*&boot_cpu_data.fdiv_bug)
+		: "=m" (*&fdiv_bug)
 		: "m" (*&x), "m" (*&y));
+
+	boot_cpu_data.fdiv_bug = fdiv_bug;
 	if (boot_cpu_data.fdiv_bug)
 		printk("Hmm, FPU with FDIV bug.\n");
 }
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index e0f45edd6a55..a0534c04d38a 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -314,6 +314,16 @@ enum {
 	EAMD3D		= 1<<20,
 };
 
+static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+{
+	switch (c->x86) {
+	case 5:
+		/* Emulate MTRRs using Centaur's MCR. */
+		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
+		break;
+	}
+}
+
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
 
@@ -462,6 +472,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
+	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
 	.c_size_cache	= centaur_size_cache,
 };
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 80ab20d4fa39..4e456bd955bb 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,6 +13,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -334,11 +335,24 @@ static void __init early_cpu_detect(void)
 
 	get_cpu_vendor(c, 1);
 
+	early_get_cap(c);
+
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
+}
 
-	early_get_cap(c);
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6; unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect. In the latter case it doesn't even *fail*
+ * reliably, so probing for it doesn't even work. Disable it completely
+ * unless we can find a reliable way to detect all the broken cases.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
@@ -395,8 +409,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 		}
 
 		init_scattered_cpuid_features(c);
+		detect_nopl(c);
 	}
-
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index bca2d6980e82..305b465889b0 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -18,6 +18,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #include <asm/numa.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
@@ -215,6 +216,39 @@ static void __init early_cpu_support_print(void)
 	}
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6, unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect. Hence, probe for it based on first
+ * principles.
+ *
+ * Note: no 64-bit chip is known to lack these, but put the code here
+ * for consistency with 32 bits, and to make it utterly trivial to
+ * diagnose the problem should it ever surface.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     "	.section .fixup,\"ax\"\n"
+			     "3:	xor %0,%0\n"
+			     "	jmp 2b\n"
+			     "	.previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
+}
+
 static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
 
 void __init early_cpu_init(void)
@@ -313,6 +347,8 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 		c->x86_phys_bits = eax & 0xff;
 	}
 
+	detect_nopl(c);
+
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
@@ -544,17 +580,20 @@ void pda_init(int cpu)
 		/* others are initialized in smpboot.c */
 		pda->pcurrent = &init_task;
 		pda->irqstackptr = boot_cpu_stack;
+		pda->irqstackptr += IRQSTACKSIZE - 64;
 	} else {
-		pda->irqstackptr = (char *)
-			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-		if (!pda->irqstackptr)
-			panic("cannot allocate irqstack for cpu %d", cpu);
+		if (!pda->irqstackptr) {
+			pda->irqstackptr = (char *)
+				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
+			if (!pda->irqstackptr)
+				panic("cannot allocate irqstack for cpu %d",
+				      cpu);
+			pda->irqstackptr += IRQSTACKSIZE - 64;
+		}
 
 		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
 			pda->nodenumber = cpu_to_node(cpu);
 	}
-
-	pda->irqstackptr += IRQSTACKSIZE-64;
 }
 
 char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
@@ -652,19 +691,22 @@ void __cpuinit cpu_init(void)
 	/*
 	 * set up and load the per-CPU TSS
 	 */
-	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+	if (!orig_ist->ist[0]) {
 		static const unsigned int order[N_EXCEPTION_STACKS] = {
 		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
 		  [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
 		};
-		if (cpu) {
-			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-			if (!estacks)
-				panic("Cannot allocate exception stack %ld %d\n",
-				      v, cpu);
+		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+			if (cpu) {
+				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
+				if (!estacks)
+					panic("Cannot allocate exception "
+					      "stack %ld %d\n", v, cpu);
+			}
+			estacks += PAGE_SIZE << order[v];
+			orig_ist->ist[v] = t->x86_tss.ist[v] =
+					(unsigned long)estacks;
 		}
-		estacks += PAGE_SIZE << order[v];
-		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
 	}
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
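The 64-bit detect_nopl() above executes the NOPL opcode and relies on the kernel's exception fixup table to catch #UD. A user-space sketch of the same probe, using a SIGILL handler instead of a fixup entry:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static sigjmp_buf nopl_jmp;

static void sigill_handler(int sig)
{
	(void)sig;
	siglongjmp(nopl_jmp, 1);
}

/*
 * Sketch only: execute the 3-byte NOPL encoding (0f 1f c0, "nopl %eax")
 * and treat a SIGILL as "not supported". The kernel version does the
 * equivalent with _ASM_EXTABLE instead of signals.
 */
static int cpu_has_nopl(void)
{
	struct sigaction sa, old;
	int ok = 0;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = sigill_handler;
	sigaction(SIGILL, &sa, &old);

	if (sigsetjmp(nopl_jmp, 1) == 0) {
		asm volatile(".byte 0x0f, 0x1f, 0xc0");
		ok = 1;
	}

	sigaction(SIGILL, &old, NULL);
	return ok;
}

int main(void)
{
	printf("nopl: %s\n", cpu_has_nopl() ? "yes" : "no");
	return 0;
}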
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig
index cb7a5715596d..efae3b22a0ff 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig
@@ -235,9 +235,9 @@ config X86_LONGHAUL
 	  If in doubt, say N.
 
 config X86_E_POWERSAVER
-	tristate "VIA C7 Enhanced PowerSaver (EXPERIMENTAL)"
+	tristate "VIA C7 Enhanced PowerSaver"
 	select CPU_FREQ_TABLE
-	depends on X86_32 && EXPERIMENTAL
+	depends on X86_32
 	help
 	  This adds the CPUFreq driver for VIA C7 processors.
 
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ff2fff56f0a8..dd097b835839 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -200,12 +200,10 @@ static void drv_read(struct drv_cmd *cmd)
 static void drv_write(struct drv_cmd *cmd)
 {
 	cpumask_t saved_mask = current->cpus_allowed;
-	cpumask_of_cpu_ptr_declare(cpu_mask);
 	unsigned int i;
 
 	for_each_cpu_mask_nr(i, cmd->mask) {
-		cpumask_of_cpu_ptr_next(cpu_mask, i);
-		set_cpus_allowed_ptr(current, cpu_mask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
 		do_drv_write(cmd);
 	}
 
@@ -269,12 +267,11 @@ static unsigned int get_measured_perf(unsigned int cpu)
 	} aperf_cur, mperf_cur;
 
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	unsigned int perf_percent;
 	unsigned int retval;
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, cpu_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (get_cpu() != cpu) {
 		/* We were not able to run on requested processor */
 		put_cpu();
@@ -340,7 +337,6 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
 	unsigned int freq;
 	unsigned int cached_freq;
@@ -353,7 +349,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	}
 
 	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(cpu_mask), data);
+	freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.
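acpi-cpufreq and the other cpufreq drivers below drop the cpumask_of_cpu_ptr helpers in favour of a temporary &cpumask_of_cpu(cpu) affinity switch. A user-space analogue of that "pin, do the per-CPU work, restore" pattern, sketched with sched_setaffinity():

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/*
 * Sketch only: temporarily pin the current task to one CPU, run fn()
 * there, then restore the original affinity mask. The kernel code does
 * the same dance with set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)).
 */
static int run_on_cpu(int cpu, void (*fn)(void))
{
	cpu_set_t saved, target;

	if (sched_getaffinity(0, sizeof(saved), &saved))
		return -1;

	CPU_ZERO(&target);
	CPU_SET(cpu, &target);
	if (sched_setaffinity(0, sizeof(target), &target))
		return -1;

	fn();	/* runs on 'cpu' once the scheduler has migrated us */

	return sched_setaffinity(0, sizeof(saved), &saved);
}

static void report(void)
{
	printf("running on cpu %d\n", sched_getcpu());
}

int main(void)
{
	return run_on_cpu(0, report);
}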
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
index 94619c22f563..e4a4bf870e94 100644
--- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
@@ -44,7 +44,7 @@ struct s_elan_multiplier {
  * It is important that the frequencies
  * are listed in ascending order here!
  */
-struct s_elan_multiplier elan_multiplier[] = {
+static struct s_elan_multiplier elan_multiplier[] = {
 	{1000,	0x02,	0x18},
 	{2000,	0x02,	0x10},
 	{4000,	0x02,	0x08},
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 53c7b6936973..84bb395038d8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -66,7 +66,6 @@ static u32 find_freq_from_fid(u32 fid)
66 return 800 + (fid * 100); 66 return 800 + (fid * 100);
67} 67}
68 68
69
70/* Return a frequency in KHz, given an input fid */ 69/* Return a frequency in KHz, given an input fid */
71static u32 find_khz_freq_from_fid(u32 fid) 70static u32 find_khz_freq_from_fid(u32 fid)
72{ 71{
@@ -78,7 +77,6 @@ static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 p
78 return data[pstate].frequency; 77 return data[pstate].frequency;
79} 78}
80 79
81
82/* Return the vco fid for an input fid 80/* Return the vco fid for an input fid
83 * 81 *
84 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids 82 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
@@ -166,7 +164,6 @@ static void fidvid_msr_init(void)
166 wrmsr(MSR_FIDVID_CTL, lo, hi); 164 wrmsr(MSR_FIDVID_CTL, lo, hi);
167} 165}
168 166
169
170/* write the new fid value along with the other control fields to the msr */ 167/* write the new fid value along with the other control fields to the msr */
171static int write_new_fid(struct powernow_k8_data *data, u32 fid) 168static int write_new_fid(struct powernow_k8_data *data, u32 fid)
172{ 169{
@@ -479,12 +476,11 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
479static int check_supported_cpu(unsigned int cpu) 476static int check_supported_cpu(unsigned int cpu)
480{ 477{
481 cpumask_t oldmask; 478 cpumask_t oldmask;
482 cpumask_of_cpu_ptr(cpu_mask, cpu);
483 u32 eax, ebx, ecx, edx; 479 u32 eax, ebx, ecx, edx;
484 unsigned int rc = 0; 480 unsigned int rc = 0;
485 481
486 oldmask = current->cpus_allowed; 482 oldmask = current->cpus_allowed;
487 set_cpus_allowed_ptr(current, cpu_mask); 483 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
488 484
489 if (smp_processor_id() != cpu) { 485 if (smp_processor_id() != cpu) {
490 printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); 486 printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -1017,7 +1013,6 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
1017static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) 1013static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
1018{ 1014{
1019 cpumask_t oldmask; 1015 cpumask_t oldmask;
1020 cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
1021 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); 1016 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1022 u32 checkfid; 1017 u32 checkfid;
1023 u32 checkvid; 1018 u32 checkvid;
@@ -1032,7 +1027,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
1032 1027
1033 /* only run on specific CPU from here on */ 1028 /* only run on specific CPU from here on */
1034 oldmask = current->cpus_allowed; 1029 oldmask = current->cpus_allowed;
1035 set_cpus_allowed_ptr(current, cpu_mask); 1030 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
1036 1031
1037 if (smp_processor_id() != pol->cpu) { 1032 if (smp_processor_id() != pol->cpu) {
1038 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); 1033 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1107,7 +1102,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1107{ 1102{
1108 struct powernow_k8_data *data; 1103 struct powernow_k8_data *data;
1109 cpumask_t oldmask; 1104 cpumask_t oldmask;
1110 cpumask_of_cpu_ptr_declare(newmask);
1111 int rc; 1105 int rc;
1112 1106
1113 if (!cpu_online(pol->cpu)) 1107 if (!cpu_online(pol->cpu))
@@ -1159,8 +1153,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1159 1153
1160 /* only run on specific CPU from here on */ 1154 /* only run on specific CPU from here on */
1161 oldmask = current->cpus_allowed; 1155 oldmask = current->cpus_allowed;
1162 cpumask_of_cpu_ptr_next(newmask, pol->cpu); 1156 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
1163 set_cpus_allowed_ptr(current, newmask);
1164 1157
1165 if (smp_processor_id() != pol->cpu) { 1158 if (smp_processor_id() != pol->cpu) {
1166 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); 1159 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1182,7 +1175,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1182 set_cpus_allowed_ptr(current, &oldmask); 1175 set_cpus_allowed_ptr(current, &oldmask);
1183 1176
1184 if (cpu_family == CPU_HW_PSTATE) 1177 if (cpu_family == CPU_HW_PSTATE)
1185 pol->cpus = *newmask; 1178 pol->cpus = cpumask_of_cpu(pol->cpu);
1186 else 1179 else
1187 pol->cpus = per_cpu(cpu_core_map, pol->cpu); 1180 pol->cpus = per_cpu(cpu_core_map, pol->cpu);
1188 data->available_cores = &(pol->cpus); 1181 data->available_cores = &(pol->cpus);
@@ -1248,7 +1241,6 @@ static unsigned int powernowk8_get (unsigned int cpu)
1248{ 1241{
1249 struct powernow_k8_data *data; 1242 struct powernow_k8_data *data;
1250 cpumask_t oldmask = current->cpus_allowed; 1243 cpumask_t oldmask = current->cpus_allowed;
1251 cpumask_of_cpu_ptr(newmask, cpu);
1252 unsigned int khz = 0; 1244 unsigned int khz = 0;
1253 unsigned int first; 1245 unsigned int first;
1254 1246
@@ -1258,7 +1250,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
1258 if (!data) 1250 if (!data)
1259 return -EINVAL; 1251 return -EINVAL;
1260 1252
1261 set_cpus_allowed_ptr(current, newmask); 1253 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
1262 if (smp_processor_id() != cpu) { 1254 if (smp_processor_id() != cpu) {
1263 printk(KERN_ERR PFX 1255 printk(KERN_ERR PFX
1264 "limiting to CPU %d failed in powernowk8_get\n", cpu); 1256 "limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index ca2ac13b7af2..15e13c01cc36 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -324,10 +324,9 @@ static unsigned int get_cur_freq(unsigned int cpu)
 	unsigned l, h;
 	unsigned clock_freq;
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr(new_mask, cpu);
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, new_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu)
 		return 0;
 
@@ -585,15 +584,12 @@ static int centrino_target (struct cpufreq_policy *policy,
 	 * Best effort undo..
 	 */
 
-	if (!cpus_empty(*covered_cpus)) {
-		cpumask_of_cpu_ptr_declare(new_mask);
-
+	if (!cpus_empty(*covered_cpus))
 		for_each_cpu_mask_nr(j, *covered_cpus) {
-			cpumask_of_cpu_ptr_next(new_mask, j);
-			set_cpus_allowed_ptr(current, new_mask);
+			set_cpus_allowed_ptr(current,
+					     &cpumask_of_cpu(j));
 			wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
 		}
-	}
 
 	tmp = freqs.new;
 	freqs.new = freqs.old;
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 2f3728dc24f6..191f7263c61d 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -244,8 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	cpumask_of_cpu_ptr(newmask, cpu);
-	return _speedstep_get(newmask);
+	return _speedstep_get(&cpumask_of_cpu(cpu));
 }
 
 /**
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 3fd7a67bb06a..898a5a2002ed 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -15,13 +15,11 @@
15/* 15/*
16 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU 16 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
17 */ 17 */
18static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) 18static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
19{ 19{
20 unsigned char ccr2, ccr3; 20 unsigned char ccr2, ccr3;
21 unsigned long flags;
22 21
23 /* we test for DEVID by checking whether CCR3 is writable */ 22 /* we test for DEVID by checking whether CCR3 is writable */
24 local_irq_save(flags);
25 ccr3 = getCx86(CX86_CCR3); 23 ccr3 = getCx86(CX86_CCR3);
26 setCx86(CX86_CCR3, ccr3 ^ 0x80); 24 setCx86(CX86_CCR3, ccr3 ^ 0x80);
27 getCx86(0xc0); /* dummy to change bus */ 25 getCx86(0xc0); /* dummy to change bus */
@@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
44 *dir0 = getCx86(CX86_DIR0); 42 *dir0 = getCx86(CX86_DIR0);
45 *dir1 = getCx86(CX86_DIR1); 43 *dir1 = getCx86(CX86_DIR1);
46 } 44 }
47 local_irq_restore(flags);
48} 45}
49 46
47static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
48{
49 unsigned long flags;
50
51 local_irq_save(flags);
52 __do_cyrix_devid(dir0, dir1);
53 local_irq_restore(flags);
54}
50/* 55/*
51 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in 56 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
52 * order to identify the Cyrix CPU model after we're out of setup.c 57 * order to identify the Cyrix CPU model after we're out of setup.c
@@ -134,23 +139,6 @@ static void __cpuinit set_cx86_memwb(void)
134 setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); 139 setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
135} 140}
136 141
137static void __cpuinit set_cx86_inc(void)
138{
139 unsigned char ccr3;
140
141 printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
142
143 ccr3 = getCx86(CX86_CCR3);
144 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
145 /* PCR1 -- Performance Control */
146 /* Incrementor on, whatever that is */
147 setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
148 /* PCR0 -- Performance Control */
149 /* Incrementor Margin 10 */
150 setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
151 setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
152}
153
154/* 142/*
155 * Configure later MediaGX and/or Geode processor. 143 * Configure later MediaGX and/or Geode processor.
156 */ 144 */
@@ -174,11 +162,28 @@ static void __cpuinit geode_configure(void)
174 162
175 set_cx86_memwb(); 163 set_cx86_memwb();
176 set_cx86_reorder(); 164 set_cx86_reorder();
177 set_cx86_inc();
178 165
179 local_irq_restore(flags); 166 local_irq_restore(flags);
180} 167}
181 168
169static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
170{
171 unsigned char dir0, dir0_msn, dir1 = 0;
172
173 __do_cyrix_devid(&dir0, &dir1);
174 dir0_msn = dir0 >> 4; /* identifies CPU "family" */
175
176 switch (dir0_msn) {
177 case 3: /* 6x86/6x86L */
178 /* Emulate MTRRs using Cyrix's ARRs. */
179 set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
180 break;
181 case 5: /* 6x86MX/M II */
182 /* Emulate MTRRs using Cyrix's ARRs. */
183 set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
184 break;
185 }
186}
182 187
183static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) 188static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
184{ 189{
@@ -434,6 +439,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
434static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { 439static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
435 .c_vendor = "Cyrix", 440 .c_vendor = "Cyrix",
436 .c_ident = { "CyrixInstead" }, 441 .c_ident = { "CyrixInstead" },
442 .c_early_init = early_init_cyrix,
437 .c_init = init_cyrix, 443 .c_init = init_cyrix,
438 .c_identify = cyrix_identify, 444 .c_identify = cyrix_identify,
439}; 445};
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c
index e43ad4ad4cba..c9017799497c 100644
--- a/arch/x86/kernel/cpu/feature_names.c
+++ b/arch/x86/kernel/cpu/feature_names.c
@@ -39,7 +39,8 @@ const char * const x86_cap_flags[NCAPINTS*32] = {
 	NULL, NULL, NULL, NULL,
 	"constant_tsc", "up", NULL, "arch_perfmon",
 	"pebs", "bts", NULL, NULL,
-	"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	"rep_good", NULL, NULL, NULL,
+	"nopl", NULL, NULL, NULL,
 	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
 	/* Intel-defined (#2) */
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 650d40f7912b..6b0a10b002f1 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -516,7 +516,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 	unsigned long j;
 	int retval;
 	cpumask_t oldmask;
-	cpumask_of_cpu_ptr(newmask, cpu);
 
 	if (num_cache_leaves == 0)
 		return -ENOENT;
@@ -527,7 +526,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		return -ENOMEM;
 
 	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, newmask);
+	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (retval)
 		goto out;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 65a339678ece..726a5fcdf341 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -759,6 +759,7 @@ static struct sysdev_class mce_sysclass = {
 };
 
 DEFINE_PER_CPU(struct sys_device, device_mce);
+void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata;
 
 /* Why are there no generic functions for this? */
 #define ACCESSOR(name, var, start) \
@@ -883,9 +884,13 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		mce_create_device(cpu);
+		if (threshold_cpu_callback)
+			threshold_cpu_callback(action, cpu);
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
+		if (threshold_cpu_callback)
+			threshold_cpu_callback(action, cpu);
 		mce_remove_device(cpu);
 		break;
 	}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 88736cadbaa6..5eb390a4b2e9 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -628,6 +628,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
628 deallocate_threshold_block(cpu, bank); 628 deallocate_threshold_block(cpu, bank);
629 629
630free_out: 630free_out:
631 kobject_del(b->kobj);
631 kobject_put(b->kobj); 632 kobject_put(b->kobj);
632 kfree(b); 633 kfree(b);
633 per_cpu(threshold_banks, cpu)[bank] = NULL; 634 per_cpu(threshold_banks, cpu)[bank] = NULL;
@@ -645,14 +646,11 @@ static void threshold_remove_device(unsigned int cpu)
645} 646}
646 647
647/* get notified when a cpu comes on/off */ 648/* get notified when a cpu comes on/off */
648static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb, 649static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action,
649 unsigned long action, void *hcpu) 650 unsigned int cpu)
650{ 651{
651 /* cpu was unsigned int to begin with */
652 unsigned int cpu = (unsigned long)hcpu;
653
654 if (cpu >= NR_CPUS) 652 if (cpu >= NR_CPUS)
655 goto out; 653 return;
656 654
657 switch (action) { 655 switch (action) {
658 case CPU_ONLINE: 656 case CPU_ONLINE:
@@ -666,14 +664,8 @@ static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
666 default: 664 default:
667 break; 665 break;
668 } 666 }
669 out:
670 return NOTIFY_OK;
671} 667}
672 668
673static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
674 .notifier_call = threshold_cpu_callback,
675};
676
677static __init int threshold_init_device(void) 669static __init int threshold_init_device(void)
678{ 670{
679 unsigned lcpu = 0; 671 unsigned lcpu = 0;
@@ -684,7 +676,7 @@ static __init int threshold_init_device(void)
684 if (err) 676 if (err)
685 return err; 677 return err;
686 } 678 }
687 register_hotcpu_notifier(&threshold_cpu_notifier); 679 threshold_cpu_callback = amd_64_threshold_cpu_callback;
688 return 0; 680 return 0;
689} 681}
690 682
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 509bd3d9eacd..cb7d3b6a80eb 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -379,6 +379,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 			     unsigned long *size, mtrr_type *type)
 {
 	unsigned int mask_lo, mask_hi, base_lo, base_hi;
+	unsigned int tmp, hi;
 
 	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
 	if ((mask_lo & 0x800) == 0) {
@@ -392,8 +393,23 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
 
 	/* Work out the shifted address mask. */
-	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
-		  | mask_lo >> PAGE_SHIFT;
+	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
+	mask_lo = size_or_mask | tmp;
+	/* Expand tmp with high bits to all 1s*/
+	hi = fls(tmp);
+	if (hi > 0) {
+		tmp |= ~((1<<(hi - 1)) - 1);
+
+		if (tmp != mask_lo) {
+			static int once = 1;
+
+			if (once) {
+				printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
+				once = 0;
+			}
+			mask_lo = tmp;
+		}
+	}
 
 	/* This works correctly if size is a power of two, i.e. a
 	   contiguous range. */
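The generic_get_mtrr() fixup extends a BIOS-truncated mask with 1s above its highest set bit, using fls(). A standalone sketch of that bit manipulation with a portable fls() stand-in:

#include <stdio.h>

/* Portable stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls32(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/*
 * Sketch of the BIOS mask fixup above: if the programmed MTRR mask does
 * not extend all the way to the top (the BIOS only filled in the bits
 * below the physical address width), fill in 1s above the highest bit
 * that is set.
 */
static unsigned int fixup_mask(unsigned int mask)
{
	int hi = fls32(mask);

	if (hi > 0)
		mask |= ~((1u << (hi - 1)) - 1);
	return mask;
}

int main(void)
{
	/* 0x0ff00000 becomes 0xfff00000: high bits filled in */
	printf("%#x\n", fixup_mask(0x0ff00000));
	return 0;
}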
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 6f23969c8faf..b117d7f8a564 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -1496,11 +1496,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
 	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
 	if (!highest_pfn) {
-		if (!kvm_para_available()) {
-			printk(KERN_WARNING
-				"WARNING: strange, CPU MTRRs all blank?\n");
-			WARN_ON(1);
-		}
+		WARN(!kvm_para_available(), KERN_WARNING
+		     "WARNING: strange, CPU MTRRs all blank?\n");
 		return 0;
 	}
 
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index de7439f82b92..05cc22dbd4ff 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -478,7 +478,13 @@ static int setup_p4_watchdog(unsigned nmi_hz)
 		perfctr_msr = MSR_P4_IQ_PERFCTR1;
 		evntsel_msr = MSR_P4_CRU_ESCR0;
 		cccr_msr = MSR_P4_IQ_CCCR1;
-		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
+
+		/* Pentium 4 D processors don't support P4_CCCR_OVF_PMI1 */
+		if (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask == 4)
+			cccr_val = P4_CCCR_OVF_PMI0;
+		else
+			cccr_val = P4_CCCR_OVF_PMI1;
+		cccr_val |= P4_CCCR_ESCR_SELECT(4);
 	}
 
 	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 14b11b3be31c..8e9cd6a8ec12 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -89,6 +89,8 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 	struct cpuid_regs cmd;
 	int cpu = iminor(file->f_path.dentry->d_inode);
 	u64 pos = *ppos;
+	ssize_t bytes = 0;
+	int err = 0;
 
 	if (count % 16)
 		return -EINVAL;	/* Invalid chunk size */
@@ -96,14 +98,19 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 	for (; count; count -= 16) {
 		cmd.eax = pos;
 		cmd.ecx = pos >> 32;
-		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
-		if (copy_to_user(tmp, &cmd, 16))
-			return -EFAULT;
+		err = smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
+		if (err)
+			break;
+		if (copy_to_user(tmp, &cmd, 16)) {
+			err = -EFAULT;
+			break;
+		}
 		tmp += 16;
+		bytes += 16;
 		*ppos = ++pos;
 	}
 
-	return tmp - buf;
+	return bytes ? bytes : err;
 }
 
 static int cpuid_open(struct inode *inode, struct file *file)
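cpuid_read() now reports how many bytes actually reached user space and only returns an error when nothing was copied. A self-contained sketch of that partial-read convention, with memcpy() standing in for copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch of the return-value convention adopted above for a chunked
 * read: if any whole records were copied out before a failure, report
 * the byte count; only report the error when nothing was transferred.
 */
static long chunked_read(const char *src, size_t src_len,
			 char *dst, size_t count, size_t chunk)
{
	long bytes = 0;
	int err = 0;

	if (count % chunk)
		return -EINVAL;

	for (; count; count -= chunk) {
		if ((size_t)bytes + chunk > src_len) {
			err = -EFAULT;	/* stand-in for a failed copy */
			break;
		}
		memcpy(dst + bytes, src + bytes, chunk);
		bytes += chunk;
	}

	return bytes ? bytes : err;
}

int main(void)
{
	char src[32] = "0123456789abcdef0123456789abcde";
	char dst[64];

	/* asks for 48 bytes, only 32 available: returns 32, not -EFAULT */
	printf("%ld\n", chunked_read(src, sizeof(src), dst, 48, 16));
	return 0;
}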
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 9af89078f7bb..66e48aa2dd1b 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1203,7 +1203,7 @@ static int __init parse_memmap_opt(char *p)
 	if (!p)
 		return -EINVAL;
 
-	if (!strcmp(p, "exactmap")) {
+	if (!strncmp(p, "exactmap", 8)) {
 #ifdef CONFIG_CRASH_DUMP
 		/*
 		 * If we are doing a crash dump, we still need to know
diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
index 4b63c8e1f13b..5cab48ee61a4 100644
--- a/arch/x86/kernel/efi_32.c
+++ b/arch/x86/kernel/efi_32.c
@@ -53,7 +53,7 @@ void efi_call_phys_prelog(void)
 	 * directory. If I have PAE, I just need to duplicate one entry in
 	 * page directory.
 	 */
-	cr4 = read_cr4();
+	cr4 = read_cr4_safe();
 
 	if (cr4 & X86_CR4_PAE) {
 		efi_bak_pg_dir_pointer[0].pgd =
@@ -91,7 +91,7 @@ void efi_call_phys_epilog(void)
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
 
-	cr4 = read_cr4();
+	cr4 = read_cr4_safe();
 
 	if (cr4 & X86_CR4_PAE) {
 		swapper_pg_dir[pgd_index(0)].pgd =
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 1fa8be5bd217..eaff0bbb1444 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -99,3 +99,4 @@ int is_uv_system(void)
 {
 	return uv_system_type != UV_NONE;
 }
+EXPORT_SYMBOL_GPL(is_uv_system);
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 2cfcbded888a..bfa837cb16be 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -222,7 +222,7 @@ static __init void map_low_mmrs(void)
222 222
223enum map_type {map_wb, map_uc}; 223enum map_type {map_wb, map_uc};
224 224
225static void map_high(char *id, unsigned long base, int shift, enum map_type map_type) 225static __init void map_high(char *id, unsigned long base, int shift, enum map_type map_type)
226{ 226{
227 unsigned long bytes, paddr; 227 unsigned long bytes, paddr;
228 228
@@ -293,7 +293,9 @@ static __init void uv_rtc_init(void)
293 sn_rtc_cycles_per_second = ticks_per_sec; 293 sn_rtc_cycles_per_second = ticks_per_sec;
294} 294}
295 295
296static __init void uv_system_init(void) 296static bool uv_system_inited;
297
298void __init uv_system_init(void)
297{ 299{
298 union uvh_si_addr_map_config_u m_n_config; 300 union uvh_si_addr_map_config_u m_n_config;
299 union uvh_node_id_u node_id; 301 union uvh_node_id_u node_id;
@@ -383,6 +385,7 @@ static __init void uv_system_init(void)
383 map_mmr_high(max_pnode); 385 map_mmr_high(max_pnode);
384 map_config_high(max_pnode); 386 map_config_high(max_pnode);
385 map_mmioh_high(max_pnode); 387 map_mmioh_high(max_pnode);
388 uv_system_inited = true;
386} 389}
387 390
388/* 391/*
@@ -391,8 +394,7 @@ static __init void uv_system_init(void)
391 */ 394 */
392void __cpuinit uv_cpu_init(void) 395void __cpuinit uv_cpu_init(void)
393{ 396{
394 if (!uv_node_to_blade) 397 BUG_ON(!uv_system_inited);
395 uv_system_init();
396 398
397 uv_blade_info[uv_numa_blade_id()].nr_online_cpus++; 399 uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
398 400
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 1b318e903bf6..9bfc4d72fb2e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -88,6 +88,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
 	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
 			(__START_KERNEL & PGDIR_MASK)));
+	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
 
 	/* clear bss before set_intr_gate with early_idt_handler */
 	clear_bss();
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index f67e93441caf..a7010c3a377a 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -456,9 +456,6 @@ is386: movl $2,%ecx # set MP
 1:
 #endif /* CONFIG_SMP */
 	jmp *(initial_code)
-.align 4
-ENTRY(initial_code)
-	.long i386_start_kernel
 
 /*
  * We depend on ET to be correct. This checks for 287/387.
@@ -601,6 +598,11 @@ ignore_int:
 #endif
 	iret
 
+.section .cpuinit.data,"wa"
+.align 4
+ENTRY(initial_code)
+	.long i386_start_kernel
+
 .section .text
 /*
  * Real beginning of normal "text" segment
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 0ea6a19bfdfe..73deaffadd03 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -210,8 +210,8 @@ static void hpet_legacy_clockevent_register(void)
210 /* Calculate the min / max delta */ 210 /* Calculate the min / max delta */
211 hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, 211 hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
212 &hpet_clockevent); 212 &hpet_clockevent);
213 hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30, 213 /* 5 usec minimum reprogramming delta. */
214 &hpet_clockevent); 214 hpet_clockevent.min_delta_ns = 5000;
215 215
216 /* 216 /*
217 * Start hpet with the boot cpu mask and make it 217 * Start hpet with the boot cpu mask and make it
@@ -270,15 +270,22 @@ static void hpet_legacy_set_mode(enum clock_event_mode mode,
270} 270}
271 271
272static int hpet_legacy_next_event(unsigned long delta, 272static int hpet_legacy_next_event(unsigned long delta,
273 struct clock_event_device *evt) 273 struct clock_event_device *evt)
274{ 274{
275 unsigned long cnt; 275 u32 cnt;
276 276
277 cnt = hpet_readl(HPET_COUNTER); 277 cnt = hpet_readl(HPET_COUNTER);
278 cnt += delta; 278 cnt += (u32) delta;
279 hpet_writel(cnt, HPET_T0_CMP); 279 hpet_writel(cnt, HPET_T0_CMP);
280 280
281 return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0) ? -ETIME : 0; 281 /*
282 * We need to read back the CMP register to make sure that
283 * what we wrote hit the chip before we compare it to the
284 * counter.
285 */
286 WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
287
288 return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
282} 289}
283 290
284/* 291/*
@@ -359,6 +366,7 @@ static int hpet_clocksource_register(void)
359int __init hpet_enable(void) 366int __init hpet_enable(void)
360{ 367{
361 unsigned long id; 368 unsigned long id;
369 int i;
362 370
363 if (!is_hpet_capable()) 371 if (!is_hpet_capable())
364 return 0; 372 return 0;
@@ -369,6 +377,29 @@ int __init hpet_enable(void)
369 * Read the period and check for a sane value: 377 * Read the period and check for a sane value:
370 */ 378 */
371 hpet_period = hpet_readl(HPET_PERIOD); 379 hpet_period = hpet_readl(HPET_PERIOD);
380
381 /*
382 * AMD SB700 based systems with spread spectrum enabled use a
383 * SMM based HPET emulation to provide proper frequency
384 * setting. The SMM code is initialized with the first HPET
385 * register access and takes some time to complete. During
386 * this time the config register reads 0xffffffff. We check
387 * for max. 1000 loops whether the config register reads a non
388 * 0xffffffff value to make sure that HPET is up and running
389 * before we go further. A counting loop is safe, as the HPET
390 * access takes thousands of CPU cycles. On non SB700 based
391 * machines this check is only done once and has no side
392 * effects.
393 */
394 for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
395 if (i == 1000) {
396 printk(KERN_WARNING
397 "HPET config register value = 0xFFFFFFFF. "
398 "Disabling HPET\n");
399 goto out_nohpet;
400 }
401 }
402
372 if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD) 403 if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
373 goto out_nohpet; 404 goto out_nohpet;
374 405
@@ -468,7 +499,7 @@ void hpet_disable(void)
468#define RTC_NUM_INTS 1 499#define RTC_NUM_INTS 1
469 500
470static unsigned long hpet_rtc_flags; 501static unsigned long hpet_rtc_flags;
471static unsigned long hpet_prev_update_sec; 502static int hpet_prev_update_sec;
472static struct rtc_time hpet_alarm_time; 503static struct rtc_time hpet_alarm_time;
473static unsigned long hpet_pie_count; 504static unsigned long hpet_pie_count;
474static unsigned long hpet_t1_cmp; 505static unsigned long hpet_t1_cmp;
@@ -575,6 +606,9 @@ int hpet_set_rtc_irq_bit(unsigned long bit_mask)
575 606
576 hpet_rtc_flags |= bit_mask; 607 hpet_rtc_flags |= bit_mask;
577 608
609 if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
610 hpet_prev_update_sec = -1;
611
578 if (!oldbits) 612 if (!oldbits)
579 hpet_rtc_timer_init(); 613 hpet_rtc_timer_init();
580 614
@@ -652,7 +686,7 @@ static void hpet_rtc_timer_reinit(void)
652 if (hpet_rtc_flags & RTC_PIE) 686 if (hpet_rtc_flags & RTC_PIE)
653 hpet_pie_count += lost_ints; 687 hpet_pie_count += lost_ints;
654 if (printk_ratelimit()) 688 if (printk_ratelimit())
655 printk(KERN_WARNING "rtc: lost %d interrupts\n", 689 printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
656 lost_ints); 690 lost_ints);
657 } 691 }
658} 692}
@@ -670,7 +704,8 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
670 704
671 if (hpet_rtc_flags & RTC_UIE && 705 if (hpet_rtc_flags & RTC_UIE &&
672 curr_time.tm_sec != hpet_prev_update_sec) { 706 curr_time.tm_sec != hpet_prev_update_sec) {
673 rtc_int_flag = RTC_UF; 707 if (hpet_prev_update_sec >= 0)
708 rtc_int_flag = RTC_UF;
674 hpet_prev_update_sec = curr_time.tm_sec; 709 hpet_prev_update_sec = curr_time.tm_sec;
675 } 710 }
676 711
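The reworked hpet_legacy_next_event() above relies on unsigned 32-bit wraparound: it subtracts the programmed compare value from the free-running counter and interprets the difference as a signed 32-bit number, which stays correct even when the counter rolls over 0xFFFFFFFF. A minimal user-space sketch of that comparison (the counter values below are invented for illustration, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* Returns 1 if the free-running 32-bit counter has already passed
 * the programmed compare value, even across a wraparound. */
static int counter_passed(uint32_t counter, uint32_t cmp)
{
    return (int32_t)(counter - cmp) >= 0;
}

int main(void)
{
    /* No wraparound: counter still behind the compare value. */
    printf("%d\n", counter_passed(0x00001000, 0x00002000));  /* 0 */
    /* Wraparound: cmp was set near the top, counter wrapped to a small
     * value; a plain "counter > cmp" test would get this wrong. */
    printf("%d\n", counter_passed(0x00000010, 0xFFFFFFF0));  /* 1 */
    return 0;
}

A naive "counter > cmp" check would report the second case as not yet expired even though the deadline has already passed, which is exactly the -ETIME case the hunk is guarding against.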
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index de9aa0e3a9c5..09cddb57bec4 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -57,7 +57,7 @@ atomic_t irq_mis_count;
57static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; 57static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
58 58
59static DEFINE_SPINLOCK(ioapic_lock); 59static DEFINE_SPINLOCK(ioapic_lock);
60static DEFINE_SPINLOCK(vector_lock); 60DEFINE_SPINLOCK(vector_lock);
61 61
62int timer_through_8259 __initdata; 62int timer_through_8259 __initdata;
63 63
@@ -1209,10 +1209,6 @@ static int assign_irq_vector(int irq)
1209 return vector; 1209 return vector;
1210} 1210}
1211 1211
1212void setup_vector_irq(int cpu)
1213{
1214}
1215
1216static struct irq_chip ioapic_chip; 1212static struct irq_chip ioapic_chip;
1217 1213
1218#define IOAPIC_AUTO -1 1214#define IOAPIC_AUTO -1
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 8269434d1707..61a83b70c18f 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -101,7 +101,7 @@ int timer_through_8259 __initdata;
101static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; 101static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
102 102
103static DEFINE_SPINLOCK(ioapic_lock); 103static DEFINE_SPINLOCK(ioapic_lock);
104DEFINE_SPINLOCK(vector_lock); 104static DEFINE_SPINLOCK(vector_lock);
105 105
106/* 106/*
107 * # of IRQ routing registers 107 * # of IRQ routing registers
@@ -697,6 +697,19 @@ static int pin_2_irq(int idx, int apic, int pin)
697 return irq; 697 return irq;
698} 698}
699 699
700void lock_vector_lock(void)
701{
702 /* Used to ensure the online set of cpus does not change
703 * during assign_irq_vector.
704 */
705 spin_lock(&vector_lock);
706}
707
708void unlock_vector_lock(void)
709{
710 spin_unlock(&vector_lock);
711}
712
700static int __assign_irq_vector(int irq, cpumask_t mask) 713static int __assign_irq_vector(int irq, cpumask_t mask)
701{ 714{
702 /* 715 /*
@@ -802,7 +815,7 @@ static void __clear_irq_vector(int irq)
802 cpus_clear(cfg->domain); 815 cpus_clear(cfg->domain);
803} 816}
804 817
805static void __setup_vector_irq(int cpu) 818void __setup_vector_irq(int cpu)
806{ 819{
807 /* Initialize vector_irq on a new cpu */ 820 /* Initialize vector_irq on a new cpu */
808 /* This function must be called with vector_lock held */ 821 /* This function must be called with vector_lock held */
@@ -825,14 +838,6 @@ static void __setup_vector_irq(int cpu)
825 } 838 }
826} 839}
827 840
828void setup_vector_irq(int cpu)
829{
830 spin_lock(&vector_lock);
831 __setup_vector_irq(smp_processor_id());
832 spin_unlock(&vector_lock);
833}
834
835
836static struct irq_chip ioapic_chip; 841static struct irq_chip ioapic_chip;
837 842
838static void ioapic_register_intr(int irq, unsigned long trigger) 843static void ioapic_register_intr(int irq, unsigned long trigger)
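The new lock_vector_lock()/unlock_vector_lock() helpers above keep vector_lock static to io_apic_64.c while still letting a caller hold it across __setup_vector_irq(). A rough user-space analogue of that encapsulation pattern, using pthreads (the 16-entry table and its contents are placeholders, not kernel values):

#include <pthread.h>
#include <stdio.h>

/* Illustrative sketch, not kernel code: the table and its lock stay private
 * to this file; callers that need the table stable across a multi-step
 * operation use the exported lock/unlock helpers. */
static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;
static int vector_irq[16];

void lock_vector_lock(void)   { pthread_mutex_lock(&vector_lock); }
void unlock_vector_lock(void) { pthread_mutex_unlock(&vector_lock); }

/* Must be called with vector_lock held, like __setup_vector_irq(). */
void __setup_vector_irq(int cpu)
{
    for (int i = 0; i < 16; i++)
        vector_irq[i] = cpu;        /* stand-in for the real vector setup */
}

int main(void)
{
    lock_vector_lock();
    __setup_vector_irq(1);          /* caller serializes the whole sequence */
    unlock_vector_lock();
    printf("vector_irq[0] = %d\n", vector_irq[0]);
    return 0;
}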
diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
index 1c3a66a67f83..720d2607aacb 100644
--- a/arch/x86/kernel/io_delay.c
+++ b/arch/x86/kernel/io_delay.c
@@ -92,6 +92,14 @@ static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
92 DMI_MATCH(DMI_BOARD_NAME, "30BF") 92 DMI_MATCH(DMI_BOARD_NAME, "30BF")
93 } 93 }
94 }, 94 },
95 {
96 .callback = dmi_io_delay_0xed_port,
97 .ident = "Presario F700",
98 .matches = {
99 DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
100 DMI_MATCH(DMI_BOARD_NAME, "30D3")
101 }
102 },
95 { } 103 { }
96}; 104};
97 105
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 0373e88de95a..1f26fd9ec4f4 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -43,10 +43,11 @@
43 43
44#define BUILD_IRQ(nr) \ 44#define BUILD_IRQ(nr) \
45 asmlinkage void IRQ_NAME(nr); \ 45 asmlinkage void IRQ_NAME(nr); \
46 asm("\n.p2align\n" \ 46 asm("\n.text\n.p2align\n" \
47 "IRQ" #nr "_interrupt:\n\t" \ 47 "IRQ" #nr "_interrupt:\n\t" \
48 "push $~(" #nr ") ; " \ 48 "push $~(" #nr ") ; " \
49 "jmp common_interrupt"); 49 "jmp common_interrupt\n" \
50 ".previous");
50 51
51#define BI(x,y) \ 52#define BI(x,y) \
52 BUILD_IRQ(x##y) 53 BUILD_IRQ(x##y)
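The BUILD_IRQ change above forces the stub into .text and restores the previous section afterwards, because top-level asm() is otherwise emitted into whatever section the compiler happens to be using at that point. A small x86-64 user-space sketch of the same .text/.previous pattern (the stub name and body are made up; it only returns):

#include <stdio.h>

void my_stub(void);

/* Emit the stub explicitly into .text, then switch back to whatever
 * section was active before, mirroring the fixed BUILD_IRQ macro. */
__asm__("\n.text\n.p2align 4\n"
        ".globl my_stub\n"
        "my_stub:\n\t"
        "ret\n"
        ".previous");

int main(void)
{
    my_stub();
    printf("stub returned\n");
    return 0;
}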
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index f2d43bc75514..ff7d3b0124f1 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -139,6 +139,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
139 if (PageHighMem(pg)) { 139 if (PageHighMem(pg)) {
140 data = ioremap_cache(pa_data, sizeof(*data)); 140 data = ioremap_cache(pa_data, sizeof(*data));
141 if (!data) { 141 if (!data) {
142 kfree(node);
142 error = -ENXIO; 143 error = -ENXIO;
143 goto err_dir; 144 goto err_dir;
144 } 145 }
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 43c019f85f0d..6c27679ec6aa 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -431,7 +431,6 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
431 regs->ip = (unsigned long)p->ainsn.insn; 431 regs->ip = (unsigned long)p->ainsn.insn;
432} 432}
433 433
434/* Called with kretprobe_lock held */
435void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 434void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
436 struct pt_regs *regs) 435 struct pt_regs *regs)
437{ 436{
@@ -682,8 +681,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
682 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; 681 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
683 682
684 INIT_HLIST_HEAD(&empty_rp); 683 INIT_HLIST_HEAD(&empty_rp);
685 spin_lock_irqsave(&kretprobe_lock, flags); 684 kretprobe_hash_lock(current, &head, &flags);
686 head = kretprobe_inst_table_head(current);
687 /* fixup registers */ 685 /* fixup registers */
688#ifdef CONFIG_X86_64 686#ifdef CONFIG_X86_64
689 regs->cs = __KERNEL_CS; 687 regs->cs = __KERNEL_CS;
@@ -732,7 +730,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
732 730
733 kretprobe_assert(ri, orig_ret_address, trampoline_address); 731 kretprobe_assert(ri, orig_ret_address, trampoline_address);
734 732
735 spin_unlock_irqrestore(&kretprobe_lock, flags); 733 kretprobe_hash_unlock(current, &flags);
736 734
737 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 735 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
738 hlist_del(&ri->hlist); 736 hlist_del(&ri->hlist);
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 3fee2aa50f3f..b68e21f06f4f 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -62,12 +62,10 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
62 62
63 if (reload) { 63 if (reload) {
64#ifdef CONFIG_SMP 64#ifdef CONFIG_SMP
65 cpumask_of_cpu_ptr_declare(mask);
66
67 preempt_disable(); 65 preempt_disable();
68 load_LDT(pc); 66 load_LDT(pc);
69 cpumask_of_cpu_ptr_next(mask, smp_processor_id()); 67 if (!cpus_equal(current->mm->cpu_vm_mask,
70 if (!cpus_equal(current->mm->cpu_vm_mask, *mask)) 68 cpumask_of_cpu(smp_processor_id())))
71 smp_call_function(flush_ldt, current->mm, 1); 69 smp_call_function(flush_ldt, current->mm, 1);
72 preempt_enable(); 70 preempt_enable();
73#else 71#else
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 8864230d55af..0732adba05ca 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/numa.h> 13#include <linux/numa.h>
14#include <linux/ftrace.h> 14#include <linux/ftrace.h>
15#include <linux/suspend.h>
15 16
16#include <asm/pgtable.h> 17#include <asm/pgtable.h>
17#include <asm/pgalloc.h> 18#include <asm/pgalloc.h>
@@ -22,6 +23,7 @@
22#include <asm/cpufeature.h> 23#include <asm/cpufeature.h>
23#include <asm/desc.h> 24#include <asm/desc.h>
24#include <asm/system.h> 25#include <asm/system.h>
26#include <asm/cacheflush.h>
25 27
26#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE))) 28#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
27static u32 kexec_pgd[1024] PAGE_ALIGNED; 29static u32 kexec_pgd[1024] PAGE_ALIGNED;
@@ -77,7 +79,7 @@ static void load_segments(void)
77/* 79/*
78 * A architecture hook called to validate the 80 * A architecture hook called to validate the
79 * proposed image and prepare the control pages 81 * proposed image and prepare the control pages
80 * as needed. The pages for KEXEC_CONTROL_CODE_SIZE 82 * as needed. The pages for KEXEC_CONTROL_PAGE_SIZE
81 * have been allocated, but the segments have yet 83 * have been allocated, but the segments have yet
82 * been copied into the kernel. 84 * been copied into the kernel.
83 * 85 *
@@ -85,10 +87,12 @@ static void load_segments(void)
85 * reboot code buffer to allow us to avoid allocations 87 * reboot code buffer to allow us to avoid allocations
86 * later. 88 * later.
87 * 89 *
88 * Currently nothing. 90 * Make control page executable.
89 */ 91 */
90int machine_kexec_prepare(struct kimage *image) 92int machine_kexec_prepare(struct kimage *image)
91{ 93{
94 if (nx_enabled)
95 set_pages_x(image->control_code_page, 1);
92 return 0; 96 return 0;
93} 97}
94 98
@@ -98,27 +102,54 @@ int machine_kexec_prepare(struct kimage *image)
98 */ 102 */
99void machine_kexec_cleanup(struct kimage *image) 103void machine_kexec_cleanup(struct kimage *image)
100{ 104{
105 if (nx_enabled)
106 set_pages_nx(image->control_code_page, 1);
101} 107}
102 108
103/* 109/*
104 * Do not allocate memory (or fail in any way) in machine_kexec(). 110 * Do not allocate memory (or fail in any way) in machine_kexec().
105 * We are past the point of no return, committed to rebooting now. 111 * We are past the point of no return, committed to rebooting now.
106 */ 112 */
107NORET_TYPE void machine_kexec(struct kimage *image) 113void machine_kexec(struct kimage *image)
108{ 114{
109 unsigned long page_list[PAGES_NR]; 115 unsigned long page_list[PAGES_NR];
110 void *control_page; 116 void *control_page;
117 int save_ftrace_enabled;
118 asmlinkage unsigned long
119 (*relocate_kernel_ptr)(unsigned long indirection_page,
120 unsigned long control_page,
121 unsigned long start_address,
122 unsigned int has_pae,
123 unsigned int preserve_context);
124
125#ifdef CONFIG_KEXEC_JUMP
126 if (kexec_image->preserve_context)
127 save_processor_state();
128#endif
111 129
112 tracer_disable(); 130 save_ftrace_enabled = __ftrace_enabled_save();
113 131
114 /* Interrupts aren't acceptable while we reboot */ 132 /* Interrupts aren't acceptable while we reboot */
115 local_irq_disable(); 133 local_irq_disable();
116 134
135 if (image->preserve_context) {
136#ifdef CONFIG_X86_IO_APIC
137 /* We need to put APICs in legacy mode so that we can
138 * get timer interrupts in the second kernel. The kexec/kdump
139 * paths already have calls to disable_IO_APIC() in
140 * one form or another; the kexec jump path also needs
141 * one.
142 */
143 disable_IO_APIC();
144#endif
145 }
146
117 control_page = page_address(image->control_code_page); 147 control_page = page_address(image->control_code_page);
118 memcpy(control_page, relocate_kernel, PAGE_SIZE); 148 memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
119 149
150 relocate_kernel_ptr = control_page;
120 page_list[PA_CONTROL_PAGE] = __pa(control_page); 151 page_list[PA_CONTROL_PAGE] = __pa(control_page);
121 page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel; 152 page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
122 page_list[PA_PGD] = __pa(kexec_pgd); 153 page_list[PA_PGD] = __pa(kexec_pgd);
123 page_list[VA_PGD] = (unsigned long)kexec_pgd; 154 page_list[VA_PGD] = (unsigned long)kexec_pgd;
124#ifdef CONFIG_X86_PAE 155#ifdef CONFIG_X86_PAE
@@ -131,6 +162,7 @@ NORET_TYPE void machine_kexec(struct kimage *image)
131 page_list[VA_PTE_0] = (unsigned long)kexec_pte0; 162 page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
132 page_list[PA_PTE_1] = __pa(kexec_pte1); 163 page_list[PA_PTE_1] = __pa(kexec_pte1);
133 page_list[VA_PTE_1] = (unsigned long)kexec_pte1; 164 page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
165 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) << PAGE_SHIFT);
134 166
135 /* The segment registers are funny things, they have both a 167 /* The segment registers are funny things, they have both a
136 * visible and an invisible part. Whenever the visible part is 168 * visible and an invisible part. Whenever the visible part is
@@ -149,8 +181,17 @@ NORET_TYPE void machine_kexec(struct kimage *image)
149 set_idt(phys_to_virt(0),0); 181 set_idt(phys_to_virt(0),0);
150 182
151 /* now call it */ 183 /* now call it */
152 relocate_kernel((unsigned long)image->head, (unsigned long)page_list, 184 image->start = relocate_kernel_ptr((unsigned long)image->head,
153 image->start, cpu_has_pae); 185 (unsigned long)page_list,
186 image->start, cpu_has_pae,
187 image->preserve_context);
188
189#ifdef CONFIG_KEXEC_JUMP
190 if (kexec_image->preserve_context)
191 restore_processor_state();
192#endif
193
194 __ftrace_enabled_restore(save_ftrace_enabled);
154} 195}
155 196
156void arch_crash_save_vmcoreinfo(void) 197void arch_crash_save_vmcoreinfo(void)
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 9dd9262693a3..c43caa3a91f3 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -181,7 +181,7 @@ void machine_kexec_cleanup(struct kimage *image)
181 * Do not allocate memory (or fail in any way) in machine_kexec(). 181 * Do not allocate memory (or fail in any way) in machine_kexec().
182 * We are past the point of no return, committed to rebooting now. 182 * We are past the point of no return, committed to rebooting now.
183 */ 183 */
184NORET_TYPE void machine_kexec(struct kimage *image) 184void machine_kexec(struct kimage *image)
185{ 185{
186 unsigned long page_list[PAGES_NR]; 186 unsigned long page_list[PAGES_NR];
187 void *control_page; 187 void *control_page;
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 07c0f828f488..3b599518c322 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -33,6 +33,8 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <asm/geode.h> 34#include <asm/geode.h>
35 35
36#define MFGPT_DEFAULT_IRQ 7
37
36static struct mfgpt_timer_t { 38static struct mfgpt_timer_t {
37 unsigned int avail:1; 39 unsigned int avail:1;
38} mfgpt_timers[MFGPT_MAX_TIMERS]; 40} mfgpt_timers[MFGPT_MAX_TIMERS];
@@ -157,29 +159,48 @@ int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
157} 159}
158EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event); 160EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event);
159 161
160int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable) 162int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable)
161{ 163{
162 u32 val, dummy; 164 u32 zsel, lpc, dummy;
163 int offset; 165 int shift;
164 166
165 if (timer < 0 || timer >= MFGPT_MAX_TIMERS) 167 if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
166 return -EIO; 168 return -EIO;
167 169
168 if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable)) 170 /*
171 * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
172 * is using the same CMP of the timer's Siamese twin, the IRQ is set to
173 * 2, and we mustn't use nor change it.
174 * XXX: Likewise, two Linux drivers might clash if the second overwrites
175 * the IRQ of the first. This can only happen when forcing an IRQ;
176 * calling this with *irq==0 is safe. Currently there are no such drivers.
177 */
178 rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
179 shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4;
180 if (((zsel >> shift) & 0xF) == 2)
169 return -EIO; 181 return -EIO;
170 182
171 rdmsr(MSR_PIC_ZSEL_LOW, val, dummy); 183 /* Choose IRQ: if none supplied, keep IRQ already set or use default */
184 if (!*irq)
185 *irq = (zsel >> shift) & 0xF;
186 if (!*irq)
187 *irq = MFGPT_DEFAULT_IRQ;
172 188
173 offset = (timer % 4) * 4; 189 /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
174 190 if (*irq < 1 || *irq == 2 || *irq > 15)
175 val &= ~((0xF << offset) | (0xF << (offset + 16))); 191 return -EIO;
192 rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
193 if (lpc & (1 << *irq))
194 return -EIO;
176 195
196 /* All chosen and checked - go for it */
197 if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
198 return -EIO;
177 if (enable) { 199 if (enable) {
178 val |= (irq & 0x0F) << (offset); 200 zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
179 val |= (irq & 0x0F) << (offset + 16); 201 wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
180 } 202 }
181 203
182 wrmsr(MSR_PIC_ZSEL_LOW, val, dummy);
183 return 0; 204 return 0;
184} 205}
185 206
@@ -242,7 +263,7 @@ EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer);
242static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN; 263static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
243static u16 mfgpt_event_clock; 264static u16 mfgpt_event_clock;
244 265
245static int irq = 7; 266static int irq;
246static int __init mfgpt_setup(char *str) 267static int __init mfgpt_setup(char *str)
247{ 268{
248 get_option(&str, &irq); 269 get_option(&str, &irq);
@@ -346,7 +367,7 @@ int __init mfgpt_timer_setup(void)
346 mfgpt_event_clock = timer; 367 mfgpt_event_clock = timer;
347 368
348 /* Set up the IRQ on the MFGPT side */ 369 /* Set up the IRQ on the MFGPT side */
349 if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, irq)) { 370 if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, &irq)) {
350 printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq); 371 printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq);
351 return -EIO; 372 return -EIO;
352 } 373 }
@@ -374,13 +395,14 @@ int __init mfgpt_timer_setup(void)
374 &mfgpt_clockevent); 395 &mfgpt_clockevent);
375 396
376 printk(KERN_INFO 397 printk(KERN_INFO
377 "mfgpt-timer: registering the MFGPT timer as a clock event.\n"); 398 "mfgpt-timer: Registering MFGPT timer %d as a clock event, using IRQ %d\n",
399 timer, irq);
378 clockevents_register_device(&mfgpt_clockevent); 400 clockevents_register_device(&mfgpt_clockevent);
379 401
380 return 0; 402 return 0;
381 403
382err: 404err:
383 geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, irq); 405 geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, &irq);
384 printk(KERN_ERR 406 printk(KERN_ERR
385 "mfgpt-timer: Unable to set up the MFGPT clock source\n"); 407 "mfgpt-timer: Unable to set up the MFGPT clock source\n");
386 return -EIO; 408 return -EIO;
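geode_mfgpt_set_irq() above treats MSR_PIC_ZSEL_LOW as eight 4-bit routing fields, one per timer/compare pair, selected by shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4. A standalone sketch of that nibble read-modify-write (the register value, timer, and IRQ below are arbitrary examples, not hardware values):

#include <stdint.h>
#include <stdio.h>

#define MFGPT_CMP1 0
#define MFGPT_CMP2 1

/* Same shift computation as in the patch: CMP2 fields sit in the upper
 * four nibbles, CMP1 fields in the lower four. */
static int zsel_shift(int timer, int cmp)
{
    return ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4;
}

int main(void)
{
    uint32_t zsel = 0x00000000;            /* pretend MSR contents */
    int timer = 1, cmp = MFGPT_CMP2, irq = 7;
    int shift = zsel_shift(timer, cmp);    /* (4 + 1) * 4 = 20 */

    /* Read the current routing nibble (0 means "disabled"). */
    printf("old irq = %u\n", (zsel >> shift) & 0xF);

    /* Clear the nibble and write the new IRQ into it. */
    zsel = (zsel & ~(0xFu << shift)) | ((uint32_t)irq << shift);
    printf("zsel = 0x%08X, new irq = %u\n", zsel, (zsel >> shift) & 0xF);
    return 0;
}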
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 6994c751590e..652fa5c38ebe 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -388,7 +388,6 @@ static int do_microcode_update (void)
388 void *new_mc = NULL; 388 void *new_mc = NULL;
389 int cpu; 389 int cpu;
390 cpumask_t old; 390 cpumask_t old;
391 cpumask_of_cpu_ptr_declare(newmask);
392 391
393 old = current->cpus_allowed; 392 old = current->cpus_allowed;
394 393
@@ -405,8 +404,7 @@ static int do_microcode_update (void)
405 404
406 if (!uci->valid) 405 if (!uci->valid)
407 continue; 406 continue;
408 cpumask_of_cpu_ptr_next(newmask, cpu); 407 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
409 set_cpus_allowed_ptr(current, newmask);
410 error = get_maching_microcode(new_mc, cpu); 408 error = get_maching_microcode(new_mc, cpu);
411 if (error < 0) 409 if (error < 0)
412 goto out; 410 goto out;
@@ -576,7 +574,6 @@ static int apply_microcode_check_cpu(int cpu)
576 struct cpuinfo_x86 *c = &cpu_data(cpu); 574 struct cpuinfo_x86 *c = &cpu_data(cpu);
577 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 575 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
578 cpumask_t old; 576 cpumask_t old;
579 cpumask_of_cpu_ptr(newmask, cpu);
580 unsigned int val[2]; 577 unsigned int val[2];
581 int err = 0; 578 int err = 0;
582 579
@@ -585,7 +582,7 @@ static int apply_microcode_check_cpu(int cpu)
585 return 0; 582 return 0;
586 583
587 old = current->cpus_allowed; 584 old = current->cpus_allowed;
588 set_cpus_allowed_ptr(current, newmask); 585 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
589 586
590 /* Check if the microcode we have in memory matches the CPU */ 587 /* Check if the microcode we have in memory matches the CPU */
591 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || 588 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -623,12 +620,11 @@ static int apply_microcode_check_cpu(int cpu)
623static void microcode_init_cpu(int cpu, int resume) 620static void microcode_init_cpu(int cpu, int resume)
624{ 621{
625 cpumask_t old; 622 cpumask_t old;
626 cpumask_of_cpu_ptr(newmask, cpu);
627 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 623 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
628 624
629 old = current->cpus_allowed; 625 old = current->cpus_allowed;
630 626
631 set_cpus_allowed_ptr(current, newmask); 627 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
632 mutex_lock(&microcode_mutex); 628 mutex_lock(&microcode_mutex);
633 collect_cpu_info(cpu); 629 collect_cpu_info(cpu);
634 if (uci->valid && system_state == SYSTEM_RUNNING && !resume) 630 if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
@@ -661,13 +657,10 @@ static ssize_t reload_store(struct sys_device *dev,
661 if (end == buf) 657 if (end == buf)
662 return -EINVAL; 658 return -EINVAL;
663 if (val == 1) { 659 if (val == 1) {
664 cpumask_t old; 660 cpumask_t old = current->cpus_allowed;
665 cpumask_of_cpu_ptr(newmask, cpu);
666
667 old = current->cpus_allowed;
668 661
669 get_online_cpus(); 662 get_online_cpus();
670 set_cpus_allowed_ptr(current, newmask); 663 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
671 664
672 mutex_lock(&microcode_mutex); 665 mutex_lock(&microcode_mutex);
673 if (uci->valid) 666 if (uci->valid)
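The microcode changes above repeatedly use the same pattern: remember current->cpus_allowed, pin the task to one CPU with set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)), do the per-CPU work, then restore the saved mask. A purely illustrative user-space analogue with sched_setaffinity() (the target CPU is an arbitrary choice):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    cpu_set_t old, one;
    int cpu = 0;                        /* target CPU, arbitrary here */

    if (sched_getaffinity(0, sizeof(old), &old))    /* save current mask */
        return 1;

    CPU_ZERO(&one);
    CPU_SET(cpu, &one);
    if (sched_setaffinity(0, sizeof(one), &one))    /* pin to one CPU */
        return 1;

    printf("running on CPU %d\n", sched_getcpu());  /* per-CPU work here */

    sched_setaffinity(0, sizeof(old), &old);        /* restore old mask */
    return 0;
}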
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index fdfdc550b366..efc2f361fe85 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -238,7 +238,7 @@ static struct dmi_system_id __devinitdata mmconf_dmi_table[] = {
238 {} 238 {}
239}; 239};
240 240
241void __init check_enable_amd_mmconf_dmi(void) 241void __cpuinit check_enable_amd_mmconf_dmi(void)
242{ 242{
243 dmi_check_system(mmconf_dmi_table); 243 dmi_check_system(mmconf_dmi_table);
244} 244}
diff --git a/arch/x86/kernel/module_64.c b/arch/x86/kernel/module_64.c
index 0e867676b5a5..6ba87830d4b1 100644
--- a/arch/x86/kernel/module_64.c
+++ b/arch/x86/kernel/module_64.c
@@ -22,6 +22,7 @@
22#include <linux/fs.h> 22#include <linux/fs.h>
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/mm.h>
25#include <linux/slab.h> 26#include <linux/slab.h>
26#include <linux/bug.h> 27#include <linux/bug.h>
27 28
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 6ae005ccaed8..b3fb430725cb 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -49,7 +49,7 @@ static int __init mpf_checksum(unsigned char *mp, int len)
49 return sum & 0xFF; 49 return sum & 0xFF;
50} 50}
51 51
52static void __cpuinit MP_processor_info(struct mpc_config_processor *m) 52static void __init MP_processor_info(struct mpc_config_processor *m)
53{ 53{
54 int apicid; 54 int apicid;
55 char *bootup_cpu = ""; 55 char *bootup_cpu = "";
@@ -83,7 +83,7 @@ static void __init MP_bus_info(struct mpc_config_bus *m)
83 if (x86_quirks->mpc_oem_bus_info) 83 if (x86_quirks->mpc_oem_bus_info)
84 x86_quirks->mpc_oem_bus_info(m, str); 84 x86_quirks->mpc_oem_bus_info(m, str);
85 else 85 else
86 printk(KERN_INFO "Bus #%d is %s\n", m->mpc_busid, str); 86 apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->mpc_busid, str);
87 87
88#if MAX_MP_BUSSES < 256 88#if MAX_MP_BUSSES < 256
89 if (m->mpc_busid >= MAX_MP_BUSSES) { 89 if (m->mpc_busid >= MAX_MP_BUSSES) {
@@ -154,7 +154,7 @@ static void __init MP_ioapic_info(struct mpc_config_ioapic *m)
154 154
155static void print_MP_intsrc_info(struct mpc_config_intsrc *m) 155static void print_MP_intsrc_info(struct mpc_config_intsrc *m)
156{ 156{
157 printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x," 157 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
158 " IRQ %02x, APIC ID %x, APIC INT %02x\n", 158 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
159 m->mpc_irqtype, m->mpc_irqflag & 3, 159 m->mpc_irqtype, m->mpc_irqflag & 3,
160 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, 160 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
@@ -163,7 +163,7 @@ static void print_MP_intsrc_info(struct mpc_config_intsrc *m)
163 163
164static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) 164static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
165{ 165{
166 printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x," 166 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
167 " IRQ %02x, APIC ID %x, APIC INT %02x\n", 167 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
168 mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3, 168 mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3,
169 (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus, 169 (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus,
@@ -235,7 +235,7 @@ static void __init MP_intsrc_info(struct mpc_config_intsrc *m)
235 235
236static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) 236static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m)
237{ 237{
238 printk(KERN_INFO "Lint: type %d, pol %d, trig %d, bus %02x," 238 apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x,"
239 " IRQ %02x, APIC ID %x, APIC LINT %02x\n", 239 " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
240 m->mpc_irqtype, m->mpc_irqflag & 3, 240 m->mpc_irqtype, m->mpc_irqflag & 3,
241 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid, 241 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid,
@@ -484,7 +484,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
484} 484}
485 485
486 486
487static void construct_ioapic_table(int mpc_default_type) 487static void __init construct_ioapic_table(int mpc_default_type)
488{ 488{
489 struct mpc_config_ioapic ioapic; 489 struct mpc_config_ioapic ioapic;
490 struct mpc_config_bus bus; 490 struct mpc_config_bus bus;
@@ -529,7 +529,7 @@ static void construct_ioapic_table(int mpc_default_type)
529 construct_default_ioirq_mptable(mpc_default_type); 529 construct_default_ioirq_mptable(mpc_default_type);
530} 530}
531#else 531#else
532static inline void construct_ioapic_table(int mpc_default_type) { } 532static inline void __init construct_ioapic_table(int mpc_default_type) { }
533#endif 533#endif
534 534
535static inline void __init construct_default_ISA_mptable(int mpc_default_type) 535static inline void __init construct_default_ISA_mptable(int mpc_default_type)
@@ -695,7 +695,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
695 unsigned int *bp = phys_to_virt(base); 695 unsigned int *bp = phys_to_virt(base);
696 struct intel_mp_floating *mpf; 696 struct intel_mp_floating *mpf;
697 697
698 printk(KERN_DEBUG "Scan SMP from %p for %ld bytes.\n", bp, length); 698 apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
699 bp, length);
699 BUILD_BUG_ON(sizeof(*mpf) != 16); 700 BUILD_BUG_ON(sizeof(*mpf) != 16);
700 701
701 while (length > 0) { 702 while (length > 0) {
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 9fd809552447..2e2af5d18191 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -72,21 +72,28 @@ static ssize_t msr_read(struct file *file, char __user *buf,
72 u32 data[2]; 72 u32 data[2];
73 u32 reg = *ppos; 73 u32 reg = *ppos;
74 int cpu = iminor(file->f_path.dentry->d_inode); 74 int cpu = iminor(file->f_path.dentry->d_inode);
75 int err; 75 int err = 0;
76 ssize_t bytes = 0;
76 77
77 if (count % 8) 78 if (count % 8)
78 return -EINVAL; /* Invalid chunk size */ 79 return -EINVAL; /* Invalid chunk size */
79 80
80 for (; count; count -= 8) { 81 for (; count; count -= 8) {
81 err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); 82 err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
82 if (err) 83 if (err) {
83 return -EIO; 84 if (err == -EFAULT) /* Fix idiotic error code */
84 if (copy_to_user(tmp, &data, 8)) 85 err = -EIO;
85 return -EFAULT; 86 break;
87 }
88 if (copy_to_user(tmp, &data, 8)) {
89 err = -EFAULT;
90 break;
91 }
86 tmp += 2; 92 tmp += 2;
93 bytes += 8;
87 } 94 }
88 95
89 return ((char __user *)tmp) - buf; 96 return bytes ? bytes : err;
90} 97}
91 98
92static ssize_t msr_write(struct file *file, const char __user *buf, 99static ssize_t msr_write(struct file *file, const char __user *buf,
@@ -96,21 +103,28 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
96 u32 data[2]; 103 u32 data[2];
97 u32 reg = *ppos; 104 u32 reg = *ppos;
98 int cpu = iminor(file->f_path.dentry->d_inode); 105 int cpu = iminor(file->f_path.dentry->d_inode);
99 int err; 106 int err = 0;
107 ssize_t bytes = 0;
100 108
101 if (count % 8) 109 if (count % 8)
102 return -EINVAL; /* Invalid chunk size */ 110 return -EINVAL; /* Invalid chunk size */
103 111
104 for (; count; count -= 8) { 112 for (; count; count -= 8) {
105 if (copy_from_user(&data, tmp, 8)) 113 if (copy_from_user(&data, tmp, 8)) {
106 return -EFAULT; 114 err = -EFAULT;
115 break;
116 }
107 err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); 117 err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
108 if (err) 118 if (err) {
109 return -EIO; 119 if (err == -EFAULT) /* Fix idiotic error code */
120 err = -EIO;
121 break;
122 }
110 tmp += 2; 123 tmp += 2;
124 bytes += 8;
111 } 125 }
112 126
113 return ((char __user *)tmp) - buf; 127 return bytes ? bytes : err;
114} 128}
115 129
116static int msr_open(struct inode *inode, struct file *file) 130static int msr_open(struct inode *inode, struct file *file)
@@ -131,7 +145,7 @@ static int msr_open(struct inode *inode, struct file *file)
131 ret = -EIO; /* MSR not supported */ 145 ret = -EIO; /* MSR not supported */
132out: 146out:
133 unlock_kernel(); 147 unlock_kernel();
134 return 0; 148 return ret;
135} 149}
136 150
137/* 151/*
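The msr_read()/msr_write() rework above changes the return convention for chunked transfers: stop at the first failing 8-byte chunk and return the bytes already transferred, falling back to the error code only when nothing was copied. A sketch of that convention, with a fake chunk reader standing in for rdmsr_safe_on_cpu():

#include <errno.h>
#include <stdio.h>

/* Stand-in for rdmsr_safe_on_cpu(): pretend the fourth chunk fails. */
static int read_chunk(size_t idx, unsigned long long *out)
{
    if (idx >= 3)
        return -EIO;
    *out = 0x1000 + idx;
    return 0;
}

/* Transfer 8-byte chunks; report bytes moved so far, or the error if
 * nothing was transferred -- the "return bytes ? bytes : err" pattern. */
static long chunked_read(unsigned long long *buf, size_t count)
{
    long bytes = 0;
    int err = 0;

    for (size_t i = 0; count; count -= 8, i++) {
        err = read_chunk(i, &buf[i]);
        if (err)
            break;
        bytes += 8;
    }
    return bytes ? bytes : err;
}

int main(void)
{
    unsigned long long buf[8];
    printf("%ld\n", chunked_read(buf, 64)); /* 24: three chunks succeeded */
    printf("%ld\n", chunked_read(buf, 0));  /* 0: nothing requested */
    return 0;
}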
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index ac6d51222e7d..abb78a2cc4ad 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -114,6 +114,23 @@ static __init void nmi_cpu_busy(void *data)
114} 114}
115#endif 115#endif
116 116
117static void report_broken_nmi(int cpu, int *prev_nmi_count)
118{
119 printk(KERN_CONT "\n");
120
121 printk(KERN_WARNING
122 "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
123 cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
124
125 printk(KERN_WARNING
126 "Please report this to bugzilla.kernel.org,\n");
127 printk(KERN_WARNING
128 "and attach the output of the 'dmesg' command.\n");
129
130 per_cpu(wd_enabled, cpu) = 0;
131 atomic_dec(&nmi_active);
132}
133
117int __init check_nmi_watchdog(void) 134int __init check_nmi_watchdog(void)
118{ 135{
119 unsigned int *prev_nmi_count; 136 unsigned int *prev_nmi_count;
@@ -141,15 +158,8 @@ int __init check_nmi_watchdog(void)
141 for_each_online_cpu(cpu) { 158 for_each_online_cpu(cpu) {
142 if (!per_cpu(wd_enabled, cpu)) 159 if (!per_cpu(wd_enabled, cpu))
143 continue; 160 continue;
144 if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { 161 if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
145 printk(KERN_WARNING "WARNING: CPU#%d: NMI " 162 report_broken_nmi(cpu, prev_nmi_count);
146 "appears to be stuck (%d->%d)!\n",
147 cpu,
148 prev_nmi_count[cpu],
149 get_nmi_count(cpu));
150 per_cpu(wd_enabled, cpu) = 0;
151 atomic_dec(&nmi_active);
152 }
153 } 163 }
154 endflag = 1; 164 endflag = 1;
155 if (!atomic_read(&nmi_active)) { 165 if (!atomic_read(&nmi_active)) {
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
index b8c45610b20a..eecc8c18f010 100644
--- a/arch/x86/kernel/numaq_32.c
+++ b/arch/x86/kernel/numaq_32.c
@@ -73,7 +73,7 @@ static void __init smp_dump_qct(void)
73} 73}
74 74
75 75
76void __init numaq_tsc_disable(void) 76void __cpuinit numaq_tsc_disable(void)
77{ 77{
78 if (!found_numaq) 78 if (!found_numaq)
79 return; 79 return;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c6044682e1e7..e2f43768723a 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -472,7 +472,7 @@ struct pv_lock_ops pv_lock_ops = {
472 .spin_unlock = __ticket_spin_unlock, 472 .spin_unlock = __ticket_spin_unlock,
473#endif 473#endif
474}; 474};
475EXPORT_SYMBOL_GPL(pv_lock_ops); 475EXPORT_SYMBOL(pv_lock_ops);
476 476
477EXPORT_SYMBOL_GPL(pv_time_ops); 477EXPORT_SYMBOL_GPL(pv_time_ops);
478EXPORT_SYMBOL (pv_cpu_ops); 478EXPORT_SYMBOL (pv_cpu_ops);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 151f2d171f7c..dcdac6c826e9 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -29,6 +29,7 @@
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/crash_dump.h>
32#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
33#include <linux/bitops.h> 34#include <linux/bitops.h>
34#include <linux/pci_ids.h> 35#include <linux/pci_ids.h>
@@ -36,6 +37,7 @@
36#include <linux/delay.h> 37#include <linux/delay.h>
37#include <linux/scatterlist.h> 38#include <linux/scatterlist.h>
38#include <linux/iommu-helper.h> 39#include <linux/iommu-helper.h>
40
39#include <asm/iommu.h> 41#include <asm/iommu.h>
40#include <asm/calgary.h> 42#include <asm/calgary.h>
41#include <asm/tce.h> 43#include <asm/tce.h>
@@ -167,6 +169,8 @@ static void calgary_dump_error_regs(struct iommu_table *tbl);
167static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev); 169static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
168static void calioc2_tce_cache_blast(struct iommu_table *tbl); 170static void calioc2_tce_cache_blast(struct iommu_table *tbl);
169static void calioc2_dump_error_regs(struct iommu_table *tbl); 171static void calioc2_dump_error_regs(struct iommu_table *tbl);
172static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl);
173static void get_tce_space_from_tar(void);
170 174
171static struct cal_chipset_ops calgary_chip_ops = { 175static struct cal_chipset_ops calgary_chip_ops = {
172 .handle_quirks = calgary_handle_quirks, 176 .handle_quirks = calgary_handle_quirks,
@@ -339,9 +343,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
339 /* were we called with bad_dma_address? */ 343 /* were we called with bad_dma_address? */
340 badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE); 344 badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
341 if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) { 345 if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
342 printk(KERN_ERR "Calgary: driver tried unmapping bad DMA " 346 WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
343 "address 0x%Lx\n", dma_addr); 347 "address 0x%Lx\n", dma_addr);
344 WARN_ON(1);
345 return; 348 return;
346 } 349 }
347 350
@@ -410,22 +413,6 @@ static void calgary_unmap_sg(struct device *dev,
410 } 413 }
411} 414}
412 415
413static int calgary_nontranslate_map_sg(struct device* dev,
414 struct scatterlist *sg, int nelems, int direction)
415{
416 struct scatterlist *s;
417 int i;
418
419 for_each_sg(sg, s, nelems, i) {
420 struct page *p = sg_page(s);
421
422 BUG_ON(!p);
423 s->dma_address = virt_to_bus(sg_virt(s));
424 s->dma_length = s->length;
425 }
426 return nelems;
427}
428
429static int calgary_map_sg(struct device *dev, struct scatterlist *sg, 416static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
430 int nelems, int direction) 417 int nelems, int direction)
431{ 418{
@@ -436,9 +423,6 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
436 unsigned long entry; 423 unsigned long entry;
437 int i; 424 int i;
438 425
439 if (!translation_enabled(tbl))
440 return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
441
442 for_each_sg(sg, s, nelems, i) { 426 for_each_sg(sg, s, nelems, i) {
443 BUG_ON(!sg_page(s)); 427 BUG_ON(!sg_page(s));
444 428
@@ -474,7 +458,6 @@ error:
474static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr, 458static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
475 size_t size, int direction) 459 size_t size, int direction)
476{ 460{
477 dma_addr_t dma_handle = bad_dma_address;
478 void *vaddr = phys_to_virt(paddr); 461 void *vaddr = phys_to_virt(paddr);
479 unsigned long uaddr; 462 unsigned long uaddr;
480 unsigned int npages; 463 unsigned int npages;
@@ -483,12 +466,7 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
483 uaddr = (unsigned long)vaddr; 466 uaddr = (unsigned long)vaddr;
484 npages = num_dma_pages(uaddr, size); 467 npages = num_dma_pages(uaddr, size);
485 468
486 if (translation_enabled(tbl)) 469 return iommu_alloc(dev, tbl, vaddr, npages, direction);
487 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction);
488 else
489 dma_handle = virt_to_bus(vaddr);
490
491 return dma_handle;
492} 470}
493 471
494static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, 472static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
@@ -497,9 +475,6 @@ static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
497 struct iommu_table *tbl = find_iommu_table(dev); 475 struct iommu_table *tbl = find_iommu_table(dev);
498 unsigned int npages; 476 unsigned int npages;
499 477
500 if (!translation_enabled(tbl))
501 return;
502
503 npages = num_dma_pages(dma_handle, size); 478 npages = num_dma_pages(dma_handle, size);
504 iommu_free(tbl, dma_handle, npages); 479 iommu_free(tbl, dma_handle, npages);
505} 480}
@@ -522,18 +497,12 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
522 goto error; 497 goto error;
523 memset(ret, 0, size); 498 memset(ret, 0, size);
524 499
525 if (translation_enabled(tbl)) { 500 /* set up tces to cover the allocated range */
526 /* set up tces to cover the allocated range */ 501 mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
527 mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL); 502 if (mapping == bad_dma_address)
528 if (mapping == bad_dma_address) 503 goto free;
529 goto free; 504 *dma_handle = mapping;
530
531 *dma_handle = mapping;
532 } else /* non translated slot */
533 *dma_handle = virt_to_bus(ret);
534
535 return ret; 505 return ret;
536
537free: 506free:
538 free_pages((unsigned long)ret, get_order(size)); 507 free_pages((unsigned long)ret, get_order(size));
539 ret = NULL; 508 ret = NULL;
@@ -541,7 +510,7 @@ error:
541 return ret; 510 return ret;
542} 511}
543 512
544static const struct dma_mapping_ops calgary_dma_ops = { 513static struct dma_mapping_ops calgary_dma_ops = {
545 .alloc_coherent = calgary_alloc_coherent, 514 .alloc_coherent = calgary_alloc_coherent,
546 .map_single = calgary_map_single, 515 .map_single = calgary_map_single,
547 .unmap_single = calgary_unmap_single, 516 .unmap_single = calgary_unmap_single,
@@ -830,7 +799,11 @@ static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
830 799
831 tbl = pci_iommu(dev->bus); 800 tbl = pci_iommu(dev->bus);
832 tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space; 801 tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space;
833 tce_free(tbl, 0, tbl->it_size); 802
803 if (is_kdump_kernel())
804 calgary_init_bitmap_from_tce_table(tbl);
805 else
806 tce_free(tbl, 0, tbl->it_size);
834 807
835 if (is_calgary(dev->device)) 808 if (is_calgary(dev->device))
836 tbl->chip_ops = &calgary_chip_ops; 809 tbl->chip_ops = &calgary_chip_ops;
@@ -1209,6 +1182,10 @@ static int __init calgary_init(void)
1209 if (ret) 1182 if (ret)
1210 return ret; 1183 return ret;
1211 1184
1185 /* Purely for kdump kernel case */
1186 if (is_kdump_kernel())
1187 get_tce_space_from_tar();
1188
1212 do { 1189 do {
1213 dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev); 1190 dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
1214 if (!dev) 1191 if (!dev)
@@ -1230,6 +1207,16 @@ static int __init calgary_init(void)
1230 goto error; 1207 goto error;
1231 } while (1); 1208 } while (1);
1232 1209
1210 dev = NULL;
1211 for_each_pci_dev(dev) {
1212 struct iommu_table *tbl;
1213
1214 tbl = find_iommu_table(&dev->dev);
1215
1216 if (translation_enabled(tbl))
1217 dev->dev.archdata.dma_ops = &calgary_dma_ops;
1218 }
1219
1233 return ret; 1220 return ret;
1234 1221
1235error: 1222error:
@@ -1251,6 +1238,7 @@ error:
1251 calgary_disable_translation(dev); 1238 calgary_disable_translation(dev);
1252 calgary_free_bus(dev); 1239 calgary_free_bus(dev);
1253 pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */ 1240 pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */
1241 dev->dev.archdata.dma_ops = NULL;
1254 } while (1); 1242 } while (1);
1255 1243
1256 return ret; 1244 return ret;
@@ -1280,13 +1268,15 @@ static inline int __init determine_tce_table_size(u64 ram)
1280static int __init build_detail_arrays(void) 1268static int __init build_detail_arrays(void)
1281{ 1269{
1282 unsigned long ptr; 1270 unsigned long ptr;
1283 int i, scal_detail_size, rio_detail_size; 1271 unsigned numnodes, i;
1272 int scal_detail_size, rio_detail_size;
1284 1273
1285 if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){ 1274 numnodes = rio_table_hdr->num_scal_dev;
1275 if (numnodes > MAX_NUMNODES){
1286 printk(KERN_WARNING 1276 printk(KERN_WARNING
1287 "Calgary: MAX_NUMNODES too low! Defined as %d, " 1277 "Calgary: MAX_NUMNODES too low! Defined as %d, "
1288 "but system has %d nodes.\n", 1278 "but system has %d nodes.\n",
1289 MAX_NUMNODES, rio_table_hdr->num_scal_dev); 1279 MAX_NUMNODES, numnodes);
1290 return -ENODEV; 1280 return -ENODEV;
1291 } 1281 }
1292 1282
@@ -1307,8 +1297,7 @@ static int __init build_detail_arrays(void)
1307 } 1297 }
1308 1298
1309 ptr = ((unsigned long)rio_table_hdr) + 3; 1299 ptr = ((unsigned long)rio_table_hdr) + 3;
1310 for (i = 0; i < rio_table_hdr->num_scal_dev; 1300 for (i = 0; i < numnodes; i++, ptr += scal_detail_size)
1311 i++, ptr += scal_detail_size)
1312 scal_devs[i] = (struct scal_detail *)ptr; 1301 scal_devs[i] = (struct scal_detail *)ptr;
1313 1302
1314 for (i = 0; i < rio_table_hdr->num_rio_dev; 1303 for (i = 0; i < rio_table_hdr->num_rio_dev;
@@ -1339,6 +1328,61 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)
1339 return (val != 0xffffffff); 1328 return (val != 0xffffffff);
1340} 1329}
1341 1330
1331/*
1332 * calgary_init_bitmap_from_tce_table():
1333 * Function for the kdump case. In the second/kdump kernel, initialize
1334 * the bitmap based on the TCE table entries obtained from the first kernel.
1335 */
1336static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
1337{
1338 u64 *tp;
1339 unsigned int index;
1340 tp = ((u64 *)tbl->it_base);
1341 for (index = 0 ; index < tbl->it_size; index++) {
1342 if (*tp != 0x0)
1343 set_bit(index, tbl->it_map);
1344 tp++;
1345 }
1346}
1347
1348/*
1349 * get_tce_space_from_tar():
1350 * Function for the kdump case. Get the TCE tables from the first kernel
1351 * by reading the contents of the base address register of the Calgary IOMMU.
1352 */
1353static void __init get_tce_space_from_tar(void)
1354{
1355 int bus;
1356 void __iomem *target;
1357 unsigned long tce_space;
1358
1359 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
1360 struct calgary_bus_info *info = &bus_info[bus];
1361 unsigned short pci_device;
1362 u32 val;
1363
1364 val = read_pci_config(bus, 0, 0, 0);
1365 pci_device = (val & 0xFFFF0000) >> 16;
1366
1367 if (!is_cal_pci_dev(pci_device))
1368 continue;
1369 if (info->translation_disabled)
1370 continue;
1371
1372 if (calgary_bus_has_devices(bus, pci_device) ||
1373 translate_empty_slots) {
1374 target = calgary_reg(bus_info[bus].bbar,
1375 tar_offset(bus));
1376 tce_space = be64_to_cpu(readq(target));
1377 tce_space = tce_space & TAR_SW_BITS;
1378
1379 tce_space = tce_space & (~specified_table_size);
1380 info->tce_space = (u64 *)__va(tce_space);
1381 }
1382 }
1383 return;
1384}
1385
1342void __init detect_calgary(void) 1386void __init detect_calgary(void)
1343{ 1387{
1344 int bus; 1388 int bus;
@@ -1394,7 +1438,8 @@ void __init detect_calgary(void)
1394 return; 1438 return;
1395 } 1439 }
1396 1440
1397 specified_table_size = determine_tce_table_size(max_pfn * PAGE_SIZE); 1441 specified_table_size = determine_tce_table_size((is_kdump_kernel() ?
1442 saved_max_pfn : max_pfn) * PAGE_SIZE);
1398 1443
1399 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) { 1444 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
1400 struct calgary_bus_info *info = &bus_info[bus]; 1445 struct calgary_bus_info *info = &bus_info[bus];
@@ -1412,10 +1457,16 @@ void __init detect_calgary(void)
1412 1457
1413 if (calgary_bus_has_devices(bus, pci_device) || 1458 if (calgary_bus_has_devices(bus, pci_device) ||
1414 translate_empty_slots) { 1459 translate_empty_slots) {
1415 tbl = alloc_tce_table(); 1460 /*
1416 if (!tbl) 1461 * If this is a kdump kernel, find and use the TCE tables
1417 goto cleanup; 1462 * from the first kernel; otherwise allocate TCE tables here
1418 info->tce_space = tbl; 1463 */
1464 if (!is_kdump_kernel()) {
1465 tbl = alloc_tce_table();
1466 if (!tbl)
1467 goto cleanup;
1468 info->tce_space = tbl;
1469 }
1419 calgary_found = 1; 1470 calgary_found = 1;
1420 } 1471 }
1421 } 1472 }
@@ -1430,6 +1481,10 @@ void __init detect_calgary(void)
1430 printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d, " 1481 printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d, "
1431 "CONFIG_IOMMU_DEBUG is %s.\n", specified_table_size, 1482 "CONFIG_IOMMU_DEBUG is %s.\n", specified_table_size,
1432 debugging ? "enabled" : "disabled"); 1483 debugging ? "enabled" : "disabled");
1484
1485 /* swiotlb for devices that aren't behind the Calgary. */
1486 if (max_pfn > MAX_DMA32_PFN)
1487 swiotlb = 1;
1433 } 1488 }
1434 return; 1489 return;
1435 1490
@@ -1446,7 +1501,7 @@ int __init calgary_iommu_init(void)
1446{ 1501{
1447 int ret; 1502 int ret;
1448 1503
1449 if (no_iommu || swiotlb) 1504 if (no_iommu || (swiotlb && !calgary_detected))
1450 return -ENODEV; 1505 return -ENODEV;
1451 1506
1452 if (!calgary_detected) 1507 if (!calgary_detected)
@@ -1459,15 +1514,14 @@ int __init calgary_iommu_init(void)
1459 if (ret) { 1514 if (ret) {
1460 printk(KERN_ERR "PCI-DMA: Calgary init failed %d, " 1515 printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
1461 "falling back to no_iommu\n", ret); 1516 "falling back to no_iommu\n", ret);
1462 if (max_pfn > MAX_DMA32_PFN)
1463 printk(KERN_ERR "WARNING more than 4GB of memory, "
1464 "32bit PCI may malfunction.\n");
1465 return ret; 1517 return ret;
1466 } 1518 }
1467 1519
1468 force_iommu = 1; 1520 force_iommu = 1;
1469 bad_dma_address = 0x0; 1521 bad_dma_address = 0x0;
1470 dma_ops = &calgary_dma_ops; 1522 /* dma_ops is set to swiotlb or nommu */
1523 if (!dma_ops)
1524 dma_ops = &nommu_dma_ops;
1471 1525
1472 return 0; 1526 return 0;
1473} 1527}
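In the kdump path above, calgary_init_bitmap_from_tce_table() rebuilds the allocator bitmap from a TCE table inherited from the first kernel instead of freeing it, so mappings that are still live stay reserved. A simplified standalone version of that scan (the table size and entry values below are made up):

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 16

/* Mark every non-zero TCE entry as "in use" in the allocation bitmap,
 * mirroring calgary_init_bitmap_from_tce_table(). */
static void init_bitmap_from_table(const uint64_t *tce, unsigned long *map,
                                   unsigned int size)
{
    for (unsigned int i = 0; i < size; i++)
        if (tce[i] != 0)
            map[i / (8 * sizeof(*map))] |= 1UL << (i % (8 * sizeof(*map)));
}

int main(void)
{
    uint64_t tce[TABLE_SIZE] = { 0 };
    unsigned long map[1] = { 0 };

    tce[2] = 0xdeadbeef;    /* pretend entries 2 and 5 were mapped */
    tce[5] = 0xcafef00d;
    init_bitmap_from_table(tce, map, TABLE_SIZE);
    printf("bitmap = 0x%lx\n", map[0]);   /* 0x24: bits 2 and 5 set */
    return 0;
}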
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index cbecb05551bb..87d4d6964ec2 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,7 +11,7 @@
11 11
12static int forbid_dac __read_mostly; 12static int forbid_dac __read_mostly;
13 13
14const struct dma_mapping_ops *dma_ops; 14struct dma_mapping_ops *dma_ops;
15EXPORT_SYMBOL(dma_ops); 15EXPORT_SYMBOL(dma_ops);
16 16
17static int iommu_sac_force __read_mostly; 17static int iommu_sac_force __read_mostly;
@@ -123,6 +123,14 @@ void __init pci_iommu_alloc(void)
123 123
124 pci_swiotlb_init(); 124 pci_swiotlb_init();
125} 125}
126
127unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
128{
129 unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
130
131 return size >> PAGE_SHIFT;
132}
133EXPORT_SYMBOL(iommu_num_pages);
126#endif 134#endif
127 135
128/* 136/*
@@ -192,126 +200,10 @@ static __init int iommu_setup(char *p)
192} 200}
193early_param("iommu", iommu_setup); 201early_param("iommu", iommu_setup);
194 202
195#ifdef CONFIG_X86_32
196int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
197 dma_addr_t device_addr, size_t size, int flags)
198{
199 void __iomem *mem_base = NULL;
200 int pages = size >> PAGE_SHIFT;
201 int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
202
203 if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
204 goto out;
205 if (!size)
206 goto out;
207 if (dev->dma_mem)
208 goto out;
209
210 /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
211
212 mem_base = ioremap(bus_addr, size);
213 if (!mem_base)
214 goto out;
215
216 dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
217 if (!dev->dma_mem)
218 goto out;
219 dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
220 if (!dev->dma_mem->bitmap)
221 goto free1_out;
222
223 dev->dma_mem->virt_base = mem_base;
224 dev->dma_mem->device_base = device_addr;
225 dev->dma_mem->size = pages;
226 dev->dma_mem->flags = flags;
227
228 if (flags & DMA_MEMORY_MAP)
229 return DMA_MEMORY_MAP;
230
231 return DMA_MEMORY_IO;
232
233 free1_out:
234 kfree(dev->dma_mem);
235 out:
236 if (mem_base)
237 iounmap(mem_base);
238 return 0;
239}
240EXPORT_SYMBOL(dma_declare_coherent_memory);
241
242void dma_release_declared_memory(struct device *dev)
243{
244 struct dma_coherent_mem *mem = dev->dma_mem;
245
246 if (!mem)
247 return;
248 dev->dma_mem = NULL;
249 iounmap(mem->virt_base);
250 kfree(mem->bitmap);
251 kfree(mem);
252}
253EXPORT_SYMBOL(dma_release_declared_memory);
254
255void *dma_mark_declared_memory_occupied(struct device *dev,
256 dma_addr_t device_addr, size_t size)
257{
258 struct dma_coherent_mem *mem = dev->dma_mem;
259 int pos, err;
260 int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);
261
262 pages >>= PAGE_SHIFT;
263
264 if (!mem)
265 return ERR_PTR(-EINVAL);
266
267 pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
268 err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
269 if (err != 0)
270 return ERR_PTR(err);
271 return mem->virt_base + (pos << PAGE_SHIFT);
272}
273EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
274
275static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
276 dma_addr_t *dma_handle, void **ret)
277{
278 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
279 int order = get_order(size);
280
281 if (mem) {
282 int page = bitmap_find_free_region(mem->bitmap, mem->size,
283 order);
284 if (page >= 0) {
285 *dma_handle = mem->device_base + (page << PAGE_SHIFT);
286 *ret = mem->virt_base + (page << PAGE_SHIFT);
287 memset(*ret, 0, size);
288 }
289 if (mem->flags & DMA_MEMORY_EXCLUSIVE)
290 *ret = NULL;
291 }
292 return (mem != NULL);
293}
294
295static int dma_release_coherent(struct device *dev, int order, void *vaddr)
296{
297 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
298
299 if (mem && vaddr >= mem->virt_base && vaddr <
300 (mem->virt_base + (mem->size << PAGE_SHIFT))) {
301 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
302
303 bitmap_release_region(mem->bitmap, page, order);
304 return 1;
305 }
306 return 0;
307}
308#else
309#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
310#define dma_release_coherent(dev, order, vaddr) (0)
311#endif /* CONFIG_X86_32 */
312
313int dma_supported(struct device *dev, u64 mask) 203int dma_supported(struct device *dev, u64 mask)
314{ 204{
205 struct dma_mapping_ops *ops = get_dma_ops(dev);
206
315#ifdef CONFIG_PCI 207#ifdef CONFIG_PCI
316 if (mask > 0xffffffff && forbid_dac > 0) { 208 if (mask > 0xffffffff && forbid_dac > 0) {
317 dev_info(dev, "PCI: Disallowing DAC for device\n"); 209 dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -319,8 +211,8 @@ int dma_supported(struct device *dev, u64 mask)
319 } 211 }
320#endif 212#endif
321 213
322 if (dma_ops->dma_supported) 214 if (ops->dma_supported)
323 return dma_ops->dma_supported(dev, mask); 215 return ops->dma_supported(dev, mask);
324 216
325 /* Copied from i386. Doesn't make much sense, because it will 217 /* Copied from i386. Doesn't make much sense, because it will
326 only work for pci_alloc_coherent. 218 only work for pci_alloc_coherent.
@@ -367,6 +259,7 @@ void *
367dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 259dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
368 gfp_t gfp) 260 gfp_t gfp)
369{ 261{
262 struct dma_mapping_ops *ops = get_dma_ops(dev);
370 void *memory = NULL; 263 void *memory = NULL;
371 struct page *page; 264 struct page *page;
372 unsigned long dma_mask = 0; 265 unsigned long dma_mask = 0;
@@ -376,7 +269,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
376 /* ignore region specifiers */ 269 /* ignore region specifiers */
377 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 270 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
378 271
379 if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory)) 272 if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
380 return memory; 273 return memory;
381 274
382 if (!dev) { 275 if (!dev) {
@@ -435,8 +328,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
435 /* Let low level make its own zone decisions */ 328 /* Let low level make its own zone decisions */
436 gfp &= ~(GFP_DMA32|GFP_DMA); 329 gfp &= ~(GFP_DMA32|GFP_DMA);
437 330
438 if (dma_ops->alloc_coherent) 331 if (ops->alloc_coherent)
439 return dma_ops->alloc_coherent(dev, size, 332 return ops->alloc_coherent(dev, size,
440 dma_handle, gfp); 333 dma_handle, gfp);
441 return NULL; 334 return NULL;
442 } 335 }
@@ -448,14 +341,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
448 } 341 }
449 } 342 }
450 343
451 if (dma_ops->alloc_coherent) { 344 if (ops->alloc_coherent) {
452 free_pages((unsigned long)memory, get_order(size)); 345 free_pages((unsigned long)memory, get_order(size));
453 gfp &= ~(GFP_DMA|GFP_DMA32); 346 gfp &= ~(GFP_DMA|GFP_DMA32);
454 return dma_ops->alloc_coherent(dev, size, dma_handle, gfp); 347 return ops->alloc_coherent(dev, size, dma_handle, gfp);
455 } 348 }
456 349
457 if (dma_ops->map_simple) { 350 if (ops->map_simple) {
458 *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory), 351 *dma_handle = ops->map_simple(dev, virt_to_phys(memory),
459 size, 352 size,
460 PCI_DMA_BIDIRECTIONAL); 353 PCI_DMA_BIDIRECTIONAL);
461 if (*dma_handle != bad_dma_address) 354 if (*dma_handle != bad_dma_address)
@@ -477,12 +370,14 @@ EXPORT_SYMBOL(dma_alloc_coherent);
477void dma_free_coherent(struct device *dev, size_t size, 370void dma_free_coherent(struct device *dev, size_t size,
478 void *vaddr, dma_addr_t bus) 371 void *vaddr, dma_addr_t bus)
479{ 372{
373 struct dma_mapping_ops *ops = get_dma_ops(dev);
374
480 int order = get_order(size); 375 int order = get_order(size);
481 WARN_ON(irqs_disabled()); /* for portability */ 376 WARN_ON(irqs_disabled()); /* for portability */
482 if (dma_release_coherent(dev, order, vaddr)) 377 if (dma_release_from_coherent(dev, order, vaddr))
483 return; 378 return;
484 if (dma_ops->unmap_single) 379 if (ops->unmap_single)
485 dma_ops->unmap_single(dev, bus, size, 0); 380 ops->unmap_single(dev, bus, size, 0);
486 free_pages((unsigned long)vaddr, order); 381 free_pages((unsigned long)vaddr, order);
487} 382}
488EXPORT_SYMBOL(dma_free_coherent); 383EXPORT_SYMBOL(dma_free_coherent);
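
The pci-dma.c hunks above replace every use of the single global dma_ops pointer with a per-device lookup through get_dma_ops(dev). The sketch below is a minimal user-space illustration of that dispatch pattern; the structure layout, the fallback variable and the gart-like ops are simplified stand-ins for demonstration, not the kernel's real definitions.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel structures used in the hunks above. */
struct device;

struct dma_mapping_ops {
        int (*dma_supported)(struct device *dev, unsigned long long mask);
};

struct device {
        struct dma_mapping_ops *archdata_dma_ops;       /* may be NULL */
};

static struct dma_mapping_ops fallback_ops;             /* plays the role of the global dma_ops */

/* Per-device lookup with a global fallback, like get_dma_ops(dev). */
static struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
        if (dev && dev->archdata_dma_ops)
                return dev->archdata_dma_ops;
        return &fallback_ops;
}

static int gart_dma_supported(struct device *dev, unsigned long long mask)
{
        (void)dev;
        return mask >= 0xffffffffULL;                   /* made-up policy for the demo */
}

static struct dma_mapping_ops gart_like_ops = { .dma_supported = gart_dma_supported };

/* Mirrors the dispatch in dma_supported(): ask the per-device ops first. */
static int dma_supported(struct device *dev, unsigned long long mask)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);
        return 1;                                       /* no hook: assume supported */
}

int main(void)
{
        struct device with_ops = { .archdata_dma_ops = &gart_like_ops };
        struct device without_ops = { .archdata_dma_ops = NULL };

        printf("gart-like ops, 24-bit mask: %d\n", dma_supported(&with_ops, 0xffffffULL));
        printf("gart-like ops, 32-bit mask: %d\n", dma_supported(&with_ops, 0xffffffffULL));
        printf("no per-device ops:          %d\n", dma_supported(&without_ops, 0xffffffULL));
        return 0;
}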
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index df5f142657d2..49285f8fd4d5 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -67,9 +67,6 @@ static u32 gart_unmapped_entry;
67 (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) 67 (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
68#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) 68#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
69 69
70#define to_pages(addr, size) \
71 (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
72
73#define EMERGENCY_PAGES 32 /* = 128KB */ 70#define EMERGENCY_PAGES 32 /* = 128KB */
74 71
75#ifdef CONFIG_AGP 72#ifdef CONFIG_AGP
@@ -241,7 +238,7 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
241static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, 238static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
242 size_t size, int dir) 239 size_t size, int dir)
243{ 240{
244 unsigned long npages = to_pages(phys_mem, size); 241 unsigned long npages = iommu_num_pages(phys_mem, size);
245 unsigned long iommu_page = alloc_iommu(dev, npages); 242 unsigned long iommu_page = alloc_iommu(dev, npages);
246 int i; 243 int i;
247 244
@@ -304,7 +301,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
304 return; 301 return;
305 302
306 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; 303 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
307 npages = to_pages(dma_addr, size); 304 npages = iommu_num_pages(dma_addr, size);
308 for (i = 0; i < npages; i++) { 305 for (i = 0; i < npages; i++) {
309 iommu_gatt_base[iommu_page + i] = gart_unmapped_entry; 306 iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
310 CLEAR_LEAK(iommu_page + i); 307 CLEAR_LEAK(iommu_page + i);
@@ -387,7 +384,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
387 } 384 }
388 385
389 addr = phys_addr; 386 addr = phys_addr;
390 pages = to_pages(s->offset, s->length); 387 pages = iommu_num_pages(s->offset, s->length);
391 while (pages--) { 388 while (pages--) {
392 iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr); 389 iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
393 SET_LEAK(iommu_page); 390 SET_LEAK(iommu_page);
@@ -470,7 +467,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
470 467
471 seg_size += s->length; 468 seg_size += s->length;
472 need = nextneed; 469 need = nextneed;
473 pages += to_pages(s->offset, s->length); 470 pages += iommu_num_pages(s->offset, s->length);
474 ps = s; 471 ps = s;
475 } 472 }
476 if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0) 473 if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
@@ -692,8 +689,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
692 689
693extern int agp_amd64_init(void); 690extern int agp_amd64_init(void);
694 691
695static const struct dma_mapping_ops gart_dma_ops = { 692static struct dma_mapping_ops gart_dma_ops = {
696 .mapping_error = NULL,
697 .map_single = gart_map_single, 693 .map_single = gart_map_single,
698 .map_simple = gart_map_simple, 694 .map_simple = gart_map_simple,
699 .unmap_single = gart_unmap_single, 695 .unmap_single = gart_unmap_single,
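
The gart hunks above drop the private to_pages() macro in favour of the shared iommu_num_pages() helper; both answer the same question, namely how many IOMMU pages the byte range [addr, addr + size) touches, counting partially covered first and last pages. A small stand-alone sketch of that arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))

/*
 * Number of pages spanned by [addr, addr + size): the offset into the
 * first page plus the length, rounded up to whole pages.  This mirrors
 * the old to_pages() macro removed above; iommu_num_pages() computes
 * the same thing as a shared helper.
 */
static unsigned long num_pages(unsigned long addr, unsigned long size)
{
        unsigned long span = (addr & ~PAGE_MASK) + size;

        return (span + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
        /* 2 bytes starting at the very end of a page straddle two pages... */
        printf("%lu\n", num_pages(0x1fff, 2));          /* -> 2 */
        /* ...while a page-aligned, page-sized buffer needs exactly one. */
        printf("%lu\n", num_pages(0x2000, 4096));       /* -> 1 */
        return 0;
}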
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 792b9179eff3..3f91f71cdc3e 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -72,21 +72,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
72 return nents; 72 return nents;
73} 73}
74 74
75/* Make sure we keep the same behaviour */ 75struct dma_mapping_ops nommu_dma_ops = {
76static int nommu_mapping_error(dma_addr_t dma_addr)
77{
78#ifdef CONFIG_X86_32
79 return 0;
80#else
81 return (dma_addr == bad_dma_address);
82#endif
83}
84
85
86const struct dma_mapping_ops nommu_dma_ops = {
87 .map_single = nommu_map_single, 76 .map_single = nommu_map_single,
88 .map_sg = nommu_map_sg, 77 .map_sg = nommu_map_sg,
89 .mapping_error = nommu_mapping_error,
90 .is_phys = 1, 78 .is_phys = 1,
91}; 79};
92 80
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 20df839b9c20..c4ce0332759e 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,7 +18,7 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
18 return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction); 18 return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
19} 19}
20 20
21const struct dma_mapping_ops swiotlb_dma_ops = { 21struct dma_mapping_ops swiotlb_dma_ops = {
22 .mapping_error = swiotlb_dma_mapping_error, 22 .mapping_error = swiotlb_dma_mapping_error,
23 .alloc_coherent = swiotlb_alloc_coherent, 23 .alloc_coherent = swiotlb_alloc_coherent,
24 .free_coherent = swiotlb_free_coherent, 24 .free_coherent = swiotlb_free_coherent,
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 7b9ee9f09639..62a4790e425d 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -96,7 +96,6 @@ static inline void play_dead(void)
96{ 96{
97 /* This must be done before dead CPU ack */ 97 /* This must be done before dead CPU ack */
98 cpu_exit_clear(); 98 cpu_exit_clear();
99 wbinvd();
100 mb(); 99 mb();
101 /* Ack it */ 100 /* Ack it */
102 __get_cpu_var(cpu_state) = CPU_DEAD; 101 __get_cpu_var(cpu_state) = CPU_DEAD;
@@ -105,8 +104,8 @@ static inline void play_dead(void)
105 * With physical CPU hotplug, we should halt the cpu 104 * With physical CPU hotplug, we should halt the cpu
106 */ 105 */
107 local_irq_disable(); 106 local_irq_disable();
108 while (1) 107 /* mask all interrupts, flush any and all caches, and halt */
109 halt(); 108 wbinvd_halt();
110} 109}
111#else 110#else
112static inline void play_dead(void) 111static inline void play_dead(void)
@@ -129,7 +128,7 @@ void cpu_idle(void)
129 128
130 /* endless idle loop with no priority at all */ 129 /* endless idle loop with no priority at all */
131 while (1) { 130 while (1) {
132 tick_nohz_stop_sched_tick(); 131 tick_nohz_stop_sched_tick(1);
133 while (!need_resched()) { 132 while (!need_resched()) {
134 133
135 check_pgt_cache(); 134 check_pgt_cache();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e8a8e1b99817..71553b664e2a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -93,14 +93,13 @@ DECLARE_PER_CPU(int, cpu_state);
93static inline void play_dead(void) 93static inline void play_dead(void)
94{ 94{
95 idle_task_exit(); 95 idle_task_exit();
96 wbinvd();
97 mb(); 96 mb();
98 /* Ack it */ 97 /* Ack it */
99 __get_cpu_var(cpu_state) = CPU_DEAD; 98 __get_cpu_var(cpu_state) = CPU_DEAD;
100 99
101 local_irq_disable(); 100 local_irq_disable();
102 while (1) 101 /* mask all interrupts, flush any and all caches, and halt */
103 halt(); 102 wbinvd_halt();
104} 103}
105#else 104#else
106static inline void play_dead(void) 105static inline void play_dead(void)
@@ -120,7 +119,7 @@ void cpu_idle(void)
120 current_thread_info()->status |= TS_POLLING; 119 current_thread_info()->status |= TS_POLLING;
121 /* endless idle loop with no priority at all */ 120 /* endless idle loop with no priority at all */
122 while (1) { 121 while (1) {
123 tick_nohz_stop_sched_tick(); 122 tick_nohz_stop_sched_tick(1);
124 while (!need_resched()) { 123 while (!need_resched()) {
125 124
126 rmb(); 125 rmb();
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 06a9f643817e..724adfc63cb9 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -414,25 +414,20 @@ void native_machine_shutdown(void)
414 414
415 /* The boot cpu is always logical cpu 0 */ 415 /* The boot cpu is always logical cpu 0 */
416 int reboot_cpu_id = 0; 416 int reboot_cpu_id = 0;
417 cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
418 417
419#ifdef CONFIG_X86_32 418#ifdef CONFIG_X86_32
420 /* See if there has been given a command line override */ 419 /* See if there has been given a command line override */
421 if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && 420 if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
422 cpu_online(reboot_cpu)) { 421 cpu_online(reboot_cpu))
423 reboot_cpu_id = reboot_cpu; 422 reboot_cpu_id = reboot_cpu;
424 cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
425 }
426#endif 423#endif
427 424
428 /* Make certain the cpu I'm about to reboot on is online */ 425 /* Make certain the cpu I'm about to reboot on is online */
429 if (!cpu_online(reboot_cpu_id)) { 426 if (!cpu_online(reboot_cpu_id))
430 reboot_cpu_id = smp_processor_id(); 427 reboot_cpu_id = smp_processor_id();
431 cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
432 }
433 428
434 /* Make certain I only run on the appropriate processor */ 429 /* Make certain I only run on the appropriate processor */
435 set_cpus_allowed_ptr(current, newmask); 430 set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
436 431
437 /* O.K Now that I'm on the appropriate processor, 432 /* O.K Now that I'm on the appropriate processor,
438 * stop all of the others. 433 * stop all of the others.
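
native_machine_shutdown() now pins itself with set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)) instead of the removed cpumask_of_cpu_ptr helpers. The closest user-space analogue of "migrate me onto exactly this CPU before continuing" is sched_setaffinity() with a single-bit mask; a hedged sketch (Linux- and glibc-specific, the target CPU 0 is an arbitrary choice for the demo):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t mask;
        int target = 0;                 /* arbitrary choice for the demo */

        CPU_ZERO(&mask);
        CPU_SET(target, &mask);         /* single-CPU mask, like cpumask_of_cpu() */

        /* Pin the calling thread to that one CPU before doing the real work. */
        if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
                perror("sched_setaffinity");
                return 1;
        }

        printf("now running on CPU %d\n", sched_getcpu());
        return 0;
}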
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index c30fe25d470d..6f50664b2ba5 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -20,11 +20,45 @@
20#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 20#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
21#define PAE_PGD_ATTR (_PAGE_PRESENT) 21#define PAE_PGD_ATTR (_PAGE_PRESENT)
22 22
23/* control_page + KEXEC_CONTROL_CODE_MAX_SIZE
24 * ~ control_page + PAGE_SIZE are used as data storage and stack for
25 * jumping back
26 */
27#define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))
28
29/* Minimal CPU state */
30#define ESP DATA(0x0)
31#define CR0 DATA(0x4)
32#define CR3 DATA(0x8)
33#define CR4 DATA(0xc)
34
35/* other data */
36#define CP_VA_CONTROL_PAGE DATA(0x10)
37#define CP_PA_PGD DATA(0x14)
38#define CP_PA_SWAP_PAGE DATA(0x18)
39#define CP_PA_BACKUP_PAGES_MAP DATA(0x1c)
40
23 .text 41 .text
24 .align PAGE_SIZE 42 .align PAGE_SIZE
25 .globl relocate_kernel 43 .globl relocate_kernel
26relocate_kernel: 44relocate_kernel:
27 movl 8(%esp), %ebp /* list of pages */ 45 /* Save the CPU context, used for jumping back */
46
47 pushl %ebx
48 pushl %esi
49 pushl %edi
50 pushl %ebp
51 pushf
52
53 movl 20+8(%esp), %ebp /* list of pages */
54 movl PTR(VA_CONTROL_PAGE)(%ebp), %edi
55 movl %esp, ESP(%edi)
56 movl %cr0, %eax
57 movl %eax, CR0(%edi)
58 movl %cr3, %eax
59 movl %eax, CR3(%edi)
60 movl %cr4, %eax
61 movl %eax, CR4(%edi)
28 62
29#ifdef CONFIG_X86_PAE 63#ifdef CONFIG_X86_PAE
30 /* map the control page at its virtual address */ 64 /* map the control page at its virtual address */
@@ -138,15 +172,25 @@ relocate_kernel:
138 172
139relocate_new_kernel: 173relocate_new_kernel:
140 /* read the arguments and say goodbye to the stack */ 174 /* read the arguments and say goodbye to the stack */
141 movl 4(%esp), %ebx /* page_list */ 175 movl 20+4(%esp), %ebx /* page_list */
142 movl 8(%esp), %ebp /* list of pages */ 176 movl 20+8(%esp), %ebp /* list of pages */
143 movl 12(%esp), %edx /* start address */ 177 movl 20+12(%esp), %edx /* start address */
144 movl 16(%esp), %ecx /* cpu_has_pae */ 178 movl 20+16(%esp), %ecx /* cpu_has_pae */
179 movl 20+20(%esp), %esi /* preserve_context */
145 180
146 /* zero out flags, and disable interrupts */ 181 /* zero out flags, and disable interrupts */
147 pushl $0 182 pushl $0
148 popfl 183 popfl
149 184
185 /* save some information for jumping back */
186 movl PTR(VA_CONTROL_PAGE)(%ebp), %edi
187 movl %edi, CP_VA_CONTROL_PAGE(%edi)
188 movl PTR(PA_PGD)(%ebp), %eax
189 movl %eax, CP_PA_PGD(%edi)
190 movl PTR(PA_SWAP_PAGE)(%ebp), %eax
191 movl %eax, CP_PA_SWAP_PAGE(%edi)
192 movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi)
193
150 /* get physical address of control page now */ 194 /* get physical address of control page now */
151 /* this is impossible after page table switch */ 195 /* this is impossible after page table switch */
152 movl PTR(PA_CONTROL_PAGE)(%ebp), %edi 196 movl PTR(PA_CONTROL_PAGE)(%ebp), %edi
@@ -197,8 +241,90 @@ identity_mapped:
197 xorl %eax, %eax 241 xorl %eax, %eax
198 movl %eax, %cr3 242 movl %eax, %cr3
199 243
244 movl CP_PA_SWAP_PAGE(%edi), %eax
245 pushl %eax
246 pushl %ebx
247 call swap_pages
248 addl $8, %esp
249
250 /* To be certain of avoiding problems with self-modifying code
251 * I need to execute a serializing instruction here.
252 * So I flush the TLB, it's handy, and not processor dependent.
253 */
254 xorl %eax, %eax
255 movl %eax, %cr3
256
257 /* set all of the registers to known values */
258 /* leave %esp alone */
259
260 testl %esi, %esi
261 jnz 1f
262 xorl %edi, %edi
263 xorl %eax, %eax
264 xorl %ebx, %ebx
265 xorl %ecx, %ecx
266 xorl %edx, %edx
267 xorl %esi, %esi
268 xorl %ebp, %ebp
269 ret
2701:
271 popl %edx
272 movl CP_PA_SWAP_PAGE(%edi), %esp
273 addl $PAGE_SIZE, %esp
2742:
275 call *%edx
276
277 /* get the re-entry point of the peer system */
278 movl 0(%esp), %ebp
279 call 1f
2801:
281 popl %ebx
282 subl $(1b - relocate_kernel), %ebx
283 movl CP_VA_CONTROL_PAGE(%ebx), %edi
284 lea PAGE_SIZE(%ebx), %esp
285 movl CP_PA_SWAP_PAGE(%ebx), %eax
286 movl CP_PA_BACKUP_PAGES_MAP(%ebx), %edx
287 pushl %eax
288 pushl %edx
289 call swap_pages
290 addl $8, %esp
291 movl CP_PA_PGD(%ebx), %eax
292 movl %eax, %cr3
293 movl %cr0, %eax
294 orl $(1<<31), %eax
295 movl %eax, %cr0
296 lea PAGE_SIZE(%edi), %esp
297 movl %edi, %eax
298 addl $(virtual_mapped - relocate_kernel), %eax
299 pushl %eax
300 ret
301
302virtual_mapped:
303 movl CR4(%edi), %eax
304 movl %eax, %cr4
305 movl CR3(%edi), %eax
306 movl %eax, %cr3
307 movl CR0(%edi), %eax
308 movl %eax, %cr0
309 movl ESP(%edi), %esp
310 movl %ebp, %eax
311
312 popf
313 popl %ebp
314 popl %edi
315 popl %esi
316 popl %ebx
317 ret
318
200 /* Do the copies */ 319 /* Do the copies */
201 movl %ebx, %ecx 320swap_pages:
321 movl 8(%esp), %edx
322 movl 4(%esp), %ecx
323 pushl %ebp
324 pushl %ebx
325 pushl %edi
326 pushl %esi
327 movl %ecx, %ebx
202 jmp 1f 328 jmp 1f
203 329
2040: /* top, read another word from the indirection page */ 3300: /* top, read another word from the indirection page */
@@ -226,27 +352,31 @@ identity_mapped:
226 movl %ecx, %esi /* For every source page do a copy */ 352 movl %ecx, %esi /* For every source page do a copy */
227 andl $0xfffff000, %esi 353 andl $0xfffff000, %esi
228 354
355 movl %edi, %eax
356 movl %esi, %ebp
357
358 movl %edx, %edi
229 movl $1024, %ecx 359 movl $1024, %ecx
230 rep ; movsl 360 rep ; movsl
231 jmp 0b
232
2333:
234 361
235 /* To be certain of avoiding problems with self-modifying code 362 movl %ebp, %edi
236 * I need to execute a serializing instruction here. 363 movl %eax, %esi
237 * So I flush the TLB, it's handy, and not processor dependent. 364 movl $1024, %ecx
238 */ 365 rep ; movsl
239 xorl %eax, %eax
240 movl %eax, %cr3
241 366
242 /* set all of the registers to known values */ 367 movl %eax, %edi
243 /* leave %esp alone */ 368 movl %edx, %esi
369 movl $1024, %ecx
370 rep ; movsl
244 371
245 xorl %eax, %eax 372 lea PAGE_SIZE(%ebp), %esi
246 xorl %ebx, %ebx 373 jmp 0b
247 xorl %ecx, %ecx 3743:
248 xorl %edx, %edx 375 popl %esi
249 xorl %esi, %esi 376 popl %edi
250 xorl %edi, %edi 377 popl %ebx
251 xorl %ebp, %ebp 378 popl %ebp
252 ret 379 ret
380
381 .globl kexec_control_code_size
382.set kexec_control_code_size, . - relocate_kernel
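
The relocate_kernel changes above carve out a small save area in the control page, starting at offset KEXEC_CONTROL_CODE_MAX_SIZE: the saved ESP/CR0/CR3/CR4, followed by the virtual control-page address, the page directory, the swap page and the backup-pages map. The DATA(offset) constants are hand-maintained, so the fragment below restates that layout as a C11 struct and checks the offsets at compile time. It is only an illustration of the layout the macros describe, not a structure the kernel actually defines.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Illustrative mirror of the DATA(x) offsets used by relocate_kernel_32.S;
 * the save area lives at control_page + KEXEC_CONTROL_CODE_MAX_SIZE.
 */
struct kexec_jump_back_area {
        uint32_t esp;                   /* DATA(0x0)  */
        uint32_t cr0;                   /* DATA(0x4)  */
        uint32_t cr3;                   /* DATA(0x8)  */
        uint32_t cr4;                   /* DATA(0xc)  */
        uint32_t va_control_page;       /* DATA(0x10) */
        uint32_t pa_pgd;                /* DATA(0x14) */
        uint32_t pa_swap_page;          /* DATA(0x18) */
        uint32_t pa_backup_pages_map;   /* DATA(0x1c) */
};

/* Keep the C view honest with respect to the hand-written offsets. */
_Static_assert(offsetof(struct kexec_jump_back_area, cr4) == 0xc,
               "cr4 must sit at DATA(0xc)");
_Static_assert(offsetof(struct kexec_jump_back_area, pa_backup_pages_map) == 0x1c,
               "backup pages map must sit at DATA(0x1c)");

int main(void)
{
        printf("save area size: %zu bytes\n", sizeof(struct kexec_jump_back_area));
        return 0;
}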
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b4aacb9f52e3..9838f2539dfc 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -445,7 +445,7 @@ static void __init reserve_early_setup_data(void)
445 * @size: Size of the crashkernel memory to reserve. 445 * @size: Size of the crashkernel memory to reserve.
446 * Returns the base address on success, and -1ULL on failure. 446 * Returns the base address on success, and -1ULL on failure.
447 */ 447 */
448unsigned long long find_and_reserve_crashkernel(unsigned long long size) 448unsigned long long __init find_and_reserve_crashkernel(unsigned long long size)
449{ 449{
450 const unsigned long long alignment = 16<<20; /* 16M */ 450 const unsigned long long alignment = 16<<20; /* 16M */
451 unsigned long long start = 0LL; 451 unsigned long long start = 0LL;
@@ -597,11 +597,11 @@ void __init setup_arch(char **cmdline_p)
597 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); 597 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
598 visws_early_detect(); 598 visws_early_detect();
599 pre_setup_arch_hook(); 599 pre_setup_arch_hook();
600 early_cpu_init();
601#else 600#else
602 printk(KERN_INFO "Command line: %s\n", boot_command_line); 601 printk(KERN_INFO "Command line: %s\n", boot_command_line);
603#endif 602#endif
604 603
604 early_cpu_init();
605 early_ioremap_init(); 605 early_ioremap_init();
606 606
607 ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); 607 ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
@@ -665,14 +665,23 @@ void __init setup_arch(char **cmdline_p)
665 bss_resource.start = virt_to_phys(&__bss_start); 665 bss_resource.start = virt_to_phys(&__bss_start);
666 bss_resource.end = virt_to_phys(&__bss_stop)-1; 666 bss_resource.end = virt_to_phys(&__bss_stop)-1;
667 667
668#ifdef CONFIG_X86_64
669 early_cpu_init();
670#endif
671 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); 668 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
672 *cmdline_p = command_line; 669 *cmdline_p = command_line;
673 670
674 parse_early_param(); 671 parse_early_param();
675 672
673#ifdef CONFIG_X86_64
674 check_efer();
675#endif
676
677#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
678 /*
679 * Must be before kernel pagetables are setup
680 * or fixmap area is touched.
681 */
682 vmi_init();
683#endif
684
676 /* after early param, so could get panic from serial */ 685 /* after early param, so could get panic from serial */
677 reserve_early_setup_data(); 686 reserve_early_setup_data();
678 687
@@ -733,7 +742,6 @@ void __init setup_arch(char **cmdline_p)
733#else 742#else
734 num_physpages = max_pfn; 743 num_physpages = max_pfn;
735 744
736 check_efer();
737 745
738 /* How many end-of-memory variables you have, grandma! */ 746 /* How many end-of-memory variables you have, grandma! */
739 /* need this before calling reserve_initrd */ 747 /* need this before calling reserve_initrd */
@@ -791,10 +799,6 @@ void __init setup_arch(char **cmdline_p)
791 799
792 initmem_init(0, max_pfn); 800 initmem_init(0, max_pfn);
793 801
794#ifdef CONFIG_X86_64
795 dma32_reserve_bootmem();
796#endif
797
798#ifdef CONFIG_ACPI_SLEEP 802#ifdef CONFIG_ACPI_SLEEP
799 /* 803 /*
800 * Reserve low memory region for sleep support. 804 * Reserve low memory region for sleep support.
@@ -809,20 +813,21 @@ void __init setup_arch(char **cmdline_p)
809#endif 813#endif
810 reserve_crashkernel(); 814 reserve_crashkernel();
811 815
816#ifdef CONFIG_X86_64
817 /*
818 * dma32_reserve_bootmem() allocates bootmem which may conflict
819 * with the crashkernel command line, so do that after
820 * reserve_crashkernel()
821 */
822 dma32_reserve_bootmem();
823#endif
824
812 reserve_ibft_region(); 825 reserve_ibft_region();
813 826
814#ifdef CONFIG_KVM_CLOCK 827#ifdef CONFIG_KVM_CLOCK
815 kvmclock_init(); 828 kvmclock_init();
816#endif 829#endif
817 830
818#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
819 /*
820 * Must be after max_low_pfn is determined, and before kernel
821 * pagetables are setup.
822 */
823 vmi_init();
824#endif
825
826 paravirt_pagetable_setup_start(swapper_pg_dir); 831 paravirt_pagetable_setup_start(swapper_pg_dir);
827 paging_init(); 832 paging_init();
828 paravirt_pagetable_setup_done(swapper_pg_dir); 833 paravirt_pagetable_setup_done(swapper_pg_dir);
@@ -859,12 +864,6 @@ void __init setup_arch(char **cmdline_p)
859 init_apic_mappings(); 864 init_apic_mappings();
860 ioapic_init_mappings(); 865 ioapic_init_mappings();
861 866
862#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
863 if (def_to_bigsmp)
864 printk(KERN_WARNING "More than 8 CPUs detected and "
865 "CONFIG_X86_PC cannot handle it.\nUse "
866 "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
867#endif
868 kvm_guest_init(); 867 kvm_guest_init();
869 868
870 e820_reserve_resources(); 869 e820_reserve_resources();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 61f3966632a8..0e67f72d9316 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -80,24 +80,6 @@ static void __init setup_per_cpu_maps(void)
80#endif 80#endif
81} 81}
82 82
83#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
84cpumask_t *cpumask_of_cpu_map __read_mostly;
85EXPORT_SYMBOL(cpumask_of_cpu_map);
86
87/* requires nr_cpu_ids to be initialized */
88static void __init setup_cpumask_of_cpu(void)
89{
90 int i;
91
92 /* alloc_bootmem zeroes memory */
93 cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
94 for (i = 0; i < nr_cpu_ids; i++)
95 cpu_set(i, cpumask_of_cpu_map[i]);
96}
97#else
98static inline void setup_cpumask_of_cpu(void) { }
99#endif
100
101#ifdef CONFIG_X86_32 83#ifdef CONFIG_X86_32
102/* 84/*
103 * Great future not-so-futuristic plan: make i386 and x86_64 do it 85 * Great future not-so-futuristic plan: make i386 and x86_64 do it
@@ -204,9 +186,6 @@ void __init setup_per_cpu_areas(void)
204 186
205 /* Setup node to cpumask map */ 187 /* Setup node to cpumask map */
206 setup_node_to_cpumask_map(); 188 setup_node_to_cpumask_map();
207
208 /* Setup cpumask_of_cpu map */
209 setup_cpumask_of_cpu();
210} 189}
211 190
212#endif 191#endif
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 47c3d249e638..ca316b5b742c 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -53,6 +53,68 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
53 return do_sigaltstack(uss, uoss, regs->sp); 53 return do_sigaltstack(uss, uoss, regs->sp);
54} 54}
55 55
56/*
57 * Signal frame handlers.
58 */
59
60static inline int save_i387(struct _fpstate __user *buf)
61{
62 struct task_struct *tsk = current;
63 int err = 0;
64
65 BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
66 sizeof(tsk->thread.xstate->fxsave));
67
68 if ((unsigned long)buf % 16)
69 printk("save_i387: bad fpstate %p\n", buf);
70
71 if (!used_math())
72 return 0;
73 clear_used_math(); /* trigger finit */
74 if (task_thread_info(tsk)->status & TS_USEDFPU) {
75 err = save_i387_checking((struct i387_fxsave_struct __user *)
76 buf);
77 if (err)
78 return err;
79 task_thread_info(tsk)->status &= ~TS_USEDFPU;
80 stts();
81 } else {
82 if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
83 sizeof(struct i387_fxsave_struct)))
84 return -1;
85 }
86 return 1;
87}
88
89/*
90 * This restores directly out of user space. Exceptions are handled.
91 */
92static inline int restore_i387(struct _fpstate __user *buf)
93{
94 struct task_struct *tsk = current;
95 int err;
96
97 if (!used_math()) {
98 err = init_fpu(tsk);
99 if (err)
100 return err;
101 }
102
103 if (!(task_thread_info(current)->status & TS_USEDFPU)) {
104 clts();
105 task_thread_info(current)->status |= TS_USEDFPU;
106 }
107 err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
108 if (unlikely(err)) {
109 /*
110 * Encountered an error while doing the restore from the
111 * user buffer, clear the fpu state.
112 */
113 clear_fpu(tsk);
114 clear_used_math();
115 }
116 return err;
117}
56 118
57/* 119/*
58 * Do a signal return; undo the signal stack. 120 * Do a signal return; undo the signal stack.
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 332512767f4f..7985c5b3f916 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -326,12 +326,16 @@ static void __cpuinit start_secondary(void *unused)
326 * for which cpus receive the IPI. Holding this 326 * for which cpus receive the IPI. Holding this
327 * lock helps us to not include this cpu in a currently in progress 327 * lock helps us to not include this cpu in a currently in progress
328 * smp_call_function(). 328 * smp_call_function().
329 *
 330 * We need to hold vector_lock so that the set of online cpus
331 * does not change while we are assigning vectors to cpus. Holding
332 * this lock ensures we don't half assign or remove an irq from a cpu.
329 */ 333 */
330 ipi_call_lock_irq(); 334 ipi_call_lock_irq();
331#ifdef CONFIG_X86_IO_APIC 335 lock_vector_lock();
332 setup_vector_irq(smp_processor_id()); 336 __setup_vector_irq(smp_processor_id());
333#endif
334 cpu_set(smp_processor_id(), cpu_online_map); 337 cpu_set(smp_processor_id(), cpu_online_map);
338 unlock_vector_lock();
335 ipi_call_unlock_irq(); 339 ipi_call_unlock_irq();
336 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 340 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
337 341
@@ -752,6 +756,14 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
752} 756}
753 757
754#ifdef CONFIG_X86_64 758#ifdef CONFIG_X86_64
759
760/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
761static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
762{
763 if (!after_bootmem)
764 free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
765}
766
755/* 767/*
756 * Allocate node local memory for the AP pda. 768 * Allocate node local memory for the AP pda.
757 * 769 *
@@ -780,8 +792,7 @@ int __cpuinit get_local_pda(int cpu)
780 792
781 if (oldpda) { 793 if (oldpda) {
782 memcpy(newpda, oldpda, size); 794 memcpy(newpda, oldpda, size);
783 if (!after_bootmem) 795 free_bootmem_pda(oldpda);
784 free_bootmem((unsigned long)oldpda, size);
785 } 796 }
786 797
787 newpda->in_bootmem = 0; 798 newpda->in_bootmem = 0;
@@ -1044,6 +1055,34 @@ static __init void disable_smp(void)
1044static int __init smp_sanity_check(unsigned max_cpus) 1055static int __init smp_sanity_check(unsigned max_cpus)
1045{ 1056{
1046 preempt_disable(); 1057 preempt_disable();
1058
1059#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
1060 if (def_to_bigsmp && nr_cpu_ids > 8) {
1061 unsigned int cpu;
1062 unsigned nr;
1063
1064 printk(KERN_WARNING
1065 "More than 8 CPUs detected - skipping them.\n"
1066 "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
1067
1068 nr = 0;
1069 for_each_present_cpu(cpu) {
1070 if (nr >= 8)
1071 cpu_clear(cpu, cpu_present_map);
1072 nr++;
1073 }
1074
1075 nr = 0;
1076 for_each_possible_cpu(cpu) {
1077 if (nr >= 8)
1078 cpu_clear(cpu, cpu_possible_map);
1079 nr++;
1080 }
1081
1082 nr_cpu_ids = 8;
1083 }
1084#endif
1085
1047 if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { 1086 if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
1048 printk(KERN_WARNING "weird, boot CPU (#%d) not listed" 1087 printk(KERN_WARNING "weird, boot CPU (#%d) not listed"
1049 "by the BIOS.\n", hard_smp_processor_id()); 1088 "by the BIOS.\n", hard_smp_processor_id());
@@ -1182,6 +1221,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1182 printk(KERN_INFO "CPU%d: ", 0); 1221 printk(KERN_INFO "CPU%d: ", 0);
1183 print_cpu_info(&cpu_data(0)); 1222 print_cpu_info(&cpu_data(0));
1184 setup_boot_clock(); 1223 setup_boot_clock();
1224
1225 if (is_uv_system())
1226 uv_system_init();
1185out: 1227out:
1186 preempt_enable(); 1228 preempt_enable();
1187} 1229}
@@ -1336,7 +1378,9 @@ int __cpu_disable(void)
1336 remove_siblinginfo(cpu); 1378 remove_siblinginfo(cpu);
1337 1379
1338 /* It's now safe to remove this processor from the online map */ 1380 /* It's now safe to remove this processor from the online map */
1381 lock_vector_lock();
1339 remove_cpu_from_maps(cpu); 1382 remove_cpu_from_maps(cpu);
1383 unlock_vector_lock();
1340 fixup_irqs(cpu_online_map); 1384 fixup_irqs(cpu_online_map);
1341 return 0; 1385 return 0;
1342} 1386}
@@ -1370,17 +1414,3 @@ void __cpu_die(unsigned int cpu)
1370 BUG(); 1414 BUG();
1371} 1415}
1372#endif 1416#endif
1373
1374/*
1375 * If the BIOS enumerates physical processors before logical,
1376 * maxcpus=N at enumeration-time can be used to disable HT.
1377 */
1378static int __init parse_maxcpus(char *arg)
1379{
1380 extern unsigned int maxcpus;
1381
1382 if (arg)
1383 maxcpus = simple_strtoul(arg, NULL, 0);
1384 return 0;
1385}
1386early_param("maxcpus", parse_maxcpus);
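
smp_sanity_check() now clips the present and possible maps to the first eight CPUs when a CONFIG_X86_PC kernel trips the def_to_bigsmp case. The loop below is a stand-alone sketch of that "keep the first N set bits, clear the rest" idea on a plain bitmask; the kernel works on cpu_present_map/cpu_possible_map with cpu_clear(), so the uint64_t mask here is only an illustration.

#include <stdint.h>
#include <stdio.h>

/* Keep at most 'limit' set bits in 'map', clearing everything beyond them. */
static uint64_t clip_to_first_n(uint64_t map, unsigned int limit)
{
        unsigned int seen = 0;

        for (unsigned int bit = 0; bit < 64; bit++) {
                if (!(map & (UINT64_C(1) << bit)))
                        continue;
                if (seen >= limit)
                        map &= ~(UINT64_C(1) << bit);   /* like cpu_clear(cpu, map) */
                seen++;
        }
        return map;
}

int main(void)
{
        uint64_t present = 0xffff;      /* 16 CPUs present */

        printf("before: %#llx\n", (unsigned long long)present);
        printf("after:  %#llx\n", (unsigned long long)clip_to_first_n(present, 8));
        return 0;
}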
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
index 99941b37eca0..397e309839dd 100644
--- a/arch/x86/kernel/smpcommon.c
+++ b/arch/x86/kernel/smpcommon.c
@@ -8,18 +8,21 @@
8DEFINE_PER_CPU(unsigned long, this_cpu_off); 8DEFINE_PER_CPU(unsigned long, this_cpu_off);
9EXPORT_PER_CPU_SYMBOL(this_cpu_off); 9EXPORT_PER_CPU_SYMBOL(this_cpu_off);
10 10
11/* Initialize the CPU's GDT. This is either the boot CPU doing itself 11/*
12 (still using the master per-cpu area), or a CPU doing it for a 12 * Initialize the CPU's GDT. This is either the boot CPU doing itself
13 secondary which will soon come up. */ 13 * (still using the master per-cpu area), or a CPU doing it for a
14 * secondary which will soon come up.
15 */
14__cpuinit void init_gdt(int cpu) 16__cpuinit void init_gdt(int cpu)
15{ 17{
16 struct desc_struct *gdt = get_cpu_gdt_table(cpu); 18 struct desc_struct gdt;
17 19
18 pack_descriptor(&gdt[GDT_ENTRY_PERCPU], 20 pack_descriptor(&gdt, __per_cpu_offset[cpu], 0xFFFFF,
19 __per_cpu_offset[cpu], 0xFFFFF,
20 0x2 | DESCTYPE_S, 0x8); 21 0x2 | DESCTYPE_S, 0x8);
22 gdt.s = 1;
21 23
22 gdt[GDT_ENTRY_PERCPU].s = 1; 24 write_gdt_entry(get_cpu_gdt_table(cpu),
25 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
23 26
24 per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; 27 per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
25 per_cpu(cpu_number, cpu) = cpu; 28 per_cpu(cpu_number, cpu) = cpu;
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index adff5562f5fd..d44395ff34c3 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -326,3 +326,9 @@ ENTRY(sys_call_table)
326 .long sys_fallocate 326 .long sys_fallocate
327 .long sys_timerfd_settime /* 325 */ 327 .long sys_timerfd_settime /* 325 */
328 .long sys_timerfd_gettime 328 .long sys_timerfd_gettime
329 .long sys_signalfd4
330 .long sys_eventfd2
331 .long sys_epoll_create1
332 .long sys_dup3 /* 330 */
333 .long sys_pipe2
334 .long sys_inotify_init1
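
The syscall_table_32.S hunk wires up the new flag-taking system calls (signalfd4, eventfd2, epoll_create1, dup3, pipe2, inotify_init1), whose main point is that descriptors can be created with O_CLOEXEC atomically. A small user-space example using the pipe2() wrapper (assumes a glibc recent enough to expose it):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fds[2];

        /* Create both pipe ends with close-on-exec already set: no race
         * between pipe() and a later fcntl(FD_CLOEXEC) in threaded code. */
        if (pipe2(fds, O_CLOEXEC) != 0) {
                perror("pipe2");
                return 1;
        }

        printf("read fd %d, write fd %d, FD_CLOEXEC=%d\n",
               fds[0], fds[1], fcntl(fds[0], F_GETFD) & FD_CLOEXEC);

        close(fds[0]);
        close(fds[1]);
        return 0;
}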
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index d0fbb7712ab0..8b8c0d6640fa 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -17,6 +17,7 @@
17#include <asm/genapic.h> 17#include <asm/genapic.h>
18#include <asm/idle.h> 18#include <asm/idle.h>
19#include <asm/tsc.h> 19#include <asm/tsc.h>
20#include <asm/irq_vectors.h>
20 21
21#include <mach_apic.h> 22#include <mach_apic.h>
22 23
@@ -783,7 +784,7 @@ static int __init uv_bau_init(void)
783 uv_init_blade(blade, node, cur_cpu); 784 uv_init_blade(blade, node, cur_cpu);
784 cur_cpu += uv_blade_nr_possible_cpus(blade); 785 cur_cpu += uv_blade_nr_possible_cpus(blade);
785 } 786 }
786 set_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); 787 alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
787 uv_enable_timeouts(); 788 uv_enable_timeouts();
788 789
789 return 0; 790 return 0;
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 8b5b3b81d437..7ffc8cff6ccc 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -1130,7 +1130,14 @@ asmlinkage void math_state_restore(void)
1130 } 1130 }
1131 1131
1132 clts(); /* Allow maths ops (or we recurse) */ 1132 clts(); /* Allow maths ops (or we recurse) */
1133 restore_fpu_checking(&me->thread.xstate->fxsave); 1133 /*
1134 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
1135 */
1136 if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) {
1137 stts();
1138 force_sig(SIGSEGV, me);
1139 return;
1140 }
1134 task_thread_info(me)->status |= TS_USEDFPU; 1141 task_thread_info(me)->status |= TS_USEDFPU;
1135 me->fpu_counter++; 1142 me->fpu_counter++;
1136} 1143}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7603c0553909..8f98e9de1b82 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -104,7 +104,7 @@ __setup("notsc", notsc_setup);
104/* 104/*
105 * Read TSC and the reference counters. Take care of SMI disturbance 105 * Read TSC and the reference counters. Take care of SMI disturbance
106 */ 106 */
107static u64 __init tsc_read_refs(u64 *pm, u64 *hpet) 107static u64 tsc_read_refs(u64 *pm, u64 *hpet)
108{ 108{
109 u64 t1, t2; 109 u64 t1, t2;
110 int i; 110 int i;
@@ -122,80 +122,216 @@ static u64 __init tsc_read_refs(u64 *pm, u64 *hpet)
122 return ULLONG_MAX; 122 return ULLONG_MAX;
123} 123}
124 124
125/** 125/*
126 * native_calibrate_tsc - calibrate the tsc on boot 126 * Try to calibrate the TSC against the Programmable
127 * Interrupt Timer and return the frequency of the TSC
128 * in kHz.
129 *
130 * Return ULONG_MAX on failure to calibrate.
127 */ 131 */
128unsigned long native_calibrate_tsc(void) 132static unsigned long pit_calibrate_tsc(void)
129{ 133{
130 unsigned long flags; 134 u64 tsc, t1, t2, delta;
131 u64 tsc1, tsc2, tr1, tr2, delta, pm1, pm2, hpet1, hpet2; 135 unsigned long tscmin, tscmax;
132 int hpet = is_hpet_enabled(); 136 int pitcnt;
133 unsigned int tsc_khz_val = 0;
134
135 local_irq_save(flags);
136
137 tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
138 137
138 /* Set the Gate high, disable speaker */
139 outb((inb(0x61) & ~0x02) | 0x01, 0x61); 139 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
140 140
141 /*
142 * Setup CTC channel 2* for mode 0, (interrupt on terminal
143 * count mode), binary count. Set the latch register to 50ms
144 * (LSB then MSB) to begin countdown.
145 */
141 outb(0xb0, 0x43); 146 outb(0xb0, 0x43);
142 outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42); 147 outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
143 outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42); 148 outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
144 tr1 = get_cycles();
145 while ((inb(0x61) & 0x20) == 0);
146 tr2 = get_cycles();
147 149
148 tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL); 150 tsc = t1 = t2 = get_cycles();
149 151
150 local_irq_restore(flags); 152 pitcnt = 0;
153 tscmax = 0;
154 tscmin = ULONG_MAX;
155 while ((inb(0x61) & 0x20) == 0) {
156 t2 = get_cycles();
157 delta = t2 - tsc;
158 tsc = t2;
159 if ((unsigned long) delta < tscmin)
160 tscmin = (unsigned int) delta;
161 if ((unsigned long) delta > tscmax)
162 tscmax = (unsigned int) delta;
163 pitcnt++;
164 }
151 165
152 /* 166 /*
153 * Preset the result with the raw and inaccurate PIT 167 * Sanity checks:
154 * calibration value 168 *
169 * If we were not able to read the PIT more than 5000
170 * times, then we have been hit by a massive SMI
171 *
172 * If the maximum is 10 times larger than the minimum,
173 * then we got hit by an SMI as well.
155 */ 174 */
156 delta = (tr2 - tr1); 175 if (pitcnt < 5000 || tscmax > 10 * tscmin)
176 return ULONG_MAX;
177
178 /* Calculate the PIT value */
179 delta = t2 - t1;
157 do_div(delta, 50); 180 do_div(delta, 50);
158 tsc_khz_val = delta; 181 return delta;
182}
183
184
185/**
186 * native_calibrate_tsc - calibrate the tsc on boot
187 */
188unsigned long native_calibrate_tsc(void)
189{
190 u64 tsc1, tsc2, delta, pm1, pm2, hpet1, hpet2;
191 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
192 unsigned long flags;
193 int hpet = is_hpet_enabled(), i;
194
195 /*
196 * Run 5 calibration loops to get the lowest frequency value
197 * (the best estimate). We use two different calibration modes
198 * here:
199 *
200 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
201 * load a timeout of 50ms. We read the time right after we
202 * started the timer and wait until the PIT count down reaches
203 * zero. In each wait loop iteration we read the TSC and check
204 * the delta to the previous read. We keep track of the min
205 * and max values of that delta. The delta is mostly defined
206 * by the IO time of the PIT access, so we can detect when a
207 * SMI/SMM disturbance happend between the two reads. If the
208 * maximum time is significantly larger than the minimum time,
209 * then we discard the result and have another try.
210 *
211 * 2) Reference counter. If available we use the HPET or the
212 * PMTIMER as a reference to check the sanity of that value.
213 * We use separate TSC readouts and check inside of the
 214 * reference read for an SMI/SMM disturbance. We discard
215 * disturbed values here as well. We do that around the PIT
216 * calibration delay loop as we have to wait for a certain
217 * amount of time anyway.
218 */
219 for (i = 0; i < 5; i++) {
220 unsigned long tsc_pit_khz;
221
222 /*
223 * Read the start value and the reference count of
224 * hpet/pmtimer when available. Then do the PIT
225 * calibration, which will take at least 50ms, and
226 * read the end value.
227 */
228 local_irq_save(flags);
229 tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
230 tsc_pit_khz = pit_calibrate_tsc();
231 tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
232 local_irq_restore(flags);
233
234 /* Pick the lowest PIT TSC calibration so far */
235 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
236
237 /* hpet or pmtimer available ? */
238 if (!hpet && !pm1 && !pm2)
239 continue;
240
241 /* Check, whether the sampling was disturbed by an SMI */
242 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
243 continue;
244
245 tsc2 = (tsc2 - tsc1) * 1000000LL;
246
247 if (hpet) {
248 if (hpet2 < hpet1)
249 hpet2 += 0x100000000ULL;
250 hpet2 -= hpet1;
251 tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
252 do_div(tsc1, 1000000);
253 } else {
254 if (pm2 < pm1)
255 pm2 += (u64)ACPI_PM_OVRRUN;
256 pm2 -= pm1;
257 tsc1 = pm2 * 1000000000LL;
258 do_div(tsc1, PMTMR_TICKS_PER_SEC);
259 }
260
261 do_div(tsc2, tsc1);
262 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
263 }
264
265 /*
266 * Now check the results.
267 */
268 if (tsc_pit_min == ULONG_MAX) {
269 /* PIT gave no useful value */
270 printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");
271
272 /* We don't have an alternative source, disable TSC */
273 if (!hpet && !pm1 && !pm2) {
274 printk("TSC: No reference (HPET/PMTIMER) available\n");
275 return 0;
276 }
277
278 /* The alternative source failed as well, disable TSC */
279 if (tsc_ref_min == ULONG_MAX) {
280 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
281 "failed due to SMI disturbance.\n");
282 return 0;
283 }
284
285 /* Use the alternative source */
286 printk(KERN_INFO "TSC: using %s reference calibration\n",
287 hpet ? "HPET" : "PMTIMER");
288
289 return tsc_ref_min;
290 }
159 291
160 /* hpet or pmtimer available ? */ 292 /* We don't have an alternative source, use the PIT calibration value */
161 if (!hpet && !pm1 && !pm2) { 293 if (!hpet && !pm1 && !pm2) {
162 printk(KERN_INFO "TSC calibrated against PIT\n"); 294 printk(KERN_INFO "TSC: Using PIT calibration value\n");
163 goto out; 295 return tsc_pit_min;
164 } 296 }
165 297
166 /* Check, whether the sampling was disturbed by an SMI */ 298 /* The alternative source failed, use the PIT calibration value */
167 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) { 299 if (tsc_ref_min == ULONG_MAX) {
168 printk(KERN_WARNING "TSC calibration disturbed by SMI, " 300 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed due "
169 "using PIT calibration result\n"); 301 "to SMI disturbance. Using PIT calibration\n");
170 goto out; 302 return tsc_pit_min;
171 } 303 }
172 304
173 tsc2 = (tsc2 - tsc1) * 1000000LL; 305 /* Check the reference deviation */
174 306 delta = ((u64) tsc_pit_min) * 100;
175 if (hpet) { 307 do_div(delta, tsc_ref_min);
176 printk(KERN_INFO "TSC calibrated against HPET\n"); 308
177 if (hpet2 < hpet1) 309 /*
 178 hpet2 += 0x100000000ULL; 310 * If both calibration results are inside a 5% window, then we
179 hpet2 -= hpet1; 311 * use the lower frequency of those as it is probably the
180 tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); 312 * closest estimate.
181 do_div(tsc1, 1000000); 313 */
182 } else { 314 if (delta >= 95 && delta <= 105) {
183 printk(KERN_INFO "TSC calibrated against PM_TIMER\n"); 315 printk(KERN_INFO "TSC: PIT calibration confirmed by %s.\n",
184 if (pm2 < pm1) 316 hpet ? "HPET" : "PMTIMER");
185 pm2 += (u64)ACPI_PM_OVRRUN; 317 printk(KERN_INFO "TSC: using %s calibration value\n",
186 pm2 -= pm1; 318 tsc_pit_min <= tsc_ref_min ? "PIT" :
187 tsc1 = pm2 * 1000000000LL; 319 hpet ? "HPET" : "PMTIMER");
188 do_div(tsc1, PMTMR_TICKS_PER_SEC); 320 return tsc_pit_min <= tsc_ref_min ? tsc_pit_min : tsc_ref_min;
189 } 321 }
190 322
191 do_div(tsc2, tsc1); 323 printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
192 tsc_khz_val = tsc2; 324 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
193 325
194out: 326 /*
195 return tsc_khz_val; 327 * The calibration values differ too much. In doubt, we use
328 * the PIT value as we know that there are PMTIMERs around
329 * running at double speed.
330 */
331 printk(KERN_INFO "TSC: Using PIT calibration value\n");
332 return tsc_pit_min;
196} 333}
197 334
198
199#ifdef CONFIG_X86_32 335#ifdef CONFIG_X86_32
200/* Only called from the Powernow K7 cpu freq driver */ 336/* Only called from the Powernow K7 cpu freq driver */
201int recalibrate_cpu_khz(void) 337int recalibrate_cpu_khz(void)
@@ -314,7 +450,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
314 mark_tsc_unstable("cpufreq changes"); 450 mark_tsc_unstable("cpufreq changes");
315 } 451 }
316 452
317 set_cyc2ns_scale(tsc_khz_ref, freq->cpu); 453 set_cyc2ns_scale(tsc_khz, freq->cpu);
318 454
319 return 0; 455 return 0;
320} 456}
@@ -325,6 +461,10 @@ static struct notifier_block time_cpufreq_notifier_block = {
325 461
326static int __init cpufreq_tsc(void) 462static int __init cpufreq_tsc(void)
327{ 463{
464 if (!cpu_has_tsc)
465 return 0;
466 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
467 return 0;
328 cpufreq_register_notifier(&time_cpufreq_notifier_block, 468 cpufreq_register_notifier(&time_cpufreq_notifier_block,
329 CPUFREQ_TRANSITION_NOTIFIER); 469 CPUFREQ_TRANSITION_NOTIFIER);
330 return 0; 470 return 0;
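
The rewritten calibration code above times a 50 ms PIT channel-2 countdown, tracks the min/max TSC delta between successive PIT polls to spot SMI disturbance, and converts the elapsed cycles into kHz with delta / 50. The stand-alone sketch below reproduces that conversion plus the two sanity checks on made-up sample numbers; real calibration obviously needs the hardware access the kernel code performs.

#include <stdio.h>
#include <stdint.h>

/*
 * Given the TSC cycles elapsed over a 50 ms PIT countdown, plus the
 * per-iteration min/max deltas and the number of PIT polls, return the
 * TSC frequency in kHz, or 0 if the run looks SMI-disturbed.
 */
static unsigned long pit_khz(uint64_t cycles_in_50ms,
                             unsigned long dmin, unsigned long dmax,
                             int polls)
{
        /* Same sanity checks as the kernel loop: too few polls, or a max
         * delta more than 10x the min, means we were interrupted. */
        if (polls < 5000 || dmax > 10 * dmin)
                return 0;

        /* kHz = cycles per millisecond; we measured 50 ms worth. */
        return (unsigned long)(cycles_in_50ms / 50);
}

int main(void)
{
        /* ~2.4 GHz CPU: 120,000,000 cycles in 50 ms -> 2,400,000 kHz. */
        printf("clean run:     %lu kHz\n", pit_khz(120000000ULL, 300, 900, 60000));
        /* A huge max delta marks the run as disturbed. */
        printf("disturbed run: %lu kHz\n", pit_khz(120000000ULL, 300, 90000, 60000));
        return 0;
}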
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 0577825cf89b..9ffb01c31c40 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -88,11 +88,9 @@ static __cpuinit void check_tsc_warp(void)
88 __raw_spin_unlock(&sync_lock); 88 __raw_spin_unlock(&sync_lock);
89 } 89 }
90 } 90 }
91 if (!(now-start)) { 91 WARN(!(now-start),
92 printk("Warning: zero tsc calibration delta: %Ld [max: %Ld]\n", 92 "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
93 now-start, end-start); 93 now-start, end-start);
94 WARN_ON(1);
95 }
96} 94}
97 95
98/* 96/*
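
The tsc_sync.c hunk folds the old if/printk/WARN_ON(1) sequence into a single WARN(condition, fmt, ...) call. A rough user-space analogue of that macro, shown only to illustrate the shape of the conversion (the kernel version additionally dumps a stack trace and taints the kernel, none of which is reproduced here; statement expressions and ##__VA_ARGS__ are GCC extensions, which matches the kernel's dialect):

#include <stdio.h>

#define WARN(cond, fmt, ...)                                            \
        ({                                                              \
                int __w = !!(cond);                                     \
                if (__w)                                                \
                        fprintf(stderr, "WARNING at %s:%d: " fmt,       \
                                __FILE__, __LINE__, ##__VA_ARGS__);     \
                __w;                                                    \
        })

int main(void)
{
        long long start = 100, now = 100, end = 500;

        /* Fires because now - start is zero, just like the converted check. */
        WARN(!(now - start),
             "Warning: zero tsc calibration delta: %lld [max: %lld]\n",
             now - start, end - start);
        return 0;
}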
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 41e01b145c48..594ef47f0a63 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -184,8 +184,6 @@ static int __init visws_get_smp_config(unsigned int early)
184 return 1; 184 return 1;
185} 185}
186 186
187extern unsigned int __cpuinitdata maxcpus;
188
189/* 187/*
190 * The Visual Workstation is Intel MP compliant in the hardware 188 * The Visual Workstation is Intel MP compliant in the hardware
191 * sense, but it doesn't have a BIOS(-configuration table). 189 * sense, but it doesn't have a BIOS(-configuration table).
@@ -244,8 +242,8 @@ static int __init visws_find_smp_config(unsigned int reserve)
244 ncpus = CO_CPU_MAX; 242 ncpus = CO_CPU_MAX;
245 } 243 }
246 244
247 if (ncpus > maxcpus) 245 if (ncpus > setup_max_cpus)
248 ncpus = maxcpus; 246 ncpus = setup_max_cpus;
249 247
250#ifdef CONFIG_X86_LOCAL_APIC 248#ifdef CONFIG_X86_LOCAL_APIC
251 smp_found_config = 1; 249 smp_found_config = 1;
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 0a1b1a9d922d..6ca515d6db54 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -37,6 +37,7 @@
37#include <asm/timer.h> 37#include <asm/timer.h>
38#include <asm/vmi_time.h> 38#include <asm/vmi_time.h>
39#include <asm/kmap_types.h> 39#include <asm/kmap_types.h>
40#include <asm/setup.h>
40 41
41/* Convenient for calling VMI functions indirectly in the ROM */ 42/* Convenient for calling VMI functions indirectly in the ROM */
42typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void); 43typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
@@ -683,7 +684,7 @@ void vmi_bringup(void)
683{ 684{
684 /* We must establish the lowmem mapping for MMU ops to work */ 685 /* We must establish the lowmem mapping for MMU ops to work */
685 if (vmi_ops.set_linear_mapping) 686 if (vmi_ops.set_linear_mapping)
686 vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0); 687 vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
687} 688}
688 689
689/* 690/*
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index cdb2363697d2..af5bdad84604 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -209,3 +209,11 @@ SECTIONS
209 209
210 DWARF_DEBUG 210 DWARF_DEBUG
211} 211}
212
213#ifdef CONFIG_KEXEC
214/* Link time checks */
215#include <asm/kexec.h>
216
217ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
218 "kexec control code size is too big")
219#endif
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 8d45fabc5f3b..ce3251ce5504 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
21 tristate "Kernel-based Virtual Machine (KVM) support" 21 tristate "Kernel-based Virtual Machine (KVM) support"
22 depends on HAVE_KVM 22 depends on HAVE_KVM
23 select PREEMPT_NOTIFIERS 23 select PREEMPT_NOTIFIERS
24 select MMU_NOTIFIER
24 select ANON_INODES 25 select ANON_INODES
25 ---help--- 26 ---help---
26 Support hosting fully virtualized guest machines using hardware 27 Support hosting fully virtualized guest machines using hardware
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b0e4ddca6c18..3da2508eb22a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -653,6 +653,88 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
653 account_shadowed(kvm, gfn); 653 account_shadowed(kvm, gfn);
654} 654}
655 655
656static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
657{
658 u64 *spte;
659 int need_tlb_flush = 0;
660
661 while ((spte = rmap_next(kvm, rmapp, NULL))) {
662 BUG_ON(!(*spte & PT_PRESENT_MASK));
663 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
664 rmap_remove(kvm, spte);
665 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
666 need_tlb_flush = 1;
667 }
668 return need_tlb_flush;
669}
670
671static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
672 int (*handler)(struct kvm *kvm, unsigned long *rmapp))
673{
674 int i;
675 int retval = 0;
676
677 /*
 678 * If mmap_sem isn't taken, we can look at the memslots with only
679 * the mmu_lock by skipping over the slots with userspace_addr == 0.
680 */
681 for (i = 0; i < kvm->nmemslots; i++) {
682 struct kvm_memory_slot *memslot = &kvm->memslots[i];
683 unsigned long start = memslot->userspace_addr;
684 unsigned long end;
685
686 /* mmu_lock protects userspace_addr */
687 if (!start)
688 continue;
689
690 end = start + (memslot->npages << PAGE_SHIFT);
691 if (hva >= start && hva < end) {
692 gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
693 retval |= handler(kvm, &memslot->rmap[gfn_offset]);
694 retval |= handler(kvm,
695 &memslot->lpage_info[
696 gfn_offset /
697 KVM_PAGES_PER_HPAGE].rmap_pde);
698 }
699 }
700
701 return retval;
702}
703
704int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
705{
706 return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
707}
708
709static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
710{
711 u64 *spte;
712 int young = 0;
713
714 /* always return old for EPT */
715 if (!shadow_accessed_mask)
716 return 0;
717
718 spte = rmap_next(kvm, rmapp, NULL);
719 while (spte) {
720 int _young;
721 u64 _spte = *spte;
722 BUG_ON(!(_spte & PT_PRESENT_MASK));
723 _young = _spte & PT_ACCESSED_MASK;
724 if (_young) {
725 young = 1;
726 clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
727 }
728 spte = rmap_next(kvm, rmapp, spte);
729 }
730 return young;
731}
732
733int kvm_age_hva(struct kvm *kvm, unsigned long hva)
734{
735 return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
736}
737
656#ifdef MMU_DEBUG 738#ifdef MMU_DEBUG
657static int is_empty_shadow_page(u64 *spt) 739static int is_empty_shadow_page(u64 *spt)
658{ 740{
@@ -1203,6 +1285,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1203 int r; 1285 int r;
1204 int largepage = 0; 1286 int largepage = 0;
1205 pfn_t pfn; 1287 pfn_t pfn;
1288 unsigned long mmu_seq;
1206 1289
1207 down_read(&current->mm->mmap_sem); 1290 down_read(&current->mm->mmap_sem);
1208 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) { 1291 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
@@ -1210,6 +1293,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1210 largepage = 1; 1293 largepage = 1;
1211 } 1294 }
1212 1295
1296 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1297 /* implicit mb(), we'll read before PT lock is unlocked */
1213 pfn = gfn_to_pfn(vcpu->kvm, gfn); 1298 pfn = gfn_to_pfn(vcpu->kvm, gfn);
1214 up_read(&current->mm->mmap_sem); 1299 up_read(&current->mm->mmap_sem);
1215 1300
@@ -1220,6 +1305,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1220 } 1305 }
1221 1306
1222 spin_lock(&vcpu->kvm->mmu_lock); 1307 spin_lock(&vcpu->kvm->mmu_lock);
1308 if (mmu_notifier_retry(vcpu, mmu_seq))
1309 goto out_unlock;
1223 kvm_mmu_free_some_pages(vcpu); 1310 kvm_mmu_free_some_pages(vcpu);
1224 r = __direct_map(vcpu, v, write, largepage, gfn, pfn, 1311 r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
1225 PT32E_ROOT_LEVEL); 1312 PT32E_ROOT_LEVEL);
@@ -1227,6 +1314,11 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1227 1314
1228 1315
1229 return r; 1316 return r;
1317
1318out_unlock:
1319 spin_unlock(&vcpu->kvm->mmu_lock);
1320 kvm_release_pfn_clean(pfn);
1321 return 0;
1230} 1322}
1231 1323
1232 1324
@@ -1345,6 +1437,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
1345 int r; 1437 int r;
1346 int largepage = 0; 1438 int largepage = 0;
1347 gfn_t gfn = gpa >> PAGE_SHIFT; 1439 gfn_t gfn = gpa >> PAGE_SHIFT;
1440 unsigned long mmu_seq;
1348 1441
1349 ASSERT(vcpu); 1442 ASSERT(vcpu);
1350 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); 1443 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1358,6 +1451,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
1358 gfn &= ~(KVM_PAGES_PER_HPAGE-1); 1451 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1359 largepage = 1; 1452 largepage = 1;
1360 } 1453 }
1454 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1455 /* implicit mb(), we'll read before PT lock is unlocked */
1361 pfn = gfn_to_pfn(vcpu->kvm, gfn); 1456 pfn = gfn_to_pfn(vcpu->kvm, gfn);
1362 up_read(&current->mm->mmap_sem); 1457 up_read(&current->mm->mmap_sem);
1363 if (is_error_pfn(pfn)) { 1458 if (is_error_pfn(pfn)) {
@@ -1365,12 +1460,19 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
1365 return 1; 1460 return 1;
1366 } 1461 }
1367 spin_lock(&vcpu->kvm->mmu_lock); 1462 spin_lock(&vcpu->kvm->mmu_lock);
1463 if (mmu_notifier_retry(vcpu, mmu_seq))
1464 goto out_unlock;
1368 kvm_mmu_free_some_pages(vcpu); 1465 kvm_mmu_free_some_pages(vcpu);
1369 r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK, 1466 r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
1370 largepage, gfn, pfn, kvm_x86_ops->get_tdp_level()); 1467 largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
1371 spin_unlock(&vcpu->kvm->mmu_lock); 1468 spin_unlock(&vcpu->kvm->mmu_lock);
1372 1469
1373 return r; 1470 return r;
1471
1472out_unlock:
1473 spin_unlock(&vcpu->kvm->mmu_lock);
1474 kvm_release_pfn_clean(pfn);
1475 return 0;
1374} 1476}
1375 1477
1376static void nonpaging_free(struct kvm_vcpu *vcpu) 1478static void nonpaging_free(struct kvm_vcpu *vcpu)
@@ -1670,6 +1772,8 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1670 gfn &= ~(KVM_PAGES_PER_HPAGE-1); 1772 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1671 vcpu->arch.update_pte.largepage = 1; 1773 vcpu->arch.update_pte.largepage = 1;
1672 } 1774 }
1775 vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
1776 /* implicit mb(), we'll read before PT lock is unlocked */
1673 pfn = gfn_to_pfn(vcpu->kvm, gfn); 1777 pfn = gfn_to_pfn(vcpu->kvm, gfn);
1674 up_read(&current->mm->mmap_sem); 1778 up_read(&current->mm->mmap_sem);
1675 1779
@@ -1814,6 +1918,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1814 spin_unlock(&vcpu->kvm->mmu_lock); 1918 spin_unlock(&vcpu->kvm->mmu_lock);
1815 return r; 1919 return r;
1816} 1920}
1921EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
1817 1922
1818void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) 1923void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1819{ 1924{
@@ -1870,6 +1975,12 @@ void kvm_enable_tdp(void)
1870} 1975}
1871EXPORT_SYMBOL_GPL(kvm_enable_tdp); 1976EXPORT_SYMBOL_GPL(kvm_enable_tdp);
1872 1977
1978void kvm_disable_tdp(void)
1979{
1980 tdp_enabled = false;
1981}
1982EXPORT_SYMBOL_GPL(kvm_disable_tdp);
1983
1873static void free_mmu_pages(struct kvm_vcpu *vcpu) 1984static void free_mmu_pages(struct kvm_vcpu *vcpu)
1874{ 1985{
1875 struct kvm_mmu_page *sp; 1986 struct kvm_mmu_page *sp;
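
Several of the mmu.c and paging_tmpl.h hunks that follow the MMU_NOTIFIER change use the same pattern: snapshot kvm->mmu_notifier_seq, drop into the sleeping gfn_to_pfn() lookup, then re-check the sequence under mmu_lock with mmu_notifier_retry() and bail out if an invalidation ran in between. The sketch below shows that seqcount-style retry logic with plain integers; the names and the simulated invalidation are illustrative, not the KVM implementation.

#include <stdio.h>

/* Bumped by the (simulated) MMU-notifier invalidation path. */
static unsigned long mmu_notifier_seq;
static int invalidate_in_progress;

/* Stand-in for gfn_to_pfn(): may sleep, so invalidations can run meanwhile. */
static unsigned long lookup_pfn(unsigned long gfn, int trigger_invalidation)
{
        if (trigger_invalidation) {
                invalidate_in_progress = 1;
                /* ... pages go away here ... */
                invalidate_in_progress = 0;
                mmu_notifier_seq++;
        }
        return 0x1000 + gfn;    /* fake pfn */
}

/* Like mmu_notifier_retry(): stale if an invalidation ran or is running. */
static int retry_needed(unsigned long seq_snapshot)
{
        return invalidate_in_progress || mmu_notifier_seq != seq_snapshot;
}

static int map_gfn(unsigned long gfn, int trigger_invalidation)
{
        unsigned long seq = mmu_notifier_seq;   /* snapshot before sleeping */
        unsigned long pfn = lookup_pfn(gfn, trigger_invalidation);

        /* In KVM this check happens under mmu_lock before installing sptes. */
        if (retry_needed(seq)) {
                printf("gfn %#lx: raced with invalidation, retrying\n", gfn);
                return -1;
        }
        printf("gfn %#lx -> pfn %#lx installed\n", gfn, pfn);
        return 0;
}

int main(void)
{
        map_gfn(0x10, 0);       /* clean path */
        map_gfn(0x20, 1);       /* invalidation slipped in during the lookup */
        return 0;
}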
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4d918220baeb..4a814bff21f2 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -263,6 +263,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
263 pfn = vcpu->arch.update_pte.pfn; 263 pfn = vcpu->arch.update_pte.pfn;
264 if (is_error_pfn(pfn)) 264 if (is_error_pfn(pfn))
265 return; 265 return;
266 if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
267 return;
266 kvm_get_pfn(pfn); 268 kvm_get_pfn(pfn);
267 mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, 269 mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
268 gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte), 270 gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
@@ -343,7 +345,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
343 shadow_addr = __pa(shadow_page->spt); 345 shadow_addr = __pa(shadow_page->spt);
344 shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK 346 shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
345 | PT_WRITABLE_MASK | PT_USER_MASK; 347 | PT_WRITABLE_MASK | PT_USER_MASK;
346 *shadow_ent = shadow_pte; 348 set_shadow_pte(shadow_ent, shadow_pte);
347 } 349 }
348 350
349 mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access, 351 mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
@@ -380,6 +382,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
380 int r; 382 int r;
381 pfn_t pfn; 383 pfn_t pfn;
382 int largepage = 0; 384 int largepage = 0;
385 unsigned long mmu_seq;
383 386
384 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); 387 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
385 kvm_mmu_audit(vcpu, "pre page fault"); 388 kvm_mmu_audit(vcpu, "pre page fault");
@@ -413,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
413 largepage = 1; 416 largepage = 1;
414 } 417 }
415 } 418 }
419 mmu_seq = vcpu->kvm->mmu_notifier_seq;
420 /* implicit mb(), we'll read before PT lock is unlocked */
416 pfn = gfn_to_pfn(vcpu->kvm, walker.gfn); 421 pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
417 up_read(&current->mm->mmap_sem); 422 up_read(&current->mm->mmap_sem);
418 423
@@ -424,6 +429,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
424 } 429 }
425 430
426 spin_lock(&vcpu->kvm->mmu_lock); 431 spin_lock(&vcpu->kvm->mmu_lock);
432 if (mmu_notifier_retry(vcpu, mmu_seq))
433 goto out_unlock;
427 kvm_mmu_free_some_pages(vcpu); 434 kvm_mmu_free_some_pages(vcpu);
428 shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault, 435 shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
429 largepage, &write_pt, pfn); 436 largepage, &write_pt, pfn);
@@ -439,6 +446,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
439 spin_unlock(&vcpu->kvm->mmu_lock); 446 spin_unlock(&vcpu->kvm->mmu_lock);
440 447
441 return write_pt; 448 return write_pt;
449
450out_unlock:
451 spin_unlock(&vcpu->kvm->mmu_lock);
452 kvm_release_pfn_clean(pfn);
453 return 0;
442} 454}
443 455
444static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) 456static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
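
The paging_tmpl.h hunks above wire the software MMU into the MMU-notifier protocol: the fault path snapshots kvm->mmu_notifier_seq before gfn_to_pfn() (which may sleep), then re-checks it under mmu_lock with mmu_notifier_retry(); if an invalidation ran in between, the pfn is released and the fault retried instead of installing a stale spte. A stand-alone, single-threaded model of that sequence-counter check (the struct and helper only imitate the kernel definitions):

/*
 * Model of the retry check around gfn_to_pfn(); illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct kvm_model {
	unsigned long mmu_notifier_seq;		/* bumped when an invalidation completes */
	long mmu_notifier_count;		/* non-zero while one is in progress */
};

static bool mmu_notifier_retry(struct kvm_model *kvm, unsigned long seq)
{
	if (kvm->mmu_notifier_count)
		return true;			/* invalidation running right now */
	return kvm->mmu_notifier_seq != seq;	/* one completed since the snapshot */
}

int main(void)
{
	struct kvm_model kvm = { .mmu_notifier_seq = 41, .mmu_notifier_count = 0 };
	unsigned long mmu_seq = kvm.mmu_notifier_seq;

	/* ... gfn_to_pfn() would run here and may sleep ... */
	kvm.mmu_notifier_seq++;			/* pretend an invalidation completed */

	if (mmu_notifier_retry(&kvm, mmu_seq))
		puts("pfn may be stale: release it and retry the fault");
	else
		puts("safe to install the shadow pte");
	return 0;
}
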
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b756e876dce3..8233b86c778c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -62,6 +62,7 @@ static int npt = 1;
62module_param(npt, int, S_IRUGO); 62module_param(npt, int, S_IRUGO);
63 63
64static void kvm_reput_irq(struct vcpu_svm *svm); 64static void kvm_reput_irq(struct vcpu_svm *svm);
65static void svm_flush_tlb(struct kvm_vcpu *vcpu);
65 66
66static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) 67static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
67{ 68{
@@ -453,7 +454,8 @@ static __init int svm_hardware_setup(void)
453 if (npt_enabled) { 454 if (npt_enabled) {
454 printk(KERN_INFO "kvm: Nested Paging enabled\n"); 455 printk(KERN_INFO "kvm: Nested Paging enabled\n");
455 kvm_enable_tdp(); 456 kvm_enable_tdp();
456 } 457 } else
458 kvm_disable_tdp();
457 459
458 return 0; 460 return 0;
459 461
@@ -877,6 +879,10 @@ set:
877static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 879static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
878{ 880{
879 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE; 881 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
882 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
883
884 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
885 force_new_asid(vcpu);
880 886
881 vcpu->arch.cr4 = cr4; 887 vcpu->arch.cr4 = cr4;
882 if (!npt_enabled) 888 if (!npt_enabled)
@@ -1007,10 +1013,13 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1007 struct kvm *kvm = svm->vcpu.kvm; 1013 struct kvm *kvm = svm->vcpu.kvm;
1008 u64 fault_address; 1014 u64 fault_address;
1009 u32 error_code; 1015 u32 error_code;
1016 bool event_injection = false;
1010 1017
1011 if (!irqchip_in_kernel(kvm) && 1018 if (!irqchip_in_kernel(kvm) &&
1012 is_external_interrupt(exit_int_info)) 1019 is_external_interrupt(exit_int_info)) {
1020 event_injection = true;
1013 push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK); 1021 push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
1022 }
1014 1023
1015 fault_address = svm->vmcb->control.exit_info_2; 1024 fault_address = svm->vmcb->control.exit_info_2;
1016 error_code = svm->vmcb->control.exit_info_1; 1025 error_code = svm->vmcb->control.exit_info_1;
@@ -1023,7 +1032,16 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1023 KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code, 1032 KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
1024 (u32)fault_address, (u32)(fault_address >> 32), 1033 (u32)fault_address, (u32)(fault_address >> 32),
1025 handler); 1034 handler);
1035 /*
 1036 * FIXME: This shouldn't be necessary here, but there is a flush
1037 * missing in the MMU code. Until we find this bug, flush the
1038 * complete TLB here on an NPF
1039 */
1040 if (npt_enabled)
1041 svm_flush_tlb(&svm->vcpu);
1026 1042
1043 if (event_injection)
1044 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
1027 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); 1045 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
1028} 1046}
1029 1047
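
In svm.c above, svm_set_cr4() now forces a new ASID when the guest toggles CR4.PGE under nested paging, since global TLB entries would otherwise survive the write, and pf_interception() remembers whether the fault interrupted event injection so it can unprotect the faulting page. A tiny stand-alone example of the XOR test used to detect the PGE toggle (the CR4 values are made up; only the bit position matches the architecture):

/*
 * Illustration of the CR4.PGE change detection; not kernel code.
 */
#include <stdio.h>

#define X86_CR4_PGE	0x80UL		/* bit 7: page global enable */

int main(void)
{
	unsigned long old_cr4 = 0x6b0UL;			/* PGE currently set */
	unsigned long new_cr4 = old_cr4 & ~X86_CR4_PGE;		/* guest clears PGE */

	if ((old_cr4 ^ new_cr4) & X86_CR4_PGE)
		puts("PGE changed: global TLB entries are stale, start a fresh ASID");
	else
		puts("PGE unchanged: nothing to flush");
	return 0;
}
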
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0cac63701719..7041cc52b562 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2298,6 +2298,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2298 cr2 = vmcs_readl(EXIT_QUALIFICATION); 2298 cr2 = vmcs_readl(EXIT_QUALIFICATION);
2299 KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2, 2299 KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
2300 (u32)((u64)cr2 >> 32), handler); 2300 (u32)((u64)cr2 >> 32), handler);
2301 if (vect_info & VECTORING_INFO_VALID_MASK)
2302 kvm_mmu_unprotect_page_virt(vcpu, cr2);
2301 return kvm_mmu_page_fault(vcpu, cr2, error_code); 2303 return kvm_mmu_page_fault(vcpu, cr2, error_code);
2302 } 2304 }
2303 2305
@@ -3116,15 +3118,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
3116 return ERR_PTR(-ENOMEM); 3118 return ERR_PTR(-ENOMEM);
3117 3119
3118 allocate_vpid(vmx); 3120 allocate_vpid(vmx);
3119 if (id == 0 && vm_need_ept()) {
3120 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
3121 VMX_EPT_WRITABLE_MASK |
3122 VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
3123 kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
3124 VMX_EPT_FAKE_DIRTY_MASK, 0ull,
3125 VMX_EPT_EXECUTABLE_MASK);
3126 kvm_enable_tdp();
3127 }
3128 3121
3129 err = kvm_vcpu_init(&vmx->vcpu, kvm, id); 3122 err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
3130 if (err) 3123 if (err)
@@ -3303,8 +3296,16 @@ static int __init vmx_init(void)
3303 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP); 3296 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
3304 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP); 3297 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
3305 3298
3306 if (cpu_has_vmx_ept()) 3299 if (vm_need_ept()) {
3307 bypass_guest_pf = 0; 3300 bypass_guest_pf = 0;
3301 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
3302 VMX_EPT_WRITABLE_MASK |
3303 VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
3304 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
3305 VMX_EPT_EXECUTABLE_MASK);
3306 kvm_enable_tdp();
3307 } else
3308 kvm_disable_tdp();
3308 3309
3309 if (bypass_guest_pf) 3310 if (bypass_guest_pf)
3310 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); 3311 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
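
The vmx.c hunks above move the one-time EPT setup (base PTE bits, mask bits, kvm_enable_tdp()) from first-vCPU creation into vmx_init(), drop the fake accessed/dirty bits, and call kvm_disable_tdp() when EPT is not used. A stand-alone sketch of how the base EPT PTE bits are composed; the mask values copy vmx.h, while the write-back memory type value of 6 and the shift of 3 are assumptions about the encoding:

/*
 * Sketch of the EPT base-PTE composition done in vmx_init(); assumed
 * constants are marked, this is not kernel code.
 */
#include <stdio.h>

#define VMX_EPT_READABLE_MASK	0x1ull
#define VMX_EPT_WRITABLE_MASK	0x2ull
#define VMX_EPT_EXECUTABLE_MASK	0x4ull
#define VMX_EPT_DEFAULT_MT	6ull	/* write-back (assumed) */
#define VMX_EPT_MT_EPTE_SHIFT	3	/* memory type in bits 5:3 (assumed) */

int main(void)
{
	unsigned long long base = VMX_EPT_READABLE_MASK |
				  VMX_EPT_WRITABLE_MASK |
				  (VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);

	printf("EPT base PTE bits: 0x%llx (read+write, WB memory type)\n", base);
	printf("executable bit supplied via the mask setup: 0x%llx\n",
	       VMX_EPT_EXECUTABLE_MASK);
	return 0;
}
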
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index 425a13436b3f..23e8373507ad 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -370,8 +370,6 @@ enum vmcs_field {
370#define VMX_EPT_READABLE_MASK 0x1ull 370#define VMX_EPT_READABLE_MASK 0x1ull
371#define VMX_EPT_WRITABLE_MASK 0x2ull 371#define VMX_EPT_WRITABLE_MASK 0x2ull
372#define VMX_EPT_EXECUTABLE_MASK 0x4ull 372#define VMX_EPT_EXECUTABLE_MASK 0x4ull
373#define VMX_EPT_FAKE_ACCESSED_MASK (1ull << 62)
374#define VMX_EPT_FAKE_DIRTY_MASK (1ull << 63)
375 373
376#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul 374#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
377 375
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9f1cdb011cff..0d682fc6aeb3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -883,6 +883,7 @@ int kvm_dev_ioctl_check_extension(long ext)
883 case KVM_CAP_PIT: 883 case KVM_CAP_PIT:
884 case KVM_CAP_NOP_IO_DELAY: 884 case KVM_CAP_NOP_IO_DELAY:
885 case KVM_CAP_MP_STATE: 885 case KVM_CAP_MP_STATE:
886 case KVM_CAP_SYNC_MMU:
886 r = 1; 887 r = 1;
887 break; 888 break;
888 case KVM_CAP_COALESCED_MMIO: 889 case KVM_CAP_COALESCED_MMIO:
@@ -1495,6 +1496,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
1495 goto out; 1496 goto out;
1496 1497
1497 down_write(&kvm->slots_lock); 1498 down_write(&kvm->slots_lock);
1499 spin_lock(&kvm->mmu_lock);
1498 1500
1499 p = &kvm->arch.aliases[alias->slot]; 1501 p = &kvm->arch.aliases[alias->slot];
1500 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT; 1502 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -1506,6 +1508,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
1506 break; 1508 break;
1507 kvm->arch.naliases = n; 1509 kvm->arch.naliases = n;
1508 1510
1511 spin_unlock(&kvm->mmu_lock);
1509 kvm_mmu_zap_all(kvm); 1512 kvm_mmu_zap_all(kvm);
1510 1513
1511 up_write(&kvm->slots_lock); 1514 up_write(&kvm->slots_lock);
@@ -3184,6 +3187,10 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3184 kvm_desct->base |= seg_desc->base2 << 24; 3187 kvm_desct->base |= seg_desc->base2 << 24;
3185 kvm_desct->limit = seg_desc->limit0; 3188 kvm_desct->limit = seg_desc->limit0;
3186 kvm_desct->limit |= seg_desc->limit << 16; 3189 kvm_desct->limit |= seg_desc->limit << 16;
3190 if (seg_desc->g) {
3191 kvm_desct->limit <<= 12;
3192 kvm_desct->limit |= 0xfff;
3193 }
3187 kvm_desct->selector = selector; 3194 kvm_desct->selector = selector;
3188 kvm_desct->type = seg_desc->type; 3195 kvm_desct->type = seg_desc->type;
3189 kvm_desct->present = seg_desc->p; 3196 kvm_desct->present = seg_desc->p;
@@ -3223,6 +3230,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
3223static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 3230static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3224 struct desc_struct *seg_desc) 3231 struct desc_struct *seg_desc)
3225{ 3232{
3233 gpa_t gpa;
3226 struct descriptor_table dtable; 3234 struct descriptor_table dtable;
3227 u16 index = selector >> 3; 3235 u16 index = selector >> 3;
3228 3236
@@ -3232,13 +3240,16 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3232 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); 3240 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3233 return 1; 3241 return 1;
3234 } 3242 }
3235 return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8); 3243 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3244 gpa += index * 8;
3245 return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
3236} 3246}
3237 3247
3238/* allowed just for 8 bytes segments */ 3248/* allowed just for 8 bytes segments */
3239static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 3249static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3240 struct desc_struct *seg_desc) 3250 struct desc_struct *seg_desc)
3241{ 3251{
3252 gpa_t gpa;
3242 struct descriptor_table dtable; 3253 struct descriptor_table dtable;
3243 u16 index = selector >> 3; 3254 u16 index = selector >> 3;
3244 3255
@@ -3246,7 +3257,9 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3246 3257
3247 if (dtable.limit < index * 8 + 7) 3258 if (dtable.limit < index * 8 + 7)
3248 return 1; 3259 return 1;
3249 return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8); 3260 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3261 gpa += index * 8;
3262 return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
3250} 3263}
3251 3264
3252static u32 get_tss_base_addr(struct kvm_vcpu *vcpu, 3265static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
@@ -3258,55 +3271,7 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3258 base_addr |= (seg_desc->base1 << 16); 3271 base_addr |= (seg_desc->base1 << 16);
3259 base_addr |= (seg_desc->base2 << 24); 3272 base_addr |= (seg_desc->base2 << 24);
3260 3273
3261 return base_addr; 3274 return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
3262}
3263
3264static int load_tss_segment32(struct kvm_vcpu *vcpu,
3265 struct desc_struct *seg_desc,
3266 struct tss_segment_32 *tss)
3267{
3268 u32 base_addr;
3269
3270 base_addr = get_tss_base_addr(vcpu, seg_desc);
3271
3272 return kvm_read_guest(vcpu->kvm, base_addr, tss,
3273 sizeof(struct tss_segment_32));
3274}
3275
3276static int save_tss_segment32(struct kvm_vcpu *vcpu,
3277 struct desc_struct *seg_desc,
3278 struct tss_segment_32 *tss)
3279{
3280 u32 base_addr;
3281
3282 base_addr = get_tss_base_addr(vcpu, seg_desc);
3283
3284 return kvm_write_guest(vcpu->kvm, base_addr, tss,
3285 sizeof(struct tss_segment_32));
3286}
3287
3288static int load_tss_segment16(struct kvm_vcpu *vcpu,
3289 struct desc_struct *seg_desc,
3290 struct tss_segment_16 *tss)
3291{
3292 u32 base_addr;
3293
3294 base_addr = get_tss_base_addr(vcpu, seg_desc);
3295
3296 return kvm_read_guest(vcpu->kvm, base_addr, tss,
3297 sizeof(struct tss_segment_16));
3298}
3299
3300static int save_tss_segment16(struct kvm_vcpu *vcpu,
3301 struct desc_struct *seg_desc,
3302 struct tss_segment_16 *tss)
3303{
3304 u32 base_addr;
3305
3306 base_addr = get_tss_base_addr(vcpu, seg_desc);
3307
3308 return kvm_write_guest(vcpu->kvm, base_addr, tss,
3309 sizeof(struct tss_segment_16));
3310} 3275}
3311 3276
3312static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg) 3277static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -3466,20 +3431,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
3466} 3431}
3467 3432
3468static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector, 3433static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
3469 struct desc_struct *cseg_desc, 3434 u32 old_tss_base,
3470 struct desc_struct *nseg_desc) 3435 struct desc_struct *nseg_desc)
3471{ 3436{
3472 struct tss_segment_16 tss_segment_16; 3437 struct tss_segment_16 tss_segment_16;
3473 int ret = 0; 3438 int ret = 0;
3474 3439
3475 if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16)) 3440 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
3441 sizeof tss_segment_16))
3476 goto out; 3442 goto out;
3477 3443
3478 save_state_to_tss16(vcpu, &tss_segment_16); 3444 save_state_to_tss16(vcpu, &tss_segment_16);
3479 save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
3480 3445
3481 if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16)) 3446 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
3447 sizeof tss_segment_16))
3448 goto out;
3449
3450 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
3451 &tss_segment_16, sizeof tss_segment_16))
3482 goto out; 3452 goto out;
3453
3483 if (load_state_from_tss16(vcpu, &tss_segment_16)) 3454 if (load_state_from_tss16(vcpu, &tss_segment_16))
3484 goto out; 3455 goto out;
3485 3456
@@ -3489,20 +3460,26 @@ out:
3489} 3460}
3490 3461
3491static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector, 3462static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
3492 struct desc_struct *cseg_desc, 3463 u32 old_tss_base,
3493 struct desc_struct *nseg_desc) 3464 struct desc_struct *nseg_desc)
3494{ 3465{
3495 struct tss_segment_32 tss_segment_32; 3466 struct tss_segment_32 tss_segment_32;
3496 int ret = 0; 3467 int ret = 0;
3497 3468
3498 if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32)) 3469 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
3470 sizeof tss_segment_32))
3499 goto out; 3471 goto out;
3500 3472
3501 save_state_to_tss32(vcpu, &tss_segment_32); 3473 save_state_to_tss32(vcpu, &tss_segment_32);
3502 save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
3503 3474
3504 if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32)) 3475 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
3476 sizeof tss_segment_32))
3505 goto out; 3477 goto out;
3478
3479 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
3480 &tss_segment_32, sizeof tss_segment_32))
3481 goto out;
3482
3506 if (load_state_from_tss32(vcpu, &tss_segment_32)) 3483 if (load_state_from_tss32(vcpu, &tss_segment_32))
3507 goto out; 3484 goto out;
3508 3485
@@ -3517,16 +3494,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
3517 struct desc_struct cseg_desc; 3494 struct desc_struct cseg_desc;
3518 struct desc_struct nseg_desc; 3495 struct desc_struct nseg_desc;
3519 int ret = 0; 3496 int ret = 0;
3497 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
3498 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
3520 3499
3521 kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR); 3500 old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
3522 3501
3502 /* FIXME: Handle errors. Failure to read either TSS or their
3503 * descriptors should generate a pagefault.
3504 */
3523 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc)) 3505 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
3524 goto out; 3506 goto out;
3525 3507
3526 if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc)) 3508 if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
3527 goto out; 3509 goto out;
3528 3510
3529
3530 if (reason != TASK_SWITCH_IRET) { 3511 if (reason != TASK_SWITCH_IRET) {
3531 int cpl; 3512 int cpl;
3532 3513
@@ -3544,8 +3525,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
3544 3525
3545 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { 3526 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3546 cseg_desc.type &= ~(1 << 1); //clear the B flag 3527 cseg_desc.type &= ~(1 << 1); //clear the B flag
3547 save_guest_segment_descriptor(vcpu, tr_seg.selector, 3528 save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
3548 &cseg_desc);
3549 } 3529 }
3550 3530
3551 if (reason == TASK_SWITCH_IRET) { 3531 if (reason == TASK_SWITCH_IRET) {
@@ -3557,10 +3537,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
3557 kvm_x86_ops->cache_regs(vcpu); 3537 kvm_x86_ops->cache_regs(vcpu);
3558 3538
3559 if (nseg_desc.type & 8) 3539 if (nseg_desc.type & 8)
3560 ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc, 3540 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
3561 &nseg_desc); 3541 &nseg_desc);
3562 else 3542 else
3563 ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc, 3543 ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
3564 &nseg_desc); 3544 &nseg_desc);
3565 3545
3566 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) { 3546 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
@@ -3995,16 +3975,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
3995 */ 3975 */
3996 if (!user_alloc) { 3976 if (!user_alloc) {
3997 if (npages && !old.rmap) { 3977 if (npages && !old.rmap) {
3978 unsigned long userspace_addr;
3979
3998 down_write(&current->mm->mmap_sem); 3980 down_write(&current->mm->mmap_sem);
3999 memslot->userspace_addr = do_mmap(NULL, 0, 3981 userspace_addr = do_mmap(NULL, 0,
4000 npages * PAGE_SIZE, 3982 npages * PAGE_SIZE,
4001 PROT_READ | PROT_WRITE, 3983 PROT_READ | PROT_WRITE,
4002 MAP_SHARED | MAP_ANONYMOUS, 3984 MAP_SHARED | MAP_ANONYMOUS,
4003 0); 3985 0);
4004 up_write(&current->mm->mmap_sem); 3986 up_write(&current->mm->mmap_sem);
4005 3987
4006 if (IS_ERR((void *)memslot->userspace_addr)) 3988 if (IS_ERR((void *)userspace_addr))
4007 return PTR_ERR((void *)memslot->userspace_addr); 3989 return PTR_ERR((void *)userspace_addr);
3990
3991 /* set userspace_addr atomically for kvm_hva_to_rmapp */
3992 spin_lock(&kvm->mmu_lock);
3993 memslot->userspace_addr = userspace_addr;
3994 spin_unlock(&kvm->mmu_lock);
4008 } else { 3995 } else {
4009 if (!old.user_alloc && old.rmap) { 3996 if (!old.user_alloc && old.rmap) {
4010 int ret; 3997 int ret;
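
Among the x86.c changes above, seg_desct_to_kvm_desct() now honours the descriptor's granularity bit: with G set, the 20-bit limit field counts 4 KiB units, so it is shifted left by 12 and the low 12 bits are filled with ones. A stand-alone worked example of that computation (plain user-space code, values are examples):

/*
 * Granularity scaling as done in seg_desct_to_kvm_desct(); illustrative.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t raw_limit = 0xfffff;	/* 20-bit limit field from the descriptor */
	int g = 1;			/* granularity bit: limit counts 4 KiB units */
	uint32_t limit = raw_limit;

	if (g) {
		limit <<= 12;
		limit |= 0xfff;
	}
	printf("effective limit 0x%08x -> segment size %llu bytes\n",
	       (unsigned)limit, (unsigned long long)limit + 1);
	/* prints 0xffffffff -> 4294967296 bytes, i.e. a flat 4 GiB segment */
	return 0;
}

The descriptor-table and TSS accesses in the same file now run the guest-virtual base through vcpu->arch.mmu.gva_to_gpa() first, since kvm_read_guest() and kvm_write_guest() operate on guest-physical addresses.
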
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 0313a5eec412..d9249a882aa5 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1014,6 +1014,9 @@ __init void lguest_init(void)
1014 init_pg_tables_start = __pa(pg0); 1014 init_pg_tables_start = __pa(pg0);
1015 init_pg_tables_end = __pa(pg0); 1015 init_pg_tables_end = __pa(pg0);
1016 1016
1017 /* As described in head_32.S, we map the first 128M of memory. */
1018 max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT;
1019
1017 /* Load the %fs segment register (the per-cpu segment register) with 1020 /* Load the %fs segment register (the per-cpu segment register) with
1018 * the normal data segment to get through booting. */ 1021 * the normal data segment to get through booting. */
1019 asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory"); 1022 asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");
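
The lguest change above sets max_pfn_mapped to match the 128 MiB mapped by head_32.S. The arithmetic, spelled out as a stand-alone check:

/* Quick check of the value set above: 128 MiB of 4 KiB pages. */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long max_pfn_mapped = (128UL * 1024 * 1024) >> PAGE_SHIFT;

	printf("max_pfn_mapped = %lu (0x%lx)\n", max_pfn_mapped, max_pfn_mapped);
	/* 32768 pages: PFNs 0..0x7fff cover the first 128 MiB */
	return 0;
}
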
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index dfdf428975c0..f118c110af32 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -52,7 +52,7 @@
52 jnz 100b 52 jnz 100b
53102: 53102:
54 .section .fixup,"ax" 54 .section .fixup,"ax"
55103: addl %r8d,%edx /* ecx is zerorest also */ 55103: addl %ecx,%edx /* ecx is zerorest also */
56 jmp copy_user_handle_tail 56 jmp copy_user_handle_tail
57 .previous 57 .previous
58 58
diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
index 40e0e309d27e..cb0c112386fb 100644
--- a/arch/x86/lib/copy_user_nocache_64.S
+++ b/arch/x86/lib/copy_user_nocache_64.S
@@ -32,7 +32,7 @@
32 jnz 100b 32 jnz 100b
33102: 33102:
34 .section .fixup,"ax" 34 .section .fixup,"ax"
35103: addl %r8d,%edx /* ecx is zerorest also */ 35103: addl %ecx,%edx /* ecx is zerorest also */
36 jmp copy_user_handle_tail 36 jmp copy_user_handle_tail
37 .previous 37 .previous
38 38
@@ -108,7 +108,6 @@ ENTRY(__copy_user_nocache)
108 jmp 60f 108 jmp 60f
10950: movl %ecx,%edx 10950: movl %ecx,%edx
11060: sfence 11060: sfence
111 movl %r8d,%ecx
112 jmp copy_user_handle_tail 111 jmp copy_user_handle_tail
113 .previous 112 .previous
114 113
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
index d5a2b39f882b..01b868ba82f8 100644
--- a/arch/x86/lib/msr-on-cpu.c
+++ b/arch/x86/lib/msr-on-cpu.c
@@ -30,10 +30,11 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
30 30
31 rv.msr_no = msr_no; 31 rv.msr_no = msr_no;
32 if (safe) { 32 if (safe) {
33 smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); 33 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
34 err = rv.err; 34 &rv, 1);
35 err = err ? err : rv.err;
35 } else { 36 } else {
36 smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); 37 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
37 } 38 }
38 *l = rv.l; 39 *l = rv.l;
39 *h = rv.h; 40 *h = rv.h;
@@ -64,23 +65,24 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
64 rv.l = l; 65 rv.l = l;
65 rv.h = h; 66 rv.h = h;
66 if (safe) { 67 if (safe) {
67 smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); 68 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
68 err = rv.err; 69 &rv, 1);
70 err = err ? err : rv.err;
69 } else { 71 } else {
70 smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); 72 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
71 } 73 }
72 74
73 return err; 75 return err;
74} 76}
75 77
76void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 78int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
77{ 79{
78 _wrmsr_on_cpu(cpu, msr_no, l, h, 0); 80 return _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
79} 81}
80 82
81void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 83int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
82{ 84{
83 _rdmsr_on_cpu(cpu, msr_no, l, h, 0); 85 return _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
84} 86}
85 87
86/* These "safe" variants are slower and should be used when the target MSR 88/* These "safe" variants are slower and should be used when the target MSR
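
The msr-on-cpu.c changes make the cross-CPU helpers propagate errors: rdmsr_on_cpu()/wrmsr_on_cpu() now return int, and the safe variants report a failure of smp_call_function_single() itself before falling back to the per-MSR result. A stand-alone model of that precedence rule (the error numbers are only examples):

/*
 * Model of the "err = err ? err : rv.err" precedence above; illustrative.
 */
#include <stdio.h>

static int combine(int ipi_err, int msr_err)
{
	/* the cross-CPU call's own failure wins; otherwise report the MSR result */
	return ipi_err ? ipi_err : msr_err;
}

int main(void)
{
	printf("%d\n", combine(0, 0));		/* both fine             ->   0 */
	printf("%d\n", combine(0, -5));		/* MSR faulted (safe op)  ->  -5 */
	printf("%d\n", combine(-22, -5));	/* cross-CPU call failed  -> -22 */
	return 0;
}
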
diff --git a/arch/x86/mach-rdc321x/platform.c b/arch/x86/mach-rdc321x/platform.c
index a037041817c7..4f4e50c3ad3b 100644
--- a/arch/x86/mach-rdc321x/platform.c
+++ b/arch/x86/mach-rdc321x/platform.c
@@ -25,7 +25,6 @@
25#include <linux/list.h> 25#include <linux/list.h>
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/version.h>
29#include <linux/leds.h> 28#include <linux/leds.h>
30 29
31#include <asm/gpio.h> 30#include <asm/gpio.h>
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 1fbb844c3d7a..dfb932dcf136 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,5 +1,5 @@
1obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ 1obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
2 pat.o pgtable.o 2 pat.o pgtable.o gup.o
3 3
4obj-$(CONFIG_X86_32) += pgtable_32.o 4obj-$(CONFIG_X86_32) += pgtable_32.o
5 5
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 5dfef9fa061a..62fa440678d8 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -42,7 +42,6 @@
42 42
43struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; 43struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
44EXPORT_SYMBOL(node_data); 44EXPORT_SYMBOL(node_data);
45static bootmem_data_t node0_bdata;
46 45
47/* 46/*
48 * numa interface - we expect the numa architecture specific code to have 47 * numa interface - we expect the numa architecture specific code to have
@@ -385,7 +384,7 @@ void __init initmem_init(unsigned long start_pfn,
385 for_each_online_node(nid) 384 for_each_online_node(nid)
386 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); 385 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
387 386
388 NODE_DATA(0)->bdata = &node0_bdata; 387 NODE_DATA(0)->bdata = &bootmem_node_data[0];
389 setup_bootmem_allocator(); 388 setup_bootmem_allocator();
390} 389}
391 390
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
new file mode 100644
index 000000000000..007bb06c7504
--- /dev/null
+++ b/arch/x86/mm/gup.c
@@ -0,0 +1,298 @@
1/*
2 * Lockless get_user_pages_fast for x86
3 *
4 * Copyright (C) 2008 Nick Piggin
5 * Copyright (C) 2008 Novell Inc.
6 */
7#include <linux/sched.h>
8#include <linux/mm.h>
9#include <linux/vmstat.h>
10#include <linux/highmem.h>
11
12#include <asm/pgtable.h>
13
14static inline pte_t gup_get_pte(pte_t *ptep)
15{
16#ifndef CONFIG_X86_PAE
17 return *ptep;
18#else
19 /*
20 * With get_user_pages_fast, we walk down the pagetables without taking
 21 * any locks. For this we would like to load the pointers atomically,
22 * but that is not possible (without expensive cmpxchg8b) on PAE. What
23 * we do have is the guarantee that a pte will only either go from not
24 * present to present, or present to not present or both -- it will not
25 * switch to a completely different present page without a TLB flush in
26 * between; something that we are blocking by holding interrupts off.
27 *
28 * Setting ptes from not present to present goes:
29 * ptep->pte_high = h;
30 * smp_wmb();
31 * ptep->pte_low = l;
32 *
33 * And present to not present goes:
34 * ptep->pte_low = 0;
35 * smp_wmb();
36 * ptep->pte_high = 0;
37 *
38 * We must ensure here that the load of pte_low sees l iff pte_high
39 * sees h. We load pte_high *after* loading pte_low, which ensures we
40 * don't see an older value of pte_high. *Then* we recheck pte_low,
41 * which ensures that we haven't picked up a changed pte high. We might
42 * have got rubbish values from pte_low and pte_high, but we are
43 * guaranteed that pte_low will not have the present bit set *unless*
44 * it is 'l'. And get_user_pages_fast only operates on present ptes, so
45 * we're safe.
46 *
47 * gup_get_pte should not be used or copied outside gup.c without being
48 * very careful -- it does not atomically load the pte or anything that
49 * is likely to be useful for you.
50 */
51 pte_t pte;
52
53retry:
54 pte.pte_low = ptep->pte_low;
55 smp_rmb();
56 pte.pte_high = ptep->pte_high;
57 smp_rmb();
58 if (unlikely(pte.pte_low != ptep->pte_low))
59 goto retry;
60
61 return pte;
62#endif
63}
64
65/*
66 * The performance critical leaf functions are made noinline otherwise gcc
67 * inlines everything into a single function which results in too much
68 * register pressure.
69 */
70static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
71 unsigned long end, int write, struct page **pages, int *nr)
72{
73 unsigned long mask;
74 pte_t *ptep;
75
76 mask = _PAGE_PRESENT|_PAGE_USER;
77 if (write)
78 mask |= _PAGE_RW;
79
80 ptep = pte_offset_map(&pmd, addr);
81 do {
82 pte_t pte = gup_get_pte(ptep);
83 struct page *page;
84
85 if ((pte_val(pte) & (mask | _PAGE_SPECIAL)) != mask) {
86 pte_unmap(ptep);
87 return 0;
88 }
89 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
90 page = pte_page(pte);
91 get_page(page);
92 pages[*nr] = page;
93 (*nr)++;
94
95 } while (ptep++, addr += PAGE_SIZE, addr != end);
96 pte_unmap(ptep - 1);
97
98 return 1;
99}
100
101static inline void get_head_page_multiple(struct page *page, int nr)
102{
103 VM_BUG_ON(page != compound_head(page));
104 VM_BUG_ON(page_count(page) == 0);
105 atomic_add(nr, &page->_count);
106}
107
108static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
109 unsigned long end, int write, struct page **pages, int *nr)
110{
111 unsigned long mask;
112 pte_t pte = *(pte_t *)&pmd;
113 struct page *head, *page;
114 int refs;
115
116 mask = _PAGE_PRESENT|_PAGE_USER;
117 if (write)
118 mask |= _PAGE_RW;
119 if ((pte_val(pte) & mask) != mask)
120 return 0;
121 /* hugepages are never "special" */
122 VM_BUG_ON(pte_val(pte) & _PAGE_SPECIAL);
123 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
124
125 refs = 0;
126 head = pte_page(pte);
127 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
128 do {
129 VM_BUG_ON(compound_head(page) != head);
130 pages[*nr] = page;
131 (*nr)++;
132 page++;
133 refs++;
134 } while (addr += PAGE_SIZE, addr != end);
135 get_head_page_multiple(head, refs);
136
137 return 1;
138}
139
140static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
141 int write, struct page **pages, int *nr)
142{
143 unsigned long next;
144 pmd_t *pmdp;
145
146 pmdp = pmd_offset(&pud, addr);
147 do {
148 pmd_t pmd = *pmdp;
149
150 next = pmd_addr_end(addr, end);
151 if (pmd_none(pmd))
152 return 0;
153 if (unlikely(pmd_large(pmd))) {
154 if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
155 return 0;
156 } else {
157 if (!gup_pte_range(pmd, addr, next, write, pages, nr))
158 return 0;
159 }
160 } while (pmdp++, addr = next, addr != end);
161
162 return 1;
163}
164
165static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
166 unsigned long end, int write, struct page **pages, int *nr)
167{
168 unsigned long mask;
169 pte_t pte = *(pte_t *)&pud;
170 struct page *head, *page;
171 int refs;
172
173 mask = _PAGE_PRESENT|_PAGE_USER;
174 if (write)
175 mask |= _PAGE_RW;
176 if ((pte_val(pte) & mask) != mask)
177 return 0;
178 /* hugepages are never "special" */
179 VM_BUG_ON(pte_val(pte) & _PAGE_SPECIAL);
180 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
181
182 refs = 0;
183 head = pte_page(pte);
184 page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
185 do {
186 VM_BUG_ON(compound_head(page) != head);
187 pages[*nr] = page;
188 (*nr)++;
189 page++;
190 refs++;
191 } while (addr += PAGE_SIZE, addr != end);
192 get_head_page_multiple(head, refs);
193
194 return 1;
195}
196
197static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
198 int write, struct page **pages, int *nr)
199{
200 unsigned long next;
201 pud_t *pudp;
202
203 pudp = pud_offset(&pgd, addr);
204 do {
205 pud_t pud = *pudp;
206
207 next = pud_addr_end(addr, end);
208 if (pud_none(pud))
209 return 0;
210 if (unlikely(pud_large(pud))) {
211 if (!gup_huge_pud(pud, addr, next, write, pages, nr))
212 return 0;
213 } else {
214 if (!gup_pmd_range(pud, addr, next, write, pages, nr))
215 return 0;
216 }
217 } while (pudp++, addr = next, addr != end);
218
219 return 1;
220}
221
222int get_user_pages_fast(unsigned long start, int nr_pages, int write,
223 struct page **pages)
224{
225 struct mm_struct *mm = current->mm;
226 unsigned long addr, len, end;
227 unsigned long next;
228 pgd_t *pgdp;
229 int nr = 0;
230
231 start &= PAGE_MASK;
232 addr = start;
233 len = (unsigned long) nr_pages << PAGE_SHIFT;
234 end = start + len;
235 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
236 start, len)))
237 goto slow_irqon;
238
239 /*
240 * XXX: batch / limit 'nr', to avoid large irq off latency
241 * needs some instrumenting to determine the common sizes used by
242 * important workloads (eg. DB2), and whether limiting the batch size
243 * will decrease performance.
244 *
245 * It seems like we're in the clear for the moment. Direct-IO is
246 * the main guy that batches up lots of get_user_pages, and even
247 * they are limited to 64-at-a-time which is not so many.
248 */
249 /*
250 * This doesn't prevent pagetable teardown, but does prevent
251 * the pagetables and pages from being freed on x86.
252 *
253 * So long as we atomically load page table pointers versus teardown
254 * (which we do on x86, with the above PAE exception), we can follow the
 255 * address down to the page and take a ref on it.
256 */
257 local_irq_disable();
258 pgdp = pgd_offset(mm, addr);
259 do {
260 pgd_t pgd = *pgdp;
261
262 next = pgd_addr_end(addr, end);
263 if (pgd_none(pgd))
264 goto slow;
265 if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
266 goto slow;
267 } while (pgdp++, addr = next, addr != end);
268 local_irq_enable();
269
270 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
271 return nr;
272
273 {
274 int ret;
275
276slow:
277 local_irq_enable();
278slow_irqon:
279 /* Try to get the remaining pages with get_user_pages */
280 start += nr << PAGE_SHIFT;
281 pages += nr;
282
283 down_read(&mm->mmap_sem);
284 ret = get_user_pages(current, mm, start,
285 (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
286 up_read(&mm->mmap_sem);
287
288 /* Have to be a bit careful with return values */
289 if (nr > 0) {
290 if (ret < 0)
291 ret = nr;
292 else
293 ret += nr;
294 }
295
296 return ret;
297 }
298}
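
The new gup.c walks the page tables with interrupts disabled and falls back to the regular get_user_pages() for anything it cannot handle locklessly; the final return value combines the fast-path count with the slow-path result. A stand-alone model of that merge logic at the end of get_user_pages_fast() (error values are examples):

/*
 * Model of the return-value merge above: 'nr' pages were pinned on the
 * fast path before falling back, 'ret' is the slow path's result for the
 * remainder. Illustrative user-space code.
 */
#include <stdio.h>

static int merge_result(int nr, int ret)
{
	if (nr > 0) {
		if (ret < 0)
			ret = nr;	/* report the pages we did pin */
		else
			ret += nr;	/* fast-path pages plus slow-path pages */
	}
	return ret;
}

int main(void)
{
	printf("%d\n", merge_result(0, -14));	/* nothing pinned, slow path -EFAULT -> -14 */
	printf("%d\n", merge_result(3, -14));	/* partial fast-path success          ->   3 */
	printf("%d\n", merge_result(3, 5));	/* 3 fast + 5 slow                    ->   8 */
	return 0;
}
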
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 0b3d567e686d..8f307d914c2e 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -124,7 +124,8 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
124 return 1; 124 return 1;
125} 125}
126 126
127pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) 127pte_t *huge_pte_alloc(struct mm_struct *mm,
128 unsigned long addr, unsigned long sz)
128{ 129{
129 pgd_t *pgd; 130 pgd_t *pgd;
130 pud_t *pud; 131 pud_t *pud;
@@ -133,9 +134,14 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
133 pgd = pgd_offset(mm, addr); 134 pgd = pgd_offset(mm, addr);
134 pud = pud_alloc(mm, pgd, addr); 135 pud = pud_alloc(mm, pgd, addr);
135 if (pud) { 136 if (pud) {
136 if (pud_none(*pud)) 137 if (sz == PUD_SIZE) {
137 huge_pmd_share(mm, addr, pud); 138 pte = (pte_t *)pud;
138 pte = (pte_t *) pmd_alloc(mm, pud, addr); 139 } else {
140 BUG_ON(sz != PMD_SIZE);
141 if (pud_none(*pud))
142 huge_pmd_share(mm, addr, pud);
143 pte = (pte_t *) pmd_alloc(mm, pud, addr);
144 }
139 } 145 }
140 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); 146 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
141 147
@@ -151,8 +157,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
151 pgd = pgd_offset(mm, addr); 157 pgd = pgd_offset(mm, addr);
152 if (pgd_present(*pgd)) { 158 if (pgd_present(*pgd)) {
153 pud = pud_offset(pgd, addr); 159 pud = pud_offset(pgd, addr);
154 if (pud_present(*pud)) 160 if (pud_present(*pud)) {
161 if (pud_large(*pud))
162 return (pte_t *)pud;
155 pmd = pmd_offset(pud, addr); 163 pmd = pmd_offset(pud, addr);
164 }
156 } 165 }
157 return (pte_t *) pmd; 166 return (pte_t *) pmd;
158} 167}
@@ -188,6 +197,11 @@ int pmd_huge(pmd_t pmd)
188 return 0; 197 return 0;
189} 198}
190 199
200int pud_huge(pud_t pud)
201{
202 return 0;
203}
204
191struct page * 205struct page *
192follow_huge_pmd(struct mm_struct *mm, unsigned long address, 206follow_huge_pmd(struct mm_struct *mm, unsigned long address,
193 pmd_t *pmd, int write) 207 pmd_t *pmd, int write)
@@ -208,6 +222,11 @@ int pmd_huge(pmd_t pmd)
208 return !!(pmd_val(pmd) & _PAGE_PSE); 222 return !!(pmd_val(pmd) & _PAGE_PSE);
209} 223}
210 224
225int pud_huge(pud_t pud)
226{
227 return !!(pud_val(pud) & _PAGE_PSE);
228}
229
211struct page * 230struct page *
212follow_huge_pmd(struct mm_struct *mm, unsigned long address, 231follow_huge_pmd(struct mm_struct *mm, unsigned long address,
213 pmd_t *pmd, int write) 232 pmd_t *pmd, int write)
@@ -216,9 +235,22 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
216 235
217 page = pte_page(*(pte_t *)pmd); 236 page = pte_page(*(pte_t *)pmd);
218 if (page) 237 if (page)
219 page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT); 238 page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
239 return page;
240}
241
242struct page *
243follow_huge_pud(struct mm_struct *mm, unsigned long address,
244 pud_t *pud, int write)
245{
246 struct page *page;
247
248 page = pte_page(*(pte_t *)pud);
249 if (page)
250 page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
220 return page; 251 return page;
221} 252}
253
222#endif 254#endif
223 255
224/* x86_64 also uses this file */ 256/* x86_64 also uses this file */
@@ -228,6 +260,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
228 unsigned long addr, unsigned long len, 260 unsigned long addr, unsigned long len,
229 unsigned long pgoff, unsigned long flags) 261 unsigned long pgoff, unsigned long flags)
230{ 262{
263 struct hstate *h = hstate_file(file);
231 struct mm_struct *mm = current->mm; 264 struct mm_struct *mm = current->mm;
232 struct vm_area_struct *vma; 265 struct vm_area_struct *vma;
233 unsigned long start_addr; 266 unsigned long start_addr;
@@ -240,7 +273,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
240 } 273 }
241 274
242full_search: 275full_search:
243 addr = ALIGN(start_addr, HPAGE_SIZE); 276 addr = ALIGN(start_addr, huge_page_size(h));
244 277
245 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { 278 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
246 /* At this point: (!vma || addr < vma->vm_end). */ 279 /* At this point: (!vma || addr < vma->vm_end). */
@@ -262,7 +295,7 @@ full_search:
262 } 295 }
263 if (addr + mm->cached_hole_size < vma->vm_start) 296 if (addr + mm->cached_hole_size < vma->vm_start)
264 mm->cached_hole_size = vma->vm_start - addr; 297 mm->cached_hole_size = vma->vm_start - addr;
265 addr = ALIGN(vma->vm_end, HPAGE_SIZE); 298 addr = ALIGN(vma->vm_end, huge_page_size(h));
266 } 299 }
267} 300}
268 301
@@ -270,6 +303,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
270 unsigned long addr0, unsigned long len, 303 unsigned long addr0, unsigned long len,
271 unsigned long pgoff, unsigned long flags) 304 unsigned long pgoff, unsigned long flags)
272{ 305{
306 struct hstate *h = hstate_file(file);
273 struct mm_struct *mm = current->mm; 307 struct mm_struct *mm = current->mm;
274 struct vm_area_struct *vma, *prev_vma; 308 struct vm_area_struct *vma, *prev_vma;
275 unsigned long base = mm->mmap_base, addr = addr0; 309 unsigned long base = mm->mmap_base, addr = addr0;
@@ -290,7 +324,7 @@ try_again:
290 goto fail; 324 goto fail;
291 325
292 /* either no address requested or cant fit in requested address hole */ 326 /* either no address requested or cant fit in requested address hole */
293 addr = (mm->free_area_cache - len) & HPAGE_MASK; 327 addr = (mm->free_area_cache - len) & huge_page_mask(h);
294 do { 328 do {
295 /* 329 /*
296 * Lookup failure means no vma is above this address, 330 * Lookup failure means no vma is above this address,
@@ -321,7 +355,7 @@ try_again:
321 largest_hole = vma->vm_start - addr; 355 largest_hole = vma->vm_start - addr;
322 356
323 /* try just below the current vma->vm_start */ 357 /* try just below the current vma->vm_start */
324 addr = (vma->vm_start - len) & HPAGE_MASK; 358 addr = (vma->vm_start - len) & huge_page_mask(h);
325 } while (len <= vma->vm_start); 359 } while (len <= vma->vm_start);
326 360
327fail: 361fail:
@@ -359,22 +393,23 @@ unsigned long
359hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 393hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
360 unsigned long len, unsigned long pgoff, unsigned long flags) 394 unsigned long len, unsigned long pgoff, unsigned long flags)
361{ 395{
396 struct hstate *h = hstate_file(file);
362 struct mm_struct *mm = current->mm; 397 struct mm_struct *mm = current->mm;
363 struct vm_area_struct *vma; 398 struct vm_area_struct *vma;
364 399
365 if (len & ~HPAGE_MASK) 400 if (len & ~huge_page_mask(h))
366 return -EINVAL; 401 return -EINVAL;
367 if (len > TASK_SIZE) 402 if (len > TASK_SIZE)
368 return -ENOMEM; 403 return -ENOMEM;
369 404
370 if (flags & MAP_FIXED) { 405 if (flags & MAP_FIXED) {
371 if (prepare_hugepage_range(addr, len)) 406 if (prepare_hugepage_range(file, addr, len))
372 return -EINVAL; 407 return -EINVAL;
373 return addr; 408 return addr;
374 } 409 }
375 410
376 if (addr) { 411 if (addr) {
377 addr = ALIGN(addr, HPAGE_SIZE); 412 addr = ALIGN(addr, huge_page_size(h));
378 vma = find_vma(mm, addr); 413 vma = find_vma(mm, addr);
379 if (TASK_SIZE - len >= addr && 414 if (TASK_SIZE - len >= addr &&
380 (!vma || addr + len <= vma->vm_start)) 415 (!vma || addr + len <= vma->vm_start))
@@ -390,3 +425,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
390 425
391#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/ 426#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
392 427
428#ifdef CONFIG_X86_64
429static __init int setup_hugepagesz(char *opt)
430{
431 unsigned long ps = memparse(opt, &opt);
432 if (ps == PMD_SIZE) {
433 hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
434 } else if (ps == PUD_SIZE && cpu_has_gbpages) {
435 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
436 } else {
437 printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
438 ps >> 20);
439 return 0;
440 }
441 return 1;
442}
443__setup("hugepagesz=", setup_hugepagesz);
444#endif
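
hugetlbpage.c now supports two huge-page sizes on x86_64: the hugepagesz= boot parameter selects a 2 MiB (PMD-level) hstate always, or a 1 GiB (PUD-level) hstate when the CPU advertises gbpages. A stand-alone model of that dispatch (shift constants match x86_64; cpu_has_gbpages is reduced to a plain flag):

/*
 * Model of the setup_hugepagesz() dispatch added above; illustrative only.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PUD_SIZE	(1UL << PUD_SHIFT)

static void pick_hstate(unsigned long ps, int cpu_has_gbpages)
{
	if (ps == PMD_SIZE)
		printf("hstate order %d (2 MiB pages)\n", PMD_SHIFT - PAGE_SHIFT);
	else if (ps == PUD_SIZE && cpu_has_gbpages)
		printf("hstate order %d (1 GiB pages)\n", PUD_SHIFT - PAGE_SHIFT);
	else
		printf("hugepagesz: unsupported page size %lu M\n", ps >> 20);
}

int main(void)
{
	pick_hstate(2UL << 20, 0);	/* hugepagesz=2M                        */
	pick_hstate(1UL << 30, 0);	/* hugepagesz=1G, no gbpages: rejected  */
	pick_hstate(1UL << 30, 1);	/* hugepagesz=1G with gbpages: order 18 */
	return 0;
}
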
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index d37f29376b0c..60ec1d08ff24 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -458,11 +458,7 @@ static void __init pagetable_init(void)
458{ 458{
459 pgd_t *pgd_base = swapper_pg_dir; 459 pgd_t *pgd_base = swapper_pg_dir;
460 460
461 paravirt_pagetable_setup_start(pgd_base);
462
463 permanent_kmaps_init(pgd_base); 461 permanent_kmaps_init(pgd_base);
464
465 paravirt_pagetable_setup_done(pgd_base);
466} 462}
467 463
468#ifdef CONFIG_ACPI_SLEEP 464#ifdef CONFIG_ACPI_SLEEP
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ec37121f6709..d3746efb060d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -60,7 +60,7 @@ static unsigned long dma_reserve __initdata;
60 60
61DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 61DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
62 62
63int direct_gbpages __meminitdata 63int direct_gbpages
64#ifdef CONFIG_DIRECT_GBPAGES 64#ifdef CONFIG_DIRECT_GBPAGES
65 = 1 65 = 1
66#endif 66#endif
@@ -86,46 +86,13 @@ early_param("gbpages", parse_direct_gbpages_on);
86 * around without checking the pgd every time. 86 * around without checking the pgd every time.
87 */ 87 */
88 88
89void show_mem(void)
90{
91 long i, total = 0, reserved = 0;
92 long shared = 0, cached = 0;
93 struct page *page;
94 pg_data_t *pgdat;
95
96 printk(KERN_INFO "Mem-info:\n");
97 show_free_areas();
98 for_each_online_pgdat(pgdat) {
99 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
100 /*
101 * This loop can take a while with 256 GB and
102 * 4k pages so defer the NMI watchdog:
103 */
104 if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
105 touch_nmi_watchdog();
106
107 if (!pfn_valid(pgdat->node_start_pfn + i))
108 continue;
109
110 page = pfn_to_page(pgdat->node_start_pfn + i);
111 total++;
112 if (PageReserved(page))
113 reserved++;
114 else if (PageSwapCache(page))
115 cached++;
116 else if (page_count(page))
117 shared += page_count(page) - 1;
118 }
119 }
120 printk(KERN_INFO "%lu pages of RAM\n", total);
121 printk(KERN_INFO "%lu reserved pages\n", reserved);
122 printk(KERN_INFO "%lu pages shared\n", shared);
123 printk(KERN_INFO "%lu pages swap cached\n", cached);
124}
125
126int after_bootmem; 89int after_bootmem;
127 90
128static __init void *spp_getpage(void) 91/*
92 * NOTE: This function is marked __ref because it calls __init function
93 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
94 */
95static __ref void *spp_getpage(void)
129{ 96{
130 void *ptr; 97 void *ptr;
131 98
@@ -274,7 +241,7 @@ static unsigned long __initdata table_start;
274static unsigned long __meminitdata table_end; 241static unsigned long __meminitdata table_end;
275static unsigned long __meminitdata table_top; 242static unsigned long __meminitdata table_top;
276 243
277static __meminit void *alloc_low_page(unsigned long *phys) 244static __ref void *alloc_low_page(unsigned long *phys)
278{ 245{
279 unsigned long pfn = table_end++; 246 unsigned long pfn = table_end++;
280 void *adr; 247 void *adr;
@@ -295,7 +262,7 @@ static __meminit void *alloc_low_page(unsigned long *phys)
295 return adr; 262 return adr;
296} 263}
297 264
298static __meminit void unmap_low_page(void *adr) 265static __ref void unmap_low_page(void *adr)
299{ 266{
300 if (after_bootmem) 267 if (after_bootmem)
301 return; 268 return;
@@ -351,6 +318,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
351{ 318{
352 unsigned long pages = 0; 319 unsigned long pages = 0;
353 unsigned long last_map_addr = end; 320 unsigned long last_map_addr = end;
321 unsigned long start = address;
354 322
355 int i = pmd_index(address); 323 int i = pmd_index(address);
356 324
@@ -368,16 +336,24 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
368 } 336 }
369 337
370 if (pmd_val(*pmd)) { 338 if (pmd_val(*pmd)) {
371 if (!pmd_large(*pmd)) 339 if (!pmd_large(*pmd)) {
340 spin_lock(&init_mm.page_table_lock);
372 last_map_addr = phys_pte_update(pmd, address, 341 last_map_addr = phys_pte_update(pmd, address,
373 end); 342 end);
343 spin_unlock(&init_mm.page_table_lock);
344 }
345 /* Count entries we're using from level2_ident_pgt */
346 if (start == 0)
347 pages++;
374 continue; 348 continue;
375 } 349 }
376 350
377 if (page_size_mask & (1<<PG_LEVEL_2M)) { 351 if (page_size_mask & (1<<PG_LEVEL_2M)) {
378 pages++; 352 pages++;
353 spin_lock(&init_mm.page_table_lock);
379 set_pte((pte_t *)pmd, 354 set_pte((pte_t *)pmd,
380 pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); 355 pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
356 spin_unlock(&init_mm.page_table_lock);
381 last_map_addr = (address & PMD_MASK) + PMD_SIZE; 357 last_map_addr = (address & PMD_MASK) + PMD_SIZE;
382 continue; 358 continue;
383 } 359 }
@@ -386,7 +362,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
386 last_map_addr = phys_pte_init(pte, address, end); 362 last_map_addr = phys_pte_init(pte, address, end);
387 unmap_low_page(pte); 363 unmap_low_page(pte);
388 364
365 spin_lock(&init_mm.page_table_lock);
389 pmd_populate_kernel(&init_mm, pmd, __va(pte_phys)); 366 pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
367 spin_unlock(&init_mm.page_table_lock);
390 } 368 }
391 update_page_count(PG_LEVEL_2M, pages); 369 update_page_count(PG_LEVEL_2M, pages);
392 return last_map_addr; 370 return last_map_addr;
@@ -399,9 +377,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
399 pmd_t *pmd = pmd_offset(pud, 0); 377 pmd_t *pmd = pmd_offset(pud, 0);
400 unsigned long last_map_addr; 378 unsigned long last_map_addr;
401 379
402 spin_lock(&init_mm.page_table_lock);
403 last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask); 380 last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
404 spin_unlock(&init_mm.page_table_lock);
405 __flush_tlb_all(); 381 __flush_tlb_all();
406 return last_map_addr; 382 return last_map_addr;
407} 383}
@@ -437,20 +413,21 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
437 413
438 if (page_size_mask & (1<<PG_LEVEL_1G)) { 414 if (page_size_mask & (1<<PG_LEVEL_1G)) {
439 pages++; 415 pages++;
416 spin_lock(&init_mm.page_table_lock);
440 set_pte((pte_t *)pud, 417 set_pte((pte_t *)pud,
441 pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); 418 pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
419 spin_unlock(&init_mm.page_table_lock);
442 last_map_addr = (addr & PUD_MASK) + PUD_SIZE; 420 last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
443 continue; 421 continue;
444 } 422 }
445 423
446 pmd = alloc_low_page(&pmd_phys); 424 pmd = alloc_low_page(&pmd_phys);
447
448 spin_lock(&init_mm.page_table_lock);
449 last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask); 425 last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
450 unmap_low_page(pmd); 426 unmap_low_page(pmd);
427
428 spin_lock(&init_mm.page_table_lock);
451 pud_populate(&init_mm, pud, __va(pmd_phys)); 429 pud_populate(&init_mm, pud, __va(pmd_phys));
452 spin_unlock(&init_mm.page_table_lock); 430 spin_unlock(&init_mm.page_table_lock);
453
454 } 431 }
455 __flush_tlb_all(); 432 __flush_tlb_all();
456 update_page_count(PG_LEVEL_1G, pages); 433 update_page_count(PG_LEVEL_1G, pages);
@@ -542,16 +519,14 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
542 continue; 519 continue;
543 } 520 }
544 521
545 if (after_bootmem) 522 pud = alloc_low_page(&pud_phys);
546 pud = pud_offset(pgd, start & PGDIR_MASK);
547 else
548 pud = alloc_low_page(&pud_phys);
549
550 last_map_addr = phys_pud_init(pud, __pa(start), __pa(next), 523 last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
551 page_size_mask); 524 page_size_mask);
552 unmap_low_page(pud); 525 unmap_low_page(pud);
553 pgd_populate(&init_mm, pgd_offset_k(start), 526
554 __va(pud_phys)); 527 spin_lock(&init_mm.page_table_lock);
528 pgd_populate(&init_mm, pgd, __va(pud_phys));
529 spin_unlock(&init_mm.page_table_lock);
555 } 530 }
556 531
557 return last_map_addr; 532 return last_map_addr;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 24c1d3c30186..d4b6e6a29ae3 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -170,7 +170,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
170 phys_addr &= PAGE_MASK; 170 phys_addr &= PAGE_MASK;
171 size = PAGE_ALIGN(last_addr+1) - phys_addr; 171 size = PAGE_ALIGN(last_addr+1) - phys_addr;
172 172
173 retval = reserve_memtype(phys_addr, phys_addr + size, 173 retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
174 prot_val, &new_prot_val); 174 prot_val, &new_prot_val);
175 if (retval) { 175 if (retval) {
176 pr_debug("Warning: reserve_memtype returned %d\n", retval); 176 pr_debug("Warning: reserve_memtype returned %d\n", retval);
@@ -330,6 +330,14 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
330 return (void __iomem *)ret; 330 return (void __iomem *)ret;
331} 331}
332 332
333void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
334 unsigned long prot_val)
335{
336 return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
337 __builtin_return_address(0));
338}
339EXPORT_SYMBOL(ioremap_prot);
340
333/** 341/**
334 * iounmap - Free a IO remapping 342 * iounmap - Free a IO remapping
335 * @addr: virtual address from ioremap_* 343 * @addr: virtual address from ioremap_*
@@ -545,13 +553,11 @@ static int __init check_early_ioremap_leak(void)
545{ 553{
546 if (!early_ioremap_nested) 554 if (!early_ioremap_nested)
547 return 0; 555 return 0;
548 556 WARN(1, KERN_WARNING
549 printk(KERN_WARNING
550 "Debug warning: early ioremap leak of %d areas detected.\n", 557 "Debug warning: early ioremap leak of %d areas detected.\n",
551 early_ioremap_nested); 558 early_ioremap_nested);
552 printk(KERN_WARNING 559 printk(KERN_WARNING
553 "please boot with early_ioremap_debug and report the dmesg.\n"); 560 "please boot with early_ioremap_debug and report the dmesg.\n");
554 WARN_ON(1);
555 561
556 return 1; 562 return 1;
557} 563}
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index e7397e108beb..635b50e85581 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -430,7 +430,9 @@ static void enter_uniprocessor(void)
430 "may miss events.\n"); 430 "may miss events.\n");
431} 431}
432 432
433static void leave_uniprocessor(void) 433/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
434 but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
435static void __ref leave_uniprocessor(void)
434{ 436{
435 int cpu; 437 int cpu;
436 int err; 438 int err;
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 9782f42dd319..a4dd793d6003 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -23,8 +23,6 @@
23struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; 23struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
24EXPORT_SYMBOL(node_data); 24EXPORT_SYMBOL(node_data);
25 25
26static bootmem_data_t plat_node_bdata[MAX_NUMNODES];
27
28struct memnode memnode; 26struct memnode memnode;
29 27
30s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { 28s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
@@ -198,7 +196,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
198 nodedata_phys + pgdat_size - 1); 196 nodedata_phys + pgdat_size - 1);
199 197
200 memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t)); 198 memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
201 NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid]; 199 NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
202 NODE_DATA(nodeid)->node_start_pfn = start_pfn; 200 NODE_DATA(nodeid)->node_start_pfn = start_pfn;
203 NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn; 201 NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;
204 202
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 0dcd42eb94e6..d4aa503caaa2 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -221,8 +221,7 @@ static int pageattr_test(void)
221 failed += print_split(&sc); 221 failed += print_split(&sc);
222 222
223 if (failed) { 223 if (failed) {
224 printk(KERN_ERR "NOT PASSED. Please report.\n"); 224 WARN(1, KERN_ERR "NOT PASSED. Please report.\n");
225 WARN_ON(1);
226 return -EINVAL; 225 return -EINVAL;
227 } else { 226 } else {
228 if (print) 227 if (print)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 65c6e46bf059..43e2f8483e4f 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -55,13 +55,19 @@ static void split_page_count(int level)
55 55
56int arch_report_meminfo(char *page) 56int arch_report_meminfo(char *page)
57{ 57{
58 int n = sprintf(page, "DirectMap4k: %8lu\n" 58 int n = sprintf(page, "DirectMap4k: %8lu kB\n",
59 "DirectMap2M: %8lu\n", 59 direct_pages_count[PG_LEVEL_4K] << 2);
60 direct_pages_count[PG_LEVEL_4K], 60#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
61 direct_pages_count[PG_LEVEL_2M]); 61 n += sprintf(page + n, "DirectMap2M: %8lu kB\n",
62 direct_pages_count[PG_LEVEL_2M] << 11);
63#else
64 n += sprintf(page + n, "DirectMap4M: %8lu kB\n",
65 direct_pages_count[PG_LEVEL_2M] << 12);
66#endif
62#ifdef CONFIG_X86_64 67#ifdef CONFIG_X86_64
63 n += sprintf(page + n, "DirectMap1G: %8lu\n", 68 if (direct_gbpages)
64 direct_pages_count[PG_LEVEL_1G]); 69 n += sprintf(page + n, "DirectMap1G: %8lu kB\n",
70 direct_pages_count[PG_LEVEL_1G] << 20);
65#endif 71#endif
66 return n; 72 return n;
67} 73}
@@ -592,10 +598,9 @@ repeat:
592 if (!pte_val(old_pte)) { 598 if (!pte_val(old_pte)) {
593 if (!primary) 599 if (!primary)
594 return 0; 600 return 0;
595 printk(KERN_WARNING "CPA: called for zero pte. " 601 WARN(1, KERN_WARNING "CPA: called for zero pte. "
596 "vaddr = %lx cpa->vaddr = %lx\n", address, 602 "vaddr = %lx cpa->vaddr = %lx\n", address,
597 cpa->vaddr); 603 cpa->vaddr);
598 WARN_ON(1);
599 return -EINVAL; 604 return -EINVAL;
600 } 605 }
601 606
@@ -844,7 +849,7 @@ int set_memory_uc(unsigned long addr, int numpages)
844 /* 849 /*
845 * for now UC MINUS. see comments in ioremap_nocache() 850 * for now UC MINUS. see comments in ioremap_nocache()
846 */ 851 */
847 if (reserve_memtype(addr, addr + numpages * PAGE_SIZE, 852 if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
848 _PAGE_CACHE_UC_MINUS, NULL)) 853 _PAGE_CACHE_UC_MINUS, NULL))
849 return -EINVAL; 854 return -EINVAL;
850 855
@@ -863,7 +868,7 @@ int set_memory_wc(unsigned long addr, int numpages)
863 if (!pat_enabled) 868 if (!pat_enabled)
864 return set_memory_uc(addr, numpages); 869 return set_memory_uc(addr, numpages);
865 870
866 if (reserve_memtype(addr, addr + numpages * PAGE_SIZE, 871 if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
867 _PAGE_CACHE_WC, NULL)) 872 _PAGE_CACHE_WC, NULL))
868 return -EINVAL; 873 return -EINVAL;
869 874
@@ -879,7 +884,7 @@ int _set_memory_wb(unsigned long addr, int numpages)
879 884
880int set_memory_wb(unsigned long addr, int numpages) 885int set_memory_wb(unsigned long addr, int numpages)
881{ 886{
882 free_memtype(addr, addr + numpages * PAGE_SIZE); 887 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
883 888
884 return _set_memory_wb(addr, numpages); 889 return _set_memory_wb(addr, numpages);
885} 890}
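
In pageattr.c above, arch_report_meminfo() now prints the DirectMap counters in kB, which is just a shift of the per-size page counts, and set_memory_uc()/wc()/wb() pass physical addresses (__pa) to the memtype tracker. A stand-alone check of the kB conversion shifts (the page counts are invented):

/*
 * kB conversion used above: 4 KiB -> <<2, 2 MiB -> <<11, 4 MiB -> <<12,
 * 1 GiB -> <<20. Illustrative user-space code.
 */
#include <stdio.h>

int main(void)
{
	unsigned long count_4k = 1024, count_2m = 512, count_1g = 3;

	printf("DirectMap4k: %8lu kB\n", count_4k << 2);	/*    4096 kB */
	printf("DirectMap2M: %8lu kB\n", count_2m << 11);	/* 1048576 kB */
	printf("DirectMap1G: %8lu kB\n", count_1g << 20);	/* 3145728 kB */
	return 0;
}
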
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2fe30916d4b6..2a50e0fa64a5 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -207,6 +207,9 @@ static int chk_conflict(struct memtype *new, struct memtype *entry,
207 return -EBUSY; 207 return -EBUSY;
208} 208}
209 209
210static struct memtype *cached_entry;
211static u64 cached_start;
212
210/* 213/*
211 * req_type typically has one of the: 214 * req_type typically has one of the:
212 * - _PAGE_CACHE_WB 215 * - _PAGE_CACHE_WB
@@ -280,11 +283,17 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
280 283
281 spin_lock(&memtype_lock); 284 spin_lock(&memtype_lock);
282 285
286 if (cached_entry && start >= cached_start)
287 entry = cached_entry;
288 else
289 entry = list_entry(&memtype_list, struct memtype, nd);
290
283 /* Search for existing mapping that overlaps the current range */ 291 /* Search for existing mapping that overlaps the current range */
284 where = NULL; 292 where = NULL;
285 list_for_each_entry(entry, &memtype_list, nd) { 293 list_for_each_entry_continue(entry, &memtype_list, nd) {
286 if (end <= entry->start) { 294 if (end <= entry->start) {
287 where = entry->nd.prev; 295 where = entry->nd.prev;
296 cached_entry = list_entry(where, struct memtype, nd);
288 break; 297 break;
289 } else if (start <= entry->start) { /* end > entry->start */ 298 } else if (start <= entry->start) { /* end > entry->start */
290 err = chk_conflict(new, entry, new_type); 299 err = chk_conflict(new, entry, new_type);
@@ -292,6 +301,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
292 dprintk("Overlap at 0x%Lx-0x%Lx\n", 301 dprintk("Overlap at 0x%Lx-0x%Lx\n",
293 entry->start, entry->end); 302 entry->start, entry->end);
294 where = entry->nd.prev; 303 where = entry->nd.prev;
304 cached_entry = list_entry(where,
305 struct memtype, nd);
295 } 306 }
296 break; 307 break;
297 } else if (start < entry->end) { /* start > entry->start */ 308 } else if (start < entry->end) { /* start > entry->start */
@@ -299,7 +310,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
299 if (!err) { 310 if (!err) {
300 dprintk("Overlap at 0x%Lx-0x%Lx\n", 311 dprintk("Overlap at 0x%Lx-0x%Lx\n",
301 entry->start, entry->end); 312 entry->start, entry->end);
302 where = &entry->nd; 313 cached_entry = list_entry(entry->nd.prev,
314 struct memtype, nd);
315
316 /*
317 * Move to right position in the linked
318 * list to add this new entry
319 */
320 list_for_each_entry_continue(entry,
321 &memtype_list, nd) {
322 if (start <= entry->start) {
323 where = entry->nd.prev;
324 break;
325 }
326 }
303 } 327 }
304 break; 328 break;
305 } 329 }
@@ -314,6 +338,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
314 return err; 338 return err;
315 } 339 }
316 340
341 cached_start = start;
342
317 if (where) 343 if (where)
318 list_add(&new->nd, where); 344 list_add(&new->nd, where);
319 else 345 else
@@ -343,6 +369,9 @@ int free_memtype(u64 start, u64 end)
343 spin_lock(&memtype_lock); 369 spin_lock(&memtype_lock);
344 list_for_each_entry(entry, &memtype_list, nd) { 370 list_for_each_entry(entry, &memtype_list, nd) {
345 if (entry->start == start && entry->end == end) { 371 if (entry->start == start && entry->end == end) {
372 if (cached_entry == entry || cached_start == start)
373 cached_entry = NULL;
374
346 list_del(&entry->nd); 375 list_del(&entry->nd);
347 kfree(entry); 376 kfree(entry);
348 err = 0; 377 err = 0;
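
The reserve_memtype()/free_memtype() changes above cache the position of the last lookup in the sorted memtype list and resume the next search from there, dropping the cache when the cached entry is freed. A toy, static-list version of that cached search (plain C, not <linux/list.h>; the kernel version also refreshes the cache on every insert):

#include <stdio.h>

struct range {
        unsigned long long start, end;
        struct range *next;
};

static struct range *cached;                  /* where the last search ended   */
static unsigned long long cached_start;

/* Return the first entry whose start is >= 'start' (the insertion point). */
static struct range *find_first_at_or_after(struct range *head,
                                             unsigned long long start)
{
        struct range *r = (cached && start >= cached_start) ? cached : head;

        while (r && r->start < start)
                r = r->next;

        cached = r;
        cached_start = start;
        return r;                             /* NULL means append at the tail */
}

int main(void)
{
        struct range c = { 0x300, 0x400, NULL };
        struct range b = { 0x200, 0x300, &c };
        struct range a = { 0x100, 0x200, &b };
        struct range *hit;

        hit = find_first_at_or_after(&a, 0x150);
        printf("first search lands at %#llx\n", hit ? hit->start : 0);
        hit = find_first_at_or_after(&a, 0x250);   /* resumes from the cache */
        printf("second search lands at %#llx\n", hit ? hit->start : 0);
        return 0;
}
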
@@ -361,14 +390,6 @@ int free_memtype(u64 start, u64 end)
361} 390}
362 391
363 392
364/*
365 * /dev/mem mmap interface. The memtype used for mapping varies:
366 * - Use UC for mappings with O_SYNC flag
367 * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
368 * inherit the memtype from existing mapping.
369 * - Else use UC_MINUS memtype (for backward compatibility with existing
370 * X drivers.
371 */
372pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 393pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
373 unsigned long size, pgprot_t vma_prot) 394 unsigned long size, pgprot_t vma_prot)
374{ 395{
@@ -406,14 +427,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
406 unsigned long size, pgprot_t *vma_prot) 427 unsigned long size, pgprot_t *vma_prot)
407{ 428{
408 u64 offset = ((u64) pfn) << PAGE_SHIFT; 429 u64 offset = ((u64) pfn) << PAGE_SHIFT;
409 unsigned long flags = _PAGE_CACHE_UC_MINUS; 430 unsigned long flags = -1;
410 int retval; 431 int retval;
411 432
412 if (!range_is_allowed(pfn, size)) 433 if (!range_is_allowed(pfn, size))
413 return 0; 434 return 0;
414 435
415 if (file->f_flags & O_SYNC) { 436 if (file->f_flags & O_SYNC) {
416 flags = _PAGE_CACHE_UC; 437 flags = _PAGE_CACHE_UC_MINUS;
417 } 438 }
418 439
419#ifdef CONFIG_X86_32 440#ifdef CONFIG_X86_32
@@ -436,13 +457,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
436#endif 457#endif
437 458
438 /* 459 /*
439 * With O_SYNC, we can only take UC mapping. Fail if we cannot. 460 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
461 *
440 * Without O_SYNC, we want to get 462 * Without O_SYNC, we want to get
441 * - WB for WB-able memory and no other conflicting mappings 463 * - WB for WB-able memory and no other conflicting mappings
442 * - UC_MINUS for non-WB-able memory with no other conflicting mappings 464 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
443 * - Inherit from confliting mappings otherwise 465 * - Inherit from confliting mappings otherwise
444 */ 466 */
445 if (flags != _PAGE_CACHE_UC_MINUS) { 467 if (flags != -1) {
446 retval = reserve_memtype(offset, offset + size, flags, NULL); 468 retval = reserve_memtype(offset, offset + size, flags, NULL);
447 } else { 469 } else {
448 retval = reserve_memtype(offset, offset + size, -1, &flags); 470 retval = reserve_memtype(offset, offset + size, -1, &flags);
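
The /dev/mem hunk above downgrades the O_SYNC request from UC to UC_MINUS and uses -1 as a "no explicit request" sentinel so reserve_memtype() can negotiate the type. A condensed sketch of that sentinel flow, with illustrative constants rather than the kernel's _PAGE_CACHE_* values:

#include <stdio.h>

enum { CACHE_WB = 0, CACHE_UC_MINUS = 1 };    /* stand-ins, not kernel values */

static long pick_memtype(int o_sync)
{
        long flags = -1;                      /* default: let the reservation decide */

        if (o_sync)
                flags = CACHE_UC_MINUS;       /* explicit request */

        if (flags != -1)
                return flags;                 /* reserve exactly what was asked for */
        return CACHE_WB;                      /* stand-in for reserve_memtype(..., -1, &flags) */
}

int main(void)
{
        printf("O_SYNC: %ld, default: %ld\n", pick_memtype(1), pick_memtype(0));
        return 0;
}
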
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 557b2abceef8..d50302774fe2 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -207,6 +207,9 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
207 unsigned long addr; 207 unsigned long addr;
208 int i; 208 int i;
209 209
210 if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
211 return;
212
210 pud = pud_offset(pgd, 0); 213 pud = pud_offset(pgd, 0);
211 214
212 for (addr = i = 0; i < PREALLOCATED_PMDS; 215 for (addr = i = 0; i < PREALLOCATED_PMDS;
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index b4becbf8c570..cab0abbd1ebe 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -20,53 +20,6 @@
20#include <asm/tlb.h> 20#include <asm/tlb.h>
21#include <asm/tlbflush.h> 21#include <asm/tlbflush.h>
22 22
23void show_mem(void)
24{
25 int total = 0, reserved = 0;
26 int shared = 0, cached = 0;
27 int highmem = 0;
28 struct page *page;
29 pg_data_t *pgdat;
30 unsigned long i;
31 unsigned long flags;
32
33 printk(KERN_INFO "Mem-info:\n");
34 show_free_areas();
35 for_each_online_pgdat(pgdat) {
36 pgdat_resize_lock(pgdat, &flags);
37 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
38 if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
39 touch_nmi_watchdog();
40 page = pgdat_page_nr(pgdat, i);
41 total++;
42 if (PageHighMem(page))
43 highmem++;
44 if (PageReserved(page))
45 reserved++;
46 else if (PageSwapCache(page))
47 cached++;
48 else if (page_count(page))
49 shared += page_count(page) - 1;
50 }
51 pgdat_resize_unlock(pgdat, &flags);
52 }
53 printk(KERN_INFO "%d pages of RAM\n", total);
54 printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
55 printk(KERN_INFO "%d reserved pages\n", reserved);
56 printk(KERN_INFO "%d pages shared\n", shared);
57 printk(KERN_INFO "%d pages swap cached\n", cached);
58
59 printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
60 printk(KERN_INFO "%lu pages writeback\n",
61 global_page_state(NR_WRITEBACK));
62 printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
63 printk(KERN_INFO "%lu pages slab\n",
64 global_page_state(NR_SLAB_RECLAIMABLE) +
65 global_page_state(NR_SLAB_UNRECLAIMABLE));
66 printk(KERN_INFO "%lu pages pagetables\n",
67 global_page_state(NR_PAGETABLE));
68}
69
70/* 23/*
71 * Associate a virtual page frame with a given physical page frame 24 * Associate a virtual page frame with a given physical page frame
72 * and protection flags for that frame. 25 * and protection flags for that frame.
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 1eb2973a301c..16ae70fc57e7 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -178,7 +178,7 @@ void acpi_numa_arch_fixup(void)
178 * start of the node, and that the current "end" address is after 178 * start of the node, and that the current "end" address is after
179 * the previous one. 179 * the previous one.
180 */ 180 */
181static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk) 181static __init int node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk)
182{ 182{
183 /* 183 /*
184 * Only add present memory as told by the e820. 184 * Only add present memory as told by the e820.
@@ -189,10 +189,10 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c
189 if (memory_chunk->start_pfn >= max_pfn) { 189 if (memory_chunk->start_pfn >= max_pfn) {
190 printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n", 190 printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n",
191 memory_chunk->start_pfn, memory_chunk->end_pfn); 191 memory_chunk->start_pfn, memory_chunk->end_pfn);
192 return; 192 return -1;
193 } 193 }
194 if (memory_chunk->nid != nid) 194 if (memory_chunk->nid != nid)
195 return; 195 return -1;
196 196
197 if (!node_has_online_mem(nid)) 197 if (!node_has_online_mem(nid))
198 node_start_pfn[nid] = memory_chunk->start_pfn; 198 node_start_pfn[nid] = memory_chunk->start_pfn;
@@ -202,6 +202,8 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c
202 202
203 if (node_end_pfn[nid] < memory_chunk->end_pfn) 203 if (node_end_pfn[nid] < memory_chunk->end_pfn)
204 node_end_pfn[nid] = memory_chunk->end_pfn; 204 node_end_pfn[nid] = memory_chunk->end_pfn;
205
206 return 0;
205} 207}
206 208
207int __init get_memcfg_from_srat(void) 209int __init get_memcfg_from_srat(void)
@@ -259,7 +261,9 @@ int __init get_memcfg_from_srat(void)
259 printk(KERN_DEBUG 261 printk(KERN_DEBUG
260 "chunk %d nid %d start_pfn %08lx end_pfn %08lx\n", 262 "chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
261 j, chunk->nid, chunk->start_pfn, chunk->end_pfn); 263 j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
262 node_read_chunk(chunk->nid, chunk); 264 if (node_read_chunk(chunk->nid, chunk))
265 continue;
266
263 e820_register_active_regions(chunk->nid, chunk->start_pfn, 267 e820_register_active_regions(chunk->nid, chunk->start_pfn,
264 min(chunk->end_pfn, max_pfn)); 268 min(chunk->end_pfn, max_pfn));
265 } 269 }
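
node_read_chunk() above now returns -1 for SRAT chunks it ignores so the caller can skip registering them as active e820 regions. The control flow, reduced to a runnable toy:

#include <stdio.h>

static int read_chunk(unsigned long start_pfn, unsigned long max_pfn)
{
        if (start_pfn >= max_pfn)
                return -1;                    /* chunk entirely above usable memory: ignore */
        return 0;
}

int main(void)
{
        unsigned long chunks[] = { 0x100, 0x9000 }, max_pfn = 0x8000;

        for (unsigned i = 0; i < 2; i++) {
                if (read_chunk(chunks[i], max_pfn))
                        continue;             /* do not register ignored chunks */
                printf("registering chunk starting at pfn %#lx\n", chunks[i]);
        }
        return 0;
}
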
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 7f3329b55d2e..0227694f7dab 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -15,6 +15,7 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/moduleparam.h> 16#include <linux/moduleparam.h>
17#include <linux/kdebug.h> 17#include <linux/kdebug.h>
18#include <linux/cpu.h>
18#include <asm/nmi.h> 19#include <asm/nmi.h>
19#include <asm/msr.h> 20#include <asm/msr.h>
20#include <asm/apic.h> 21#include <asm/apic.h>
@@ -28,23 +29,48 @@ static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
28 29
29static int nmi_start(void); 30static int nmi_start(void);
30static void nmi_stop(void); 31static void nmi_stop(void);
32static void nmi_cpu_start(void *dummy);
33static void nmi_cpu_stop(void *dummy);
31 34
32/* 0 == registered but off, 1 == registered and on */ 35/* 0 == registered but off, 1 == registered and on */
33static int nmi_enabled = 0; 36static int nmi_enabled = 0;
34 37
38#ifdef CONFIG_SMP
39static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
40 void *data)
41{
42 int cpu = (unsigned long)data;
43 switch (action) {
44 case CPU_DOWN_FAILED:
45 case CPU_ONLINE:
46 smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
47 break;
48 case CPU_DOWN_PREPARE:
49 smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
50 break;
51 }
52 return NOTIFY_DONE;
53}
54
55static struct notifier_block oprofile_cpu_nb = {
56 .notifier_call = oprofile_cpu_notifier
57};
58#endif
59
35#ifdef CONFIG_PM 60#ifdef CONFIG_PM
36 61
37static int nmi_suspend(struct sys_device *dev, pm_message_t state) 62static int nmi_suspend(struct sys_device *dev, pm_message_t state)
38{ 63{
64 /* Only one CPU left, just stop that one */
39 if (nmi_enabled == 1) 65 if (nmi_enabled == 1)
40 nmi_stop(); 66 nmi_cpu_stop(NULL);
41 return 0; 67 return 0;
42} 68}
43 69
44static int nmi_resume(struct sys_device *dev) 70static int nmi_resume(struct sys_device *dev)
45{ 71{
46 if (nmi_enabled == 1) 72 if (nmi_enabled == 1)
47 nmi_start(); 73 nmi_cpu_start(NULL);
48 return 0; 74 return 0;
49} 75}
50 76
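
The oprofile change above registers a CPU hotplug notifier so counters are started on CPUs coming online and stopped before a CPU goes down. A self-contained userspace sketch of that notifier-chain pattern (not the kernel notifier API):

#include <stdio.h>

enum { CPU_ONLINE, CPU_DOWN_PREPARE };

struct notifier {
        int (*call)(unsigned long action, int cpu);
        struct notifier *next;
};

static struct notifier *chain;

static void register_notifier(struct notifier *nb) { nb->next = chain; chain = nb; }

static void notify(unsigned long action, int cpu)
{
        for (struct notifier *nb = chain; nb; nb = nb->next)
                nb->call(action, cpu);
}

static int profiler_cb(unsigned long action, int cpu)
{
        if (action == CPU_ONLINE)
                printf("start counters on cpu %d\n", cpu);
        else if (action == CPU_DOWN_PREPARE)
                printf("stop counters on cpu %d\n", cpu);
        return 0;
}

int main(void)
{
        struct notifier nb = { profiler_cb, NULL };

        register_notifier(&nb);
        notify(CPU_ONLINE, 1);
        notify(CPU_DOWN_PREPARE, 1);
        return 0;
}
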
@@ -369,20 +395,34 @@ static int __init ppro_init(char **cpu_type)
369{ 395{
370 __u8 cpu_model = boot_cpu_data.x86_model; 396 __u8 cpu_model = boot_cpu_data.x86_model;
371 397
372 if (cpu_model == 14) 398 switch (cpu_model) {
399 case 0 ... 2:
400 *cpu_type = "i386/ppro";
401 break;
402 case 3 ... 5:
403 *cpu_type = "i386/pii";
404 break;
405 case 6 ... 8:
406 *cpu_type = "i386/piii";
407 break;
408 case 9:
409 *cpu_type = "i386/p6_mobile";
410 break;
411 case 10 ... 13:
412 *cpu_type = "i386/p6";
413 break;
414 case 14:
373 *cpu_type = "i386/core"; 415 *cpu_type = "i386/core";
374 else if (cpu_model == 15 || cpu_model == 23) 416 break;
417 case 15: case 23:
375 *cpu_type = "i386/core_2"; 418 *cpu_type = "i386/core_2";
376 else if (cpu_model > 0xd) 419 break;
420 case 26:
421 *cpu_type = "i386/core_2";
422 break;
423 default:
424 /* Unknown */
377 return 0; 425 return 0;
378 else if (cpu_model == 9) {
379 *cpu_type = "i386/p6_mobile";
380 } else if (cpu_model > 5) {
381 *cpu_type = "i386/piii";
382 } else if (cpu_model > 2) {
383 *cpu_type = "i386/pii";
384 } else {
385 *cpu_type = "i386/ppro";
386 } 426 }
387 427
388 model = &op_ppro_spec; 428 model = &op_ppro_spec;
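
The ppro_init() rewrite above turns the model checks into a table-style switch using GCC's "case low ... high" range extension. The resulting model-to-cpu_type mapping, condensed into a standalone function:

#include <stdio.h>

static const char *p6_cpu_type(unsigned model)
{
        switch (model) {
        case 0 ... 2:   return "i386/ppro";
        case 3 ... 5:   return "i386/pii";
        case 6 ... 8:   return "i386/piii";
        case 9:         return "i386/p6_mobile";
        case 10 ... 13: return "i386/p6";
        case 14:        return "i386/core";
        case 15: case 23:
        case 26:        return "i386/core_2";
        default:        return NULL;          /* unknown model */
        }
}

int main(void)
{
        printf("model 23 -> %s\n", p6_cpu_type(23));
        return 0;
}
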
@@ -449,6 +489,9 @@ int __init op_nmi_init(struct oprofile_operations *ops)
449 } 489 }
450 490
451 init_sysfs(); 491 init_sysfs();
492#ifdef CONFIG_SMP
493 register_cpu_notifier(&oprofile_cpu_nb);
494#endif
452 using_nmi = 1; 495 using_nmi = 1;
453 ops->create_files = nmi_create_files; 496 ops->create_files = nmi_create_files;
454 ops->setup = nmi_setup; 497 ops->setup = nmi_setup;
@@ -462,6 +505,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
462 505
463void op_nmi_exit(void) 506void op_nmi_exit(void)
464{ 507{
465 if (using_nmi) 508 if (using_nmi) {
466 exit_sysfs(); 509 exit_sysfs();
510#ifdef CONFIG_SMP
511 unregister_cpu_notifier(&oprofile_cpu_nb);
512#endif
513 }
467} 514}
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index dbf532369711..6a0fca78c362 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -1,6 +1,7 @@
1#include <linux/init.h> 1#include <linux/init.h>
2#include <linux/pci.h> 2#include <linux/pci.h>
3#include <linux/topology.h> 3#include <linux/topology.h>
4#include <linux/cpu.h>
4#include "pci.h" 5#include "pci.h"
5 6
6#ifdef CONFIG_X86_64 7#ifdef CONFIG_X86_64
@@ -555,15 +556,17 @@ static int __init early_fill_mp_bus_info(void)
555 return 0; 556 return 0;
556} 557}
557 558
558postcore_initcall(early_fill_mp_bus_info); 559#else /* !CONFIG_X86_64 */
559 560
560#endif 561static int __init early_fill_mp_bus_info(void) { return 0; }
562
563#endif /* !CONFIG_X86_64 */
561 564
562/* common 32/64 bit code */ 565/* common 32/64 bit code */
563 566
564#define ENABLE_CF8_EXT_CFG (1ULL << 46) 567#define ENABLE_CF8_EXT_CFG (1ULL << 46)
565 568
566static void enable_pci_io_ecs_per_cpu(void *unused) 569static void enable_pci_io_ecs(void *unused)
567{ 570{
568 u64 reg; 571 u64 reg;
569 rdmsrl(MSR_AMD64_NB_CFG, reg); 572 rdmsrl(MSR_AMD64_NB_CFG, reg);
@@ -573,14 +576,51 @@ static void enable_pci_io_ecs_per_cpu(void *unused)
573 } 576 }
574} 577}
575 578
576static int __init enable_pci_io_ecs(void) 579static int __cpuinit amd_cpu_notify(struct notifier_block *self,
580 unsigned long action, void *hcpu)
577{ 581{
582 int cpu = (long)hcpu;
583 switch(action) {
584 case CPU_ONLINE:
585 case CPU_ONLINE_FROZEN:
586 smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
587 break;
588 default:
589 break;
590 }
591 return NOTIFY_OK;
592}
593
594static struct notifier_block __cpuinitdata amd_cpu_notifier = {
595 .notifier_call = amd_cpu_notify,
596};
597
598static int __init pci_io_ecs_init(void)
599{
600 int cpu;
601
578 /* assume all cpus from fam10h have IO ECS */ 602 /* assume all cpus from fam10h have IO ECS */
579 if (boot_cpu_data.x86 < 0x10) 603 if (boot_cpu_data.x86 < 0x10)
580 return 0; 604 return 0;
581 on_each_cpu(enable_pci_io_ecs_per_cpu, NULL, 1); 605
606 register_cpu_notifier(&amd_cpu_notifier);
607 for_each_online_cpu(cpu)
608 amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
609 (void *)(long)cpu);
582 pci_probe |= PCI_HAS_IO_ECS; 610 pci_probe |= PCI_HAS_IO_ECS;
611
612 return 0;
613}
614
615static int __init amd_postcore_init(void)
616{
617 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
618 return 0;
619
620 early_fill_mp_bus_info();
621 pci_io_ecs_init();
622
583 return 0; 623 return 0;
584} 624}
585 625
586postcore_initcall(enable_pci_io_ecs); 626postcore_initcall(amd_postcore_init);
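
enable_pci_io_ecs() above is now also run from a hotplug notifier for each CPU that comes online; the per-CPU work itself is a read-modify-write of bit 46 in the Northbridge configuration MSR. A userspace sketch of that bit-set step, with the MSR faked by a variable:

#include <stdio.h>
#include <stdint.h>

#define ENABLE_CF8_EXT_CFG (1ULL << 46)

static uint64_t fake_nb_cfg_msr;              /* stand-in for MSR_AMD64_NB_CFG */

static void enable_io_ecs(void)
{
        uint64_t reg = fake_nb_cfg_msr;       /* rdmsrl() in the kernel */

        if (!(reg & ENABLE_CF8_EXT_CFG)) {
                reg |= ENABLE_CF8_EXT_CFG;
                fake_nb_cfg_msr = reg;        /* wrmsrl() in the kernel */
        }
}

int main(void)
{
        enable_io_ecs();
        printf("NB_CFG = %#llx\n", (unsigned long long)fake_nb_cfg_msr);
        return 0;
}
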
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index ff3a6a336342..4bdaa590375d 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -23,7 +23,8 @@ static void __devinit pci_fixup_i450nx(struct pci_dev *d)
23 pci_read_config_byte(d, reg++, &busno); 23 pci_read_config_byte(d, reg++, &busno);
24 pci_read_config_byte(d, reg++, &suba); 24 pci_read_config_byte(d, reg++, &suba);
25 pci_read_config_byte(d, reg++, &subb); 25 pci_read_config_byte(d, reg++, &subb);
26 DBG("i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, suba, subb); 26 dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno,
27 suba, subb);
27 if (busno) 28 if (busno)
28 pci_scan_bus_with_sysdata(busno); /* Bus A */ 29 pci_scan_bus_with_sysdata(busno); /* Bus A */
29 if (suba < subb) 30 if (suba < subb)
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 2aafb67dc5f1..8791fc55e715 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -128,10 +128,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
128 pr = pci_find_parent_resource(dev, r); 128 pr = pci_find_parent_resource(dev, r);
129 if (!r->start || !pr || 129 if (!r->start || !pr ||
130 request_resource(pr, r) < 0) { 130 request_resource(pr, r) < 0) {
131 printk(KERN_ERR "PCI: Cannot allocate " 131 dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx);
132 "resource region %d "
133 "of bridge %s\n",
134 idx, pci_name(dev));
135 /* 132 /*
136 * Something is wrong with the region. 133 * Something is wrong with the region.
137 * Invalidate the resource to prevent 134 * Invalidate the resource to prevent
@@ -166,15 +163,13 @@ static void __init pcibios_allocate_resources(int pass)
166 else 163 else
167 disabled = !(command & PCI_COMMAND_MEMORY); 164 disabled = !(command & PCI_COMMAND_MEMORY);
168 if (pass == disabled) { 165 if (pass == disabled) {
169 DBG("PCI: Resource %08lx-%08lx " 166 dev_dbg(&dev->dev, "resource %#08llx-%#08llx (f=%lx, d=%d, p=%d)\n",
170 "(f=%lx, d=%d, p=%d)\n", 167 (unsigned long long) r->start,
171 r->start, r->end, r->flags, disabled, pass); 168 (unsigned long long) r->end,
169 r->flags, disabled, pass);
172 pr = pci_find_parent_resource(dev, r); 170 pr = pci_find_parent_resource(dev, r);
173 if (!pr || request_resource(pr, r) < 0) { 171 if (!pr || request_resource(pr, r) < 0) {
174 printk(KERN_ERR "PCI: Cannot allocate " 172 dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx);
175 "resource region %d "
176 "of device %s\n",
177 idx, pci_name(dev));
178 /* We'll assign a new address later */ 173 /* We'll assign a new address later */
179 r->end -= r->start; 174 r->end -= r->start;
180 r->start = 0; 175 r->start = 0;
@@ -187,8 +182,7 @@ static void __init pcibios_allocate_resources(int pass)
187 /* Turn the ROM off, leave the resource region, 182 /* Turn the ROM off, leave the resource region,
188 * but keep it unregistered. */ 183 * but keep it unregistered. */
189 u32 reg; 184 u32 reg;
190 DBG("PCI: Switching off ROM of %s\n", 185 dev_dbg(&dev->dev, "disabling ROM\n");
191 pci_name(dev));
192 r->flags &= ~IORESOURCE_ROM_ENABLE; 186 r->flags &= ~IORESOURCE_ROM_ENABLE;
193 pci_read_config_dword(dev, 187 pci_read_config_dword(dev,
194 dev->rom_base_reg, &reg); 188 dev->rom_base_reg, &reg);
@@ -257,8 +251,7 @@ void pcibios_set_master(struct pci_dev *dev)
257 lat = pcibios_max_latency; 251 lat = pcibios_max_latency;
258 else 252 else
259 return; 253 return;
260 printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", 254 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
261 pci_name(dev), lat);
262 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); 255 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
263} 256}
264 257
@@ -280,6 +273,7 @@ static void pci_track_mmap_page_range(struct vm_area_struct *vma)
280static struct vm_operations_struct pci_mmap_ops = { 273static struct vm_operations_struct pci_mmap_ops = {
281 .open = pci_track_mmap_page_range, 274 .open = pci_track_mmap_page_range,
282 .close = pci_unmap_page_range, 275 .close = pci_unmap_page_range,
276 .access = generic_access_phys,
283}; 277};
284 278
285int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 279int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
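
The pci/i386.c hunks above replace hand-rolled printk()s carrying pci_name(dev) with dev_err()/dev_dbg()/dev_printk(), which prefix the device name automatically (the irq.c conversions below do the same). A toy prefixing wrapper showing the effect; not the kernel's dev_printk implementation:

#include <stdio.h>
#include <stdarg.h>

struct device { const char *name; };          /* toy device, name only */

static void dev_err(const struct device *dev, const char *fmt, ...)
{
        va_list ap;

        fprintf(stderr, "%s: ", dev->name);   /* automatic device prefix */
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
}

int main(void)
{
        struct device d = { "0000:00:1f.0" }; /* made-up PCI address */

        dev_err(&d, "BAR %d: can't allocate resource\n", 2);
        return 0;
}
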
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 6a06a2eb0597..8e077185e185 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -436,7 +436,7 @@ static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
436{ 436{
437 WARN_ON_ONCE(pirq >= 9); 437 WARN_ON_ONCE(pirq >= 9);
438 if (pirq > 8) { 438 if (pirq > 8) {
439 printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq); 439 dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq);
440 return 0; 440 return 0;
441 } 441 }
442 return read_config_nybble(router, 0x74, pirq-1); 442 return read_config_nybble(router, 0x74, pirq-1);
@@ -446,7 +446,7 @@ static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
446{ 446{
447 WARN_ON_ONCE(pirq >= 9); 447 WARN_ON_ONCE(pirq >= 9);
448 if (pirq > 8) { 448 if (pirq > 8) {
449 printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq); 449 dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq);
450 return 0; 450 return 0;
451 } 451 }
452 write_config_nybble(router, 0x74, pirq-1, irq); 452 write_config_nybble(router, 0x74, pirq-1, irq);
@@ -492,15 +492,17 @@ static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq
492 irq = 0; 492 irq = 0;
493 if (pirq <= 4) 493 if (pirq <= 4)
494 irq = read_config_nybble(router, 0x56, pirq - 1); 494 irq = read_config_nybble(router, 0x56, pirq - 1);
495 printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n", 495 dev_info(&dev->dev,
496 dev->vendor, dev->device, pirq, irq); 496 "AMD756: dev [%04x/%04x], router PIRQ %d get IRQ %d\n",
497 dev->vendor, dev->device, pirq, irq);
497 return irq; 498 return irq;
498} 499}
499 500
500static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) 501static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
501{ 502{
502 printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n", 503 dev_info(&dev->dev,
503 dev->vendor, dev->device, pirq, irq); 504 "AMD756: dev [%04x/%04x], router PIRQ %d set IRQ %d\n",
505 dev->vendor, dev->device, pirq, irq);
504 if (pirq <= 4) 506 if (pirq <= 4)
505 write_config_nybble(router, 0x56, pirq - 1, irq); 507 write_config_nybble(router, 0x56, pirq - 1, irq);
506 return 1; 508 return 1;
@@ -588,6 +590,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
588 case PCI_DEVICE_ID_INTEL_ICH10_1: 590 case PCI_DEVICE_ID_INTEL_ICH10_1:
589 case PCI_DEVICE_ID_INTEL_ICH10_2: 591 case PCI_DEVICE_ID_INTEL_ICH10_2:
590 case PCI_DEVICE_ID_INTEL_ICH10_3: 592 case PCI_DEVICE_ID_INTEL_ICH10_3:
593 case PCI_DEVICE_ID_INTEL_PCH_0:
594 case PCI_DEVICE_ID_INTEL_PCH_1:
591 r->name = "PIIX/ICH"; 595 r->name = "PIIX/ICH";
592 r->get = pirq_piix_get; 596 r->get = pirq_piix_get;
593 r->set = pirq_piix_set; 597 r->set = pirq_piix_set;
@@ -730,7 +734,6 @@ static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router,
730 switch (device) { 734 switch (device) {
731 case PCI_DEVICE_ID_AL_M1533: 735 case PCI_DEVICE_ID_AL_M1533:
732 case PCI_DEVICE_ID_AL_M1563: 736 case PCI_DEVICE_ID_AL_M1563:
733 printk(KERN_DEBUG "PCI: Using ALI IRQ Router\n");
734 r->name = "ALI"; 737 r->name = "ALI";
735 r->get = pirq_ali_get; 738 r->get = pirq_ali_get;
736 r->set = pirq_ali_set; 739 r->set = pirq_ali_set;
@@ -840,11 +843,9 @@ static void __init pirq_find_router(struct irq_router *r)
840 h->probe(r, pirq_router_dev, pirq_router_dev->device)) 843 h->probe(r, pirq_router_dev, pirq_router_dev->device))
841 break; 844 break;
842 } 845 }
843 printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n", 846 dev_info(&pirq_router_dev->dev, "%s IRQ router [%04x/%04x]\n",
844 pirq_router.name, 847 pirq_router.name,
845 pirq_router_dev->vendor, 848 pirq_router_dev->vendor, pirq_router_dev->device);
846 pirq_router_dev->device,
847 pci_name(pirq_router_dev));
848 849
849 /* The device remains referenced for the kernel lifetime */ 850 /* The device remains referenced for the kernel lifetime */
850} 851}
@@ -877,7 +878,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
877 /* Find IRQ pin */ 878 /* Find IRQ pin */
878 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); 879 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
879 if (!pin) { 880 if (!pin) {
880 DBG(KERN_DEBUG " -> no interrupt pin\n"); 881 dev_dbg(&dev->dev, "no interrupt pin\n");
881 return 0; 882 return 0;
882 } 883 }
883 pin = pin - 1; 884 pin = pin - 1;
@@ -887,20 +888,20 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
887 if (!pirq_table) 888 if (!pirq_table)
888 return 0; 889 return 0;
889 890
890 DBG(KERN_DEBUG "IRQ for %s[%c]", pci_name(dev), 'A' + pin);
891 info = pirq_get_info(dev); 891 info = pirq_get_info(dev);
892 if (!info) { 892 if (!info) {
893 DBG(" -> not found in routing table\n" KERN_DEBUG); 893 dev_dbg(&dev->dev, "PCI INT %c not found in routing table\n",
894 'A' + pin);
894 return 0; 895 return 0;
895 } 896 }
896 pirq = info->irq[pin].link; 897 pirq = info->irq[pin].link;
897 mask = info->irq[pin].bitmap; 898 mask = info->irq[pin].bitmap;
898 if (!pirq) { 899 if (!pirq) {
899 DBG(" -> not routed\n" KERN_DEBUG); 900 dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin);
900 return 0; 901 return 0;
901 } 902 }
902 DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, 903 dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x",
903 pirq_table->exclusive_irqs); 904 'A' + pin, pirq, mask, pirq_table->exclusive_irqs);
904 mask &= pcibios_irq_mask; 905 mask &= pcibios_irq_mask;
905 906
906 /* Work around broken HP Pavilion Notebooks which assign USB to 907 /* Work around broken HP Pavilion Notebooks which assign USB to
@@ -930,10 +931,8 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
930 if (pci_probe & PCI_USE_PIRQ_MASK) 931 if (pci_probe & PCI_USE_PIRQ_MASK)
931 newirq = 0; 932 newirq = 0;
932 else 933 else
933 printk("\n" KERN_WARNING 934 dev_warn(&dev->dev, "IRQ %d doesn't match PIRQ mask "
934 "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n" 935 "%#x; try pci=usepirqmask\n", newirq, mask);
935 KERN_DEBUG, newirq,
936 pci_name(dev));
937 } 936 }
938 if (!newirq && assign) { 937 if (!newirq && assign) {
939 for (i = 0; i < 16; i++) { 938 for (i = 0; i < 16; i++) {
@@ -944,39 +943,35 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
944 newirq = i; 943 newirq = i;
945 } 944 }
946 } 945 }
947 DBG(" -> newirq=%d", newirq); 946 dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin, newirq);
948 947
949 /* Check if it is hardcoded */ 948 /* Check if it is hardcoded */
950 if ((pirq & 0xf0) == 0xf0) { 949 if ((pirq & 0xf0) == 0xf0) {
951 irq = pirq & 0xf; 950 irq = pirq & 0xf;
952 DBG(" -> hardcoded IRQ %d\n", irq); 951 msg = "hardcoded";
953 msg = "Hardcoded";
954 } else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \ 952 } else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
955 ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) { 953 ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) {
956 DBG(" -> got IRQ %d\n", irq); 954 msg = "found";
957 msg = "Found";
958 eisa_set_level_irq(irq); 955 eisa_set_level_irq(irq);
959 } else if (newirq && r->set && 956 } else if (newirq && r->set &&
960 (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) { 957 (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
961 DBG(" -> assigning IRQ %d", newirq);
962 if (r->set(pirq_router_dev, dev, pirq, newirq)) { 958 if (r->set(pirq_router_dev, dev, pirq, newirq)) {
963 eisa_set_level_irq(newirq); 959 eisa_set_level_irq(newirq);
964 DBG(" ... OK\n"); 960 msg = "assigned";
965 msg = "Assigned";
966 irq = newirq; 961 irq = newirq;
967 } 962 }
968 } 963 }
969 964
970 if (!irq) { 965 if (!irq) {
971 DBG(" ... failed\n");
972 if (newirq && mask == (1 << newirq)) { 966 if (newirq && mask == (1 << newirq)) {
973 msg = "Guessed"; 967 msg = "guessed";
974 irq = newirq; 968 irq = newirq;
975 } else 969 } else {
970 dev_dbg(&dev->dev, "can't route interrupt\n");
976 return 0; 971 return 0;
972 }
977 } 973 }
978 printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, 974 dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin, irq);
979 pci_name(dev));
980 975
981 /* Update IRQ for all devices with the same pirq value */ 976 /* Update IRQ for all devices with the same pirq value */
982 while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) { 977 while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
@@ -996,17 +991,17 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
996 (!(pci_probe & PCI_USE_PIRQ_MASK) || \ 991 (!(pci_probe & PCI_USE_PIRQ_MASK) || \
997 ((1 << dev2->irq) & mask))) { 992 ((1 << dev2->irq) & mask))) {
998#ifndef CONFIG_PCI_MSI 993#ifndef CONFIG_PCI_MSI
999 printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n", 994 dev_info(&dev2->dev, "IRQ routing conflict: "
1000 pci_name(dev2), dev2->irq, irq); 995 "have IRQ %d, want IRQ %d\n",
996 dev2->irq, irq);
1001#endif 997#endif
1002 continue; 998 continue;
1003 } 999 }
1004 dev2->irq = irq; 1000 dev2->irq = irq;
1005 pirq_penalty[irq]++; 1001 pirq_penalty[irq]++;
1006 if (dev != dev2) 1002 if (dev != dev2)
1007 printk(KERN_INFO 1003 dev_info(&dev->dev, "sharing IRQ %d with %s\n",
1008 "PCI: Sharing IRQ %d with %s\n", 1004 irq, pci_name(dev2));
1009 irq, pci_name(dev2));
1010 } 1005 }
1011 } 1006 }
1012 return 1; 1007 return 1;
@@ -1025,8 +1020,7 @@ static void __init pcibios_fixup_irqs(void)
1025 * already in use. 1020 * already in use.
1026 */ 1021 */
1027 if (dev->irq >= 16) { 1022 if (dev->irq >= 16) {
1028 DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", 1023 dev_dbg(&dev->dev, "ignoring bogus IRQ %d\n", dev->irq);
1029 pci_name(dev), dev->irq);
1030 dev->irq = 0; 1024 dev->irq = 0;
1031 } 1025 }
1032 /* 1026 /*
@@ -1070,12 +1064,12 @@ static void __init pcibios_fixup_irqs(void)
1070 irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 1064 irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
1071 PCI_SLOT(bridge->devfn), pin); 1065 PCI_SLOT(bridge->devfn), pin);
1072 if (irq >= 0) 1066 if (irq >= 0)
1073 printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n", 1067 dev_warn(&dev->dev, "using bridge %s INT %c to get IRQ %d\n",
1074 pci_name(bridge), 'A' + pin, irq); 1068 pci_name(bridge),
1069 'A' + pin, irq);
1075 } 1070 }
1076 if (irq >= 0) { 1071 if (irq >= 0) {
1077 printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n", 1072 dev_info(&dev->dev, "PCI->APIC IRQ transform: INT %c -> IRQ %d\n", 'A' + pin, irq);
1078 pci_name(dev), 'A' + pin, irq);
1079 dev->irq = irq; 1073 dev->irq = irq;
1080 } 1074 }
1081 } 1075 }
@@ -1231,25 +1225,24 @@ static int pirq_enable_irq(struct pci_dev *dev)
1231 irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 1225 irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
1232 PCI_SLOT(bridge->devfn), pin); 1226 PCI_SLOT(bridge->devfn), pin);
1233 if (irq >= 0) 1227 if (irq >= 0)
1234 printk(KERN_WARNING 1228 dev_warn(&dev->dev, "using bridge %s "
1235 "PCI: using PPB %s[%c] to get irq %d\n", 1229 "INT %c to get IRQ %d\n",
1236 pci_name(bridge), 1230 pci_name(bridge), 'A' + pin,
1237 'A' + pin, irq); 1231 irq);
1238 dev = bridge; 1232 dev = bridge;
1239 } 1233 }
1240 dev = temp_dev; 1234 dev = temp_dev;
1241 if (irq >= 0) { 1235 if (irq >= 0) {
1242 printk(KERN_INFO 1236 dev_info(&dev->dev, "PCI->APIC IRQ transform: "
1243 "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n", 1237 "INT %c -> IRQ %d\n", 'A' + pin, irq);
1244 pci_name(dev), 'A' + pin, irq);
1245 dev->irq = irq; 1238 dev->irq = irq;
1246 return 0; 1239 return 0;
1247 } else 1240 } else
1248 msg = " Probably buggy MP table."; 1241 msg = "; probably buggy MP table";
1249 } else if (pci_probe & PCI_BIOS_IRQ_SCAN) 1242 } else if (pci_probe & PCI_BIOS_IRQ_SCAN)
1250 msg = ""; 1243 msg = "";
1251 else 1244 else
1252 msg = " Please try using pci=biosirq."; 1245 msg = "; please try using pci=biosirq";
1253 1246
1254 /* 1247 /*
1255 * With IDE legacy devices the IRQ lookup failure is not 1248 * With IDE legacy devices the IRQ lookup failure is not
@@ -1259,9 +1252,8 @@ static int pirq_enable_irq(struct pci_dev *dev)
1259 !(dev->class & 0x5)) 1252 !(dev->class & 0x5))
1260 return 0; 1253 return 0;
1261 1254
1262 printk(KERN_WARNING 1255 dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n",
1263 "PCI: No IRQ known for interrupt pin %c of device %s.%s\n", 1256 'A' + pin, msg);
1264 'A' + pin, pci_name(dev), msg);
1265 } 1257 }
1266 return 0; 1258 return 0;
1267} 1259}
diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c
index ec9ce35e44d6..b722dd481b39 100644
--- a/arch/x86/pci/legacy.c
+++ b/arch/x86/pci/legacy.c
@@ -14,7 +14,7 @@ static void __devinit pcibios_fixup_peer_bridges(void)
14 int n, devfn; 14 int n, devfn;
15 long node; 15 long node;
16 16
17 if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff) 17 if (pcibios_last_bus <= 0 || pcibios_last_bus > 0xff)
18 return; 18 return;
19 DBG("PCI: Peer bridge fixup\n"); 19 DBG("PCI: Peer bridge fixup\n");
20 20
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 23faaa890ffc..d9635764ce3d 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -293,7 +293,7 @@ static acpi_status __init find_mboard_resource(acpi_handle handle, u32 lvl,
293 return AE_OK; 293 return AE_OK;
294} 294}
295 295
296static int __init is_acpi_reserved(unsigned long start, unsigned long end) 296static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
297{ 297{
298 struct resource mcfg_res; 298 struct resource mcfg_res;
299 299
@@ -310,6 +310,41 @@ static int __init is_acpi_reserved(unsigned long start, unsigned long end)
310 return mcfg_res.flags; 310 return mcfg_res.flags;
311} 311}
312 312
313typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type);
314
315static int __init is_mmconf_reserved(check_reserved_t is_reserved,
316 u64 addr, u64 size, int i,
317 typeof(pci_mmcfg_config[0]) *cfg, int with_e820)
318{
319 u64 old_size = size;
320 int valid = 0;
321
322 while (!is_reserved(addr, addr + size - 1, E820_RESERVED)) {
323 size >>= 1;
324 if (size < (16UL<<20))
325 break;
326 }
327
328 if (size >= (16UL<<20) || size == old_size) {
329 printk(KERN_NOTICE
330 "PCI: MCFG area at %Lx reserved in %s\n",
331 addr, with_e820?"E820":"ACPI motherboard resources");
332 valid = 1;
333
334 if (old_size != size) {
335 /* update end_bus_number */
336 cfg->end_bus_number = cfg->start_bus_number + ((size>>20) - 1);
337 printk(KERN_NOTICE "PCI: updated MCFG configuration %d: base %lx "
338 "segment %hu buses %u - %u\n",
339 i, (unsigned long)cfg->address, cfg->pci_segment,
340 (unsigned int)cfg->start_bus_number,
341 (unsigned int)cfg->end_bus_number);
342 }
343 }
344
345 return valid;
346}
347
313static void __init pci_mmcfg_reject_broken(int early) 348static void __init pci_mmcfg_reject_broken(int early)
314{ 349{
315 typeof(pci_mmcfg_config[0]) *cfg; 350 typeof(pci_mmcfg_config[0]) *cfg;
@@ -324,21 +359,22 @@ static void __init pci_mmcfg_reject_broken(int early)
324 359
325 for (i = 0; i < pci_mmcfg_config_num; i++) { 360 for (i = 0; i < pci_mmcfg_config_num; i++) {
326 int valid = 0; 361 int valid = 0;
327 u32 size = (cfg->end_bus_number + 1) << 20; 362 u64 addr, size;
363
328 cfg = &pci_mmcfg_config[i]; 364 cfg = &pci_mmcfg_config[i];
365 addr = cfg->start_bus_number;
366 addr <<= 20;
367 addr += cfg->address;
368 size = cfg->end_bus_number + 1 - cfg->start_bus_number;
369 size <<= 20;
329 printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx " 370 printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx "
330 "segment %hu buses %u - %u\n", 371 "segment %hu buses %u - %u\n",
331 i, (unsigned long)cfg->address, cfg->pci_segment, 372 i, (unsigned long)cfg->address, cfg->pci_segment,
332 (unsigned int)cfg->start_bus_number, 373 (unsigned int)cfg->start_bus_number,
333 (unsigned int)cfg->end_bus_number); 374 (unsigned int)cfg->end_bus_number);
334 375
335 if (!early && 376 if (!early)
336 is_acpi_reserved(cfg->address, cfg->address + size - 1)) { 377 valid = is_mmconf_reserved(is_acpi_reserved, addr, size, i, cfg, 0);
337 printk(KERN_NOTICE "PCI: MCFG area at %Lx reserved "
338 "in ACPI motherboard resources\n",
339 cfg->address);
340 valid = 1;
341 }
342 378
343 if (valid) 379 if (valid)
344 continue; 380 continue;
@@ -347,16 +383,11 @@ static void __init pci_mmcfg_reject_broken(int early)
347 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not" 383 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not"
348 " reserved in ACPI motherboard resources\n", 384 " reserved in ACPI motherboard resources\n",
349 cfg->address); 385 cfg->address);
386
350 /* Don't try to do this check unless configuration 387 /* Don't try to do this check unless configuration
351 type 1 is available. how about type 2 ?*/ 388 type 1 is available. how about type 2 ?*/
352 if (raw_pci_ops && e820_all_mapped(cfg->address, 389 if (raw_pci_ops)
353 cfg->address + size - 1, 390 valid = is_mmconf_reserved(e820_all_mapped, addr, size, i, cfg, 1);
354 E820_RESERVED)) {
355 printk(KERN_NOTICE
356 "PCI: MCFG area at %Lx reserved in E820\n",
357 cfg->address);
358 valid = 1;
359 }
360 391
361 if (!valid) 392 if (!valid)
362 goto reject; 393 goto reject;
@@ -365,7 +396,7 @@ static void __init pci_mmcfg_reject_broken(int early)
365 return; 396 return;
366 397
367reject: 398reject:
368 printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); 399 printk(KERN_INFO "PCI: Not using MMCONFIG.\n");
369 pci_mmcfg_arch_free(); 400 pci_mmcfg_arch_free();
370 kfree(pci_mmcfg_config); 401 kfree(pci_mmcfg_config);
371 pci_mmcfg_config = NULL; 402 pci_mmcfg_config = NULL;
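
is_mmconf_reserved() above probes whether the MCFG window is marked reserved by repeatedly halving it, accepting the result only if at least 16 MB (16 buses' worth of config space) survives, and shrinking end_bus_number to match. The halving loop on its own, with a fake reservation check:

#include <stdio.h>

/* Pretend only the first 64 MB of the window is actually reserved. */
static int is_reserved(unsigned long long start, unsigned long long end)
{
        return end < (64ULL << 20);
}

int main(void)
{
        unsigned long long addr = 0, size = 256ULL << 20, old_size = size;

        while (!is_reserved(addr, addr + size - 1)) {
                size >>= 1;
                if (size < (16ULL << 20))
                        break;
        }

        if (size >= (16ULL << 20) || size == old_size)
                printf("MCFG usable: %llu MB of %llu MB\n", size >> 20, old_size >> 20);
        else
                printf("MCFG rejected\n");
        return 0;
}
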
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
index f4b16dc11dad..1177845d3186 100644
--- a/arch/x86/pci/numaq_32.c
+++ b/arch/x86/pci/numaq_32.c
@@ -131,13 +131,14 @@ static void __devinit pci_fixup_i450nx(struct pci_dev *d)
131 u8 busno, suba, subb; 131 u8 busno, suba, subb;
132 int quad = BUS2QUAD(d->bus->number); 132 int quad = BUS2QUAD(d->bus->number);
133 133
134 printk("PCI: Searching for i450NX host bridges on %s\n", pci_name(d)); 134 dev_info(&d->dev, "searching for i450NX host bridges\n");
135 reg = 0xd0; 135 reg = 0xd0;
136 for(pxb=0; pxb<2; pxb++) { 136 for(pxb=0; pxb<2; pxb++) {
137 pci_read_config_byte(d, reg++, &busno); 137 pci_read_config_byte(d, reg++, &busno);
138 pci_read_config_byte(d, reg++, &suba); 138 pci_read_config_byte(d, reg++, &suba);
139 pci_read_config_byte(d, reg++, &subb); 139 pci_read_config_byte(d, reg++, &subb);
140 DBG("i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, suba, subb); 140 dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n",
141 pxb, busno, suba, subb);
141 if (busno) { 142 if (busno) {
142 /* Bus A */ 143 /* Bus A */
143 pci_scan_bus_with_sysdata(QUADLOCAL2BUS(quad, busno)); 144 pci_scan_bus_with_sysdata(QUADLOCAL2BUS(quad, busno));
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c
index 7dc5d5cf50a2..d3e083dea720 100644
--- a/arch/x86/power/cpu_32.c
+++ b/arch/x86/power/cpu_32.c
@@ -45,7 +45,7 @@ static void __save_processor_state(struct saved_context *ctxt)
45 ctxt->cr0 = read_cr0(); 45 ctxt->cr0 = read_cr0();
46 ctxt->cr2 = read_cr2(); 46 ctxt->cr2 = read_cr2();
47 ctxt->cr3 = read_cr3(); 47 ctxt->cr3 = read_cr3();
48 ctxt->cr4 = read_cr4(); 48 ctxt->cr4 = read_cr4_safe();
49} 49}
50 50
51/* Needed by apm.c */ 51/* Needed by apm.c */
@@ -98,7 +98,9 @@ static void __restore_processor_state(struct saved_context *ctxt)
98 /* 98 /*
99 * control registers 99 * control registers
100 */ 100 */
101 write_cr4(ctxt->cr4); 101 /* cr4 was introduced in the Pentium CPU */
102 if (ctxt->cr4)
103 write_cr4(ctxt->cr4);
102 write_cr3(ctxt->cr3); 104 write_cr3(ctxt->cr3);
103 write_cr2(ctxt->cr2); 105 write_cr2(ctxt->cr2);
104 write_cr0(ctxt->cr0); 106 write_cr0(ctxt->cr0);
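
The cpu_32.c suspend path above reads cr4 with read_cr4_safe(), which yields 0 on CPUs that have no cr4, and the resume path only writes it back when a non-zero value was saved. The same guard in a runnable toy, with the register accesses faked:

#include <stdio.h>

static unsigned long fake_cr4;                /* 0 models "no cr4 on this CPU" */

static unsigned long read_cr4_safe(void) { return fake_cr4; }
static void write_cr4(unsigned long v)    { fake_cr4 = v; }

int main(void)
{
        unsigned long saved_cr4 = read_cr4_safe();   /* may legitimately be 0 */

        /* ... suspend and resume happen here ... */

        if (saved_cr4)                        /* cr4 was introduced with the Pentium */
                write_cr4(saved_cr4);
        printf("cr4 restore: %s\n", saved_cr4 ? "written" : "skipped");
        return 0;
}
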
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index b95aa6cfe3cb..4fc7e872c85e 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -28,9 +28,9 @@ ENTRY(swsusp_arch_suspend)
28 ret 28 ret
29 29
30ENTRY(restore_image) 30ENTRY(restore_image)
31 movl resume_pg_dir, %ecx 31 movl resume_pg_dir, %eax
32 subl $__PAGE_OFFSET, %ecx 32 subl $__PAGE_OFFSET, %eax
33 movl %ecx, %cr3 33 movl %eax, %cr3
34 34
35 movl restore_pblist, %edx 35 movl restore_pblist, %edx
36 .p2align 4,,7 36 .p2align 4,,7
@@ -52,17 +52,21 @@ copy_loop:
52 52
53done: 53done:
54 /* go back to the original page tables */ 54 /* go back to the original page tables */
55 movl $swapper_pg_dir, %ecx 55 movl $swapper_pg_dir, %eax
56 subl $__PAGE_OFFSET, %ecx 56 subl $__PAGE_OFFSET, %eax
57 movl %ecx, %cr3 57 movl %eax, %cr3
58 /* Flush TLB, including "global" things (vmalloc) */ 58 /* Flush TLB, including "global" things (vmalloc) */
59 movl mmu_cr4_features, %eax 59 movl mmu_cr4_features, %ecx
60 movl %eax, %edx 60 jecxz 1f # cr4 Pentium and higher, skip if zero
61 movl %ecx, %edx
61 andl $~(1<<7), %edx; # PGE 62 andl $~(1<<7), %edx; # PGE
62 movl %edx, %cr4; # turn off PGE 63 movl %edx, %cr4; # turn off PGE
63 movl %cr3, %ecx; # flush TLB 641:
64 movl %ecx, %cr3 65 movl %cr3, %eax; # flush TLB
65 movl %eax, %cr4; # turn PGE back on 66 movl %eax, %cr3
67 jecxz 1f # cr4 Pentium and higher, skip if zero
68 movl %ecx, %cr4; # turn PGE back on
691:
66 70
67 movl saved_context_esp, %esp 71 movl saved_context_esp, %esp
68 movl saved_context_ebp, %ebp 72 movl saved_context_ebp, %ebp
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 9ff6e3cbf08f..a4e201b47f64 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1324,7 +1324,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
1324 .ptep_modify_prot_commit = __ptep_modify_prot_commit, 1324 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
1325 1325
1326 .pte_val = xen_pte_val, 1326 .pte_val = xen_pte_val,
1327 .pte_flags = native_pte_val, 1327 .pte_flags = native_pte_flags,
1328 .pgd_val = xen_pgd_val, 1328 .pgd_val = xen_pgd_val,
1329 1329
1330 .make_pte = xen_make_pte, 1330 .make_pte = xen_make_pte,
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b6acc3a0af46..d67901083888 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -42,7 +42,7 @@ char * __init xen_memory_setup(void)
42 42
43 e820.nr_map = 0; 43 e820.nr_map = 0;
44 44
45 e820_add_region(0, PFN_PHYS(max_pfn), E820_RAM); 45 e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);
46 46
47 /* 47 /*
48 * Even though this is normal, usable memory under Xen, reserve 48 * Even though this is normal, usable memory under Xen, reserve
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 4038cbfe3331..7f58304fafb3 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -173,7 +173,7 @@ ENTRY(xen_sysexit)
173 pushq $__USER32_CS 173 pushq $__USER32_CS
174 pushq %rdx 174 pushq %rdx
175 175
176 pushq $VGCF_in_syscall 176 pushq $0
1771: jmp hypercall_iret 1771: jmp hypercall_iret
178ENDPATCH(xen_sysexit) 178ENDPATCH(xen_sysexit)
179RELOC(xen_sysexit, 1b+1) 179RELOC(xen_sysexit, 1b+1)