Diffstat (limited to 'arch/x86')
138 files changed, 3639 insertions, 1961 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ac2fb0641a04..97f0d2b6dc0c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,6 +29,7 @@ config X86 | |||
29 | select HAVE_FTRACE | 29 | select HAVE_FTRACE |
30 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) | 30 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) |
31 | select HAVE_ARCH_KGDB if !X86_VOYAGER | 31 | select HAVE_ARCH_KGDB if !X86_VOYAGER |
32 | select HAVE_ARCH_TRACEHOOK | ||
32 | select HAVE_GENERIC_DMA_COHERENT if X86_32 | 33 | select HAVE_GENERIC_DMA_COHERENT if X86_32 |
33 | select HAVE_EFFICIENT_UNALIGNED_ACCESS | 34 | select HAVE_EFFICIENT_UNALIGNED_ACCESS |
34 | 35 | ||
@@ -577,35 +578,29 @@ config SWIOTLB | |||
577 | 578 | ||
578 | config IOMMU_HELPER | 579 | config IOMMU_HELPER |
579 | def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU) | 580 | def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU) |
581 | |||
580 | config MAXSMP | 582 | config MAXSMP |
581 | bool "Configure Maximum number of SMP Processors and NUMA Nodes" | 583 | bool "Configure Maximum number of SMP Processors and NUMA Nodes" |
582 | depends on X86_64 && SMP | 584 | depends on X86_64 && SMP && BROKEN |
583 | default n | 585 | default n |
584 | help | 586 | help |
585 | Configure maximum number of CPUS and NUMA Nodes for this architecture. | 587 | Configure maximum number of CPUS and NUMA Nodes for this architecture. |
586 | If unsure, say N. | 588 | If unsure, say N. |
587 | 589 | ||
588 | if MAXSMP | ||
589 | config NR_CPUS | 590 | config NR_CPUS |
590 | int | 591 | int "Maximum number of CPUs (2-512)" if !MAXSMP |
591 | default "4096" | 592 | range 2 512 |
592 | endif | ||
593 | |||
594 | if !MAXSMP | ||
595 | config NR_CPUS | ||
596 | int "Maximum number of CPUs (2-4096)" | ||
597 | range 2 4096 | ||
598 | depends on SMP | 593 | depends on SMP |
594 | default "4096" if MAXSMP | ||
599 | default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000 | 595 | default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000 |
600 | default "8" | 596 | default "8" |
601 | help | 597 | help |
602 | This allows you to specify the maximum number of CPUs which this | 598 | This allows you to specify the maximum number of CPUs which this |
603 | kernel will support. The maximum supported value is 4096 and the | 599 | kernel will support. The maximum supported value is 512 and the |
604 | minimum value which makes sense is 2. | 600 | minimum value which makes sense is 2. |
605 | 601 | ||
606 | This is purely to save memory - each supported CPU adds | 602 | This is purely to save memory - each supported CPU adds |
607 | approximately eight kilobytes to the kernel image. | 603 | approximately eight kilobytes to the kernel image. |
608 | endif | ||
609 | 604 | ||
610 | config SCHED_SMT | 605 | config SCHED_SMT |
611 | bool "SMT (Hyperthreading) scheduler support" | 606 | bool "SMT (Hyperthreading) scheduler support" |
@@ -951,9 +946,9 @@ config NUMA | |||
951 | local memory controller of the CPU and add some more | 946 | local memory controller of the CPU and add some more |
952 | NUMA awareness to the kernel. | 947 | NUMA awareness to the kernel. |
953 | 948 | ||
954 | For i386 this is currently highly experimental and should be only | 949 | For 32-bit this is currently highly experimental and should be only |
955 | used for kernel development. It might also cause boot failures. | 950 | used for kernel development. It might also cause boot failures. |
956 | For x86_64 this is recommended on all multiprocessor Opteron systems. | 951 | For 64-bit this is recommended on all multiprocessor Opteron systems. |
957 | If the system is EM64T, you should say N unless your system is | 952 | If the system is EM64T, you should say N unless your system is |
958 | EM64T NUMA. | 953 | EM64T NUMA. |
959 | 954 | ||
@@ -996,17 +991,10 @@ config NUMA_EMU | |||
996 | into virtual nodes when booted with "numa=fake=N", where N is the | 991 | into virtual nodes when booted with "numa=fake=N", where N is the |
997 | number of nodes. This is only useful for debugging. | 992 | number of nodes. This is only useful for debugging. |
998 | 993 | ||
999 | if MAXSMP | ||
1000 | |||
1001 | config NODES_SHIFT | 994 | config NODES_SHIFT |
1002 | int | 995 | int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP |
1003 | default "9" | ||
1004 | endif | ||
1005 | |||
1006 | if !MAXSMP | ||
1007 | config NODES_SHIFT | ||
1008 | int "Maximum NUMA Nodes (as a power of 2)" | ||
1009 | range 1 9 if X86_64 | 996 | range 1 9 if X86_64 |
997 | default "9" if MAXSMP | ||
1010 | default "6" if X86_64 | 998 | default "6" if X86_64 |
1011 | default "4" if X86_NUMAQ | 999 | default "4" if X86_NUMAQ |
1012 | default "3" | 1000 | default "3" |
@@ -1014,7 +1002,6 @@ config NODES_SHIFT | |||
1014 | help | 1002 | help |
1015 | Specify the maximum number of NUMA Nodes available on the target | 1003 | Specify the maximum number of NUMA Nodes available on the target |
1016 | system. Increases memory reserved to accomodate various tables. | 1004 | system. Increases memory reserved to accomodate various tables. |
1017 | endif | ||
1018 | 1005 | ||
1019 | config HAVE_ARCH_BOOTMEM_NODE | 1006 | config HAVE_ARCH_BOOTMEM_NODE |
1020 | def_bool y | 1007 | def_bool y |
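Note that NODES_SHIFT is an exponent, not a node count: a value of 9 (the MAXSMP default above) allows up to 2^9 = 512 NUMA nodes, 6 allows 64, and the fallback default of 3 allows 8.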
@@ -1034,7 +1021,7 @@ config HAVE_ARCH_ALLOC_REMAP | |||
1034 | 1021 | ||
1035 | config ARCH_FLATMEM_ENABLE | 1022 | config ARCH_FLATMEM_ENABLE |
1036 | def_bool y | 1023 | def_bool y |
1037 | depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && X86_PC && !NUMA | 1024 | depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && !NUMA |
1038 | 1025 | ||
1039 | config ARCH_DISCONTIGMEM_ENABLE | 1026 | config ARCH_DISCONTIGMEM_ENABLE |
1040 | def_bool y | 1027 | def_bool y |
@@ -1050,7 +1037,7 @@ config ARCH_SPARSEMEM_DEFAULT | |||
1050 | 1037 | ||
1051 | config ARCH_SPARSEMEM_ENABLE | 1038 | config ARCH_SPARSEMEM_ENABLE |
1052 | def_bool y | 1039 | def_bool y |
1053 | depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) | 1040 | depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_GENERICARCH |
1054 | select SPARSEMEM_STATIC if X86_32 | 1041 | select SPARSEMEM_STATIC if X86_32 |
1055 | select SPARSEMEM_VMEMMAP_ENABLE if X86_64 | 1042 | select SPARSEMEM_VMEMMAP_ENABLE if X86_64 |
1056 | 1043 | ||
@@ -1131,10 +1118,10 @@ config MTRR | |||
1131 | You can safely say Y even if your machine doesn't have MTRRs, you'll | 1118 | You can safely say Y even if your machine doesn't have MTRRs, you'll |
1132 | just add about 9 KB to your kernel. | 1119 | just add about 9 KB to your kernel. |
1133 | 1120 | ||
1134 | See <file:Documentation/mtrr.txt> for more information. | 1121 | See <file:Documentation/x86/mtrr.txt> for more information. |
1135 | 1122 | ||
1136 | config MTRR_SANITIZER | 1123 | config MTRR_SANITIZER |
1137 | bool | 1124 | def_bool y |
1138 | prompt "MTRR cleanup support" | 1125 | prompt "MTRR cleanup support" |
1139 | depends on MTRR | 1126 | depends on MTRR |
1140 | help | 1127 | help |
@@ -1145,7 +1132,7 @@ config MTRR_SANITIZER | |||
1145 | The largest mtrr entry size for a continous block can be set with | 1132 | The largest mtrr entry size for a continous block can be set with |
1146 | mtrr_chunk_size. | 1133 | mtrr_chunk_size. |
1147 | 1134 | ||
1148 | If unsure, say N. | 1135 | If unsure, say Y. |
1149 | 1136 | ||
1150 | config MTRR_SANITIZER_ENABLE_DEFAULT | 1137 | config MTRR_SANITIZER_ENABLE_DEFAULT |
1151 | int "MTRR cleanup enable value (0-1)" | 1138 | int "MTRR cleanup enable value (0-1)" |
@@ -1205,7 +1192,6 @@ config IRQBALANCE | |||
1205 | config SECCOMP | 1192 | config SECCOMP |
1206 | def_bool y | 1193 | def_bool y |
1207 | prompt "Enable seccomp to safely compute untrusted bytecode" | 1194 | prompt "Enable seccomp to safely compute untrusted bytecode" |
1208 | depends on PROC_FS | ||
1209 | help | 1195 | help |
1210 | This kernel feature is useful for number crunching applications | 1196 | This kernel feature is useful for number crunching applications |
1211 | that may need to compute untrusted bytecode during their | 1197 | that may need to compute untrusted bytecode during their |
@@ -1213,7 +1199,7 @@ config SECCOMP | |||
1213 | the process as file descriptors supporting the read/write | 1199 | the process as file descriptors supporting the read/write |
1214 | syscalls, it's possible to isolate those applications in | 1200 | syscalls, it's possible to isolate those applications in |
1215 | their own address space using seccomp. Once seccomp is | 1201 | their own address space using seccomp. Once seccomp is |
1216 | enabled via /proc/<pid>/seccomp, it cannot be disabled | 1202 | enabled via prctl(PR_SET_SECCOMP), it cannot be disabled |
1217 | and the task is only allowed to execute a few safe syscalls | 1203 | and the task is only allowed to execute a few safe syscalls |
1218 | defined by each seccomp mode. | 1204 | defined by each seccomp mode. |
1219 | 1205 | ||
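Since the help text now points at prctl(PR_SET_SECCOMP) rather than /proc/<pid>/seccomp, a minimal userspace sketch of entering strict mode looks roughly like this (illustrative only, not part of the patch):

	#include <sys/prctl.h>
	#include <linux/seccomp.h>
	#include <unistd.h>

	int main(void)
	{
		/* Enter strict mode; afterwards only read(), write(),
		 * _exit() and sigreturn() are permitted. */
		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0)
			return 1;	/* kernel built without CONFIG_SECCOMP */
		/* ... compute untrusted bytecode here ... */
		_exit(0);
	}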
@@ -1263,7 +1249,7 @@ config KEXEC | |||
1263 | strongly in flux, so no good recommendation can be made. | 1249 | strongly in flux, so no good recommendation can be made. |
1264 | 1250 | ||
1265 | config CRASH_DUMP | 1251 | config CRASH_DUMP |
1266 | bool "kernel crash dumps (EXPERIMENTAL)" | 1252 | bool "kernel crash dumps" |
1267 | depends on X86_64 || (X86_32 && HIGHMEM) | 1253 | depends on X86_64 || (X86_32 && HIGHMEM) |
1268 | help | 1254 | help |
1269 | Generate crash dump after being started by kexec. | 1255 | Generate crash dump after being started by kexec. |
@@ -1370,14 +1356,14 @@ config PHYSICAL_ALIGN | |||
1370 | Don't change this unless you know what you are doing. | 1356 | Don't change this unless you know what you are doing. |
1371 | 1357 | ||
1372 | config HOTPLUG_CPU | 1358 | config HOTPLUG_CPU |
1373 | bool "Support for suspend on SMP and hot-pluggable CPUs (EXPERIMENTAL)" | 1359 | bool "Support for hot-pluggable CPUs" |
1374 | depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER | 1360 | depends on SMP && HOTPLUG && !X86_VOYAGER |
1375 | ---help--- | 1361 | ---help--- |
1376 | Say Y here to experiment with turning CPUs off and on, and to | 1362 | Say Y here to allow turning CPUs off and on. CPUs can be |
1377 | enable suspend on SMP systems. CPUs can be controlled through | 1363 | controlled through /sys/devices/system/cpu. |
1378 | /sys/devices/system/cpu. | 1364 | ( Note: power management support will enable this option |
1379 | Say N if you want to disable CPU hotplug and don't need to | 1365 | automatically on SMP systems. ) |
1380 | suspend. | 1366 | Say N if you want to disable CPU hotplug. |
1381 | 1367 | ||
1382 | config COMPAT_VDSO | 1368 | config COMPAT_VDSO |
1383 | def_bool y | 1369 | def_bool y |
@@ -1392,6 +1378,51 @@ config COMPAT_VDSO | |||
1392 | 1378 | ||
1393 | If unsure, say Y. | 1379 | If unsure, say Y. |
1394 | 1380 | ||
1381 | config CMDLINE_BOOL | ||
1382 | bool "Built-in kernel command line" | ||
1383 | default n | ||
1384 | help | ||
1385 | Allow for specifying boot arguments to the kernel at | ||
1386 | build time. On some systems (e.g. embedded ones), it is | ||
1387 | necessary or convenient to provide some or all of the | ||
1388 | kernel boot arguments with the kernel itself (that is, | ||
1389 | to not rely on the boot loader to provide them.) | ||
1390 | |||
1391 | To compile command line arguments into the kernel, | ||
1392 | set this option to 'Y', then fill in the | ||
1393 | the boot arguments in CONFIG_CMDLINE. | ||
1394 | |||
1395 | Systems with fully functional boot loaders (i.e. non-embedded) | ||
1396 | should leave this option set to 'N'. | ||
1397 | |||
1398 | config CMDLINE | ||
1399 | string "Built-in kernel command string" | ||
1400 | depends on CMDLINE_BOOL | ||
1401 | default "" | ||
1402 | help | ||
1403 | Enter arguments here that should be compiled into the kernel | ||
1404 | image and used at boot time. If the boot loader provides a | ||
1405 | command line at boot time, it is appended to this string to | ||
1406 | form the full kernel command line, when the system boots. | ||
1407 | |||
1408 | However, you can use the CONFIG_CMDLINE_OVERRIDE option to | ||
1409 | change this behavior. | ||
1410 | |||
1411 | In most cases, the command line (whether built-in or provided | ||
1412 | by the boot loader) should specify the device for the root | ||
1413 | file system. | ||
1414 | |||
1415 | config CMDLINE_OVERRIDE | ||
1416 | bool "Built-in command line overrides boot loader arguments" | ||
1417 | default n | ||
1418 | depends on CMDLINE_BOOL | ||
1419 | help | ||
1420 | Set this option to 'Y' to have the kernel ignore the boot loader | ||
1421 | command line, and use ONLY the built-in command line. | ||
1422 | |||
1423 | This is used to work around broken boot loaders. This should | ||
1424 | be set to 'N' under normal conditions. | ||
1425 | |||
1395 | endmenu | 1426 | endmenu |
1396 | 1427 | ||
1397 | config ARCH_ENABLE_MEMORY_HOTPLUG | 1428 | config ARCH_ENABLE_MEMORY_HOTPLUG |
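As an illustration of the three new options above, a hypothetical .config fragment (the command-line string is only an example, not taken from this patch) might look like:

	CONFIG_CMDLINE_BOOL=y
	CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/sda1 ro"
	CONFIG_CMDLINE_OVERRIDE=y

With CONFIG_CMDLINE_OVERRIDE=y the boot loader's arguments are ignored entirely; leave it at 'n' and the boot loader's command line is instead appended to the built-in string, as the help text describes.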
@@ -1787,7 +1818,7 @@ config COMPAT_FOR_U64_ALIGNMENT | |||
1787 | 1818 | ||
1788 | config SYSVIPC_COMPAT | 1819 | config SYSVIPC_COMPAT |
1789 | def_bool y | 1820 | def_bool y |
1790 | depends on X86_64 && COMPAT && SYSVIPC | 1821 | depends on COMPAT && SYSVIPC |
1791 | 1822 | ||
1792 | endmenu | 1823 | endmenu |
1793 | 1824 | ||
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 2c518fbc52ec..60a85768cfcb 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -382,14 +382,17 @@ config X86_OOSTORE | |||
382 | # P6_NOPs are a relatively minor optimization that require a family >= | 382 | # P6_NOPs are a relatively minor optimization that require a family >= |
383 | # 6 processor, except that it is broken on certain VIA chips. | 383 | # 6 processor, except that it is broken on certain VIA chips. |
384 | # Furthermore, AMD chips prefer a totally different sequence of NOPs | 384 | # Furthermore, AMD chips prefer a totally different sequence of NOPs |
385 | # (which work on all CPUs). As a result, disallow these if we're | 385 | # (which work on all CPUs). In addition, it looks like Virtual PC |
386 | # compiling X86_GENERIC but not X86_64 (these NOPs do work on all | 386 | # does not understand them. |
387 | # x86-64 capable chips); the list of processors in the right-hand clause | 387 | # |
388 | # are the cores that benefit from this optimization. | 388 | # As a result, disallow these if we're not compiling for X86_64 (these |
389 | # NOPs do work on all x86-64 capable chips); the list of processors in | ||
390 | # the right-hand clause are the cores that benefit from this optimization. | ||
389 | # | 391 | # |
390 | config X86_P6_NOP | 392 | config X86_P6_NOP |
391 | def_bool y | 393 | def_bool y |
392 | depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MPSC) | 394 | depends on X86_64 |
395 | depends on (MCORE2 || MPENTIUM4 || MPSC) | ||
393 | 396 | ||
394 | config X86_TSC | 397 | config X86_TSC |
395 | def_bool y | 398 | def_bool y |
@@ -415,3 +418,21 @@ config X86_MINIMUM_CPU_FAMILY | |||
415 | config X86_DEBUGCTLMSR | 418 | config X86_DEBUGCTLMSR |
416 | def_bool y | 419 | def_bool y |
417 | depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) | 420 | depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) |
421 | |||
422 | config X86_DS | ||
423 | bool "Debug Store support" | ||
424 | default y | ||
425 | help | ||
426 | Add support for Debug Store. | ||
427 | This allows the kernel to provide a memory buffer to the hardware | ||
428 | to store various profiling and tracing events. | ||
429 | |||
430 | config X86_PTRACE_BTS | ||
431 | bool "ptrace interface to Branch Trace Store" | ||
432 | default y | ||
433 | depends on (X86_DS && X86_DEBUGCTLMSR) | ||
434 | help | ||
435 | Add a ptrace interface to allow collecting an execution trace | ||
436 | of the traced task. | ||
437 | This collects control flow changes in a (cyclic) buffer and allows | ||
438 | debuggers to fill in the gaps and show an execution trace of the debuggee. | ||
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index a34b9982c7cb..cc0ef13fba7a 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -24,10 +24,14 @@ | |||
24 | #include <linux/edd.h> | 24 | #include <linux/edd.h> |
25 | #include <asm/boot.h> | 25 | #include <asm/boot.h> |
26 | #include <asm/setup.h> | 26 | #include <asm/setup.h> |
27 | #include "bitops.h" | ||
28 | #include <asm/cpufeature.h> | ||
27 | 29 | ||
28 | /* Useful macros */ | 30 | /* Useful macros */ |
29 | #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) | 31 | #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) |
30 | 32 | ||
33 | #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x))) | ||
34 | |||
31 | extern struct setup_header hdr; | 35 | extern struct setup_header hdr; |
32 | extern struct boot_params boot_params; | 36 | extern struct boot_params boot_params; |
33 | 37 | ||
@@ -242,6 +246,12 @@ int cmdline_find_option(const char *option, char *buffer, int bufsize); | |||
242 | int cmdline_find_option_bool(const char *option); | 246 | int cmdline_find_option_bool(const char *option); |
243 | 247 | ||
244 | /* cpu.c, cpucheck.c */ | 248 | /* cpu.c, cpucheck.c */ |
249 | struct cpu_features { | ||
250 | int level; /* Family, or 64 for x86-64 */ | ||
251 | int model; | ||
252 | u32 flags[NCAPINTS]; | ||
253 | }; | ||
254 | extern struct cpu_features cpu; | ||
245 | int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr); | 255 | int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr); |
246 | int validate_cpu(void); | 256 | int validate_cpu(void); |
247 | 257 | ||
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index ba7736cf2ec7..29c5fbf08392 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -137,14 +137,15 @@ relocated: | |||
137 | */ | 137 | */ |
138 | movl output_len(%ebx), %eax | 138 | movl output_len(%ebx), %eax |
139 | pushl %eax | 139 | pushl %eax |
140 | # push arguments for decompress_kernel: | ||
140 | pushl %ebp # output address | 141 | pushl %ebp # output address |
141 | movl input_len(%ebx), %eax | 142 | movl input_len(%ebx), %eax |
142 | pushl %eax # input_len | 143 | pushl %eax # input_len |
143 | leal input_data(%ebx), %eax | 144 | leal input_data(%ebx), %eax |
144 | pushl %eax # input_data | 145 | pushl %eax # input_data |
145 | leal boot_heap(%ebx), %eax | 146 | leal boot_heap(%ebx), %eax |
146 | pushl %eax # heap area as third argument | 147 | pushl %eax # heap area |
147 | pushl %esi # real mode pointer as second arg | 148 | pushl %esi # real mode pointer |
148 | call decompress_kernel | 149 | call decompress_kernel |
149 | addl $20, %esp | 150 | addl $20, %esp |
150 | popl %ecx | 151 | popl %ecx |
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 9fea73706479..5780d361105b 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -16,7 +16,7 @@ | |||
16 | */ | 16 | */ |
17 | #undef CONFIG_PARAVIRT | 17 | #undef CONFIG_PARAVIRT |
18 | #ifdef CONFIG_X86_32 | 18 | #ifdef CONFIG_X86_32 |
19 | #define _ASM_DESC_H_ 1 | 19 | #define ASM_X86__DESC_H 1 |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #ifdef CONFIG_X86_64 | 22 | #ifdef CONFIG_X86_64 |
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/linkage.h> | 27 | #include <linux/linkage.h> |
28 | #include <linux/screen_info.h> | 28 | #include <linux/screen_info.h> |
29 | #include <linux/elf.h> | 29 | #include <linux/elf.h> |
30 | #include <asm/io.h> | 30 | #include <linux/io.h> |
31 | #include <asm/page.h> | 31 | #include <asm/page.h> |
32 | #include <asm/boot.h> | 32 | #include <asm/boot.h> |
33 | #include <asm/bootparam.h> | 33 | #include <asm/bootparam.h> |
@@ -251,7 +251,7 @@ static void __putstr(int error, const char *s) | |||
251 | y--; | 251 | y--; |
252 | } | 252 | } |
253 | } else { | 253 | } else { |
254 | vidmem [(x + cols * y) * 2] = c; | 254 | vidmem[(x + cols * y) * 2] = c; |
255 | if (++x >= cols) { | 255 | if (++x >= cols) { |
256 | x = 0; | 256 | x = 0; |
257 | if (++y >= lines) { | 257 | if (++y >= lines) { |
@@ -277,7 +277,8 @@ static void *memset(void *s, int c, unsigned n) | |||
277 | int i; | 277 | int i; |
278 | char *ss = s; | 278 | char *ss = s; |
279 | 279 | ||
280 | for (i = 0; i < n; i++) ss[i] = c; | 280 | for (i = 0; i < n; i++) |
281 | ss[i] = c; | ||
281 | return s; | 282 | return s; |
282 | } | 283 | } |
283 | 284 | ||
@@ -287,7 +288,8 @@ static void *memcpy(void *dest, const void *src, unsigned n) | |||
287 | const char *s = src; | 288 | const char *s = src; |
288 | char *d = dest; | 289 | char *d = dest; |
289 | 290 | ||
290 | for (i = 0; i < n; i++) d[i] = s[i]; | 291 | for (i = 0; i < n; i++) |
292 | d[i] = s[i]; | ||
291 | return dest; | 293 | return dest; |
292 | } | 294 | } |
293 | 295 | ||
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index a1310c52fc0c..857e492c571e 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -492,7 +492,7 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) | |||
492 | continue; | 492 | continue; |
493 | } | 493 | } |
494 | sh_symtab = sec_symtab->symtab; | 494 | sh_symtab = sec_symtab->symtab; |
495 | sym_strtab = sec->link->strtab; | 495 | sym_strtab = sec_symtab->link->strtab; |
496 | for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { | 496 | for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { |
497 | Elf32_Rel *rel; | 497 | Elf32_Rel *rel; |
498 | Elf32_Sym *sym; | 498 | Elf32_Sym *sym; |
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c
index 92d6fd73dc7d..75298fe2edca 100644
--- a/arch/x86/boot/cpu.c
+++ b/arch/x86/boot/cpu.c
@@ -16,9 +16,6 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "boot.h" | 18 | #include "boot.h" |
19 | #include "bitops.h" | ||
20 | #include <asm/cpufeature.h> | ||
21 | |||
22 | #include "cpustr.h" | 19 | #include "cpustr.h" |
23 | 20 | ||
24 | static char *cpu_name(int level) | 21 | static char *cpu_name(int level) |
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 7804389ee005..4d3ff037201f 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -22,21 +22,13 @@ | |||
22 | 22 | ||
23 | #ifdef _SETUP | 23 | #ifdef _SETUP |
24 | # include "boot.h" | 24 | # include "boot.h" |
25 | # include "bitops.h" | ||
26 | #endif | 25 | #endif |
27 | #include <linux/types.h> | 26 | #include <linux/types.h> |
28 | #include <asm/cpufeature.h> | ||
29 | #include <asm/processor-flags.h> | 27 | #include <asm/processor-flags.h> |
30 | #include <asm/required-features.h> | 28 | #include <asm/required-features.h> |
31 | #include <asm/msr-index.h> | 29 | #include <asm/msr-index.h> |
32 | 30 | ||
33 | struct cpu_features { | 31 | struct cpu_features cpu; |
34 | int level; /* Family, or 64 for x86-64 */ | ||
35 | int model; | ||
36 | u32 flags[NCAPINTS]; | ||
37 | }; | ||
38 | |||
39 | static struct cpu_features cpu; | ||
40 | static u32 cpu_vendor[3]; | 32 | static u32 cpu_vendor[3]; |
41 | static u32 err_flags[NCAPINTS]; | 33 | static u32 err_flags[NCAPINTS]; |
42 | 34 | ||
@@ -46,12 +38,12 @@ static const u32 req_flags[NCAPINTS] = | |||
46 | { | 38 | { |
47 | REQUIRED_MASK0, | 39 | REQUIRED_MASK0, |
48 | REQUIRED_MASK1, | 40 | REQUIRED_MASK1, |
49 | REQUIRED_MASK2, | 41 | 0, /* REQUIRED_MASK2 not implemented in this file */ |
50 | REQUIRED_MASK3, | 42 | 0, /* REQUIRED_MASK3 not implemented in this file */ |
51 | REQUIRED_MASK4, | 43 | REQUIRED_MASK4, |
52 | REQUIRED_MASK5, | 44 | 0, /* REQUIRED_MASK5 not implemented in this file */ |
53 | REQUIRED_MASK6, | 45 | REQUIRED_MASK6, |
54 | REQUIRED_MASK7, | 46 | 0, /* REQUIRED_MASK7 not implemented in this file */ |
55 | }; | 47 | }; |
56 | 48 | ||
57 | #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) | 49 | #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) |
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index af86e431acfa..b993062e9a5f 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -30,7 +30,6 @@ SYSSEG = DEF_SYSSEG /* system loaded at 0x10000 (65536) */ | |||
30 | SYSSIZE = DEF_SYSSIZE /* system size: # of 16-byte clicks */ | 30 | SYSSIZE = DEF_SYSSIZE /* system size: # of 16-byte clicks */ |
31 | /* to be loaded */ | 31 | /* to be loaded */ |
32 | ROOT_DEV = 0 /* ROOT_DEV is now written by "build" */ | 32 | ROOT_DEV = 0 /* ROOT_DEV is now written by "build" */ |
33 | SWAP_DEV = 0 /* SWAP_DEV is now written by "build" */ | ||
34 | 33 | ||
35 | #ifndef SVGA_MODE | 34 | #ifndef SVGA_MODE |
36 | #define SVGA_MODE ASK_VGA | 35 | #define SVGA_MODE ASK_VGA |
diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
index 2296164b54d2..197421db1af1 100644
--- a/arch/x86/boot/main.c
+++ b/arch/x86/boot/main.c
@@ -73,6 +73,11 @@ static void keyboard_set_repeat(void) | |||
73 | */ | 73 | */ |
74 | static void query_ist(void) | 74 | static void query_ist(void) |
75 | { | 75 | { |
76 | /* Some older BIOSes apparently crash on this call, so filter | ||
77 | it from machines too old to have SpeedStep at all. */ | ||
78 | if (cpu.level < 6) | ||
79 | return; | ||
80 | |||
76 | asm("int $0x15" | 81 | asm("int $0x15" |
77 | : "=a" (boot_params.ist_info.signature), | 82 | : "=a" (boot_params.ist_info.signature), |
78 | "=b" (boot_params.ist_info.command), | 83 | "=b" (boot_params.ist_info.command), |
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
index 53165c97336b..8c3c25f35578 100644
--- a/arch/x86/boot/memory.c
+++ b/arch/x86/boot/memory.c
@@ -13,7 +13,6 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include "boot.h" | 15 | #include "boot.h" |
16 | #include <linux/kernel.h> | ||
17 | 16 | ||
18 | #define SMAP 0x534d4150 /* ASCII "SMAP" */ | 17 | #define SMAP 0x534d4150 /* ASCII "SMAP" */ |
19 | 18 | ||
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 4d73f53287b6..ef9a52005ec9 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,13 +1,13 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.26-rc1 | 3 | # Linux kernel version: 2.6.27-rc5 |
4 | # Sun May 4 19:59:02 2008 | 4 | # Wed Sep 3 17:23:09 2008 |
5 | # | 5 | # |
6 | # CONFIG_64BIT is not set | 6 | # CONFIG_64BIT is not set |
7 | CONFIG_X86_32=y | 7 | CONFIG_X86_32=y |
8 | # CONFIG_X86_64 is not set | 8 | # CONFIG_X86_64 is not set |
9 | CONFIG_X86=y | 9 | CONFIG_X86=y |
10 | CONFIG_DEFCONFIG_LIST="arch/x86/configs/i386_defconfig" | 10 | CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig" |
11 | # CONFIG_GENERIC_LOCKBREAK is not set | 11 | # CONFIG_GENERIC_LOCKBREAK is not set |
12 | CONFIG_GENERIC_TIME=y | 12 | CONFIG_GENERIC_TIME=y |
13 | CONFIG_GENERIC_CMOS_UPDATE=y | 13 | CONFIG_GENERIC_CMOS_UPDATE=y |
@@ -53,6 +53,7 @@ CONFIG_X86_HT=y | |||
53 | CONFIG_X86_BIOS_REBOOT=y | 53 | CONFIG_X86_BIOS_REBOOT=y |
54 | CONFIG_X86_TRAMPOLINE=y | 54 | CONFIG_X86_TRAMPOLINE=y |
55 | CONFIG_KTIME_SCALAR=y | 55 | CONFIG_KTIME_SCALAR=y |
56 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
56 | 57 | ||
57 | # | 58 | # |
58 | # General setup | 59 | # General setup |
@@ -82,6 +83,7 @@ CONFIG_CGROUPS=y | |||
82 | CONFIG_CGROUP_NS=y | 83 | CONFIG_CGROUP_NS=y |
83 | # CONFIG_CGROUP_DEVICE is not set | 84 | # CONFIG_CGROUP_DEVICE is not set |
84 | CONFIG_CPUSETS=y | 85 | CONFIG_CPUSETS=y |
86 | CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y | ||
85 | CONFIG_GROUP_SCHED=y | 87 | CONFIG_GROUP_SCHED=y |
86 | CONFIG_FAIR_GROUP_SCHED=y | 88 | CONFIG_FAIR_GROUP_SCHED=y |
87 | # CONFIG_RT_GROUP_SCHED is not set | 89 | # CONFIG_RT_GROUP_SCHED is not set |
@@ -105,7 +107,6 @@ CONFIG_SYSCTL=y | |||
105 | # CONFIG_EMBEDDED is not set | 107 | # CONFIG_EMBEDDED is not set |
106 | CONFIG_UID16=y | 108 | CONFIG_UID16=y |
107 | CONFIG_SYSCTL_SYSCALL=y | 109 | CONFIG_SYSCTL_SYSCALL=y |
108 | CONFIG_SYSCTL_SYSCALL_CHECK=y | ||
109 | CONFIG_KALLSYMS=y | 110 | CONFIG_KALLSYMS=y |
110 | CONFIG_KALLSYMS_ALL=y | 111 | CONFIG_KALLSYMS_ALL=y |
111 | CONFIG_KALLSYMS_EXTRA_PASS=y | 112 | CONFIG_KALLSYMS_EXTRA_PASS=y |
@@ -113,6 +114,7 @@ CONFIG_HOTPLUG=y | |||
113 | CONFIG_PRINTK=y | 114 | CONFIG_PRINTK=y |
114 | CONFIG_BUG=y | 115 | CONFIG_BUG=y |
115 | CONFIG_ELF_CORE=y | 116 | CONFIG_ELF_CORE=y |
117 | CONFIG_PCSPKR_PLATFORM=y | ||
116 | # CONFIG_COMPAT_BRK is not set | 118 | # CONFIG_COMPAT_BRK is not set |
117 | CONFIG_BASE_FULL=y | 119 | CONFIG_BASE_FULL=y |
118 | CONFIG_FUTEX=y | 120 | CONFIG_FUTEX=y |
@@ -132,27 +134,35 @@ CONFIG_MARKERS=y | |||
132 | # CONFIG_OPROFILE is not set | 134 | # CONFIG_OPROFILE is not set |
133 | CONFIG_HAVE_OPROFILE=y | 135 | CONFIG_HAVE_OPROFILE=y |
134 | CONFIG_KPROBES=y | 136 | CONFIG_KPROBES=y |
137 | CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y | ||
135 | CONFIG_KRETPROBES=y | 138 | CONFIG_KRETPROBES=y |
139 | CONFIG_HAVE_IOREMAP_PROT=y | ||
136 | CONFIG_HAVE_KPROBES=y | 140 | CONFIG_HAVE_KPROBES=y |
137 | CONFIG_HAVE_KRETPROBES=y | 141 | CONFIG_HAVE_KRETPROBES=y |
142 | # CONFIG_HAVE_ARCH_TRACEHOOK is not set | ||
138 | # CONFIG_HAVE_DMA_ATTRS is not set | 143 | # CONFIG_HAVE_DMA_ATTRS is not set |
144 | CONFIG_USE_GENERIC_SMP_HELPERS=y | ||
145 | # CONFIG_HAVE_CLK is not set | ||
139 | CONFIG_PROC_PAGE_MONITOR=y | 146 | CONFIG_PROC_PAGE_MONITOR=y |
147 | CONFIG_HAVE_GENERIC_DMA_COHERENT=y | ||
140 | CONFIG_SLABINFO=y | 148 | CONFIG_SLABINFO=y |
141 | CONFIG_RT_MUTEXES=y | 149 | CONFIG_RT_MUTEXES=y |
142 | # CONFIG_TINY_SHMEM is not set | 150 | # CONFIG_TINY_SHMEM is not set |
143 | CONFIG_BASE_SMALL=0 | 151 | CONFIG_BASE_SMALL=0 |
144 | CONFIG_MODULES=y | 152 | CONFIG_MODULES=y |
153 | # CONFIG_MODULE_FORCE_LOAD is not set | ||
145 | CONFIG_MODULE_UNLOAD=y | 154 | CONFIG_MODULE_UNLOAD=y |
146 | CONFIG_MODULE_FORCE_UNLOAD=y | 155 | CONFIG_MODULE_FORCE_UNLOAD=y |
147 | # CONFIG_MODVERSIONS is not set | 156 | # CONFIG_MODVERSIONS is not set |
148 | # CONFIG_MODULE_SRCVERSION_ALL is not set | 157 | # CONFIG_MODULE_SRCVERSION_ALL is not set |
149 | # CONFIG_KMOD is not set | 158 | CONFIG_KMOD=y |
150 | CONFIG_STOP_MACHINE=y | 159 | CONFIG_STOP_MACHINE=y |
151 | CONFIG_BLOCK=y | 160 | CONFIG_BLOCK=y |
152 | # CONFIG_LBD is not set | 161 | # CONFIG_LBD is not set |
153 | CONFIG_BLK_DEV_IO_TRACE=y | 162 | CONFIG_BLK_DEV_IO_TRACE=y |
154 | # CONFIG_LSF is not set | 163 | # CONFIG_LSF is not set |
155 | CONFIG_BLK_DEV_BSG=y | 164 | CONFIG_BLK_DEV_BSG=y |
165 | # CONFIG_BLK_DEV_INTEGRITY is not set | ||
156 | 166 | ||
157 | # | 167 | # |
158 | # IO Schedulers | 168 | # IO Schedulers |
@@ -176,25 +186,23 @@ CONFIG_NO_HZ=y | |||
176 | CONFIG_HIGH_RES_TIMERS=y | 186 | CONFIG_HIGH_RES_TIMERS=y |
177 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | 187 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y |
178 | CONFIG_SMP=y | 188 | CONFIG_SMP=y |
189 | CONFIG_X86_FIND_SMP_CONFIG=y | ||
190 | CONFIG_X86_MPPARSE=y | ||
179 | CONFIG_X86_PC=y | 191 | CONFIG_X86_PC=y |
180 | # CONFIG_X86_ELAN is not set | 192 | # CONFIG_X86_ELAN is not set |
181 | # CONFIG_X86_VOYAGER is not set | 193 | # CONFIG_X86_VOYAGER is not set |
182 | # CONFIG_X86_NUMAQ is not set | ||
183 | # CONFIG_X86_SUMMIT is not set | ||
184 | # CONFIG_X86_BIGSMP is not set | ||
185 | # CONFIG_X86_VISWS is not set | ||
186 | # CONFIG_X86_GENERICARCH is not set | 194 | # CONFIG_X86_GENERICARCH is not set |
187 | # CONFIG_X86_ES7000 is not set | ||
188 | # CONFIG_X86_RDC321X is not set | ||
189 | # CONFIG_X86_VSMP is not set | 195 | # CONFIG_X86_VSMP is not set |
196 | # CONFIG_X86_RDC321X is not set | ||
190 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y | 197 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y |
191 | # CONFIG_PARAVIRT_GUEST is not set | 198 | # CONFIG_PARAVIRT_GUEST is not set |
199 | # CONFIG_MEMTEST is not set | ||
192 | # CONFIG_M386 is not set | 200 | # CONFIG_M386 is not set |
193 | # CONFIG_M486 is not set | 201 | # CONFIG_M486 is not set |
194 | # CONFIG_M586 is not set | 202 | # CONFIG_M586 is not set |
195 | # CONFIG_M586TSC is not set | 203 | # CONFIG_M586TSC is not set |
196 | # CONFIG_M586MMX is not set | 204 | # CONFIG_M586MMX is not set |
197 | # CONFIG_M686 is not set | 205 | CONFIG_M686=y |
198 | # CONFIG_MPENTIUMII is not set | 206 | # CONFIG_MPENTIUMII is not set |
199 | # CONFIG_MPENTIUMIII is not set | 207 | # CONFIG_MPENTIUMIII is not set |
200 | # CONFIG_MPENTIUMM is not set | 208 | # CONFIG_MPENTIUMM is not set |
@@ -213,30 +221,30 @@ CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y | |||
213 | # CONFIG_MVIAC3_2 is not set | 221 | # CONFIG_MVIAC3_2 is not set |
214 | # CONFIG_MVIAC7 is not set | 222 | # CONFIG_MVIAC7 is not set |
215 | # CONFIG_MPSC is not set | 223 | # CONFIG_MPSC is not set |
216 | CONFIG_MCORE2=y | 224 | # CONFIG_MCORE2 is not set |
217 | # CONFIG_GENERIC_CPU is not set | 225 | # CONFIG_GENERIC_CPU is not set |
218 | # CONFIG_X86_GENERIC is not set | 226 | CONFIG_X86_GENERIC=y |
219 | CONFIG_X86_CPU=y | 227 | CONFIG_X86_CPU=y |
220 | CONFIG_X86_CMPXCHG=y | 228 | CONFIG_X86_CMPXCHG=y |
221 | CONFIG_X86_L1_CACHE_SHIFT=6 | 229 | CONFIG_X86_L1_CACHE_SHIFT=7 |
222 | CONFIG_X86_XADD=y | 230 | CONFIG_X86_XADD=y |
231 | # CONFIG_X86_PPRO_FENCE is not set | ||
223 | CONFIG_X86_WP_WORKS_OK=y | 232 | CONFIG_X86_WP_WORKS_OK=y |
224 | CONFIG_X86_INVLPG=y | 233 | CONFIG_X86_INVLPG=y |
225 | CONFIG_X86_BSWAP=y | 234 | CONFIG_X86_BSWAP=y |
226 | CONFIG_X86_POPAD_OK=y | 235 | CONFIG_X86_POPAD_OK=y |
227 | CONFIG_X86_GOOD_APIC=y | ||
228 | CONFIG_X86_INTEL_USERCOPY=y | 236 | CONFIG_X86_INTEL_USERCOPY=y |
229 | CONFIG_X86_USE_PPRO_CHECKSUM=y | 237 | CONFIG_X86_USE_PPRO_CHECKSUM=y |
230 | CONFIG_X86_P6_NOP=y | ||
231 | CONFIG_X86_TSC=y | 238 | CONFIG_X86_TSC=y |
232 | CONFIG_X86_MINIMUM_CPU_FAMILY=6 | 239 | CONFIG_X86_CMOV=y |
240 | CONFIG_X86_MINIMUM_CPU_FAMILY=4 | ||
233 | CONFIG_X86_DEBUGCTLMSR=y | 241 | CONFIG_X86_DEBUGCTLMSR=y |
234 | CONFIG_HPET_TIMER=y | 242 | CONFIG_HPET_TIMER=y |
235 | CONFIG_HPET_EMULATE_RTC=y | 243 | CONFIG_HPET_EMULATE_RTC=y |
236 | CONFIG_DMI=y | 244 | CONFIG_DMI=y |
237 | # CONFIG_IOMMU_HELPER is not set | 245 | # CONFIG_IOMMU_HELPER is not set |
238 | CONFIG_NR_CPUS=4 | 246 | CONFIG_NR_CPUS=64 |
239 | # CONFIG_SCHED_SMT is not set | 247 | CONFIG_SCHED_SMT=y |
240 | CONFIG_SCHED_MC=y | 248 | CONFIG_SCHED_MC=y |
241 | # CONFIG_PREEMPT_NONE is not set | 249 | # CONFIG_PREEMPT_NONE is not set |
242 | CONFIG_PREEMPT_VOLUNTARY=y | 250 | CONFIG_PREEMPT_VOLUNTARY=y |
@@ -247,8 +255,9 @@ CONFIG_X86_IO_APIC=y | |||
247 | CONFIG_VM86=y | 255 | CONFIG_VM86=y |
248 | # CONFIG_TOSHIBA is not set | 256 | # CONFIG_TOSHIBA is not set |
249 | # CONFIG_I8K is not set | 257 | # CONFIG_I8K is not set |
250 | # CONFIG_X86_REBOOTFIXUPS is not set | 258 | CONFIG_X86_REBOOTFIXUPS=y |
251 | # CONFIG_MICROCODE is not set | 259 | CONFIG_MICROCODE=y |
260 | CONFIG_MICROCODE_OLD_INTERFACE=y | ||
252 | CONFIG_X86_MSR=y | 261 | CONFIG_X86_MSR=y |
253 | CONFIG_X86_CPUID=y | 262 | CONFIG_X86_CPUID=y |
254 | # CONFIG_NOHIGHMEM is not set | 263 | # CONFIG_NOHIGHMEM is not set |
@@ -256,32 +265,28 @@ CONFIG_HIGHMEM4G=y | |||
256 | # CONFIG_HIGHMEM64G is not set | 265 | # CONFIG_HIGHMEM64G is not set |
257 | CONFIG_PAGE_OFFSET=0xC0000000 | 266 | CONFIG_PAGE_OFFSET=0xC0000000 |
258 | CONFIG_HIGHMEM=y | 267 | CONFIG_HIGHMEM=y |
259 | CONFIG_NEED_NODE_MEMMAP_SIZE=y | ||
260 | CONFIG_ARCH_FLATMEM_ENABLE=y | 268 | CONFIG_ARCH_FLATMEM_ENABLE=y |
261 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | 269 | CONFIG_ARCH_SPARSEMEM_ENABLE=y |
262 | CONFIG_ARCH_SELECT_MEMORY_MODEL=y | 270 | CONFIG_ARCH_SELECT_MEMORY_MODEL=y |
263 | CONFIG_SELECT_MEMORY_MODEL=y | 271 | CONFIG_SELECT_MEMORY_MODEL=y |
264 | # CONFIG_FLATMEM_MANUAL is not set | 272 | CONFIG_FLATMEM_MANUAL=y |
265 | # CONFIG_DISCONTIGMEM_MANUAL is not set | 273 | # CONFIG_DISCONTIGMEM_MANUAL is not set |
266 | CONFIG_SPARSEMEM_MANUAL=y | 274 | # CONFIG_SPARSEMEM_MANUAL is not set |
267 | CONFIG_SPARSEMEM=y | 275 | CONFIG_FLATMEM=y |
268 | CONFIG_HAVE_MEMORY_PRESENT=y | 276 | CONFIG_FLAT_NODE_MEM_MAP=y |
269 | CONFIG_SPARSEMEM_STATIC=y | 277 | CONFIG_SPARSEMEM_STATIC=y |
270 | # CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set | 278 | # CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set |
271 | |||
272 | # | ||
273 | # Memory hotplug is currently incompatible with Software Suspend | ||
274 | # | ||
275 | CONFIG_PAGEFLAGS_EXTENDED=y | 279 | CONFIG_PAGEFLAGS_EXTENDED=y |
276 | CONFIG_SPLIT_PTLOCK_CPUS=4 | 280 | CONFIG_SPLIT_PTLOCK_CPUS=4 |
277 | CONFIG_RESOURCES_64BIT=y | 281 | CONFIG_RESOURCES_64BIT=y |
278 | CONFIG_ZONE_DMA_FLAG=1 | 282 | CONFIG_ZONE_DMA_FLAG=1 |
279 | CONFIG_BOUNCE=y | 283 | CONFIG_BOUNCE=y |
280 | CONFIG_VIRT_TO_BUS=y | 284 | CONFIG_VIRT_TO_BUS=y |
281 | # CONFIG_HIGHPTE is not set | 285 | CONFIG_HIGHPTE=y |
282 | # CONFIG_MATH_EMULATION is not set | 286 | # CONFIG_MATH_EMULATION is not set |
283 | CONFIG_MTRR=y | 287 | CONFIG_MTRR=y |
284 | # CONFIG_X86_PAT is not set | 288 | # CONFIG_MTRR_SANITIZER is not set |
289 | CONFIG_X86_PAT=y | ||
285 | CONFIG_EFI=y | 290 | CONFIG_EFI=y |
286 | # CONFIG_IRQBALANCE is not set | 291 | # CONFIG_IRQBALANCE is not set |
287 | CONFIG_SECCOMP=y | 292 | CONFIG_SECCOMP=y |
@@ -293,6 +298,7 @@ CONFIG_HZ=1000 | |||
293 | CONFIG_SCHED_HRTICK=y | 298 | CONFIG_SCHED_HRTICK=y |
294 | CONFIG_KEXEC=y | 299 | CONFIG_KEXEC=y |
295 | CONFIG_CRASH_DUMP=y | 300 | CONFIG_CRASH_DUMP=y |
301 | # CONFIG_KEXEC_JUMP is not set | ||
296 | CONFIG_PHYSICAL_START=0x1000000 | 302 | CONFIG_PHYSICAL_START=0x1000000 |
297 | CONFIG_RELOCATABLE=y | 303 | CONFIG_RELOCATABLE=y |
298 | CONFIG_PHYSICAL_ALIGN=0x200000 | 304 | CONFIG_PHYSICAL_ALIGN=0x200000 |
@@ -312,6 +318,7 @@ CONFIG_PM_TRACE_RTC=y | |||
312 | CONFIG_PM_SLEEP_SMP=y | 318 | CONFIG_PM_SLEEP_SMP=y |
313 | CONFIG_PM_SLEEP=y | 319 | CONFIG_PM_SLEEP=y |
314 | CONFIG_SUSPEND=y | 320 | CONFIG_SUSPEND=y |
321 | # CONFIG_PM_TEST_SUSPEND is not set | ||
315 | CONFIG_SUSPEND_FREEZER=y | 322 | CONFIG_SUSPEND_FREEZER=y |
316 | CONFIG_HIBERNATION=y | 323 | CONFIG_HIBERNATION=y |
317 | CONFIG_PM_STD_PARTITION="" | 324 | CONFIG_PM_STD_PARTITION="" |
@@ -337,6 +344,7 @@ CONFIG_ACPI_THERMAL=y | |||
337 | CONFIG_ACPI_BLACKLIST_YEAR=0 | 344 | CONFIG_ACPI_BLACKLIST_YEAR=0 |
338 | # CONFIG_ACPI_DEBUG is not set | 345 | # CONFIG_ACPI_DEBUG is not set |
339 | CONFIG_ACPI_EC=y | 346 | CONFIG_ACPI_EC=y |
347 | # CONFIG_ACPI_PCI_SLOT is not set | ||
340 | CONFIG_ACPI_POWER=y | 348 | CONFIG_ACPI_POWER=y |
341 | CONFIG_ACPI_SYSTEM=y | 349 | CONFIG_ACPI_SYSTEM=y |
342 | CONFIG_X86_PM_TIMER=y | 350 | CONFIG_X86_PM_TIMER=y |
@@ -395,8 +403,8 @@ CONFIG_PCI=y | |||
395 | # CONFIG_PCI_GOBIOS is not set | 403 | # CONFIG_PCI_GOBIOS is not set |
396 | # CONFIG_PCI_GOMMCONFIG is not set | 404 | # CONFIG_PCI_GOMMCONFIG is not set |
397 | # CONFIG_PCI_GODIRECT is not set | 405 | # CONFIG_PCI_GODIRECT is not set |
398 | CONFIG_PCI_GOANY=y | ||
399 | # CONFIG_PCI_GOOLPC is not set | 406 | # CONFIG_PCI_GOOLPC is not set |
407 | CONFIG_PCI_GOANY=y | ||
400 | CONFIG_PCI_BIOS=y | 408 | CONFIG_PCI_BIOS=y |
401 | CONFIG_PCI_DIRECT=y | 409 | CONFIG_PCI_DIRECT=y |
402 | CONFIG_PCI_MMCONFIG=y | 410 | CONFIG_PCI_MMCONFIG=y |
@@ -448,10 +456,6 @@ CONFIG_HOTPLUG_PCI=y | |||
448 | CONFIG_BINFMT_ELF=y | 456 | CONFIG_BINFMT_ELF=y |
449 | # CONFIG_BINFMT_AOUT is not set | 457 | # CONFIG_BINFMT_AOUT is not set |
450 | CONFIG_BINFMT_MISC=y | 458 | CONFIG_BINFMT_MISC=y |
451 | |||
452 | # | ||
453 | # Networking | ||
454 | # | ||
455 | CONFIG_NET=y | 459 | CONFIG_NET=y |
456 | 460 | ||
457 | # | 461 | # |
@@ -475,7 +479,10 @@ CONFIG_IP_FIB_HASH=y | |||
475 | CONFIG_IP_MULTIPLE_TABLES=y | 479 | CONFIG_IP_MULTIPLE_TABLES=y |
476 | CONFIG_IP_ROUTE_MULTIPATH=y | 480 | CONFIG_IP_ROUTE_MULTIPATH=y |
477 | CONFIG_IP_ROUTE_VERBOSE=y | 481 | CONFIG_IP_ROUTE_VERBOSE=y |
478 | # CONFIG_IP_PNP is not set | 482 | CONFIG_IP_PNP=y |
483 | CONFIG_IP_PNP_DHCP=y | ||
484 | CONFIG_IP_PNP_BOOTP=y | ||
485 | CONFIG_IP_PNP_RARP=y | ||
479 | # CONFIG_NET_IPIP is not set | 486 | # CONFIG_NET_IPIP is not set |
480 | # CONFIG_NET_IPGRE is not set | 487 | # CONFIG_NET_IPGRE is not set |
481 | CONFIG_IP_MROUTE=y | 488 | CONFIG_IP_MROUTE=y |
@@ -618,7 +625,6 @@ CONFIG_NET_SCHED=y | |||
618 | # CONFIG_NET_SCH_HTB is not set | 625 | # CONFIG_NET_SCH_HTB is not set |
619 | # CONFIG_NET_SCH_HFSC is not set | 626 | # CONFIG_NET_SCH_HFSC is not set |
620 | # CONFIG_NET_SCH_PRIO is not set | 627 | # CONFIG_NET_SCH_PRIO is not set |
621 | # CONFIG_NET_SCH_RR is not set | ||
622 | # CONFIG_NET_SCH_RED is not set | 628 | # CONFIG_NET_SCH_RED is not set |
623 | # CONFIG_NET_SCH_SFQ is not set | 629 | # CONFIG_NET_SCH_SFQ is not set |
624 | # CONFIG_NET_SCH_TEQL is not set | 630 | # CONFIG_NET_SCH_TEQL is not set |
@@ -680,28 +686,19 @@ CONFIG_FIB_RULES=y | |||
680 | CONFIG_CFG80211=y | 686 | CONFIG_CFG80211=y |
681 | CONFIG_NL80211=y | 687 | CONFIG_NL80211=y |
682 | CONFIG_WIRELESS_EXT=y | 688 | CONFIG_WIRELESS_EXT=y |
689 | CONFIG_WIRELESS_EXT_SYSFS=y | ||
683 | CONFIG_MAC80211=y | 690 | CONFIG_MAC80211=y |
684 | 691 | ||
685 | # | 692 | # |
686 | # Rate control algorithm selection | 693 | # Rate control algorithm selection |
687 | # | 694 | # |
695 | CONFIG_MAC80211_RC_PID=y | ||
688 | CONFIG_MAC80211_RC_DEFAULT_PID=y | 696 | CONFIG_MAC80211_RC_DEFAULT_PID=y |
689 | # CONFIG_MAC80211_RC_DEFAULT_NONE is not set | ||
690 | |||
691 | # | ||
692 | # Selecting 'y' for an algorithm will | ||
693 | # | ||
694 | |||
695 | # | ||
696 | # build the algorithm into mac80211. | ||
697 | # | ||
698 | CONFIG_MAC80211_RC_DEFAULT="pid" | 697 | CONFIG_MAC80211_RC_DEFAULT="pid" |
699 | CONFIG_MAC80211_RC_PID=y | ||
700 | # CONFIG_MAC80211_MESH is not set | 698 | # CONFIG_MAC80211_MESH is not set |
701 | CONFIG_MAC80211_LEDS=y | 699 | CONFIG_MAC80211_LEDS=y |
702 | # CONFIG_MAC80211_DEBUGFS is not set | 700 | # CONFIG_MAC80211_DEBUGFS is not set |
703 | # CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT is not set | 701 | # CONFIG_MAC80211_DEBUG_MENU is not set |
704 | # CONFIG_MAC80211_DEBUG is not set | ||
705 | # CONFIG_IEEE80211 is not set | 702 | # CONFIG_IEEE80211 is not set |
706 | # CONFIG_RFKILL is not set | 703 | # CONFIG_RFKILL is not set |
707 | # CONFIG_NET_9P is not set | 704 | # CONFIG_NET_9P is not set |
@@ -717,6 +714,8 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
717 | CONFIG_STANDALONE=y | 714 | CONFIG_STANDALONE=y |
718 | CONFIG_PREVENT_FIRMWARE_BUILD=y | 715 | CONFIG_PREVENT_FIRMWARE_BUILD=y |
719 | CONFIG_FW_LOADER=y | 716 | CONFIG_FW_LOADER=y |
717 | CONFIG_FIRMWARE_IN_KERNEL=y | ||
718 | CONFIG_EXTRA_FIRMWARE="" | ||
720 | # CONFIG_DEBUG_DRIVER is not set | 719 | # CONFIG_DEBUG_DRIVER is not set |
721 | CONFIG_DEBUG_DEVRES=y | 720 | CONFIG_DEBUG_DEVRES=y |
722 | # CONFIG_SYS_HYPERVISOR is not set | 721 | # CONFIG_SYS_HYPERVISOR is not set |
@@ -749,6 +748,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 | |||
749 | # CONFIG_BLK_DEV_XIP is not set | 748 | # CONFIG_BLK_DEV_XIP is not set |
750 | # CONFIG_CDROM_PKTCDVD is not set | 749 | # CONFIG_CDROM_PKTCDVD is not set |
751 | # CONFIG_ATA_OVER_ETH is not set | 750 | # CONFIG_ATA_OVER_ETH is not set |
751 | # CONFIG_BLK_DEV_HD is not set | ||
752 | CONFIG_MISC_DEVICES=y | 752 | CONFIG_MISC_DEVICES=y |
753 | # CONFIG_IBM_ASM is not set | 753 | # CONFIG_IBM_ASM is not set |
754 | # CONFIG_PHANTOM is not set | 754 | # CONFIG_PHANTOM is not set |
@@ -760,10 +760,12 @@ CONFIG_MISC_DEVICES=y | |||
760 | # CONFIG_FUJITSU_LAPTOP is not set | 760 | # CONFIG_FUJITSU_LAPTOP is not set |
761 | # CONFIG_TC1100_WMI is not set | 761 | # CONFIG_TC1100_WMI is not set |
762 | # CONFIG_MSI_LAPTOP is not set | 762 | # CONFIG_MSI_LAPTOP is not set |
763 | # CONFIG_COMPAL_LAPTOP is not set | ||
763 | # CONFIG_SONY_LAPTOP is not set | 764 | # CONFIG_SONY_LAPTOP is not set |
764 | # CONFIG_THINKPAD_ACPI is not set | 765 | # CONFIG_THINKPAD_ACPI is not set |
765 | # CONFIG_INTEL_MENLOW is not set | 766 | # CONFIG_INTEL_MENLOW is not set |
766 | # CONFIG_ENCLOSURE_SERVICES is not set | 767 | # CONFIG_ENCLOSURE_SERVICES is not set |
768 | # CONFIG_HP_ILO is not set | ||
767 | CONFIG_HAVE_IDE=y | 769 | CONFIG_HAVE_IDE=y |
768 | # CONFIG_IDE is not set | 770 | # CONFIG_IDE is not set |
769 | 771 | ||
@@ -802,12 +804,13 @@ CONFIG_SCSI_WAIT_SCAN=m | |||
802 | # | 804 | # |
803 | CONFIG_SCSI_SPI_ATTRS=y | 805 | CONFIG_SCSI_SPI_ATTRS=y |
804 | # CONFIG_SCSI_FC_ATTRS is not set | 806 | # CONFIG_SCSI_FC_ATTRS is not set |
805 | # CONFIG_SCSI_ISCSI_ATTRS is not set | 807 | CONFIG_SCSI_ISCSI_ATTRS=y |
806 | # CONFIG_SCSI_SAS_ATTRS is not set | 808 | # CONFIG_SCSI_SAS_ATTRS is not set |
807 | # CONFIG_SCSI_SAS_LIBSAS is not set | 809 | # CONFIG_SCSI_SAS_LIBSAS is not set |
808 | # CONFIG_SCSI_SRP_ATTRS is not set | 810 | # CONFIG_SCSI_SRP_ATTRS is not set |
809 | # CONFIG_SCSI_LOWLEVEL is not set | 811 | # CONFIG_SCSI_LOWLEVEL is not set |
810 | # CONFIG_SCSI_LOWLEVEL_PCMCIA is not set | 812 | # CONFIG_SCSI_LOWLEVEL_PCMCIA is not set |
813 | # CONFIG_SCSI_DH is not set | ||
811 | CONFIG_ATA=y | 814 | CONFIG_ATA=y |
812 | # CONFIG_ATA_NONSTANDARD is not set | 815 | # CONFIG_ATA_NONSTANDARD is not set |
813 | CONFIG_ATA_ACPI=y | 816 | CONFIG_ATA_ACPI=y |
@@ -842,7 +845,7 @@ CONFIG_PATA_AMD=y | |||
842 | # CONFIG_PATA_CS5536 is not set | 845 | # CONFIG_PATA_CS5536 is not set |
843 | # CONFIG_PATA_CYPRESS is not set | 846 | # CONFIG_PATA_CYPRESS is not set |
844 | # CONFIG_PATA_EFAR is not set | 847 | # CONFIG_PATA_EFAR is not set |
845 | # CONFIG_ATA_GENERIC is not set | 848 | CONFIG_ATA_GENERIC=y |
846 | # CONFIG_PATA_HPT366 is not set | 849 | # CONFIG_PATA_HPT366 is not set |
847 | # CONFIG_PATA_HPT37X is not set | 850 | # CONFIG_PATA_HPT37X is not set |
848 | # CONFIG_PATA_HPT3X2N is not set | 851 | # CONFIG_PATA_HPT3X2N is not set |
@@ -852,7 +855,7 @@ CONFIG_PATA_AMD=y | |||
852 | # CONFIG_PATA_JMICRON is not set | 855 | # CONFIG_PATA_JMICRON is not set |
853 | # CONFIG_PATA_TRIFLEX is not set | 856 | # CONFIG_PATA_TRIFLEX is not set |
854 | # CONFIG_PATA_MARVELL is not set | 857 | # CONFIG_PATA_MARVELL is not set |
855 | # CONFIG_PATA_MPIIX is not set | 858 | CONFIG_PATA_MPIIX=y |
856 | CONFIG_PATA_OLDPIIX=y | 859 | CONFIG_PATA_OLDPIIX=y |
857 | # CONFIG_PATA_NETCELL is not set | 860 | # CONFIG_PATA_NETCELL is not set |
858 | # CONFIG_PATA_NINJA32 is not set | 861 | # CONFIG_PATA_NINJA32 is not set |
@@ -871,6 +874,7 @@ CONFIG_PATA_OLDPIIX=y | |||
871 | # CONFIG_PATA_SIS is not set | 874 | # CONFIG_PATA_SIS is not set |
872 | # CONFIG_PATA_VIA is not set | 875 | # CONFIG_PATA_VIA is not set |
873 | # CONFIG_PATA_WINBOND is not set | 876 | # CONFIG_PATA_WINBOND is not set |
877 | CONFIG_PATA_SCH=y | ||
874 | CONFIG_MD=y | 878 | CONFIG_MD=y |
875 | CONFIG_BLK_DEV_MD=y | 879 | CONFIG_BLK_DEV_MD=y |
876 | # CONFIG_MD_LINEAR is not set | 880 | # CONFIG_MD_LINEAR is not set |
@@ -894,13 +898,16 @@ CONFIG_DM_ZERO=y | |||
894 | # | 898 | # |
895 | # IEEE 1394 (FireWire) support | 899 | # IEEE 1394 (FireWire) support |
896 | # | 900 | # |
901 | |||
902 | # | ||
903 | # Enable only one of the two stacks, unless you know what you are doing | ||
904 | # | ||
897 | # CONFIG_FIREWIRE is not set | 905 | # CONFIG_FIREWIRE is not set |
898 | # CONFIG_IEEE1394 is not set | 906 | # CONFIG_IEEE1394 is not set |
899 | # CONFIG_I2O is not set | 907 | # CONFIG_I2O is not set |
900 | CONFIG_MACINTOSH_DRIVERS=y | 908 | CONFIG_MACINTOSH_DRIVERS=y |
901 | CONFIG_MAC_EMUMOUSEBTN=y | 909 | CONFIG_MAC_EMUMOUSEBTN=y |
902 | CONFIG_NETDEVICES=y | 910 | CONFIG_NETDEVICES=y |
903 | # CONFIG_NETDEVICES_MULTIQUEUE is not set | ||
904 | # CONFIG_IFB is not set | 911 | # CONFIG_IFB is not set |
905 | # CONFIG_DUMMY is not set | 912 | # CONFIG_DUMMY is not set |
906 | # CONFIG_BONDING is not set | 913 | # CONFIG_BONDING is not set |
@@ -910,7 +917,23 @@ CONFIG_NETDEVICES=y | |||
910 | # CONFIG_VETH is not set | 917 | # CONFIG_VETH is not set |
911 | # CONFIG_NET_SB1000 is not set | 918 | # CONFIG_NET_SB1000 is not set |
912 | # CONFIG_ARCNET is not set | 919 | # CONFIG_ARCNET is not set |
913 | # CONFIG_PHYLIB is not set | 920 | CONFIG_PHYLIB=y |
921 | |||
922 | # | ||
923 | # MII PHY device drivers | ||
924 | # | ||
925 | # CONFIG_MARVELL_PHY is not set | ||
926 | # CONFIG_DAVICOM_PHY is not set | ||
927 | # CONFIG_QSEMI_PHY is not set | ||
928 | # CONFIG_LXT_PHY is not set | ||
929 | # CONFIG_CICADA_PHY is not set | ||
930 | # CONFIG_VITESSE_PHY is not set | ||
931 | # CONFIG_SMSC_PHY is not set | ||
932 | # CONFIG_BROADCOM_PHY is not set | ||
933 | # CONFIG_ICPLUS_PHY is not set | ||
934 | # CONFIG_REALTEK_PHY is not set | ||
935 | # CONFIG_FIXED_PHY is not set | ||
936 | # CONFIG_MDIO_BITBANG is not set | ||
914 | CONFIG_NET_ETHERNET=y | 937 | CONFIG_NET_ETHERNET=y |
915 | CONFIG_MII=y | 938 | CONFIG_MII=y |
916 | # CONFIG_HAPPYMEAL is not set | 939 | # CONFIG_HAPPYMEAL is not set |
@@ -943,10 +966,10 @@ CONFIG_FORCEDETH=y | |||
943 | CONFIG_E100=y | 966 | CONFIG_E100=y |
944 | # CONFIG_FEALNX is not set | 967 | # CONFIG_FEALNX is not set |
945 | # CONFIG_NATSEMI is not set | 968 | # CONFIG_NATSEMI is not set |
946 | # CONFIG_NE2K_PCI is not set | 969 | CONFIG_NE2K_PCI=y |
947 | # CONFIG_8139CP is not set | 970 | # CONFIG_8139CP is not set |
948 | CONFIG_8139TOO=y | 971 | CONFIG_8139TOO=y |
949 | CONFIG_8139TOO_PIO=y | 972 | # CONFIG_8139TOO_PIO is not set |
950 | # CONFIG_8139TOO_TUNE_TWISTER is not set | 973 | # CONFIG_8139TOO_TUNE_TWISTER is not set |
951 | # CONFIG_8139TOO_8129 is not set | 974 | # CONFIG_8139TOO_8129 is not set |
952 | # CONFIG_8139_OLD_RX_RESET is not set | 975 | # CONFIG_8139_OLD_RX_RESET is not set |
@@ -961,25 +984,24 @@ CONFIG_NETDEV_1000=y | |||
961 | # CONFIG_ACENIC is not set | 984 | # CONFIG_ACENIC is not set |
962 | # CONFIG_DL2K is not set | 985 | # CONFIG_DL2K is not set |
963 | CONFIG_E1000=y | 986 | CONFIG_E1000=y |
964 | # CONFIG_E1000_NAPI is not set | ||
965 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | 987 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set |
966 | # CONFIG_E1000E is not set | 988 | CONFIG_E1000E=y |
967 | # CONFIG_E1000E_ENABLED is not set | ||
968 | # CONFIG_IP1000 is not set | 989 | # CONFIG_IP1000 is not set |
969 | # CONFIG_IGB is not set | 990 | # CONFIG_IGB is not set |
970 | # CONFIG_NS83820 is not set | 991 | # CONFIG_NS83820 is not set |
971 | # CONFIG_HAMACHI is not set | 992 | # CONFIG_HAMACHI is not set |
972 | # CONFIG_YELLOWFIN is not set | 993 | # CONFIG_YELLOWFIN is not set |
973 | # CONFIG_R8169 is not set | 994 | CONFIG_R8169=y |
974 | # CONFIG_SIS190 is not set | 995 | # CONFIG_SIS190 is not set |
975 | # CONFIG_SKGE is not set | 996 | # CONFIG_SKGE is not set |
976 | CONFIG_SKY2=y | 997 | CONFIG_SKY2=y |
977 | # CONFIG_SKY2_DEBUG is not set | 998 | # CONFIG_SKY2_DEBUG is not set |
978 | # CONFIG_VIA_VELOCITY is not set | 999 | # CONFIG_VIA_VELOCITY is not set |
979 | CONFIG_TIGON3=y | 1000 | CONFIG_TIGON3=y |
980 | # CONFIG_BNX2 is not set | 1001 | CONFIG_BNX2=y |
981 | # CONFIG_QLA3XXX is not set | 1002 | # CONFIG_QLA3XXX is not set |
982 | # CONFIG_ATL1 is not set | 1003 | # CONFIG_ATL1 is not set |
1004 | # CONFIG_ATL1E is not set | ||
983 | CONFIG_NETDEV_10000=y | 1005 | CONFIG_NETDEV_10000=y |
984 | # CONFIG_CHELSIO_T1 is not set | 1006 | # CONFIG_CHELSIO_T1 is not set |
985 | # CONFIG_CHELSIO_T3 is not set | 1007 | # CONFIG_CHELSIO_T3 is not set |
@@ -1019,13 +1041,14 @@ CONFIG_WLAN_80211=y | |||
1019 | # CONFIG_RTL8180 is not set | 1041 | # CONFIG_RTL8180 is not set |
1020 | # CONFIG_RTL8187 is not set | 1042 | # CONFIG_RTL8187 is not set |
1021 | # CONFIG_ADM8211 is not set | 1043 | # CONFIG_ADM8211 is not set |
1044 | # CONFIG_MAC80211_HWSIM is not set | ||
1022 | # CONFIG_P54_COMMON is not set | 1045 | # CONFIG_P54_COMMON is not set |
1023 | CONFIG_ATH5K=y | 1046 | CONFIG_ATH5K=y |
1024 | # CONFIG_ATH5K_DEBUG is not set | 1047 | # CONFIG_ATH5K_DEBUG is not set |
1025 | # CONFIG_IWLWIFI is not set | 1048 | # CONFIG_ATH9K is not set |
1026 | # CONFIG_IWLCORE is not set | 1049 | # CONFIG_IWLCORE is not set |
1027 | # CONFIG_IWLWIFI_LEDS is not set | 1050 | # CONFIG_IWLWIFI_LEDS is not set |
1028 | # CONFIG_IWL4965 is not set | 1051 | # CONFIG_IWLAGN is not set |
1029 | # CONFIG_IWL3945 is not set | 1052 | # CONFIG_IWL3945 is not set |
1030 | # CONFIG_HOSTAP is not set | 1053 | # CONFIG_HOSTAP is not set |
1031 | # CONFIG_B43 is not set | 1054 | # CONFIG_B43 is not set |
@@ -1105,6 +1128,7 @@ CONFIG_MOUSE_PS2_TRACKPOINT=y | |||
1105 | # CONFIG_MOUSE_PS2_TOUCHKIT is not set | 1128 | # CONFIG_MOUSE_PS2_TOUCHKIT is not set |
1106 | # CONFIG_MOUSE_SERIAL is not set | 1129 | # CONFIG_MOUSE_SERIAL is not set |
1107 | # CONFIG_MOUSE_APPLETOUCH is not set | 1130 | # CONFIG_MOUSE_APPLETOUCH is not set |
1131 | # CONFIG_MOUSE_BCM5974 is not set | ||
1108 | # CONFIG_MOUSE_VSXXXAA is not set | 1132 | # CONFIG_MOUSE_VSXXXAA is not set |
1109 | CONFIG_INPUT_JOYSTICK=y | 1133 | CONFIG_INPUT_JOYSTICK=y |
1110 | # CONFIG_JOYSTICK_ANALOG is not set | 1134 | # CONFIG_JOYSTICK_ANALOG is not set |
@@ -1139,12 +1163,14 @@ CONFIG_INPUT_TOUCHSCREEN=y | |||
1139 | # CONFIG_TOUCHSCREEN_GUNZE is not set | 1163 | # CONFIG_TOUCHSCREEN_GUNZE is not set |
1140 | # CONFIG_TOUCHSCREEN_ELO is not set | 1164 | # CONFIG_TOUCHSCREEN_ELO is not set |
1141 | # CONFIG_TOUCHSCREEN_MTOUCH is not set | 1165 | # CONFIG_TOUCHSCREEN_MTOUCH is not set |
1166 | # CONFIG_TOUCHSCREEN_INEXIO is not set | ||
1142 | # CONFIG_TOUCHSCREEN_MK712 is not set | 1167 | # CONFIG_TOUCHSCREEN_MK712 is not set |
1143 | # CONFIG_TOUCHSCREEN_PENMOUNT is not set | 1168 | # CONFIG_TOUCHSCREEN_PENMOUNT is not set |
1144 | # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set | 1169 | # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set |
1145 | # CONFIG_TOUCHSCREEN_TOUCHWIN is not set | 1170 | # CONFIG_TOUCHSCREEN_TOUCHWIN is not set |
1146 | # CONFIG_TOUCHSCREEN_UCB1400 is not set | 1171 | # CONFIG_TOUCHSCREEN_UCB1400 is not set |
1147 | # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set | 1172 | # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set |
1173 | # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set | ||
1148 | CONFIG_INPUT_MISC=y | 1174 | CONFIG_INPUT_MISC=y |
1149 | # CONFIG_INPUT_PCSPKR is not set | 1175 | # CONFIG_INPUT_PCSPKR is not set |
1150 | # CONFIG_INPUT_APANEL is not set | 1176 | # CONFIG_INPUT_APANEL is not set |
@@ -1173,6 +1199,7 @@ CONFIG_SERIO_LIBPS2=y | |||
1173 | # Character devices | 1199 | # Character devices |
1174 | # | 1200 | # |
1175 | CONFIG_VT=y | 1201 | CONFIG_VT=y |
1202 | CONFIG_CONSOLE_TRANSLATIONS=y | ||
1176 | CONFIG_VT_CONSOLE=y | 1203 | CONFIG_VT_CONSOLE=y |
1177 | CONFIG_HW_CONSOLE=y | 1204 | CONFIG_HW_CONSOLE=y |
1178 | CONFIG_VT_HW_CONSOLE_BINDING=y | 1205 | CONFIG_VT_HW_CONSOLE_BINDING=y |
@@ -1223,8 +1250,8 @@ CONFIG_UNIX98_PTYS=y | |||
1223 | # CONFIG_LEGACY_PTYS is not set | 1250 | # CONFIG_LEGACY_PTYS is not set |
1224 | # CONFIG_IPMI_HANDLER is not set | 1251 | # CONFIG_IPMI_HANDLER is not set |
1225 | CONFIG_HW_RANDOM=y | 1252 | CONFIG_HW_RANDOM=y |
1226 | # CONFIG_HW_RANDOM_INTEL is not set | 1253 | CONFIG_HW_RANDOM_INTEL=y |
1227 | # CONFIG_HW_RANDOM_AMD is not set | 1254 | CONFIG_HW_RANDOM_AMD=y |
1228 | CONFIG_HW_RANDOM_GEODE=y | 1255 | CONFIG_HW_RANDOM_GEODE=y |
1229 | CONFIG_HW_RANDOM_VIA=y | 1256 | CONFIG_HW_RANDOM_VIA=y |
1230 | CONFIG_NVRAM=y | 1257 | CONFIG_NVRAM=y |
@@ -1245,7 +1272,6 @@ CONFIG_NVRAM=y | |||
1245 | # CONFIG_CS5535_GPIO is not set | 1272 | # CONFIG_CS5535_GPIO is not set |
1246 | # CONFIG_RAW_DRIVER is not set | 1273 | # CONFIG_RAW_DRIVER is not set |
1247 | CONFIG_HPET=y | 1274 | CONFIG_HPET=y |
1248 | # CONFIG_HPET_RTC_IRQ is not set | ||
1249 | # CONFIG_HPET_MMAP is not set | 1275 | # CONFIG_HPET_MMAP is not set |
1250 | # CONFIG_HANGCHECK_TIMER is not set | 1276 | # CONFIG_HANGCHECK_TIMER is not set |
1251 | # CONFIG_TCG_TPM is not set | 1277 | # CONFIG_TCG_TPM is not set |
@@ -1254,43 +1280,64 @@ CONFIG_DEVPORT=y | |||
1254 | CONFIG_I2C=y | 1280 | CONFIG_I2C=y |
1255 | CONFIG_I2C_BOARDINFO=y | 1281 | CONFIG_I2C_BOARDINFO=y |
1256 | # CONFIG_I2C_CHARDEV is not set | 1282 | # CONFIG_I2C_CHARDEV is not set |
1283 | CONFIG_I2C_HELPER_AUTO=y | ||
1257 | 1284 | ||
1258 | # | 1285 | # |
1259 | # I2C Hardware Bus support | 1286 | # I2C Hardware Bus support |
1260 | # | 1287 | # |
1288 | |||
1289 | # | ||
1290 | # PC SMBus host controller drivers | ||
1291 | # | ||
1261 | # CONFIG_I2C_ALI1535 is not set | 1292 | # CONFIG_I2C_ALI1535 is not set |
1262 | # CONFIG_I2C_ALI1563 is not set | 1293 | # CONFIG_I2C_ALI1563 is not set |
1263 | # CONFIG_I2C_ALI15X3 is not set | 1294 | # CONFIG_I2C_ALI15X3 is not set |
1264 | # CONFIG_I2C_AMD756 is not set | 1295 | # CONFIG_I2C_AMD756 is not set |
1265 | # CONFIG_I2C_AMD8111 is not set | 1296 | # CONFIG_I2C_AMD8111 is not set |
1266 | CONFIG_I2C_I801=y | 1297 | CONFIG_I2C_I801=y |
1267 | # CONFIG_I2C_I810 is not set | 1298 | # CONFIG_I2C_ISCH is not set |
1268 | # CONFIG_I2C_PIIX4 is not set | 1299 | # CONFIG_I2C_PIIX4 is not set |
1269 | # CONFIG_I2C_NFORCE2 is not set | 1300 | # CONFIG_I2C_NFORCE2 is not set |
1270 | # CONFIG_I2C_OCORES is not set | ||
1271 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
1272 | # CONFIG_I2C_PROSAVAGE is not set | ||
1273 | # CONFIG_I2C_SAVAGE4 is not set | ||
1274 | # CONFIG_I2C_SIMTEC is not set | ||
1275 | # CONFIG_SCx200_ACB is not set | ||
1276 | # CONFIG_I2C_SIS5595 is not set | 1301 | # CONFIG_I2C_SIS5595 is not set |
1277 | # CONFIG_I2C_SIS630 is not set | 1302 | # CONFIG_I2C_SIS630 is not set |
1278 | # CONFIG_I2C_SIS96X is not set | 1303 | # CONFIG_I2C_SIS96X is not set |
1279 | # CONFIG_I2C_TAOS_EVM is not set | ||
1280 | # CONFIG_I2C_STUB is not set | ||
1281 | # CONFIG_I2C_TINY_USB is not set | ||
1282 | # CONFIG_I2C_VIA is not set | 1304 | # CONFIG_I2C_VIA is not set |
1283 | # CONFIG_I2C_VIAPRO is not set | 1305 | # CONFIG_I2C_VIAPRO is not set |
1306 | |||
1307 | # | ||
1308 | # I2C system bus drivers (mostly embedded / system-on-chip) | ||
1309 | # | ||
1310 | # CONFIG_I2C_OCORES is not set | ||
1311 | # CONFIG_I2C_SIMTEC is not set | ||
1312 | |||
1313 | # | ||
1314 | # External I2C/SMBus adapter drivers | ||
1315 | # | ||
1316 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
1317 | # CONFIG_I2C_TAOS_EVM is not set | ||
1318 | # CONFIG_I2C_TINY_USB is not set | ||
1319 | |||
1320 | # | ||
1321 | # Graphics adapter I2C/DDC channel drivers | ||
1322 | # | ||
1284 | # CONFIG_I2C_VOODOO3 is not set | 1323 | # CONFIG_I2C_VOODOO3 is not set |
1324 | |||
1325 | # | ||
1326 | # Other I2C/SMBus bus drivers | ||
1327 | # | ||
1285 | # CONFIG_I2C_PCA_PLATFORM is not set | 1328 | # CONFIG_I2C_PCA_PLATFORM is not set |
1329 | # CONFIG_I2C_STUB is not set | ||
1330 | # CONFIG_SCx200_ACB is not set | ||
1286 | 1331 | ||
1287 | # | 1332 | # |
1288 | # Miscellaneous I2C Chip support | 1333 | # Miscellaneous I2C Chip support |
1289 | # | 1334 | # |
1290 | # CONFIG_DS1682 is not set | 1335 | # CONFIG_DS1682 is not set |
1336 | # CONFIG_AT24 is not set | ||
1291 | # CONFIG_SENSORS_EEPROM is not set | 1337 | # CONFIG_SENSORS_EEPROM is not set |
1292 | # CONFIG_SENSORS_PCF8574 is not set | 1338 | # CONFIG_SENSORS_PCF8574 is not set |
1293 | # CONFIG_PCF8575 is not set | 1339 | # CONFIG_PCF8575 is not set |
1340 | # CONFIG_SENSORS_PCA9539 is not set | ||
1294 | # CONFIG_SENSORS_PCF8591 is not set | 1341 | # CONFIG_SENSORS_PCF8591 is not set |
1295 | # CONFIG_SENSORS_MAX6875 is not set | 1342 | # CONFIG_SENSORS_MAX6875 is not set |
1296 | # CONFIG_SENSORS_TSL2550 is not set | 1343 | # CONFIG_SENSORS_TSL2550 is not set |
@@ -1299,6 +1346,8 @@ CONFIG_I2C_I801=y | |||
1299 | # CONFIG_I2C_DEBUG_BUS is not set | 1346 | # CONFIG_I2C_DEBUG_BUS is not set |
1300 | # CONFIG_I2C_DEBUG_CHIP is not set | 1347 | # CONFIG_I2C_DEBUG_CHIP is not set |
1301 | # CONFIG_SPI is not set | 1348 | # CONFIG_SPI is not set |
1349 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | ||
1350 | # CONFIG_GPIOLIB is not set | ||
1302 | # CONFIG_W1 is not set | 1351 | # CONFIG_W1 is not set |
1303 | CONFIG_POWER_SUPPLY=y | 1352 | CONFIG_POWER_SUPPLY=y |
1304 | # CONFIG_POWER_SUPPLY_DEBUG is not set | 1353 | # CONFIG_POWER_SUPPLY_DEBUG is not set |
@@ -1360,8 +1409,10 @@ CONFIG_SSB_POSSIBLE=y | |||
1360 | # | 1409 | # |
1361 | # Multifunction device drivers | 1410 | # Multifunction device drivers |
1362 | # | 1411 | # |
1412 | # CONFIG_MFD_CORE is not set | ||
1363 | # CONFIG_MFD_SM501 is not set | 1413 | # CONFIG_MFD_SM501 is not set |
1364 | # CONFIG_HTC_PASIC3 is not set | 1414 | # CONFIG_HTC_PASIC3 is not set |
1415 | # CONFIG_MFD_TMIO is not set | ||
1365 | 1416 | ||
1366 | # | 1417 | # |
1367 | # Multimedia devices | 1418 | # Multimedia devices |
@@ -1372,6 +1423,7 @@ CONFIG_SSB_POSSIBLE=y | |||
1372 | # | 1423 | # |
1373 | # CONFIG_VIDEO_DEV is not set | 1424 | # CONFIG_VIDEO_DEV is not set |
1374 | # CONFIG_DVB_CORE is not set | 1425 | # CONFIG_DVB_CORE is not set |
1426 | # CONFIG_VIDEO_MEDIA is not set | ||
1375 | 1427 | ||
1376 | # | 1428 | # |
1377 | # Multimedia drivers | 1429 | # Multimedia drivers |
@@ -1418,7 +1470,6 @@ CONFIG_FB_CFB_IMAGEBLIT=y | |||
1418 | # CONFIG_FB_SYS_IMAGEBLIT is not set | 1470 | # CONFIG_FB_SYS_IMAGEBLIT is not set |
1419 | # CONFIG_FB_FOREIGN_ENDIAN is not set | 1471 | # CONFIG_FB_FOREIGN_ENDIAN is not set |
1420 | # CONFIG_FB_SYS_FOPS is not set | 1472 | # CONFIG_FB_SYS_FOPS is not set |
1421 | CONFIG_FB_DEFERRED_IO=y | ||
1422 | # CONFIG_FB_SVGALIB is not set | 1473 | # CONFIG_FB_SVGALIB is not set |
1423 | # CONFIG_FB_MACMODES is not set | 1474 | # CONFIG_FB_MACMODES is not set |
1424 | # CONFIG_FB_BACKLIGHT is not set | 1475 | # CONFIG_FB_BACKLIGHT is not set |
@@ -1463,6 +1514,7 @@ CONFIG_FB_EFI=y | |||
1463 | # CONFIG_FB_TRIDENT is not set | 1514 | # CONFIG_FB_TRIDENT is not set |
1464 | # CONFIG_FB_ARK is not set | 1515 | # CONFIG_FB_ARK is not set |
1465 | # CONFIG_FB_PM3 is not set | 1516 | # CONFIG_FB_PM3 is not set |
1517 | # CONFIG_FB_CARMINE is not set | ||
1466 | # CONFIG_FB_GEODE is not set | 1518 | # CONFIG_FB_GEODE is not set |
1467 | # CONFIG_FB_VIRTUAL is not set | 1519 | # CONFIG_FB_VIRTUAL is not set |
1468 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 1520 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
@@ -1470,6 +1522,7 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y | |||
1470 | CONFIG_BACKLIGHT_CLASS_DEVICE=y | 1522 | CONFIG_BACKLIGHT_CLASS_DEVICE=y |
1471 | # CONFIG_BACKLIGHT_CORGI is not set | 1523 | # CONFIG_BACKLIGHT_CORGI is not set |
1472 | # CONFIG_BACKLIGHT_PROGEAR is not set | 1524 | # CONFIG_BACKLIGHT_PROGEAR is not set |
1525 | # CONFIG_BACKLIGHT_MBP_NVIDIA is not set | ||
1473 | 1526 | ||
1474 | # | 1527 | # |
1475 | # Display device support | 1528 | # Display device support |
@@ -1489,15 +1542,7 @@ CONFIG_LOGO=y | |||
1489 | # CONFIG_LOGO_LINUX_MONO is not set | 1542 | # CONFIG_LOGO_LINUX_MONO is not set |
1490 | # CONFIG_LOGO_LINUX_VGA16 is not set | 1543 | # CONFIG_LOGO_LINUX_VGA16 is not set |
1491 | CONFIG_LOGO_LINUX_CLUT224=y | 1544 | CONFIG_LOGO_LINUX_CLUT224=y |
1492 | |||
1493 | # | ||
1494 | # Sound | ||
1495 | # | ||
1496 | CONFIG_SOUND=y | 1545 | CONFIG_SOUND=y |
1497 | |||
1498 | # | ||
1499 | # Advanced Linux Sound Architecture | ||
1500 | # | ||
1501 | CONFIG_SND=y | 1546 | CONFIG_SND=y |
1502 | CONFIG_SND_TIMER=y | 1547 | CONFIG_SND_TIMER=y |
1503 | CONFIG_SND_PCM=y | 1548 | CONFIG_SND_PCM=y |
@@ -1515,20 +1560,14 @@ CONFIG_SND_VERBOSE_PROCFS=y | |||
1515 | # CONFIG_SND_VERBOSE_PRINTK is not set | 1560 | # CONFIG_SND_VERBOSE_PRINTK is not set |
1516 | # CONFIG_SND_DEBUG is not set | 1561 | # CONFIG_SND_DEBUG is not set |
1517 | CONFIG_SND_VMASTER=y | 1562 | CONFIG_SND_VMASTER=y |
1518 | 1563 | CONFIG_SND_DRIVERS=y | |
1519 | # | ||
1520 | # Generic devices | ||
1521 | # | ||
1522 | # CONFIG_SND_PCSP is not set | 1564 | # CONFIG_SND_PCSP is not set |
1523 | # CONFIG_SND_DUMMY is not set | 1565 | # CONFIG_SND_DUMMY is not set |
1524 | # CONFIG_SND_VIRMIDI is not set | 1566 | # CONFIG_SND_VIRMIDI is not set |
1525 | # CONFIG_SND_MTPAV is not set | 1567 | # CONFIG_SND_MTPAV is not set |
1526 | # CONFIG_SND_SERIAL_U16550 is not set | 1568 | # CONFIG_SND_SERIAL_U16550 is not set |
1527 | # CONFIG_SND_MPU401 is not set | 1569 | # CONFIG_SND_MPU401 is not set |
1528 | 1570 | CONFIG_SND_PCI=y | |
1529 | # | ||
1530 | # PCI devices | ||
1531 | # | ||
1532 | # CONFIG_SND_AD1889 is not set | 1571 | # CONFIG_SND_AD1889 is not set |
1533 | # CONFIG_SND_ALS300 is not set | 1572 | # CONFIG_SND_ALS300 is not set |
1534 | # CONFIG_SND_ALS4000 is not set | 1573 | # CONFIG_SND_ALS4000 is not set |
@@ -1603,36 +1642,14 @@ CONFIG_SND_HDA_GENERIC=y | |||
1603 | # CONFIG_SND_VIRTUOSO is not set | 1642 | # CONFIG_SND_VIRTUOSO is not set |
1604 | # CONFIG_SND_VX222 is not set | 1643 | # CONFIG_SND_VX222 is not set |
1605 | # CONFIG_SND_YMFPCI is not set | 1644 | # CONFIG_SND_YMFPCI is not set |
1606 | 1645 | CONFIG_SND_USB=y | |
1607 | # | ||
1608 | # USB devices | ||
1609 | # | ||
1610 | # CONFIG_SND_USB_AUDIO is not set | 1646 | # CONFIG_SND_USB_AUDIO is not set |
1611 | # CONFIG_SND_USB_USX2Y is not set | 1647 | # CONFIG_SND_USB_USX2Y is not set |
1612 | # CONFIG_SND_USB_CAIAQ is not set | 1648 | # CONFIG_SND_USB_CAIAQ is not set |
1613 | 1649 | CONFIG_SND_PCMCIA=y | |
1614 | # | ||
1615 | # PCMCIA devices | ||
1616 | # | ||
1617 | # CONFIG_SND_VXPOCKET is not set | 1650 | # CONFIG_SND_VXPOCKET is not set |
1618 | # CONFIG_SND_PDAUDIOCF is not set | 1651 | # CONFIG_SND_PDAUDIOCF is not set |
1619 | |||
1620 | # | ||
1621 | # System on Chip audio support | ||
1622 | # | ||
1623 | # CONFIG_SND_SOC is not set | 1652 | # CONFIG_SND_SOC is not set |
1624 | |||
1625 | # | ||
1626 | # ALSA SoC audio for Freescale SOCs | ||
1627 | # | ||
1628 | |||
1629 | # | ||
1630 | # SoC Audio for the Texas Instruments OMAP | ||
1631 | # | ||
1632 | |||
1633 | # | ||
1634 | # Open Sound System | ||
1635 | # | ||
1636 | # CONFIG_SOUND_PRIME is not set | 1653 | # CONFIG_SOUND_PRIME is not set |
1637 | CONFIG_HID_SUPPORT=y | 1654 | CONFIG_HID_SUPPORT=y |
1638 | CONFIG_HID=y | 1655 | CONFIG_HID=y |
@@ -1668,6 +1685,7 @@ CONFIG_USB_DEVICEFS=y | |||
1668 | # CONFIG_USB_DYNAMIC_MINORS is not set | 1685 | # CONFIG_USB_DYNAMIC_MINORS is not set |
1669 | CONFIG_USB_SUSPEND=y | 1686 | CONFIG_USB_SUSPEND=y |
1670 | # CONFIG_USB_OTG is not set | 1687 | # CONFIG_USB_OTG is not set |
1688 | CONFIG_USB_MON=y | ||
1671 | 1689 | ||
1672 | # | 1690 | # |
1673 | # USB Host Controller Drivers | 1691 | # USB Host Controller Drivers |
@@ -1691,6 +1709,7 @@ CONFIG_USB_UHCI_HCD=y | |||
1691 | # | 1709 | # |
1692 | # CONFIG_USB_ACM is not set | 1710 | # CONFIG_USB_ACM is not set |
1693 | CONFIG_USB_PRINTER=y | 1711 | CONFIG_USB_PRINTER=y |
1712 | # CONFIG_USB_WDM is not set | ||
1694 | 1713 | ||
1695 | # | 1714 | # |
1696 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' | 1715 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' |
@@ -1712,6 +1731,7 @@ CONFIG_USB_STORAGE=y | |||
1712 | # CONFIG_USB_STORAGE_ALAUDA is not set | 1731 | # CONFIG_USB_STORAGE_ALAUDA is not set |
1713 | # CONFIG_USB_STORAGE_ONETOUCH is not set | 1732 | # CONFIG_USB_STORAGE_ONETOUCH is not set |
1714 | # CONFIG_USB_STORAGE_KARMA is not set | 1733 | # CONFIG_USB_STORAGE_KARMA is not set |
1734 | # CONFIG_USB_STORAGE_SIERRA is not set | ||
1715 | # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set | 1735 | # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set |
1716 | CONFIG_USB_LIBUSUAL=y | 1736 | CONFIG_USB_LIBUSUAL=y |
1717 | 1737 | ||
@@ -1720,7 +1740,6 @@ CONFIG_USB_LIBUSUAL=y | |||
1720 | # | 1740 | # |
1721 | # CONFIG_USB_MDC800 is not set | 1741 | # CONFIG_USB_MDC800 is not set |
1722 | # CONFIG_USB_MICROTEK is not set | 1742 | # CONFIG_USB_MICROTEK is not set |
1723 | CONFIG_USB_MON=y | ||
1724 | 1743 | ||
1725 | # | 1744 | # |
1726 | # USB port drivers | 1745 | # USB port drivers |
@@ -1733,7 +1752,6 @@ CONFIG_USB_MON=y | |||
1733 | # CONFIG_USB_EMI62 is not set | 1752 | # CONFIG_USB_EMI62 is not set |
1734 | # CONFIG_USB_EMI26 is not set | 1753 | # CONFIG_USB_EMI26 is not set |
1735 | # CONFIG_USB_ADUTUX is not set | 1754 | # CONFIG_USB_ADUTUX is not set |
1736 | # CONFIG_USB_AUERSWALD is not set | ||
1737 | # CONFIG_USB_RIO500 is not set | 1755 | # CONFIG_USB_RIO500 is not set |
1738 | # CONFIG_USB_LEGOTOWER is not set | 1756 | # CONFIG_USB_LEGOTOWER is not set |
1739 | # CONFIG_USB_LCD is not set | 1757 | # CONFIG_USB_LCD is not set |
@@ -1750,6 +1768,7 @@ CONFIG_USB_MON=y | |||
1750 | # CONFIG_USB_TRANCEVIBRATOR is not set | 1768 | # CONFIG_USB_TRANCEVIBRATOR is not set |
1751 | # CONFIG_USB_IOWARRIOR is not set | 1769 | # CONFIG_USB_IOWARRIOR is not set |
1752 | # CONFIG_USB_TEST is not set | 1770 | # CONFIG_USB_TEST is not set |
1771 | # CONFIG_USB_ISIGHTFW is not set | ||
1753 | # CONFIG_USB_GADGET is not set | 1772 | # CONFIG_USB_GADGET is not set |
1754 | # CONFIG_MMC is not set | 1773 | # CONFIG_MMC is not set |
1755 | # CONFIG_MEMSTICK is not set | 1774 | # CONFIG_MEMSTICK is not set |
@@ -1759,7 +1778,9 @@ CONFIG_LEDS_CLASS=y | |||
1759 | # | 1778 | # |
1760 | # LED drivers | 1779 | # LED drivers |
1761 | # | 1780 | # |
1781 | # CONFIG_LEDS_PCA9532 is not set | ||
1762 | # CONFIG_LEDS_CLEVO_MAIL is not set | 1782 | # CONFIG_LEDS_CLEVO_MAIL is not set |
1783 | # CONFIG_LEDS_PCA955X is not set | ||
1763 | 1784 | ||
1764 | # | 1785 | # |
1765 | # LED Triggers | 1786 | # LED Triggers |
@@ -1805,6 +1826,7 @@ CONFIG_RTC_INTF_DEV=y | |||
1805 | # CONFIG_RTC_DRV_PCF8583 is not set | 1826 | # CONFIG_RTC_DRV_PCF8583 is not set |
1806 | # CONFIG_RTC_DRV_M41T80 is not set | 1827 | # CONFIG_RTC_DRV_M41T80 is not set |
1807 | # CONFIG_RTC_DRV_S35390A is not set | 1828 | # CONFIG_RTC_DRV_S35390A is not set |
1829 | # CONFIG_RTC_DRV_FM3130 is not set | ||
1808 | 1830 | ||
1809 | # | 1831 | # |
1810 | # SPI RTC drivers | 1832 | # SPI RTC drivers |
@@ -1837,11 +1859,13 @@ CONFIG_DMADEVICES=y | |||
1837 | # Firmware Drivers | 1859 | # Firmware Drivers |
1838 | # | 1860 | # |
1839 | # CONFIG_EDD is not set | 1861 | # CONFIG_EDD is not set |
1862 | CONFIG_FIRMWARE_MEMMAP=y | ||
1840 | CONFIG_EFI_VARS=y | 1863 | CONFIG_EFI_VARS=y |
1841 | # CONFIG_DELL_RBU is not set | 1864 | # CONFIG_DELL_RBU is not set |
1842 | # CONFIG_DCDBAS is not set | 1865 | # CONFIG_DCDBAS is not set |
1843 | CONFIG_DMIID=y | 1866 | CONFIG_DMIID=y |
1844 | # CONFIG_ISCSI_IBFT_FIND is not set | 1867 | CONFIG_ISCSI_IBFT_FIND=y |
1868 | CONFIG_ISCSI_IBFT=y | ||
1845 | 1869 | ||
1846 | # | 1870 | # |
1847 | # File systems | 1871 | # File systems |
@@ -1920,14 +1944,27 @@ CONFIG_HUGETLB_PAGE=y | |||
1920 | # CONFIG_CRAMFS is not set | 1944 | # CONFIG_CRAMFS is not set |
1921 | # CONFIG_VXFS_FS is not set | 1945 | # CONFIG_VXFS_FS is not set |
1922 | # CONFIG_MINIX_FS is not set | 1946 | # CONFIG_MINIX_FS is not set |
1947 | # CONFIG_OMFS_FS is not set | ||
1923 | # CONFIG_HPFS_FS is not set | 1948 | # CONFIG_HPFS_FS is not set |
1924 | # CONFIG_QNX4FS_FS is not set | 1949 | # CONFIG_QNX4FS_FS is not set |
1925 | # CONFIG_ROMFS_FS is not set | 1950 | # CONFIG_ROMFS_FS is not set |
1926 | # CONFIG_SYSV_FS is not set | 1951 | # CONFIG_SYSV_FS is not set |
1927 | # CONFIG_UFS_FS is not set | 1952 | # CONFIG_UFS_FS is not set |
1928 | CONFIG_NETWORK_FILESYSTEMS=y | 1953 | CONFIG_NETWORK_FILESYSTEMS=y |
1929 | # CONFIG_NFS_FS is not set | 1954 | CONFIG_NFS_FS=y |
1955 | CONFIG_NFS_V3=y | ||
1956 | CONFIG_NFS_V3_ACL=y | ||
1957 | CONFIG_NFS_V4=y | ||
1958 | CONFIG_ROOT_NFS=y | ||
1930 | # CONFIG_NFSD is not set | 1959 | # CONFIG_NFSD is not set |
1960 | CONFIG_LOCKD=y | ||
1961 | CONFIG_LOCKD_V4=y | ||
1962 | CONFIG_NFS_ACL_SUPPORT=y | ||
1963 | CONFIG_NFS_COMMON=y | ||
1964 | CONFIG_SUNRPC=y | ||
1965 | CONFIG_SUNRPC_GSS=y | ||
1966 | CONFIG_RPCSEC_GSS_KRB5=y | ||
1967 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
1931 | # CONFIG_SMB_FS is not set | 1968 | # CONFIG_SMB_FS is not set |
1932 | # CONFIG_CIFS is not set | 1969 | # CONFIG_CIFS is not set |
1933 | # CONFIG_NCP_FS is not set | 1970 | # CONFIG_NCP_FS is not set |
@@ -2001,9 +2038,9 @@ CONFIG_NLS_UTF8=y | |||
2001 | # Kernel hacking | 2038 | # Kernel hacking |
2002 | # | 2039 | # |
2003 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | 2040 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y |
2004 | # CONFIG_PRINTK_TIME is not set | 2041 | CONFIG_PRINTK_TIME=y |
2005 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 2042 | CONFIG_ENABLE_WARN_DEPRECATED=y |
2006 | # CONFIG_ENABLE_MUST_CHECK is not set | 2043 | CONFIG_ENABLE_MUST_CHECK=y |
2007 | CONFIG_FRAME_WARN=2048 | 2044 | CONFIG_FRAME_WARN=2048 |
2008 | CONFIG_MAGIC_SYSRQ=y | 2045 | CONFIG_MAGIC_SYSRQ=y |
2009 | # CONFIG_UNUSED_SYMBOLS is not set | 2046 | # CONFIG_UNUSED_SYMBOLS is not set |
@@ -2033,6 +2070,7 @@ CONFIG_DEBUG_BUGVERBOSE=y | |||
2033 | # CONFIG_DEBUG_INFO is not set | 2070 | # CONFIG_DEBUG_INFO is not set |
2034 | # CONFIG_DEBUG_VM is not set | 2071 | # CONFIG_DEBUG_VM is not set |
2035 | # CONFIG_DEBUG_WRITECOUNT is not set | 2072 | # CONFIG_DEBUG_WRITECOUNT is not set |
2073 | CONFIG_DEBUG_MEMORY_INIT=y | ||
2036 | # CONFIG_DEBUG_LIST is not set | 2074 | # CONFIG_DEBUG_LIST is not set |
2037 | # CONFIG_DEBUG_SG is not set | 2075 | # CONFIG_DEBUG_SG is not set |
2038 | CONFIG_FRAME_POINTER=y | 2076 | CONFIG_FRAME_POINTER=y |
@@ -2043,23 +2081,32 @@ CONFIG_FRAME_POINTER=y | |||
2043 | # CONFIG_LKDTM is not set | 2081 | # CONFIG_LKDTM is not set |
2044 | # CONFIG_FAULT_INJECTION is not set | 2082 | # CONFIG_FAULT_INJECTION is not set |
2045 | # CONFIG_LATENCYTOP is not set | 2083 | # CONFIG_LATENCYTOP is not set |
2084 | CONFIG_SYSCTL_SYSCALL_CHECK=y | ||
2085 | CONFIG_HAVE_FTRACE=y | ||
2086 | CONFIG_HAVE_DYNAMIC_FTRACE=y | ||
2087 | # CONFIG_FTRACE is not set | ||
2088 | # CONFIG_IRQSOFF_TRACER is not set | ||
2089 | # CONFIG_SYSPROF_TRACER is not set | ||
2090 | # CONFIG_SCHED_TRACER is not set | ||
2091 | # CONFIG_CONTEXT_SWITCH_TRACER is not set | ||
2046 | CONFIG_PROVIDE_OHCI1394_DMA_INIT=y | 2092 | CONFIG_PROVIDE_OHCI1394_DMA_INIT=y |
2047 | # CONFIG_SAMPLES is not set | 2093 | # CONFIG_SAMPLES is not set |
2048 | # CONFIG_KGDB is not set | ||
2049 | CONFIG_HAVE_ARCH_KGDB=y | 2094 | CONFIG_HAVE_ARCH_KGDB=y |
2095 | # CONFIG_KGDB is not set | ||
2050 | # CONFIG_STRICT_DEVMEM is not set | 2096 | # CONFIG_STRICT_DEVMEM is not set |
2097 | CONFIG_X86_VERBOSE_BOOTUP=y | ||
2051 | CONFIG_EARLY_PRINTK=y | 2098 | CONFIG_EARLY_PRINTK=y |
2052 | CONFIG_DEBUG_STACKOVERFLOW=y | 2099 | CONFIG_DEBUG_STACKOVERFLOW=y |
2053 | CONFIG_DEBUG_STACK_USAGE=y | 2100 | CONFIG_DEBUG_STACK_USAGE=y |
2054 | # CONFIG_DEBUG_PAGEALLOC is not set | 2101 | # CONFIG_DEBUG_PAGEALLOC is not set |
2102 | # CONFIG_DEBUG_PER_CPU_MAPS is not set | ||
2055 | # CONFIG_X86_PTDUMP is not set | 2103 | # CONFIG_X86_PTDUMP is not set |
2056 | CONFIG_DEBUG_RODATA=y | 2104 | CONFIG_DEBUG_RODATA=y |
2057 | # CONFIG_DEBUG_RODATA_TEST is not set | 2105 | # CONFIG_DEBUG_RODATA_TEST is not set |
2058 | CONFIG_DEBUG_NX_TEST=m | 2106 | CONFIG_DEBUG_NX_TEST=m |
2059 | # CONFIG_4KSTACKS is not set | 2107 | # CONFIG_4KSTACKS is not set |
2060 | CONFIG_X86_FIND_SMP_CONFIG=y | ||
2061 | CONFIG_X86_MPPARSE=y | ||
2062 | CONFIG_DOUBLEFAULT=y | 2108 | CONFIG_DOUBLEFAULT=y |
2109 | # CONFIG_MMIOTRACE is not set | ||
2063 | CONFIG_IO_DELAY_TYPE_0X80=0 | 2110 | CONFIG_IO_DELAY_TYPE_0X80=0 |
2064 | CONFIG_IO_DELAY_TYPE_0XED=1 | 2111 | CONFIG_IO_DELAY_TYPE_0XED=1 |
2065 | CONFIG_IO_DELAY_TYPE_UDELAY=2 | 2112 | CONFIG_IO_DELAY_TYPE_UDELAY=2 |
@@ -2071,6 +2118,7 @@ CONFIG_IO_DELAY_0X80=y | |||
2071 | CONFIG_DEFAULT_IO_DELAY_TYPE=0 | 2118 | CONFIG_DEFAULT_IO_DELAY_TYPE=0 |
2072 | CONFIG_DEBUG_BOOT_PARAMS=y | 2119 | CONFIG_DEBUG_BOOT_PARAMS=y |
2073 | # CONFIG_CPA_DEBUG is not set | 2120 | # CONFIG_CPA_DEBUG is not set |
2121 | CONFIG_OPTIMIZE_INLINING=y | ||
2074 | 2122 | ||
2075 | # | 2123 | # |
2076 | # Security options | 2124 | # Security options |
@@ -2080,7 +2128,6 @@ CONFIG_KEYS_DEBUG_PROC_KEYS=y | |||
2080 | CONFIG_SECURITY=y | 2128 | CONFIG_SECURITY=y |
2081 | CONFIG_SECURITY_NETWORK=y | 2129 | CONFIG_SECURITY_NETWORK=y |
2082 | # CONFIG_SECURITY_NETWORK_XFRM is not set | 2130 | # CONFIG_SECURITY_NETWORK_XFRM is not set |
2083 | CONFIG_SECURITY_CAPABILITIES=y | ||
2084 | CONFIG_SECURITY_FILE_CAPABILITIES=y | 2131 | CONFIG_SECURITY_FILE_CAPABILITIES=y |
2085 | # CONFIG_SECURITY_ROOTPLUG is not set | 2132 | # CONFIG_SECURITY_ROOTPLUG is not set |
2086 | CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536 | 2133 | CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536 |
@@ -2141,6 +2188,10 @@ CONFIG_CRYPTO_HMAC=y | |||
2141 | # CONFIG_CRYPTO_MD4 is not set | 2188 | # CONFIG_CRYPTO_MD4 is not set |
2142 | CONFIG_CRYPTO_MD5=y | 2189 | CONFIG_CRYPTO_MD5=y |
2143 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | 2190 | # CONFIG_CRYPTO_MICHAEL_MIC is not set |
2191 | # CONFIG_CRYPTO_RMD128 is not set | ||
2192 | # CONFIG_CRYPTO_RMD160 is not set | ||
2193 | # CONFIG_CRYPTO_RMD256 is not set | ||
2194 | # CONFIG_CRYPTO_RMD320 is not set | ||
2144 | CONFIG_CRYPTO_SHA1=y | 2195 | CONFIG_CRYPTO_SHA1=y |
2145 | # CONFIG_CRYPTO_SHA256 is not set | 2196 | # CONFIG_CRYPTO_SHA256 is not set |
2146 | # CONFIG_CRYPTO_SHA512 is not set | 2197 | # CONFIG_CRYPTO_SHA512 is not set |
@@ -2151,7 +2202,7 @@ CONFIG_CRYPTO_SHA1=y | |||
2151 | # Ciphers | 2202 | # Ciphers |
2152 | # | 2203 | # |
2153 | CONFIG_CRYPTO_AES=y | 2204 | CONFIG_CRYPTO_AES=y |
2154 | # CONFIG_CRYPTO_AES_586 is not set | 2205 | CONFIG_CRYPTO_AES_586=y |
2155 | # CONFIG_CRYPTO_ANUBIS is not set | 2206 | # CONFIG_CRYPTO_ANUBIS is not set |
2156 | CONFIG_CRYPTO_ARC4=y | 2207 | CONFIG_CRYPTO_ARC4=y |
2157 | # CONFIG_CRYPTO_BLOWFISH is not set | 2208 | # CONFIG_CRYPTO_BLOWFISH is not set |
@@ -2193,6 +2244,7 @@ CONFIG_GENERIC_FIND_FIRST_BIT=y | |||
2193 | CONFIG_GENERIC_FIND_NEXT_BIT=y | 2244 | CONFIG_GENERIC_FIND_NEXT_BIT=y |
2194 | # CONFIG_CRC_CCITT is not set | 2245 | # CONFIG_CRC_CCITT is not set |
2195 | # CONFIG_CRC16 is not set | 2246 | # CONFIG_CRC16 is not set |
2247 | CONFIG_CRC_T10DIF=y | ||
2196 | # CONFIG_CRC_ITU_T is not set | 2248 | # CONFIG_CRC_ITU_T is not set |
2197 | CONFIG_CRC32=y | 2249 | CONFIG_CRC32=y |
2198 | # CONFIG_CRC7 is not set | 2250 | # CONFIG_CRC7 is not set |
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index a40452429625..e620ea6e2a7a 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig | |||
@@ -1,13 +1,13 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.26-rc1 | 3 | # Linux kernel version: 2.6.27-rc5 |
4 | # Sun May 4 19:59:57 2008 | 4 | # Wed Sep 3 17:13:39 2008 |
5 | # | 5 | # |
6 | CONFIG_64BIT=y | 6 | CONFIG_64BIT=y |
7 | # CONFIG_X86_32 is not set | 7 | # CONFIG_X86_32 is not set |
8 | CONFIG_X86_64=y | 8 | CONFIG_X86_64=y |
9 | CONFIG_X86=y | 9 | CONFIG_X86=y |
10 | CONFIG_DEFCONFIG_LIST="arch/x86/configs/x86_64_defconfig" | 10 | CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" |
11 | # CONFIG_GENERIC_LOCKBREAK is not set | 11 | # CONFIG_GENERIC_LOCKBREAK is not set |
12 | CONFIG_GENERIC_TIME=y | 12 | CONFIG_GENERIC_TIME=y |
13 | CONFIG_GENERIC_CMOS_UPDATE=y | 13 | CONFIG_GENERIC_CMOS_UPDATE=y |
@@ -53,6 +53,7 @@ CONFIG_X86_HT=y | |||
53 | CONFIG_X86_BIOS_REBOOT=y | 53 | CONFIG_X86_BIOS_REBOOT=y |
54 | CONFIG_X86_TRAMPOLINE=y | 54 | CONFIG_X86_TRAMPOLINE=y |
55 | # CONFIG_KTIME_SCALAR is not set | 55 | # CONFIG_KTIME_SCALAR is not set |
56 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
56 | 57 | ||
57 | # | 58 | # |
58 | # General setup | 59 | # General setup |
@@ -82,6 +83,7 @@ CONFIG_CGROUPS=y | |||
82 | CONFIG_CGROUP_NS=y | 83 | CONFIG_CGROUP_NS=y |
83 | # CONFIG_CGROUP_DEVICE is not set | 84 | # CONFIG_CGROUP_DEVICE is not set |
84 | CONFIG_CPUSETS=y | 85 | CONFIG_CPUSETS=y |
86 | CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y | ||
85 | CONFIG_GROUP_SCHED=y | 87 | CONFIG_GROUP_SCHED=y |
86 | CONFIG_FAIR_GROUP_SCHED=y | 88 | CONFIG_FAIR_GROUP_SCHED=y |
87 | # CONFIG_RT_GROUP_SCHED is not set | 89 | # CONFIG_RT_GROUP_SCHED is not set |
@@ -105,7 +107,6 @@ CONFIG_SYSCTL=y | |||
105 | # CONFIG_EMBEDDED is not set | 107 | # CONFIG_EMBEDDED is not set |
106 | CONFIG_UID16=y | 108 | CONFIG_UID16=y |
107 | CONFIG_SYSCTL_SYSCALL=y | 109 | CONFIG_SYSCTL_SYSCALL=y |
108 | CONFIG_SYSCTL_SYSCALL_CHECK=y | ||
109 | CONFIG_KALLSYMS=y | 110 | CONFIG_KALLSYMS=y |
110 | CONFIG_KALLSYMS_ALL=y | 111 | CONFIG_KALLSYMS_ALL=y |
111 | CONFIG_KALLSYMS_EXTRA_PASS=y | 112 | CONFIG_KALLSYMS_EXTRA_PASS=y |
@@ -113,6 +114,7 @@ CONFIG_HOTPLUG=y | |||
113 | CONFIG_PRINTK=y | 114 | CONFIG_PRINTK=y |
114 | CONFIG_BUG=y | 115 | CONFIG_BUG=y |
115 | CONFIG_ELF_CORE=y | 116 | CONFIG_ELF_CORE=y |
117 | CONFIG_PCSPKR_PLATFORM=y | ||
116 | # CONFIG_COMPAT_BRK is not set | 118 | # CONFIG_COMPAT_BRK is not set |
117 | CONFIG_BASE_FULL=y | 119 | CONFIG_BASE_FULL=y |
118 | CONFIG_FUTEX=y | 120 | CONFIG_FUTEX=y |
@@ -132,25 +134,33 @@ CONFIG_MARKERS=y | |||
132 | # CONFIG_OPROFILE is not set | 134 | # CONFIG_OPROFILE is not set |
133 | CONFIG_HAVE_OPROFILE=y | 135 | CONFIG_HAVE_OPROFILE=y |
134 | CONFIG_KPROBES=y | 136 | CONFIG_KPROBES=y |
137 | CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y | ||
135 | CONFIG_KRETPROBES=y | 138 | CONFIG_KRETPROBES=y |
139 | CONFIG_HAVE_IOREMAP_PROT=y | ||
136 | CONFIG_HAVE_KPROBES=y | 140 | CONFIG_HAVE_KPROBES=y |
137 | CONFIG_HAVE_KRETPROBES=y | 141 | CONFIG_HAVE_KRETPROBES=y |
142 | # CONFIG_HAVE_ARCH_TRACEHOOK is not set | ||
138 | # CONFIG_HAVE_DMA_ATTRS is not set | 143 | # CONFIG_HAVE_DMA_ATTRS is not set |
144 | CONFIG_USE_GENERIC_SMP_HELPERS=y | ||
145 | # CONFIG_HAVE_CLK is not set | ||
139 | CONFIG_PROC_PAGE_MONITOR=y | 146 | CONFIG_PROC_PAGE_MONITOR=y |
147 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | ||
140 | CONFIG_SLABINFO=y | 148 | CONFIG_SLABINFO=y |
141 | CONFIG_RT_MUTEXES=y | 149 | CONFIG_RT_MUTEXES=y |
142 | # CONFIG_TINY_SHMEM is not set | 150 | # CONFIG_TINY_SHMEM is not set |
143 | CONFIG_BASE_SMALL=0 | 151 | CONFIG_BASE_SMALL=0 |
144 | CONFIG_MODULES=y | 152 | CONFIG_MODULES=y |
153 | # CONFIG_MODULE_FORCE_LOAD is not set | ||
145 | CONFIG_MODULE_UNLOAD=y | 154 | CONFIG_MODULE_UNLOAD=y |
146 | CONFIG_MODULE_FORCE_UNLOAD=y | 155 | CONFIG_MODULE_FORCE_UNLOAD=y |
147 | # CONFIG_MODVERSIONS is not set | 156 | # CONFIG_MODVERSIONS is not set |
148 | # CONFIG_MODULE_SRCVERSION_ALL is not set | 157 | # CONFIG_MODULE_SRCVERSION_ALL is not set |
149 | # CONFIG_KMOD is not set | 158 | CONFIG_KMOD=y |
150 | CONFIG_STOP_MACHINE=y | 159 | CONFIG_STOP_MACHINE=y |
151 | CONFIG_BLOCK=y | 160 | CONFIG_BLOCK=y |
152 | CONFIG_BLK_DEV_IO_TRACE=y | 161 | CONFIG_BLK_DEV_IO_TRACE=y |
153 | CONFIG_BLK_DEV_BSG=y | 162 | CONFIG_BLK_DEV_BSG=y |
163 | # CONFIG_BLK_DEV_INTEGRITY is not set | ||
154 | CONFIG_BLOCK_COMPAT=y | 164 | CONFIG_BLOCK_COMPAT=y |
155 | 165 | ||
156 | # | 166 | # |
@@ -175,20 +185,15 @@ CONFIG_NO_HZ=y | |||
175 | CONFIG_HIGH_RES_TIMERS=y | 185 | CONFIG_HIGH_RES_TIMERS=y |
176 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | 186 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y |
177 | CONFIG_SMP=y | 187 | CONFIG_SMP=y |
188 | CONFIG_X86_FIND_SMP_CONFIG=y | ||
189 | CONFIG_X86_MPPARSE=y | ||
178 | CONFIG_X86_PC=y | 190 | CONFIG_X86_PC=y |
179 | # CONFIG_X86_ELAN is not set | 191 | # CONFIG_X86_ELAN is not set |
180 | # CONFIG_X86_VOYAGER is not set | 192 | # CONFIG_X86_VOYAGER is not set |
181 | # CONFIG_X86_NUMAQ is not set | ||
182 | # CONFIG_X86_SUMMIT is not set | ||
183 | # CONFIG_X86_BIGSMP is not set | ||
184 | # CONFIG_X86_VISWS is not set | ||
185 | # CONFIG_X86_GENERICARCH is not set | 193 | # CONFIG_X86_GENERICARCH is not set |
186 | # CONFIG_X86_ES7000 is not set | ||
187 | # CONFIG_X86_RDC321X is not set | ||
188 | # CONFIG_X86_VSMP is not set | 194 | # CONFIG_X86_VSMP is not set |
189 | # CONFIG_PARAVIRT_GUEST is not set | 195 | # CONFIG_PARAVIRT_GUEST is not set |
190 | CONFIG_MEMTEST_BOOTPARAM=y | 196 | # CONFIG_MEMTEST is not set |
191 | CONFIG_MEMTEST_BOOTPARAM_VALUE=0 | ||
192 | # CONFIG_M386 is not set | 197 | # CONFIG_M386 is not set |
193 | # CONFIG_M486 is not set | 198 | # CONFIG_M486 is not set |
194 | # CONFIG_M586 is not set | 199 | # CONFIG_M586 is not set |
@@ -213,18 +218,16 @@ CONFIG_MEMTEST_BOOTPARAM_VALUE=0 | |||
213 | # CONFIG_MVIAC3_2 is not set | 218 | # CONFIG_MVIAC3_2 is not set |
214 | # CONFIG_MVIAC7 is not set | 219 | # CONFIG_MVIAC7 is not set |
215 | # CONFIG_MPSC is not set | 220 | # CONFIG_MPSC is not set |
216 | CONFIG_MCORE2=y | 221 | # CONFIG_MCORE2 is not set |
217 | # CONFIG_GENERIC_CPU is not set | 222 | CONFIG_GENERIC_CPU=y |
218 | CONFIG_X86_CPU=y | 223 | CONFIG_X86_CPU=y |
219 | CONFIG_X86_L1_CACHE_BYTES=64 | 224 | CONFIG_X86_L1_CACHE_BYTES=128 |
220 | CONFIG_X86_INTERNODE_CACHE_BYTES=64 | 225 | CONFIG_X86_INTERNODE_CACHE_BYTES=128 |
221 | CONFIG_X86_CMPXCHG=y | 226 | CONFIG_X86_CMPXCHG=y |
222 | CONFIG_X86_L1_CACHE_SHIFT=6 | 227 | CONFIG_X86_L1_CACHE_SHIFT=7 |
223 | CONFIG_X86_GOOD_APIC=y | 228 | CONFIG_X86_WP_WORKS_OK=y |
224 | CONFIG_X86_INTEL_USERCOPY=y | ||
225 | CONFIG_X86_USE_PPRO_CHECKSUM=y | ||
226 | CONFIG_X86_P6_NOP=y | ||
227 | CONFIG_X86_TSC=y | 229 | CONFIG_X86_TSC=y |
230 | CONFIG_X86_CMPXCHG64=y | ||
228 | CONFIG_X86_CMOV=y | 231 | CONFIG_X86_CMOV=y |
229 | CONFIG_X86_MINIMUM_CPU_FAMILY=64 | 232 | CONFIG_X86_MINIMUM_CPU_FAMILY=64 |
230 | CONFIG_X86_DEBUGCTLMSR=y | 233 | CONFIG_X86_DEBUGCTLMSR=y |
@@ -234,10 +237,11 @@ CONFIG_DMI=y | |||
234 | CONFIG_GART_IOMMU=y | 237 | CONFIG_GART_IOMMU=y |
235 | CONFIG_CALGARY_IOMMU=y | 238 | CONFIG_CALGARY_IOMMU=y |
236 | CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y | 239 | CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y |
240 | CONFIG_AMD_IOMMU=y | ||
237 | CONFIG_SWIOTLB=y | 241 | CONFIG_SWIOTLB=y |
238 | CONFIG_IOMMU_HELPER=y | 242 | CONFIG_IOMMU_HELPER=y |
239 | CONFIG_NR_CPUS=4 | 243 | CONFIG_NR_CPUS=64 |
240 | # CONFIG_SCHED_SMT is not set | 244 | CONFIG_SCHED_SMT=y |
241 | CONFIG_SCHED_MC=y | 245 | CONFIG_SCHED_MC=y |
242 | # CONFIG_PREEMPT_NONE is not set | 246 | # CONFIG_PREEMPT_NONE is not set |
243 | CONFIG_PREEMPT_VOLUNTARY=y | 247 | CONFIG_PREEMPT_VOLUNTARY=y |
@@ -246,7 +250,8 @@ CONFIG_X86_LOCAL_APIC=y | |||
246 | CONFIG_X86_IO_APIC=y | 250 | CONFIG_X86_IO_APIC=y |
247 | # CONFIG_X86_MCE is not set | 251 | # CONFIG_X86_MCE is not set |
248 | # CONFIG_I8K is not set | 252 | # CONFIG_I8K is not set |
249 | # CONFIG_MICROCODE is not set | 253 | CONFIG_MICROCODE=y |
254 | CONFIG_MICROCODE_OLD_INTERFACE=y | ||
250 | CONFIG_X86_MSR=y | 255 | CONFIG_X86_MSR=y |
251 | CONFIG_X86_CPUID=y | 256 | CONFIG_X86_CPUID=y |
252 | CONFIG_NUMA=y | 257 | CONFIG_NUMA=y |
@@ -281,7 +286,8 @@ CONFIG_ZONE_DMA_FLAG=1 | |||
281 | CONFIG_BOUNCE=y | 286 | CONFIG_BOUNCE=y |
282 | CONFIG_VIRT_TO_BUS=y | 287 | CONFIG_VIRT_TO_BUS=y |
283 | CONFIG_MTRR=y | 288 | CONFIG_MTRR=y |
284 | # CONFIG_X86_PAT is not set | 289 | # CONFIG_MTRR_SANITIZER is not set |
290 | CONFIG_X86_PAT=y | ||
285 | CONFIG_EFI=y | 291 | CONFIG_EFI=y |
286 | CONFIG_SECCOMP=y | 292 | CONFIG_SECCOMP=y |
287 | # CONFIG_HZ_100 is not set | 293 | # CONFIG_HZ_100 is not set |
@@ -313,6 +319,7 @@ CONFIG_PM_TRACE_RTC=y | |||
313 | CONFIG_PM_SLEEP_SMP=y | 319 | CONFIG_PM_SLEEP_SMP=y |
314 | CONFIG_PM_SLEEP=y | 320 | CONFIG_PM_SLEEP=y |
315 | CONFIG_SUSPEND=y | 321 | CONFIG_SUSPEND=y |
322 | # CONFIG_PM_TEST_SUSPEND is not set | ||
316 | CONFIG_SUSPEND_FREEZER=y | 323 | CONFIG_SUSPEND_FREEZER=y |
317 | CONFIG_HIBERNATION=y | 324 | CONFIG_HIBERNATION=y |
318 | CONFIG_PM_STD_PARTITION="" | 325 | CONFIG_PM_STD_PARTITION="" |
@@ -339,6 +346,7 @@ CONFIG_ACPI_NUMA=y | |||
339 | CONFIG_ACPI_BLACKLIST_YEAR=0 | 346 | CONFIG_ACPI_BLACKLIST_YEAR=0 |
340 | # CONFIG_ACPI_DEBUG is not set | 347 | # CONFIG_ACPI_DEBUG is not set |
341 | CONFIG_ACPI_EC=y | 348 | CONFIG_ACPI_EC=y |
349 | # CONFIG_ACPI_PCI_SLOT is not set | ||
342 | CONFIG_ACPI_POWER=y | 350 | CONFIG_ACPI_POWER=y |
343 | CONFIG_ACPI_SYSTEM=y | 351 | CONFIG_ACPI_SYSTEM=y |
344 | CONFIG_X86_PM_TIMER=y | 352 | CONFIG_X86_PM_TIMER=y |
@@ -437,10 +445,6 @@ CONFIG_IA32_EMULATION=y | |||
437 | CONFIG_COMPAT=y | 445 | CONFIG_COMPAT=y |
438 | CONFIG_COMPAT_FOR_U64_ALIGNMENT=y | 446 | CONFIG_COMPAT_FOR_U64_ALIGNMENT=y |
439 | CONFIG_SYSVIPC_COMPAT=y | 447 | CONFIG_SYSVIPC_COMPAT=y |
440 | |||
441 | # | ||
442 | # Networking | ||
443 | # | ||
444 | CONFIG_NET=y | 448 | CONFIG_NET=y |
445 | 449 | ||
446 | # | 450 | # |
@@ -464,7 +468,10 @@ CONFIG_IP_FIB_HASH=y | |||
464 | CONFIG_IP_MULTIPLE_TABLES=y | 468 | CONFIG_IP_MULTIPLE_TABLES=y |
465 | CONFIG_IP_ROUTE_MULTIPATH=y | 469 | CONFIG_IP_ROUTE_MULTIPATH=y |
466 | CONFIG_IP_ROUTE_VERBOSE=y | 470 | CONFIG_IP_ROUTE_VERBOSE=y |
467 | # CONFIG_IP_PNP is not set | 471 | CONFIG_IP_PNP=y |
472 | CONFIG_IP_PNP_DHCP=y | ||
473 | CONFIG_IP_PNP_BOOTP=y | ||
474 | CONFIG_IP_PNP_RARP=y | ||
468 | # CONFIG_NET_IPIP is not set | 475 | # CONFIG_NET_IPIP is not set |
469 | # CONFIG_NET_IPGRE is not set | 476 | # CONFIG_NET_IPGRE is not set |
470 | CONFIG_IP_MROUTE=y | 477 | CONFIG_IP_MROUTE=y |
@@ -607,7 +614,6 @@ CONFIG_NET_SCHED=y | |||
607 | # CONFIG_NET_SCH_HTB is not set | 614 | # CONFIG_NET_SCH_HTB is not set |
608 | # CONFIG_NET_SCH_HFSC is not set | 615 | # CONFIG_NET_SCH_HFSC is not set |
609 | # CONFIG_NET_SCH_PRIO is not set | 616 | # CONFIG_NET_SCH_PRIO is not set |
610 | # CONFIG_NET_SCH_RR is not set | ||
611 | # CONFIG_NET_SCH_RED is not set | 617 | # CONFIG_NET_SCH_RED is not set |
612 | # CONFIG_NET_SCH_SFQ is not set | 618 | # CONFIG_NET_SCH_SFQ is not set |
613 | # CONFIG_NET_SCH_TEQL is not set | 619 | # CONFIG_NET_SCH_TEQL is not set |
@@ -669,28 +675,19 @@ CONFIG_FIB_RULES=y | |||
669 | CONFIG_CFG80211=y | 675 | CONFIG_CFG80211=y |
670 | CONFIG_NL80211=y | 676 | CONFIG_NL80211=y |
671 | CONFIG_WIRELESS_EXT=y | 677 | CONFIG_WIRELESS_EXT=y |
678 | CONFIG_WIRELESS_EXT_SYSFS=y | ||
672 | CONFIG_MAC80211=y | 679 | CONFIG_MAC80211=y |
673 | 680 | ||
674 | # | 681 | # |
675 | # Rate control algorithm selection | 682 | # Rate control algorithm selection |
676 | # | 683 | # |
684 | CONFIG_MAC80211_RC_PID=y | ||
677 | CONFIG_MAC80211_RC_DEFAULT_PID=y | 685 | CONFIG_MAC80211_RC_DEFAULT_PID=y |
678 | # CONFIG_MAC80211_RC_DEFAULT_NONE is not set | ||
679 | |||
680 | # | ||
681 | # Selecting 'y' for an algorithm will | ||
682 | # | ||
683 | |||
684 | # | ||
685 | # build the algorithm into mac80211. | ||
686 | # | ||
687 | CONFIG_MAC80211_RC_DEFAULT="pid" | 686 | CONFIG_MAC80211_RC_DEFAULT="pid" |
688 | CONFIG_MAC80211_RC_PID=y | ||
689 | # CONFIG_MAC80211_MESH is not set | 687 | # CONFIG_MAC80211_MESH is not set |
690 | CONFIG_MAC80211_LEDS=y | 688 | CONFIG_MAC80211_LEDS=y |
691 | # CONFIG_MAC80211_DEBUGFS is not set | 689 | # CONFIG_MAC80211_DEBUGFS is not set |
692 | # CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT is not set | 690 | # CONFIG_MAC80211_DEBUG_MENU is not set |
693 | # CONFIG_MAC80211_DEBUG is not set | ||
694 | # CONFIG_IEEE80211 is not set | 691 | # CONFIG_IEEE80211 is not set |
695 | # CONFIG_RFKILL is not set | 692 | # CONFIG_RFKILL is not set |
696 | # CONFIG_NET_9P is not set | 693 | # CONFIG_NET_9P is not set |
@@ -706,6 +703,8 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
706 | CONFIG_STANDALONE=y | 703 | CONFIG_STANDALONE=y |
707 | CONFIG_PREVENT_FIRMWARE_BUILD=y | 704 | CONFIG_PREVENT_FIRMWARE_BUILD=y |
708 | CONFIG_FW_LOADER=y | 705 | CONFIG_FW_LOADER=y |
706 | CONFIG_FIRMWARE_IN_KERNEL=y | ||
707 | CONFIG_EXTRA_FIRMWARE="" | ||
709 | # CONFIG_DEBUG_DRIVER is not set | 708 | # CONFIG_DEBUG_DRIVER is not set |
710 | CONFIG_DEBUG_DEVRES=y | 709 | CONFIG_DEBUG_DEVRES=y |
711 | # CONFIG_SYS_HYPERVISOR is not set | 710 | # CONFIG_SYS_HYPERVISOR is not set |
@@ -738,6 +737,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 | |||
738 | # CONFIG_BLK_DEV_XIP is not set | 737 | # CONFIG_BLK_DEV_XIP is not set |
739 | # CONFIG_CDROM_PKTCDVD is not set | 738 | # CONFIG_CDROM_PKTCDVD is not set |
740 | # CONFIG_ATA_OVER_ETH is not set | 739 | # CONFIG_ATA_OVER_ETH is not set |
740 | # CONFIG_BLK_DEV_HD is not set | ||
741 | CONFIG_MISC_DEVICES=y | 741 | CONFIG_MISC_DEVICES=y |
742 | # CONFIG_IBM_ASM is not set | 742 | # CONFIG_IBM_ASM is not set |
743 | # CONFIG_PHANTOM is not set | 743 | # CONFIG_PHANTOM is not set |
@@ -748,10 +748,14 @@ CONFIG_MISC_DEVICES=y | |||
748 | # CONFIG_ASUS_LAPTOP is not set | 748 | # CONFIG_ASUS_LAPTOP is not set |
749 | # CONFIG_FUJITSU_LAPTOP is not set | 749 | # CONFIG_FUJITSU_LAPTOP is not set |
750 | # CONFIG_MSI_LAPTOP is not set | 750 | # CONFIG_MSI_LAPTOP is not set |
751 | # CONFIG_COMPAL_LAPTOP is not set | ||
751 | # CONFIG_SONY_LAPTOP is not set | 752 | # CONFIG_SONY_LAPTOP is not set |
752 | # CONFIG_THINKPAD_ACPI is not set | 753 | # CONFIG_THINKPAD_ACPI is not set |
753 | # CONFIG_INTEL_MENLOW is not set | 754 | # CONFIG_INTEL_MENLOW is not set |
754 | # CONFIG_ENCLOSURE_SERVICES is not set | 755 | # CONFIG_ENCLOSURE_SERVICES is not set |
756 | # CONFIG_SGI_XP is not set | ||
757 | # CONFIG_HP_ILO is not set | ||
758 | # CONFIG_SGI_GRU is not set | ||
755 | CONFIG_HAVE_IDE=y | 759 | CONFIG_HAVE_IDE=y |
756 | # CONFIG_IDE is not set | 760 | # CONFIG_IDE is not set |
757 | 761 | ||
@@ -790,12 +794,13 @@ CONFIG_SCSI_WAIT_SCAN=m | |||
790 | # | 794 | # |
791 | CONFIG_SCSI_SPI_ATTRS=y | 795 | CONFIG_SCSI_SPI_ATTRS=y |
792 | # CONFIG_SCSI_FC_ATTRS is not set | 796 | # CONFIG_SCSI_FC_ATTRS is not set |
793 | # CONFIG_SCSI_ISCSI_ATTRS is not set | 797 | CONFIG_SCSI_ISCSI_ATTRS=y |
794 | # CONFIG_SCSI_SAS_ATTRS is not set | 798 | # CONFIG_SCSI_SAS_ATTRS is not set |
795 | # CONFIG_SCSI_SAS_LIBSAS is not set | 799 | # CONFIG_SCSI_SAS_LIBSAS is not set |
796 | # CONFIG_SCSI_SRP_ATTRS is not set | 800 | # CONFIG_SCSI_SRP_ATTRS is not set |
797 | # CONFIG_SCSI_LOWLEVEL is not set | 801 | # CONFIG_SCSI_LOWLEVEL is not set |
798 | # CONFIG_SCSI_LOWLEVEL_PCMCIA is not set | 802 | # CONFIG_SCSI_LOWLEVEL_PCMCIA is not set |
803 | # CONFIG_SCSI_DH is not set | ||
799 | CONFIG_ATA=y | 804 | CONFIG_ATA=y |
800 | # CONFIG_ATA_NONSTANDARD is not set | 805 | # CONFIG_ATA_NONSTANDARD is not set |
801 | CONFIG_ATA_ACPI=y | 806 | CONFIG_ATA_ACPI=y |
@@ -857,6 +862,7 @@ CONFIG_PATA_OLDPIIX=y | |||
857 | # CONFIG_PATA_SIS is not set | 862 | # CONFIG_PATA_SIS is not set |
858 | # CONFIG_PATA_VIA is not set | 863 | # CONFIG_PATA_VIA is not set |
859 | # CONFIG_PATA_WINBOND is not set | 864 | # CONFIG_PATA_WINBOND is not set |
865 | CONFIG_PATA_SCH=y | ||
860 | CONFIG_MD=y | 866 | CONFIG_MD=y |
861 | CONFIG_BLK_DEV_MD=y | 867 | CONFIG_BLK_DEV_MD=y |
862 | # CONFIG_MD_LINEAR is not set | 868 | # CONFIG_MD_LINEAR is not set |
@@ -880,13 +886,16 @@ CONFIG_DM_ZERO=y | |||
880 | # | 886 | # |
881 | # IEEE 1394 (FireWire) support | 887 | # IEEE 1394 (FireWire) support |
882 | # | 888 | # |
889 | |||
890 | # | ||
891 | # Enable only one of the two stacks, unless you know what you are doing | ||
892 | # | ||
883 | # CONFIG_FIREWIRE is not set | 893 | # CONFIG_FIREWIRE is not set |
884 | # CONFIG_IEEE1394 is not set | 894 | # CONFIG_IEEE1394 is not set |
885 | # CONFIG_I2O is not set | 895 | # CONFIG_I2O is not set |
886 | CONFIG_MACINTOSH_DRIVERS=y | 896 | CONFIG_MACINTOSH_DRIVERS=y |
887 | CONFIG_MAC_EMUMOUSEBTN=y | 897 | CONFIG_MAC_EMUMOUSEBTN=y |
888 | CONFIG_NETDEVICES=y | 898 | CONFIG_NETDEVICES=y |
889 | # CONFIG_NETDEVICES_MULTIQUEUE is not set | ||
890 | # CONFIG_IFB is not set | 899 | # CONFIG_IFB is not set |
891 | # CONFIG_DUMMY is not set | 900 | # CONFIG_DUMMY is not set |
892 | # CONFIG_BONDING is not set | 901 | # CONFIG_BONDING is not set |
@@ -896,7 +905,23 @@ CONFIG_NETDEVICES=y | |||
896 | # CONFIG_VETH is not set | 905 | # CONFIG_VETH is not set |
897 | # CONFIG_NET_SB1000 is not set | 906 | # CONFIG_NET_SB1000 is not set |
898 | # CONFIG_ARCNET is not set | 907 | # CONFIG_ARCNET is not set |
899 | # CONFIG_PHYLIB is not set | 908 | CONFIG_PHYLIB=y |
909 | |||
910 | # | ||
911 | # MII PHY device drivers | ||
912 | # | ||
913 | # CONFIG_MARVELL_PHY is not set | ||
914 | # CONFIG_DAVICOM_PHY is not set | ||
915 | # CONFIG_QSEMI_PHY is not set | ||
916 | # CONFIG_LXT_PHY is not set | ||
917 | # CONFIG_CICADA_PHY is not set | ||
918 | # CONFIG_VITESSE_PHY is not set | ||
919 | # CONFIG_SMSC_PHY is not set | ||
920 | # CONFIG_BROADCOM_PHY is not set | ||
921 | # CONFIG_ICPLUS_PHY is not set | ||
922 | # CONFIG_REALTEK_PHY is not set | ||
923 | # CONFIG_FIXED_PHY is not set | ||
924 | # CONFIG_MDIO_BITBANG is not set | ||
900 | CONFIG_NET_ETHERNET=y | 925 | CONFIG_NET_ETHERNET=y |
901 | CONFIG_MII=y | 926 | CONFIG_MII=y |
902 | # CONFIG_HAPPYMEAL is not set | 927 | # CONFIG_HAPPYMEAL is not set |
@@ -940,16 +965,15 @@ CONFIG_8139TOO_PIO=y | |||
940 | # CONFIG_SIS900 is not set | 965 | # CONFIG_SIS900 is not set |
941 | # CONFIG_EPIC100 is not set | 966 | # CONFIG_EPIC100 is not set |
942 | # CONFIG_SUNDANCE is not set | 967 | # CONFIG_SUNDANCE is not set |
968 | # CONFIG_TLAN is not set | ||
943 | # CONFIG_VIA_RHINE is not set | 969 | # CONFIG_VIA_RHINE is not set |
944 | # CONFIG_SC92031 is not set | 970 | # CONFIG_SC92031 is not set |
945 | CONFIG_NETDEV_1000=y | 971 | CONFIG_NETDEV_1000=y |
946 | # CONFIG_ACENIC is not set | 972 | # CONFIG_ACENIC is not set |
947 | # CONFIG_DL2K is not set | 973 | # CONFIG_DL2K is not set |
948 | CONFIG_E1000=y | 974 | CONFIG_E1000=y |
949 | # CONFIG_E1000_NAPI is not set | ||
950 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | 975 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set |
951 | # CONFIG_E1000E is not set | 976 | # CONFIG_E1000E is not set |
952 | # CONFIG_E1000E_ENABLED is not set | ||
953 | # CONFIG_IP1000 is not set | 977 | # CONFIG_IP1000 is not set |
954 | # CONFIG_IGB is not set | 978 | # CONFIG_IGB is not set |
955 | # CONFIG_NS83820 is not set | 979 | # CONFIG_NS83820 is not set |
@@ -965,6 +989,7 @@ CONFIG_TIGON3=y | |||
965 | # CONFIG_BNX2 is not set | 989 | # CONFIG_BNX2 is not set |
966 | # CONFIG_QLA3XXX is not set | 990 | # CONFIG_QLA3XXX is not set |
967 | # CONFIG_ATL1 is not set | 991 | # CONFIG_ATL1 is not set |
992 | # CONFIG_ATL1E is not set | ||
968 | CONFIG_NETDEV_10000=y | 993 | CONFIG_NETDEV_10000=y |
969 | # CONFIG_CHELSIO_T1 is not set | 994 | # CONFIG_CHELSIO_T1 is not set |
970 | # CONFIG_CHELSIO_T3 is not set | 995 | # CONFIG_CHELSIO_T3 is not set |
@@ -1003,13 +1028,14 @@ CONFIG_WLAN_80211=y | |||
1003 | # CONFIG_RTL8180 is not set | 1028 | # CONFIG_RTL8180 is not set |
1004 | # CONFIG_RTL8187 is not set | 1029 | # CONFIG_RTL8187 is not set |
1005 | # CONFIG_ADM8211 is not set | 1030 | # CONFIG_ADM8211 is not set |
1031 | # CONFIG_MAC80211_HWSIM is not set | ||
1006 | # CONFIG_P54_COMMON is not set | 1032 | # CONFIG_P54_COMMON is not set |
1007 | CONFIG_ATH5K=y | 1033 | CONFIG_ATH5K=y |
1008 | # CONFIG_ATH5K_DEBUG is not set | 1034 | # CONFIG_ATH5K_DEBUG is not set |
1009 | # CONFIG_IWLWIFI is not set | 1035 | # CONFIG_ATH9K is not set |
1010 | # CONFIG_IWLCORE is not set | 1036 | # CONFIG_IWLCORE is not set |
1011 | # CONFIG_IWLWIFI_LEDS is not set | 1037 | # CONFIG_IWLWIFI_LEDS is not set |
1012 | # CONFIG_IWL4965 is not set | 1038 | # CONFIG_IWLAGN is not set |
1013 | # CONFIG_IWL3945 is not set | 1039 | # CONFIG_IWL3945 is not set |
1014 | # CONFIG_HOSTAP is not set | 1040 | # CONFIG_HOSTAP is not set |
1015 | # CONFIG_B43 is not set | 1041 | # CONFIG_B43 is not set |
@@ -1088,6 +1114,7 @@ CONFIG_MOUSE_PS2_TRACKPOINT=y | |||
1088 | # CONFIG_MOUSE_PS2_TOUCHKIT is not set | 1114 | # CONFIG_MOUSE_PS2_TOUCHKIT is not set |
1089 | # CONFIG_MOUSE_SERIAL is not set | 1115 | # CONFIG_MOUSE_SERIAL is not set |
1090 | # CONFIG_MOUSE_APPLETOUCH is not set | 1116 | # CONFIG_MOUSE_APPLETOUCH is not set |
1117 | # CONFIG_MOUSE_BCM5974 is not set | ||
1091 | # CONFIG_MOUSE_VSXXXAA is not set | 1118 | # CONFIG_MOUSE_VSXXXAA is not set |
1092 | CONFIG_INPUT_JOYSTICK=y | 1119 | CONFIG_INPUT_JOYSTICK=y |
1093 | # CONFIG_JOYSTICK_ANALOG is not set | 1120 | # CONFIG_JOYSTICK_ANALOG is not set |
@@ -1122,12 +1149,14 @@ CONFIG_INPUT_TOUCHSCREEN=y | |||
1122 | # CONFIG_TOUCHSCREEN_GUNZE is not set | 1149 | # CONFIG_TOUCHSCREEN_GUNZE is not set |
1123 | # CONFIG_TOUCHSCREEN_ELO is not set | 1150 | # CONFIG_TOUCHSCREEN_ELO is not set |
1124 | # CONFIG_TOUCHSCREEN_MTOUCH is not set | 1151 | # CONFIG_TOUCHSCREEN_MTOUCH is not set |
1152 | # CONFIG_TOUCHSCREEN_INEXIO is not set | ||
1125 | # CONFIG_TOUCHSCREEN_MK712 is not set | 1153 | # CONFIG_TOUCHSCREEN_MK712 is not set |
1126 | # CONFIG_TOUCHSCREEN_PENMOUNT is not set | 1154 | # CONFIG_TOUCHSCREEN_PENMOUNT is not set |
1127 | # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set | 1155 | # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set |
1128 | # CONFIG_TOUCHSCREEN_TOUCHWIN is not set | 1156 | # CONFIG_TOUCHSCREEN_TOUCHWIN is not set |
1129 | # CONFIG_TOUCHSCREEN_UCB1400 is not set | 1157 | # CONFIG_TOUCHSCREEN_UCB1400 is not set |
1130 | # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set | 1158 | # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set |
1159 | # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set | ||
1131 | CONFIG_INPUT_MISC=y | 1160 | CONFIG_INPUT_MISC=y |
1132 | # CONFIG_INPUT_PCSPKR is not set | 1161 | # CONFIG_INPUT_PCSPKR is not set |
1133 | # CONFIG_INPUT_APANEL is not set | 1162 | # CONFIG_INPUT_APANEL is not set |
@@ -1155,6 +1184,7 @@ CONFIG_SERIO_LIBPS2=y | |||
1155 | # Character devices | 1184 | # Character devices |
1156 | # | 1185 | # |
1157 | CONFIG_VT=y | 1186 | CONFIG_VT=y |
1187 | CONFIG_CONSOLE_TRANSLATIONS=y | ||
1158 | CONFIG_VT_CONSOLE=y | 1188 | CONFIG_VT_CONSOLE=y |
1159 | CONFIG_HW_CONSOLE=y | 1189 | CONFIG_HW_CONSOLE=y |
1160 | CONFIG_VT_HW_CONSOLE_BINDING=y | 1190 | CONFIG_VT_HW_CONSOLE_BINDING=y |
@@ -1222,7 +1252,6 @@ CONFIG_NVRAM=y | |||
1222 | # CONFIG_PC8736x_GPIO is not set | 1252 | # CONFIG_PC8736x_GPIO is not set |
1223 | # CONFIG_RAW_DRIVER is not set | 1253 | # CONFIG_RAW_DRIVER is not set |
1224 | CONFIG_HPET=y | 1254 | CONFIG_HPET=y |
1225 | # CONFIG_HPET_RTC_IRQ is not set | ||
1226 | # CONFIG_HPET_MMAP is not set | 1255 | # CONFIG_HPET_MMAP is not set |
1227 | # CONFIG_HANGCHECK_TIMER is not set | 1256 | # CONFIG_HANGCHECK_TIMER is not set |
1228 | # CONFIG_TCG_TPM is not set | 1257 | # CONFIG_TCG_TPM is not set |
@@ -1231,42 +1260,63 @@ CONFIG_DEVPORT=y | |||
1231 | CONFIG_I2C=y | 1260 | CONFIG_I2C=y |
1232 | CONFIG_I2C_BOARDINFO=y | 1261 | CONFIG_I2C_BOARDINFO=y |
1233 | # CONFIG_I2C_CHARDEV is not set | 1262 | # CONFIG_I2C_CHARDEV is not set |
1263 | CONFIG_I2C_HELPER_AUTO=y | ||
1234 | 1264 | ||
1235 | # | 1265 | # |
1236 | # I2C Hardware Bus support | 1266 | # I2C Hardware Bus support |
1237 | # | 1267 | # |
1268 | |||
1269 | # | ||
1270 | # PC SMBus host controller drivers | ||
1271 | # | ||
1238 | # CONFIG_I2C_ALI1535 is not set | 1272 | # CONFIG_I2C_ALI1535 is not set |
1239 | # CONFIG_I2C_ALI1563 is not set | 1273 | # CONFIG_I2C_ALI1563 is not set |
1240 | # CONFIG_I2C_ALI15X3 is not set | 1274 | # CONFIG_I2C_ALI15X3 is not set |
1241 | # CONFIG_I2C_AMD756 is not set | 1275 | # CONFIG_I2C_AMD756 is not set |
1242 | # CONFIG_I2C_AMD8111 is not set | 1276 | # CONFIG_I2C_AMD8111 is not set |
1243 | CONFIG_I2C_I801=y | 1277 | CONFIG_I2C_I801=y |
1244 | # CONFIG_I2C_I810 is not set | 1278 | # CONFIG_I2C_ISCH is not set |
1245 | # CONFIG_I2C_PIIX4 is not set | 1279 | # CONFIG_I2C_PIIX4 is not set |
1246 | # CONFIG_I2C_NFORCE2 is not set | 1280 | # CONFIG_I2C_NFORCE2 is not set |
1247 | # CONFIG_I2C_OCORES is not set | ||
1248 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
1249 | # CONFIG_I2C_PROSAVAGE is not set | ||
1250 | # CONFIG_I2C_SAVAGE4 is not set | ||
1251 | # CONFIG_I2C_SIMTEC is not set | ||
1252 | # CONFIG_I2C_SIS5595 is not set | 1281 | # CONFIG_I2C_SIS5595 is not set |
1253 | # CONFIG_I2C_SIS630 is not set | 1282 | # CONFIG_I2C_SIS630 is not set |
1254 | # CONFIG_I2C_SIS96X is not set | 1283 | # CONFIG_I2C_SIS96X is not set |
1255 | # CONFIG_I2C_TAOS_EVM is not set | ||
1256 | # CONFIG_I2C_STUB is not set | ||
1257 | # CONFIG_I2C_TINY_USB is not set | ||
1258 | # CONFIG_I2C_VIA is not set | 1284 | # CONFIG_I2C_VIA is not set |
1259 | # CONFIG_I2C_VIAPRO is not set | 1285 | # CONFIG_I2C_VIAPRO is not set |
1286 | |||
1287 | # | ||
1288 | # I2C system bus drivers (mostly embedded / system-on-chip) | ||
1289 | # | ||
1290 | # CONFIG_I2C_OCORES is not set | ||
1291 | # CONFIG_I2C_SIMTEC is not set | ||
1292 | |||
1293 | # | ||
1294 | # External I2C/SMBus adapter drivers | ||
1295 | # | ||
1296 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
1297 | # CONFIG_I2C_TAOS_EVM is not set | ||
1298 | # CONFIG_I2C_TINY_USB is not set | ||
1299 | |||
1300 | # | ||
1301 | # Graphics adapter I2C/DDC channel drivers | ||
1302 | # | ||
1260 | # CONFIG_I2C_VOODOO3 is not set | 1303 | # CONFIG_I2C_VOODOO3 is not set |
1304 | |||
1305 | # | ||
1306 | # Other I2C/SMBus bus drivers | ||
1307 | # | ||
1261 | # CONFIG_I2C_PCA_PLATFORM is not set | 1308 | # CONFIG_I2C_PCA_PLATFORM is not set |
1309 | # CONFIG_I2C_STUB is not set | ||
1262 | 1310 | ||
1263 | # | 1311 | # |
1264 | # Miscellaneous I2C Chip support | 1312 | # Miscellaneous I2C Chip support |
1265 | # | 1313 | # |
1266 | # CONFIG_DS1682 is not set | 1314 | # CONFIG_DS1682 is not set |
1315 | # CONFIG_AT24 is not set | ||
1267 | # CONFIG_SENSORS_EEPROM is not set | 1316 | # CONFIG_SENSORS_EEPROM is not set |
1268 | # CONFIG_SENSORS_PCF8574 is not set | 1317 | # CONFIG_SENSORS_PCF8574 is not set |
1269 | # CONFIG_PCF8575 is not set | 1318 | # CONFIG_PCF8575 is not set |
1319 | # CONFIG_SENSORS_PCA9539 is not set | ||
1270 | # CONFIG_SENSORS_PCF8591 is not set | 1320 | # CONFIG_SENSORS_PCF8591 is not set |
1271 | # CONFIG_SENSORS_MAX6875 is not set | 1321 | # CONFIG_SENSORS_MAX6875 is not set |
1272 | # CONFIG_SENSORS_TSL2550 is not set | 1322 | # CONFIG_SENSORS_TSL2550 is not set |
@@ -1275,6 +1325,8 @@ CONFIG_I2C_I801=y | |||
1275 | # CONFIG_I2C_DEBUG_BUS is not set | 1325 | # CONFIG_I2C_DEBUG_BUS is not set |
1276 | # CONFIG_I2C_DEBUG_CHIP is not set | 1326 | # CONFIG_I2C_DEBUG_CHIP is not set |
1277 | # CONFIG_SPI is not set | 1327 | # CONFIG_SPI is not set |
1328 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | ||
1329 | # CONFIG_GPIOLIB is not set | ||
1278 | # CONFIG_W1 is not set | 1330 | # CONFIG_W1 is not set |
1279 | CONFIG_POWER_SUPPLY=y | 1331 | CONFIG_POWER_SUPPLY=y |
1280 | # CONFIG_POWER_SUPPLY_DEBUG is not set | 1332 | # CONFIG_POWER_SUPPLY_DEBUG is not set |
@@ -1335,8 +1387,10 @@ CONFIG_SSB_POSSIBLE=y | |||
1335 | # | 1387 | # |
1336 | # Multifunction device drivers | 1388 | # Multifunction device drivers |
1337 | # | 1389 | # |
1390 | # CONFIG_MFD_CORE is not set | ||
1338 | # CONFIG_MFD_SM501 is not set | 1391 | # CONFIG_MFD_SM501 is not set |
1339 | # CONFIG_HTC_PASIC3 is not set | 1392 | # CONFIG_HTC_PASIC3 is not set |
1393 | # CONFIG_MFD_TMIO is not set | ||
1340 | 1394 | ||
1341 | # | 1395 | # |
1342 | # Multimedia devices | 1396 | # Multimedia devices |
@@ -1347,6 +1401,7 @@ CONFIG_SSB_POSSIBLE=y | |||
1347 | # | 1401 | # |
1348 | # CONFIG_VIDEO_DEV is not set | 1402 | # CONFIG_VIDEO_DEV is not set |
1349 | # CONFIG_DVB_CORE is not set | 1403 | # CONFIG_DVB_CORE is not set |
1404 | # CONFIG_VIDEO_MEDIA is not set | ||
1350 | 1405 | ||
1351 | # | 1406 | # |
1352 | # Multimedia drivers | 1407 | # Multimedia drivers |
@@ -1387,7 +1442,6 @@ CONFIG_FB_CFB_IMAGEBLIT=y | |||
1387 | # CONFIG_FB_SYS_IMAGEBLIT is not set | 1442 | # CONFIG_FB_SYS_IMAGEBLIT is not set |
1388 | # CONFIG_FB_FOREIGN_ENDIAN is not set | 1443 | # CONFIG_FB_FOREIGN_ENDIAN is not set |
1389 | # CONFIG_FB_SYS_FOPS is not set | 1444 | # CONFIG_FB_SYS_FOPS is not set |
1390 | CONFIG_FB_DEFERRED_IO=y | ||
1391 | # CONFIG_FB_SVGALIB is not set | 1445 | # CONFIG_FB_SVGALIB is not set |
1392 | # CONFIG_FB_MACMODES is not set | 1446 | # CONFIG_FB_MACMODES is not set |
1393 | # CONFIG_FB_BACKLIGHT is not set | 1447 | # CONFIG_FB_BACKLIGHT is not set |
@@ -1430,6 +1484,7 @@ CONFIG_FB_EFI=y | |||
1430 | # CONFIG_FB_TRIDENT is not set | 1484 | # CONFIG_FB_TRIDENT is not set |
1431 | # CONFIG_FB_ARK is not set | 1485 | # CONFIG_FB_ARK is not set |
1432 | # CONFIG_FB_PM3 is not set | 1486 | # CONFIG_FB_PM3 is not set |
1487 | # CONFIG_FB_CARMINE is not set | ||
1433 | # CONFIG_FB_GEODE is not set | 1488 | # CONFIG_FB_GEODE is not set |
1434 | # CONFIG_FB_VIRTUAL is not set | 1489 | # CONFIG_FB_VIRTUAL is not set |
1435 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 1490 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
@@ -1437,6 +1492,7 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y | |||
1437 | CONFIG_BACKLIGHT_CLASS_DEVICE=y | 1492 | CONFIG_BACKLIGHT_CLASS_DEVICE=y |
1438 | # CONFIG_BACKLIGHT_CORGI is not set | 1493 | # CONFIG_BACKLIGHT_CORGI is not set |
1439 | # CONFIG_BACKLIGHT_PROGEAR is not set | 1494 | # CONFIG_BACKLIGHT_PROGEAR is not set |
1495 | # CONFIG_BACKLIGHT_MBP_NVIDIA is not set | ||
1440 | 1496 | ||
1441 | # | 1497 | # |
1442 | # Display device support | 1498 | # Display device support |
@@ -1456,15 +1512,7 @@ CONFIG_LOGO=y | |||
1456 | # CONFIG_LOGO_LINUX_MONO is not set | 1512 | # CONFIG_LOGO_LINUX_MONO is not set |
1457 | # CONFIG_LOGO_LINUX_VGA16 is not set | 1513 | # CONFIG_LOGO_LINUX_VGA16 is not set |
1458 | CONFIG_LOGO_LINUX_CLUT224=y | 1514 | CONFIG_LOGO_LINUX_CLUT224=y |
1459 | |||
1460 | # | ||
1461 | # Sound | ||
1462 | # | ||
1463 | CONFIG_SOUND=y | 1515 | CONFIG_SOUND=y |
1464 | |||
1465 | # | ||
1466 | # Advanced Linux Sound Architecture | ||
1467 | # | ||
1468 | CONFIG_SND=y | 1516 | CONFIG_SND=y |
1469 | CONFIG_SND_TIMER=y | 1517 | CONFIG_SND_TIMER=y |
1470 | CONFIG_SND_PCM=y | 1518 | CONFIG_SND_PCM=y |
@@ -1482,20 +1530,14 @@ CONFIG_SND_VERBOSE_PROCFS=y | |||
1482 | # CONFIG_SND_VERBOSE_PRINTK is not set | 1530 | # CONFIG_SND_VERBOSE_PRINTK is not set |
1483 | # CONFIG_SND_DEBUG is not set | 1531 | # CONFIG_SND_DEBUG is not set |
1484 | CONFIG_SND_VMASTER=y | 1532 | CONFIG_SND_VMASTER=y |
1485 | 1533 | CONFIG_SND_DRIVERS=y | |
1486 | # | ||
1487 | # Generic devices | ||
1488 | # | ||
1489 | # CONFIG_SND_PCSP is not set | 1534 | # CONFIG_SND_PCSP is not set |
1490 | # CONFIG_SND_DUMMY is not set | 1535 | # CONFIG_SND_DUMMY is not set |
1491 | # CONFIG_SND_VIRMIDI is not set | 1536 | # CONFIG_SND_VIRMIDI is not set |
1492 | # CONFIG_SND_MTPAV is not set | 1537 | # CONFIG_SND_MTPAV is not set |
1493 | # CONFIG_SND_SERIAL_U16550 is not set | 1538 | # CONFIG_SND_SERIAL_U16550 is not set |
1494 | # CONFIG_SND_MPU401 is not set | 1539 | # CONFIG_SND_MPU401 is not set |
1495 | 1540 | CONFIG_SND_PCI=y | |
1496 | # | ||
1497 | # PCI devices | ||
1498 | # | ||
1499 | # CONFIG_SND_AD1889 is not set | 1541 | # CONFIG_SND_AD1889 is not set |
1500 | # CONFIG_SND_ALS300 is not set | 1542 | # CONFIG_SND_ALS300 is not set |
1501 | # CONFIG_SND_ALS4000 is not set | 1543 | # CONFIG_SND_ALS4000 is not set |
@@ -1568,36 +1610,14 @@ CONFIG_SND_HDA_GENERIC=y | |||
1568 | # CONFIG_SND_VIRTUOSO is not set | 1610 | # CONFIG_SND_VIRTUOSO is not set |
1569 | # CONFIG_SND_VX222 is not set | 1611 | # CONFIG_SND_VX222 is not set |
1570 | # CONFIG_SND_YMFPCI is not set | 1612 | # CONFIG_SND_YMFPCI is not set |
1571 | 1613 | CONFIG_SND_USB=y | |
1572 | # | ||
1573 | # USB devices | ||
1574 | # | ||
1575 | # CONFIG_SND_USB_AUDIO is not set | 1614 | # CONFIG_SND_USB_AUDIO is not set |
1576 | # CONFIG_SND_USB_USX2Y is not set | 1615 | # CONFIG_SND_USB_USX2Y is not set |
1577 | # CONFIG_SND_USB_CAIAQ is not set | 1616 | # CONFIG_SND_USB_CAIAQ is not set |
1578 | 1617 | CONFIG_SND_PCMCIA=y | |
1579 | # | ||
1580 | # PCMCIA devices | ||
1581 | # | ||
1582 | # CONFIG_SND_VXPOCKET is not set | 1618 | # CONFIG_SND_VXPOCKET is not set |
1583 | # CONFIG_SND_PDAUDIOCF is not set | 1619 | # CONFIG_SND_PDAUDIOCF is not set |
1584 | |||
1585 | # | ||
1586 | # System on Chip audio support | ||
1587 | # | ||
1588 | # CONFIG_SND_SOC is not set | 1620 | # CONFIG_SND_SOC is not set |
1589 | |||
1590 | # | ||
1591 | # ALSA SoC audio for Freescale SOCs | ||
1592 | # | ||
1593 | |||
1594 | # | ||
1595 | # SoC Audio for the Texas Instruments OMAP | ||
1596 | # | ||
1597 | |||
1598 | # | ||
1599 | # Open Sound System | ||
1600 | # | ||
1601 | # CONFIG_SOUND_PRIME is not set | 1621 | # CONFIG_SOUND_PRIME is not set |
1602 | CONFIG_HID_SUPPORT=y | 1622 | CONFIG_HID_SUPPORT=y |
1603 | CONFIG_HID=y | 1623 | CONFIG_HID=y |
@@ -1633,6 +1653,7 @@ CONFIG_USB_DEVICEFS=y | |||
1633 | # CONFIG_USB_DYNAMIC_MINORS is not set | 1653 | # CONFIG_USB_DYNAMIC_MINORS is not set |
1634 | CONFIG_USB_SUSPEND=y | 1654 | CONFIG_USB_SUSPEND=y |
1635 | # CONFIG_USB_OTG is not set | 1655 | # CONFIG_USB_OTG is not set |
1656 | CONFIG_USB_MON=y | ||
1636 | 1657 | ||
1637 | # | 1658 | # |
1638 | # USB Host Controller Drivers | 1659 | # USB Host Controller Drivers |
@@ -1656,6 +1677,7 @@ CONFIG_USB_UHCI_HCD=y | |||
1656 | # | 1677 | # |
1657 | # CONFIG_USB_ACM is not set | 1678 | # CONFIG_USB_ACM is not set |
1658 | CONFIG_USB_PRINTER=y | 1679 | CONFIG_USB_PRINTER=y |
1680 | # CONFIG_USB_WDM is not set | ||
1659 | 1681 | ||
1660 | # | 1682 | # |
1661 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' | 1683 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' |
@@ -1677,6 +1699,7 @@ CONFIG_USB_STORAGE=y | |||
1677 | # CONFIG_USB_STORAGE_ALAUDA is not set | 1699 | # CONFIG_USB_STORAGE_ALAUDA is not set |
1678 | # CONFIG_USB_STORAGE_ONETOUCH is not set | 1700 | # CONFIG_USB_STORAGE_ONETOUCH is not set |
1679 | # CONFIG_USB_STORAGE_KARMA is not set | 1701 | # CONFIG_USB_STORAGE_KARMA is not set |
1702 | # CONFIG_USB_STORAGE_SIERRA is not set | ||
1680 | # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set | 1703 | # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set |
1681 | CONFIG_USB_LIBUSUAL=y | 1704 | CONFIG_USB_LIBUSUAL=y |
1682 | 1705 | ||
@@ -1685,7 +1708,6 @@ CONFIG_USB_LIBUSUAL=y | |||
1685 | # | 1708 | # |
1686 | # CONFIG_USB_MDC800 is not set | 1709 | # CONFIG_USB_MDC800 is not set |
1687 | # CONFIG_USB_MICROTEK is not set | 1710 | # CONFIG_USB_MICROTEK is not set |
1688 | CONFIG_USB_MON=y | ||
1689 | 1711 | ||
1690 | # | 1712 | # |
1691 | # USB port drivers | 1713 | # USB port drivers |
@@ -1698,7 +1720,6 @@ CONFIG_USB_MON=y | |||
1698 | # CONFIG_USB_EMI62 is not set | 1720 | # CONFIG_USB_EMI62 is not set |
1699 | # CONFIG_USB_EMI26 is not set | 1721 | # CONFIG_USB_EMI26 is not set |
1700 | # CONFIG_USB_ADUTUX is not set | 1722 | # CONFIG_USB_ADUTUX is not set |
1701 | # CONFIG_USB_AUERSWALD is not set | ||
1702 | # CONFIG_USB_RIO500 is not set | 1723 | # CONFIG_USB_RIO500 is not set |
1703 | # CONFIG_USB_LEGOTOWER is not set | 1724 | # CONFIG_USB_LEGOTOWER is not set |
1704 | # CONFIG_USB_LCD is not set | 1725 | # CONFIG_USB_LCD is not set |
@@ -1715,6 +1736,7 @@ CONFIG_USB_MON=y | |||
1715 | # CONFIG_USB_TRANCEVIBRATOR is not set | 1736 | # CONFIG_USB_TRANCEVIBRATOR is not set |
1716 | # CONFIG_USB_IOWARRIOR is not set | 1737 | # CONFIG_USB_IOWARRIOR is not set |
1717 | # CONFIG_USB_TEST is not set | 1738 | # CONFIG_USB_TEST is not set |
1739 | # CONFIG_USB_ISIGHTFW is not set | ||
1718 | # CONFIG_USB_GADGET is not set | 1740 | # CONFIG_USB_GADGET is not set |
1719 | # CONFIG_MMC is not set | 1741 | # CONFIG_MMC is not set |
1720 | # CONFIG_MEMSTICK is not set | 1742 | # CONFIG_MEMSTICK is not set |
@@ -1724,7 +1746,9 @@ CONFIG_LEDS_CLASS=y | |||
1724 | # | 1746 | # |
1725 | # LED drivers | 1747 | # LED drivers |
1726 | # | 1748 | # |
1749 | # CONFIG_LEDS_PCA9532 is not set | ||
1727 | # CONFIG_LEDS_CLEVO_MAIL is not set | 1750 | # CONFIG_LEDS_CLEVO_MAIL is not set |
1751 | # CONFIG_LEDS_PCA955X is not set | ||
1728 | 1752 | ||
1729 | # | 1753 | # |
1730 | # LED Triggers | 1754 | # LED Triggers |
@@ -1770,6 +1794,7 @@ CONFIG_RTC_INTF_DEV=y | |||
1770 | # CONFIG_RTC_DRV_PCF8583 is not set | 1794 | # CONFIG_RTC_DRV_PCF8583 is not set |
1771 | # CONFIG_RTC_DRV_M41T80 is not set | 1795 | # CONFIG_RTC_DRV_M41T80 is not set |
1772 | # CONFIG_RTC_DRV_S35390A is not set | 1796 | # CONFIG_RTC_DRV_S35390A is not set |
1797 | # CONFIG_RTC_DRV_FM3130 is not set | ||
1773 | 1798 | ||
1774 | # | 1799 | # |
1775 | # SPI RTC drivers | 1800 | # SPI RTC drivers |
@@ -1802,11 +1827,13 @@ CONFIG_DMADEVICES=y | |||
1802 | # Firmware Drivers | 1827 | # Firmware Drivers |
1803 | # | 1828 | # |
1804 | # CONFIG_EDD is not set | 1829 | # CONFIG_EDD is not set |
1830 | CONFIG_FIRMWARE_MEMMAP=y | ||
1805 | CONFIG_EFI_VARS=y | 1831 | CONFIG_EFI_VARS=y |
1806 | # CONFIG_DELL_RBU is not set | 1832 | # CONFIG_DELL_RBU is not set |
1807 | # CONFIG_DCDBAS is not set | 1833 | # CONFIG_DCDBAS is not set |
1808 | CONFIG_DMIID=y | 1834 | CONFIG_DMIID=y |
1809 | # CONFIG_ISCSI_IBFT_FIND is not set | 1835 | CONFIG_ISCSI_IBFT_FIND=y |
1836 | CONFIG_ISCSI_IBFT=y | ||
1810 | 1837 | ||
1811 | # | 1838 | # |
1812 | # File systems | 1839 | # File systems |
@@ -1886,14 +1913,27 @@ CONFIG_HUGETLB_PAGE=y | |||
1886 | # CONFIG_CRAMFS is not set | 1913 | # CONFIG_CRAMFS is not set |
1887 | # CONFIG_VXFS_FS is not set | 1914 | # CONFIG_VXFS_FS is not set |
1888 | # CONFIG_MINIX_FS is not set | 1915 | # CONFIG_MINIX_FS is not set |
1916 | # CONFIG_OMFS_FS is not set | ||
1889 | # CONFIG_HPFS_FS is not set | 1917 | # CONFIG_HPFS_FS is not set |
1890 | # CONFIG_QNX4FS_FS is not set | 1918 | # CONFIG_QNX4FS_FS is not set |
1891 | # CONFIG_ROMFS_FS is not set | 1919 | # CONFIG_ROMFS_FS is not set |
1892 | # CONFIG_SYSV_FS is not set | 1920 | # CONFIG_SYSV_FS is not set |
1893 | # CONFIG_UFS_FS is not set | 1921 | # CONFIG_UFS_FS is not set |
1894 | CONFIG_NETWORK_FILESYSTEMS=y | 1922 | CONFIG_NETWORK_FILESYSTEMS=y |
1895 | # CONFIG_NFS_FS is not set | 1923 | CONFIG_NFS_FS=y |
1924 | CONFIG_NFS_V3=y | ||
1925 | CONFIG_NFS_V3_ACL=y | ||
1926 | CONFIG_NFS_V4=y | ||
1927 | CONFIG_ROOT_NFS=y | ||
1896 | # CONFIG_NFSD is not set | 1928 | # CONFIG_NFSD is not set |
1929 | CONFIG_LOCKD=y | ||
1930 | CONFIG_LOCKD_V4=y | ||
1931 | CONFIG_NFS_ACL_SUPPORT=y | ||
1932 | CONFIG_NFS_COMMON=y | ||
1933 | CONFIG_SUNRPC=y | ||
1934 | CONFIG_SUNRPC_GSS=y | ||
1935 | CONFIG_RPCSEC_GSS_KRB5=y | ||
1936 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
1897 | # CONFIG_SMB_FS is not set | 1937 | # CONFIG_SMB_FS is not set |
1898 | # CONFIG_CIFS is not set | 1938 | # CONFIG_CIFS is not set |
1899 | # CONFIG_NCP_FS is not set | 1939 | # CONFIG_NCP_FS is not set |
@@ -1967,9 +2007,9 @@ CONFIG_NLS_UTF8=y | |||
1967 | # Kernel hacking | 2007 | # Kernel hacking |
1968 | # | 2008 | # |
1969 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | 2009 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y |
1970 | # CONFIG_PRINTK_TIME is not set | 2010 | CONFIG_PRINTK_TIME=y |
1971 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 2011 | CONFIG_ENABLE_WARN_DEPRECATED=y |
1972 | # CONFIG_ENABLE_MUST_CHECK is not set | 2012 | CONFIG_ENABLE_MUST_CHECK=y |
1973 | CONFIG_FRAME_WARN=2048 | 2013 | CONFIG_FRAME_WARN=2048 |
1974 | CONFIG_MAGIC_SYSRQ=y | 2014 | CONFIG_MAGIC_SYSRQ=y |
1975 | # CONFIG_UNUSED_SYMBOLS is not set | 2015 | # CONFIG_UNUSED_SYMBOLS is not set |
@@ -1998,6 +2038,7 @@ CONFIG_DEBUG_BUGVERBOSE=y | |||
1998 | # CONFIG_DEBUG_INFO is not set | 2038 | # CONFIG_DEBUG_INFO is not set |
1999 | # CONFIG_DEBUG_VM is not set | 2039 | # CONFIG_DEBUG_VM is not set |
2000 | # CONFIG_DEBUG_WRITECOUNT is not set | 2040 | # CONFIG_DEBUG_WRITECOUNT is not set |
2041 | CONFIG_DEBUG_MEMORY_INIT=y | ||
2001 | # CONFIG_DEBUG_LIST is not set | 2042 | # CONFIG_DEBUG_LIST is not set |
2002 | # CONFIG_DEBUG_SG is not set | 2043 | # CONFIG_DEBUG_SG is not set |
2003 | CONFIG_FRAME_POINTER=y | 2044 | CONFIG_FRAME_POINTER=y |
@@ -2008,11 +2049,20 @@ CONFIG_FRAME_POINTER=y | |||
2008 | # CONFIG_LKDTM is not set | 2049 | # CONFIG_LKDTM is not set |
2009 | # CONFIG_FAULT_INJECTION is not set | 2050 | # CONFIG_FAULT_INJECTION is not set |
2010 | # CONFIG_LATENCYTOP is not set | 2051 | # CONFIG_LATENCYTOP is not set |
2052 | CONFIG_SYSCTL_SYSCALL_CHECK=y | ||
2053 | CONFIG_HAVE_FTRACE=y | ||
2054 | CONFIG_HAVE_DYNAMIC_FTRACE=y | ||
2055 | # CONFIG_FTRACE is not set | ||
2056 | # CONFIG_IRQSOFF_TRACER is not set | ||
2057 | # CONFIG_SYSPROF_TRACER is not set | ||
2058 | # CONFIG_SCHED_TRACER is not set | ||
2059 | # CONFIG_CONTEXT_SWITCH_TRACER is not set | ||
2011 | CONFIG_PROVIDE_OHCI1394_DMA_INIT=y | 2060 | CONFIG_PROVIDE_OHCI1394_DMA_INIT=y |
2012 | # CONFIG_SAMPLES is not set | 2061 | # CONFIG_SAMPLES is not set |
2013 | # CONFIG_KGDB is not set | ||
2014 | CONFIG_HAVE_ARCH_KGDB=y | 2062 | CONFIG_HAVE_ARCH_KGDB=y |
2063 | # CONFIG_KGDB is not set | ||
2015 | # CONFIG_STRICT_DEVMEM is not set | 2064 | # CONFIG_STRICT_DEVMEM is not set |
2065 | CONFIG_X86_VERBOSE_BOOTUP=y | ||
2016 | CONFIG_EARLY_PRINTK=y | 2066 | CONFIG_EARLY_PRINTK=y |
2017 | CONFIG_DEBUG_STACKOVERFLOW=y | 2067 | CONFIG_DEBUG_STACKOVERFLOW=y |
2018 | CONFIG_DEBUG_STACK_USAGE=y | 2068 | CONFIG_DEBUG_STACK_USAGE=y |
@@ -2023,8 +2073,8 @@ CONFIG_DEBUG_RODATA=y | |||
2023 | # CONFIG_DIRECT_GBPAGES is not set | 2073 | # CONFIG_DIRECT_GBPAGES is not set |
2024 | # CONFIG_DEBUG_RODATA_TEST is not set | 2074 | # CONFIG_DEBUG_RODATA_TEST is not set |
2025 | CONFIG_DEBUG_NX_TEST=m | 2075 | CONFIG_DEBUG_NX_TEST=m |
2026 | CONFIG_X86_MPPARSE=y | ||
2027 | # CONFIG_IOMMU_DEBUG is not set | 2076 | # CONFIG_IOMMU_DEBUG is not set |
2077 | # CONFIG_MMIOTRACE is not set | ||
2028 | CONFIG_IO_DELAY_TYPE_0X80=0 | 2078 | CONFIG_IO_DELAY_TYPE_0X80=0 |
2029 | CONFIG_IO_DELAY_TYPE_0XED=1 | 2079 | CONFIG_IO_DELAY_TYPE_0XED=1 |
2030 | CONFIG_IO_DELAY_TYPE_UDELAY=2 | 2080 | CONFIG_IO_DELAY_TYPE_UDELAY=2 |
@@ -2036,6 +2086,7 @@ CONFIG_IO_DELAY_0X80=y | |||
2036 | CONFIG_DEFAULT_IO_DELAY_TYPE=0 | 2086 | CONFIG_DEFAULT_IO_DELAY_TYPE=0 |
2037 | CONFIG_DEBUG_BOOT_PARAMS=y | 2087 | CONFIG_DEBUG_BOOT_PARAMS=y |
2038 | # CONFIG_CPA_DEBUG is not set | 2088 | # CONFIG_CPA_DEBUG is not set |
2089 | CONFIG_OPTIMIZE_INLINING=y | ||
2039 | 2090 | ||
2040 | # | 2091 | # |
2041 | # Security options | 2092 | # Security options |
@@ -2045,7 +2096,6 @@ CONFIG_KEYS_DEBUG_PROC_KEYS=y | |||
2045 | CONFIG_SECURITY=y | 2096 | CONFIG_SECURITY=y |
2046 | CONFIG_SECURITY_NETWORK=y | 2097 | CONFIG_SECURITY_NETWORK=y |
2047 | # CONFIG_SECURITY_NETWORK_XFRM is not set | 2098 | # CONFIG_SECURITY_NETWORK_XFRM is not set |
2048 | CONFIG_SECURITY_CAPABILITIES=y | ||
2049 | CONFIG_SECURITY_FILE_CAPABILITIES=y | 2099 | CONFIG_SECURITY_FILE_CAPABILITIES=y |
2050 | # CONFIG_SECURITY_ROOTPLUG is not set | 2100 | # CONFIG_SECURITY_ROOTPLUG is not set |
2051 | CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536 | 2101 | CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536 |
@@ -2106,6 +2156,10 @@ CONFIG_CRYPTO_HMAC=y | |||
2106 | # CONFIG_CRYPTO_MD4 is not set | 2156 | # CONFIG_CRYPTO_MD4 is not set |
2107 | CONFIG_CRYPTO_MD5=y | 2157 | CONFIG_CRYPTO_MD5=y |
2108 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | 2158 | # CONFIG_CRYPTO_MICHAEL_MIC is not set |
2159 | # CONFIG_CRYPTO_RMD128 is not set | ||
2160 | # CONFIG_CRYPTO_RMD160 is not set | ||
2161 | # CONFIG_CRYPTO_RMD256 is not set | ||
2162 | # CONFIG_CRYPTO_RMD320 is not set | ||
2109 | CONFIG_CRYPTO_SHA1=y | 2163 | CONFIG_CRYPTO_SHA1=y |
2110 | # CONFIG_CRYPTO_SHA256 is not set | 2164 | # CONFIG_CRYPTO_SHA256 is not set |
2111 | # CONFIG_CRYPTO_SHA512 is not set | 2165 | # CONFIG_CRYPTO_SHA512 is not set |
@@ -2155,6 +2209,7 @@ CONFIG_GENERIC_FIND_FIRST_BIT=y | |||
2155 | CONFIG_GENERIC_FIND_NEXT_BIT=y | 2209 | CONFIG_GENERIC_FIND_NEXT_BIT=y |
2156 | # CONFIG_CRC_CCITT is not set | 2210 | # CONFIG_CRC_CCITT is not set |
2157 | # CONFIG_CRC16 is not set | 2211 | # CONFIG_CRC16 is not set |
2212 | CONFIG_CRC_T10DIF=y | ||
2158 | # CONFIG_CRC_ITU_T is not set | 2213 | # CONFIG_CRC_ITU_T is not set |
2159 | CONFIG_CRC32=y | 2214 | CONFIG_CRC32=y |
2160 | # CONFIG_CRC7 is not set | 2215 | # CONFIG_CRC7 is not set |
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index a0e1dbe67dc1..127ec3f07214 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c | |||
@@ -85,8 +85,10 @@ static void dump_thread32(struct pt_regs *regs, struct user32 *dump) | |||
85 | dump->regs.ax = regs->ax; | 85 | dump->regs.ax = regs->ax; |
86 | dump->regs.ds = current->thread.ds; | 86 | dump->regs.ds = current->thread.ds; |
87 | dump->regs.es = current->thread.es; | 87 | dump->regs.es = current->thread.es; |
88 | asm("movl %%fs,%0" : "=r" (fs)); dump->regs.fs = fs; | 88 | savesegment(fs, fs); |
89 | asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs; | 89 | dump->regs.fs = fs; |
90 | savesegment(gs, gs); | ||
91 | dump->regs.gs = gs; | ||
90 | dump->regs.orig_ax = regs->orig_ax; | 92 | dump->regs.orig_ax = regs->orig_ax; |
91 | dump->regs.ip = regs->ip; | 93 | dump->regs.ip = regs->ip; |
92 | dump->regs.cs = regs->cs; | 94 | dump->regs.cs = regs->cs; |
@@ -430,8 +432,9 @@ beyond_if: | |||
430 | current->mm->start_stack = | 432 | current->mm->start_stack = |
431 | (unsigned long)create_aout_tables((char __user *)bprm->p, bprm); | 433 | (unsigned long)create_aout_tables((char __user *)bprm->p, bprm); |
432 | /* start thread */ | 434 | /* start thread */ |
433 | asm volatile("movl %0,%%fs" :: "r" (0)); \ | 435 | loadsegment(fs, 0); |
434 | asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS)); | 436 | loadsegment(ds, __USER32_DS); |
437 | loadsegment(es, __USER32_DS); | ||
435 | load_gs_index(0); | 438 | load_gs_index(0); |
436 | (regs)->ip = ex.a_entry; | 439 | (regs)->ip = ex.a_entry; |
437 | (regs)->sp = current->mm->start_stack; | 440 | (regs)->sp = current->mm->start_stack; |
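The open-coded segment moves above are replaced by the savesegment()/loadsegment() helpers. For context, the sketch below paraphrases those macros from the x86 system.h header of this era (a from-memory paraphrase, not a quote from this patch): savesegment() performs the same single mov the removed asm did, while loadsegment() adds an exception-table fixup so that loading a bogus selector falls back to the null selector instead of faulting the kernel.

/* Paraphrased sketch of the helpers (see include/asm-x86/system.h);
 * _ASM_EXTABLE comes from <asm/asm.h>. Not copied from this patch. */
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

#define loadsegment(seg, value)				\
	asm volatile("\n"				\
		     "1:\t"				\
		     "movl %k0,%%" #seg "\n"		\
		     "2:\n"				\
		     ".section .fixup,\"ax\"\n"		\
		     "3:\t"				\
		     "movl %k1,%%" #seg "\n\t"		\
		     "jmp 2b\n"				\
		     ".previous\n"			\
		     _ASM_EXTABLE(1b, 3b)		\
		     : : "r" (value), "r" (0))

Using the helpers keeps the constraints and clobbers in one place instead of scattering slightly different asm strings across ia32_aout.c and ia32_signal.c.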
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 20af4c79579a..f1a2ac777faf 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c | |||
@@ -206,7 +206,7 @@ struct rt_sigframe | |||
206 | { unsigned int cur; \ | 206 | { unsigned int cur; \ |
207 | unsigned short pre; \ | 207 | unsigned short pre; \ |
208 | err |= __get_user(pre, &sc->seg); \ | 208 | err |= __get_user(pre, &sc->seg); \ |
209 | asm volatile("movl %%" #seg ",%0" : "=r" (cur)); \ | 209 | savesegment(seg, cur); \ |
210 | pre |= mask; \ | 210 | pre |= mask; \ |
211 | if (pre != cur) loadsegment(seg, pre); } | 211 | if (pre != cur) loadsegment(seg, pre); } |
212 | 212 | ||
@@ -235,7 +235,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, | |||
235 | */ | 235 | */ |
236 | err |= __get_user(gs, &sc->gs); | 236 | err |= __get_user(gs, &sc->gs); |
237 | gs |= 3; | 237 | gs |= 3; |
238 | asm("movl %%gs,%0" : "=r" (oldgs)); | 238 | savesegment(gs, oldgs); |
239 | if (gs != oldgs) | 239 | if (gs != oldgs) |
240 | load_gs_index(gs); | 240 | load_gs_index(gs); |
241 | 241 | ||
@@ -355,14 +355,13 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, | |||
355 | { | 355 | { |
356 | int tmp, err = 0; | 356 | int tmp, err = 0; |
357 | 357 | ||
358 | tmp = 0; | 358 | savesegment(gs, tmp); |
359 | __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp)); | ||
360 | err |= __put_user(tmp, (unsigned int __user *)&sc->gs); | 359 | err |= __put_user(tmp, (unsigned int __user *)&sc->gs); |
361 | __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp)); | 360 | savesegment(fs, tmp); |
362 | err |= __put_user(tmp, (unsigned int __user *)&sc->fs); | 361 | err |= __put_user(tmp, (unsigned int __user *)&sc->fs); |
363 | __asm__("movl %%ds,%0" : "=r"(tmp): "0"(tmp)); | 362 | savesegment(ds, tmp); |
364 | err |= __put_user(tmp, (unsigned int __user *)&sc->ds); | 363 | err |= __put_user(tmp, (unsigned int __user *)&sc->ds); |
365 | __asm__("movl %%es,%0" : "=r"(tmp): "0"(tmp)); | 364 | savesegment(es, tmp); |
366 | err |= __put_user(tmp, (unsigned int __user *)&sc->es); | 365 | err |= __put_user(tmp, (unsigned int __user *)&sc->es); |
367 | 366 | ||
368 | err |= __put_user((u32)regs->di, &sc->di); | 367 | err |= __put_user((u32)regs->di, &sc->di); |
@@ -498,8 +497,8 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, | |||
498 | regs->dx = 0; | 497 | regs->dx = 0; |
499 | regs->cx = 0; | 498 | regs->cx = 0; |
500 | 499 | ||
501 | asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); | 500 | loadsegment(ds, __USER32_DS); |
502 | asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); | 501 | loadsegment(es, __USER32_DS); |
503 | 502 | ||
504 | regs->cs = __USER32_CS; | 503 | regs->cs = __USER32_CS; |
505 | regs->ss = __USER32_DS; | 504 | regs->ss = __USER32_DS; |
@@ -591,8 +590,8 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
591 | regs->dx = (unsigned long) &frame->info; | 590 | regs->dx = (unsigned long) &frame->info; |
592 | regs->cx = (unsigned long) &frame->uc; | 591 | regs->cx = (unsigned long) &frame->uc; |
593 | 592 | ||
594 | asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); | 593 | loadsegment(ds, __USER32_DS); |
595 | asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); | 594 | loadsegment(es, __USER32_DS); |
596 | 595 | ||
597 | regs->cs = __USER32_CS; | 596 | regs->cs = __USER32_CS; |
598 | regs->ss = __USER32_DS; | 597 | regs->ss = __USER32_DS; |
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index d3c64088b981..beda4232ce69 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c | |||
@@ -556,15 +556,6 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig, | |||
556 | return ret; | 556 | return ret; |
557 | } | 557 | } |
558 | 558 | ||
559 | /* These are here just in case some old ia32 binary calls it. */ | ||
560 | asmlinkage long sys32_pause(void) | ||
561 | { | ||
562 | current->state = TASK_INTERRUPTIBLE; | ||
563 | schedule(); | ||
564 | return -ERESTARTNOHAND; | ||
565 | } | ||
566 | |||
567 | |||
568 | #ifdef CONFIG_SYSCTL_SYSCALL | 559 | #ifdef CONFIG_SYSCTL_SYSCALL |
569 | struct sysctl_ia32 { | 560 | struct sysctl_ia32 { |
570 | unsigned int name; | 561 | unsigned int name; |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index fa88a1d71290..e5032d7b391d 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -58,7 +58,6 @@ EXPORT_SYMBOL(acpi_disabled); | |||
58 | #ifdef CONFIG_X86_64 | 58 | #ifdef CONFIG_X86_64 |
59 | 59 | ||
60 | #include <asm/proto.h> | 60 | #include <asm/proto.h> |
61 | #include <asm/genapic.h> | ||
62 | 61 | ||
63 | #else /* X86 */ | 62 | #else /* X86 */ |
64 | 63 | ||
@@ -158,6 +157,16 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size) | |||
158 | struct acpi_mcfg_allocation *pci_mmcfg_config; | 157 | struct acpi_mcfg_allocation *pci_mmcfg_config; |
159 | int pci_mmcfg_config_num; | 158 | int pci_mmcfg_config_num; |
160 | 159 | ||
160 | static int acpi_mcfg_64bit_base_addr __initdata = FALSE; | ||
161 | |||
162 | static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg) | ||
163 | { | ||
164 | if (!strcmp(mcfg->header.oem_id, "SGI")) | ||
165 | acpi_mcfg_64bit_base_addr = TRUE; | ||
166 | |||
167 | return 0; | ||
168 | } | ||
169 | |||
161 | int __init acpi_parse_mcfg(struct acpi_table_header *header) | 170 | int __init acpi_parse_mcfg(struct acpi_table_header *header) |
162 | { | 171 | { |
163 | struct acpi_table_mcfg *mcfg; | 172 | struct acpi_table_mcfg *mcfg; |
@@ -190,8 +199,12 @@ int __init acpi_parse_mcfg(struct acpi_table_header *header) | |||
190 | } | 199 | } |
191 | 200 | ||
192 | memcpy(pci_mmcfg_config, &mcfg[1], config_size); | 201 | memcpy(pci_mmcfg_config, &mcfg[1], config_size); |
202 | |||
203 | acpi_mcfg_oem_check(mcfg); | ||
204 | |||
193 | for (i = 0; i < pci_mmcfg_config_num; ++i) { | 205 | for (i = 0; i < pci_mmcfg_config_num; ++i) { |
194 | if (pci_mmcfg_config[i].address > 0xFFFFFFFF) { | 206 | if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) && |
207 | !acpi_mcfg_64bit_base_addr) { | ||
195 | printk(KERN_ERR PREFIX | 208 | printk(KERN_ERR PREFIX |
196 | "MMCONFIG not in low 4GB of memory\n"); | 209 | "MMCONFIG not in low 4GB of memory\n"); |
197 | kfree(pci_mmcfg_config); | 210 | kfree(pci_mmcfg_config); |
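The new OEM check reads fields of the ACPI MCFG table before deciding whether to trust a base address above 4GB. The layouts below are paraphrased from the ACPICA headers (include/acpi/actbl1.h) purely to show where header.oem_id and the per-segment address come from; they are not part of this patch.

/* Paraphrased ACPICA layouts, shown only to illustrate the fields tested above. */
struct acpi_table_mcfg {
	struct acpi_table_header header;	/* header.oem_id == "SGI" is whitelisted */
	u8 reserved[8];
	/* followed by pci_mmcfg_config_num acpi_mcfg_allocation entries */
};

struct acpi_mcfg_allocation {
	u64 address;		/* MMCONFIG base; may legitimately sit above 4GB on SGI systems */
	u16 pci_segment;
	u8 start_bus_number;
	u8 end_bus_number;
	u32 reserved;
};

On every other platform the old below-4GB sanity check still applies, so bogus firmware values keep being rejected.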
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index fa2161d5003b..426e5d91b63a 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -20,7 +20,7 @@ unsigned long acpi_realmode_flags; | |||
20 | /* address in low memory of the wakeup routine. */ | 20 | /* address in low memory of the wakeup routine. */ |
21 | static unsigned long acpi_realmode; | 21 | static unsigned long acpi_realmode; |
22 | 22 | ||
23 | #ifdef CONFIG_64BIT | 23 | #if defined(CONFIG_SMP) && defined(CONFIG_64BIT) |
24 | static char temp_stack[10240]; | 24 | static char temp_stack[10240]; |
25 | #endif | 25 | #endif |
26 | 26 | ||
@@ -86,7 +86,7 @@ int acpi_save_state_mem(void) | |||
86 | #endif /* !CONFIG_64BIT */ | 86 | #endif /* !CONFIG_64BIT */ |
87 | 87 | ||
88 | header->pmode_cr0 = read_cr0(); | 88 | header->pmode_cr0 = read_cr0(); |
89 | header->pmode_cr4 = read_cr4(); | 89 | header->pmode_cr4 = read_cr4_safe(); |
90 | header->realmode_flags = acpi_realmode_flags; | 90 | header->realmode_flags = acpi_realmode_flags; |
91 | header->real_magic = 0x12345678; | 91 | header->real_magic = 0x12345678; |
92 | 92 | ||
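Switching the wakeup header to read_cr4_safe() matters on 486-class CPUs that have no CR4 at all: the safe variant wraps the read in an exception-table entry and yields 0 when the mov faults. Roughly (a paraphrase of the 32-bit native helper, not code from this patch):

/* Paraphrased sketch of native_read_cr4_safe(); the real definition lives
 * in the x86 system/processor headers. */
static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;

	/* %cr4 may not exist (e.g. on a 486); in that case the mov faults
	 * and the fixup simply continues with the preloaded value 0. */
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val) : "0" (0));
	return val;
}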
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 2763cb37b553..fb04e49776ba 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -145,35 +145,25 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = { | |||
145 | extern char __vsyscall_0; | 145 | extern char __vsyscall_0; |
146 | const unsigned char *const *find_nop_table(void) | 146 | const unsigned char *const *find_nop_table(void) |
147 | { | 147 | { |
148 | return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || | 148 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
149 | boot_cpu_data.x86 < 6 ? k8_nops : p6_nops; | 149 | boot_cpu_has(X86_FEATURE_NOPL)) |
150 | return p6_nops; | ||
151 | else | ||
152 | return k8_nops; | ||
150 | } | 153 | } |
151 | 154 | ||
152 | #else /* CONFIG_X86_64 */ | 155 | #else /* CONFIG_X86_64 */ |
153 | 156 | ||
154 | static const struct nop { | ||
155 | int cpuid; | ||
156 | const unsigned char *const *noptable; | ||
157 | } noptypes[] = { | ||
158 | { X86_FEATURE_K8, k8_nops }, | ||
159 | { X86_FEATURE_K7, k7_nops }, | ||
160 | { X86_FEATURE_P4, p6_nops }, | ||
161 | { X86_FEATURE_P3, p6_nops }, | ||
162 | { -1, NULL } | ||
163 | }; | ||
164 | |||
165 | const unsigned char *const *find_nop_table(void) | 157 | const unsigned char *const *find_nop_table(void) |
166 | { | 158 | { |
167 | const unsigned char *const *noptable = intel_nops; | 159 | if (boot_cpu_has(X86_FEATURE_K8)) |
168 | int i; | 160 | return k8_nops; |
169 | 161 | else if (boot_cpu_has(X86_FEATURE_K7)) | |
170 | for (i = 0; noptypes[i].cpuid >= 0; i++) { | 162 | return k7_nops; |
171 | if (boot_cpu_has(noptypes[i].cpuid)) { | 163 | else if (boot_cpu_has(X86_FEATURE_NOPL)) |
172 | noptable = noptypes[i].noptable; | 164 | return p6_nops; |
173 | break; | 165 | else |
174 | } | 166 | return intel_nops; |
175 | } | ||
176 | return noptable; | ||
177 | } | 167 | } |
178 | 168 | ||
179 | #endif /* CONFIG_X86_64 */ | 169 | #endif /* CONFIG_X86_64 */ |
@@ -241,25 +231,25 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end) | |||
241 | continue; | 231 | continue; |
242 | if (*ptr > text_end) | 232 | if (*ptr > text_end) |
243 | continue; | 233 | continue; |
244 | text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */ | 234 | /* turn DS segment override prefix into lock prefix */ |
235 | text_poke(*ptr, ((unsigned char []){0xf0}), 1); | ||
245 | }; | 236 | }; |
246 | } | 237 | } |
247 | 238 | ||
248 | static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end) | 239 | static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end) |
249 | { | 240 | { |
250 | u8 **ptr; | 241 | u8 **ptr; |
251 | char insn[1]; | ||
252 | 242 | ||
253 | if (noreplace_smp) | 243 | if (noreplace_smp) |
254 | return; | 244 | return; |
255 | 245 | ||
256 | add_nops(insn, 1); | ||
257 | for (ptr = start; ptr < end; ptr++) { | 246 | for (ptr = start; ptr < end; ptr++) { |
258 | if (*ptr < text) | 247 | if (*ptr < text) |
259 | continue; | 248 | continue; |
260 | if (*ptr > text_end) | 249 | if (*ptr > text_end) |
261 | continue; | 250 | continue; |
262 | text_poke(*ptr, insn, 1); | 251 | /* turn lock prefix into DS segment override prefix */ |
252 | text_poke(*ptr, ((unsigned char []){0x3E}), 1); | ||
263 | }; | 253 | }; |
264 | } | 254 | } |
265 | 255 | ||
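The pointers walked by alternatives_smp_lock()/alternatives_smp_unlock() come from the .smp_locks section that the LOCK_PREFIX macro emits at build time; the sketch below paraphrases that macro (from the x86 alternative.h header, not from this patch) to show why each recorded address points exactly at a one-byte 0xf0 prefix.

/* Paraphrased sketch of LOCK_PREFIX on SMP builds (see the x86
 * alternative.h header); not copied from this patch. */
#define LOCK_PREFIX						\
		".section .smp_locks,\"a\"\n"			\
		_ASM_ALIGN "\n"					\
		_ASM_PTR "661f\n" /* address of the prefix byte */ \
		".previous\n"					\
		"661:\n\tlock; "

Because only the one-byte prefix is recorded, text_poke() can flip a running kernel between the locked (0xf0) and unlocked (0x3e, a harmless DS segment override) forms without re-encoding the instruction that follows.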
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 22d7d050905d..042fdc27bc92 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -65,7 +65,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | |||
65 | u8 *target; | 65 | u8 *target; |
66 | 66 | ||
67 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | 67 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); |
68 | target = (iommu->cmd_buf + tail); | 68 | target = iommu->cmd_buf + tail; |
69 | memcpy_toio(target, cmd, sizeof(*cmd)); | 69 | memcpy_toio(target, cmd, sizeof(*cmd)); |
70 | tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; | 70 | tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; |
71 | head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | 71 | head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); |
@@ -101,32 +101,39 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | |||
101 | */ | 101 | */ |
102 | static int iommu_completion_wait(struct amd_iommu *iommu) | 102 | static int iommu_completion_wait(struct amd_iommu *iommu) |
103 | { | 103 | { |
104 | int ret; | 104 | int ret = 0, ready = 0; |
105 | unsigned status = 0; | ||
105 | struct iommu_cmd cmd; | 106 | struct iommu_cmd cmd; |
106 | volatile u64 ready = 0; | 107 | unsigned long flags, i = 0; |
107 | unsigned long ready_phys = virt_to_phys(&ready); | ||
108 | unsigned long i = 0; | ||
109 | 108 | ||
110 | memset(&cmd, 0, sizeof(cmd)); | 109 | memset(&cmd, 0, sizeof(cmd)); |
111 | cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK; | 110 | cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; |
112 | cmd.data[1] = upper_32_bits(ready_phys); | ||
113 | cmd.data[2] = 1; /* value written to 'ready' */ | ||
114 | CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); | 111 | CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); |
115 | 112 | ||
116 | iommu->need_sync = 0; | 113 | iommu->need_sync = 0; |
117 | 114 | ||
118 | ret = iommu_queue_command(iommu, &cmd); | 115 | spin_lock_irqsave(&iommu->lock, flags); |
116 | |||
117 | ret = __iommu_queue_command(iommu, &cmd); | ||
119 | 118 | ||
120 | if (ret) | 119 | if (ret) |
121 | return ret; | 120 | goto out; |
122 | 121 | ||
123 | while (!ready && (i < EXIT_LOOP_COUNT)) { | 122 | while (!ready && (i < EXIT_LOOP_COUNT)) { |
124 | ++i; | 123 | ++i; |
125 | cpu_relax(); | 124 | /* wait for the bit to become one */ |
125 | status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); | ||
126 | ready = status & MMIO_STATUS_COM_WAIT_INT_MASK; | ||
126 | } | 127 | } |
127 | 128 | ||
129 | /* set bit back to zero */ | ||
130 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; | ||
131 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); | ||
132 | |||
128 | if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit())) | 133 | if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit())) |
129 | printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n"); | 134 | printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n"); |
135 | out: | ||
136 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
130 | 137 | ||
131 | return 0; | 138 | return 0; |
132 | } | 139 | } |
@@ -137,6 +144,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu) | |||
137 | static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) | 144 | static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) |
138 | { | 145 | { |
139 | struct iommu_cmd cmd; | 146 | struct iommu_cmd cmd; |
147 | int ret; | ||
140 | 148 | ||
141 | BUG_ON(iommu == NULL); | 149 | BUG_ON(iommu == NULL); |
142 | 150 | ||
@@ -144,9 +152,11 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) | |||
144 | CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); | 152 | CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); |
145 | cmd.data[0] = devid; | 153 | cmd.data[0] = devid; |
146 | 154 | ||
155 | ret = iommu_queue_command(iommu, &cmd); | ||
156 | |||
147 | iommu->need_sync = 1; | 157 | iommu->need_sync = 1; |
148 | 158 | ||
149 | return iommu_queue_command(iommu, &cmd); | 159 | return ret; |
150 | } | 160 | } |
151 | 161 | ||
152 | /* | 162 | /* |
@@ -156,21 +166,24 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, | |||
156 | u64 address, u16 domid, int pde, int s) | 166 | u64 address, u16 domid, int pde, int s) |
157 | { | 167 | { |
158 | struct iommu_cmd cmd; | 168 | struct iommu_cmd cmd; |
169 | int ret; | ||
159 | 170 | ||
160 | memset(&cmd, 0, sizeof(cmd)); | 171 | memset(&cmd, 0, sizeof(cmd)); |
161 | address &= PAGE_MASK; | 172 | address &= PAGE_MASK; |
162 | CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES); | 173 | CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES); |
163 | cmd.data[1] |= domid; | 174 | cmd.data[1] |= domid; |
164 | cmd.data[2] = LOW_U32(address); | 175 | cmd.data[2] = lower_32_bits(address); |
165 | cmd.data[3] = upper_32_bits(address); | 176 | cmd.data[3] = upper_32_bits(address); |
166 | if (s) /* size bit - we flush more than one 4kb page */ | 177 | if (s) /* size bit - we flush more than one 4kb page */ |
167 | cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; | 178 | cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; |
168 | if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ | 179 | if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ |
169 | cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; | 180 | cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; |
170 | 181 | ||
182 | ret = iommu_queue_command(iommu, &cmd); | ||
183 | |||
171 | iommu->need_sync = 1; | 184 | iommu->need_sync = 1; |
172 | 185 | ||
173 | return iommu_queue_command(iommu, &cmd); | 186 | return ret; |
174 | } | 187 | } |
175 | 188 | ||
176 | /* | 189 | /* |
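Moving the need_sync update after the queueing call keeps the flag in step with what was actually submitted. A hypothetical caller (the function name here is invented for illustration, not taken from the driver) would pair the two like this:

/* Hypothetical illustration only -- flush_dev_entry_and_sync() is not a
 * function in this driver. */
static void flush_dev_entry_and_sync(struct amd_iommu *iommu, u16 devid)
{
	/* queues the INVALIDATE_DEV_ENTRY command and sets iommu->need_sync */
	iommu_queue_inv_dev_entry(iommu, devid);

	/* queues COMPLETION_WAIT and polls the interrupt bit in the MMIO
	 * status register until the IOMMU has processed everything */
	if (iommu->need_sync)
		iommu_completion_wait(iommu);
}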
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index d9a9da597e79..a69cc0f52042 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -801,6 +801,21 @@ static int __init init_memory_definitions(struct acpi_table_header *table) | |||
801 | } | 801 | } |
802 | 802 | ||
803 | /* | 803 | /* |
804 | * Init the device table to not allow DMA access for devices and | ||
805 | * suppress all page faults | ||
806 | */ | ||
807 | static void init_device_table(void) | ||
808 | { | ||
809 | u16 devid; | ||
810 | |||
811 | for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { | ||
812 | set_dev_entry_bit(devid, DEV_ENTRY_VALID); | ||
813 | set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); | ||
814 | set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT); | ||
815 | } | ||
816 | } | ||
817 | |||
818 | /* | ||
804 | * This function finally enables all IOMMUs found in the system after | 819 | * This function finally enables all IOMMUs found in the system after |
805 | * they have been initialized | 820 | * they have been initialized |
806 | */ | 821 | */ |
@@ -931,6 +946,9 @@ int __init amd_iommu_init(void) | |||
931 | if (amd_iommu_pd_alloc_bitmap == NULL) | 946 | if (amd_iommu_pd_alloc_bitmap == NULL) |
932 | goto free; | 947 | goto free; |
933 | 948 | ||
949 | /* init the device table */ | ||
950 | init_device_table(); | ||
951 | |||
934 | /* | 952 | /* |
935 | * let all alias entries point to itself | 953 | * let all alias entries point to itself |
936 | */ | 954 | */ |
@@ -954,15 +972,15 @@ int __init amd_iommu_init(void) | |||
954 | if (acpi_table_parse("IVRS", init_memory_definitions) != 0) | 972 | if (acpi_table_parse("IVRS", init_memory_definitions) != 0) |
955 | goto free; | 973 | goto free; |
956 | 974 | ||
957 | ret = amd_iommu_init_dma_ops(); | 975 | ret = sysdev_class_register(&amd_iommu_sysdev_class); |
958 | if (ret) | 976 | if (ret) |
959 | goto free; | 977 | goto free; |
960 | 978 | ||
961 | ret = sysdev_class_register(&amd_iommu_sysdev_class); | 979 | ret = sysdev_register(&device_amd_iommu); |
962 | if (ret) | 980 | if (ret) |
963 | goto free; | 981 | goto free; |
964 | 982 | ||
965 | ret = sysdev_register(&device_amd_iommu); | 983 | ret = amd_iommu_init_dma_ops(); |
966 | if (ret) | 984 | if (ret) |
967 | goto free; | 985 | goto free; |
968 | 986 | ||
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 44e21826db11..9a32b37ee2ee 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -455,11 +455,11 @@ out: | |||
455 | force_iommu || | 455 | force_iommu || |
456 | valid_agp || | 456 | valid_agp || |
457 | fallback_aper_force) { | 457 | fallback_aper_force) { |
458 | printk(KERN_ERR | 458 | printk(KERN_INFO |
459 | "Your BIOS doesn't leave a aperture memory hole\n"); | 459 | "Your BIOS doesn't leave a aperture memory hole\n"); |
460 | printk(KERN_ERR | 460 | printk(KERN_INFO |
461 | "Please enable the IOMMU option in the BIOS setup\n"); | 461 | "Please enable the IOMMU option in the BIOS setup\n"); |
462 | printk(KERN_ERR | 462 | printk(KERN_INFO |
463 | "This costs you %d MB of RAM\n", | 463 | "This costs you %d MB of RAM\n", |
464 | 32 << fallback_aper_order); | 464 | 32 << fallback_aper_order); |
465 | 465 | ||
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 039a8d4aaf62..f88bd0d982b0 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c | |||
@@ -1454,8 +1454,6 @@ void disconnect_bsp_APIC(int virt_wire_setup) | |||
1454 | } | 1454 | } |
1455 | } | 1455 | } |
1456 | 1456 | ||
1457 | unsigned int __cpuinitdata maxcpus = NR_CPUS; | ||
1458 | |||
1459 | void __cpuinit generic_processor_info(int apicid, int version) | 1457 | void __cpuinit generic_processor_info(int apicid, int version) |
1460 | { | 1458 | { |
1461 | int cpu; | 1459 | int cpu; |
@@ -1482,12 +1480,6 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1482 | return; | 1480 | return; |
1483 | } | 1481 | } |
1484 | 1482 | ||
1485 | if (num_processors >= maxcpus) { | ||
1486 | printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." | ||
1487 | " Processor ignored.\n", maxcpus); | ||
1488 | return; | ||
1489 | } | ||
1490 | |||
1491 | num_processors++; | 1483 | num_processors++; |
1492 | cpus_complement(tmp_map, cpu_present_map); | 1484 | cpus_complement(tmp_map, cpu_present_map); |
1493 | cpu = first_cpu(tmp_map); | 1485 | cpu = first_cpu(tmp_map); |
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 7f1f030da7ee..446c062e831c 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c | |||
@@ -90,7 +90,6 @@ static unsigned long apic_phys; | |||
90 | 90 | ||
91 | unsigned long mp_lapic_addr; | 91 | unsigned long mp_lapic_addr; |
92 | 92 | ||
93 | unsigned int __cpuinitdata maxcpus = NR_CPUS; | ||
94 | /* | 93 | /* |
95 | * Get the LAPIC version | 94 | * Get the LAPIC version |
96 | */ | 95 | */ |
@@ -1062,12 +1061,6 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1062 | return; | 1061 | return; |
1063 | } | 1062 | } |
1064 | 1063 | ||
1065 | if (num_processors >= maxcpus) { | ||
1066 | printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." | ||
1067 | " Processor ignored.\n", maxcpus); | ||
1068 | return; | ||
1069 | } | ||
1070 | |||
1071 | num_processors++; | 1064 | num_processors++; |
1072 | cpus_complement(tmp_map, cpu_present_map); | 1065 | cpus_complement(tmp_map, cpu_present_map); |
1073 | cpu = first_cpu(tmp_map); | 1066 | cpu = first_cpu(tmp_map); |
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 9ee24e6bc4b0..5145a6e72bbb 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -228,12 +228,12 @@ | |||
228 | #include <linux/suspend.h> | 228 | #include <linux/suspend.h> |
229 | #include <linux/kthread.h> | 229 | #include <linux/kthread.h> |
230 | #include <linux/jiffies.h> | 230 | #include <linux/jiffies.h> |
231 | #include <linux/smp_lock.h> | ||
232 | 231 | ||
233 | #include <asm/system.h> | 232 | #include <asm/system.h> |
234 | #include <asm/uaccess.h> | 233 | #include <asm/uaccess.h> |
235 | #include <asm/desc.h> | 234 | #include <asm/desc.h> |
236 | #include <asm/i8253.h> | 235 | #include <asm/i8253.h> |
236 | #include <asm/olpc.h> | ||
237 | #include <asm/paravirt.h> | 237 | #include <asm/paravirt.h> |
238 | #include <asm/reboot.h> | 238 | #include <asm/reboot.h> |
239 | 239 | ||
@@ -2217,7 +2217,7 @@ static int __init apm_init(void) | |||
2217 | 2217 | ||
2218 | dmi_check_system(apm_dmi_table); | 2218 | dmi_check_system(apm_dmi_table); |
2219 | 2219 | ||
2220 | if (apm_info.bios.version == 0 || paravirt_enabled()) { | 2220 | if (apm_info.bios.version == 0 || paravirt_enabled() || machine_is_olpc()) { |
2221 | printk(KERN_INFO "apm: BIOS not found.\n"); | 2221 | printk(KERN_INFO "apm: BIOS not found.\n"); |
2222 | return -ENODEV; | 2222 | return -ENODEV; |
2223 | } | 2223 | } |
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index aa89387006fe..505543a75a56 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c | |||
@@ -22,7 +22,7 @@ | |||
22 | 22 | ||
23 | #define __NO_STUBS 1 | 23 | #define __NO_STUBS 1 |
24 | #undef __SYSCALL | 24 | #undef __SYSCALL |
25 | #undef _ASM_X86_64_UNISTD_H_ | 25 | #undef ASM_X86__UNISTD_64_H |
26 | #define __SYSCALL(nr, sym) [nr] = 1, | 26 | #define __SYSCALL(nr, sym) [nr] = 1, |
27 | static char syscalls[] = { | 27 | static char syscalls[] = { |
28 | #include <asm/unistd.h> | 28 | #include <asm/unistd.h> |
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c index c639bd55391c..fdd585f9c53d 100644 --- a/arch/x86/kernel/bios_uv.c +++ b/arch/x86/kernel/bios_uv.c | |||
@@ -25,11 +25,11 @@ x86_bios_strerror(long status) | |||
25 | { | 25 | { |
26 | const char *str; | 26 | const char *str; |
27 | switch (status) { | 27 | switch (status) { |
28 | case 0: str = "Call completed without error"; break; | 28 | case 0: str = "Call completed without error"; break; |
29 | case -1: str = "Not implemented"; break; | 29 | case -1: str = "Not implemented"; break; |
30 | case -2: str = "Invalid argument"; break; | 30 | case -2: str = "Invalid argument"; break; |
31 | case -3: str = "Call completed with error"; break; | 31 | case -3: str = "Call completed with error"; break; |
32 | default: str = "Unknown BIOS status code"; break; | 32 | default: str = "Unknown BIOS status code"; break; |
33 | } | 33 | } |
34 | return str; | 34 | return str; |
35 | } | 35 | } |
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c index 84a8220a6072..a6ef672adbba 100644 --- a/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/arch/x86/kernel/cpu/addon_cpuid_features.c | |||
@@ -56,9 +56,22 @@ void __cpuinit validate_pat_support(struct cpuinfo_x86 *c) | |||
56 | 56 | ||
57 | switch (c->x86_vendor) { | 57 | switch (c->x86_vendor) { |
58 | case X86_VENDOR_INTEL: | 58 | case X86_VENDOR_INTEL: |
59 | if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) | 59 | /* |
60 | * There is a known erratum on Pentium III and Core Solo | ||
61 | * and Core Duo CPUs. | ||
62 | * " Page with PAT set to WC while associated MTRR is UC | ||
63 | * may consolidate to UC " | ||
64 | * Because of this erratum, it is better to stick with | ||
65 | * setting WC in MTRR rather than using PAT on these CPUs. | ||
66 | * | ||
67 | * Enable PAT WC only on P4, Core 2 or later CPUs. | ||
68 | */ | ||
69 | if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15)) | ||
60 | return; | 70 | return; |
61 | break; | 71 | |
72 | pat_disable("PAT WC disabled due to known CPU erratum."); | ||
73 | return; | ||
74 | |||
62 | case X86_VENDOR_AMD: | 75 | case X86_VENDOR_AMD: |
63 | case X86_VENDOR_CENTAUR: | 76 | case X86_VENDOR_CENTAUR: |
64 | case X86_VENDOR_TRANSMETA: | 77 | case X86_VENDOR_TRANSMETA: |
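pat_disable() itself is not shown in this hunk; to the best of my recollection it simply clears the global PAT enable flag and logs the reason string passed above, roughly as in the paraphrase below (arch/x86/mm/pat.c).

/* Rough paraphrase of pat_disable(); not part of this patch. */
void __cpuinit pat_disable(char *reason)
{
	pat_enabled = 0;			/* consulted later by pat_init() */
	printk(KERN_INFO "%s\n", reason);
}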
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index cae9cabc3031..18514ed26104 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -31,6 +31,11 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | |||
31 | if (c->x86_power & (1<<8)) | 31 | if (c->x86_power & (1<<8)) |
32 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 32 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
33 | } | 33 | } |
34 | |||
35 | /* Set MTRR capability flag if appropriate */ | ||
36 | if (c->x86_model == 13 || c->x86_model == 9 || | ||
37 | (c->x86_model == 8 && c->x86_mask >= 8)) | ||
38 | set_cpu_cap(c, X86_FEATURE_K6_MTRR); | ||
34 | } | 39 | } |
35 | 40 | ||
36 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) | 41 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) |
@@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
166 | mbytes); | 171 | mbytes); |
167 | } | 172 | } |
168 | 173 | ||
169 | /* Set MTRR capability flag if appropriate */ | ||
170 | if (c->x86_model == 13 || c->x86_model == 9 || | ||
171 | (c->x86_model == 8 && c->x86_mask >= 8)) | ||
172 | set_cpu_cap(c, X86_FEATURE_K6_MTRR); | ||
173 | break; | 174 | break; |
174 | } | 175 | } |
175 | 176 | ||
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index e0f45edd6a55..a0534c04d38a 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c | |||
@@ -314,6 +314,16 @@ enum { | |||
314 | EAMD3D = 1<<20, | 314 | EAMD3D = 1<<20, |
315 | }; | 315 | }; |
316 | 316 | ||
317 | static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) | ||
318 | { | ||
319 | switch (c->x86) { | ||
320 | case 5: | ||
321 | /* Emulate MTRRs using Centaur's MCR. */ | ||
322 | set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); | ||
323 | break; | ||
324 | } | ||
325 | } | ||
326 | |||
317 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) | 327 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) |
318 | { | 328 | { |
319 | 329 | ||
@@ -462,6 +472,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) | |||
462 | static struct cpu_dev centaur_cpu_dev __cpuinitdata = { | 472 | static struct cpu_dev centaur_cpu_dev __cpuinitdata = { |
463 | .c_vendor = "Centaur", | 473 | .c_vendor = "Centaur", |
464 | .c_ident = { "CentaurHauls" }, | 474 | .c_ident = { "CentaurHauls" }, |
475 | .c_early_init = early_init_centaur, | ||
465 | .c_init = init_centaur, | 476 | .c_init = init_centaur, |
466 | .c_size_cache = centaur_size_cache, | 477 | .c_size_cache = centaur_size_cache, |
467 | }; | 478 | }; |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 80ab20d4fa39..4e456bd955bb 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <asm/mtrr.h> | 13 | #include <asm/mtrr.h> |
14 | #include <asm/mce.h> | 14 | #include <asm/mce.h> |
15 | #include <asm/pat.h> | 15 | #include <asm/pat.h> |
16 | #include <asm/asm.h> | ||
16 | #ifdef CONFIG_X86_LOCAL_APIC | 17 | #ifdef CONFIG_X86_LOCAL_APIC |
17 | #include <asm/mpspec.h> | 18 | #include <asm/mpspec.h> |
18 | #include <asm/apic.h> | 19 | #include <asm/apic.h> |
@@ -334,11 +335,24 @@ static void __init early_cpu_detect(void) | |||
334 | 335 | ||
335 | get_cpu_vendor(c, 1); | 336 | get_cpu_vendor(c, 1); |
336 | 337 | ||
338 | early_get_cap(c); | ||
339 | |||
337 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | 340 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && |
338 | cpu_devs[c->x86_vendor]->c_early_init) | 341 | cpu_devs[c->x86_vendor]->c_early_init) |
339 | cpu_devs[c->x86_vendor]->c_early_init(c); | 342 | cpu_devs[c->x86_vendor]->c_early_init(c); |
343 | } | ||
340 | 344 | ||
341 | early_get_cap(c); | 345 | /* |
346 | * The NOPL instruction is supposed to exist on all CPUs with | ||
347 | * family >= 6; unfortunately, that's not true in practice because | ||
348 | * of early VIA chips and (more importantly) broken virtualizers that | ||
349 | * are not easy to detect. In the latter case it doesn't even *fail* | ||
350 | * reliably, so probing for it doesn't even work. Disable it completely | ||
351 | * unless we can find a reliable way to detect all the broken cases. | ||
352 | */ | ||
353 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | ||
354 | { | ||
355 | clear_cpu_cap(c, X86_FEATURE_NOPL); | ||
342 | } | 356 | } |
343 | 357 | ||
344 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | 358 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) |
@@ -395,8 +409,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | |||
395 | } | 409 | } |
396 | 410 | ||
397 | init_scattered_cpuid_features(c); | 411 | init_scattered_cpuid_features(c); |
412 | detect_nopl(c); | ||
398 | } | 413 | } |
399 | |||
400 | } | 414 | } |
401 | 415 | ||
402 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | 416 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c index dd6e3f15017e..305b465889b0 100644 --- a/arch/x86/kernel/cpu/common_64.c +++ b/arch/x86/kernel/cpu/common_64.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/mtrr.h> | 18 | #include <asm/mtrr.h> |
19 | #include <asm/mce.h> | 19 | #include <asm/mce.h> |
20 | #include <asm/pat.h> | 20 | #include <asm/pat.h> |
21 | #include <asm/asm.h> | ||
21 | #include <asm/numa.h> | 22 | #include <asm/numa.h> |
22 | #ifdef CONFIG_X86_LOCAL_APIC | 23 | #ifdef CONFIG_X86_LOCAL_APIC |
23 | #include <asm/mpspec.h> | 24 | #include <asm/mpspec.h> |
@@ -215,6 +216,39 @@ static void __init early_cpu_support_print(void) | |||
215 | } | 216 | } |
216 | } | 217 | } |
217 | 218 | ||
219 | /* | ||
220 | * The NOPL instruction is supposed to exist on all CPUs with | ||
221 | * family >= 6, unfortunately, that's not true in practice because | ||
222 | * of early VIA chips and (more importantly) broken virtualizers that | ||
223 | * are not easy to detect. Hence, probe for it based on first | ||
224 | * principles. | ||
225 | * | ||
226 | * Note: no 64-bit chip is known to lack these, but put the code here | ||
227 | * for consistency with 32 bits, and to make it utterly trivial to | ||
228 | * diagnose the problem should it ever surface. | ||
229 | */ | ||
230 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | ||
231 | { | ||
232 | const u32 nopl_signature = 0x888c53b1; /* Random number */ | ||
233 | u32 has_nopl = nopl_signature; | ||
234 | |||
235 | clear_cpu_cap(c, X86_FEATURE_NOPL); | ||
236 | if (c->x86 >= 6) { | ||
237 | asm volatile("\n" | ||
238 | "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */ | ||
239 | "2:\n" | ||
240 | " .section .fixup,\"ax\"\n" | ||
241 | "3: xor %0,%0\n" | ||
242 | " jmp 2b\n" | ||
243 | " .previous\n" | ||
244 | _ASM_EXTABLE(1b,3b) | ||
245 | : "+a" (has_nopl)); | ||
246 | |||
247 | if (has_nopl == nopl_signature) | ||
248 | set_cpu_cap(c, X86_FEATURE_NOPL); | ||
249 | } | ||
250 | } | ||
251 | |||
218 | static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c); | 252 | static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c); |
219 | 253 | ||
220 | void __init early_cpu_init(void) | 254 | void __init early_cpu_init(void) |
@@ -313,6 +347,8 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) | |||
313 | c->x86_phys_bits = eax & 0xff; | 347 | c->x86_phys_bits = eax & 0xff; |
314 | } | 348 | } |
315 | 349 | ||
350 | detect_nopl(c); | ||
351 | |||
316 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | 352 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && |
317 | cpu_devs[c->x86_vendor]->c_early_init) | 353 | cpu_devs[c->x86_vendor]->c_early_init) |
318 | cpu_devs[c->x86_vendor]->c_early_init(c); | 354 | cpu_devs[c->x86_vendor]->c_early_init(c); |
@@ -394,6 +430,49 @@ static __init int setup_noclflush(char *arg) | |||
394 | } | 430 | } |
395 | __setup("noclflush", setup_noclflush); | 431 | __setup("noclflush", setup_noclflush); |
396 | 432 | ||
433 | struct msr_range { | ||
434 | unsigned min; | ||
435 | unsigned max; | ||
436 | }; | ||
437 | |||
438 | static struct msr_range msr_range_array[] __cpuinitdata = { | ||
439 | { 0x00000000, 0x00000418}, | ||
440 | { 0xc0000000, 0xc000040b}, | ||
441 | { 0xc0010000, 0xc0010142}, | ||
442 | { 0xc0011000, 0xc001103b}, | ||
443 | }; | ||
444 | |||
445 | static void __cpuinit print_cpu_msr(void) | ||
446 | { | ||
447 | unsigned index; | ||
448 | u64 val; | ||
449 | int i; | ||
450 | unsigned index_min, index_max; | ||
451 | |||
452 | for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) { | ||
453 | index_min = msr_range_array[i].min; | ||
454 | index_max = msr_range_array[i].max; | ||
455 | for (index = index_min; index < index_max; index++) { | ||
456 | if (rdmsrl_amd_safe(index, &val)) | ||
457 | continue; | ||
458 | printk(KERN_INFO " MSR%08x: %016llx\n", index, val); | ||
459 | } | ||
460 | } | ||
461 | } | ||
462 | |||
463 | static int show_msr __cpuinitdata; | ||
464 | static __init int setup_show_msr(char *arg) | ||
465 | { | ||
466 | int num; | ||
467 | |||
468 | get_option(&arg, &num); | ||
469 | |||
470 | if (num > 0) | ||
471 | show_msr = num; | ||
472 | return 1; | ||
473 | } | ||
474 | __setup("show_msr=", setup_show_msr); | ||
475 | |||
397 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | 476 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) |
398 | { | 477 | { |
399 | if (c->x86_model_id[0]) | 478 | if (c->x86_model_id[0]) |
@@ -403,6 +482,14 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | |||
403 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); | 482 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); |
404 | else | 483 | else |
405 | printk(KERN_CONT "\n"); | 484 | printk(KERN_CONT "\n"); |
485 | |||
486 | #ifdef CONFIG_SMP | ||
487 | if (c->cpu_index < show_msr) | ||
488 | print_cpu_msr(); | ||
489 | #else | ||
490 | if (show_msr) | ||
491 | print_cpu_msr(); | ||
492 | #endif | ||
406 | } | 493 | } |
407 | 494 | ||
408 | static __init int setup_disablecpuid(char *arg) | 495 | static __init int setup_disablecpuid(char *arg) |
@@ -493,17 +580,20 @@ void pda_init(int cpu) | |||
493 | /* others are initialized in smpboot.c */ | 580 | /* others are initialized in smpboot.c */ |
494 | pda->pcurrent = &init_task; | 581 | pda->pcurrent = &init_task; |
495 | pda->irqstackptr = boot_cpu_stack; | 582 | pda->irqstackptr = boot_cpu_stack; |
583 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
496 | } else { | 584 | } else { |
497 | pda->irqstackptr = (char *) | 585 | if (!pda->irqstackptr) { |
498 | __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); | 586 | pda->irqstackptr = (char *) |
499 | if (!pda->irqstackptr) | 587 | __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); |
500 | panic("cannot allocate irqstack for cpu %d", cpu); | 588 | if (!pda->irqstackptr) |
589 | panic("cannot allocate irqstack for cpu %d", | ||
590 | cpu); | ||
591 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
592 | } | ||
501 | 593 | ||
502 | if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) | 594 | if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) |
503 | pda->nodenumber = cpu_to_node(cpu); | 595 | pda->nodenumber = cpu_to_node(cpu); |
504 | } | 596 | } |
505 | |||
506 | pda->irqstackptr += IRQSTACKSIZE-64; | ||
507 | } | 597 | } |
508 | 598 | ||
509 | char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + | 599 | char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + |
@@ -601,19 +691,22 @@ void __cpuinit cpu_init(void) | |||
601 | /* | 691 | /* |
602 | * set up and load the per-CPU TSS | 692 | * set up and load the per-CPU TSS |
603 | */ | 693 | */ |
604 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { | 694 | if (!orig_ist->ist[0]) { |
605 | static const unsigned int order[N_EXCEPTION_STACKS] = { | 695 | static const unsigned int order[N_EXCEPTION_STACKS] = { |
606 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, | 696 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, |
607 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER | 697 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER |
608 | }; | 698 | }; |
609 | if (cpu) { | 699 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { |
610 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); | 700 | if (cpu) { |
611 | if (!estacks) | 701 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); |
612 | panic("Cannot allocate exception stack %ld %d\n", | 702 | if (!estacks) |
613 | v, cpu); | 703 | panic("Cannot allocate exception " |
704 | "stack %ld %d\n", v, cpu); | ||
705 | } | ||
706 | estacks += PAGE_SIZE << order[v]; | ||
707 | orig_ist->ist[v] = t->x86_tss.ist[v] = | ||
708 | (unsigned long)estacks; | ||
614 | } | 709 | } |
615 | estacks += PAGE_SIZE << order[v]; | ||
616 | orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks; | ||
617 | } | 710 | } |
618 | 711 | ||
619 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | 712 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 4e7271999a74..84bb395038d8 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -737,63 +737,44 @@ static int find_psb_table(struct powernow_k8_data *data) | |||
737 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | 737 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI |
738 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) | 738 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) |
739 | { | 739 | { |
740 | if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE)) | 740 | if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) |
741 | return; | 741 | return; |
742 | 742 | ||
743 | data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK; | 743 | data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK; |
744 | data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK; | 744 | data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK; |
745 | data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; | 745 | data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; |
746 | data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; | 746 | data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; |
747 | data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK); | 747 | data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK); |
748 | data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK; | 748 | data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK; |
749 | } | ||
750 | |||
751 | |||
752 | static struct acpi_processor_performance *acpi_perf_data; | ||
753 | static int preregister_valid; | ||
754 | |||
755 | static int powernow_k8_cpu_preinit_acpi(void) | ||
756 | { | ||
757 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); | ||
758 | if (!acpi_perf_data) | ||
759 | return -ENODEV; | ||
760 | |||
761 | if (acpi_processor_preregister_performance(acpi_perf_data)) | ||
762 | return -ENODEV; | ||
763 | else | ||
764 | preregister_valid = 1; | ||
765 | return 0; | ||
766 | } | 749 | } |
767 | 750 | ||
768 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | 751 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) |
769 | { | 752 | { |
770 | struct cpufreq_frequency_table *powernow_table; | 753 | struct cpufreq_frequency_table *powernow_table; |
771 | int ret_val; | 754 | int ret_val; |
772 | int cpu = 0; | ||
773 | 755 | ||
774 | data->acpi_data = percpu_ptr(acpi_perf_data, cpu); | 756 | if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { |
775 | if (acpi_processor_register_performance(data->acpi_data, data->cpu)) { | ||
776 | dprintk("register performance failed: bad ACPI data\n"); | 757 | dprintk("register performance failed: bad ACPI data\n"); |
777 | return -EIO; | 758 | return -EIO; |
778 | } | 759 | } |
779 | 760 | ||
780 | /* verify the data contained in the ACPI structures */ | 761 | /* verify the data contained in the ACPI structures */ |
781 | if (data->acpi_data->state_count <= 1) { | 762 | if (data->acpi_data.state_count <= 1) { |
782 | dprintk("No ACPI P-States\n"); | 763 | dprintk("No ACPI P-States\n"); |
783 | goto err_out; | 764 | goto err_out; |
784 | } | 765 | } |
785 | 766 | ||
786 | if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || | 767 | if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || |
787 | (data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { | 768 | (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { |
788 | dprintk("Invalid control/status registers (%x - %x)\n", | 769 | dprintk("Invalid control/status registers (%x - %x)\n", |
789 | data->acpi_data->control_register.space_id, | 770 | data->acpi_data.control_register.space_id, |
790 | data->acpi_data->status_register.space_id); | 771 | data->acpi_data.status_register.space_id); |
791 | goto err_out; | 772 | goto err_out; |
792 | } | 773 | } |
793 | 774 | ||
794 | /* fill in data->powernow_table */ | 775 | /* fill in data->powernow_table */ |
795 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) | 776 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) |
796 | * (data->acpi_data->state_count + 1)), GFP_KERNEL); | 777 | * (data->acpi_data.state_count + 1)), GFP_KERNEL); |
797 | if (!powernow_table) { | 778 | if (!powernow_table) { |
798 | dprintk("powernow_table memory alloc failure\n"); | 779 | dprintk("powernow_table memory alloc failure\n"); |
799 | goto err_out; | 780 | goto err_out; |
@@ -806,12 +787,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
806 | if (ret_val) | 787 | if (ret_val) |
807 | goto err_out_mem; | 788 | goto err_out_mem; |
808 | 789 | ||
809 | powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END; | 790 | powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; |
810 | powernow_table[data->acpi_data->state_count].index = 0; | 791 | powernow_table[data->acpi_data.state_count].index = 0; |
811 | data->powernow_table = powernow_table; | 792 | data->powernow_table = powernow_table; |
812 | 793 | ||
813 | /* fill in data */ | 794 | /* fill in data */ |
814 | data->numps = data->acpi_data->state_count; | 795 | data->numps = data->acpi_data.state_count; |
815 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) | 796 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) |
816 | print_basics(data); | 797 | print_basics(data); |
817 | powernow_k8_acpi_pst_values(data, 0); | 798 | powernow_k8_acpi_pst_values(data, 0); |
@@ -819,31 +800,16 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
819 | /* notify BIOS that we exist */ | 800 | /* notify BIOS that we exist */ |
820 | acpi_processor_notify_smm(THIS_MODULE); | 801 | acpi_processor_notify_smm(THIS_MODULE); |
821 | 802 | ||
822 | /* determine affinity, from ACPI if available */ | ||
823 | if (preregister_valid) { | ||
824 | if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) || | ||
825 | (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY)) | ||
826 | data->starting_core_affinity = data->acpi_data->shared_cpu_map; | ||
827 | else | ||
828 | data->starting_core_affinity = cpumask_of_cpu(data->cpu); | ||
829 | } else { | ||
830 | /* best guess from family if not */ | ||
831 | if (cpu_family == CPU_HW_PSTATE) | ||
832 | data->starting_core_affinity = cpumask_of_cpu(data->cpu); | ||
833 | else | ||
834 | data->starting_core_affinity = per_cpu(cpu_core_map, data->cpu); | ||
835 | } | ||
836 | |||
837 | return 0; | 803 | return 0; |
838 | 804 | ||
839 | err_out_mem: | 805 | err_out_mem: |
840 | kfree(powernow_table); | 806 | kfree(powernow_table); |
841 | 807 | ||
842 | err_out: | 808 | err_out: |
843 | acpi_processor_unregister_performance(data->acpi_data, data->cpu); | 809 | acpi_processor_unregister_performance(&data->acpi_data, data->cpu); |
844 | 810 | ||
845 | /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ | 811 | /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ |
846 | data->acpi_data->state_count = 0; | 812 | data->acpi_data.state_count = 0; |
847 | 813 | ||
848 | return -ENODEV; | 814 | return -ENODEV; |
849 | } | 815 | } |
@@ -855,10 +821,10 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf | |||
855 | rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo); | 821 | rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo); |
856 | data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; | 822 | data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; |
857 | 823 | ||
858 | for (i = 0; i < data->acpi_data->state_count; i++) { | 824 | for (i = 0; i < data->acpi_data.state_count; i++) { |
859 | u32 index; | 825 | u32 index; |
860 | 826 | ||
861 | index = data->acpi_data->states[i].control & HW_PSTATE_MASK; | 827 | index = data->acpi_data.states[i].control & HW_PSTATE_MASK; |
862 | if (index > data->max_hw_pstate) { | 828 | if (index > data->max_hw_pstate) { |
863 | printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); | 829 | printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); |
864 | printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); | 830 | printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); |
@@ -874,7 +840,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf | |||
874 | 840 | ||
875 | powernow_table[i].index = index; | 841 | powernow_table[i].index = index; |
876 | 842 | ||
877 | powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000; | 843 | powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; |
878 | } | 844 | } |
879 | return 0; | 845 | return 0; |
880 | } | 846 | } |
@@ -883,16 +849,16 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf | |||
883 | { | 849 | { |
884 | int i; | 850 | int i; |
885 | int cntlofreq = 0; | 851 | int cntlofreq = 0; |
886 | for (i = 0; i < data->acpi_data->state_count; i++) { | 852 | for (i = 0; i < data->acpi_data.state_count; i++) { |
887 | u32 fid; | 853 | u32 fid; |
888 | u32 vid; | 854 | u32 vid; |
889 | 855 | ||
890 | if (data->exttype) { | 856 | if (data->exttype) { |
891 | fid = data->acpi_data->states[i].status & EXT_FID_MASK; | 857 | fid = data->acpi_data.states[i].status & EXT_FID_MASK; |
892 | vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK; | 858 | vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK; |
893 | } else { | 859 | } else { |
894 | fid = data->acpi_data->states[i].control & FID_MASK; | 860 | fid = data->acpi_data.states[i].control & FID_MASK; |
895 | vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK; | 861 | vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK; |
896 | } | 862 | } |
897 | 863 | ||
898 | dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); | 864 | dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); |
@@ -933,10 +899,10 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf | |||
933 | cntlofreq = i; | 899 | cntlofreq = i; |
934 | } | 900 | } |
935 | 901 | ||
936 | if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) { | 902 | if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { |
937 | printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", | 903 | printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", |
938 | powernow_table[i].frequency, | 904 | powernow_table[i].frequency, |
939 | (unsigned int) (data->acpi_data->states[i].core_frequency * 1000)); | 905 | (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); |
940 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | 906 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; |
941 | continue; | 907 | continue; |
942 | } | 908 | } |
@@ -946,12 +912,11 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf | |||
946 | 912 | ||
947 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) | 913 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) |
948 | { | 914 | { |
949 | if (data->acpi_data->state_count) | 915 | if (data->acpi_data.state_count) |
950 | acpi_processor_unregister_performance(data->acpi_data, data->cpu); | 916 | acpi_processor_unregister_performance(&data->acpi_data, data->cpu); |
951 | } | 917 | } |
952 | 918 | ||
953 | #else | 919 | #else |
954 | static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; } | ||
955 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } | 920 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } |
956 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } | 921 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } |
957 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } | 922 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } |
@@ -1136,7 +1101,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol) | |||
1136 | static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | 1101 | static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) |
1137 | { | 1102 | { |
1138 | struct powernow_k8_data *data; | 1103 | struct powernow_k8_data *data; |
1139 | cpumask_t oldmask = CPU_MASK_ALL; | 1104 | cpumask_t oldmask; |
1140 | int rc; | 1105 | int rc; |
1141 | 1106 | ||
1142 | if (!cpu_online(pol->cpu)) | 1107 | if (!cpu_online(pol->cpu)) |
@@ -1209,7 +1174,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1209 | /* run on any CPU again */ | 1174 | /* run on any CPU again */ |
1210 | set_cpus_allowed_ptr(current, &oldmask); | 1175 | set_cpus_allowed_ptr(current, &oldmask); |
1211 | 1176 | ||
1212 | pol->cpus = data->starting_core_affinity; | 1177 | if (cpu_family == CPU_HW_PSTATE) |
1178 | pol->cpus = cpumask_of_cpu(pol->cpu); | ||
1179 | else | ||
1180 | pol->cpus = per_cpu(cpu_core_map, pol->cpu); | ||
1213 | data->available_cores = &(pol->cpus); | 1181 | data->available_cores = &(pol->cpus); |
1214 | 1182 | ||
1215 | /* Take a crude guess here. | 1183 | /* Take a crude guess here. |
@@ -1332,7 +1300,6 @@ static int __cpuinit powernowk8_init(void) | |||
1332 | } | 1300 | } |
1333 | 1301 | ||
1334 | if (supported_cpus == num_online_cpus()) { | 1302 | if (supported_cpus == num_online_cpus()) { |
1335 | powernow_k8_cpu_preinit_acpi(); | ||
1336 | printk(KERN_INFO PFX "Found %d %s " | 1303 | printk(KERN_INFO PFX "Found %d %s " |
1337 | "processors (%d cpu cores) (" VERSION ")\n", | 1304 | "processors (%d cpu cores) (" VERSION ")\n", |
1338 | num_online_nodes(), | 1305 | num_online_nodes(), |
@@ -1349,10 +1316,6 @@ static void __exit powernowk8_exit(void) | |||
1349 | dprintk("exit\n"); | 1316 | dprintk("exit\n"); |
1350 | 1317 | ||
1351 | cpufreq_unregister_driver(&cpufreq_amd64_driver); | 1318 | cpufreq_unregister_driver(&cpufreq_amd64_driver); |
1352 | |||
1353 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | ||
1354 | free_percpu(acpi_perf_data); | ||
1355 | #endif | ||
1356 | } | 1319 | } |
1357 | 1320 | ||
1358 | MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>"); | 1321 | MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>"); |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index a62612cd4be8..ab48cfed4d96 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h | |||
@@ -33,13 +33,12 @@ struct powernow_k8_data { | |||
33 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | 33 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI |
34 | /* the acpi table needs to be kept. it's only available if ACPI was | 34 | /* the acpi table needs to be kept. it's only available if ACPI was |
35 | * used to determine valid frequency/vid/fid states */ | 35 | * used to determine valid frequency/vid/fid states */ |
36 | struct acpi_processor_performance *acpi_data; | 36 | struct acpi_processor_performance acpi_data; |
37 | #endif | 37 | #endif |
38 | /* we need to keep track of associated cores, but let cpufreq | 38 | /* we need to keep track of associated cores, but let cpufreq |
39 | * handle hotplug events - so just point at cpufreq pol->cpus | 39 | * handle hotplug events - so just point at cpufreq pol->cpus |
40 | * structure */ | 40 | * structure */ |
41 | cpumask_t *available_cores; | 41 | cpumask_t *available_cores; |
42 | cpumask_t starting_core_affinity; | ||
43 | }; | 42 | }; |
44 | 43 | ||
45 | 44 | ||
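Editor's note: the powernow-k8 hunks above revert the driver from a pointer into a shared alloc_percpu() area (plus the preregister/affinity bookkeeping) back to an acpi_processor_performance table embedded directly in struct powernow_k8_data, which is why every data->acpi_data->field access becomes data->acpi_data.field. A minimal sketch of the two layouts, with the struct trimmed to one illustrative field:

    /* Illustrative only -- fields reduced to what the diff touches. */
    struct perf_table {                     /* stands in for acpi_processor_performance */
            unsigned int state_count;
    };

    /* before: driver kept a pointer into a percpu allocation */
    struct k8_data_old {
            struct perf_table *acpi_data;   /* data->acpi_data->state_count */
    };

    /* after: the table is embedded and registered per CPU at init time */
    struct k8_data_new {
            struct perf_table acpi_data;    /* data->acpi_data.state_count  */
    };

Embedding the table ties its lifetime to the per-policy powernow_k8_data allocation, so the module no longer needs the acpi_perf_data percpu area, the preinit step, or the free_percpu() call at exit, and pol->cpus is again derived from cpu_family at init time.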
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 3fd7a67bb06a..898a5a2002ed 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -15,13 +15,11 @@ | |||
15 | /* | 15 | /* |
16 | * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU | 16 | * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU |
17 | */ | 17 | */ |
18 | static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | 18 | static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) |
19 | { | 19 | { |
20 | unsigned char ccr2, ccr3; | 20 | unsigned char ccr2, ccr3; |
21 | unsigned long flags; | ||
22 | 21 | ||
23 | /* we test for DEVID by checking whether CCR3 is writable */ | 22 | /* we test for DEVID by checking whether CCR3 is writable */ |
24 | local_irq_save(flags); | ||
25 | ccr3 = getCx86(CX86_CCR3); | 23 | ccr3 = getCx86(CX86_CCR3); |
26 | setCx86(CX86_CCR3, ccr3 ^ 0x80); | 24 | setCx86(CX86_CCR3, ccr3 ^ 0x80); |
27 | getCx86(0xc0); /* dummy to change bus */ | 25 | getCx86(0xc0); /* dummy to change bus */ |
@@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
44 | *dir0 = getCx86(CX86_DIR0); | 42 | *dir0 = getCx86(CX86_DIR0); |
45 | *dir1 = getCx86(CX86_DIR1); | 43 | *dir1 = getCx86(CX86_DIR1); |
46 | } | 44 | } |
47 | local_irq_restore(flags); | ||
48 | } | 45 | } |
49 | 46 | ||
47 | static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | ||
48 | { | ||
49 | unsigned long flags; | ||
50 | |||
51 | local_irq_save(flags); | ||
52 | __do_cyrix_devid(dir0, dir1); | ||
53 | local_irq_restore(flags); | ||
54 | } | ||
50 | /* | 55 | /* |
51 | * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in | 56 | * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in |
52 | * order to identify the Cyrix CPU model after we're out of setup.c | 57 | * order to identify the Cyrix CPU model after we're out of setup.c |
@@ -134,23 +139,6 @@ static void __cpuinit set_cx86_memwb(void) | |||
134 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); | 139 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); |
135 | } | 140 | } |
136 | 141 | ||
137 | static void __cpuinit set_cx86_inc(void) | ||
138 | { | ||
139 | unsigned char ccr3; | ||
140 | |||
141 | printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n"); | ||
142 | |||
143 | ccr3 = getCx86(CX86_CCR3); | ||
144 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | ||
145 | /* PCR1 -- Performance Control */ | ||
146 | /* Incrementor on, whatever that is */ | ||
147 | setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02); | ||
148 | /* PCR0 -- Performance Control */ | ||
149 | /* Incrementor Margin 10 */ | ||
150 | setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); | ||
151 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | ||
152 | } | ||
153 | |||
154 | /* | 142 | /* |
155 | * Configure later MediaGX and/or Geode processor. | 143 | * Configure later MediaGX and/or Geode processor. |
156 | */ | 144 | */ |
@@ -174,11 +162,28 @@ static void __cpuinit geode_configure(void) | |||
174 | 162 | ||
175 | set_cx86_memwb(); | 163 | set_cx86_memwb(); |
176 | set_cx86_reorder(); | 164 | set_cx86_reorder(); |
177 | set_cx86_inc(); | ||
178 | 165 | ||
179 | local_irq_restore(flags); | 166 | local_irq_restore(flags); |
180 | } | 167 | } |
181 | 168 | ||
169 | static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c) | ||
170 | { | ||
171 | unsigned char dir0, dir0_msn, dir1 = 0; | ||
172 | |||
173 | __do_cyrix_devid(&dir0, &dir1); | ||
174 | dir0_msn = dir0 >> 4; /* identifies CPU "family" */ | ||
175 | |||
176 | switch (dir0_msn) { | ||
177 | case 3: /* 6x86/6x86L */ | ||
178 | /* Emulate MTRRs using Cyrix's ARRs. */ | ||
179 | set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); | ||
180 | break; | ||
181 | case 5: /* 6x86MX/M II */ | ||
182 | /* Emulate MTRRs using Cyrix's ARRs. */ | ||
183 | set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); | ||
184 | break; | ||
185 | } | ||
186 | } | ||
182 | 187 | ||
183 | static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | 188 | static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) |
184 | { | 189 | { |
@@ -434,6 +439,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
434 | static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { | 439 | static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { |
435 | .c_vendor = "Cyrix", | 440 | .c_vendor = "Cyrix", |
436 | .c_ident = { "CyrixInstead" }, | 441 | .c_ident = { "CyrixInstead" }, |
442 | .c_early_init = early_init_cyrix, | ||
437 | .c_init = init_cyrix, | 443 | .c_init = init_cyrix, |
438 | .c_identify = cyrix_identify, | 444 | .c_identify = cyrix_identify, |
439 | }; | 445 | }; |
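Editor's note: the Cyrix change splits the DEVID probe into a raw __do_cyrix_devid() that assumes interrupts are already disabled and a do_cyrix_devid() wrapper that handles local_irq_save()/restore(), so the new early_init_cyrix() hook can call the raw helper in early-boot context. A generic sketch of that split, with made-up names (read_device_register() is an assumed accessor, not a real kernel API):

    /* Hypothetical names -- illustrating the irq-wrapper split only. */
    static void __probe_hw(unsigned char *id)   /* caller guarantees IRQs off */
    {
            *id = read_device_register();       /* assumed accessor */
    }

    static void probe_hw(unsigned char *id)     /* safe from any context */
    {
            unsigned long flags;

            local_irq_save(flags);
            __probe_hw(id);
            local_irq_restore(flags);
    }

The double-underscore prefix is the usual kernel convention for "caller already holds the lock / has interrupts disabled" variants; the unused set_cx86_inc() helper is dropped at the same time.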
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c index e43ad4ad4cba..c9017799497c 100644 --- a/arch/x86/kernel/cpu/feature_names.c +++ b/arch/x86/kernel/cpu/feature_names.c | |||
@@ -39,7 +39,8 @@ const char * const x86_cap_flags[NCAPINTS*32] = { | |||
39 | NULL, NULL, NULL, NULL, | 39 | NULL, NULL, NULL, NULL, |
40 | "constant_tsc", "up", NULL, "arch_perfmon", | 40 | "constant_tsc", "up", NULL, "arch_perfmon", |
41 | "pebs", "bts", NULL, NULL, | 41 | "pebs", "bts", NULL, NULL, |
42 | "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 42 | "rep_good", NULL, NULL, NULL, |
43 | "nopl", NULL, NULL, NULL, | ||
43 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 44 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
44 | 45 | ||
45 | /* Intel-defined (#2) */ | 46 | /* Intel-defined (#2) */ |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b75f2569b8f8..f113ef4595f6 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -222,10 +222,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
222 | set_cpu_cap(c, X86_FEATURE_BTS); | 222 | set_cpu_cap(c, X86_FEATURE_BTS); |
223 | if (!(l1 & (1<<12))) | 223 | if (!(l1 & (1<<12))) |
224 | set_cpu_cap(c, X86_FEATURE_PEBS); | 224 | set_cpu_cap(c, X86_FEATURE_PEBS); |
225 | ds_init_intel(c); | ||
225 | } | 226 | } |
226 | 227 | ||
227 | if (cpu_has_bts) | 228 | if (cpu_has_bts) |
228 | ds_init_intel(c); | 229 | ptrace_bts_init_intel(c); |
229 | 230 | ||
230 | /* | 231 | /* |
231 | * See if we have a good local APIC by checking for buggy Pentia, | 232 | * See if we have a good local APIC by checking for buggy Pentia, |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index 65a339678ece..726a5fcdf341 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
@@ -759,6 +759,7 @@ static struct sysdev_class mce_sysclass = { | |||
759 | }; | 759 | }; |
760 | 760 | ||
761 | DEFINE_PER_CPU(struct sys_device, device_mce); | 761 | DEFINE_PER_CPU(struct sys_device, device_mce); |
762 | void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata; | ||
762 | 763 | ||
763 | /* Why are there no generic functions for this? */ | 764 | /* Why are there no generic functions for this? */ |
764 | #define ACCESSOR(name, var, start) \ | 765 | #define ACCESSOR(name, var, start) \ |
@@ -883,9 +884,13 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, | |||
883 | case CPU_ONLINE: | 884 | case CPU_ONLINE: |
884 | case CPU_ONLINE_FROZEN: | 885 | case CPU_ONLINE_FROZEN: |
885 | mce_create_device(cpu); | 886 | mce_create_device(cpu); |
887 | if (threshold_cpu_callback) | ||
888 | threshold_cpu_callback(action, cpu); | ||
886 | break; | 889 | break; |
887 | case CPU_DEAD: | 890 | case CPU_DEAD: |
888 | case CPU_DEAD_FROZEN: | 891 | case CPU_DEAD_FROZEN: |
892 | if (threshold_cpu_callback) | ||
893 | threshold_cpu_callback(action, cpu); | ||
889 | mce_remove_device(cpu); | 894 | mce_remove_device(cpu); |
890 | break; | 895 | break; |
891 | } | 896 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 88736cadbaa6..5eb390a4b2e9 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c | |||
@@ -628,6 +628,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) | |||
628 | deallocate_threshold_block(cpu, bank); | 628 | deallocate_threshold_block(cpu, bank); |
629 | 629 | ||
630 | free_out: | 630 | free_out: |
631 | kobject_del(b->kobj); | ||
631 | kobject_put(b->kobj); | 632 | kobject_put(b->kobj); |
632 | kfree(b); | 633 | kfree(b); |
633 | per_cpu(threshold_banks, cpu)[bank] = NULL; | 634 | per_cpu(threshold_banks, cpu)[bank] = NULL; |
@@ -645,14 +646,11 @@ static void threshold_remove_device(unsigned int cpu) | |||
645 | } | 646 | } |
646 | 647 | ||
647 | /* get notified when a cpu comes on/off */ | 648 | /* get notified when a cpu comes on/off */ |
648 | static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb, | 649 | static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action, |
649 | unsigned long action, void *hcpu) | 650 | unsigned int cpu) |
650 | { | 651 | { |
651 | /* cpu was unsigned int to begin with */ | ||
652 | unsigned int cpu = (unsigned long)hcpu; | ||
653 | |||
654 | if (cpu >= NR_CPUS) | 652 | if (cpu >= NR_CPUS) |
655 | goto out; | 653 | return; |
656 | 654 | ||
657 | switch (action) { | 655 | switch (action) { |
658 | case CPU_ONLINE: | 656 | case CPU_ONLINE: |
@@ -666,14 +664,8 @@ static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb, | |||
666 | default: | 664 | default: |
667 | break; | 665 | break; |
668 | } | 666 | } |
669 | out: | ||
670 | return NOTIFY_OK; | ||
671 | } | 667 | } |
672 | 668 | ||
673 | static struct notifier_block threshold_cpu_notifier __cpuinitdata = { | ||
674 | .notifier_call = threshold_cpu_callback, | ||
675 | }; | ||
676 | |||
677 | static __init int threshold_init_device(void) | 669 | static __init int threshold_init_device(void) |
678 | { | 670 | { |
679 | unsigned lcpu = 0; | 671 | unsigned lcpu = 0; |
@@ -684,7 +676,7 @@ static __init int threshold_init_device(void) | |||
684 | if (err) | 676 | if (err) |
685 | return err; | 677 | return err; |
686 | } | 678 | } |
687 | register_hotcpu_notifier(&threshold_cpu_notifier); | 679 | threshold_cpu_callback = amd_64_threshold_cpu_callback; |
688 | return 0; | 680 | return 0; |
689 | } | 681 | } |
690 | 682 | ||
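Editor's note: across mce_64.c and mce_amd_64.c the AMD-specific CPU-hotplug notifier is replaced by a single threshold_cpu_callback function pointer that the generic MCE notifier invokes around device creation/removal (and the bank teardown now does kobject_del() before the final kobject_put()). A minimal sketch of the hook pattern, with purely illustrative names:

    /* generic side: one optional hook, called from the existing notifier */
    void (*vendor_cpu_hook)(unsigned long action, unsigned int cpu);

    static void generic_cpu_event(unsigned long action, unsigned int cpu)
    {
            if (vendor_cpu_hook)
                    vendor_cpu_hook(action, cpu);   /* no-op unless a vendor registered */
    }

    /* vendor side: install the hook once at init time */
    static void amd_cpu_hook(unsigned long action, unsigned int cpu)
    {
            /* create/remove per-bank sysfs objects for this cpu */
    }

    static int __init vendor_init(void)
    {
            vendor_cpu_hook = amd_cpu_hook;
            return 0;
    }

This removes one hotcpu notifier registration and guarantees the vendor hook runs in a fixed order relative to mce_create_device()/mce_remove_device().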
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 509bd3d9eacd..4e8d77f01eeb 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -379,6 +379,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
379 | unsigned long *size, mtrr_type *type) | 379 | unsigned long *size, mtrr_type *type) |
380 | { | 380 | { |
381 | unsigned int mask_lo, mask_hi, base_lo, base_hi; | 381 | unsigned int mask_lo, mask_hi, base_lo, base_hi; |
382 | unsigned int tmp, hi; | ||
382 | 383 | ||
383 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); | 384 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); |
384 | if ((mask_lo & 0x800) == 0) { | 385 | if ((mask_lo & 0x800) == 0) { |
@@ -392,8 +393,18 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
392 | rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); | 393 | rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); |
393 | 394 | ||
394 | /* Work out the shifted address mask. */ | 395 | /* Work out the shifted address mask. */ |
395 | mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT) | 396 | tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT; |
396 | | mask_lo >> PAGE_SHIFT; | 397 | mask_lo = size_or_mask | tmp; |
398 | /* Expand tmp with high bits to all 1s*/ | ||
399 | hi = fls(tmp); | ||
400 | if (hi > 0) { | ||
401 | tmp |= ~((1<<(hi - 1)) - 1); | ||
402 | |||
403 | if (tmp != mask_lo) { | ||
404 | WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n"); | ||
405 | mask_lo = tmp; | ||
406 | } | ||
407 | } | ||
397 | 408 | ||
398 | /* This works correctly if size is a power of two, i.e. a | 409 | /* This works correctly if size is a power of two, i.e. a |
399 | contiguous range. */ | 410 | contiguous range. */ |
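Editor's note: the generic_get_mtrr() fix extends the shifted mask with all-ones above its highest set bit, so a BIOS that programmed too few mask bits still yields a contiguous, power-of-two size. A worked example of the computation as a user-space sketch (fls() emulated with a GCC builtin, PAGE_SHIFT assumed to be 12):

    #include <stdio.h>

    static int fls_emul(unsigned int x)          /* 1-based index of top set bit */
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
            unsigned int mask_hi = 0xf, mask_lo = 0xff800000;  /* sample MSR halves */
            unsigned int tmp = mask_hi << (32 - 12) | mask_lo >> 12;
            int hi = fls_emul(tmp);                            /* 24 for this value */

            if (hi > 0)
                    tmp |= ~((1u << (hi - 1)) - 1);            /* fill bits above it */

            printf("%#x\n", tmp);    /* 0x00fff800 becomes 0xfffff800 */
            return 0;
    }

If the fixed-up value differs from size_or_mask | tmp, the driver warns once about the BIOS and uses the corrected mask, as the hunk above shows.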
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 84c480bb3715..4c4214690dd1 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c | |||
@@ -405,9 +405,9 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset) | |||
405 | } | 405 | } |
406 | /* RED-PEN: base can be > 32bit */ | 406 | /* RED-PEN: base can be > 32bit */ |
407 | len += seq_printf(seq, | 407 | len += seq_printf(seq, |
408 | "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n", | 408 | "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n", |
409 | i, base, base >> (20 - PAGE_SHIFT), size, factor, | 409 | i, base, base >> (20 - PAGE_SHIFT), size, factor, |
410 | mtrr_attrib_to_str(type), mtrr_usage_table[i]); | 410 | mtrr_usage_table[i], mtrr_attrib_to_str(type)); |
411 | } | 411 | } |
412 | } | 412 | } |
413 | return 0; | 413 | return 0; |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 6f23969c8faf..c78c04821ea1 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -729,7 +729,7 @@ struct var_mtrr_range_state { | |||
729 | mtrr_type type; | 729 | mtrr_type type; |
730 | }; | 730 | }; |
731 | 731 | ||
732 | struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | 732 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; |
733 | static int __initdata debug_print; | 733 | static int __initdata debug_print; |
734 | 734 | ||
735 | static int __init | 735 | static int __init |
@@ -759,7 +759,8 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
759 | /* take out UC ranges */ | 759 | /* take out UC ranges */ |
760 | for (i = 0; i < num_var_ranges; i++) { | 760 | for (i = 0; i < num_var_ranges; i++) { |
761 | type = range_state[i].type; | 761 | type = range_state[i].type; |
762 | if (type != MTRR_TYPE_UNCACHABLE) | 762 | if (type != MTRR_TYPE_UNCACHABLE && |
763 | type != MTRR_TYPE_WRPROT) | ||
763 | continue; | 764 | continue; |
764 | size = range_state[i].size_pfn; | 765 | size = range_state[i].size_pfn; |
765 | if (!size) | 766 | if (!size) |
@@ -834,7 +835,14 @@ static int __init enable_mtrr_cleanup_setup(char *str) | |||
834 | enable_mtrr_cleanup = 1; | 835 | enable_mtrr_cleanup = 1; |
835 | return 0; | 836 | return 0; |
836 | } | 837 | } |
837 | early_param("enble_mtrr_cleanup", enable_mtrr_cleanup_setup); | 838 | early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); |
839 | |||
840 | static int __init mtrr_cleanup_debug_setup(char *str) | ||
841 | { | ||
842 | debug_print = 1; | ||
843 | return 0; | ||
844 | } | ||
845 | early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); | ||
838 | 846 | ||
839 | struct var_mtrr_state { | 847 | struct var_mtrr_state { |
840 | unsigned long range_startk; | 848 | unsigned long range_startk; |
@@ -898,6 +906,27 @@ set_var_mtrr_all(unsigned int address_bits) | |||
898 | } | 906 | } |
899 | } | 907 | } |
900 | 908 | ||
909 | static unsigned long to_size_factor(unsigned long sizek, char *factorp) | ||
910 | { | ||
911 | char factor; | ||
912 | unsigned long base = sizek; | ||
913 | |||
914 | if (base & ((1<<10) - 1)) { | ||
915 | /* not MB alignment */ | ||
916 | factor = 'K'; | ||
917 | } else if (base & ((1<<20) - 1)){ | ||
918 | factor = 'M'; | ||
919 | base >>= 10; | ||
920 | } else { | ||
921 | factor = 'G'; | ||
922 | base >>= 20; | ||
923 | } | ||
924 | |||
925 | *factorp = factor; | ||
926 | |||
927 | return base; | ||
928 | } | ||
929 | |||
901 | static unsigned int __init | 930 | static unsigned int __init |
902 | range_to_mtrr(unsigned int reg, unsigned long range_startk, | 931 | range_to_mtrr(unsigned int reg, unsigned long range_startk, |
903 | unsigned long range_sizek, unsigned char type) | 932 | unsigned long range_sizek, unsigned char type) |
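Editor's note: to_size_factor() picks the largest unit that evenly divides the kilobyte count, so the debug output below can print "1536K", "2M" or "1G" instead of always rounding to megabytes. A quick usage illustration (values chosen arbitrarily):

    /* sizek (KB)  ->  printed as
     *      1536   ->  1536K   (not MB-aligned, stays in kilobytes)
     *      2048   ->     2M   (MB-aligned, not GB-aligned)
     *   1048576   ->     1G   (GB-aligned)
     */
    char factor;
    unsigned long base = to_size_factor(2048, &factor);   /* base == 2, factor == 'M' */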
@@ -919,13 +948,21 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk, | |||
919 | align = max_align; | 948 | align = max_align; |
920 | 949 | ||
921 | sizek = 1 << align; | 950 | sizek = 1 << align; |
922 | if (debug_print) | 951 | if (debug_print) { |
952 | char start_factor = 'K', size_factor = 'K'; | ||
953 | unsigned long start_base, size_base; | ||
954 | |||
955 | start_base = to_size_factor(range_startk, &start_factor), | ||
956 | size_base = to_size_factor(sizek, &size_factor), | ||
957 | |||
923 | printk(KERN_DEBUG "Setting variable MTRR %d, " | 958 | printk(KERN_DEBUG "Setting variable MTRR %d, " |
924 | "base: %ldMB, range: %ldMB, type %s\n", | 959 | "base: %ld%cB, range: %ld%cB, type %s\n", |
925 | reg, range_startk >> 10, sizek >> 10, | 960 | reg, start_base, start_factor, |
961 | size_base, size_factor, | ||
926 | (type == MTRR_TYPE_UNCACHABLE)?"UC": | 962 | (type == MTRR_TYPE_UNCACHABLE)?"UC": |
927 | ((type == MTRR_TYPE_WRBACK)?"WB":"Other") | 963 | ((type == MTRR_TYPE_WRBACK)?"WB":"Other") |
928 | ); | 964 | ); |
965 | } | ||
929 | save_var_mtrr(reg++, range_startk, sizek, type); | 966 | save_var_mtrr(reg++, range_startk, sizek, type); |
930 | range_startk += sizek; | 967 | range_startk += sizek; |
931 | range_sizek -= sizek; | 968 | range_sizek -= sizek; |
@@ -970,6 +1007,8 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
970 | /* try to append some small hole */ | 1007 | /* try to append some small hole */ |
971 | range0_basek = state->range_startk; | 1008 | range0_basek = state->range_startk; |
972 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); | 1009 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); |
1010 | |||
1011 | /* no increase */ | ||
973 | if (range0_sizek == state->range_sizek) { | 1012 | if (range0_sizek == state->range_sizek) { |
974 | if (debug_print) | 1013 | if (debug_print) |
975 | printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", | 1014 | printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", |
@@ -980,13 +1019,40 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
980 | return 0; | 1019 | return 0; |
981 | } | 1020 | } |
982 | 1021 | ||
983 | range0_sizek -= chunk_sizek; | 1022 | /* only cut back, when it is not the last */ |
984 | if (range0_sizek && sizek) { | 1023 | if (sizek) { |
985 | while (range0_basek + range0_sizek > (basek + sizek)) { | 1024 | while (range0_basek + range0_sizek > (basek + sizek)) { |
986 | range0_sizek -= chunk_sizek; | 1025 | if (range0_sizek >= chunk_sizek) |
987 | if (!range0_sizek) | 1026 | range0_sizek -= chunk_sizek; |
988 | break; | 1027 | else |
989 | } | 1028 | range0_sizek = 0; |
1029 | |||
1030 | if (!range0_sizek) | ||
1031 | break; | ||
1032 | } | ||
1033 | } | ||
1034 | |||
1035 | second_try: | ||
1036 | range_basek = range0_basek + range0_sizek; | ||
1037 | |||
1038 | /* one hole in the middle */ | ||
1039 | if (range_basek > basek && range_basek <= (basek + sizek)) | ||
1040 | second_sizek = range_basek - basek; | ||
1041 | |||
1042 | if (range0_sizek > state->range_sizek) { | ||
1043 | |||
1044 | /* one hole in middle or at end */ | ||
1045 | hole_sizek = range0_sizek - state->range_sizek - second_sizek; | ||
1046 | |||
1047 | /* hole size should be less than half of range0 size */ | ||
1048 | if (hole_sizek >= (range0_sizek >> 1) && | ||
1049 | range0_sizek >= chunk_sizek) { | ||
1050 | range0_sizek -= chunk_sizek; | ||
1051 | second_sizek = 0; | ||
1052 | hole_sizek = 0; | ||
1053 | |||
1054 | goto second_try; | ||
1055 | } | ||
990 | } | 1056 | } |
991 | 1057 | ||
992 | if (range0_sizek) { | 1058 | if (range0_sizek) { |
@@ -996,50 +1062,28 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
996 | (range0_basek + range0_sizek)<<10); | 1062 | (range0_basek + range0_sizek)<<10); |
997 | state->reg = range_to_mtrr(state->reg, range0_basek, | 1063 | state->reg = range_to_mtrr(state->reg, range0_basek, |
998 | range0_sizek, MTRR_TYPE_WRBACK); | 1064 | range0_sizek, MTRR_TYPE_WRBACK); |
999 | |||
1000 | } | ||
1001 | |||
1002 | range_basek = range0_basek + range0_sizek; | ||
1003 | range_sizek = chunk_sizek; | ||
1004 | |||
1005 | if (range_basek + range_sizek > basek && | ||
1006 | range_basek + range_sizek <= (basek + sizek)) { | ||
1007 | /* one hole */ | ||
1008 | second_basek = basek; | ||
1009 | second_sizek = range_basek + range_sizek - basek; | ||
1010 | } | 1065 | } |
1011 | 1066 | ||
1012 | /* if last piece, only could one hole near end */ | 1067 | if (range0_sizek < state->range_sizek) { |
1013 | if ((second_basek || !basek) && | 1068 | /* need to handle left over */ |
1014 | range_sizek - (state->range_sizek - range0_sizek) - second_sizek < | ||
1015 | (chunk_sizek >> 1)) { | ||
1016 | /* | ||
1017 | * one hole in middle (second_sizek is 0) or at end | ||
1018 | * (second_sizek is 0 ) | ||
1019 | */ | ||
1020 | hole_sizek = range_sizek - (state->range_sizek - range0_sizek) | ||
1021 | - second_sizek; | ||
1022 | hole_basek = range_basek + range_sizek - hole_sizek | ||
1023 | - second_sizek; | ||
1024 | } else { | ||
1025 | /* fallback for big hole, or several holes */ | ||
1026 | range_sizek = state->range_sizek - range0_sizek; | 1069 | range_sizek = state->range_sizek - range0_sizek; |
1027 | second_basek = 0; | 1070 | |
1028 | second_sizek = 0; | 1071 | if (debug_print) |
1072 | printk(KERN_DEBUG "range: %016lx - %016lx\n", | ||
1073 | range_basek<<10, | ||
1074 | (range_basek + range_sizek)<<10); | ||
1075 | state->reg = range_to_mtrr(state->reg, range_basek, | ||
1076 | range_sizek, MTRR_TYPE_WRBACK); | ||
1029 | } | 1077 | } |
1030 | 1078 | ||
1031 | if (debug_print) | ||
1032 | printk(KERN_DEBUG "range: %016lx - %016lx\n", range_basek<<10, | ||
1033 | (range_basek + range_sizek)<<10); | ||
1034 | state->reg = range_to_mtrr(state->reg, range_basek, range_sizek, | ||
1035 | MTRR_TYPE_WRBACK); | ||
1036 | if (hole_sizek) { | 1079 | if (hole_sizek) { |
1080 | hole_basek = range_basek - hole_sizek - second_sizek; | ||
1037 | if (debug_print) | 1081 | if (debug_print) |
1038 | printk(KERN_DEBUG "hole: %016lx - %016lx\n", | 1082 | printk(KERN_DEBUG "hole: %016lx - %016lx\n", |
1039 | hole_basek<<10, (hole_basek + hole_sizek)<<10); | 1083 | hole_basek<<10, |
1040 | state->reg = range_to_mtrr(state->reg, hole_basek, hole_sizek, | 1084 | (hole_basek + hole_sizek)<<10); |
1041 | MTRR_TYPE_UNCACHABLE); | 1085 | state->reg = range_to_mtrr(state->reg, hole_basek, |
1042 | 1086 | hole_sizek, MTRR_TYPE_UNCACHABLE); | |
1043 | } | 1087 | } |
1044 | 1088 | ||
1045 | return second_sizek; | 1089 | return second_sizek; |
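Editor's note: the rewritten range_to_mtrr_with_hole() rounds the WB range up to a chunk multiple and then either trims chunks off the end or punches an UNCACHABLE hole, retrying via second_try when the hole would be too large. A worked example of the trade-off it is weighing (numbers made up):

    /*
     * Covering 1408M of RAM:
     *
     *   split into powers of two:   WB 1024M + WB 256M + WB 128M  -> 3 registers
     *   round up and punch a hole:  WB 1536M + UC 128M hole       -> 2 registers
     *
     * The check above rejects the hole variant when hole_sizek would be
     * at least half of the rounded WB range (range0_sizek >> 1) and falls
     * back to cutting a chunk off range0 instead.
     */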
@@ -1154,11 +1198,11 @@ struct mtrr_cleanup_result { | |||
1154 | }; | 1198 | }; |
1155 | 1199 | ||
1156 | /* | 1200 | /* |
1157 | * gran_size: 1M, 2M, ..., 2G | 1201 | * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G |
1158 | * chunk size: gran_size, ..., 4G | 1202 | * chunk size: gran_size, ..., 2G |
1159 | * so we need (2+13)*6 | 1203 | * so we need (1+16)*8 |
1160 | */ | 1204 | */ |
1161 | #define NUM_RESULT 90 | 1205 | #define NUM_RESULT 136 |
1162 | #define PSHIFT (PAGE_SHIFT - 10) | 1206 | #define PSHIFT (PAGE_SHIFT - 10) |
1163 | 1207 | ||
1164 | static struct mtrr_cleanup_result __initdata result[NUM_RESULT]; | 1208 | static struct mtrr_cleanup_result __initdata result[NUM_RESULT]; |
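Editor's note: the "(1+16)*8" in the comment is the closed form for the enlarged search space. gran_size now starts at 64K, giving 16 granularities (2^16 ... 2^31), and for each one chunk_size runs from gran_size up to 2G, so the number of (gran, chunk) combinations is

    16 + 15 + ... + 1 = 16 * 17 / 2 = (1 + 16) * 8 = 136

which is why NUM_RESULT grows from 90 to 136.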
@@ -1168,13 +1212,14 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM]; | |||
1168 | static int __init mtrr_cleanup(unsigned address_bits) | 1212 | static int __init mtrr_cleanup(unsigned address_bits) |
1169 | { | 1213 | { |
1170 | unsigned long extra_remove_base, extra_remove_size; | 1214 | unsigned long extra_remove_base, extra_remove_size; |
1171 | unsigned long i, base, size, def, dummy; | 1215 | unsigned long base, size, def, dummy; |
1172 | mtrr_type type; | 1216 | mtrr_type type; |
1173 | int nr_range, nr_range_new; | 1217 | int nr_range, nr_range_new; |
1174 | u64 chunk_size, gran_size; | 1218 | u64 chunk_size, gran_size; |
1175 | unsigned long range_sums, range_sums_new; | 1219 | unsigned long range_sums, range_sums_new; |
1176 | int index_good; | 1220 | int index_good; |
1177 | int num_reg_good; | 1221 | int num_reg_good; |
1222 | int i; | ||
1178 | 1223 | ||
1179 | /* extra one for all 0 */ | 1224 | /* extra one for all 0 */ |
1180 | int num[MTRR_NUM_TYPES + 1]; | 1225 | int num[MTRR_NUM_TYPES + 1]; |
@@ -1204,6 +1249,8 @@ static int __init mtrr_cleanup(unsigned address_bits) | |||
1204 | continue; | 1249 | continue; |
1205 | if (!size) | 1250 | if (!size) |
1206 | type = MTRR_NUM_TYPES; | 1251 | type = MTRR_NUM_TYPES; |
1252 | if (type == MTRR_TYPE_WRPROT) | ||
1253 | type = MTRR_TYPE_UNCACHABLE; | ||
1207 | num[type]++; | 1254 | num[type]++; |
1208 | } | 1255 | } |
1209 | 1256 | ||
@@ -1216,23 +1263,57 @@ static int __init mtrr_cleanup(unsigned address_bits) | |||
1216 | num_var_ranges - num[MTRR_NUM_TYPES]) | 1263 | num_var_ranges - num[MTRR_NUM_TYPES]) |
1217 | return 0; | 1264 | return 0; |
1218 | 1265 | ||
1266 | /* print original var MTRRs at first, for debugging: */ | ||
1267 | printk(KERN_DEBUG "original variable MTRRs\n"); | ||
1268 | for (i = 0; i < num_var_ranges; i++) { | ||
1269 | char start_factor = 'K', size_factor = 'K'; | ||
1270 | unsigned long start_base, size_base; | ||
1271 | |||
1272 | size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10); | ||
1273 | if (!size_base) | ||
1274 | continue; | ||
1275 | |||
1276 | size_base = to_size_factor(size_base, &size_factor), | ||
1277 | start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10); | ||
1278 | start_base = to_size_factor(start_base, &start_factor), | ||
1279 | type = range_state[i].type; | ||
1280 | |||
1281 | printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n", | ||
1282 | i, start_base, start_factor, | ||
1283 | size_base, size_factor, | ||
1284 | (type == MTRR_TYPE_UNCACHABLE) ? "UC" : | ||
1285 | ((type == MTRR_TYPE_WRPROT) ? "WP" : | ||
1286 | ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other")) | ||
1287 | ); | ||
1288 | } | ||
1289 | |||
1219 | memset(range, 0, sizeof(range)); | 1290 | memset(range, 0, sizeof(range)); |
1220 | extra_remove_size = 0; | 1291 | extra_remove_size = 0; |
1221 | if (mtrr_tom2) { | 1292 | extra_remove_base = 1 << (32 - PAGE_SHIFT); |
1222 | extra_remove_base = 1 << (32 - PAGE_SHIFT); | 1293 | if (mtrr_tom2) |
1223 | extra_remove_size = | 1294 | extra_remove_size = |
1224 | (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; | 1295 | (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; |
1225 | } | ||
1226 | nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, | 1296 | nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, |
1227 | extra_remove_size); | 1297 | extra_remove_size); |
1298 | /* | ||
1299 | * [0, 1M) should always be covered by var mtrr with WB | ||
1300 | * and fixed mtrrs should take effect before var mtrr for it | ||
1301 | */ | ||
1302 | nr_range = add_range_with_merge(range, nr_range, 0, | ||
1303 | (1ULL<<(20 - PAGE_SHIFT)) - 1); | ||
1304 | /* sort the ranges */ | ||
1305 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | ||
1306 | |||
1228 | range_sums = sum_ranges(range, nr_range); | 1307 | range_sums = sum_ranges(range, nr_range); |
1229 | printk(KERN_INFO "total RAM coverred: %ldM\n", | 1308 | printk(KERN_INFO "total RAM coverred: %ldM\n", |
1230 | range_sums >> (20 - PAGE_SHIFT)); | 1309 | range_sums >> (20 - PAGE_SHIFT)); |
1231 | 1310 | ||
1232 | if (mtrr_chunk_size && mtrr_gran_size) { | 1311 | if (mtrr_chunk_size && mtrr_gran_size) { |
1233 | int num_reg; | 1312 | int num_reg; |
1313 | char gran_factor, chunk_factor, lose_factor; | ||
1314 | unsigned long gran_base, chunk_base, lose_base; | ||
1234 | 1315 | ||
1235 | debug_print = 1; | 1316 | debug_print++; |
1236 | /* convert ranges to var ranges state */ | 1317 | /* convert ranges to var ranges state */ |
1237 | num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size, | 1318 | num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size, |
1238 | mtrr_gran_size); | 1319 | mtrr_gran_size); |
@@ -1256,34 +1337,48 @@ static int __init mtrr_cleanup(unsigned address_bits) | |||
1256 | result[i].lose_cover_sizek = | 1337 | result[i].lose_cover_sizek = |
1257 | (range_sums - range_sums_new) << PSHIFT; | 1338 | (range_sums - range_sums_new) << PSHIFT; |
1258 | 1339 | ||
1259 | printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t", | 1340 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), |
1260 | result[i].bad?"*BAD*":" ", result[i].gran_sizek >> 10, | 1341 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), |
1261 | result[i].chunk_sizek >> 10); | 1342 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), |
1262 | printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ldM \n", | 1343 | printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", |
1344 | result[i].bad?"*BAD*":" ", | ||
1345 | gran_base, gran_factor, chunk_base, chunk_factor); | ||
1346 | printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", | ||
1263 | result[i].num_reg, result[i].bad?"-":"", | 1347 | result[i].num_reg, result[i].bad?"-":"", |
1264 | result[i].lose_cover_sizek >> 10); | 1348 | lose_base, lose_factor); |
1265 | if (!result[i].bad) { | 1349 | if (!result[i].bad) { |
1266 | set_var_mtrr_all(address_bits); | 1350 | set_var_mtrr_all(address_bits); |
1267 | return 1; | 1351 | return 1; |
1268 | } | 1352 | } |
1269 | printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, " | 1353 | printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, " |
1270 | "will find optimal one\n"); | 1354 | "will find optimal one\n"); |
1271 | debug_print = 0; | 1355 | debug_print--; |
1272 | memset(result, 0, sizeof(result[0])); | 1356 | memset(result, 0, sizeof(result[0])); |
1273 | } | 1357 | } |
1274 | 1358 | ||
1275 | i = 0; | 1359 | i = 0; |
1276 | memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn)); | 1360 | memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn)); |
1277 | memset(result, 0, sizeof(result)); | 1361 | memset(result, 0, sizeof(result)); |
1278 | for (gran_size = (1ULL<<20); gran_size < (1ULL<<32); gran_size <<= 1) { | 1362 | for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) { |
1279 | for (chunk_size = gran_size; chunk_size < (1ULL<<33); | 1363 | char gran_factor; |
1364 | unsigned long gran_base; | ||
1365 | |||
1366 | if (debug_print) | ||
1367 | gran_base = to_size_factor(gran_size >> 10, &gran_factor); | ||
1368 | |||
1369 | for (chunk_size = gran_size; chunk_size < (1ULL<<32); | ||
1280 | chunk_size <<= 1) { | 1370 | chunk_size <<= 1) { |
1281 | int num_reg; | 1371 | int num_reg; |
1282 | 1372 | ||
1283 | if (debug_print) | 1373 | if (debug_print) { |
1284 | printk(KERN_INFO | 1374 | char chunk_factor; |
1285 | "\ngran_size: %lldM chunk_size_size: %lldM\n", | 1375 | unsigned long chunk_base; |
1286 | gran_size >> 20, chunk_size >> 20); | 1376 | |
1377 | chunk_base = to_size_factor(chunk_size>>10, &chunk_factor), | ||
1378 | printk(KERN_INFO "\n"); | ||
1379 | printk(KERN_INFO "gran_size: %ld%c chunk_size: %ld%c \n", | ||
1380 | gran_base, gran_factor, chunk_base, chunk_factor); | ||
1381 | } | ||
1287 | if (i >= NUM_RESULT) | 1382 | if (i >= NUM_RESULT) |
1288 | continue; | 1383 | continue; |
1289 | 1384 | ||
@@ -1326,12 +1421,18 @@ static int __init mtrr_cleanup(unsigned address_bits) | |||
1326 | 1421 | ||
1327 | /* print out all */ | 1422 | /* print out all */ |
1328 | for (i = 0; i < NUM_RESULT; i++) { | 1423 | for (i = 0; i < NUM_RESULT; i++) { |
1329 | printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t", | 1424 | char gran_factor, chunk_factor, lose_factor; |
1330 | result[i].bad?"*BAD* ":" ", result[i].gran_sizek >> 10, | 1425 | unsigned long gran_base, chunk_base, lose_base; |
1331 | result[i].chunk_sizek >> 10); | 1426 | |
1332 | printk(KERN_CONT "num_reg: %d \tlose RAM: %s%ldM\n", | 1427 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), |
1333 | result[i].num_reg, result[i].bad?"-":"", | 1428 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), |
1334 | result[i].lose_cover_sizek >> 10); | 1429 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), |
1430 | printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", | ||
1431 | result[i].bad?"*BAD*":" ", | ||
1432 | gran_base, gran_factor, chunk_base, chunk_factor); | ||
1433 | printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", | ||
1434 | result[i].num_reg, result[i].bad?"-":"", | ||
1435 | lose_base, lose_factor); | ||
1335 | } | 1436 | } |
1336 | 1437 | ||
1337 | /* try to find the optimal index */ | 1438 | /* try to find the optimal index */ |
@@ -1339,10 +1440,8 @@ static int __init mtrr_cleanup(unsigned address_bits) | |||
1339 | nr_mtrr_spare_reg = num_var_ranges - 1; | 1440 | nr_mtrr_spare_reg = num_var_ranges - 1; |
1340 | num_reg_good = -1; | 1441 | num_reg_good = -1; |
1341 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { | 1442 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { |
1342 | if (!min_loss_pfn[i]) { | 1443 | if (!min_loss_pfn[i]) |
1343 | num_reg_good = i; | 1444 | num_reg_good = i; |
1344 | break; | ||
1345 | } | ||
1346 | } | 1445 | } |
1347 | 1446 | ||
1348 | index_good = -1; | 1447 | index_good = -1; |
@@ -1358,21 +1457,26 @@ static int __init mtrr_cleanup(unsigned address_bits) | |||
1358 | } | 1457 | } |
1359 | 1458 | ||
1360 | if (index_good != -1) { | 1459 | if (index_good != -1) { |
1460 | char gran_factor, chunk_factor, lose_factor; | ||
1461 | unsigned long gran_base, chunk_base, lose_base; | ||
1462 | |||
1361 | printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); | 1463 | printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); |
1362 | i = index_good; | 1464 | i = index_good; |
1363 | printk(KERN_INFO "gran_size: %ldM \tchunk_size: %ldM \t", | 1465 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), |
1364 | result[i].gran_sizek >> 10, | 1466 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), |
1365 | result[i].chunk_sizek >> 10); | 1467 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), |
1366 | printk(KERN_CONT "num_reg: %d \tlose RAM: %ldM\n", | 1468 | printk(KERN_INFO "gran_size: %ld%c \tchunk_size: %ld%c \t", |
1367 | result[i].num_reg, | 1469 | gran_base, gran_factor, chunk_base, chunk_factor); |
1368 | result[i].lose_cover_sizek >> 10); | 1470 | printk(KERN_CONT "num_reg: %d \tlose RAM: %ld%c\n", |
1471 | result[i].num_reg, lose_base, lose_factor); | ||
1369 | /* convert ranges to var ranges state */ | 1472 | /* convert ranges to var ranges state */ |
1370 | chunk_size = result[i].chunk_sizek; | 1473 | chunk_size = result[i].chunk_sizek; |
1371 | chunk_size <<= 10; | 1474 | chunk_size <<= 10; |
1372 | gran_size = result[i].gran_sizek; | 1475 | gran_size = result[i].gran_sizek; |
1373 | gran_size <<= 10; | 1476 | gran_size <<= 10; |
1374 | debug_print = 1; | 1477 | debug_print++; |
1375 | x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); | 1478 | x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); |
1479 | debug_print--; | ||
1376 | set_var_mtrr_all(address_bits); | 1480 | set_var_mtrr_all(address_bits); |
1377 | return 1; | 1481 | return 1; |
1378 | } | 1482 | } |
@@ -1496,11 +1600,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1496 | 1600 | ||
1497 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ | 1601 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ |
1498 | if (!highest_pfn) { | 1602 | if (!highest_pfn) { |
1499 | if (!kvm_para_available()) { | 1603 | WARN(!kvm_para_available(), KERN_WARNING |
1500 | printk(KERN_WARNING | ||
1501 | "WARNING: strange, CPU MTRRs all blank?\n"); | 1604 | "WARNING: strange, CPU MTRRs all blank?\n"); |
1502 | WARN_ON(1); | ||
1503 | } | ||
1504 | return 0; | 1605 | return 0; |
1505 | } | 1606 | } |
1506 | 1607 | ||
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index de7439f82b92..6bff382094f5 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -295,13 +295,19 @@ static int setup_k7_watchdog(unsigned nmi_hz) | |||
295 | /* setup the timer */ | 295 | /* setup the timer */ |
296 | wrmsr(evntsel_msr, evntsel, 0); | 296 | wrmsr(evntsel_msr, evntsel, 0); |
297 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); | 297 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); |
298 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
299 | evntsel |= K7_EVNTSEL_ENABLE; | ||
300 | wrmsr(evntsel_msr, evntsel, 0); | ||
301 | 298 | ||
299 | /* initialize the wd struct before enabling */ | ||
302 | wd->perfctr_msr = perfctr_msr; | 300 | wd->perfctr_msr = perfctr_msr; |
303 | wd->evntsel_msr = evntsel_msr; | 301 | wd->evntsel_msr = evntsel_msr; |
304 | wd->cccr_msr = 0; /* unused */ | 302 | wd->cccr_msr = 0; /* unused */ |
303 | |||
304 | /* ok, everything is initialized, announce that we're set */ | ||
305 | cpu_nmi_set_wd_enabled(); | ||
306 | |||
307 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
308 | evntsel |= K7_EVNTSEL_ENABLE; | ||
309 | wrmsr(evntsel_msr, evntsel, 0); | ||
310 | |||
305 | return 1; | 311 | return 1; |
306 | } | 312 | } |
307 | 313 | ||
@@ -379,13 +385,19 @@ static int setup_p6_watchdog(unsigned nmi_hz) | |||
379 | wrmsr(evntsel_msr, evntsel, 0); | 385 | wrmsr(evntsel_msr, evntsel, 0); |
380 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | 386 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); |
381 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); | 387 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); |
382 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
383 | evntsel |= P6_EVNTSEL0_ENABLE; | ||
384 | wrmsr(evntsel_msr, evntsel, 0); | ||
385 | 388 | ||
389 | /* initialize the wd struct before enabling */ | ||
386 | wd->perfctr_msr = perfctr_msr; | 390 | wd->perfctr_msr = perfctr_msr; |
387 | wd->evntsel_msr = evntsel_msr; | 391 | wd->evntsel_msr = evntsel_msr; |
388 | wd->cccr_msr = 0; /* unused */ | 392 | wd->cccr_msr = 0; /* unused */ |
393 | |||
394 | /* ok, everything is initialized, announce that we're set */ | ||
395 | cpu_nmi_set_wd_enabled(); | ||
396 | |||
397 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
398 | evntsel |= P6_EVNTSEL0_ENABLE; | ||
399 | wrmsr(evntsel_msr, evntsel, 0); | ||
400 | |||
389 | return 1; | 401 | return 1; |
390 | } | 402 | } |
391 | 403 | ||
@@ -432,6 +444,27 @@ static const struct wd_ops p6_wd_ops = { | |||
432 | #define P4_CCCR_ENABLE (1 << 12) | 444 | #define P4_CCCR_ENABLE (1 << 12) |
433 | #define P4_CCCR_OVF (1 << 31) | 445 | #define P4_CCCR_OVF (1 << 31) |
434 | 446 | ||
447 | #define P4_CONTROLS 18 | ||
448 | static unsigned int p4_controls[18] = { | ||
449 | MSR_P4_BPU_CCCR0, | ||
450 | MSR_P4_BPU_CCCR1, | ||
451 | MSR_P4_BPU_CCCR2, | ||
452 | MSR_P4_BPU_CCCR3, | ||
453 | MSR_P4_MS_CCCR0, | ||
454 | MSR_P4_MS_CCCR1, | ||
455 | MSR_P4_MS_CCCR2, | ||
456 | MSR_P4_MS_CCCR3, | ||
457 | MSR_P4_FLAME_CCCR0, | ||
458 | MSR_P4_FLAME_CCCR1, | ||
459 | MSR_P4_FLAME_CCCR2, | ||
460 | MSR_P4_FLAME_CCCR3, | ||
461 | MSR_P4_IQ_CCCR0, | ||
462 | MSR_P4_IQ_CCCR1, | ||
463 | MSR_P4_IQ_CCCR2, | ||
464 | MSR_P4_IQ_CCCR3, | ||
465 | MSR_P4_IQ_CCCR4, | ||
466 | MSR_P4_IQ_CCCR5, | ||
467 | }; | ||
435 | /* | 468 | /* |
436 | * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter | 469 | * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter |
437 | * CRU_ESCR0 (with any non-null event selector) through a complemented | 470 | * CRU_ESCR0 (with any non-null event selector) through a complemented |
@@ -473,12 +506,38 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
473 | evntsel_msr = MSR_P4_CRU_ESCR0; | 506 | evntsel_msr = MSR_P4_CRU_ESCR0; |
474 | cccr_msr = MSR_P4_IQ_CCCR0; | 507 | cccr_msr = MSR_P4_IQ_CCCR0; |
475 | cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); | 508 | cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); |
509 | |||
510 | /* | ||
511 | * If we're on the kdump kernel or other situation, we may | ||
512 | * still have other performance counter registers set to | ||
513 | * interrupt and they'll keep interrupting forever because | ||
514 | * of the P4_CCCR_OVF quirk. So we need to ACK all the | ||
515 | * pending interrupts and disable all the registers here, | ||
516 | * before reenabling the NMI delivery. Refer to p4_rearm() | ||
517 | * about the P4_CCCR_OVF quirk. | ||
518 | */ | ||
519 | if (reset_devices) { | ||
520 | unsigned int low, high; | ||
521 | int i; | ||
522 | |||
523 | for (i = 0; i < P4_CONTROLS; i++) { | ||
524 | rdmsr(p4_controls[i], low, high); | ||
525 | low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF); | ||
526 | wrmsr(p4_controls[i], low, high); | ||
527 | } | ||
528 | } | ||
476 | } else { | 529 | } else { |
477 | /* logical cpu 1 */ | 530 | /* logical cpu 1 */ |
478 | perfctr_msr = MSR_P4_IQ_PERFCTR1; | 531 | perfctr_msr = MSR_P4_IQ_PERFCTR1; |
479 | evntsel_msr = MSR_P4_CRU_ESCR0; | 532 | evntsel_msr = MSR_P4_CRU_ESCR0; |
480 | cccr_msr = MSR_P4_IQ_CCCR1; | 533 | cccr_msr = MSR_P4_IQ_CCCR1; |
481 | cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4); | 534 | |
535 | /* Pentium 4 D processors don't support P4_CCCR_OVF_PMI1 */ | ||
536 | if (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask == 4) | ||
537 | cccr_val = P4_CCCR_OVF_PMI0; | ||
538 | else | ||
539 | cccr_val = P4_CCCR_OVF_PMI1; | ||
540 | cccr_val |= P4_CCCR_ESCR_SELECT(4); | ||
482 | } | 541 | } |
483 | 542 | ||
484 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) | 543 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) |
@@ -493,12 +552,17 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
493 | wrmsr(evntsel_msr, evntsel, 0); | 552 | wrmsr(evntsel_msr, evntsel, 0); |
494 | wrmsr(cccr_msr, cccr_val, 0); | 553 | wrmsr(cccr_msr, cccr_val, 0); |
495 | write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz); | 554 | write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz); |
496 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 555 | |
497 | cccr_val |= P4_CCCR_ENABLE; | ||
498 | wrmsr(cccr_msr, cccr_val, 0); | ||
499 | wd->perfctr_msr = perfctr_msr; | 556 | wd->perfctr_msr = perfctr_msr; |
500 | wd->evntsel_msr = evntsel_msr; | 557 | wd->evntsel_msr = evntsel_msr; |
501 | wd->cccr_msr = cccr_msr; | 558 | wd->cccr_msr = cccr_msr; |
559 | |||
560 | /* ok, everything is initialized, announce that we're set */ | ||
561 | cpu_nmi_set_wd_enabled(); | ||
562 | |||
563 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
564 | cccr_val |= P4_CCCR_ENABLE; | ||
565 | wrmsr(cccr_msr, cccr_val, 0); | ||
502 | return 1; | 566 | return 1; |
503 | } | 567 | } |
504 | 568 | ||
@@ -614,13 +678,17 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz) | |||
614 | wrmsr(evntsel_msr, evntsel, 0); | 678 | wrmsr(evntsel_msr, evntsel, 0); |
615 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | 679 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); |
616 | write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz); | 680 | write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz); |
617 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
618 | evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
619 | wrmsr(evntsel_msr, evntsel, 0); | ||
620 | 681 | ||
621 | wd->perfctr_msr = perfctr_msr; | 682 | wd->perfctr_msr = perfctr_msr; |
622 | wd->evntsel_msr = evntsel_msr; | 683 | wd->evntsel_msr = evntsel_msr; |
623 | wd->cccr_msr = 0; /* unused */ | 684 | wd->cccr_msr = 0; /* unused */ |
685 | |||
686 | /* ok, everything is initialized, announce that we're set */ | ||
687 | cpu_nmi_set_wd_enabled(); | ||
688 | |||
689 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
690 | evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
691 | wrmsr(evntsel_msr, evntsel, 0); | ||
624 | intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); | 692 | intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); |
625 | return 1; | 693 | return 1; |
626 | } | 694 | } |
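Editor's note: every setup_*_watchdog() path above is reordered the same way: fill in the wd state and call cpu_nmi_set_wd_enabled() before unmasking the performance-counter NMI and setting the enable bit, so a counter that fires immediately cannot reach the NMI handler while the bookkeeping is still half-initialized. The shape of the fix, reduced to a sketch (EVNTSEL_ENABLE_BIT stands in for the per-model enable flag such as K7_EVNTSEL_ENABLE):

    /* 1. program the counter while it is still disabled */
    wrmsr(evntsel_msr, evntsel, 0);
    write_watchdog_counter(perfctr_msr, "PERFCTR", nmi_hz);

    /* 2. publish the state the NMI handler will read */
    wd->perfctr_msr = perfctr_msr;
    wd->evntsel_msr = evntsel_msr;
    cpu_nmi_set_wd_enabled();

    /* 3. only now unmask the NMI and turn the counter on */
    apic_write(APIC_LVTPC, APIC_DM_NMI);
    wrmsr(evntsel_msr, evntsel | EVNTSEL_ENABLE_BIT, 0);

The P4 path additionally clears P4_CCCR_ENABLE and P4_CCCR_OVF on all listed CCCR MSRs when reset_devices is set, so stale counters inherited by a kdump kernel stop re-raising NMIs, and it avoids P4_CCCR_OVF_PMI1 on Pentium 4 D parts that do not support it.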
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 14b11b3be31c..6a44d6465991 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <linux/smp_lock.h> | 36 | #include <linux/smp_lock.h> |
37 | #include <linux/major.h> | 37 | #include <linux/major.h> |
38 | #include <linux/fs.h> | 38 | #include <linux/fs.h> |
39 | #include <linux/smp_lock.h> | ||
40 | #include <linux/device.h> | 39 | #include <linux/device.h> |
41 | #include <linux/cpu.h> | 40 | #include <linux/cpu.h> |
42 | #include <linux/notifier.h> | 41 | #include <linux/notifier.h> |
@@ -89,6 +88,8 @@ static ssize_t cpuid_read(struct file *file, char __user *buf, | |||
89 | struct cpuid_regs cmd; | 88 | struct cpuid_regs cmd; |
90 | int cpu = iminor(file->f_path.dentry->d_inode); | 89 | int cpu = iminor(file->f_path.dentry->d_inode); |
91 | u64 pos = *ppos; | 90 | u64 pos = *ppos; |
91 | ssize_t bytes = 0; | ||
92 | int err = 0; | ||
92 | 93 | ||
93 | if (count % 16) | 94 | if (count % 16) |
94 | return -EINVAL; /* Invalid chunk size */ | 95 | return -EINVAL; /* Invalid chunk size */ |
@@ -96,14 +97,19 @@ static ssize_t cpuid_read(struct file *file, char __user *buf, | |||
96 | for (; count; count -= 16) { | 97 | for (; count; count -= 16) { |
97 | cmd.eax = pos; | 98 | cmd.eax = pos; |
98 | cmd.ecx = pos >> 32; | 99 | cmd.ecx = pos >> 32; |
99 | smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1); | 100 | err = smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1); |
100 | if (copy_to_user(tmp, &cmd, 16)) | 101 | if (err) |
101 | return -EFAULT; | 102 | break; |
103 | if (copy_to_user(tmp, &cmd, 16)) { | ||
104 | err = -EFAULT; | ||
105 | break; | ||
106 | } | ||
102 | tmp += 16; | 107 | tmp += 16; |
108 | bytes += 16; | ||
103 | *ppos = ++pos; | 109 | *ppos = ++pos; |
104 | } | 110 | } |
105 | 111 | ||
106 | return tmp - buf; | 112 | return bytes ? bytes : err; |
107 | } | 113 | } |
108 | 114 | ||
109 | static int cpuid_open(struct inode *inode, struct file *file) | 115 | static int cpuid_open(struct inode *inode, struct file *file) |
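Editor's note: cpuid_read() now follows the usual partial-read convention: stop at the first failure, but report the bytes already copied if any were transferred and only return the error when nothing was. A generic sketch of that pattern (fetch_chunk() and CHUNK are made-up names):

    /* Illustrative only -- not the driver's actual helpers. */
    ssize_t read_loop(char __user *buf, size_t count)
    {
            ssize_t bytes = 0;
            int err = 0;

            for (; count >= CHUNK; count -= CHUNK) {
                    err = fetch_chunk(buf + bytes);
                    if (err)
                            break;              /* stop, but keep what we copied */
                    bytes += CHUNK;
            }

            return bytes ? bytes : err;         /* partial read beats the error */
    }

The same hunk also checks the smp_call_function_single() return value instead of ignoring it, and drops a duplicate smp_lock.h include.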
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index 15e6c6bc4a46..e90a60ef10c2 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c | |||
@@ -7,9 +7,8 @@ | |||
7 | 7 | ||
8 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
9 | #include <linux/crash_dump.h> | 9 | #include <linux/crash_dump.h> |
10 | 10 | #include <linux/uaccess.h> | |
11 | #include <asm/uaccess.h> | 11 | #include <linux/io.h> |
12 | #include <asm/io.h> | ||
13 | 12 | ||
14 | /** | 13 | /** |
15 | * copy_oldmem_page - copy one page from "oldmem" | 14 | * copy_oldmem_page - copy one page from "oldmem" |
@@ -25,7 +24,7 @@ | |||
25 | * in the current kernel. We stitch up a pte, similar to kmap_atomic. | 24 | * in the current kernel. We stitch up a pte, similar to kmap_atomic. |
26 | */ | 25 | */ |
27 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | 26 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
28 | size_t csize, unsigned long offset, int userbuf) | 27 | size_t csize, unsigned long offset, int userbuf) |
29 | { | 28 | { |
30 | void *vaddr; | 29 | void *vaddr; |
31 | 30 | ||
@@ -33,14 +32,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
33 | return 0; | 32 | return 0; |
34 | 33 | ||
35 | vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); | 34 | vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); |
35 | if (!vaddr) | ||
36 | return -ENOMEM; | ||
36 | 37 | ||
37 | if (userbuf) { | 38 | if (userbuf) { |
38 | if (copy_to_user(buf, (vaddr + offset), csize)) { | 39 | if (copy_to_user(buf, vaddr + offset, csize)) { |
39 | iounmap(vaddr); | 40 | iounmap(vaddr); |
40 | return -EFAULT; | 41 | return -EFAULT; |
41 | } | 42 | } |
42 | } else | 43 | } else |
43 | memcpy(buf, (vaddr + offset), csize); | 44 | memcpy(buf, vaddr + offset, csize); |
44 | 45 | ||
45 | iounmap(vaddr); | 46 | iounmap(vaddr); |
46 | return csize; | 47 | return csize; |
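copy_oldmem_page() now returns -ENOMEM when ioremap() fails instead of dereferencing a null mapping, and it unmaps on every exit path. The fragment below is only a user-space analogue of that map/copy/unmap discipline, using mmap() in place of ioremap(); it is not the kernel code itself.

/* User-space analogue: check the mapping before use, release it on every path. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static long copy_page_at(unsigned char *dst, size_t len, size_t offset)
{
	unsigned char *vaddr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (vaddr == MAP_FAILED)
		return -1;		/* like returning -ENOMEM above */

	memcpy(dst, vaddr + offset, len);
	munmap(vaddr, 4096);		/* like iounmap() on every path */
	return (long)len;
}

int main(void)
{
	unsigned char buf[16];
	printf("copied %ld bytes\n", copy_page_at(buf, sizeof(buf), 32));
	return 0;
}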
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index 11c11b8ec48d..2b69994fd3a8 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c | |||
@@ -2,26 +2,49 @@ | |||
2 | * Debug Store support | 2 | * Debug Store support |
3 | * | 3 | * |
4 | * This provides a low-level interface to the hardware's Debug Store | 4 | * This provides a low-level interface to the hardware's Debug Store |
5 | * feature that is used for last branch recording (LBR) and | 5 | * feature that is used for branch trace store (BTS) and |
6 | * precise-event based sampling (PEBS). | 6 | * precise-event based sampling (PEBS). |
7 | * | 7 | * |
8 | * Different architectures use a different DS layout/pointer size. | 8 | * It manages: |
9 | * The below functions therefore work on a void*. | 9 | * - per-thread and per-cpu allocation of BTS and PEBS |
10 | * - buffer memory allocation (optional) | ||
11 | * - buffer overflow handling | ||
12 | * - buffer access | ||
10 | * | 13 | * |
14 | * It assumes: | ||
15 | * - get_task_struct on all parameter tasks | ||
16 | * - current is allowed to trace parameter tasks | ||
11 | * | 17 | * |
12 | * Since there is no user for PEBS, yet, only LBR (or branch | ||
13 | * trace store, BTS) is supported. | ||
14 | * | 18 | * |
15 | * | 19 | * Copyright (C) 2007-2008 Intel Corporation. |
16 | * Copyright (C) 2007 Intel Corporation. | 20 | * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008 |
17 | * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007 | ||
18 | */ | 21 | */ |
19 | 22 | ||
23 | |||
24 | #ifdef CONFIG_X86_DS | ||
25 | |||
20 | #include <asm/ds.h> | 26 | #include <asm/ds.h> |
21 | 27 | ||
22 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
23 | #include <linux/string.h> | 29 | #include <linux/string.h> |
24 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include <linux/sched.h> | ||
32 | #include <linux/mm.h> | ||
33 | |||
34 | |||
35 | /* | ||
36 | * The configuration for a particular DS hardware implementation. | ||
37 | */ | ||
38 | struct ds_configuration { | ||
39 | /* the size of the DS structure in bytes */ | ||
40 | unsigned char sizeof_ds; | ||
41 | /* the size of one pointer-typed field in the DS structure in bytes; | ||
42 | this covers the first 8 fields related to buffer management. */ | ||
43 | unsigned char sizeof_field; | ||
44 | /* the size of a BTS/PEBS record in bytes */ | ||
45 | unsigned char sizeof_rec[2]; | ||
46 | }; | ||
47 | static struct ds_configuration ds_cfg; | ||
25 | 48 | ||
26 | 49 | ||
27 | /* | 50 | /* |
@@ -44,378 +67,747 @@ | |||
44 | * (interrupt occurs when write pointer passes interrupt pointer) | 67 | * (interrupt occurs when write pointer passes interrupt pointer) |
45 | * - value to which counter is reset following counter overflow | 68 | * - value to which counter is reset following counter overflow |
46 | * | 69 | * |
47 | * On later architectures, the last branch recording hardware uses | 70 | * Later architectures use 64bit pointers throughout, whereas earlier |
48 | * 64bit pointers even in 32bit mode. | 71 | * architectures use 32bit pointers in 32bit mode. |
49 | * | ||
50 | * | ||
51 | * Branch Trace Store (BTS) records store information about control | ||
52 | * flow changes. They at least provide the following information: | ||
53 | * - source linear address | ||
54 | * - destination linear address | ||
55 | * | 72 | * |
56 | * Netburst supported a predicated bit that had been dropped in later | ||
57 | * architectures. We do not support it. | ||
58 | * | 73 | * |
74 | * We compute the base address for the first 8 fields based on: | ||
75 | * - the field size stored in the DS configuration | ||
76 | * - the relative field position | ||
77 | * - an offset giving the start of the respective region | ||
59 | * | 78 | * |
60 | * In order to abstract from the actual DS and BTS layout, we describe | 79 | * This offset is further used to index various arrays holding |
61 | * the access to the relevant fields. | 80 | * information for BTS and PEBS at the respective index. |
62 | * Thanks to Andi Kleen for proposing this design. | ||
63 | * | 81 | * |
64 | * The implementation, however, is not as general as it might seem. In | 82 | * On later 32bit processors, we only access the lower 32bit of the |
65 | * order to stay somewhat simple and efficient, we assume an | 83 | * 64bit pointer fields. The upper halves will be zeroed out. |
66 | * underlying unsigned type (mostly a pointer type) and we expect the | ||
67 | * field to be at least as big as that type. | ||
68 | */ | 84 | */ |
69 | 85 | ||
70 | /* | 86 | enum ds_field { |
71 | * A special from_ip address to indicate that the BTS record is an | 87 | ds_buffer_base = 0, |
72 | * info record that needs to be interpreted or skipped. | 88 | ds_index, |
73 | */ | 89 | ds_absolute_maximum, |
74 | #define BTS_ESCAPE_ADDRESS (-1) | 90 | ds_interrupt_threshold, |
91 | }; | ||
75 | 92 | ||
76 | /* | 93 | enum ds_qualifier { |
77 | * A field access descriptor | 94 | ds_bts = 0, |
78 | */ | 95 | ds_pebs |
79 | struct access_desc { | ||
80 | unsigned char offset; | ||
81 | unsigned char size; | ||
82 | }; | 96 | }; |
83 | 97 | ||
98 | static inline unsigned long ds_get(const unsigned char *base, | ||
99 | enum ds_qualifier qual, enum ds_field field) | ||
100 | { | ||
101 | base += (ds_cfg.sizeof_field * (field + (4 * qual))); | ||
102 | return *(unsigned long *)base; | ||
103 | } | ||
104 | |||
105 | static inline void ds_set(unsigned char *base, enum ds_qualifier qual, | ||
106 | enum ds_field field, unsigned long value) | ||
107 | { | ||
108 | base += (ds_cfg.sizeof_field * (field + (4 * qual))); | ||
109 | (*(unsigned long *)base) = value; | ||
110 | } | ||
111 | |||
112 | |||
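ds_get() and ds_set() address the first eight pointer-sized fields of the DS save area as two groups of four (BTS first, then PEBS), so a field's byte offset is sizeof_field * (field + 4 * qualifier). The standalone sketch below works that arithmetic through with the 8-byte field size of ds_cfg_64; the numbers are illustrative only.

/* Standalone sketch of the DS field addressing used by ds_get()/ds_set(). */
#include <stdio.h>

enum ds_field { ds_buffer_base, ds_index, ds_absolute_maximum,
		ds_interrupt_threshold };
enum ds_qualifier { ds_bts, ds_pebs };

static unsigned long field_offset(unsigned int sizeof_field,
				  enum ds_qualifier qual, enum ds_field field)
{
	return sizeof_field * (field + 4 * qual);
}

int main(void)
{
	/* with 8-byte fields: BTS index at offset 8, PEBS index at offset 40 */
	printf("bts index @ %lu, pebs index @ %lu\n",
	       field_offset(8, ds_bts, ds_index),
	       field_offset(8, ds_pebs, ds_index));
	return 0;
}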
84 | /* | 113 | /* |
85 | * The configuration for a particular DS/BTS hardware implementation. | 114 | * Locking is done only for allocating BTS or PEBS resources and for |
115 | * guarding context and buffer memory allocation. | ||
116 | * | ||
117 | * Most functions require the current task to own the ds context part | ||
118 | * they are going to access. All the locking is done when validating | ||
119 | * access to the context. | ||
86 | */ | 120 | */ |
87 | struct ds_configuration { | 121 | static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock); |
88 | /* the DS configuration */ | ||
89 | unsigned char sizeof_ds; | ||
90 | struct access_desc bts_buffer_base; | ||
91 | struct access_desc bts_index; | ||
92 | struct access_desc bts_absolute_maximum; | ||
93 | struct access_desc bts_interrupt_threshold; | ||
94 | /* the BTS configuration */ | ||
95 | unsigned char sizeof_bts; | ||
96 | struct access_desc from_ip; | ||
97 | struct access_desc to_ip; | ||
98 | /* BTS variants used to store additional information like | ||
99 | timestamps */ | ||
100 | struct access_desc info_type; | ||
101 | struct access_desc info_data; | ||
102 | unsigned long debugctl_mask; | ||
103 | }; | ||
104 | 122 | ||
105 | /* | 123 | /* |
106 | * The global configuration used by the below accessor functions | 124 | * Validate that the current task is allowed to access the BTS/PEBS |
125 | * buffer of the parameter task. | ||
126 | * | ||
127 | * Returns 0, if access is granted; -Eerrno, otherwise. | ||
107 | */ | 128 | */ |
108 | static struct ds_configuration ds_cfg; | 129 | static inline int ds_validate_access(struct ds_context *context, |
130 | enum ds_qualifier qual) | ||
131 | { | ||
132 | if (!context) | ||
133 | return -EPERM; | ||
134 | |||
135 | if (context->owner[qual] == current) | ||
136 | return 0; | ||
137 | |||
138 | return -EPERM; | ||
139 | } | ||
140 | |||
109 | 141 | ||
110 | /* | 142 | /* |
111 | * Accessor functions for some DS and BTS fields using the above | 143 | * We either support (system-wide) per-cpu or per-thread allocation. |
112 | * global ptrace_bts_cfg. | 144 | * We distinguish the two based on the task_struct pointer, where a |
145 | * NULL pointer indicates per-cpu allocation for the current cpu. | ||
146 | * | ||
147 | * Allocations are use-counted. As soon as resources are allocated, | ||
148 | * further allocations must be of the same type (per-cpu or | ||
149 | * per-thread). We model this by counting allocations (i.e. the number | ||
150 | * of tracers of a certain type) for one type negatively: | ||
151 | * =0 no tracers | ||
152 | * >0 number of per-thread tracers | ||
153 | * <0 number of per-cpu tracers | ||
154 | * | ||
155 | * The below functions to get and put tracers and to check the | ||
156 | * allocation type require the ds_lock to be held by the caller. | ||
157 | * | ||
158 | * Tracers essentially gives the number of ds contexts for a certain | ||
159 | * type of allocation. | ||
113 | */ | 160 | */ |
114 | static inline unsigned long get_bts_buffer_base(char *base) | 161 | static long tracers; |
162 | |||
163 | static inline void get_tracer(struct task_struct *task) | ||
115 | { | 164 | { |
116 | return *(unsigned long *)(base + ds_cfg.bts_buffer_base.offset); | 165 | tracers += (task ? 1 : -1); |
117 | } | 166 | } |
118 | static inline void set_bts_buffer_base(char *base, unsigned long value) | 167 | |
168 | static inline void put_tracer(struct task_struct *task) | ||
119 | { | 169 | { |
120 | (*(unsigned long *)(base + ds_cfg.bts_buffer_base.offset)) = value; | 170 | tracers -= (task ? 1 : -1); |
121 | } | 171 | } |
122 | static inline unsigned long get_bts_index(char *base) | 172 | |
173 | static inline int check_tracer(struct task_struct *task) | ||
123 | { | 174 | { |
124 | return *(unsigned long *)(base + ds_cfg.bts_index.offset); | 175 | return (task ? (tracers >= 0) : (tracers <= 0)); |
125 | } | 176 | } |
126 | static inline void set_bts_index(char *base, unsigned long value) | 177 | |
178 | |||
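The sign convention above folds the allocation type into the tracer count: per-thread tracers drive tracers positive, per-cpu tracers drive it negative, and check_tracer() rejects a request whose flavour disagrees with the sign. A small standalone sketch of that convention, with per-thread modelled as a plain flag rather than a task pointer:

/* Standalone sketch of the signed tracer counting (illustrative only). */
#include <stdio.h>

static long tracers;

static int check_tracer(int per_thread)
{
	return per_thread ? (tracers >= 0) : (tracers <= 0);
}

static void get_tracer(int per_thread)
{
	tracers += per_thread ? 1 : -1;
}

int main(void)
{
	get_tracer(1);					/* one per-thread tracer */
	printf("per-thread ok: %d\n", check_tracer(1));	/* 1: same flavour */
	printf("per-cpu ok:    %d\n", check_tracer(0));	/* 0: mixing rejected */
	return 0;
}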
179 | /* | ||
180 | * The DS context is either attached to a thread or to a cpu: | ||
181 | * - in the former case, the thread_struct contains a pointer to the | ||
182 | * attached context. | ||
183 | * - in the latter case, we use a static array of per-cpu context | ||
184 | * pointers. | ||
185 | * | ||
186 | * Contexts are use-counted. They are allocated on first access and | ||
187 | * deallocated when the last user puts the context. | ||
188 | * | ||
189 | * We distinguish between an allocating and a non-allocating get of a | ||
190 | * context: | ||
191 | * - the allocating get is used for requesting BTS/PEBS resources. It | ||
192 | * requires the caller to hold the global ds_lock. | ||
193 | * - the non-allocating get is used for all other cases. A | ||
194 | * non-existing context indicates an error. It acquires and releases | ||
195 | * the ds_lock itself for obtaining the context. | ||
196 | * | ||
197 | * A context and its DS configuration are allocated and deallocated | ||
198 | * together. A context always has a DS configuration of the | ||
199 | * appropriate size. | ||
200 | */ | ||
201 | static DEFINE_PER_CPU(struct ds_context *, system_context); | ||
202 | |||
203 | #define this_system_context per_cpu(system_context, smp_processor_id()) | ||
204 | |||
205 | /* | ||
206 | * Returns the pointer to the parameter task's context or to the | ||
207 | * system-wide context, if task is NULL. | ||
208 | * | ||
209 | * Increases the use count of the returned context, if not NULL. | ||
210 | */ | ||
211 | static inline struct ds_context *ds_get_context(struct task_struct *task) | ||
127 | { | 212 | { |
128 | (*(unsigned long *)(base + ds_cfg.bts_index.offset)) = value; | 213 | struct ds_context *context; |
214 | |||
215 | spin_lock(&ds_lock); | ||
216 | |||
217 | context = (task ? task->thread.ds_ctx : this_system_context); | ||
218 | if (context) | ||
219 | context->count++; | ||
220 | |||
221 | spin_unlock(&ds_lock); | ||
222 | |||
223 | return context; | ||
129 | } | 224 | } |
130 | static inline unsigned long get_bts_absolute_maximum(char *base) | 225 | |
226 | /* | ||
227 | * Same as ds_get_context, but allocates the context and its DS | ||
228 | * structure, if necessary; returns NULL, if out of memory. | ||
229 | * | ||
230 | * pre: requires ds_lock to be held | ||
231 | */ | ||
232 | static inline struct ds_context *ds_alloc_context(struct task_struct *task) | ||
131 | { | 233 | { |
132 | return *(unsigned long *)(base + ds_cfg.bts_absolute_maximum.offset); | 234 | struct ds_context **p_context = |
235 | (task ? &task->thread.ds_ctx : &this_system_context); | ||
236 | struct ds_context *context = *p_context; | ||
237 | |||
238 | if (!context) { | ||
239 | context = kzalloc(sizeof(*context), GFP_KERNEL); | ||
240 | |||
241 | if (!context) | ||
242 | return NULL; | ||
243 | |||
244 | context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL); | ||
245 | if (!context->ds) { | ||
246 | kfree(context); | ||
247 | return NULL; | ||
248 | } | ||
249 | |||
250 | *p_context = context; | ||
251 | |||
252 | context->this = p_context; | ||
253 | context->task = task; | ||
254 | |||
255 | if (task) | ||
256 | set_tsk_thread_flag(task, TIF_DS_AREA_MSR); | ||
257 | |||
258 | if (!task || (task == current)) | ||
259 | wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0); | ||
260 | |||
261 | get_tracer(task); | ||
262 | } | ||
263 | |||
264 | context->count++; | ||
265 | |||
266 | return context; | ||
133 | } | 267 | } |
134 | static inline void set_bts_absolute_maximum(char *base, unsigned long value) | 268 | |
269 | /* | ||
270 | * Decreases the use count of the parameter context, if not NULL. | ||
271 | * Deallocates the context, if the use count reaches zero. | ||
272 | */ | ||
273 | static inline void ds_put_context(struct ds_context *context) | ||
135 | { | 274 | { |
136 | (*(unsigned long *)(base + ds_cfg.bts_absolute_maximum.offset)) = value; | 275 | if (!context) |
276 | return; | ||
277 | |||
278 | spin_lock(&ds_lock); | ||
279 | |||
280 | if (--context->count) | ||
281 | goto out; | ||
282 | |||
283 | *(context->this) = NULL; | ||
284 | |||
285 | if (context->task) | ||
286 | clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR); | ||
287 | |||
288 | if (!context->task || (context->task == current)) | ||
289 | wrmsrl(MSR_IA32_DS_AREA, 0); | ||
290 | |||
291 | put_tracer(context->task); | ||
292 | |||
293 | /* free any leftover buffers from tracers that did not | ||
294 | * deallocate them properly. */ | ||
295 | kfree(context->buffer[ds_bts]); | ||
296 | kfree(context->buffer[ds_pebs]); | ||
297 | kfree(context->ds); | ||
298 | kfree(context); | ||
299 | out: | ||
300 | spin_unlock(&ds_lock); | ||
137 | } | 301 | } |
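Contexts are allocated on the first request and freed when the last user drops them, with ds_get_context()/ds_put_context() forming the get/put pairs. The standalone sketch below mirrors only the use counting; the real code additionally takes ds_lock, toggles TIF_DS_AREA_MSR and programs MSR_IA32_DS_AREA.

/* Standalone sketch of a use-counted context: allocate on first get, free on last put. */
#include <stdio.h>
#include <stdlib.h>

struct ctx { int count; };
static struct ctx *the_ctx;

static struct ctx *ctx_get(void)
{
	if (!the_ctx)
		the_ctx = calloc(1, sizeof(*the_ctx));
	if (the_ctx)
		the_ctx->count++;
	return the_ctx;
}

static void ctx_put(struct ctx *c)
{
	if (c && --c->count == 0) {
		free(c);
		the_ctx = NULL;
	}
}

int main(void)
{
	struct ctx *a = ctx_get();
	struct ctx *b = ctx_get();
	if (!a || !b)
		return 1;
	printf("users: %d\n", a->count);	/* 2 */
	ctx_put(b);
	ctx_put(a);				/* last put frees the context */
	return 0;
}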
138 | static inline unsigned long get_bts_interrupt_threshold(char *base) | 302 | |
303 | |||
304 | /* | ||
305 | * Handle a buffer overflow | ||
306 | * | ||
307 | * task: the task whose buffers are overflowing; | ||
308 | * NULL for a buffer overflow on the current cpu | ||
309 | * context: the ds context | ||
310 | * qual: the buffer type | ||
311 | */ | ||
312 | static void ds_overflow(struct task_struct *task, struct ds_context *context, | ||
313 | enum ds_qualifier qual) | ||
139 | { | 314 | { |
140 | return *(unsigned long *)(base + ds_cfg.bts_interrupt_threshold.offset); | 315 | if (!context) |
316 | return; | ||
317 | |||
318 | if (context->callback[qual]) | ||
319 | (*context->callback[qual])(task); | ||
320 | |||
321 | /* todo: do some more overflow handling */ | ||
141 | } | 322 | } |
142 | static inline void set_bts_interrupt_threshold(char *base, unsigned long value) | 323 | |
324 | |||
325 | /* | ||
326 | * Allocate a non-pageable buffer of the parameter size. | ||
327 | * Checks the memory and the locked memory rlimit. | ||
328 | * | ||
329 | * Returns the buffer, if successful; | ||
330 | * NULL, if out of memory or rlimit exceeded. | ||
331 | * | ||
332 | * size: the requested buffer size in bytes | ||
333 | * pages (out): if not NULL, contains the number of pages reserved | ||
334 | */ | ||
335 | static inline void *ds_allocate_buffer(size_t size, unsigned int *pages) | ||
143 | { | 336 | { |
144 | (*(unsigned long *)(base + ds_cfg.bts_interrupt_threshold.offset)) = value; | 337 | unsigned long rlim, vm, pgsz; |
338 | void *buffer; | ||
339 | |||
340 | pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
341 | |||
342 | rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT; | ||
343 | vm = current->mm->total_vm + pgsz; | ||
344 | if (rlim < vm) | ||
345 | return NULL; | ||
346 | |||
347 | rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; | ||
348 | vm = current->mm->locked_vm + pgsz; | ||
349 | if (rlim < vm) | ||
350 | return NULL; | ||
351 | |||
352 | buffer = kzalloc(size, GFP_KERNEL); | ||
353 | if (!buffer) | ||
354 | return NULL; | ||
355 | |||
356 | current->mm->total_vm += pgsz; | ||
357 | current->mm->locked_vm += pgsz; | ||
358 | |||
359 | if (pages) | ||
360 | *pages = pgsz; | ||
361 | |||
362 | return buffer; | ||
145 | } | 363 | } |
146 | static inline unsigned long get_from_ip(char *base) | 364 | |
365 | static int ds_request(struct task_struct *task, void *base, size_t size, | ||
366 | ds_ovfl_callback_t ovfl, enum ds_qualifier qual) | ||
147 | { | 367 | { |
148 | return *(unsigned long *)(base + ds_cfg.from_ip.offset); | 368 | struct ds_context *context; |
369 | unsigned long buffer, adj; | ||
370 | const unsigned long alignment = (1 << 3); | ||
371 | int error = 0; | ||
372 | |||
373 | if (!ds_cfg.sizeof_ds) | ||
374 | return -EOPNOTSUPP; | ||
375 | |||
376 | /* we require some space to do alignment adjustments below */ | ||
377 | if (size < (alignment + ds_cfg.sizeof_rec[qual])) | ||
378 | return -EINVAL; | ||
379 | |||
380 | /* buffer overflow notification is not yet implemented */ | ||
381 | if (ovfl) | ||
382 | return -EOPNOTSUPP; | ||
383 | |||
384 | |||
385 | spin_lock(&ds_lock); | ||
386 | |||
387 | if (!check_tracer(task)) | ||
388 | return -EPERM; | ||
389 | |||
390 | error = -ENOMEM; | ||
391 | context = ds_alloc_context(task); | ||
392 | if (!context) | ||
393 | goto out_unlock; | ||
394 | |||
395 | error = -EALREADY; | ||
396 | if (context->owner[qual] == current) | ||
397 | goto out_unlock; | ||
398 | error = -EPERM; | ||
399 | if (context->owner[qual] != NULL) | ||
400 | goto out_unlock; | ||
401 | context->owner[qual] = current; | ||
402 | |||
403 | spin_unlock(&ds_lock); | ||
404 | |||
405 | |||
406 | error = -ENOMEM; | ||
407 | if (!base) { | ||
408 | base = ds_allocate_buffer(size, &context->pages[qual]); | ||
409 | if (!base) | ||
410 | goto out_release; | ||
411 | |||
412 | context->buffer[qual] = base; | ||
413 | } | ||
414 | error = 0; | ||
415 | |||
416 | context->callback[qual] = ovfl; | ||
417 | |||
418 | /* adjust the buffer address and size to meet alignment | ||
419 | * constraints: | ||
420 | * - buffer is double-word aligned | ||
421 | * - size is multiple of record size | ||
422 | * | ||
423 | * We checked the size at the very beginning; we have enough | ||
424 | * space to do the adjustment. | ||
425 | */ | ||
426 | buffer = (unsigned long)base; | ||
427 | |||
428 | adj = ALIGN(buffer, alignment) - buffer; | ||
429 | buffer += adj; | ||
430 | size -= adj; | ||
431 | |||
432 | size /= ds_cfg.sizeof_rec[qual]; | ||
433 | size *= ds_cfg.sizeof_rec[qual]; | ||
434 | |||
435 | ds_set(context->ds, qual, ds_buffer_base, buffer); | ||
436 | ds_set(context->ds, qual, ds_index, buffer); | ||
437 | ds_set(context->ds, qual, ds_absolute_maximum, buffer + size); | ||
438 | |||
439 | if (ovfl) { | ||
440 | /* todo: select a suitable interrupt threshold */ | ||
441 | } else | ||
442 | ds_set(context->ds, qual, | ||
443 | ds_interrupt_threshold, buffer + size + 1); | ||
444 | |||
445 | /* we keep the context until ds_release */ | ||
446 | return error; | ||
447 | |||
448 | out_release: | ||
449 | context->owner[qual] = NULL; | ||
450 | ds_put_context(context); | ||
451 | return error; | ||
452 | |||
453 | out_unlock: | ||
454 | spin_unlock(&ds_lock); | ||
455 | ds_put_context(context); | ||
456 | return error; | ||
149 | } | 457 | } |
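Before programming the DS fields, ds_request() aligns the buffer start up to 8 bytes and rounds the usable size down to a whole number of records, which is why a minimum size of alignment + sizeof_rec is required up front. A standalone sketch of that adjustment with made-up numbers:

/* Standalone sketch of the alignment/size adjustment in ds_request(). */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long buffer = 0x1003;		/* unaligned caller buffer */
	unsigned long size = 100;
	const unsigned long alignment = 8;
	const unsigned long sizeof_rec = 24;	/* e.g. 3 * 8 for a 64bit BTS record */

	unsigned long adj = ALIGN_UP(buffer, alignment) - buffer;
	buffer += adj;
	size -= adj;
	size = (size / sizeof_rec) * sizeof_rec;

	/* prints base=0x1008 index=0x1008 absolute_maximum=0x1050 */
	printf("base=%#lx index=%#lx absolute_maximum=%#lx\n",
	       buffer, buffer, buffer + size);
	return 0;
}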
150 | static inline void set_from_ip(char *base, unsigned long value) | 458 | |
459 | int ds_request_bts(struct task_struct *task, void *base, size_t size, | ||
460 | ds_ovfl_callback_t ovfl) | ||
151 | { | 461 | { |
152 | (*(unsigned long *)(base + ds_cfg.from_ip.offset)) = value; | 462 | return ds_request(task, base, size, ovfl, ds_bts); |
153 | } | 463 | } |
154 | static inline unsigned long get_to_ip(char *base) | 464 | |
465 | int ds_request_pebs(struct task_struct *task, void *base, size_t size, | ||
466 | ds_ovfl_callback_t ovfl) | ||
155 | { | 467 | { |
156 | return *(unsigned long *)(base + ds_cfg.to_ip.offset); | 468 | return ds_request(task, base, size, ovfl, ds_pebs); |
157 | } | 469 | } |
158 | static inline void set_to_ip(char *base, unsigned long value) | 470 | |
471 | static int ds_release(struct task_struct *task, enum ds_qualifier qual) | ||
159 | { | 472 | { |
160 | (*(unsigned long *)(base + ds_cfg.to_ip.offset)) = value; | 473 | struct ds_context *context; |
474 | int error; | ||
475 | |||
476 | context = ds_get_context(task); | ||
477 | error = ds_validate_access(context, qual); | ||
478 | if (error < 0) | ||
479 | goto out; | ||
480 | |||
481 | kfree(context->buffer[qual]); | ||
482 | context->buffer[qual] = NULL; | ||
483 | |||
484 | current->mm->total_vm -= context->pages[qual]; | ||
485 | current->mm->locked_vm -= context->pages[qual]; | ||
486 | context->pages[qual] = 0; | ||
487 | context->owner[qual] = NULL; | ||
488 | |||
489 | /* | ||
490 | * we put the context twice: | ||
491 | * once for the ds_get_context | ||
492 | * once for the corresponding ds_request | ||
493 | */ | ||
494 | ds_put_context(context); | ||
495 | out: | ||
496 | ds_put_context(context); | ||
497 | return error; | ||
161 | } | 498 | } |
162 | static inline unsigned char get_info_type(char *base) | 499 | |
500 | int ds_release_bts(struct task_struct *task) | ||
163 | { | 501 | { |
164 | return *(unsigned char *)(base + ds_cfg.info_type.offset); | 502 | return ds_release(task, ds_bts); |
165 | } | 503 | } |
166 | static inline void set_info_type(char *base, unsigned char value) | 504 | |
505 | int ds_release_pebs(struct task_struct *task) | ||
167 | { | 506 | { |
168 | (*(unsigned char *)(base + ds_cfg.info_type.offset)) = value; | 507 | return ds_release(task, ds_pebs); |
169 | } | 508 | } |
170 | static inline unsigned long get_info_data(char *base) | 509 | |
510 | static int ds_get_index(struct task_struct *task, size_t *pos, | ||
511 | enum ds_qualifier qual) | ||
171 | { | 512 | { |
172 | return *(unsigned long *)(base + ds_cfg.info_data.offset); | 513 | struct ds_context *context; |
514 | unsigned long base, index; | ||
515 | int error; | ||
516 | |||
517 | context = ds_get_context(task); | ||
518 | error = ds_validate_access(context, qual); | ||
519 | if (error < 0) | ||
520 | goto out; | ||
521 | |||
522 | base = ds_get(context->ds, qual, ds_buffer_base); | ||
523 | index = ds_get(context->ds, qual, ds_index); | ||
524 | |||
525 | error = ((index - base) / ds_cfg.sizeof_rec[qual]); | ||
526 | if (pos) | ||
527 | *pos = error; | ||
528 | out: | ||
529 | ds_put_context(context); | ||
530 | return error; | ||
173 | } | 531 | } |
174 | static inline void set_info_data(char *base, unsigned long value) | 532 | |
533 | int ds_get_bts_index(struct task_struct *task, size_t *pos) | ||
175 | { | 534 | { |
176 | (*(unsigned long *)(base + ds_cfg.info_data.offset)) = value; | 535 | return ds_get_index(task, pos, ds_bts); |
177 | } | 536 | } |
178 | 537 | ||
538 | int ds_get_pebs_index(struct task_struct *task, size_t *pos) | ||
539 | { | ||
540 | return ds_get_index(task, pos, ds_pebs); | ||
541 | } | ||
179 | 542 | ||
180 | int ds_allocate(void **dsp, size_t bts_size_in_bytes) | 543 | static int ds_get_end(struct task_struct *task, size_t *pos, |
544 | enum ds_qualifier qual) | ||
181 | { | 545 | { |
182 | size_t bts_size_in_records; | 546 | struct ds_context *context; |
183 | unsigned long bts; | 547 | unsigned long base, end; |
184 | void *ds; | 548 | int error; |
549 | |||
550 | context = ds_get_context(task); | ||
551 | error = ds_validate_access(context, qual); | ||
552 | if (error < 0) | ||
553 | goto out; | ||
554 | |||
555 | base = ds_get(context->ds, qual, ds_buffer_base); | ||
556 | end = ds_get(context->ds, qual, ds_absolute_maximum); | ||
557 | |||
558 | error = ((end - base) / ds_cfg.sizeof_rec[qual]); | ||
559 | if (pos) | ||
560 | *pos = error; | ||
561 | out: | ||
562 | ds_put_context(context); | ||
563 | return error; | ||
564 | } | ||
185 | 565 | ||
186 | if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) | 566 | int ds_get_bts_end(struct task_struct *task, size_t *pos) |
187 | return -EOPNOTSUPP; | 567 | { |
568 | return ds_get_end(task, pos, ds_bts); | ||
569 | } | ||
188 | 570 | ||
189 | if (bts_size_in_bytes < 0) | 571 | int ds_get_pebs_end(struct task_struct *task, size_t *pos) |
190 | return -EINVAL; | 572 | { |
573 | return ds_get_end(task, pos, ds_pebs); | ||
574 | } | ||
191 | 575 | ||
192 | bts_size_in_records = | 576 | static int ds_access(struct task_struct *task, size_t index, |
193 | bts_size_in_bytes / ds_cfg.sizeof_bts; | 577 | const void **record, enum ds_qualifier qual) |
194 | bts_size_in_bytes = | 578 | { |
195 | bts_size_in_records * ds_cfg.sizeof_bts; | 579 | struct ds_context *context; |
580 | unsigned long base, idx; | ||
581 | int error; | ||
196 | 582 | ||
197 | if (bts_size_in_bytes <= 0) | 583 | if (!record) |
198 | return -EINVAL; | 584 | return -EINVAL; |
199 | 585 | ||
200 | bts = (unsigned long)kzalloc(bts_size_in_bytes, GFP_KERNEL); | 586 | context = ds_get_context(task); |
201 | 587 | error = ds_validate_access(context, qual); | |
202 | if (!bts) | 588 | if (error < 0) |
203 | return -ENOMEM; | 589 | goto out; |
204 | 590 | ||
205 | ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL); | 591 | base = ds_get(context->ds, qual, ds_buffer_base); |
592 | idx = base + (index * ds_cfg.sizeof_rec[qual]); | ||
206 | 593 | ||
207 | if (!ds) { | 594 | error = -EINVAL; |
208 | kfree((void *)bts); | 595 | if (idx > ds_get(context->ds, qual, ds_absolute_maximum)) |
209 | return -ENOMEM; | 596 | goto out; |
210 | } | ||
211 | |||
212 | set_bts_buffer_base(ds, bts); | ||
213 | set_bts_index(ds, bts); | ||
214 | set_bts_absolute_maximum(ds, bts + bts_size_in_bytes); | ||
215 | set_bts_interrupt_threshold(ds, bts + bts_size_in_bytes + 1); | ||
216 | 597 | ||
217 | *dsp = ds; | 598 | *record = (const void *)idx; |
218 | return 0; | 599 | error = ds_cfg.sizeof_rec[qual]; |
600 | out: | ||
601 | ds_put_context(context); | ||
602 | return error; | ||
219 | } | 603 | } |
220 | 604 | ||
221 | int ds_free(void **dsp) | 605 | int ds_access_bts(struct task_struct *task, size_t index, const void **record) |
222 | { | 606 | { |
223 | if (*dsp) { | 607 | return ds_access(task, index, record, ds_bts); |
224 | kfree((void *)get_bts_buffer_base(*dsp)); | ||
225 | kfree(*dsp); | ||
226 | *dsp = NULL; | ||
227 | } | ||
228 | return 0; | ||
229 | } | 608 | } |
230 | 609 | ||
231 | int ds_get_bts_size(void *ds) | 610 | int ds_access_pebs(struct task_struct *task, size_t index, const void **record) |
232 | { | 611 | { |
233 | int size_in_bytes; | 612 | return ds_access(task, index, record, ds_pebs); |
234 | |||
235 | if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) | ||
236 | return -EOPNOTSUPP; | ||
237 | |||
238 | if (!ds) | ||
239 | return 0; | ||
240 | |||
241 | size_in_bytes = | ||
242 | get_bts_absolute_maximum(ds) - | ||
243 | get_bts_buffer_base(ds); | ||
244 | return size_in_bytes; | ||
245 | } | 613 | } |
246 | 614 | ||
247 | int ds_get_bts_end(void *ds) | 615 | static int ds_write(struct task_struct *task, const void *record, size_t size, |
616 | enum ds_qualifier qual, int force) | ||
248 | { | 617 | { |
249 | int size_in_bytes = ds_get_bts_size(ds); | 618 | struct ds_context *context; |
250 | 619 | int error; | |
251 | if (size_in_bytes <= 0) | ||
252 | return size_in_bytes; | ||
253 | 620 | ||
254 | return size_in_bytes / ds_cfg.sizeof_bts; | 621 | if (!record) |
255 | } | 622 | return -EINVAL; |
256 | 623 | ||
257 | int ds_get_bts_index(void *ds) | 624 | error = -EPERM; |
258 | { | 625 | context = ds_get_context(task); |
259 | int index_offset_in_bytes; | 626 | if (!context) |
627 | goto out; | ||
260 | 628 | ||
261 | if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) | 629 | if (!force) { |
262 | return -EOPNOTSUPP; | 630 | error = ds_validate_access(context, qual); |
631 | if (error < 0) | ||
632 | goto out; | ||
633 | } | ||
263 | 634 | ||
264 | index_offset_in_bytes = | 635 | error = 0; |
265 | get_bts_index(ds) - | 636 | while (size) { |
266 | get_bts_buffer_base(ds); | 637 | unsigned long base, index, end, write_end, int_th; |
638 | unsigned long write_size, adj_write_size; | ||
639 | |||
640 | /* | ||
641 | * write as much as possible without producing an | ||
642 | * overflow interrupt. | ||
643 | * | ||
644 | * interrupt_threshold must either be | ||
645 | * - bigger than absolute_maximum or | ||
646 | * - point to a record between buffer_base and absolute_maximum | ||
647 | * | ||
648 | * index points to a valid record. | ||
649 | */ | ||
650 | base = ds_get(context->ds, qual, ds_buffer_base); | ||
651 | index = ds_get(context->ds, qual, ds_index); | ||
652 | end = ds_get(context->ds, qual, ds_absolute_maximum); | ||
653 | int_th = ds_get(context->ds, qual, ds_interrupt_threshold); | ||
654 | |||
655 | write_end = min(end, int_th); | ||
656 | |||
657 | /* if we are already beyond the interrupt threshold, | ||
658 | * we fill the entire buffer */ | ||
659 | if (write_end <= index) | ||
660 | write_end = end; | ||
661 | |||
662 | if (write_end <= index) | ||
663 | goto out; | ||
664 | |||
665 | write_size = min((unsigned long) size, write_end - index); | ||
666 | memcpy((void *)index, record, write_size); | ||
667 | |||
668 | record = (const char *)record + write_size; | ||
669 | size -= write_size; | ||
670 | error += write_size; | ||
671 | |||
672 | adj_write_size = write_size / ds_cfg.sizeof_rec[qual]; | ||
673 | adj_write_size *= ds_cfg.sizeof_rec[qual]; | ||
674 | |||
675 | /* zero out trailing bytes */ | ||
676 | memset((char *)index + write_size, 0, | ||
677 | adj_write_size - write_size); | ||
678 | index += adj_write_size; | ||
679 | |||
680 | if (index >= end) | ||
681 | index = base; | ||
682 | ds_set(context->ds, qual, ds_index, index); | ||
683 | |||
684 | if (index >= int_th) | ||
685 | ds_overflow(task, context, qual); | ||
686 | } | ||
267 | 687 | ||
268 | return index_offset_in_bytes / ds_cfg.sizeof_bts; | 688 | out: |
689 | ds_put_context(context); | ||
690 | return error; | ||
269 | } | 691 | } |
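ds_write() copies whole records, zero-pads the tail of the last one, wraps the index back to buffer_base once it reaches absolute_maximum, and calls the (still unimplemented) overflow handler when the interrupt threshold is crossed. The standalone sketch below shows just the wrap step with illustrative sizes:

/* Standalone sketch of the index wrap in ds_write(). */
#include <stdio.h>

int main(void)
{
	unsigned long base = 0x1000;
	unsigned long end = 0x1000 + 3 * 24;	/* 3 records of 24 bytes */
	unsigned long index = base;
	int i;

	for (i = 0; i < 5; i++) {		/* write 5 records into a 3-record buffer */
		index += 24;
		if (index >= end)
			index = base;		/* wrap, as ds_write() does */
		printf("record %d -> next index %#lx\n", i, index);
	}
	return 0;
}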
270 | 692 | ||
271 | int ds_set_overflow(void *ds, int method) | 693 | int ds_write_bts(struct task_struct *task, const void *record, size_t size) |
272 | { | 694 | { |
273 | switch (method) { | 695 | return ds_write(task, record, size, ds_bts, /* force = */ 0); |
274 | case DS_O_SIGNAL: | ||
275 | return -EOPNOTSUPP; | ||
276 | case DS_O_WRAP: | ||
277 | return 0; | ||
278 | default: | ||
279 | return -EINVAL; | ||
280 | } | ||
281 | } | 696 | } |
282 | 697 | ||
283 | int ds_get_overflow(void *ds) | 698 | int ds_write_pebs(struct task_struct *task, const void *record, size_t size) |
284 | { | 699 | { |
285 | return DS_O_WRAP; | 700 | return ds_write(task, record, size, ds_pebs, /* force = */ 0); |
286 | } | 701 | } |
287 | 702 | ||
288 | int ds_clear(void *ds) | 703 | int ds_unchecked_write_bts(struct task_struct *task, |
704 | const void *record, size_t size) | ||
289 | { | 705 | { |
290 | int bts_size = ds_get_bts_size(ds); | 706 | return ds_write(task, record, size, ds_bts, /* force = */ 1); |
291 | unsigned long bts_base; | ||
292 | |||
293 | if (bts_size <= 0) | ||
294 | return bts_size; | ||
295 | |||
296 | bts_base = get_bts_buffer_base(ds); | ||
297 | memset((void *)bts_base, 0, bts_size); | ||
298 | |||
299 | set_bts_index(ds, bts_base); | ||
300 | return 0; | ||
301 | } | 707 | } |
302 | 708 | ||
303 | int ds_read_bts(void *ds, int index, struct bts_struct *out) | 709 | int ds_unchecked_write_pebs(struct task_struct *task, |
710 | const void *record, size_t size) | ||
304 | { | 711 | { |
305 | void *bts; | 712 | return ds_write(task, record, size, ds_pebs, /* force = */ 1); |
713 | } | ||
306 | 714 | ||
307 | if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) | 715 | static int ds_reset_or_clear(struct task_struct *task, |
308 | return -EOPNOTSUPP; | 716 | enum ds_qualifier qual, int clear) |
717 | { | ||
718 | struct ds_context *context; | ||
719 | unsigned long base, end; | ||
720 | int error; | ||
309 | 721 | ||
310 | if (index < 0) | 722 | context = ds_get_context(task); |
311 | return -EINVAL; | 723 | error = ds_validate_access(context, qual); |
724 | if (error < 0) | ||
725 | goto out; | ||
312 | 726 | ||
313 | if (index >= ds_get_bts_size(ds)) | 727 | base = ds_get(context->ds, qual, ds_buffer_base); |
314 | return -EINVAL; | 728 | end = ds_get(context->ds, qual, ds_absolute_maximum); |
315 | 729 | ||
316 | bts = (void *)(get_bts_buffer_base(ds) + (index * ds_cfg.sizeof_bts)); | 730 | if (clear) |
731 | memset((void *)base, 0, end - base); | ||
317 | 732 | ||
318 | memset(out, 0, sizeof(*out)); | 733 | ds_set(context->ds, qual, ds_index, base); |
319 | if (get_from_ip(bts) == BTS_ESCAPE_ADDRESS) { | ||
320 | out->qualifier = get_info_type(bts); | ||
321 | out->variant.jiffies = get_info_data(bts); | ||
322 | } else { | ||
323 | out->qualifier = BTS_BRANCH; | ||
324 | out->variant.lbr.from_ip = get_from_ip(bts); | ||
325 | out->variant.lbr.to_ip = get_to_ip(bts); | ||
326 | } | ||
327 | 734 | ||
328 | return sizeof(*out);; | 735 | error = 0; |
736 | out: | ||
737 | ds_put_context(context); | ||
738 | return error; | ||
329 | } | 739 | } |
330 | 740 | ||
331 | int ds_write_bts(void *ds, const struct bts_struct *in) | 741 | int ds_reset_bts(struct task_struct *task) |
332 | { | 742 | { |
333 | unsigned long bts; | 743 | return ds_reset_or_clear(task, ds_bts, /* clear = */ 0); |
334 | 744 | } | |
335 | if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) | ||
336 | return -EOPNOTSUPP; | ||
337 | |||
338 | if (ds_get_bts_size(ds) <= 0) | ||
339 | return -ENXIO; | ||
340 | 745 | ||
341 | bts = get_bts_index(ds); | 746 | int ds_reset_pebs(struct task_struct *task) |
747 | { | ||
748 | return ds_reset_or_clear(task, ds_pebs, /* clear = */ 0); | ||
749 | } | ||
342 | 750 | ||
343 | memset((void *)bts, 0, ds_cfg.sizeof_bts); | 751 | int ds_clear_bts(struct task_struct *task) |
344 | switch (in->qualifier) { | 752 | { |
345 | case BTS_INVALID: | 753 | return ds_reset_or_clear(task, ds_bts, /* clear = */ 1); |
346 | break; | 754 | } |
347 | 755 | ||
348 | case BTS_BRANCH: | 756 | int ds_clear_pebs(struct task_struct *task) |
349 | set_from_ip((void *)bts, in->variant.lbr.from_ip); | 757 | { |
350 | set_to_ip((void *)bts, in->variant.lbr.to_ip); | 758 | return ds_reset_or_clear(task, ds_pebs, /* clear = */ 1); |
351 | break; | 759 | } |
352 | 760 | ||
353 | case BTS_TASK_ARRIVES: | 761 | int ds_get_pebs_reset(struct task_struct *task, u64 *value) |
354 | case BTS_TASK_DEPARTS: | 762 | { |
355 | set_from_ip((void *)bts, BTS_ESCAPE_ADDRESS); | 763 | struct ds_context *context; |
356 | set_info_type((void *)bts, in->qualifier); | 764 | int error; |
357 | set_info_data((void *)bts, in->variant.jiffies); | ||
358 | break; | ||
359 | 765 | ||
360 | default: | 766 | if (!value) |
361 | return -EINVAL; | 767 | return -EINVAL; |
362 | } | ||
363 | 768 | ||
364 | bts = bts + ds_cfg.sizeof_bts; | 769 | context = ds_get_context(task); |
365 | if (bts >= get_bts_absolute_maximum(ds)) | 770 | error = ds_validate_access(context, ds_pebs); |
366 | bts = get_bts_buffer_base(ds); | 771 | if (error < 0) |
367 | set_bts_index(ds, bts); | 772 | goto out; |
368 | 773 | ||
369 | return ds_cfg.sizeof_bts; | 774 | *value = *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8)); |
775 | |||
776 | error = 0; | ||
777 | out: | ||
778 | ds_put_context(context); | ||
779 | return error; | ||
370 | } | 780 | } |
371 | 781 | ||
372 | unsigned long ds_debugctl_mask(void) | 782 | int ds_set_pebs_reset(struct task_struct *task, u64 value) |
373 | { | 783 | { |
374 | return ds_cfg.debugctl_mask; | 784 | struct ds_context *context; |
375 | } | 785 | int error; |
376 | 786 | ||
377 | #ifdef __i386__ | 787 | context = ds_get_context(task); |
378 | static const struct ds_configuration ds_cfg_netburst = { | 788 | error = ds_validate_access(context, ds_pebs); |
379 | .sizeof_ds = 9 * 4, | 789 | if (error < 0) |
380 | .bts_buffer_base = { 0, 4 }, | 790 | goto out; |
381 | .bts_index = { 4, 4 }, | ||
382 | .bts_absolute_maximum = { 8, 4 }, | ||
383 | .bts_interrupt_threshold = { 12, 4 }, | ||
384 | .sizeof_bts = 3 * 4, | ||
385 | .from_ip = { 0, 4 }, | ||
386 | .to_ip = { 4, 4 }, | ||
387 | .info_type = { 4, 1 }, | ||
388 | .info_data = { 8, 4 }, | ||
389 | .debugctl_mask = (1<<2)|(1<<3) | ||
390 | }; | ||
391 | 791 | ||
392 | static const struct ds_configuration ds_cfg_pentium_m = { | 792 | *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8)) = value; |
393 | .sizeof_ds = 9 * 4, | 793 | |
394 | .bts_buffer_base = { 0, 4 }, | 794 | error = 0; |
395 | .bts_index = { 4, 4 }, | 795 | out: |
396 | .bts_absolute_maximum = { 8, 4 }, | 796 | ds_put_context(context); |
397 | .bts_interrupt_threshold = { 12, 4 }, | 797 | return error; |
398 | .sizeof_bts = 3 * 4, | 798 | } |
399 | .from_ip = { 0, 4 }, | 799 | |
400 | .to_ip = { 4, 4 }, | 800 | static const struct ds_configuration ds_cfg_var = { |
401 | .info_type = { 4, 1 }, | 801 | .sizeof_ds = sizeof(long) * 12, |
402 | .info_data = { 8, 4 }, | 802 | .sizeof_field = sizeof(long), |
403 | .debugctl_mask = (1<<6)|(1<<7) | 803 | .sizeof_rec[ds_bts] = sizeof(long) * 3, |
804 | .sizeof_rec[ds_pebs] = sizeof(long) * 10 | ||
404 | }; | 805 | }; |
405 | #endif /* _i386_ */ | 806 | static const struct ds_configuration ds_cfg_64 = { |
406 | 807 | .sizeof_ds = 8 * 12, | |
407 | static const struct ds_configuration ds_cfg_core2 = { | 808 | .sizeof_field = 8, |
408 | .sizeof_ds = 9 * 8, | 809 | .sizeof_rec[ds_bts] = 8 * 3, |
409 | .bts_buffer_base = { 0, 8 }, | 810 | .sizeof_rec[ds_pebs] = 8 * 10 |
410 | .bts_index = { 8, 8 }, | ||
411 | .bts_absolute_maximum = { 16, 8 }, | ||
412 | .bts_interrupt_threshold = { 24, 8 }, | ||
413 | .sizeof_bts = 3 * 8, | ||
414 | .from_ip = { 0, 8 }, | ||
415 | .to_ip = { 8, 8 }, | ||
416 | .info_type = { 8, 1 }, | ||
417 | .info_data = { 16, 8 }, | ||
418 | .debugctl_mask = (1<<6)|(1<<7)|(1<<9) | ||
419 | }; | 811 | }; |
420 | 812 | ||
421 | static inline void | 813 | static inline void |
@@ -429,14 +821,13 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c) | |||
429 | switch (c->x86) { | 821 | switch (c->x86) { |
430 | case 0x6: | 822 | case 0x6: |
431 | switch (c->x86_model) { | 823 | switch (c->x86_model) { |
432 | #ifdef __i386__ | ||
433 | case 0xD: | 824 | case 0xD: |
434 | case 0xE: /* Pentium M */ | 825 | case 0xE: /* Pentium M */ |
435 | ds_configure(&ds_cfg_pentium_m); | 826 | ds_configure(&ds_cfg_var); |
436 | break; | 827 | break; |
437 | #endif /* _i386_ */ | ||
438 | case 0xF: /* Core2 */ | 828 | case 0xF: /* Core2 */ |
439 | ds_configure(&ds_cfg_core2); | 829 | case 0x1C: /* Atom */ |
830 | ds_configure(&ds_cfg_64); | ||
440 | break; | 831 | break; |
441 | default: | 832 | default: |
442 | /* sorry, don't know about them */ | 833 | /* sorry, don't know about them */ |
@@ -445,13 +836,11 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c) | |||
445 | break; | 836 | break; |
446 | case 0xF: | 837 | case 0xF: |
447 | switch (c->x86_model) { | 838 | switch (c->x86_model) { |
448 | #ifdef __i386__ | ||
449 | case 0x0: | 839 | case 0x0: |
450 | case 0x1: | 840 | case 0x1: |
451 | case 0x2: /* Netburst */ | 841 | case 0x2: /* Netburst */ |
452 | ds_configure(&ds_cfg_netburst); | 842 | ds_configure(&ds_cfg_var); |
453 | break; | 843 | break; |
454 | #endif /* _i386_ */ | ||
455 | default: | 844 | default: |
456 | /* sorry, don't know about them */ | 845 | /* sorry, don't know about them */ |
457 | break; | 846 | break; |
@@ -462,3 +851,14 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c) | |||
462 | break; | 851 | break; |
463 | } | 852 | } |
464 | } | 853 | } |
854 | |||
855 | void ds_free(struct ds_context *context) | ||
856 | { | ||
857 | /* This is called when the task owning the parameter context | ||
858 | * is dying. There should not be any user of that context left | ||
859 | * to disturb us anymore. */ | ||
860 | unsigned long leftovers = context->count; | ||
861 | while (leftovers--) | ||
862 | ds_put_context(context); | ||
863 | } | ||
864 | #endif /* CONFIG_X86_DS */ | ||
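Taken together, the interfaces exported above are meant to be used in request/access/release triples per qualifier. The fragment below is only a sketch of such a call sequence for a kernel-side BTS client, built from the prototypes in this file; it is not buildable outside a kernel tree, and the helper name dump_bts_of() is hypothetical, not part of this patch.

/* Sketch only: a kernel-side BTS client pairing the calls introduced above. */
static int dump_bts_of(struct task_struct *task)
{
	const void *record;
	size_t idx = 0;
	int error;

	/* kernel-allocated buffer; overflow callbacks are not supported yet */
	error = ds_request_bts(task, NULL, PAGE_SIZE, NULL);
	if (error < 0)
		return error;

	error = ds_get_bts_index(task, &idx);	/* records written so far */
	if (error < 0)
		goto out;

	while (idx--) {
		if (ds_access_bts(task, idx, &record) < 0)
			break;
		/* record points at one raw BTS record of ds_cfg.sizeof_rec[ds_bts] bytes */
	}
	error = 0;
out:
	ds_release_bts(task);
	return error;
}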
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 9af89078f7bb..66e48aa2dd1b 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -1203,7 +1203,7 @@ static int __init parse_memmap_opt(char *p) | |||
1203 | if (!p) | 1203 | if (!p) |
1204 | return -EINVAL; | 1204 | return -EINVAL; |
1205 | 1205 | ||
1206 | if (!strcmp(p, "exactmap")) { | 1206 | if (!strncmp(p, "exactmap", 8)) { |
1207 | #ifdef CONFIG_CRASH_DUMP | 1207 | #ifdef CONFIG_CRASH_DUMP |
1208 | /* | 1208 | /* |
1209 | * If we are doing a crash dump, we still need to know | 1209 | * If we are doing a crash dump, we still need to know |
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c index 06cc8d4254b1..945a31cdd81f 100644 --- a/arch/x86/kernel/efi.c +++ b/arch/x86/kernel/efi.c | |||
@@ -414,9 +414,11 @@ void __init efi_init(void) | |||
414 | if (memmap.map == NULL) | 414 | if (memmap.map == NULL) |
415 | printk(KERN_ERR "Could not map the EFI memory map!\n"); | 415 | printk(KERN_ERR "Could not map the EFI memory map!\n"); |
416 | memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size); | 416 | memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size); |
417 | |||
417 | if (memmap.desc_size != sizeof(efi_memory_desc_t)) | 418 | if (memmap.desc_size != sizeof(efi_memory_desc_t)) |
418 | printk(KERN_WARNING "Kernel-defined memdesc" | 419 | printk(KERN_WARNING |
419 | "doesn't match the one from EFI!\n"); | 420 | "Kernel-defined memdesc doesn't match the one from EFI!\n"); |
421 | |||
420 | if (add_efi_memmap) | 422 | if (add_efi_memmap) |
421 | do_add_efi_memmap(); | 423 | do_add_efi_memmap(); |
422 | 424 | ||
diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c index 4b63c8e1f13b..5cab48ee61a4 100644 --- a/arch/x86/kernel/efi_32.c +++ b/arch/x86/kernel/efi_32.c | |||
@@ -53,7 +53,7 @@ void efi_call_phys_prelog(void) | |||
53 | * directory. If I have PAE, I just need to duplicate one entry in | 53 | * directory. If I have PAE, I just need to duplicate one entry in |
54 | * page directory. | 54 | * page directory. |
55 | */ | 55 | */ |
56 | cr4 = read_cr4(); | 56 | cr4 = read_cr4_safe(); |
57 | 57 | ||
58 | if (cr4 & X86_CR4_PAE) { | 58 | if (cr4 & X86_CR4_PAE) { |
59 | efi_bak_pg_dir_pointer[0].pgd = | 59 | efi_bak_pg_dir_pointer[0].pgd = |
@@ -91,7 +91,7 @@ void efi_call_phys_epilog(void) | |||
91 | gdt_descr.size = GDT_SIZE - 1; | 91 | gdt_descr.size = GDT_SIZE - 1; |
92 | load_gdt(&gdt_descr); | 92 | load_gdt(&gdt_descr); |
93 | 93 | ||
94 | cr4 = read_cr4(); | 94 | cr4 = read_cr4_safe(); |
95 | 95 | ||
96 | if (cr4 & X86_CR4_PAE) { | 96 | if (cr4 & X86_CR4_PAE) { |
97 | swapper_pg_dir[pgd_index(0)].pgd = | 97 | swapper_pg_dir[pgd_index(0)].pgd = |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 89434d439605..cf3a0b2d0059 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -275,9 +275,9 @@ ENTRY(native_usergs_sysret64) | |||
275 | ENTRY(ret_from_fork) | 275 | ENTRY(ret_from_fork) |
276 | CFI_DEFAULT_STACK | 276 | CFI_DEFAULT_STACK |
277 | push kernel_eflags(%rip) | 277 | push kernel_eflags(%rip) |
278 | CFI_ADJUST_CFA_OFFSET 4 | 278 | CFI_ADJUST_CFA_OFFSET 8 |
279 | popf # reset kernel eflags | 279 | popf # reset kernel eflags |
280 | CFI_ADJUST_CFA_OFFSET -4 | 280 | CFI_ADJUST_CFA_OFFSET -8 |
281 | call schedule_tail | 281 | call schedule_tail |
282 | GET_THREAD_INFO(%rcx) | 282 | GET_THREAD_INFO(%rcx) |
283 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) | 283 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) |
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 2cfcbded888a..bfa837cb16be 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c | |||
@@ -222,7 +222,7 @@ static __init void map_low_mmrs(void) | |||
222 | 222 | ||
223 | enum map_type {map_wb, map_uc}; | 223 | enum map_type {map_wb, map_uc}; |
224 | 224 | ||
225 | static void map_high(char *id, unsigned long base, int shift, enum map_type map_type) | 225 | static __init void map_high(char *id, unsigned long base, int shift, enum map_type map_type) |
226 | { | 226 | { |
227 | unsigned long bytes, paddr; | 227 | unsigned long bytes, paddr; |
228 | 228 | ||
@@ -293,7 +293,9 @@ static __init void uv_rtc_init(void) | |||
293 | sn_rtc_cycles_per_second = ticks_per_sec; | 293 | sn_rtc_cycles_per_second = ticks_per_sec; |
294 | } | 294 | } |
295 | 295 | ||
296 | static __init void uv_system_init(void) | 296 | static bool uv_system_inited; |
297 | |||
298 | void __init uv_system_init(void) | ||
297 | { | 299 | { |
298 | union uvh_si_addr_map_config_u m_n_config; | 300 | union uvh_si_addr_map_config_u m_n_config; |
299 | union uvh_node_id_u node_id; | 301 | union uvh_node_id_u node_id; |
@@ -383,6 +385,7 @@ static __init void uv_system_init(void) | |||
383 | map_mmr_high(max_pnode); | 385 | map_mmr_high(max_pnode); |
384 | map_config_high(max_pnode); | 386 | map_config_high(max_pnode); |
385 | map_mmioh_high(max_pnode); | 387 | map_mmioh_high(max_pnode); |
388 | uv_system_inited = true; | ||
386 | } | 389 | } |
387 | 390 | ||
388 | /* | 391 | /* |
@@ -391,8 +394,7 @@ static __init void uv_system_init(void) | |||
391 | */ | 394 | */ |
392 | void __cpuinit uv_cpu_init(void) | 395 | void __cpuinit uv_cpu_init(void) |
393 | { | 396 | { |
394 | if (!uv_node_to_blade) | 397 | BUG_ON(!uv_system_inited); |
395 | uv_system_init(); | ||
396 | 398 | ||
397 | uv_blade_info[uv_numa_blade_id()].nr_online_cpus++; | 399 | uv_blade_info[uv_numa_blade_id()].nr_online_cpus++; |
398 | 400 | ||
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 1b318e903bf6..d16084f90649 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -88,6 +88,7 @@ void __init x86_64_start_kernel(char * real_mode_data) | |||
88 | BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL)); | 88 | BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL)); |
89 | BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == | 89 | BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == |
90 | (__START_KERNEL & PGDIR_MASK))); | 90 | (__START_KERNEL & PGDIR_MASK))); |
91 | BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END); | ||
91 | 92 | ||
92 | /* clear bss before set_intr_gate with early_idt_handler */ | 93 | /* clear bss before set_intr_gate with early_idt_handler */ |
93 | clear_bss(); | 94 | clear_bss(); |
@@ -107,12 +108,11 @@ void __init x86_64_start_kernel(char * real_mode_data) | |||
107 | } | 108 | } |
108 | load_idt((const struct desc_ptr *)&idt_descr); | 109 | load_idt((const struct desc_ptr *)&idt_descr); |
109 | 110 | ||
110 | early_printk("Kernel alive\n"); | 111 | if (console_loglevel == 10) |
112 | early_printk("Kernel alive\n"); | ||
111 | 113 | ||
112 | x86_64_init_pda(); | 114 | x86_64_init_pda(); |
113 | 115 | ||
114 | early_printk("Kernel really alive\n"); | ||
115 | |||
116 | x86_64_start_reservations(real_mode_data); | 116 | x86_64_start_reservations(real_mode_data); |
117 | } | 117 | } |
118 | 118 | ||
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index ad2b15a1334d..73deaffadd03 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -210,8 +210,8 @@ static void hpet_legacy_clockevent_register(void) | |||
210 | /* Calculate the min / max delta */ | 210 | /* Calculate the min / max delta */ |
211 | hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, | 211 | hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, |
212 | &hpet_clockevent); | 212 | &hpet_clockevent); |
213 | hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30, | 213 | /* 5 usec minimum reprogramming delta. */ |
214 | &hpet_clockevent); | 214 | hpet_clockevent.min_delta_ns = 5000; |
215 | 215 | ||
216 | /* | 216 | /* |
217 | * Start hpet with the boot cpu mask and make it | 217 | * Start hpet with the boot cpu mask and make it |
@@ -270,15 +270,22 @@ static void hpet_legacy_set_mode(enum clock_event_mode mode, | |||
270 | } | 270 | } |
271 | 271 | ||
272 | static int hpet_legacy_next_event(unsigned long delta, | 272 | static int hpet_legacy_next_event(unsigned long delta, |
273 | struct clock_event_device *evt) | 273 | struct clock_event_device *evt) |
274 | { | 274 | { |
275 | unsigned long cnt; | 275 | u32 cnt; |
276 | 276 | ||
277 | cnt = hpet_readl(HPET_COUNTER); | 277 | cnt = hpet_readl(HPET_COUNTER); |
278 | cnt += delta; | 278 | cnt += (u32) delta; |
279 | hpet_writel(cnt, HPET_T0_CMP); | 279 | hpet_writel(cnt, HPET_T0_CMP); |
280 | 280 | ||
281 | return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0) ? -ETIME : 0; | 281 | /* |
282 | * We need to read back the CMP register to make sure that | ||
283 | * what we wrote hit the chip before we compare it to the | ||
284 | * counter. | ||
285 | */ | ||
286 | WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt); | ||
287 | |||
288 | return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; | ||
282 | } | 289 | } |
283 | 290 | ||
284 | /* | 291 | /* |
@@ -359,6 +366,7 @@ static int hpet_clocksource_register(void) | |||
359 | int __init hpet_enable(void) | 366 | int __init hpet_enable(void) |
360 | { | 367 | { |
361 | unsigned long id; | 368 | unsigned long id; |
369 | int i; | ||
362 | 370 | ||
363 | if (!is_hpet_capable()) | 371 | if (!is_hpet_capable()) |
364 | return 0; | 372 | return 0; |
@@ -369,6 +377,29 @@ int __init hpet_enable(void) | |||
369 | * Read the period and check for a sane value: | 377 | * Read the period and check for a sane value: |
370 | */ | 378 | */ |
371 | hpet_period = hpet_readl(HPET_PERIOD); | 379 | hpet_period = hpet_readl(HPET_PERIOD); |
380 | |||
381 | /* | ||
382 | * AMD SB700 based systems with spread spectrum enabled use a | ||
383 | * SMM based HPET emulation to provide proper frequency | ||
384 | * setting. The SMM code is initialized with the first HPET | ||
385 | * register access and takes some time to complete. During | ||
386 | * this time the config register reads 0xffffffff. We check | ||
387 | * for max. 1000 loops whether the config register reads a non | ||
388 | * 0xffffffff value to make sure that HPET is up and running | ||
389 | * before we go further. A counting loop is safe, as the HPET | ||
390 | * access takes thousands of CPU cycles. On non SB700 based | ||
391 | * machines this check is only done once and has no side | ||
392 | * effects. | ||
393 | */ | ||
394 | for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) { | ||
395 | if (i == 1000) { | ||
396 | printk(KERN_WARNING | ||
397 | "HPET config register value = 0xFFFFFFFF. " | ||
398 | "Disabling HPET\n"); | ||
399 | goto out_nohpet; | ||
400 | } | ||
401 | } | ||
402 | |||
372 | if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD) | 403 | if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD) |
373 | goto out_nohpet; | 404 | goto out_nohpet; |
374 | 405 | ||
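The SB700 workaround above is a bounded poll: keep re-reading HPET_CFG while it returns 0xffffffff, and give up (disabling HPET) after roughly 1000 attempts. The standalone sketch below models that loop with read_cfg() as a stand-in for hpet_readl(HPET_CFG); the SMM wake-up delay is simulated.

/* Standalone sketch of the bounded readiness poll. */
#include <stdio.h>
#include <stdint.h>

static int attempts_needed = 3;		/* pretend the device wakes up late */

static uint32_t read_cfg(void)
{
	return attempts_needed-- > 0 ? 0xFFFFFFFF : 0x00000001;
}

static int wait_for_hpet(void)
{
	int i;

	for (i = 0; read_cfg() == 0xFFFFFFFF; i++) {
		if (i == 1000)
			return -1;	/* give up and disable HPET, as the kernel does */
	}
	return 0;
}

int main(void)
{
	printf("hpet %s\n", wait_for_hpet() ? "disabled" : "usable");
	return 0;
}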
diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c index 1c3a66a67f83..720d2607aacb 100644 --- a/arch/x86/kernel/io_delay.c +++ b/arch/x86/kernel/io_delay.c | |||
@@ -92,6 +92,14 @@ static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = { | |||
92 | DMI_MATCH(DMI_BOARD_NAME, "30BF") | 92 | DMI_MATCH(DMI_BOARD_NAME, "30BF") |
93 | } | 93 | } |
94 | }, | 94 | }, |
95 | { | ||
96 | .callback = dmi_io_delay_0xed_port, | ||
97 | .ident = "Presario F700", | ||
98 | .matches = { | ||
99 | DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), | ||
100 | DMI_MATCH(DMI_BOARD_NAME, "30D3") | ||
101 | } | ||
102 | }, | ||
95 | { } | 103 | { } |
96 | }; | 104 | }; |
97 | 105 | ||
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 50e5e4a31c85..191914302744 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/thread_info.h> | 15 | #include <linux/thread_info.h> |
16 | #include <linux/syscalls.h> | 16 | #include <linux/syscalls.h> |
17 | #include <asm/syscalls.h> | ||
17 | 18 | ||
18 | /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ | 19 | /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ |
19 | static void set_bitmap(unsigned long *bitmap, unsigned int base, | 20 | static void set_bitmap(unsigned long *bitmap, unsigned int base, |
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index 3f7537b669d3..f1c688e46f35 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c | |||
@@ -20,6 +20,8 @@ | |||
20 | 20 | ||
21 | #ifdef CONFIG_X86_32 | 21 | #ifdef CONFIG_X86_32 |
22 | #include <mach_apic.h> | 22 | #include <mach_apic.h> |
23 | #include <mach_ipi.h> | ||
24 | |||
23 | /* | 25 | /* |
24 | * the following functions deal with sending IPIs between CPUs. | 26 | * the following functions deal with sending IPIs between CPUs. |
25 | * | 27 | * |
@@ -147,7 +149,6 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector) | |||
147 | } | 149 | } |
148 | 150 | ||
149 | /* must come after the send_IPI functions above for inlining */ | 151 | /* must come after the send_IPI functions above for inlining */ |
150 | #include <mach_ipi.h> | ||
151 | static int convert_apicid_to_cpu(int apic_id) | 152 | static int convert_apicid_to_cpu(int apic_id) |
152 | { | 153 | { |
153 | int i; | 154 | int i; |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 1cf8c1fcc088..b71e02d42f4f 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -325,7 +325,7 @@ skip: | |||
325 | for_each_online_cpu(j) | 325 | for_each_online_cpu(j) |
326 | seq_printf(p, "%10u ", | 326 | seq_printf(p, "%10u ", |
327 | per_cpu(irq_stat,j).irq_call_count); | 327 | per_cpu(irq_stat,j).irq_call_count); |
328 | seq_printf(p, " function call interrupts\n"); | 328 | seq_printf(p, " Function call interrupts\n"); |
329 | seq_printf(p, "TLB: "); | 329 | seq_printf(p, "TLB: "); |
330 | for_each_online_cpu(j) | 330 | for_each_online_cpu(j) |
331 | seq_printf(p, "%10u ", | 331 | seq_printf(p, "%10u ", |
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 1f78b238d8d2..f065fe9071b9 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -129,7 +129,7 @@ skip: | |||
129 | seq_printf(p, "CAL: "); | 129 | seq_printf(p, "CAL: "); |
130 | for_each_online_cpu(j) | 130 | for_each_online_cpu(j) |
131 | seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count); | 131 | seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count); |
132 | seq_printf(p, " function call interrupts\n"); | 132 | seq_printf(p, " Function call interrupts\n"); |
133 | seq_printf(p, "TLB: "); | 133 | seq_printf(p, "TLB: "); |
134 | for_each_online_cpu(j) | 134 | for_each_online_cpu(j) |
135 | seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count); | 135 | seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count); |
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c index f2d43bc75514..ff7d3b0124f1 100644 --- a/arch/x86/kernel/kdebugfs.c +++ b/arch/x86/kernel/kdebugfs.c | |||
@@ -139,6 +139,7 @@ static int __init create_setup_data_nodes(struct dentry *parent) | |||
139 | if (PageHighMem(pg)) { | 139 | if (PageHighMem(pg)) { |
140 | data = ioremap_cache(pa_data, sizeof(*data)); | 140 | data = ioremap_cache(pa_data, sizeof(*data)); |
141 | if (!data) { | 141 | if (!data) { |
142 | kfree(node); | ||
142 | error = -ENXIO; | 143 | error = -ENXIO; |
143 | goto err_dir; | 144 | goto err_dir; |
144 | } | 145 | } |
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index f47f0eb886b8..8282a2139681 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -69,6 +69,9 @@ static int gdb_x86vector = -1; | |||
69 | */ | 69 | */ |
70 | void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | 70 | void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) |
71 | { | 71 | { |
72 | #ifndef CONFIG_X86_32 | ||
73 | u32 *gdb_regs32 = (u32 *)gdb_regs; | ||
74 | #endif | ||
72 | gdb_regs[GDB_AX] = regs->ax; | 75 | gdb_regs[GDB_AX] = regs->ax; |
73 | gdb_regs[GDB_BX] = regs->bx; | 76 | gdb_regs[GDB_BX] = regs->bx; |
74 | gdb_regs[GDB_CX] = regs->cx; | 77 | gdb_regs[GDB_CX] = regs->cx; |
@@ -76,9 +79,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
76 | gdb_regs[GDB_SI] = regs->si; | 79 | gdb_regs[GDB_SI] = regs->si; |
77 | gdb_regs[GDB_DI] = regs->di; | 80 | gdb_regs[GDB_DI] = regs->di; |
78 | gdb_regs[GDB_BP] = regs->bp; | 81 | gdb_regs[GDB_BP] = regs->bp; |
79 | gdb_regs[GDB_PS] = regs->flags; | ||
80 | gdb_regs[GDB_PC] = regs->ip; | 82 | gdb_regs[GDB_PC] = regs->ip; |
81 | #ifdef CONFIG_X86_32 | 83 | #ifdef CONFIG_X86_32 |
84 | gdb_regs[GDB_PS] = regs->flags; | ||
82 | gdb_regs[GDB_DS] = regs->ds; | 85 | gdb_regs[GDB_DS] = regs->ds; |
83 | gdb_regs[GDB_ES] = regs->es; | 86 | gdb_regs[GDB_ES] = regs->es; |
84 | gdb_regs[GDB_CS] = regs->cs; | 87 | gdb_regs[GDB_CS] = regs->cs; |
@@ -94,6 +97,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
94 | gdb_regs[GDB_R13] = regs->r13; | 97 | gdb_regs[GDB_R13] = regs->r13; |
95 | gdb_regs[GDB_R14] = regs->r14; | 98 | gdb_regs[GDB_R14] = regs->r14; |
96 | gdb_regs[GDB_R15] = regs->r15; | 99 | gdb_regs[GDB_R15] = regs->r15; |
100 | gdb_regs32[GDB_PS] = regs->flags; | ||
101 | gdb_regs32[GDB_CS] = regs->cs; | ||
102 | gdb_regs32[GDB_SS] = regs->ss; | ||
97 | #endif | 103 | #endif |
98 | gdb_regs[GDB_SP] = regs->sp; | 104 | gdb_regs[GDB_SP] = regs->sp; |
99 | } | 105 | } |
@@ -112,6 +118,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
112 | */ | 118 | */ |
113 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | 119 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) |
114 | { | 120 | { |
121 | #ifndef CONFIG_X86_32 | ||
122 | u32 *gdb_regs32 = (u32 *)gdb_regs; | ||
123 | #endif | ||
115 | gdb_regs[GDB_AX] = 0; | 124 | gdb_regs[GDB_AX] = 0; |
116 | gdb_regs[GDB_BX] = 0; | 125 | gdb_regs[GDB_BX] = 0; |
117 | gdb_regs[GDB_CX] = 0; | 126 | gdb_regs[GDB_CX] = 0; |
@@ -129,8 +138,10 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | |||
129 | gdb_regs[GDB_FS] = 0xFFFF; | 138 | gdb_regs[GDB_FS] = 0xFFFF; |
130 | gdb_regs[GDB_GS] = 0xFFFF; | 139 | gdb_regs[GDB_GS] = 0xFFFF; |
131 | #else | 140 | #else |
132 | gdb_regs[GDB_PS] = *(unsigned long *)(p->thread.sp + 8); | 141 | gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8); |
133 | gdb_regs[GDB_PC] = 0; | 142 | gdb_regs32[GDB_CS] = __KERNEL_CS; |
143 | gdb_regs32[GDB_SS] = __KERNEL_DS; | ||
144 | gdb_regs[GDB_PC] = p->thread.ip; | ||
134 | gdb_regs[GDB_R8] = 0; | 145 | gdb_regs[GDB_R8] = 0; |
135 | gdb_regs[GDB_R9] = 0; | 146 | gdb_regs[GDB_R9] = 0; |
136 | gdb_regs[GDB_R10] = 0; | 147 | gdb_regs[GDB_R10] = 0; |
@@ -153,6 +164,9 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | |||
153 | */ | 164 | */ |
154 | void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | 165 | void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) |
155 | { | 166 | { |
167 | #ifndef CONFIG_X86_32 | ||
168 | u32 *gdb_regs32 = (u32 *)gdb_regs; | ||
169 | #endif | ||
156 | regs->ax = gdb_regs[GDB_AX]; | 170 | regs->ax = gdb_regs[GDB_AX]; |
157 | regs->bx = gdb_regs[GDB_BX]; | 171 | regs->bx = gdb_regs[GDB_BX]; |
158 | regs->cx = gdb_regs[GDB_CX]; | 172 | regs->cx = gdb_regs[GDB_CX]; |
@@ -160,9 +174,9 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
160 | regs->si = gdb_regs[GDB_SI]; | 174 | regs->si = gdb_regs[GDB_SI]; |
161 | regs->di = gdb_regs[GDB_DI]; | 175 | regs->di = gdb_regs[GDB_DI]; |
162 | regs->bp = gdb_regs[GDB_BP]; | 176 | regs->bp = gdb_regs[GDB_BP]; |
163 | regs->flags = gdb_regs[GDB_PS]; | ||
164 | regs->ip = gdb_regs[GDB_PC]; | 177 | regs->ip = gdb_regs[GDB_PC]; |
165 | #ifdef CONFIG_X86_32 | 178 | #ifdef CONFIG_X86_32 |
179 | regs->flags = gdb_regs[GDB_PS]; | ||
166 | regs->ds = gdb_regs[GDB_DS]; | 180 | regs->ds = gdb_regs[GDB_DS]; |
167 | regs->es = gdb_regs[GDB_ES]; | 181 | regs->es = gdb_regs[GDB_ES]; |
168 | regs->cs = gdb_regs[GDB_CS]; | 182 | regs->cs = gdb_regs[GDB_CS]; |
@@ -175,6 +189,9 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
175 | regs->r13 = gdb_regs[GDB_R13]; | 189 | regs->r13 = gdb_regs[GDB_R13]; |
176 | regs->r14 = gdb_regs[GDB_R14]; | 190 | regs->r14 = gdb_regs[GDB_R14]; |
177 | regs->r15 = gdb_regs[GDB_R15]; | 191 | regs->r15 = gdb_regs[GDB_R15]; |
192 | regs->flags = gdb_regs32[GDB_PS]; | ||
193 | regs->cs = gdb_regs32[GDB_CS]; | ||
194 | regs->ss = gdb_regs32[GDB_SS]; | ||
178 | #endif | 195 | #endif |
179 | } | 196 | } |
180 | 197 | ||
@@ -378,10 +395,8 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, | |||
378 | if (remcomInBuffer[0] == 's') { | 395 | if (remcomInBuffer[0] == 's') { |
379 | linux_regs->flags |= X86_EFLAGS_TF; | 396 | linux_regs->flags |= X86_EFLAGS_TF; |
380 | kgdb_single_step = 1; | 397 | kgdb_single_step = 1; |
381 | if (kgdb_contthread) { | 398 | atomic_set(&kgdb_cpu_doing_single_step, |
382 | atomic_set(&kgdb_cpu_doing_single_step, | 399 | raw_smp_processor_id()); |
383 | raw_smp_processor_id()); | ||
384 | } | ||
385 | } | 400 | } |
386 | 401 | ||
387 | get_debugreg(dr6, 6); | 402 | get_debugreg(dr6, 6); |
@@ -466,9 +481,15 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) | |||
466 | 481 | ||
467 | case DIE_DEBUG: | 482 | case DIE_DEBUG: |
468 | if (atomic_read(&kgdb_cpu_doing_single_step) == | 483 | if (atomic_read(&kgdb_cpu_doing_single_step) == |
469 | raw_smp_processor_id() && | 484 | raw_smp_processor_id()) { |
470 | user_mode(regs)) | 485 | if (user_mode(regs)) |
471 | return single_step_cont(regs, args); | 486 | return single_step_cont(regs, args); |
487 | break; | ||
488 | } else if (test_thread_flag(TIF_SINGLESTEP)) | ||
489 | /* This means a user thread is single-stepping | ||
490 | * a system call, which should be ignored | ||
491 | */ | ||
492 | return NOTIFY_DONE; | ||
472 | /* fall through */ | 493 | /* fall through */ |
473 | default: | 494 | default: |
474 | if (user_mode(regs)) | 495 | if (user_mode(regs)) |
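Editor's note on the kgdb.c hunk above: on 64-bit kernels the patch aliases the gdb register buffer with a u32 pointer so that flags, CS and SS are stored as 32-bit slots, which appears to match gdb's amd64 remote register layout (64-bit GPRs first, then 32-bit entries). The sketch below only illustrates that aliasing trick; the slot count and index are made up and are not the kernel's GDB_* constants.

#include <stdio.h>
#include <stdint.h>

#define NR_QUADS  4                /* hypothetical count of 64-bit slots */
#define IDX_FLAGS (NR_QUADS * 2)   /* first 32-bit slot, counted in u32 units */

int main(void)
{
	unsigned long buf[NR_QUADS + 2] = { 0 };   /* stand-in for gdb_regs[] */
	uint32_t *buf32 = (uint32_t *)buf;         /* stand-in for gdb_regs32 */

	buf32[IDX_FLAGS] = 0x246;   /* a 32-bit "flags" value */

	/* The same index selects 4-byte cells through the u32 view,
	 * so the byte offset is IDX_FLAGS * 4, not IDX_FLAGS * 8. */
	printf("byte offset of slot %d: %zu\n",
	       IDX_FLAGS, IDX_FLAGS * sizeof(uint32_t));
	printf("stored value: 0x%x\n", buf32[IDX_FLAGS]);
	return 0;
}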
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 8b7a3cf37d2b..478bca986eca 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -178,7 +178,7 @@ static void kvm_flush_tlb(void) | |||
178 | kvm_deferred_mmu_op(&ftlb, sizeof ftlb); | 178 | kvm_deferred_mmu_op(&ftlb, sizeof ftlb); |
179 | } | 179 | } |
180 | 180 | ||
181 | static void kvm_release_pt(u32 pfn) | 181 | static void kvm_release_pt(unsigned long pfn) |
182 | { | 182 | { |
183 | struct kvm_mmu_op_release_pt rpt = { | 183 | struct kvm_mmu_op_release_pt rpt = { |
184 | .header.op = KVM_MMU_OP_RELEASE_PT, | 184 | .header.op = KVM_MMU_OP_RELEASE_PT, |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index b68e21f06f4f..0ed5f939b905 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/ldt.h> | 18 | #include <asm/ldt.h> |
19 | #include <asm/desc.h> | 19 | #include <asm/desc.h> |
20 | #include <asm/mmu_context.h> | 20 | #include <asm/mmu_context.h> |
21 | #include <asm/syscalls.h> | ||
21 | 22 | ||
22 | #ifdef CONFIG_SMP | 23 | #ifdef CONFIG_SMP |
23 | static void flush_ldt(void *current_mm) | 24 | static void flush_ldt(void *current_mm) |
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 9fe478d98406..0732adba05ca 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/numa.h> | 13 | #include <linux/numa.h> |
14 | #include <linux/ftrace.h> | 14 | #include <linux/ftrace.h> |
15 | #include <linux/suspend.h> | ||
15 | 16 | ||
16 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
17 | #include <asm/pgalloc.h> | 18 | #include <asm/pgalloc.h> |
@@ -78,7 +79,7 @@ static void load_segments(void) | |||
78 | /* | 79 | /* |
79 | * An architecture hook called to validate the | 80 | * An architecture hook called to validate the |
80 | * proposed image and prepare the control pages | 81 | * proposed image and prepare the control pages |
81 | * as needed. The pages for KEXEC_CONTROL_CODE_SIZE | 82 | * as needed. The pages for KEXEC_CONTROL_PAGE_SIZE |
82 | * have been allocated, but the segments have not yet | 83 | * have been allocated, but the segments have not yet |
83 | * been copied into the kernel. | 84 | * been copied into the kernel. |
84 | * | 85 | * |
@@ -113,6 +114,7 @@ void machine_kexec(struct kimage *image) | |||
113 | { | 114 | { |
114 | unsigned long page_list[PAGES_NR]; | 115 | unsigned long page_list[PAGES_NR]; |
115 | void *control_page; | 116 | void *control_page; |
117 | int save_ftrace_enabled; | ||
116 | asmlinkage unsigned long | 118 | asmlinkage unsigned long |
117 | (*relocate_kernel_ptr)(unsigned long indirection_page, | 119 | (*relocate_kernel_ptr)(unsigned long indirection_page, |
118 | unsigned long control_page, | 120 | unsigned long control_page, |
@@ -120,7 +122,12 @@ void machine_kexec(struct kimage *image) | |||
120 | unsigned int has_pae, | 122 | unsigned int has_pae, |
121 | unsigned int preserve_context); | 123 | unsigned int preserve_context); |
122 | 124 | ||
123 | tracer_disable(); | 125 | #ifdef CONFIG_KEXEC_JUMP |
126 | if (kexec_image->preserve_context) | ||
127 | save_processor_state(); | ||
128 | #endif | ||
129 | |||
130 | save_ftrace_enabled = __ftrace_enabled_save(); | ||
124 | 131 | ||
125 | /* Interrupts aren't acceptable while we reboot */ | 132 | /* Interrupts aren't acceptable while we reboot */ |
126 | local_irq_disable(); | 133 | local_irq_disable(); |
@@ -138,7 +145,7 @@ void machine_kexec(struct kimage *image) | |||
138 | } | 145 | } |
139 | 146 | ||
140 | control_page = page_address(image->control_code_page); | 147 | control_page = page_address(image->control_code_page); |
141 | memcpy(control_page, relocate_kernel, PAGE_SIZE/2); | 148 | memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE); |
142 | 149 | ||
143 | relocate_kernel_ptr = control_page; | 150 | relocate_kernel_ptr = control_page; |
144 | page_list[PA_CONTROL_PAGE] = __pa(control_page); | 151 | page_list[PA_CONTROL_PAGE] = __pa(control_page); |
@@ -178,6 +185,13 @@ void machine_kexec(struct kimage *image) | |||
178 | (unsigned long)page_list, | 185 | (unsigned long)page_list, |
179 | image->start, cpu_has_pae, | 186 | image->start, cpu_has_pae, |
180 | image->preserve_context); | 187 | image->preserve_context); |
188 | |||
189 | #ifdef CONFIG_KEXEC_JUMP | ||
190 | if (kexec_image->preserve_context) | ||
191 | restore_processor_state(); | ||
192 | #endif | ||
193 | |||
194 | __ftrace_enabled_restore(save_ftrace_enabled); | ||
181 | } | 195 | } |
182 | 196 | ||
183 | void arch_crash_save_vmcoreinfo(void) | 197 | void arch_crash_save_vmcoreinfo(void) |
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c index 07c0f828f488..3b599518c322 100644 --- a/arch/x86/kernel/mfgpt_32.c +++ b/arch/x86/kernel/mfgpt_32.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <asm/geode.h> | 34 | #include <asm/geode.h> |
35 | 35 | ||
36 | #define MFGPT_DEFAULT_IRQ 7 | ||
37 | |||
36 | static struct mfgpt_timer_t { | 38 | static struct mfgpt_timer_t { |
37 | unsigned int avail:1; | 39 | unsigned int avail:1; |
38 | } mfgpt_timers[MFGPT_MAX_TIMERS]; | 40 | } mfgpt_timers[MFGPT_MAX_TIMERS]; |
@@ -157,29 +159,48 @@ int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable) | |||
157 | } | 159 | } |
158 | EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event); | 160 | EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event); |
159 | 161 | ||
160 | int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable) | 162 | int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable) |
161 | { | 163 | { |
162 | u32 val, dummy; | 164 | u32 zsel, lpc, dummy; |
163 | int offset; | 165 | int shift; |
164 | 166 | ||
165 | if (timer < 0 || timer >= MFGPT_MAX_TIMERS) | 167 | if (timer < 0 || timer >= MFGPT_MAX_TIMERS) |
166 | return -EIO; | 168 | return -EIO; |
167 | 169 | ||
168 | if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable)) | 170 | /* |
171 | * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA | ||
172 | * is using the same CMP of the timer's Siamese twin, the IRQ is set to | ||
173 | * 2, and we must neither use nor change it. | ||
174 | * XXX: Likewise, two Linux drivers might clash if the second overwrites | ||
175 | * the IRQ of the first. This can only happen when forcing an IRQ; calling | ||
176 | * this with *irq == 0 is safe. Currently there are no two such drivers. | ||
177 | */ | ||
178 | rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); | ||
179 | shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4; | ||
180 | if (((zsel >> shift) & 0xF) == 2) | ||
169 | return -EIO; | 181 | return -EIO; |
170 | 182 | ||
171 | rdmsr(MSR_PIC_ZSEL_LOW, val, dummy); | 183 | /* Choose IRQ: if none supplied, keep IRQ already set or use default */ |
184 | if (!*irq) | ||
185 | *irq = (zsel >> shift) & 0xF; | ||
186 | if (!*irq) | ||
187 | *irq = MFGPT_DEFAULT_IRQ; | ||
172 | 188 | ||
173 | offset = (timer % 4) * 4; | 189 | /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */ |
174 | 190 | if (*irq < 1 || *irq == 2 || *irq > 15) | |
175 | val &= ~((0xF << offset) | (0xF << (offset + 16))); | 191 | return -EIO; |
192 | rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy); | ||
193 | if (lpc & (1 << *irq)) | ||
194 | return -EIO; | ||
176 | 195 | ||
196 | /* All chosen and checked - go for it */ | ||
197 | if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable)) | ||
198 | return -EIO; | ||
177 | if (enable) { | 199 | if (enable) { |
178 | val |= (irq & 0x0F) << (offset); | 200 | zsel = (zsel & ~(0xF << shift)) | (*irq << shift); |
179 | val |= (irq & 0x0F) << (offset + 16); | 201 | wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); |
180 | } | 202 | } |
181 | 203 | ||
182 | wrmsr(MSR_PIC_ZSEL_LOW, val, dummy); | ||
183 | return 0; | 204 | return 0; |
184 | } | 205 | } |
185 | 206 | ||
@@ -242,7 +263,7 @@ EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer); | |||
242 | static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN; | 263 | static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN; |
243 | static u16 mfgpt_event_clock; | 264 | static u16 mfgpt_event_clock; |
244 | 265 | ||
245 | static int irq = 7; | 266 | static int irq; |
246 | static int __init mfgpt_setup(char *str) | 267 | static int __init mfgpt_setup(char *str) |
247 | { | 268 | { |
248 | get_option(&str, &irq); | 269 | get_option(&str, &irq); |
@@ -346,7 +367,7 @@ int __init mfgpt_timer_setup(void) | |||
346 | mfgpt_event_clock = timer; | 367 | mfgpt_event_clock = timer; |
347 | 368 | ||
348 | /* Set up the IRQ on the MFGPT side */ | 369 | /* Set up the IRQ on the MFGPT side */ |
349 | if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, irq)) { | 370 | if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, &irq)) { |
350 | printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq); | 371 | printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq); |
351 | return -EIO; | 372 | return -EIO; |
352 | } | 373 | } |
@@ -374,13 +395,14 @@ int __init mfgpt_timer_setup(void) | |||
374 | &mfgpt_clockevent); | 395 | &mfgpt_clockevent); |
375 | 396 | ||
376 | printk(KERN_INFO | 397 | printk(KERN_INFO |
377 | "mfgpt-timer: registering the MFGPT timer as a clock event.\n"); | 398 | "mfgpt-timer: Registering MFGPT timer %d as a clock event, using IRQ %d\n", |
399 | timer, irq); | ||
378 | clockevents_register_device(&mfgpt_clockevent); | 400 | clockevents_register_device(&mfgpt_clockevent); |
379 | 401 | ||
380 | return 0; | 402 | return 0; |
381 | 403 | ||
382 | err: | 404 | err: |
383 | geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, irq); | 405 | geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, &irq); |
384 | printk(KERN_ERR | 406 | printk(KERN_ERR |
385 | "mfgpt-timer: Unable to set up the MFGPT clock source\n"); | 407 | "mfgpt-timer: Unable to set up the MFGPT clock source\n"); |
386 | return -EIO; | 408 | return -EIO; |
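Editor's note on the mfgpt_32.c hunk above: geode_mfgpt_set_irq() now takes the IRQ by pointer so it can report back the line it actually chose -- the one already routed in the zero-select MSR, the driver default, or a forced value -- and it refuses IRQs that are disabled, reserved (2) or routed to LPC. A hedged caller sketch follows; the allocation call and its MFGPT_TIMER_ANY/MFGPT_DOMAIN_WORKING arguments are assumptions for illustration, and only the setup call with &irq is taken from this patch.

/* Hypothetical MFGPT client (kernel context assumed), not part of this patch. */
static int example_mfgpt_client(void)
{
	int timer;
	int irq = 0;    /* 0 = "keep whatever is routed, or use the default" */

	timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
	if (timer < 0)
		return -ENODEV;

	if (geode_mfgpt_setup_irq(timer, MFGPT_CMP2, &irq))
		return -EIO;

	/* irq now holds the line the MFGPT will raise; request_irq() it here. */
	return irq;
}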
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index fdfdc550b366..efc2f361fe85 100644 --- a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c | |||
@@ -238,7 +238,7 @@ static struct dmi_system_id __devinitdata mmconf_dmi_table[] = { | |||
238 | {} | 238 | {} |
239 | }; | 239 | }; |
240 | 240 | ||
241 | void __init check_enable_amd_mmconf_dmi(void) | 241 | void __cpuinit check_enable_amd_mmconf_dmi(void) |
242 | { | 242 | { |
243 | dmi_check_system(mmconf_dmi_table); | 243 | dmi_check_system(mmconf_dmi_table); |
244 | } | 244 | } |
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 678090508a62..b3fb430725cb 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -49,7 +49,7 @@ static int __init mpf_checksum(unsigned char *mp, int len) | |||
49 | return sum & 0xFF; | 49 | return sum & 0xFF; |
50 | } | 50 | } |
51 | 51 | ||
52 | static void __cpuinit MP_processor_info(struct mpc_config_processor *m) | 52 | static void __init MP_processor_info(struct mpc_config_processor *m) |
53 | { | 53 | { |
54 | int apicid; | 54 | int apicid; |
55 | char *bootup_cpu = ""; | 55 | char *bootup_cpu = ""; |
@@ -484,7 +484,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) | |||
484 | } | 484 | } |
485 | 485 | ||
486 | 486 | ||
487 | static void construct_ioapic_table(int mpc_default_type) | 487 | static void __init construct_ioapic_table(int mpc_default_type) |
488 | { | 488 | { |
489 | struct mpc_config_ioapic ioapic; | 489 | struct mpc_config_ioapic ioapic; |
490 | struct mpc_config_bus bus; | 490 | struct mpc_config_bus bus; |
@@ -529,7 +529,7 @@ static void construct_ioapic_table(int mpc_default_type) | |||
529 | construct_default_ioirq_mptable(mpc_default_type); | 529 | construct_default_ioirq_mptable(mpc_default_type); |
530 | } | 530 | } |
531 | #else | 531 | #else |
532 | static inline void construct_ioapic_table(int mpc_default_type) { } | 532 | static inline void __init construct_ioapic_table(int mpc_default_type) { } |
533 | #endif | 533 | #endif |
534 | 534 | ||
535 | static inline void __init construct_default_ISA_mptable(int mpc_default_type) | 535 | static inline void __init construct_default_ISA_mptable(int mpc_default_type) |
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 9fd809552447..2e2af5d18191 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -72,21 +72,28 @@ static ssize_t msr_read(struct file *file, char __user *buf, | |||
72 | u32 data[2]; | 72 | u32 data[2]; |
73 | u32 reg = *ppos; | 73 | u32 reg = *ppos; |
74 | int cpu = iminor(file->f_path.dentry->d_inode); | 74 | int cpu = iminor(file->f_path.dentry->d_inode); |
75 | int err; | 75 | int err = 0; |
76 | ssize_t bytes = 0; | ||
76 | 77 | ||
77 | if (count % 8) | 78 | if (count % 8) |
78 | return -EINVAL; /* Invalid chunk size */ | 79 | return -EINVAL; /* Invalid chunk size */ |
79 | 80 | ||
80 | for (; count; count -= 8) { | 81 | for (; count; count -= 8) { |
81 | err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); | 82 | err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); |
82 | if (err) | 83 | if (err) { |
83 | return -EIO; | 84 | if (err == -EFAULT) /* Fix idiotic error code */ |
84 | if (copy_to_user(tmp, &data, 8)) | 85 | err = -EIO; |
85 | return -EFAULT; | 86 | break; |
87 | } | ||
88 | if (copy_to_user(tmp, &data, 8)) { | ||
89 | err = -EFAULT; | ||
90 | break; | ||
91 | } | ||
86 | tmp += 2; | 92 | tmp += 2; |
93 | bytes += 8; | ||
87 | } | 94 | } |
88 | 95 | ||
89 | return ((char __user *)tmp) - buf; | 96 | return bytes ? bytes : err; |
90 | } | 97 | } |
91 | 98 | ||
92 | static ssize_t msr_write(struct file *file, const char __user *buf, | 99 | static ssize_t msr_write(struct file *file, const char __user *buf, |
@@ -96,21 +103,28 @@ static ssize_t msr_write(struct file *file, const char __user *buf, | |||
96 | u32 data[2]; | 103 | u32 data[2]; |
97 | u32 reg = *ppos; | 104 | u32 reg = *ppos; |
98 | int cpu = iminor(file->f_path.dentry->d_inode); | 105 | int cpu = iminor(file->f_path.dentry->d_inode); |
99 | int err; | 106 | int err = 0; |
107 | ssize_t bytes = 0; | ||
100 | 108 | ||
101 | if (count % 8) | 109 | if (count % 8) |
102 | return -EINVAL; /* Invalid chunk size */ | 110 | return -EINVAL; /* Invalid chunk size */ |
103 | 111 | ||
104 | for (; count; count -= 8) { | 112 | for (; count; count -= 8) { |
105 | if (copy_from_user(&data, tmp, 8)) | 113 | if (copy_from_user(&data, tmp, 8)) { |
106 | return -EFAULT; | 114 | err = -EFAULT; |
115 | break; | ||
116 | } | ||
107 | err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); | 117 | err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); |
108 | if (err) | 118 | if (err) { |
109 | return -EIO; | 119 | if (err == -EFAULT) /* Fix idiotic error code */ |
120 | err = -EIO; | ||
121 | break; | ||
122 | } | ||
110 | tmp += 2; | 123 | tmp += 2; |
124 | bytes += 8; | ||
111 | } | 125 | } |
112 | 126 | ||
113 | return ((char __user *)tmp) - buf; | 127 | return bytes ? bytes : err; |
114 | } | 128 | } |
115 | 129 | ||
116 | static int msr_open(struct inode *inode, struct file *file) | 130 | static int msr_open(struct inode *inode, struct file *file) |
@@ -131,7 +145,7 @@ static int msr_open(struct inode *inode, struct file *file) | |||
131 | ret = -EIO; /* MSR not supported */ | 145 | ret = -EIO; /* MSR not supported */ |
132 | out: | 146 | out: |
133 | unlock_kernel(); | 147 | unlock_kernel(); |
134 | return 0; | 148 | return ret; |
135 | } | 149 | } |
136 | 150 | ||
137 | /* | 151 | /* |
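Editor's note on the msr.c hunk above: the read/write paths now follow ordinary read(2)/write(2) semantics -- if some 8-byte chunks were transferred before a failure, the byte count is returned, and only a completely failed call returns the error -- and msr_open() finally propagates its error instead of returning 0. For reference, a minimal user-space sketch of how the device is consumed (assuming /dev/cpu/0/msr exists and the caller has the required privileges): the MSR number is passed as the file offset and every register is one 8-byte chunk, which is why the driver rejects counts that are not multiples of 8.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return 1;

	/* 0x10 is the TSC MSR; any MSR readable on the machine will do. */
	if (pread(fd, &val, sizeof(val), 0x10) != (ssize_t)sizeof(val)) {
		close(fd);
		return 1;
	}

	printf("MSR 0x10 = %#llx\n", (unsigned long long)val);
	close(fd);
	return 0;
}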
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index ac6d51222e7d..2c97f07f1c2c 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c | |||
@@ -114,6 +114,23 @@ static __init void nmi_cpu_busy(void *data) | |||
114 | } | 114 | } |
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | static void report_broken_nmi(int cpu, int *prev_nmi_count) | ||
118 | { | ||
119 | printk(KERN_CONT "\n"); | ||
120 | |||
121 | printk(KERN_WARNING | ||
122 | "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n", | ||
123 | cpu, prev_nmi_count[cpu], get_nmi_count(cpu)); | ||
124 | |||
125 | printk(KERN_WARNING | ||
126 | "Please report this to bugzilla.kernel.org,\n"); | ||
127 | printk(KERN_WARNING | ||
128 | "and attach the output of the 'dmesg' command.\n"); | ||
129 | |||
130 | per_cpu(wd_enabled, cpu) = 0; | ||
131 | atomic_dec(&nmi_active); | ||
132 | } | ||
133 | |||
117 | int __init check_nmi_watchdog(void) | 134 | int __init check_nmi_watchdog(void) |
118 | { | 135 | { |
119 | unsigned int *prev_nmi_count; | 136 | unsigned int *prev_nmi_count; |
@@ -141,15 +158,8 @@ int __init check_nmi_watchdog(void) | |||
141 | for_each_online_cpu(cpu) { | 158 | for_each_online_cpu(cpu) { |
142 | if (!per_cpu(wd_enabled, cpu)) | 159 | if (!per_cpu(wd_enabled, cpu)) |
143 | continue; | 160 | continue; |
144 | if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { | 161 | if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) |
145 | printk(KERN_WARNING "WARNING: CPU#%d: NMI " | 162 | report_broken_nmi(cpu, prev_nmi_count); |
146 | "appears to be stuck (%d->%d)!\n", | ||
147 | cpu, | ||
148 | prev_nmi_count[cpu], | ||
149 | get_nmi_count(cpu)); | ||
150 | per_cpu(wd_enabled, cpu) = 0; | ||
151 | atomic_dec(&nmi_active); | ||
152 | } | ||
153 | } | 163 | } |
154 | endflag = 1; | 164 | endflag = 1; |
155 | if (!atomic_read(&nmi_active)) { | 165 | if (!atomic_read(&nmi_active)) { |
@@ -289,6 +299,15 @@ void acpi_nmi_disable(void) | |||
289 | on_each_cpu(__acpi_nmi_disable, NULL, 1); | 299 | on_each_cpu(__acpi_nmi_disable, NULL, 1); |
290 | } | 300 | } |
291 | 301 | ||
302 | /* | ||
303 | * This function is called as soon as the LAPIC NMI watchdog driver has everything | ||
304 | * in place and is ready to check whether the NMIs belong to the NMI watchdog. | ||
305 | */ | ||
306 | void cpu_nmi_set_wd_enabled(void) | ||
307 | { | ||
308 | __get_cpu_var(wd_enabled) = 1; | ||
309 | } | ||
310 | |||
292 | void setup_apic_nmi_watchdog(void *unused) | 311 | void setup_apic_nmi_watchdog(void *unused) |
293 | { | 312 | { |
294 | if (__get_cpu_var(wd_enabled)) | 313 | if (__get_cpu_var(wd_enabled)) |
@@ -301,8 +320,6 @@ void setup_apic_nmi_watchdog(void *unused) | |||
301 | 320 | ||
302 | switch (nmi_watchdog) { | 321 | switch (nmi_watchdog) { |
303 | case NMI_LOCAL_APIC: | 322 | case NMI_LOCAL_APIC: |
304 | /* enable it before to avoid race with handler */ | ||
305 | __get_cpu_var(wd_enabled) = 1; | ||
306 | if (lapic_watchdog_init(nmi_hz) < 0) { | 323 | if (lapic_watchdog_init(nmi_hz) < 0) { |
307 | __get_cpu_var(wd_enabled) = 0; | 324 | __get_cpu_var(wd_enabled) = 0; |
308 | return; | 325 | return; |
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index b8c45610b20a..eecc8c18f010 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c | |||
@@ -73,7 +73,7 @@ static void __init smp_dump_qct(void) | |||
73 | } | 73 | } |
74 | 74 | ||
75 | 75 | ||
76 | void __init numaq_tsc_disable(void) | 76 | void __cpuinit numaq_tsc_disable(void) |
77 | { | 77 | { |
78 | if (!found_numaq) | 78 | if (!found_numaq) |
79 | return; | 79 | return; |
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c index 3e6672274807..7a13fac63a1f 100644 --- a/arch/x86/kernel/olpc.c +++ b/arch/x86/kernel/olpc.c | |||
@@ -190,12 +190,12 @@ EXPORT_SYMBOL_GPL(olpc_ec_cmd); | |||
190 | static void __init platform_detect(void) | 190 | static void __init platform_detect(void) |
191 | { | 191 | { |
192 | size_t propsize; | 192 | size_t propsize; |
193 | u32 rev; | 193 | __be32 rev; |
194 | 194 | ||
195 | if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4, | 195 | if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4, |
196 | &propsize) || propsize != 4) { | 196 | &propsize) || propsize != 4) { |
197 | printk(KERN_ERR "ofw: getprop call failed!\n"); | 197 | printk(KERN_ERR "ofw: getprop call failed!\n"); |
198 | rev = 0; | 198 | rev = cpu_to_be32(0); |
199 | } | 199 | } |
200 | olpc_platform_info.boardrev = be32_to_cpu(rev); | 200 | olpc_platform_info.boardrev = be32_to_cpu(rev); |
201 | } | 201 | } |
@@ -203,7 +203,7 @@ static void __init platform_detect(void) | |||
203 | static void __init platform_detect(void) | 203 | static void __init platform_detect(void) |
204 | { | 204 | { |
205 | /* stopgap until OFW support is added to the kernel */ | 205 | /* stopgap until OFW support is added to the kernel */ |
206 | olpc_platform_info.boardrev = be32_to_cpu(0xc2); | 206 | olpc_platform_info.boardrev = 0xc2; |
207 | } | 207 | } |
208 | #endif | 208 | #endif |
209 | 209 | ||
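Editor's note on the olpc.c hunk above: declaring the firmware-supplied revision as __be32 and converting it exactly once with be32_to_cpu() keeps the value in wire order until the point of use (and keeps sparse quiet). The same pattern in plain user-space C, using the glibc byte-order helpers instead of the kernel's __be32/be32_to_cpu:

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

int main(void)
{
	/* pretend this arrived big-endian from firmware */
	uint32_t wire = htobe32(0xc2);   /* value kept in wire (big-endian) order */
	uint32_t host = be32toh(wire);   /* convert once, at the point of use */

	printf("board revision: 0x%x\n", host);
	return 0;
}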
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 94da4d52d798..e2f43768723a 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -330,6 +330,7 @@ struct pv_cpu_ops pv_cpu_ops = { | |||
330 | #endif | 330 | #endif |
331 | .wbinvd = native_wbinvd, | 331 | .wbinvd = native_wbinvd, |
332 | .read_msr = native_read_msr_safe, | 332 | .read_msr = native_read_msr_safe, |
333 | .read_msr_amd = native_read_msr_amd_safe, | ||
333 | .write_msr = native_write_msr_safe, | 334 | .write_msr = native_write_msr_safe, |
334 | .read_tsc = native_read_tsc, | 335 | .read_tsc = native_read_tsc, |
335 | .read_pmc = native_read_pmc, | 336 | .read_pmc = native_read_pmc, |
@@ -471,7 +472,7 @@ struct pv_lock_ops pv_lock_ops = { | |||
471 | .spin_unlock = __ticket_spin_unlock, | 472 | .spin_unlock = __ticket_spin_unlock, |
472 | #endif | 473 | #endif |
473 | }; | 474 | }; |
474 | EXPORT_SYMBOL_GPL(pv_lock_ops); | 475 | EXPORT_SYMBOL(pv_lock_ops); |
475 | 476 | ||
476 | EXPORT_SYMBOL_GPL(pv_time_ops); | 477 | EXPORT_SYMBOL_GPL(pv_time_ops); |
477 | EXPORT_SYMBOL (pv_cpu_ops); | 478 | EXPORT_SYMBOL (pv_cpu_ops); |
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index 58262218781b..9fe644f4861d 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c | |||
@@ -23,7 +23,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, | |||
23 | start = start_##ops##_##x; \ | 23 | start = start_##ops##_##x; \ |
24 | end = end_##ops##_##x; \ | 24 | end = end_##ops##_##x; \ |
25 | goto patch_site | 25 | goto patch_site |
26 | switch(type) { | 26 | switch (type) { |
27 | PATCH_SITE(pv_irq_ops, irq_disable); | 27 | PATCH_SITE(pv_irq_ops, irq_disable); |
28 | PATCH_SITE(pv_irq_ops, irq_enable); | 28 | PATCH_SITE(pv_irq_ops, irq_enable); |
29 | PATCH_SITE(pv_irq_ops, restore_fl); | 29 | PATCH_SITE(pv_irq_ops, restore_fl); |
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 02d19328525d..dcdac6c826e9 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c | |||
@@ -343,9 +343,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, | |||
343 | /* were we called with bad_dma_address? */ | 343 | /* were we called with bad_dma_address? */ |
344 | badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE); | 344 | badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE); |
345 | if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) { | 345 | if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) { |
346 | printk(KERN_ERR "Calgary: driver tried unmapping bad DMA " | 346 | WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA " |
347 | "address 0x%Lx\n", dma_addr); | 347 | "address 0x%Lx\n", dma_addr); |
348 | WARN_ON(1); | ||
349 | return; | 348 | return; |
350 | } | 349 | } |
351 | 350 | ||
@@ -1269,13 +1268,15 @@ static inline int __init determine_tce_table_size(u64 ram) | |||
1269 | static int __init build_detail_arrays(void) | 1268 | static int __init build_detail_arrays(void) |
1270 | { | 1269 | { |
1271 | unsigned long ptr; | 1270 | unsigned long ptr; |
1272 | int i, scal_detail_size, rio_detail_size; | 1271 | unsigned numnodes, i; |
1272 | int scal_detail_size, rio_detail_size; | ||
1273 | 1273 | ||
1274 | if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){ | 1274 | numnodes = rio_table_hdr->num_scal_dev; |
1275 | if (numnodes > MAX_NUMNODES){ | ||
1275 | printk(KERN_WARNING | 1276 | printk(KERN_WARNING |
1276 | "Calgary: MAX_NUMNODES too low! Defined as %d, " | 1277 | "Calgary: MAX_NUMNODES too low! Defined as %d, " |
1277 | "but system has %d nodes.\n", | 1278 | "but system has %d nodes.\n", |
1278 | MAX_NUMNODES, rio_table_hdr->num_scal_dev); | 1279 | MAX_NUMNODES, numnodes); |
1279 | return -ENODEV; | 1280 | return -ENODEV; |
1280 | } | 1281 | } |
1281 | 1282 | ||
@@ -1296,8 +1297,7 @@ static int __init build_detail_arrays(void) | |||
1296 | } | 1297 | } |
1297 | 1298 | ||
1298 | ptr = ((unsigned long)rio_table_hdr) + 3; | 1299 | ptr = ((unsigned long)rio_table_hdr) + 3; |
1299 | for (i = 0; i < rio_table_hdr->num_scal_dev; | 1300 | for (i = 0; i < numnodes; i++, ptr += scal_detail_size) |
1300 | i++, ptr += scal_detail_size) | ||
1301 | scal_devs[i] = (struct scal_detail *)ptr; | 1301 | scal_devs[i] = (struct scal_detail *)ptr; |
1302 | 1302 | ||
1303 | for (i = 0; i < rio_table_hdr->num_rio_dev; | 1303 | for (i = 0; i < rio_table_hdr->num_rio_dev; |
@@ -1350,7 +1350,7 @@ static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl) | |||
1350 | * Function for the kdump case. Get the TCE tables from the first kernel | 1350 | * Function for the kdump case. Get the TCE tables from the first kernel |
1351 | * by reading the contents of the base address register of calgary iommu | 1351 | * by reading the contents of the base address register of calgary iommu |
1352 | */ | 1352 | */ |
1353 | static void get_tce_space_from_tar(void) | 1353 | static void __init get_tce_space_from_tar(void) |
1354 | { | 1354 | { |
1355 | int bus; | 1355 | int bus; |
1356 | void __iomem *target; | 1356 | void __iomem *target; |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 87d4d6964ec2..f704cb51ff82 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -82,7 +82,7 @@ void __init dma32_reserve_bootmem(void) | |||
82 | * using 512M as goal | 82 | * using 512M as goal |
83 | */ | 83 | */ |
84 | align = 64ULL<<20; | 84 | align = 64ULL<<20; |
85 | size = round_up(dma32_bootmem_size, align); | 85 | size = roundup(dma32_bootmem_size, align); |
86 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, | 86 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, |
87 | 512ULL<<20); | 87 | 512ULL<<20); |
88 | if (dma32_bootmem_ptr) | 88 | if (dma32_bootmem_ptr) |
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 4d8efb05428d..1a895a582534 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -633,7 +633,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info) | |||
633 | struct pci_dev *dev; | 633 | struct pci_dev *dev; |
634 | void *gatt; | 634 | void *gatt; |
635 | int i, error; | 635 | int i, error; |
636 | unsigned long start_pfn, end_pfn; | ||
637 | 636 | ||
638 | printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); | 637 | printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); |
639 | aper_size = aper_base = info->aper_size = 0; | 638 | aper_size = aper_base = info->aper_size = 0; |
@@ -679,12 +678,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info) | |||
679 | printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n", | 678 | printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n", |
680 | aper_base, aper_size>>10); | 679 | aper_base, aper_size>>10); |
681 | 680 | ||
682 | /* need to map that range */ | ||
683 | end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT); | ||
684 | if (end_pfn > max_low_pfn_mapped) { | ||
685 | start_pfn = (aper_base>>PAGE_SHIFT); | ||
686 | init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); | ||
687 | } | ||
688 | return 0; | 681 | return 0; |
689 | 682 | ||
690 | nommu: | 683 | nommu: |
@@ -734,7 +727,8 @@ void __init gart_iommu_init(void) | |||
734 | { | 727 | { |
735 | struct agp_kern_info info; | 728 | struct agp_kern_info info; |
736 | unsigned long iommu_start; | 729 | unsigned long iommu_start; |
737 | unsigned long aper_size; | 730 | unsigned long aper_base, aper_size; |
731 | unsigned long start_pfn, end_pfn; | ||
738 | unsigned long scratch; | 732 | unsigned long scratch; |
739 | long i; | 733 | long i; |
740 | 734 | ||
@@ -772,8 +766,16 @@ void __init gart_iommu_init(void) | |||
772 | return; | 766 | return; |
773 | } | 767 | } |
774 | 768 | ||
769 | /* need to map that range */ | ||
770 | aper_size = info.aper_size << 20; | ||
771 | aper_base = info.aper_base; | ||
772 | end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT); | ||
773 | if (end_pfn > max_low_pfn_mapped) { | ||
774 | start_pfn = (aper_base>>PAGE_SHIFT); | ||
775 | init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); | ||
776 | } | ||
777 | |||
775 | printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); | 778 | printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); |
776 | aper_size = info.aper_size * 1024 * 1024; | ||
777 | iommu_size = check_iommu_size(info.aper_base, aper_size); | 779 | iommu_size = check_iommu_size(info.aper_base, aper_size); |
778 | iommu_pages = iommu_size >> PAGE_SHIFT; | 780 | iommu_pages = iommu_size >> PAGE_SHIFT; |
779 | 781 | ||
diff --git a/arch/x86/kernel/pcspeaker.c b/arch/x86/kernel/pcspeaker.c index bc1f2d3ea277..a311ffcaad16 100644 --- a/arch/x86/kernel/pcspeaker.c +++ b/arch/x86/kernel/pcspeaker.c | |||
@@ -1,20 +1,13 @@ | |||
1 | #include <linux/platform_device.h> | 1 | #include <linux/platform_device.h> |
2 | #include <linux/errno.h> | 2 | #include <linux/err.h> |
3 | #include <linux/init.h> | 3 | #include <linux/init.h> |
4 | 4 | ||
5 | static __init int add_pcspkr(void) | 5 | static __init int add_pcspkr(void) |
6 | { | 6 | { |
7 | struct platform_device *pd; | 7 | struct platform_device *pd; |
8 | int ret; | ||
9 | 8 | ||
10 | pd = platform_device_alloc("pcspkr", -1); | 9 | pd = platform_device_register_simple("pcspkr", -1, NULL, 0); |
11 | if (!pd) | ||
12 | return -ENOMEM; | ||
13 | 10 | ||
14 | ret = platform_device_add(pd); | 11 | return IS_ERR(pd) ? PTR_ERR(pd) : 0; |
15 | if (ret) | ||
16 | platform_device_put(pd); | ||
17 | |||
18 | return ret; | ||
19 | } | 12 | } |
20 | device_initcall(add_pcspkr); | 13 | device_initcall(add_pcspkr); |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 7fc4d5b0a6a0..ec7a2ba9bce8 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -185,7 +185,8 @@ static void mwait_idle(void) | |||
185 | static void poll_idle(void) | 185 | static void poll_idle(void) |
186 | { | 186 | { |
187 | local_irq_enable(); | 187 | local_irq_enable(); |
188 | cpu_relax(); | 188 | while (!need_resched()) |
189 | cpu_relax(); | ||
189 | } | 190 | } |
190 | 191 | ||
191 | /* | 192 | /* |
@@ -246,6 +247,14 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) | |||
246 | return 1; | 247 | return 1; |
247 | } | 248 | } |
248 | 249 | ||
250 | static cpumask_t c1e_mask = CPU_MASK_NONE; | ||
251 | static int c1e_detected; | ||
252 | |||
253 | void c1e_remove_cpu(int cpu) | ||
254 | { | ||
255 | cpu_clear(cpu, c1e_mask); | ||
256 | } | ||
257 | |||
249 | /* | 258 | /* |
250 | * C1E aware idle routine. We check for C1E active in the interrupt | 259 | * C1E aware idle routine. We check for C1E active in the interrupt |
251 | * pending message MSR. If we detect C1E, then we handle it the same | 260 | * pending message MSR. If we detect C1E, then we handle it the same |
@@ -253,9 +262,6 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) | |||
253 | */ | 262 | */ |
254 | static void c1e_idle(void) | 263 | static void c1e_idle(void) |
255 | { | 264 | { |
256 | static cpumask_t c1e_mask = CPU_MASK_NONE; | ||
257 | static int c1e_detected; | ||
258 | |||
259 | if (need_resched()) | 265 | if (need_resched()) |
260 | return; | 266 | return; |
261 | 267 | ||
@@ -265,8 +271,10 @@ static void c1e_idle(void) | |||
265 | rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); | 271 | rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); |
266 | if (lo & K8_INTP_C1E_ACTIVE_MASK) { | 272 | if (lo & K8_INTP_C1E_ACTIVE_MASK) { |
267 | c1e_detected = 1; | 273 | c1e_detected = 1; |
268 | mark_tsc_unstable("TSC halt in C1E"); | 274 | if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) |
269 | printk(KERN_INFO "System has C1E enabled\n"); | 275 | mark_tsc_unstable("TSC halt in AMD C1E"); |
276 | printk(KERN_INFO "System has AMD C1E enabled\n"); | ||
277 | set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E); | ||
270 | } | 278 | } |
271 | } | 279 | } |
272 | 280 | ||
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 53bc653ed5ca..205188db9626 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/tick.h> | 37 | #include <linux/tick.h> |
38 | #include <linux/percpu.h> | 38 | #include <linux/percpu.h> |
39 | #include <linux/prctl.h> | 39 | #include <linux/prctl.h> |
40 | #include <linux/dmi.h> | ||
40 | 41 | ||
41 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
42 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
@@ -55,6 +56,9 @@ | |||
55 | #include <asm/tlbflush.h> | 56 | #include <asm/tlbflush.h> |
56 | #include <asm/cpu.h> | 57 | #include <asm/cpu.h> |
57 | #include <asm/kdebug.h> | 58 | #include <asm/kdebug.h> |
59 | #include <asm/idle.h> | ||
60 | #include <asm/syscalls.h> | ||
61 | #include <asm/smp.h> | ||
58 | 62 | ||
59 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); | 63 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
60 | 64 | ||
@@ -88,6 +92,7 @@ static void cpu_exit_clear(void) | |||
88 | cpu_clear(cpu, cpu_callin_map); | 92 | cpu_clear(cpu, cpu_callin_map); |
89 | 93 | ||
90 | numa_remove_cpu(cpu); | 94 | numa_remove_cpu(cpu); |
95 | c1e_remove_cpu(cpu); | ||
91 | } | 96 | } |
92 | 97 | ||
93 | /* We don't actually take CPU down, just spin without interrupts. */ | 98 | /* We don't actually take CPU down, just spin without interrupts. */ |
@@ -95,7 +100,6 @@ static inline void play_dead(void) | |||
95 | { | 100 | { |
96 | /* This must be done before dead CPU ack */ | 101 | /* This must be done before dead CPU ack */ |
97 | cpu_exit_clear(); | 102 | cpu_exit_clear(); |
98 | wbinvd(); | ||
99 | mb(); | 103 | mb(); |
100 | /* Ack it */ | 104 | /* Ack it */ |
101 | __get_cpu_var(cpu_state) = CPU_DEAD; | 105 | __get_cpu_var(cpu_state) = CPU_DEAD; |
@@ -104,8 +108,8 @@ static inline void play_dead(void) | |||
104 | * With physical CPU hotplug, we should halt the cpu | 108 | * With physical CPU hotplug, we should halt the cpu |
105 | */ | 109 | */ |
106 | local_irq_disable(); | 110 | local_irq_disable(); |
107 | while (1) | 111 | /* mask all interrupts, flush any and all caches, and halt */ |
108 | halt(); | 112 | wbinvd_halt(); |
109 | } | 113 | } |
110 | #else | 114 | #else |
111 | static inline void play_dead(void) | 115 | static inline void play_dead(void) |
@@ -160,6 +164,7 @@ void __show_registers(struct pt_regs *regs, int all) | |||
160 | unsigned long d0, d1, d2, d3, d6, d7; | 164 | unsigned long d0, d1, d2, d3, d6, d7; |
161 | unsigned long sp; | 165 | unsigned long sp; |
162 | unsigned short ss, gs; | 166 | unsigned short ss, gs; |
167 | const char *board; | ||
163 | 168 | ||
164 | if (user_mode_vm(regs)) { | 169 | if (user_mode_vm(regs)) { |
165 | sp = regs->sp; | 170 | sp = regs->sp; |
@@ -172,11 +177,15 @@ void __show_registers(struct pt_regs *regs, int all) | |||
172 | } | 177 | } |
173 | 178 | ||
174 | printk("\n"); | 179 | printk("\n"); |
175 | printk("Pid: %d, comm: %s %s (%s %.*s)\n", | 180 | |
181 | board = dmi_get_system_info(DMI_PRODUCT_NAME); | ||
182 | if (!board) | ||
183 | board = ""; | ||
184 | printk("Pid: %d, comm: %s %s (%s %.*s) %s\n", | ||
176 | task_pid_nr(current), current->comm, | 185 | task_pid_nr(current), current->comm, |
177 | print_tainted(), init_utsname()->release, | 186 | print_tainted(), init_utsname()->release, |
178 | (int)strcspn(init_utsname()->version, " "), | 187 | (int)strcspn(init_utsname()->version, " "), |
179 | init_utsname()->version); | 188 | init_utsname()->version, board); |
180 | 189 | ||
181 | printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", | 190 | printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", |
182 | (u16)regs->cs, regs->ip, regs->flags, | 191 | (u16)regs->cs, regs->ip, regs->flags, |
@@ -276,6 +285,14 @@ void exit_thread(void) | |||
276 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; | 285 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; |
277 | put_cpu(); | 286 | put_cpu(); |
278 | } | 287 | } |
288 | #ifdef CONFIG_X86_DS | ||
289 | /* Free any DS contexts that have not been properly released. */ | ||
290 | if (unlikely(current->thread.ds_ctx)) { | ||
291 | /* we clear debugctl to make sure DS is not used. */ | ||
292 | update_debugctlmsr(0); | ||
293 | ds_free(current->thread.ds_ctx); | ||
294 | } | ||
295 | #endif /* CONFIG_X86_DS */ | ||
279 | } | 296 | } |
280 | 297 | ||
281 | void flush_thread(void) | 298 | void flush_thread(void) |
@@ -437,6 +454,35 @@ int set_tsc_mode(unsigned int val) | |||
437 | return 0; | 454 | return 0; |
438 | } | 455 | } |
439 | 456 | ||
457 | #ifdef CONFIG_X86_DS | ||
458 | static int update_debugctl(struct thread_struct *prev, | ||
459 | struct thread_struct *next, unsigned long debugctl) | ||
460 | { | ||
461 | unsigned long ds_prev = 0; | ||
462 | unsigned long ds_next = 0; | ||
463 | |||
464 | if (prev->ds_ctx) | ||
465 | ds_prev = (unsigned long)prev->ds_ctx->ds; | ||
466 | if (next->ds_ctx) | ||
467 | ds_next = (unsigned long)next->ds_ctx->ds; | ||
468 | |||
469 | if (ds_next != ds_prev) { | ||
470 | /* we clear debugctl to make sure DS | ||
471 | * is not in use when we change it */ | ||
472 | debugctl = 0; | ||
473 | update_debugctlmsr(0); | ||
474 | wrmsr(MSR_IA32_DS_AREA, ds_next, 0); | ||
475 | } | ||
476 | return debugctl; | ||
477 | } | ||
478 | #else | ||
479 | static int update_debugctl(struct thread_struct *prev, | ||
480 | struct thread_struct *next, unsigned long debugctl) | ||
481 | { | ||
482 | return debugctl; | ||
483 | } | ||
484 | #endif /* CONFIG_X86_DS */ | ||
485 | |||
440 | static noinline void | 486 | static noinline void |
441 | __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | 487 | __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, |
442 | struct tss_struct *tss) | 488 | struct tss_struct *tss) |
@@ -447,14 +493,7 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
447 | prev = &prev_p->thread; | 493 | prev = &prev_p->thread; |
448 | next = &next_p->thread; | 494 | next = &next_p->thread; |
449 | 495 | ||
450 | debugctl = prev->debugctlmsr; | 496 | debugctl = update_debugctl(prev, next, prev->debugctlmsr); |
451 | if (next->ds_area_msr != prev->ds_area_msr) { | ||
452 | /* we clear debugctl to make sure DS | ||
453 | * is not in use when we change it */ | ||
454 | debugctl = 0; | ||
455 | update_debugctlmsr(0); | ||
456 | wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0); | ||
457 | } | ||
458 | 497 | ||
459 | if (next->debugctlmsr != debugctl) | 498 | if (next->debugctlmsr != debugctl) |
460 | update_debugctlmsr(next->debugctlmsr); | 499 | update_debugctlmsr(next->debugctlmsr); |
@@ -478,13 +517,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
478 | hard_enable_TSC(); | 517 | hard_enable_TSC(); |
479 | } | 518 | } |
480 | 519 | ||
481 | #ifdef X86_BTS | 520 | #ifdef CONFIG_X86_PTRACE_BTS |
482 | if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) | 521 | if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) |
483 | ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); | 522 | ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); |
484 | 523 | ||
485 | if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) | 524 | if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) |
486 | ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); | 525 | ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); |
487 | #endif | 526 | #endif /* CONFIG_X86_PTRACE_BTS */ |
488 | 527 | ||
489 | 528 | ||
490 | if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { | 529 | if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3fb62a7d9a16..2a8ccb9238b4 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -37,11 +37,11 @@ | |||
37 | #include <linux/kdebug.h> | 37 | #include <linux/kdebug.h> |
38 | #include <linux/tick.h> | 38 | #include <linux/tick.h> |
39 | #include <linux/prctl.h> | 39 | #include <linux/prctl.h> |
40 | #include <linux/uaccess.h> | ||
41 | #include <linux/io.h> | ||
40 | 42 | ||
41 | #include <asm/uaccess.h> | ||
42 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
43 | #include <asm/system.h> | 44 | #include <asm/system.h> |
44 | #include <asm/io.h> | ||
45 | #include <asm/processor.h> | 45 | #include <asm/processor.h> |
46 | #include <asm/i387.h> | 46 | #include <asm/i387.h> |
47 | #include <asm/mmu_context.h> | 47 | #include <asm/mmu_context.h> |
@@ -51,6 +51,7 @@ | |||
51 | #include <asm/proto.h> | 51 | #include <asm/proto.h> |
52 | #include <asm/ia32.h> | 52 | #include <asm/ia32.h> |
53 | #include <asm/idle.h> | 53 | #include <asm/idle.h> |
54 | #include <asm/syscalls.h> | ||
54 | 55 | ||
55 | asmlinkage extern void ret_from_fork(void); | 56 | asmlinkage extern void ret_from_fork(void); |
56 | 57 | ||
@@ -88,19 +89,20 @@ void exit_idle(void) | |||
88 | #ifdef CONFIG_HOTPLUG_CPU | 89 | #ifdef CONFIG_HOTPLUG_CPU |
89 | DECLARE_PER_CPU(int, cpu_state); | 90 | DECLARE_PER_CPU(int, cpu_state); |
90 | 91 | ||
91 | #include <asm/nmi.h> | 92 | #include <linux/nmi.h> |
92 | /* We halt the CPU with physical CPU hotplug */ | 93 | /* We halt the CPU with physical CPU hotplug */ |
93 | static inline void play_dead(void) | 94 | static inline void play_dead(void) |
94 | { | 95 | { |
95 | idle_task_exit(); | 96 | idle_task_exit(); |
96 | wbinvd(); | 97 | c1e_remove_cpu(raw_smp_processor_id()); |
98 | |||
97 | mb(); | 99 | mb(); |
98 | /* Ack it */ | 100 | /* Ack it */ |
99 | __get_cpu_var(cpu_state) = CPU_DEAD; | 101 | __get_cpu_var(cpu_state) = CPU_DEAD; |
100 | 102 | ||
101 | local_irq_disable(); | 103 | local_irq_disable(); |
102 | while (1) | 104 | /* mask all interrupts, flush any and all caches, and halt */ |
103 | halt(); | 105 | wbinvd_halt(); |
104 | } | 106 | } |
105 | #else | 107 | #else |
106 | static inline void play_dead(void) | 108 | static inline void play_dead(void) |
@@ -152,7 +154,7 @@ void cpu_idle(void) | |||
152 | } | 154 | } |
153 | 155 | ||
154 | /* Also prints some state that isn't saved in the pt_regs */ | 156 | /* Also prints some state that isn't saved in the pt_regs */ |
155 | void __show_regs(struct pt_regs * regs) | 157 | void __show_regs(struct pt_regs *regs) |
156 | { | 158 | { |
157 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; | 159 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; |
158 | unsigned long d0, d1, d2, d3, d6, d7; | 160 | unsigned long d0, d1, d2, d3, d6, d7; |
@@ -161,59 +163,61 @@ void __show_regs(struct pt_regs * regs) | |||
161 | 163 | ||
162 | printk("\n"); | 164 | printk("\n"); |
163 | print_modules(); | 165 | print_modules(); |
164 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", | 166 | printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n", |
165 | current->pid, current->comm, print_tainted(), | 167 | current->pid, current->comm, print_tainted(), |
166 | init_utsname()->release, | 168 | init_utsname()->release, |
167 | (int)strcspn(init_utsname()->version, " "), | 169 | (int)strcspn(init_utsname()->version, " "), |
168 | init_utsname()->version); | 170 | init_utsname()->version); |
169 | printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); | 171 | printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); |
170 | printk_address(regs->ip, 1); | 172 | printk_address(regs->ip, 1); |
171 | printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp, | 173 | printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, |
172 | regs->flags); | 174 | regs->sp, regs->flags); |
173 | printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", | 175 | printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n", |
174 | regs->ax, regs->bx, regs->cx); | 176 | regs->ax, regs->bx, regs->cx); |
175 | printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", | 177 | printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n", |
176 | regs->dx, regs->si, regs->di); | 178 | regs->dx, regs->si, regs->di); |
177 | printk("RBP: %016lx R08: %016lx R09: %016lx\n", | 179 | printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n", |
178 | regs->bp, regs->r8, regs->r9); | 180 | regs->bp, regs->r8, regs->r9); |
179 | printk("R10: %016lx R11: %016lx R12: %016lx\n", | 181 | printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n", |
180 | regs->r10, regs->r11, regs->r12); | 182 | regs->r10, regs->r11, regs->r12); |
181 | printk("R13: %016lx R14: %016lx R15: %016lx\n", | 183 | printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n", |
182 | regs->r13, regs->r14, regs->r15); | 184 | regs->r13, regs->r14, regs->r15); |
183 | 185 | ||
184 | asm("movl %%ds,%0" : "=r" (ds)); | 186 | asm("movl %%ds,%0" : "=r" (ds)); |
185 | asm("movl %%cs,%0" : "=r" (cs)); | 187 | asm("movl %%cs,%0" : "=r" (cs)); |
186 | asm("movl %%es,%0" : "=r" (es)); | 188 | asm("movl %%es,%0" : "=r" (es)); |
187 | asm("movl %%fs,%0" : "=r" (fsindex)); | 189 | asm("movl %%fs,%0" : "=r" (fsindex)); |
188 | asm("movl %%gs,%0" : "=r" (gsindex)); | 190 | asm("movl %%gs,%0" : "=r" (gsindex)); |
189 | 191 | ||
190 | rdmsrl(MSR_FS_BASE, fs); | 192 | rdmsrl(MSR_FS_BASE, fs); |
191 | rdmsrl(MSR_GS_BASE, gs); | 193 | rdmsrl(MSR_GS_BASE, gs); |
192 | rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); | 194 | rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); |
193 | 195 | ||
194 | cr0 = read_cr0(); | 196 | cr0 = read_cr0(); |
195 | cr2 = read_cr2(); | 197 | cr2 = read_cr2(); |
196 | cr3 = read_cr3(); | 198 | cr3 = read_cr3(); |
197 | cr4 = read_cr4(); | 199 | cr4 = read_cr4(); |
198 | 200 | ||
199 | printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", | 201 | printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", |
200 | fs,fsindex,gs,gsindex,shadowgs); | 202 | fs, fsindex, gs, gsindex, shadowgs); |
201 | printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); | 203 | printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, |
202 | printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); | 204 | es, cr0); |
205 | printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, | ||
206 | cr4); | ||
203 | 207 | ||
204 | get_debugreg(d0, 0); | 208 | get_debugreg(d0, 0); |
205 | get_debugreg(d1, 1); | 209 | get_debugreg(d1, 1); |
206 | get_debugreg(d2, 2); | 210 | get_debugreg(d2, 2); |
207 | printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); | 211 | printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); |
208 | get_debugreg(d3, 3); | 212 | get_debugreg(d3, 3); |
209 | get_debugreg(d6, 6); | 213 | get_debugreg(d6, 6); |
210 | get_debugreg(d7, 7); | 214 | get_debugreg(d7, 7); |
211 | printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); | 215 | printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); |
212 | } | 216 | } |
213 | 217 | ||
214 | void show_regs(struct pt_regs *regs) | 218 | void show_regs(struct pt_regs *regs) |
215 | { | 219 | { |
216 | printk("CPU %d:", smp_processor_id()); | 220 | printk(KERN_INFO "CPU %d:", smp_processor_id()); |
217 | __show_regs(regs); | 221 | __show_regs(regs); |
218 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); | 222 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); |
219 | } | 223 | } |
@@ -239,6 +243,14 @@ void exit_thread(void) | |||
239 | t->io_bitmap_max = 0; | 243 | t->io_bitmap_max = 0; |
240 | put_cpu(); | 244 | put_cpu(); |
241 | } | 245 | } |
246 | #ifdef CONFIG_X86_DS | ||
247 | /* Free any DS contexts that have not been properly released. */ | ||
248 | if (unlikely(t->ds_ctx)) { | ||
249 | /* we clear debugctl to make sure DS is not used. */ | ||
250 | update_debugctlmsr(0); | ||
251 | ds_free(t->ds_ctx); | ||
252 | } | ||
253 | #endif /* CONFIG_X86_DS */ | ||
242 | } | 254 | } |
243 | 255 | ||
244 | void flush_thread(void) | 256 | void flush_thread(void) |
@@ -314,10 +326,10 @@ void prepare_to_copy(struct task_struct *tsk) | |||
314 | 326 | ||
315 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | 327 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, |
316 | unsigned long unused, | 328 | unsigned long unused, |
317 | struct task_struct * p, struct pt_regs * regs) | 329 | struct task_struct *p, struct pt_regs *regs) |
318 | { | 330 | { |
319 | int err; | 331 | int err; |
320 | struct pt_regs * childregs; | 332 | struct pt_regs *childregs; |
321 | struct task_struct *me = current; | 333 | struct task_struct *me = current; |
322 | 334 | ||
323 | childregs = ((struct pt_regs *) | 335 | childregs = ((struct pt_regs *) |
@@ -362,10 +374,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | |||
362 | if (test_thread_flag(TIF_IA32)) | 374 | if (test_thread_flag(TIF_IA32)) |
363 | err = do_set_thread_area(p, -1, | 375 | err = do_set_thread_area(p, -1, |
364 | (struct user_desc __user *)childregs->si, 0); | 376 | (struct user_desc __user *)childregs->si, 0); |
365 | else | 377 | else |
366 | #endif | 378 | #endif |
367 | err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); | 379 | err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); |
368 | if (err) | 380 | if (err) |
369 | goto out; | 381 | goto out; |
370 | } | 382 | } |
371 | err = 0; | 383 | err = 0; |
@@ -472,13 +484,27 @@ static inline void __switch_to_xtra(struct task_struct *prev_p, | |||
472 | next = &next_p->thread; | 484 | next = &next_p->thread; |
473 | 485 | ||
474 | debugctl = prev->debugctlmsr; | 486 | debugctl = prev->debugctlmsr; |
475 | if (next->ds_area_msr != prev->ds_area_msr) { | 487 | |
476 | /* we clear debugctl to make sure DS | 488 | #ifdef CONFIG_X86_DS |
477 | * is not in use when we change it */ | 489 | { |
478 | debugctl = 0; | 490 | unsigned long ds_prev = 0, ds_next = 0; |
479 | update_debugctlmsr(0); | 491 | |
480 | wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr); | 492 | if (prev->ds_ctx) |
493 | ds_prev = (unsigned long)prev->ds_ctx->ds; | ||
494 | if (next->ds_ctx) | ||
495 | ds_next = (unsigned long)next->ds_ctx->ds; | ||
496 | |||
497 | if (ds_next != ds_prev) { | ||
498 | /* | ||
499 | * We clear debugctl to make sure DS | ||
500 | * is not in use when we change it: | ||
501 | */ | ||
502 | debugctl = 0; | ||
503 | update_debugctlmsr(0); | ||
504 | wrmsrl(MSR_IA32_DS_AREA, ds_next); | ||
505 | } | ||
481 | } | 506 | } |
507 | #endif /* CONFIG_X86_DS */ | ||
482 | 508 | ||
483 | if (next->debugctlmsr != debugctl) | 509 | if (next->debugctlmsr != debugctl) |
484 | update_debugctlmsr(next->debugctlmsr); | 510 | update_debugctlmsr(next->debugctlmsr); |
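The __switch_to_xtra() hunk above stops caching a raw ds_area_msr value in the thread and instead derives the DS buffer base from the per-thread context, reprogramming MSR_IA32_DS_AREA only when the outgoing and incoming tasks disagree, with DEBUGCTL cleared first so the hardware never stores into a half-switched buffer. A minimal user-space model of that decision follows; the struct names are stand-ins for illustration, not the kernel's definitions.

/* build: gcc -Wall ds_switch_model.c */
#include <stdio.h>

struct ds_context { void *ds; };                  /* stand-in for the kernel DS context */
struct thread     { struct ds_context *ds_ctx; }; /* stand-in for struct thread_struct  */

/* the value that would be written to MSR_IA32_DS_AREA for this thread */
static unsigned long ds_area(const struct thread *t)
{
	return t->ds_ctx ? (unsigned long)t->ds_ctx->ds : 0;
}

int main(void)
{
	struct ds_context ctx  = { .ds = (void *)0x1000 };
	struct thread     prev = { .ds_ctx = &ctx };
	struct thread     next = { .ds_ctx = NULL };

	if (ds_area(&next) != ds_area(&prev))
		printf("clear DEBUGCTL, then write DS_AREA = %#lx\n", ds_area(&next));
	else
		printf("DS area unchanged, leave the MSR alone\n");
	return 0;
}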
@@ -516,13 +542,13 @@ static inline void __switch_to_xtra(struct task_struct *prev_p, | |||
516 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); | 542 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); |
517 | } | 543 | } |
518 | 544 | ||
519 | #ifdef X86_BTS | 545 | #ifdef CONFIG_X86_PTRACE_BTS |
520 | if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) | 546 | if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) |
521 | ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); | 547 | ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); |
522 | 548 | ||
523 | if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) | 549 | if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) |
524 | ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); | 550 | ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); |
525 | #endif | 551 | #endif /* CONFIG_X86_PTRACE_BTS */ |
526 | } | 552 | } |
527 | 553 | ||
528 | /* | 554 | /* |
@@ -544,7 +570,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
544 | unsigned fsindex, gsindex; | 570 | unsigned fsindex, gsindex; |
545 | 571 | ||
546 | /* we're going to use this soon, after a few expensive things */ | 572 | /* we're going to use this soon, after a few expensive things */ |
547 | if (next_p->fpu_counter>5) | 573 | if (next_p->fpu_counter > 5) |
548 | prefetch(next->xstate); | 574 | prefetch(next->xstate); |
549 | 575 | ||
550 | /* | 576 | /* |
@@ -552,13 +578,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
552 | */ | 578 | */ |
553 | load_sp0(tss, next); | 579 | load_sp0(tss, next); |
554 | 580 | ||
555 | /* | 581 | /* |
556 | * Switch DS and ES. | 582 | * Switch DS and ES. |
557 | * This won't pick up thread selector changes, but I guess that is ok. | 583 | * This won't pick up thread selector changes, but I guess that is ok. |
558 | */ | 584 | */ |
559 | savesegment(es, prev->es); | 585 | savesegment(es, prev->es); |
560 | if (unlikely(next->es | prev->es)) | 586 | if (unlikely(next->es | prev->es)) |
561 | loadsegment(es, next->es); | 587 | loadsegment(es, next->es); |
562 | 588 | ||
563 | savesegment(ds, prev->ds); | 589 | savesegment(ds, prev->ds); |
564 | if (unlikely(next->ds | prev->ds)) | 590 | if (unlikely(next->ds | prev->ds)) |
@@ -584,7 +610,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
584 | */ | 610 | */ |
585 | arch_leave_lazy_cpu_mode(); | 611 | arch_leave_lazy_cpu_mode(); |
586 | 612 | ||
587 | /* | 613 | /* |
588 | * Switch FS and GS. | 614 | * Switch FS and GS. |
589 | * | 615 | * |
590 | * Segment register != 0 always requires a reload. Also | 616 | * Segment register != 0 always requires a reload. Also |
@@ -593,13 +619,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
593 | */ | 619 | */ |
594 | if (unlikely(fsindex | next->fsindex | prev->fs)) { | 620 | if (unlikely(fsindex | next->fsindex | prev->fs)) { |
595 | loadsegment(fs, next->fsindex); | 621 | loadsegment(fs, next->fsindex); |
596 | /* | 622 | /* |
597 | * Check if the user used a selector != 0; if yes | 623 | * Check if the user used a selector != 0; if yes |
598 | * clear 64bit base, since overloaded base is always | 624 | * clear 64bit base, since overloaded base is always |
599 | * mapped to the Null selector | 625 | * mapped to the Null selector |
600 | */ | 626 | */ |
601 | if (fsindex) | 627 | if (fsindex) |
602 | prev->fs = 0; | 628 | prev->fs = 0; |
603 | } | 629 | } |
604 | /* when next process has a 64bit base use it */ | 630 | /* when next process has a 64bit base use it */ |
605 | if (next->fs) | 631 | if (next->fs) |
@@ -609,7 +635,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
609 | if (unlikely(gsindex | next->gsindex | prev->gs)) { | 635 | if (unlikely(gsindex | next->gsindex | prev->gs)) { |
610 | load_gs_index(next->gsindex); | 636 | load_gs_index(next->gsindex); |
611 | if (gsindex) | 637 | if (gsindex) |
612 | prev->gs = 0; | 638 | prev->gs = 0; |
613 | } | 639 | } |
614 | if (next->gs) | 640 | if (next->gs) |
615 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); | 641 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); |
@@ -618,12 +644,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
618 | /* Must be after DS reload */ | 644 | /* Must be after DS reload */ |
619 | unlazy_fpu(prev_p); | 645 | unlazy_fpu(prev_p); |
620 | 646 | ||
621 | /* | 647 | /* |
622 | * Switch the PDA and FPU contexts. | 648 | * Switch the PDA and FPU contexts. |
623 | */ | 649 | */ |
624 | prev->usersp = read_pda(oldrsp); | 650 | prev->usersp = read_pda(oldrsp); |
625 | write_pda(oldrsp, next->usersp); | 651 | write_pda(oldrsp, next->usersp); |
626 | write_pda(pcurrent, next_p); | 652 | write_pda(pcurrent, next_p); |
627 | 653 | ||
628 | write_pda(kernelstack, | 654 | write_pda(kernelstack, |
629 | (unsigned long)task_stack_page(next_p) + | 655 | (unsigned long)task_stack_page(next_p) + |
@@ -664,7 +690,7 @@ long sys_execve(char __user *name, char __user * __user *argv, | |||
664 | char __user * __user *envp, struct pt_regs *regs) | 690 | char __user * __user *envp, struct pt_regs *regs) |
665 | { | 691 | { |
666 | long error; | 692 | long error; |
667 | char * filename; | 693 | char *filename; |
668 | 694 | ||
669 | filename = getname(name); | 695 | filename = getname(name); |
670 | error = PTR_ERR(filename); | 696 | error = PTR_ERR(filename); |
@@ -722,55 +748,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs) | |||
722 | unsigned long get_wchan(struct task_struct *p) | 748 | unsigned long get_wchan(struct task_struct *p) |
723 | { | 749 | { |
724 | unsigned long stack; | 750 | unsigned long stack; |
725 | u64 fp,ip; | 751 | u64 fp, ip; |
726 | int count = 0; | 752 | int count = 0; |
727 | 753 | ||
728 | if (!p || p == current || p->state==TASK_RUNNING) | 754 | if (!p || p == current || p->state == TASK_RUNNING) |
729 | return 0; | 755 | return 0; |
730 | stack = (unsigned long)task_stack_page(p); | 756 | stack = (unsigned long)task_stack_page(p); |
731 | if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) | 757 | if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) |
732 | return 0; | 758 | return 0; |
733 | fp = *(u64 *)(p->thread.sp); | 759 | fp = *(u64 *)(p->thread.sp); |
734 | do { | 760 | do { |
735 | if (fp < (unsigned long)stack || | 761 | if (fp < (unsigned long)stack || |
736 | fp > (unsigned long)stack+THREAD_SIZE) | 762 | fp > (unsigned long)stack+THREAD_SIZE) |
737 | return 0; | 763 | return 0; |
738 | ip = *(u64 *)(fp+8); | 764 | ip = *(u64 *)(fp+8); |
739 | if (!in_sched_functions(ip)) | 765 | if (!in_sched_functions(ip)) |
740 | return ip; | 766 | return ip; |
741 | fp = *(u64 *)fp; | 767 | fp = *(u64 *)fp; |
742 | } while (count++ < 16); | 768 | } while (count++ < 16); |
743 | return 0; | 769 | return 0; |
744 | } | 770 | } |
745 | 771 | ||
746 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | 772 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) |
747 | { | 773 | { |
748 | int ret = 0; | 774 | int ret = 0; |
749 | int doit = task == current; | 775 | int doit = task == current; |
750 | int cpu; | 776 | int cpu; |
751 | 777 | ||
752 | switch (code) { | 778 | switch (code) { |
753 | case ARCH_SET_GS: | 779 | case ARCH_SET_GS: |
754 | if (addr >= TASK_SIZE_OF(task)) | 780 | if (addr >= TASK_SIZE_OF(task)) |
755 | return -EPERM; | 781 | return -EPERM; |
756 | cpu = get_cpu(); | 782 | cpu = get_cpu(); |
757 | /* handle small bases via the GDT because that's faster to | 783 | /* handle small bases via the GDT because that's faster to |
758 | switch. */ | 784 | switch. */ |
759 | if (addr <= 0xffffffff) { | 785 | if (addr <= 0xffffffff) { |
760 | set_32bit_tls(task, GS_TLS, addr); | 786 | set_32bit_tls(task, GS_TLS, addr); |
761 | if (doit) { | 787 | if (doit) { |
762 | load_TLS(&task->thread, cpu); | 788 | load_TLS(&task->thread, cpu); |
763 | load_gs_index(GS_TLS_SEL); | 789 | load_gs_index(GS_TLS_SEL); |
764 | } | 790 | } |
765 | task->thread.gsindex = GS_TLS_SEL; | 791 | task->thread.gsindex = GS_TLS_SEL; |
766 | task->thread.gs = 0; | 792 | task->thread.gs = 0; |
767 | } else { | 793 | } else { |
768 | task->thread.gsindex = 0; | 794 | task->thread.gsindex = 0; |
769 | task->thread.gs = addr; | 795 | task->thread.gs = addr; |
770 | if (doit) { | 796 | if (doit) { |
771 | load_gs_index(0); | 797 | load_gs_index(0); |
772 | ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); | 798 | ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); |
773 | } | 799 | } |
774 | } | 800 | } |
775 | put_cpu(); | 801 | put_cpu(); |
776 | break; | 802 | break; |
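The ARCH_SET_GS case above is only reformatted, but its comment carries the logic: bases that fit in 32 bits are installed as a TLS descriptor in the GDT because that is cheaper to switch, while larger bases go through load_gs_index(0) plus MSR_KERNEL_GS_BASE. A stand-alone sketch of that split; the function name and sample addresses are made up.

#include <stdio.h>

/* which mechanism do_arch_prctl(ARCH_SET_GS) would pick for a given base */
static const char *gs_base_path(unsigned long addr)
{
	return addr <= 0xffffffffUL
		? "32-bit TLS slot in the GDT (fast to switch)"
		: "load_gs_index(0) + wrmsrl(MSR_KERNEL_GS_BASE)";
}

int main(void)
{
	unsigned long small = 0x10000UL;
	unsigned long large = 0x7fff00000000UL;

	printf("%#16lx -> %s\n", small, gs_base_path(small));
	printf("%#16lx -> %s\n", large, gs_base_path(large));
	return 0;
}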
@@ -824,8 +850,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | |||
824 | rdmsrl(MSR_KERNEL_GS_BASE, base); | 850 | rdmsrl(MSR_KERNEL_GS_BASE, base); |
825 | else | 851 | else |
826 | base = task->thread.gs; | 852 | base = task->thread.gs; |
827 | } | 853 | } else |
828 | else | ||
829 | base = task->thread.gs; | 854 | base = task->thread.gs; |
830 | ret = put_user(base, (unsigned long __user *)addr); | 855 | ret = put_user(base, (unsigned long __user *)addr); |
831 | break; | 856 | break; |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index e37dccce85db..e375b658efc3 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/ptrace.h> | 15 | #include <linux/ptrace.h> |
16 | #include <linux/regset.h> | 16 | #include <linux/regset.h> |
17 | #include <linux/tracehook.h> | ||
17 | #include <linux/user.h> | 18 | #include <linux/user.h> |
18 | #include <linux/elf.h> | 19 | #include <linux/elf.h> |
19 | #include <linux/security.h> | 20 | #include <linux/security.h> |
@@ -69,7 +70,7 @@ static inline bool invalid_selector(u16 value) | |||
69 | 70 | ||
70 | #define FLAG_MASK FLAG_MASK_32 | 71 | #define FLAG_MASK FLAG_MASK_32 |
71 | 72 | ||
72 | static long *pt_regs_access(struct pt_regs *regs, unsigned long regno) | 73 | static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) |
73 | { | 74 | { |
74 | BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); | 75 | BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); |
75 | regno >>= 2; | 76 | regno >>= 2; |
@@ -554,45 +555,115 @@ static int ptrace_set_debugreg(struct task_struct *child, | |||
554 | return 0; | 555 | return 0; |
555 | } | 556 | } |
556 | 557 | ||
557 | #ifdef X86_BTS | 558 | #ifdef CONFIG_X86_PTRACE_BTS |
559 | /* | ||
560 | * The configuration for a particular BTS hardware implementation. | ||
561 | */ | ||
562 | struct bts_configuration { | ||
563 | /* the size of a BTS record in bytes; at most BTS_MAX_RECORD_SIZE */ | ||
564 | unsigned char sizeof_bts; | ||
565 | /* the size of a field in the BTS record in bytes */ | ||
566 | unsigned char sizeof_field; | ||
567 | /* a bitmask to enable/disable BTS in DEBUGCTL MSR */ | ||
568 | unsigned long debugctl_mask; | ||
569 | }; | ||
570 | static struct bts_configuration bts_cfg; | ||
571 | |||
572 | #define BTS_MAX_RECORD_SIZE (8 * 3) | ||
573 | |||
574 | |||
575 | /* | ||
576 | * Branch Trace Store (BTS) uses the following format. Different | ||
577 | * architectures vary in the size of those fields. | ||
578 | * - source linear address | ||
579 | * - destination linear address | ||
580 | * - flags | ||
581 | * | ||
582 | * Later architectures use 64bit pointers throughout, whereas earlier | ||
583 | * architectures use 32bit pointers in 32bit mode. | ||
584 | * | ||
585 | * We compute the base address for the first 8 fields based on: | ||
586 | * - the field size stored in the DS configuration | ||
587 | * - the relative field position | ||
588 | * | ||
589 | * In order to store additional information in the BTS buffer, we use | ||
590 | * a special source address to indicate that the record requires | ||
591 | * special interpretation. | ||
592 | * | ||
593 | * Netburst indicated via a bit in the flags field whether the branch | ||
594 | * was predicted; this is ignored. | ||
595 | */ | ||
596 | |||
597 | enum bts_field { | ||
598 | bts_from = 0, | ||
599 | bts_to, | ||
600 | bts_flags, | ||
601 | |||
602 | bts_escape = (unsigned long)-1, | ||
603 | bts_qual = bts_to, | ||
604 | bts_jiffies = bts_flags | ||
605 | }; | ||
606 | |||
607 | static inline unsigned long bts_get(const char *base, enum bts_field field) | ||
608 | { | ||
609 | base += (bts_cfg.sizeof_field * field); | ||
610 | return *(unsigned long *)base; | ||
611 | } | ||
558 | 612 | ||
559 | static int ptrace_bts_get_size(struct task_struct *child) | 613 | static inline void bts_set(char *base, enum bts_field field, unsigned long val) |
560 | { | 614 | { |
561 | if (!child->thread.ds_area_msr) | 615 | base += (bts_cfg.sizeof_field * field); |
562 | return -ENXIO; | 616 | (*(unsigned long *)base) = val; |
617 | } | ||
563 | 618 | ||
564 | return ds_get_bts_index((void *)child->thread.ds_area_msr); | 619 | /* |
620 | * Translate a BTS record from the raw format into the bts_struct format | ||
621 | * | ||
622 | * out (out): bts_struct interpretation | ||
623 | * raw: raw BTS record | ||
624 | */ | ||
625 | static void ptrace_bts_translate_record(struct bts_struct *out, const void *raw) | ||
626 | { | ||
627 | memset(out, 0, sizeof(*out)); | ||
628 | if (bts_get(raw, bts_from) == bts_escape) { | ||
629 | out->qualifier = bts_get(raw, bts_qual); | ||
630 | out->variant.jiffies = bts_get(raw, bts_jiffies); | ||
631 | } else { | ||
632 | out->qualifier = BTS_BRANCH; | ||
633 | out->variant.lbr.from_ip = bts_get(raw, bts_from); | ||
634 | out->variant.lbr.to_ip = bts_get(raw, bts_to); | ||
635 | } | ||
565 | } | 636 | } |
566 | 637 | ||
567 | static int ptrace_bts_read_record(struct task_struct *child, | 638 | static int ptrace_bts_read_record(struct task_struct *child, size_t index, |
568 | long index, | ||
569 | struct bts_struct __user *out) | 639 | struct bts_struct __user *out) |
570 | { | 640 | { |
571 | struct bts_struct ret; | 641 | struct bts_struct ret; |
572 | int retval; | 642 | const void *bts_record; |
573 | int bts_end; | 643 | size_t bts_index, bts_end; |
574 | int bts_index; | 644 | int error; |
575 | |||
576 | if (!child->thread.ds_area_msr) | ||
577 | return -ENXIO; | ||
578 | 645 | ||
579 | if (index < 0) | 646 | error = ds_get_bts_end(child, &bts_end); |
580 | return -EINVAL; | 647 | if (error < 0) |
648 | return error; | ||
581 | 649 | ||
582 | bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr); | ||
583 | if (bts_end <= index) | 650 | if (bts_end <= index) |
584 | return -EINVAL; | 651 | return -EINVAL; |
585 | 652 | ||
653 | error = ds_get_bts_index(child, &bts_index); | ||
654 | if (error < 0) | ||
655 | return error; | ||
656 | |||
586 | /* translate the ptrace bts index into the ds bts index */ | 657 | /* translate the ptrace bts index into the ds bts index */ |
587 | bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr); | 658 | bts_index += bts_end - (index + 1); |
588 | bts_index -= (index + 1); | 659 | if (bts_end <= bts_index) |
589 | if (bts_index < 0) | 660 | bts_index -= bts_end; |
590 | bts_index += bts_end; | ||
591 | 661 | ||
592 | retval = ds_read_bts((void *)child->thread.ds_area_msr, | 662 | error = ds_access_bts(child, bts_index, &bts_record); |
593 | bts_index, &ret); | 663 | if (error < 0) |
594 | if (retval < 0) | 664 | return error; |
595 | return retval; | 665 | |
666 | ptrace_bts_translate_record(&ret, bts_record); | ||
596 | 667 | ||
597 | if (copy_to_user(out, &ret, sizeof(ret))) | 668 | if (copy_to_user(out, &ret, sizeof(ret))) |
598 | return -EFAULT; | 669 | return -EFAULT; |
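The comment block and helpers above define the raw BTS record as three fields of bts_cfg.sizeof_field bytes each (source, destination, flags), and ptrace_bts_read_record() maps a ptrace index, where 0 is the newest record, onto the DS ring buffer whose index names the slot the hardware writes next. A self-contained model of both steps, assuming 8-byte fields as in the core2 layout; everything outside the two helpers is illustrative test data.

#include <stdio.h>
#include <string.h>

#define SIZEOF_FIELD	8	/* bts_cfg.sizeof_field for the core2 layout */
enum bts_field { bts_from = 0, bts_to, bts_flags };

/* read one field out of a raw record, as bts_get() does in the hunk above */
static unsigned long bts_get(const char *base, enum bts_field field)
{
	unsigned long val;

	memcpy(&val, base + SIZEOF_FIELD * field, sizeof(val));
	return val;
}

/* translate "ptrace index, 0 == newest" into a ring-buffer slot, where
 * bts_index is the slot the hardware will fill next */
static size_t ring_slot(size_t bts_index, size_t bts_end, size_t index)
{
	bts_index += bts_end - (index + 1);
	if (bts_end <= bts_index)
		bts_index -= bts_end;
	return bts_index;
}

int main(void)
{
	char raw[SIZEOF_FIELD * 3] = { 0 };
	unsigned long from = 0x400123, to = 0x400456;

	memcpy(raw + SIZEOF_FIELD * bts_from, &from, sizeof(from));
	memcpy(raw + SIZEOF_FIELD * bts_to,   &to,   sizeof(to));

	printf("branch %#lx -> %#lx\n",
	       bts_get(raw, bts_from), bts_get(raw, bts_to));

	/* newest record in a 4-slot buffer whose next write goes to slot 2 */
	printf("ptrace index 0 lives in ring slot %zu\n", ring_slot(2, 4, 0));
	return 0;
}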
@@ -600,101 +671,106 @@ static int ptrace_bts_read_record(struct task_struct *child, | |||
600 | return sizeof(ret); | 671 | return sizeof(ret); |
601 | } | 672 | } |
602 | 673 | ||
603 | static int ptrace_bts_clear(struct task_struct *child) | ||
604 | { | ||
605 | if (!child->thread.ds_area_msr) | ||
606 | return -ENXIO; | ||
607 | |||
608 | return ds_clear((void *)child->thread.ds_area_msr); | ||
609 | } | ||
610 | |||
611 | static int ptrace_bts_drain(struct task_struct *child, | 674 | static int ptrace_bts_drain(struct task_struct *child, |
612 | long size, | 675 | long size, |
613 | struct bts_struct __user *out) | 676 | struct bts_struct __user *out) |
614 | { | 677 | { |
615 | int end, i; | 678 | struct bts_struct ret; |
616 | void *ds = (void *)child->thread.ds_area_msr; | 679 | const unsigned char *raw; |
617 | 680 | size_t end, i; | |
618 | if (!ds) | 681 | int error; |
619 | return -ENXIO; | ||
620 | 682 | ||
621 | end = ds_get_bts_index(ds); | 683 | error = ds_get_bts_index(child, &end); |
622 | if (end <= 0) | 684 | if (error < 0) |
623 | return end; | 685 | return error; |
624 | 686 | ||
625 | if (size < (end * sizeof(struct bts_struct))) | 687 | if (size < (end * sizeof(struct bts_struct))) |
626 | return -EIO; | 688 | return -EIO; |
627 | 689 | ||
628 | for (i = 0; i < end; i++, out++) { | 690 | error = ds_access_bts(child, 0, (const void **)&raw); |
629 | struct bts_struct ret; | 691 | if (error < 0) |
630 | int retval; | 692 | return error; |
631 | 693 | ||
632 | retval = ds_read_bts(ds, i, &ret); | 694 | for (i = 0; i < end; i++, out++, raw += bts_cfg.sizeof_bts) { |
633 | if (retval < 0) | 695 | ptrace_bts_translate_record(&ret, raw); |
634 | return retval; | ||
635 | 696 | ||
636 | if (copy_to_user(out, &ret, sizeof(ret))) | 697 | if (copy_to_user(out, &ret, sizeof(ret))) |
637 | return -EFAULT; | 698 | return -EFAULT; |
638 | } | 699 | } |
639 | 700 | ||
640 | ds_clear(ds); | 701 | error = ds_clear_bts(child); |
702 | if (error < 0) | ||
703 | return error; | ||
641 | 704 | ||
642 | return end; | 705 | return end; |
643 | } | 706 | } |
644 | 707 | ||
708 | static void ptrace_bts_ovfl(struct task_struct *child) | ||
709 | { | ||
710 | send_sig(child->thread.bts_ovfl_signal, child, 0); | ||
711 | } | ||
712 | |||
645 | static int ptrace_bts_config(struct task_struct *child, | 713 | static int ptrace_bts_config(struct task_struct *child, |
646 | long cfg_size, | 714 | long cfg_size, |
647 | const struct ptrace_bts_config __user *ucfg) | 715 | const struct ptrace_bts_config __user *ucfg) |
648 | { | 716 | { |
649 | struct ptrace_bts_config cfg; | 717 | struct ptrace_bts_config cfg; |
650 | int bts_size, ret = 0; | 718 | int error = 0; |
651 | void *ds; | 719 | |
720 | error = -EOPNOTSUPP; | ||
721 | if (!bts_cfg.sizeof_bts) | ||
722 | goto errout; | ||
652 | 723 | ||
724 | error = -EIO; | ||
653 | if (cfg_size < sizeof(cfg)) | 725 | if (cfg_size < sizeof(cfg)) |
654 | return -EIO; | 726 | goto errout; |
655 | 727 | ||
728 | error = -EFAULT; | ||
656 | if (copy_from_user(&cfg, ucfg, sizeof(cfg))) | 729 | if (copy_from_user(&cfg, ucfg, sizeof(cfg))) |
657 | return -EFAULT; | 730 | goto errout; |
658 | 731 | ||
659 | if ((int)cfg.size < 0) | 732 | error = -EINVAL; |
660 | return -EINVAL; | 733 | if ((cfg.flags & PTRACE_BTS_O_SIGNAL) && |
734 | !(cfg.flags & PTRACE_BTS_O_ALLOC)) | ||
735 | goto errout; | ||
661 | 736 | ||
662 | bts_size = 0; | 737 | if (cfg.flags & PTRACE_BTS_O_ALLOC) { |
663 | ds = (void *)child->thread.ds_area_msr; | 738 | ds_ovfl_callback_t ovfl = NULL; |
664 | if (ds) { | 739 | unsigned int sig = 0; |
665 | bts_size = ds_get_bts_size(ds); | 740 | |
666 | if (bts_size < 0) | 741 | /* we ignore the error in case we were not tracing child */ |
667 | return bts_size; | 742 | (void)ds_release_bts(child); |
668 | } | ||
669 | cfg.size = PAGE_ALIGN(cfg.size); | ||
670 | 743 | ||
671 | if (bts_size != cfg.size) { | 744 | if (cfg.flags & PTRACE_BTS_O_SIGNAL) { |
672 | ret = ptrace_bts_realloc(child, cfg.size, | 745 | if (!cfg.signal) |
673 | cfg.flags & PTRACE_BTS_O_CUT_SIZE); | 746 | goto errout; |
674 | if (ret < 0) | 747 | |
748 | sig = cfg.signal; | ||
749 | ovfl = ptrace_bts_ovfl; | ||
750 | } | ||
751 | |||
752 | error = ds_request_bts(child, /* base = */ NULL, cfg.size, ovfl); | ||
753 | if (error < 0) | ||
675 | goto errout; | 754 | goto errout; |
676 | 755 | ||
677 | ds = (void *)child->thread.ds_area_msr; | 756 | child->thread.bts_ovfl_signal = sig; |
678 | } | 757 | } |
679 | 758 | ||
680 | if (cfg.flags & PTRACE_BTS_O_SIGNAL) | 759 | error = -EINVAL; |
681 | ret = ds_set_overflow(ds, DS_O_SIGNAL); | 760 | if (!child->thread.ds_ctx && cfg.flags) |
682 | else | ||
683 | ret = ds_set_overflow(ds, DS_O_WRAP); | ||
684 | if (ret < 0) | ||
685 | goto errout; | 761 | goto errout; |
686 | 762 | ||
687 | if (cfg.flags & PTRACE_BTS_O_TRACE) | 763 | if (cfg.flags & PTRACE_BTS_O_TRACE) |
688 | child->thread.debugctlmsr |= ds_debugctl_mask(); | 764 | child->thread.debugctlmsr |= bts_cfg.debugctl_mask; |
689 | else | 765 | else |
690 | child->thread.debugctlmsr &= ~ds_debugctl_mask(); | 766 | child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask; |
691 | 767 | ||
692 | if (cfg.flags & PTRACE_BTS_O_SCHED) | 768 | if (cfg.flags & PTRACE_BTS_O_SCHED) |
693 | set_tsk_thread_flag(child, TIF_BTS_TRACE_TS); | 769 | set_tsk_thread_flag(child, TIF_BTS_TRACE_TS); |
694 | else | 770 | else |
695 | clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); | 771 | clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); |
696 | 772 | ||
697 | ret = sizeof(cfg); | 773 | error = sizeof(cfg); |
698 | 774 | ||
699 | out: | 775 | out: |
700 | if (child->thread.debugctlmsr) | 776 | if (child->thread.debugctlmsr) |
@@ -702,10 +778,10 @@ out: | |||
702 | else | 778 | else |
703 | clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); | 779 | clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); |
704 | 780 | ||
705 | return ret; | 781 | return error; |
706 | 782 | ||
707 | errout: | 783 | errout: |
708 | child->thread.debugctlmsr &= ~ds_debugctl_mask(); | 784 | child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask; |
709 | clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); | 785 | clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); |
710 | goto out; | 786 | goto out; |
711 | } | 787 | } |
@@ -714,29 +790,40 @@ static int ptrace_bts_status(struct task_struct *child, | |||
714 | long cfg_size, | 790 | long cfg_size, |
715 | struct ptrace_bts_config __user *ucfg) | 791 | struct ptrace_bts_config __user *ucfg) |
716 | { | 792 | { |
717 | void *ds = (void *)child->thread.ds_area_msr; | ||
718 | struct ptrace_bts_config cfg; | 793 | struct ptrace_bts_config cfg; |
794 | size_t end; | ||
795 | const void *base, *max; | ||
796 | int error; | ||
719 | 797 | ||
720 | if (cfg_size < sizeof(cfg)) | 798 | if (cfg_size < sizeof(cfg)) |
721 | return -EIO; | 799 | return -EIO; |
722 | 800 | ||
723 | memset(&cfg, 0, sizeof(cfg)); | 801 | error = ds_get_bts_end(child, &end); |
802 | if (error < 0) | ||
803 | return error; | ||
724 | 804 | ||
725 | if (ds) { | 805 | error = ds_access_bts(child, /* index = */ 0, &base); |
726 | cfg.size = ds_get_bts_size(ds); | 806 | if (error < 0) |
807 | return error; | ||
727 | 808 | ||
728 | if (ds_get_overflow(ds) == DS_O_SIGNAL) | 809 | error = ds_access_bts(child, /* index = */ end, &max); |
729 | cfg.flags |= PTRACE_BTS_O_SIGNAL; | 810 | if (error < 0) |
811 | return error; | ||
730 | 812 | ||
731 | if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) && | 813 | memset(&cfg, 0, sizeof(cfg)); |
732 | child->thread.debugctlmsr & ds_debugctl_mask()) | 814 | cfg.size = (max - base); |
733 | cfg.flags |= PTRACE_BTS_O_TRACE; | 815 | cfg.signal = child->thread.bts_ovfl_signal; |
816 | cfg.bts_size = sizeof(struct bts_struct); | ||
734 | 817 | ||
735 | if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS)) | 818 | if (cfg.signal) |
736 | cfg.flags |= PTRACE_BTS_O_SCHED; | 819 | cfg.flags |= PTRACE_BTS_O_SIGNAL; |
737 | } | ||
738 | 820 | ||
739 | cfg.bts_size = sizeof(struct bts_struct); | 821 | if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) && |
822 | child->thread.debugctlmsr & bts_cfg.debugctl_mask) | ||
823 | cfg.flags |= PTRACE_BTS_O_TRACE; | ||
824 | |||
825 | if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS)) | ||
826 | cfg.flags |= PTRACE_BTS_O_SCHED; | ||
740 | 827 | ||
741 | if (copy_to_user(ucfg, &cfg, sizeof(cfg))) | 828 | if (copy_to_user(ucfg, &cfg, sizeof(cfg))) |
742 | return -EFAULT; | 829 | return -EFAULT; |
@@ -744,89 +831,38 @@ static int ptrace_bts_status(struct task_struct *child, | |||
744 | return sizeof(cfg); | 831 | return sizeof(cfg); |
745 | } | 832 | } |
746 | 833 | ||
747 | |||
748 | static int ptrace_bts_write_record(struct task_struct *child, | 834 | static int ptrace_bts_write_record(struct task_struct *child, |
749 | const struct bts_struct *in) | 835 | const struct bts_struct *in) |
750 | { | 836 | { |
751 | int retval; | 837 | unsigned char bts_record[BTS_MAX_RECORD_SIZE]; |
752 | 838 | ||
753 | if (!child->thread.ds_area_msr) | 839 | BUG_ON(BTS_MAX_RECORD_SIZE < bts_cfg.sizeof_bts); |
754 | return -ENXIO; | ||
755 | 840 | ||
756 | retval = ds_write_bts((void *)child->thread.ds_area_msr, in); | 841 | memset(bts_record, 0, bts_cfg.sizeof_bts); |
757 | if (retval) | 842 | switch (in->qualifier) { |
758 | return retval; | 843 | case BTS_INVALID: |
844 | break; | ||
759 | 845 | ||
760 | return sizeof(*in); | 846 | case BTS_BRANCH: |
761 | } | 847 | bts_set(bts_record, bts_from, in->variant.lbr.from_ip); |
848 | bts_set(bts_record, bts_to, in->variant.lbr.to_ip); | ||
849 | break; | ||
762 | 850 | ||
763 | static int ptrace_bts_realloc(struct task_struct *child, | 851 | case BTS_TASK_ARRIVES: |
764 | int size, int reduce_size) | 852 | case BTS_TASK_DEPARTS: |
765 | { | 853 | bts_set(bts_record, bts_from, bts_escape); |
766 | unsigned long rlim, vm; | 854 | bts_set(bts_record, bts_qual, in->qualifier); |
767 | int ret, old_size; | 855 | bts_set(bts_record, bts_jiffies, in->variant.jiffies); |
856 | break; | ||
768 | 857 | ||
769 | if (size < 0) | 858 | default: |
770 | return -EINVAL; | 859 | return -EINVAL; |
771 | |||
772 | old_size = ds_get_bts_size((void *)child->thread.ds_area_msr); | ||
773 | if (old_size < 0) | ||
774 | return old_size; | ||
775 | |||
776 | ret = ds_free((void **)&child->thread.ds_area_msr); | ||
777 | if (ret < 0) | ||
778 | goto out; | ||
779 | |||
780 | size >>= PAGE_SHIFT; | ||
781 | old_size >>= PAGE_SHIFT; | ||
782 | |||
783 | current->mm->total_vm -= old_size; | ||
784 | current->mm->locked_vm -= old_size; | ||
785 | |||
786 | if (size == 0) | ||
787 | goto out; | ||
788 | |||
789 | rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT; | ||
790 | vm = current->mm->total_vm + size; | ||
791 | if (rlim < vm) { | ||
792 | ret = -ENOMEM; | ||
793 | |||
794 | if (!reduce_size) | ||
795 | goto out; | ||
796 | |||
797 | size = rlim - current->mm->total_vm; | ||
798 | if (size <= 0) | ||
799 | goto out; | ||
800 | } | ||
801 | |||
802 | rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; | ||
803 | vm = current->mm->locked_vm + size; | ||
804 | if (rlim < vm) { | ||
805 | ret = -ENOMEM; | ||
806 | |||
807 | if (!reduce_size) | ||
808 | goto out; | ||
809 | |||
810 | size = rlim - current->mm->locked_vm; | ||
811 | if (size <= 0) | ||
812 | goto out; | ||
813 | } | 860 | } |
814 | 861 | ||
815 | ret = ds_allocate((void **)&child->thread.ds_area_msr, | 862 | /* The writing task will be the switched-to task on a context |
816 | size << PAGE_SHIFT); | 863 | * switch. It needs to write into the switched-from task's BTS |
817 | if (ret < 0) | 864 | * buffer. */ |
818 | goto out; | 865 | return ds_unchecked_write_bts(child, bts_record, bts_cfg.sizeof_bts); |
819 | |||
820 | current->mm->total_vm += size; | ||
821 | current->mm->locked_vm += size; | ||
822 | |||
823 | out: | ||
824 | if (child->thread.ds_area_msr) | ||
825 | set_tsk_thread_flag(child, TIF_DS_AREA_MSR); | ||
826 | else | ||
827 | clear_tsk_thread_flag(child, TIF_DS_AREA_MSR); | ||
828 | |||
829 | return ret; | ||
830 | } | 866 | } |
831 | 867 | ||
832 | void ptrace_bts_take_timestamp(struct task_struct *tsk, | 868 | void ptrace_bts_take_timestamp(struct task_struct *tsk, |
@@ -839,7 +875,66 @@ void ptrace_bts_take_timestamp(struct task_struct *tsk, | |||
839 | 875 | ||
840 | ptrace_bts_write_record(tsk, &rec); | 876 | ptrace_bts_write_record(tsk, &rec); |
841 | } | 877 | } |
842 | #endif /* X86_BTS */ | 878 | |
879 | static const struct bts_configuration bts_cfg_netburst = { | ||
880 | .sizeof_bts = sizeof(long) * 3, | ||
881 | .sizeof_field = sizeof(long), | ||
882 | .debugctl_mask = (1<<2)|(1<<3)|(1<<5) | ||
883 | }; | ||
884 | |||
885 | static const struct bts_configuration bts_cfg_pentium_m = { | ||
886 | .sizeof_bts = sizeof(long) * 3, | ||
887 | .sizeof_field = sizeof(long), | ||
888 | .debugctl_mask = (1<<6)|(1<<7) | ||
889 | }; | ||
890 | |||
891 | static const struct bts_configuration bts_cfg_core2 = { | ||
892 | .sizeof_bts = 8 * 3, | ||
893 | .sizeof_field = 8, | ||
894 | .debugctl_mask = (1<<6)|(1<<7)|(1<<9) | ||
895 | }; | ||
896 | |||
897 | static inline void bts_configure(const struct bts_configuration *cfg) | ||
898 | { | ||
899 | bts_cfg = *cfg; | ||
900 | } | ||
901 | |||
902 | void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *c) | ||
903 | { | ||
904 | switch (c->x86) { | ||
905 | case 0x6: | ||
906 | switch (c->x86_model) { | ||
907 | case 0xD: | ||
908 | case 0xE: /* Pentium M */ | ||
909 | bts_configure(&bts_cfg_pentium_m); | ||
910 | break; | ||
911 | case 0xF: /* Core2 */ | ||
912 | case 0x1C: /* Atom */ | ||
913 | bts_configure(&bts_cfg_core2); | ||
914 | break; | ||
915 | default: | ||
916 | /* sorry, don't know about them */ | ||
917 | break; | ||
918 | } | ||
919 | break; | ||
920 | case 0xF: | ||
921 | switch (c->x86_model) { | ||
922 | case 0x0: | ||
923 | case 0x1: | ||
924 | case 0x2: /* Netburst */ | ||
925 | bts_configure(&bts_cfg_netburst); | ||
926 | break; | ||
927 | default: | ||
928 | /* sorry, don't know about them */ | ||
929 | break; | ||
930 | } | ||
931 | break; | ||
932 | default: | ||
933 | /* sorry, don't know about them */ | ||
934 | break; | ||
935 | } | ||
936 | } | ||
937 | #endif /* CONFIG_X86_PTRACE_BTS */ | ||
843 | 938 | ||
844 | /* | 939 | /* |
845 | * Called by kernel/ptrace.c when detaching.. | 940 | * Called by kernel/ptrace.c when detaching.. |
@@ -852,15 +947,15 @@ void ptrace_disable(struct task_struct *child) | |||
852 | #ifdef TIF_SYSCALL_EMU | 947 | #ifdef TIF_SYSCALL_EMU |
853 | clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); | 948 | clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); |
854 | #endif | 949 | #endif |
855 | if (child->thread.ds_area_msr) { | 950 | #ifdef CONFIG_X86_PTRACE_BTS |
856 | #ifdef X86_BTS | 951 | (void)ds_release_bts(child); |
857 | ptrace_bts_realloc(child, 0, 0); | 952 | |
858 | #endif | 953 | child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask; |
859 | child->thread.debugctlmsr &= ~ds_debugctl_mask(); | 954 | if (!child->thread.debugctlmsr) |
860 | if (!child->thread.debugctlmsr) | 955 | clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); |
861 | clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); | 956 | |
862 | clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); | 957 | clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); |
863 | } | 958 | #endif /* CONFIG_X86_PTRACE_BTS */ |
864 | } | 959 | } |
865 | 960 | ||
866 | #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION | 961 | #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION |
@@ -980,7 +1075,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
980 | /* | 1075 | /* |
981 | * These bits need more cooking - not enabled yet: | 1076 | * These bits need more cooking - not enabled yet: |
982 | */ | 1077 | */ |
983 | #ifdef X86_BTS | 1078 | #ifdef CONFIG_X86_PTRACE_BTS |
984 | case PTRACE_BTS_CONFIG: | 1079 | case PTRACE_BTS_CONFIG: |
985 | ret = ptrace_bts_config | 1080 | ret = ptrace_bts_config |
986 | (child, data, (struct ptrace_bts_config __user *)addr); | 1081 | (child, data, (struct ptrace_bts_config __user *)addr); |
@@ -992,7 +1087,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
992 | break; | 1087 | break; |
993 | 1088 | ||
994 | case PTRACE_BTS_SIZE: | 1089 | case PTRACE_BTS_SIZE: |
995 | ret = ptrace_bts_get_size(child); | 1090 | ret = ds_get_bts_index(child, /* pos = */ NULL); |
996 | break; | 1091 | break; |
997 | 1092 | ||
998 | case PTRACE_BTS_GET: | 1093 | case PTRACE_BTS_GET: |
@@ -1001,14 +1096,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1001 | break; | 1096 | break; |
1002 | 1097 | ||
1003 | case PTRACE_BTS_CLEAR: | 1098 | case PTRACE_BTS_CLEAR: |
1004 | ret = ptrace_bts_clear(child); | 1099 | ret = ds_clear_bts(child); |
1005 | break; | 1100 | break; |
1006 | 1101 | ||
1007 | case PTRACE_BTS_DRAIN: | 1102 | case PTRACE_BTS_DRAIN: |
1008 | ret = ptrace_bts_drain | 1103 | ret = ptrace_bts_drain |
1009 | (child, data, (struct bts_struct __user *) addr); | 1104 | (child, data, (struct bts_struct __user *) addr); |
1010 | break; | 1105 | break; |
1011 | #endif | 1106 | #endif /* CONFIG_X86_PTRACE_BTS */ |
1012 | 1107 | ||
1013 | default: | 1108 | default: |
1014 | ret = ptrace_request(child, request, addr, data); | 1109 | ret = ptrace_request(child, request, addr, data); |
@@ -1375,30 +1470,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) | |||
1375 | force_sig_info(SIGTRAP, &info, tsk); | 1470 | force_sig_info(SIGTRAP, &info, tsk); |
1376 | } | 1471 | } |
1377 | 1472 | ||
1378 | static void syscall_trace(struct pt_regs *regs) | ||
1379 | { | ||
1380 | if (!(current->ptrace & PT_PTRACED)) | ||
1381 | return; | ||
1382 | |||
1383 | #if 0 | ||
1384 | printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n", | ||
1385 | current->comm, | ||
1386 | regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0), | ||
1387 | current_thread_info()->flags, current->ptrace); | ||
1388 | #endif | ||
1389 | |||
1390 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | ||
1391 | ? 0x80 : 0)); | ||
1392 | /* | ||
1393 | * this isn't the same as continuing with a signal, but it will do | ||
1394 | * for normal use. strace only continues with a signal if the | ||
1395 | * stopping signal is not SIGTRAP. -brl | ||
1396 | */ | ||
1397 | if (current->exit_code) { | ||
1398 | send_sig(current->exit_code, current, 1); | ||
1399 | current->exit_code = 0; | ||
1400 | } | ||
1401 | } | ||
1402 | 1473 | ||
1403 | #ifdef CONFIG_X86_32 | 1474 | #ifdef CONFIG_X86_32 |
1404 | # define IS_IA32 1 | 1475 | # define IS_IA32 1 |
@@ -1432,8 +1503,9 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs) | |||
1432 | if (unlikely(test_thread_flag(TIF_SYSCALL_EMU))) | 1503 | if (unlikely(test_thread_flag(TIF_SYSCALL_EMU))) |
1433 | ret = -1L; | 1504 | ret = -1L; |
1434 | 1505 | ||
1435 | if (ret || test_thread_flag(TIF_SYSCALL_TRACE)) | 1506 | if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) && |
1436 | syscall_trace(regs); | 1507 | tracehook_report_syscall_entry(regs)) |
1508 | ret = -1L; | ||
1437 | 1509 | ||
1438 | if (unlikely(current->audit_context)) { | 1510 | if (unlikely(current->audit_context)) { |
1439 | if (IS_IA32) | 1511 | if (IS_IA32) |
@@ -1459,7 +1531,7 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs) | |||
1459 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); | 1531 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); |
1460 | 1532 | ||
1461 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 1533 | if (test_thread_flag(TIF_SYSCALL_TRACE)) |
1462 | syscall_trace(regs); | 1534 | tracehook_report_syscall_exit(regs, 0); |
1463 | 1535 | ||
1464 | /* | 1536 | /* |
1465 | * If TIF_SYSCALL_EMU is set, we only get here because of | 1537 | * If TIF_SYSCALL_EMU is set, we only get here because of |
@@ -1475,6 +1547,6 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs) | |||
1475 | * system call instruction. | 1547 | * system call instruction. |
1476 | */ | 1548 | */ |
1477 | if (test_thread_flag(TIF_SINGLESTEP) && | 1549 | if (test_thread_flag(TIF_SINGLESTEP) && |
1478 | (current->ptrace & PT_PTRACED)) | 1550 | tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL)) |
1479 | send_sigtrap(current, regs, 0); | 1551 | send_sigtrap(current, regs, 0); |
1480 | } | 1552 | } |
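Earlier in this file, ptrace_bts_init_intel() picks a record layout and DEBUGCTL mask from the CPU family and model; anything it does not recognize leaves bts_cfg.sizeof_bts at zero, so ptrace_bts_config() later fails with -EOPNOTSUPP. A small stand-alone model of that dispatch; only the family/model numbers come from the hunk above, the labels are mine.

#include <stdio.h>

static const char *bts_variant(unsigned int family, unsigned int model)
{
	if (family == 0x6) {
		if (model == 0xD || model == 0xE)
			return "pentium_m layout";
		if (model == 0xF || model == 0x1C)
			return "core2/atom layout";
	} else if (family == 0xF) {
		if (model <= 0x2)
			return "netburst layout";
	}
	return "unknown: BTS stays unconfigured (-EOPNOTSUPP at config time)";
}

int main(void)
{
	printf("family 0x6, model 0x0F -> %s\n", bts_variant(0x6, 0x0F));
	printf("family 0xF, model 0x02 -> %s\n", bts_variant(0xF, 0x02));
	printf("family 0x6, model 0x17 -> %s\n", bts_variant(0x6, 0x17));
	return 0;
}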
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 724adfc63cb9..f4c93f1cfc19 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -29,7 +29,11 @@ EXPORT_SYMBOL(pm_power_off); | |||
29 | 29 | ||
30 | static const struct desc_ptr no_idt = {}; | 30 | static const struct desc_ptr no_idt = {}; |
31 | static int reboot_mode; | 31 | static int reboot_mode; |
32 | enum reboot_type reboot_type = BOOT_KBD; | 32 | /* |
33 | * Keyboard reset and triple fault may result in INIT, not RESET, which | ||
34 | * doesn't work when we're in vmx root mode. Try ACPI first. | ||
35 | */ | ||
36 | enum reboot_type reboot_type = BOOT_ACPI; | ||
33 | int reboot_force; | 37 | int reboot_force; |
34 | 38 | ||
35 | #if defined(CONFIG_X86_32) && defined(CONFIG_SMP) | 39 | #if defined(CONFIG_X86_32) && defined(CONFIG_SMP) |
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index 703310a99023..6f50664b2ba5 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S | |||
@@ -20,10 +20,11 @@ | |||
20 | #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | 20 | #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) |
21 | #define PAE_PGD_ATTR (_PAGE_PRESENT) | 21 | #define PAE_PGD_ATTR (_PAGE_PRESENT) |
22 | 22 | ||
23 | /* control_page + PAGE_SIZE/2 ~ control_page + PAGE_SIZE * 3/4 are | 23 | /* control_page + KEXEC_CONTROL_CODE_MAX_SIZE |
24 | * used to save some data for jumping back | 24 | * ~ control_page + PAGE_SIZE are used as data storage and stack for |
25 | * jumping back | ||
25 | */ | 26 | */ |
26 | #define DATA(offset) (PAGE_SIZE/2+(offset)) | 27 | #define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset)) |
27 | 28 | ||
28 | /* Minimal CPU state */ | 29 | /* Minimal CPU state */ |
29 | #define ESP DATA(0x0) | 30 | #define ESP DATA(0x0) |
@@ -376,3 +377,6 @@ swap_pages: | |||
376 | popl %ebx | 377 | popl %ebx |
377 | popl %ebp | 378 | popl %ebp |
378 | ret | 379 | ret |
380 | |||
381 | .globl kexec_control_code_size | ||
382 | .set kexec_control_code_size, . - relocate_kernel | ||
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 68b48e3fbcbd..141efab52400 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -223,6 +223,9 @@ unsigned long saved_video_mode; | |||
223 | #define RAMDISK_LOAD_FLAG 0x4000 | 223 | #define RAMDISK_LOAD_FLAG 0x4000 |
224 | 224 | ||
225 | static char __initdata command_line[COMMAND_LINE_SIZE]; | 225 | static char __initdata command_line[COMMAND_LINE_SIZE]; |
226 | #ifdef CONFIG_CMDLINE_BOOL | ||
227 | static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; | ||
228 | #endif | ||
226 | 229 | ||
227 | #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) | 230 | #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) |
228 | struct edd edd; | 231 | struct edd edd; |
@@ -445,7 +448,7 @@ static void __init reserve_early_setup_data(void) | |||
445 | * @size: Size of the crashkernel memory to reserve. | 448 | * @size: Size of the crashkernel memory to reserve. |
446 | * Returns the base address on success, and -1ULL on failure. | 449 | * Returns the base address on success, and -1ULL on failure. |
447 | */ | 450 | */ |
448 | unsigned long long find_and_reserve_crashkernel(unsigned long long size) | 451 | unsigned long long __init find_and_reserve_crashkernel(unsigned long long size) |
449 | { | 452 | { |
450 | const unsigned long long alignment = 16<<20; /* 16M */ | 453 | const unsigned long long alignment = 16<<20; /* 16M */ |
451 | unsigned long long start = 0LL; | 454 | unsigned long long start = 0LL; |
@@ -604,14 +607,6 @@ void __init setup_arch(char **cmdline_p) | |||
604 | early_cpu_init(); | 607 | early_cpu_init(); |
605 | early_ioremap_init(); | 608 | early_ioremap_init(); |
606 | 609 | ||
607 | #if defined(CONFIG_VMI) && defined(CONFIG_X86_32) | ||
608 | /* | ||
609 | * Must be before kernel pagetables are setup | ||
610 | * or fixmap area is touched. | ||
611 | */ | ||
612 | vmi_init(); | ||
613 | #endif | ||
614 | |||
615 | ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); | 610 | ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); |
616 | screen_info = boot_params.screen_info; | 611 | screen_info = boot_params.screen_info; |
617 | edid_info = boot_params.edid_info; | 612 | edid_info = boot_params.edid_info; |
@@ -673,11 +668,36 @@ void __init setup_arch(char **cmdline_p) | |||
673 | bss_resource.start = virt_to_phys(&__bss_start); | 668 | bss_resource.start = virt_to_phys(&__bss_start); |
674 | bss_resource.end = virt_to_phys(&__bss_stop)-1; | 669 | bss_resource.end = virt_to_phys(&__bss_stop)-1; |
675 | 670 | ||
671 | #ifdef CONFIG_CMDLINE_BOOL | ||
672 | #ifdef CONFIG_CMDLINE_OVERRIDE | ||
673 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); | ||
674 | #else | ||
675 | if (builtin_cmdline[0]) { | ||
676 | /* append boot loader cmdline to builtin */ | ||
677 | strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE); | ||
678 | strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE); | ||
679 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); | ||
680 | } | ||
681 | #endif | ||
682 | #endif | ||
683 | |||
676 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); | 684 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); |
677 | *cmdline_p = command_line; | 685 | *cmdline_p = command_line; |
678 | 686 | ||
679 | parse_early_param(); | 687 | parse_early_param(); |
680 | 688 | ||
689 | #ifdef CONFIG_X86_64 | ||
690 | check_efer(); | ||
691 | #endif | ||
692 | |||
693 | #if defined(CONFIG_VMI) && defined(CONFIG_X86_32) | ||
694 | /* | ||
695 | * Must be before kernel pagetables are setup | ||
696 | * or fixmap area is touched. | ||
697 | */ | ||
698 | vmi_init(); | ||
699 | #endif | ||
700 | |||
681 | /* after early param, so could get panic from serial */ | 701 | /* after early param, so could get panic from serial */ |
682 | reserve_early_setup_data(); | 702 | reserve_early_setup_data(); |
683 | 703 | ||
@@ -738,7 +758,6 @@ void __init setup_arch(char **cmdline_p) | |||
738 | #else | 758 | #else |
739 | num_physpages = max_pfn; | 759 | num_physpages = max_pfn; |
740 | 760 | ||
741 | check_efer(); | ||
742 | 761 | ||
743 | /* How many end-of-memory variables you have, grandma! */ | 762 | /* How many end-of-memory variables you have, grandma! */ |
744 | /* need this before calling reserve_initrd */ | 763 | /* need this before calling reserve_initrd */ |
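The CONFIG_CMDLINE_BOOL block added above either appends the boot loader's command line to the built-in CONFIG_CMDLINE string or, with CONFIG_CMDLINE_OVERRIDE, discards the boot loader's line entirely. A stand-alone sketch of the same string handling, using snprintf in place of the kernel's strlcpy/strlcat and made-up sample arguments:

#include <stdio.h>

#define COMMAND_LINE_SIZE 2048

int main(void)
{
	const char *builtin    = "console=ttyS0,115200 loglevel=7"; /* CONFIG_CMDLINE stand-in */
	const char *bootloader = "root=/dev/sda1 ro";                /* from the boot loader    */
	char boot_command_line[COMMAND_LINE_SIZE];

#ifdef CMDLINE_OVERRIDE		/* models CONFIG_CMDLINE_OVERRIDE: built-in wins outright */
	snprintf(boot_command_line, sizeof(boot_command_line), "%s", builtin);
#else				/* models plain CONFIG_CMDLINE_BOOL: built-in, then boot loader */
	snprintf(boot_command_line, sizeof(boot_command_line), "%s %s",
		 builtin, bootloader);
#endif
	printf("%s\n", boot_command_line);
	return 0;
}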
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 76e305e064f9..0e67f72d9316 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -162,9 +162,16 @@ void __init setup_per_cpu_areas(void) | |||
162 | printk(KERN_INFO | 162 | printk(KERN_INFO |
163 | "cpu %d has no node %d or node-local memory\n", | 163 | "cpu %d has no node %d or node-local memory\n", |
164 | cpu, node); | 164 | cpu, node); |
165 | if (ptr) | ||
166 | printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n", | ||
167 | cpu, __pa(ptr)); | ||
165 | } | 168 | } |
166 | else | 169 | else { |
167 | ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); | 170 | ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); |
171 | if (ptr) | ||
172 | printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n", | ||
173 | cpu, node, __pa(ptr)); | ||
174 | } | ||
168 | #endif | 175 | #endif |
169 | per_cpu_offset(cpu) = ptr - __per_cpu_start; | 176 | per_cpu_offset(cpu) = ptr - __per_cpu_start; |
170 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | 177 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); |
diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h index 72bbb519d2dc..8b4956e800ac 100644 --- a/arch/x86/kernel/sigframe.h +++ b/arch/x86/kernel/sigframe.h | |||
@@ -24,4 +24,9 @@ struct rt_sigframe { | |||
24 | struct ucontext uc; | 24 | struct ucontext uc; |
25 | struct siginfo info; | 25 | struct siginfo info; |
26 | }; | 26 | }; |
27 | |||
28 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
29 | sigset_t *set, struct pt_regs *regs); | ||
30 | int ia32_setup_frame(int sig, struct k_sigaction *ka, | ||
31 | sigset_t *set, struct pt_regs *regs); | ||
27 | #endif | 32 | #endif |
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 6fb5bcdd8933..2a2435d3037d 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/wait.h> | 19 | #include <linux/wait.h> |
20 | #include <linux/tracehook.h> | ||
20 | #include <linux/elf.h> | 21 | #include <linux/elf.h> |
21 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
22 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
@@ -26,6 +27,7 @@ | |||
26 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
27 | #include <asm/i387.h> | 28 | #include <asm/i387.h> |
28 | #include <asm/vdso.h> | 29 | #include <asm/vdso.h> |
30 | #include <asm/syscalls.h> | ||
29 | 31 | ||
30 | #include "sigframe.h" | 32 | #include "sigframe.h" |
31 | 33 | ||
@@ -558,8 +560,6 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
558 | * handler too. | 560 | * handler too. |
559 | */ | 561 | */ |
560 | regs->flags &= ~X86_EFLAGS_TF; | 562 | regs->flags &= ~X86_EFLAGS_TF; |
561 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
562 | ptrace_notify(SIGTRAP); | ||
563 | 563 | ||
564 | spin_lock_irq(¤t->sighand->siglock); | 564 | spin_lock_irq(¤t->sighand->siglock); |
565 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); | 565 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); |
@@ -568,6 +568,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
568 | recalc_sigpending(); | 568 | recalc_sigpending(); |
569 | spin_unlock_irq(¤t->sighand->siglock); | 569 | spin_unlock_irq(¤t->sighand->siglock); |
570 | 570 | ||
571 | tracehook_signal_handler(sig, info, ka, regs, | ||
572 | test_thread_flag(TIF_SINGLESTEP)); | ||
573 | |||
571 | return 0; | 574 | return 0; |
572 | } | 575 | } |
573 | 576 | ||
@@ -661,5 +664,10 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | |||
661 | if (thread_info_flags & _TIF_SIGPENDING) | 664 | if (thread_info_flags & _TIF_SIGPENDING) |
662 | do_signal(regs); | 665 | do_signal(regs); |
663 | 666 | ||
667 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
668 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
669 | tracehook_notify_resume(regs); | ||
670 | } | ||
671 | |||
664 | clear_thread_flag(TIF_IRET); | 672 | clear_thread_flag(TIF_IRET); |
665 | } | 673 | } |
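do_notify_resume() above now also honors TIF_NOTIFY_RESUME: the flag is cleared and tracehook_notify_resume() is called on the way back to user space, after any pending signals have been delivered. A toy model of that flag handling; the bit positions are illustrative, not the real thread_info layout.

#include <stdio.h>

#define TIF_SIGPENDING		(1u << 2)	/* illustrative bit positions */
#define TIF_NOTIFY_RESUME	(1u << 1)

static void return_to_user(unsigned int *flags)
{
	if (*flags & TIF_SIGPENDING)
		printf("deliver pending signals\n");

	if (*flags & TIF_NOTIFY_RESUME) {
		*flags &= ~TIF_NOTIFY_RESUME;	/* clear before calling the hook */
		printf("call tracehook_notify_resume()\n");
	}
}

int main(void)
{
	unsigned int flags = TIF_SIGPENDING | TIF_NOTIFY_RESUME;

	return_to_user(&flags);
	printf("flags afterwards: %#x\n", flags);
	return 0;
}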
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index b45ef8ddd651..694aa888bb19 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c | |||
@@ -15,17 +15,21 @@ | |||
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/wait.h> | 16 | #include <linux/wait.h> |
17 | #include <linux/ptrace.h> | 17 | #include <linux/ptrace.h> |
18 | #include <linux/tracehook.h> | ||
18 | #include <linux/unistd.h> | 19 | #include <linux/unistd.h> |
19 | #include <linux/stddef.h> | 20 | #include <linux/stddef.h> |
20 | #include <linux/personality.h> | 21 | #include <linux/personality.h> |
21 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
23 | #include <linux/uaccess.h> | ||
24 | |||
22 | #include <asm/processor.h> | 25 | #include <asm/processor.h> |
23 | #include <asm/ucontext.h> | 26 | #include <asm/ucontext.h> |
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/i387.h> | 27 | #include <asm/i387.h> |
26 | #include <asm/proto.h> | 28 | #include <asm/proto.h> |
27 | #include <asm/ia32_unistd.h> | 29 | #include <asm/ia32_unistd.h> |
28 | #include <asm/mce.h> | 30 | #include <asm/mce.h> |
31 | #include <asm/syscall.h> | ||
32 | #include <asm/syscalls.h> | ||
29 | #include "sigframe.h" | 33 | #include "sigframe.h" |
30 | 34 | ||
31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 35 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
@@ -41,11 +45,6 @@ | |||
41 | # define FIX_EFLAGS __FIX_EFLAGS | 45 | # define FIX_EFLAGS __FIX_EFLAGS |
42 | #endif | 46 | #endif |
43 | 47 | ||
44 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
45 | sigset_t *set, struct pt_regs * regs); | ||
46 | int ia32_setup_frame(int sig, struct k_sigaction *ka, | ||
47 | sigset_t *set, struct pt_regs * regs); | ||
48 | |||
49 | asmlinkage long | 48 | asmlinkage long |
50 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | 49 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, |
51 | struct pt_regs *regs) | 50 | struct pt_regs *regs) |
@@ -104,7 +103,16 @@ static inline int restore_i387(struct _fpstate __user *buf) | |||
104 | clts(); | 103 | clts(); |
105 | task_thread_info(current)->status |= TS_USEDFPU; | 104 | task_thread_info(current)->status |= TS_USEDFPU; |
106 | } | 105 | } |
107 | return restore_fpu_checking((__force struct i387_fxsave_struct *)buf); | 106 | err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf); |
107 | if (unlikely(err)) { | ||
108 | /* | ||
109 | * Encountered an error while doing the restore from the | ||
110 | * user buffer, clear the fpu state. | ||
111 | */ | ||
112 | clear_fpu(tsk); | ||
113 | clear_used_math(); | ||
114 | } | ||
115 | return err; | ||
108 | } | 116 | } |
109 | 117 | ||
110 | /* | 118 | /* |
@@ -119,7 +127,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
119 | /* Always make any pending restarted system calls return -EINTR */ | 127 | /* Always make any pending restarted system calls return -EINTR */ |
120 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | 128 | current_thread_info()->restart_block.fn = do_no_restart_syscall; |
121 | 129 | ||
122 | #define COPY(x) err |= __get_user(regs->x, &sc->x) | 130 | #define COPY(x) (err |= __get_user(regs->x, &sc->x)) |
123 | 131 | ||
124 | COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); | 132 | COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); |
125 | COPY(dx); COPY(cx); COPY(ip); | 133 | COPY(dx); COPY(cx); COPY(ip); |
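Parenthesizing the COPY() body is more than checkpatch pedantry: |= binds more loosely than comparison and arithmetic operators, so the unparenthesized form composes differently when the macro is used inside a larger expression. A stand-alone demonstration; the macro names are made up, only the parenthesization mirrors the change above.

#include <stdio.h>

#define COPY_OLD(x)	err |= (x)	/* unparenthesized, like the old COPY() */
#define COPY_NEW(x)	(err |= (x))	/* parenthesized, like the new COPY()   */

int main(void)
{
	int err;

	err = 0;
	(void)(COPY_OLD(1) == 0);	/* expands to: err |= ((1) == 0)  ->  err stays 0 */
	printf("old macro: err = %d\n", err);

	err = 0;
	(void)(COPY_NEW(1) == 0);	/* expands to: (err |= (1)) == 0  ->  err becomes 1 */
	printf("new macro: err = %d\n", err);
	return 0;
}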
@@ -149,7 +157,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
149 | } | 157 | } |
150 | 158 | ||
151 | { | 159 | { |
152 | struct _fpstate __user * buf; | 160 | struct _fpstate __user *buf; |
153 | err |= __get_user(buf, &sc->fpstate); | 161 | err |= __get_user(buf, &sc->fpstate); |
154 | 162 | ||
155 | if (buf) { | 163 | if (buf) { |
@@ -189,7 +197,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | |||
189 | current->blocked = set; | 197 | current->blocked = set; |
190 | recalc_sigpending(); | 198 | recalc_sigpending(); |
191 | spin_unlock_irq(¤t->sighand->siglock); | 199 | spin_unlock_irq(¤t->sighand->siglock); |
192 | 200 | ||
193 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) | 201 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) |
194 | goto badframe; | 202 | goto badframe; |
195 | 203 | ||
@@ -199,16 +207,17 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | |||
199 | return ax; | 207 | return ax; |
200 | 208 | ||
201 | badframe: | 209 | badframe: |
202 | signal_fault(regs,frame,"sigreturn"); | 210 | signal_fault(regs, frame, "sigreturn"); |
203 | return 0; | 211 | return 0; |
204 | } | 212 | } |
205 | 213 | ||
206 | /* | 214 | /* |
207 | * Set up a signal frame. | 215 | * Set up a signal frame. |
208 | */ | 216 | */ |
209 | 217 | ||
210 | static inline int | 218 | static inline int |
211 | setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me) | 219 | setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, |
220 | unsigned long mask, struct task_struct *me) | ||
212 | { | 221 | { |
213 | int err = 0; | 222 | int err = 0; |
214 | 223 | ||
@@ -264,35 +273,35 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size) | |||
264 | } | 273 | } |
265 | 274 | ||
266 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 275 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
267 | sigset_t *set, struct pt_regs * regs) | 276 | sigset_t *set, struct pt_regs *regs) |
268 | { | 277 | { |
269 | struct rt_sigframe __user *frame; | 278 | struct rt_sigframe __user *frame; |
270 | struct _fpstate __user *fp = NULL; | 279 | struct _fpstate __user *fp = NULL; |
271 | int err = 0; | 280 | int err = 0; |
272 | struct task_struct *me = current; | 281 | struct task_struct *me = current; |
273 | 282 | ||
274 | if (used_math()) { | 283 | if (used_math()) { |
275 | fp = get_stack(ka, regs, sizeof(struct _fpstate)); | 284 | fp = get_stack(ka, regs, sizeof(struct _fpstate)); |
276 | frame = (void __user *)round_down( | 285 | frame = (void __user *)round_down( |
277 | (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; | 286 | (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; |
278 | 287 | ||
279 | if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) | 288 | if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) |
280 | goto give_sigsegv; | 289 | goto give_sigsegv; |
281 | 290 | ||
282 | if (save_i387(fp) < 0) | 291 | if (save_i387(fp) < 0) |
283 | err |= -1; | 292 | err |= -1; |
284 | } else | 293 | } else |
285 | frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; | 294 | frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; |
286 | 295 | ||
287 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 296 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
288 | goto give_sigsegv; | 297 | goto give_sigsegv; |
289 | 298 | ||
290 | if (ka->sa.sa_flags & SA_SIGINFO) { | 299 | if (ka->sa.sa_flags & SA_SIGINFO) { |
291 | err |= copy_siginfo_to_user(&frame->info, info); | 300 | err |= copy_siginfo_to_user(&frame->info, info); |
292 | if (err) | 301 | if (err) |
293 | goto give_sigsegv; | 302 | goto give_sigsegv; |
294 | } | 303 | } |
295 | 304 | ||
296 | /* Create the ucontext. */ | 305 | /* Create the ucontext. */ |
297 | err |= __put_user(0, &frame->uc.uc_flags); | 306 | err |= __put_user(0, &frame->uc.uc_flags); |
298 | err |= __put_user(0, &frame->uc.uc_link); | 307 | err |= __put_user(0, &frame->uc.uc_link); |
@@ -302,9 +311,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
302 | err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); | 311 | err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); |
303 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); | 312 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); |
304 | err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); | 313 | err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); |
305 | if (sizeof(*set) == 16) { | 314 | if (sizeof(*set) == 16) { |
306 | __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); | 315 | __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); |
307 | __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); | 316 | __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); |
308 | } else | 317 | } else |
309 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 318 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
310 | 319 | ||
@@ -315,7 +324,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
315 | err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); | 324 | err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); |
316 | } else { | 325 | } else { |
317 | /* could use a vstub here */ | 326 | /* could use a vstub here */ |
318 | goto give_sigsegv; | 327 | goto give_sigsegv; |
319 | } | 328 | } |
320 | 329 | ||
321 | if (err) | 330 | if (err) |
@@ -323,7 +332,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
323 | 332 | ||
324 | /* Set up registers for signal handler */ | 333 | /* Set up registers for signal handler */ |
325 | regs->di = sig; | 334 | regs->di = sig; |
326 | /* In case the signal handler was declared without prototypes */ | 335 | /* In case the signal handler was declared without prototypes */ |
327 | regs->ax = 0; | 336 | regs->ax = 0; |
328 | 337 | ||
329 | /* This also works for non SA_SIGINFO handlers because they expect the | 338 | /* This also works for non SA_SIGINFO handlers because they expect the |
@@ -346,37 +355,8 @@ give_sigsegv: | |||
346 | } | 355 | } |
347 | 356 | ||
348 | /* | 357 | /* |
349 | * Return -1L or the syscall number that @regs is executing. | ||
350 | */ | ||
351 | static long current_syscall(struct pt_regs *regs) | ||
352 | { | ||
353 | /* | ||
354 | * We always sign-extend a -1 value being set here, | ||
355 | * so this is always either -1L or a syscall number. | ||
356 | */ | ||
357 | return regs->orig_ax; | ||
358 | } | ||
359 | |||
360 | /* | ||
361 | * Return a value that is -EFOO if the system call in @regs->orig_ax | ||
362 | * returned an error. This only works for @regs from @current. | ||
363 | */ | ||
364 | static long current_syscall_ret(struct pt_regs *regs) | ||
365 | { | ||
366 | #ifdef CONFIG_IA32_EMULATION | ||
367 | if (test_thread_flag(TIF_IA32)) | ||
368 | /* | ||
369 | * Sign-extend the value so (int)-EFOO becomes (long)-EFOO | ||
370 | * and will match correctly in comparisons. | ||
371 | */ | ||
372 | return (int) regs->ax; | ||
373 | #endif | ||
374 | return regs->ax; | ||
375 | } | ||
376 | |||
377 | /* | ||
378 | * OK, we're invoking a handler | 358 | * OK, we're invoking a handler |
379 | */ | 359 | */ |
380 | 360 | ||
381 | static int | 361 | static int |
382 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | 362 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, |
@@ -385,9 +365,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
385 | int ret; | 365 | int ret; |
386 | 366 | ||
387 | /* Are we from a system call? */ | 367 | /* Are we from a system call? */ |
388 | if (current_syscall(regs) >= 0) { | 368 | if (syscall_get_nr(current, regs) >= 0) { |
389 | /* If so, check system call restarting.. */ | 369 | /* If so, check system call restarting.. */ |
390 | switch (current_syscall_ret(regs)) { | 370 | switch (syscall_get_error(current, regs)) { |
391 | case -ERESTART_RESTARTBLOCK: | 371 | case -ERESTART_RESTARTBLOCK: |
392 | case -ERESTARTNOHAND: | 372 | case -ERESTARTNOHAND: |
393 | regs->ax = -EINTR; | 373 | regs->ax = -EINTR; |
@@ -420,7 +400,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
420 | ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs); | 400 | ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs); |
421 | else | 401 | else |
422 | ret = ia32_setup_frame(sig, ka, oldset, regs); | 402 | ret = ia32_setup_frame(sig, ka, oldset, regs); |
423 | } else | 403 | } else |
424 | #endif | 404 | #endif |
425 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | 405 | ret = setup_rt_frame(sig, ka, info, oldset, regs); |
426 | 406 | ||
@@ -444,15 +424,16 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
444 | * handler too. | 424 | * handler too. |
445 | */ | 425 | */ |
446 | regs->flags &= ~X86_EFLAGS_TF; | 426 | regs->flags &= ~X86_EFLAGS_TF; |
447 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
448 | ptrace_notify(SIGTRAP); | ||
449 | 427 | ||
450 | spin_lock_irq(&current->sighand->siglock); | 428 | spin_lock_irq(&current->sighand->siglock); |
451 | sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); | 429 | sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); |
452 | if (!(ka->sa.sa_flags & SA_NODEFER)) | 430 | if (!(ka->sa.sa_flags & SA_NODEFER)) |
453 | sigaddset(&current->blocked,sig); | 431 | sigaddset(&current->blocked, sig); |
454 | recalc_sigpending(); | 432 | recalc_sigpending(); |
455 | spin_unlock_irq(&current->sighand->siglock); | 433 | spin_unlock_irq(&current->sighand->siglock); |
434 | |||
435 | tracehook_signal_handler(sig, info, ka, regs, | ||
436 | test_thread_flag(TIF_SINGLESTEP)); | ||
456 | } | 437 | } |
457 | 438 | ||
458 | return ret; | 439 | return ret; |
@@ -509,9 +490,9 @@ static void do_signal(struct pt_regs *regs) | |||
509 | } | 490 | } |
510 | 491 | ||
511 | /* Did we come from a system call? */ | 492 | /* Did we come from a system call? */ |
512 | if (current_syscall(regs) >= 0) { | 493 | if (syscall_get_nr(current, regs) >= 0) { |
513 | /* Restart the system call - no handlers present */ | 494 | /* Restart the system call - no handlers present */ |
514 | switch (current_syscall_ret(regs)) { | 495 | switch (syscall_get_error(current, regs)) { |
515 | case -ERESTARTNOHAND: | 496 | case -ERESTARTNOHAND: |
516 | case -ERESTARTSYS: | 497 | case -ERESTARTSYS: |
517 | case -ERESTARTNOINTR: | 498 | case -ERESTARTNOINTR: |
@@ -549,17 +530,23 @@ void do_notify_resume(struct pt_regs *regs, void *unused, | |||
549 | /* deal with pending signal delivery */ | 530 | /* deal with pending signal delivery */ |
550 | if (thread_info_flags & _TIF_SIGPENDING) | 531 | if (thread_info_flags & _TIF_SIGPENDING) |
551 | do_signal(regs); | 532 | do_signal(regs); |
533 | |||
534 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
535 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
536 | tracehook_notify_resume(regs); | ||
537 | } | ||
552 | } | 538 | } |
553 | 539 | ||
554 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where) | 540 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where) |
555 | { | 541 | { |
556 | struct task_struct *me = current; | 542 | struct task_struct *me = current; |
557 | if (show_unhandled_signals && printk_ratelimit()) { | 543 | if (show_unhandled_signals && printk_ratelimit()) { |
558 | printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", | 544 | printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", |
559 | me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax); | 545 | me->comm, me->pid, where, frame, regs->ip, |
546 | regs->sp, regs->orig_ax); | ||
560 | print_vma_addr(" in ", regs->ip); | 547 | print_vma_addr(" in ", regs->ip); |
561 | printk("\n"); | 548 | printk("\n"); |
562 | } | 549 | } |
563 | 550 | ||
564 | force_sig(SIGSEGV, me); | 551 | force_sig(SIGSEGV, me); |
565 | } | 552 | } |
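The signal_64.c hunks above drop the file-local current_syscall()/current_syscall_ret() helpers in favour of the generic syscall_get_nr()/syscall_get_error() accessors that come with HAVE_ARCH_TRACEHOOK. As a reading aid, here is a minimal stand-alone sketch of the restart decision those two values feed; the function name, return convention and the -ERESTART* numeric values (taken as in include/linux/errno.h) are assumptions for illustration, not kernel API.

#include <errno.h>

#define ERESTARTSYS           512   /* assumed values, as in include/linux/errno.h */
#define ERESTARTNOINTR        513
#define ERESTARTNOHAND        514
#define ERESTART_RESTARTBLOCK 516

/* 0 = leave the result alone, -EINTR = fail the call, 1 = rewind and restart it. */
long restart_decision(long nr, long error, int sa_restart)
{
	if (nr < 0)			/* the signal did not interrupt a syscall */
		return 0;

	switch (error) {
	case -ERESTART_RESTARTBLOCK:
	case -ERESTARTNOHAND:
		return -EINTR;		/* never restarted across a handler */
	case -ERESTARTSYS:
		if (!sa_restart)
			return -EINTR;	/* only restart when SA_RESTART is set */
		/* fall through */
	case -ERESTARTNOINTR:
		return 1;		/* caller backs the ip up to the syscall insn */
	}
	return 0;
}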
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 91055d7fc1b0..45531e3ba194 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -88,7 +88,7 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); | |||
88 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) | 88 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) |
89 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) | 89 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) |
90 | #else | 90 | #else |
91 | struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; | 91 | static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; |
92 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | 92 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) |
93 | #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) | 93 | #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) |
94 | #endif | 94 | #endif |
@@ -129,7 +129,7 @@ static int boot_cpu_logical_apicid; | |||
129 | static cpumask_t cpu_sibling_setup_map; | 129 | static cpumask_t cpu_sibling_setup_map; |
130 | 130 | ||
131 | /* Set if we find a B stepping CPU */ | 131 | /* Set if we find a B stepping CPU */ |
132 | int __cpuinitdata smp_b_stepping; | 132 | static int __cpuinitdata smp_b_stepping; |
133 | 133 | ||
134 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) | 134 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) |
135 | 135 | ||
@@ -756,6 +756,14 @@ static void __cpuinit do_fork_idle(struct work_struct *work) | |||
756 | } | 756 | } |
757 | 757 | ||
758 | #ifdef CONFIG_X86_64 | 758 | #ifdef CONFIG_X86_64 |
759 | |||
760 | /* __ref because it's safe to call free_bootmem when after_bootmem == 0. */ | ||
761 | static void __ref free_bootmem_pda(struct x8664_pda *oldpda) | ||
762 | { | ||
763 | if (!after_bootmem) | ||
764 | free_bootmem((unsigned long)oldpda, sizeof(*oldpda)); | ||
765 | } | ||
766 | |||
759 | /* | 767 | /* |
760 | * Allocate node local memory for the AP pda. | 768 | * Allocate node local memory for the AP pda. |
761 | * | 769 | * |
@@ -784,8 +792,7 @@ int __cpuinit get_local_pda(int cpu) | |||
784 | 792 | ||
785 | if (oldpda) { | 793 | if (oldpda) { |
786 | memcpy(newpda, oldpda, size); | 794 | memcpy(newpda, oldpda, size); |
787 | if (!after_bootmem) | 795 | free_bootmem_pda(oldpda); |
788 | free_bootmem((unsigned long)oldpda, size); | ||
789 | } | 796 | } |
790 | 797 | ||
791 | newpda->in_bootmem = 0; | 798 | newpda->in_bootmem = 0; |
@@ -994,17 +1001,7 @@ int __cpuinit native_cpu_up(unsigned int cpu) | |||
994 | flush_tlb_all(); | 1001 | flush_tlb_all(); |
995 | low_mappings = 1; | 1002 | low_mappings = 1; |
996 | 1003 | ||
997 | #ifdef CONFIG_X86_PC | ||
998 | if (def_to_bigsmp && apicid > 8) { | ||
999 | printk(KERN_WARNING | ||
1000 | "More than 8 CPUs detected - skipping them.\n" | ||
1001 | "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n"); | ||
1002 | err = -1; | ||
1003 | } else | ||
1004 | err = do_boot_cpu(apicid, cpu); | ||
1005 | #else | ||
1006 | err = do_boot_cpu(apicid, cpu); | 1004 | err = do_boot_cpu(apicid, cpu); |
1007 | #endif | ||
1008 | 1005 | ||
1009 | zap_low_mappings(); | 1006 | zap_low_mappings(); |
1010 | low_mappings = 0; | 1007 | low_mappings = 0; |
@@ -1058,6 +1055,34 @@ static __init void disable_smp(void) | |||
1058 | static int __init smp_sanity_check(unsigned max_cpus) | 1055 | static int __init smp_sanity_check(unsigned max_cpus) |
1059 | { | 1056 | { |
1060 | preempt_disable(); | 1057 | preempt_disable(); |
1058 | |||
1059 | #if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32) | ||
1060 | if (def_to_bigsmp && nr_cpu_ids > 8) { | ||
1061 | unsigned int cpu; | ||
1062 | unsigned nr; | ||
1063 | |||
1064 | printk(KERN_WARNING | ||
1065 | "More than 8 CPUs detected - skipping them.\n" | ||
1066 | "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n"); | ||
1067 | |||
1068 | nr = 0; | ||
1069 | for_each_present_cpu(cpu) { | ||
1070 | if (nr >= 8) | ||
1071 | cpu_clear(cpu, cpu_present_map); | ||
1072 | nr++; | ||
1073 | } | ||
1074 | |||
1075 | nr = 0; | ||
1076 | for_each_possible_cpu(cpu) { | ||
1077 | if (nr >= 8) | ||
1078 | cpu_clear(cpu, cpu_possible_map); | ||
1079 | nr++; | ||
1080 | } | ||
1081 | |||
1082 | nr_cpu_ids = 8; | ||
1083 | } | ||
1084 | #endif | ||
1085 | |||
1061 | if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { | 1086 | if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { |
1062 | printk(KERN_WARNING "weird, boot CPU (#%d) not listed" | 1087 | printk(KERN_WARNING "weird, boot CPU (#%d) not listed" |
1063 | "by the BIOS.\n", hard_smp_processor_id()); | 1088 | "by the BIOS.\n", hard_smp_processor_id()); |
@@ -1196,6 +1221,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1196 | printk(KERN_INFO "CPU%d: ", 0); | 1221 | printk(KERN_INFO "CPU%d: ", 0); |
1197 | print_cpu_info(&cpu_data(0)); | 1222 | print_cpu_info(&cpu_data(0)); |
1198 | setup_boot_clock(); | 1223 | setup_boot_clock(); |
1224 | |||
1225 | if (is_uv_system()) | ||
1226 | uv_system_init(); | ||
1199 | out: | 1227 | out: |
1200 | preempt_enable(); | 1228 | preempt_enable(); |
1201 | } | 1229 | } |
@@ -1285,16 +1313,13 @@ __init void prefill_possible_map(void) | |||
1285 | if (!num_processors) | 1313 | if (!num_processors) |
1286 | num_processors = 1; | 1314 | num_processors = 1; |
1287 | 1315 | ||
1288 | #ifdef CONFIG_HOTPLUG_CPU | ||
1289 | if (additional_cpus == -1) { | 1316 | if (additional_cpus == -1) { |
1290 | if (disabled_cpus > 0) | 1317 | if (disabled_cpus > 0) |
1291 | additional_cpus = disabled_cpus; | 1318 | additional_cpus = disabled_cpus; |
1292 | else | 1319 | else |
1293 | additional_cpus = 0; | 1320 | additional_cpus = 0; |
1294 | } | 1321 | } |
1295 | #else | 1322 | |
1296 | additional_cpus = 0; | ||
1297 | #endif | ||
1298 | possible = num_processors + additional_cpus; | 1323 | possible = num_processors + additional_cpus; |
1299 | if (possible > NR_CPUS) | 1324 | if (possible > NR_CPUS) |
1300 | possible = NR_CPUS; | 1325 | possible = NR_CPUS; |
@@ -1386,17 +1411,3 @@ void __cpu_die(unsigned int cpu) | |||
1386 | BUG(); | 1411 | BUG(); |
1387 | } | 1412 | } |
1388 | #endif | 1413 | #endif |
1389 | |||
1390 | /* | ||
1391 | * If the BIOS enumerates physical processors before logical, | ||
1392 | * maxcpus=N at enumeration-time can be used to disable HT. | ||
1393 | */ | ||
1394 | static int __init parse_maxcpus(char *arg) | ||
1395 | { | ||
1396 | extern unsigned int maxcpus; | ||
1397 | |||
1398 | if (arg) | ||
1399 | maxcpus = simple_strtoul(arg, NULL, 0); | ||
1400 | return 0; | ||
1401 | } | ||
1402 | early_param("maxcpus", parse_maxcpus); | ||
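The prefill_possible_map() hunk above makes the additional_cpus accounting unconditional (the CONFIG_HOTPLUG_CPU special case goes away). A small stand-alone sketch of the resulting sizing rule follows; the NR_CPUS value and the example CPU counts are assumptions for illustration only.

#include <stdio.h>

#define NR_CPUS 512			/* illustrative Kconfig limit */

/* Mirrors the sizing logic in prefill_possible_map() (sketch only). */
int possible_cpus(int num_processors, int disabled_cpus, int additional_cpus)
{
	if (!num_processors)
		num_processors = 1;
	if (additional_cpus == -1)	/* not overridden on the command line */
		additional_cpus = disabled_cpus > 0 ? disabled_cpus : 0;

	int possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;
	return possible;
}

int main(void)
{
	/* e.g. 4 enabled CPUs, 2 BIOS-disabled ones, no additional_cpus= override */
	printf("possible CPUs: %d\n", possible_cpus(4, 2, -1));	/* prints 6 */
	return 0;
}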
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c index 99941b37eca0..397e309839dd 100644 --- a/arch/x86/kernel/smpcommon.c +++ b/arch/x86/kernel/smpcommon.c | |||
@@ -8,18 +8,21 @@ | |||
8 | DEFINE_PER_CPU(unsigned long, this_cpu_off); | 8 | DEFINE_PER_CPU(unsigned long, this_cpu_off); |
9 | EXPORT_PER_CPU_SYMBOL(this_cpu_off); | 9 | EXPORT_PER_CPU_SYMBOL(this_cpu_off); |
10 | 10 | ||
11 | /* Initialize the CPU's GDT. This is either the boot CPU doing itself | 11 | /* |
12 | (still using the master per-cpu area), or a CPU doing it for a | 12 | * Initialize the CPU's GDT. This is either the boot CPU doing itself |
13 | secondary which will soon come up. */ | 13 | * (still using the master per-cpu area), or a CPU doing it for a |
14 | * secondary which will soon come up. | ||
15 | */ | ||
14 | __cpuinit void init_gdt(int cpu) | 16 | __cpuinit void init_gdt(int cpu) |
15 | { | 17 | { |
16 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | 18 | struct desc_struct gdt; |
17 | 19 | ||
18 | pack_descriptor(&gdt[GDT_ENTRY_PERCPU], | 20 | pack_descriptor(&gdt, __per_cpu_offset[cpu], 0xFFFFF, |
19 | __per_cpu_offset[cpu], 0xFFFFF, | ||
20 | 0x2 | DESCTYPE_S, 0x8); | 21 | 0x2 | DESCTYPE_S, 0x8); |
22 | gdt.s = 1; | ||
21 | 23 | ||
22 | gdt[GDT_ENTRY_PERCPU].s = 1; | 24 | write_gdt_entry(get_cpu_gdt_table(cpu), |
25 | GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); | ||
23 | 26 | ||
24 | per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; | 27 | per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; |
25 | per_cpu(cpu_number, cpu) = cpu; | 28 | per_cpu(cpu_number, cpu) = cpu; |
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c index 7066cb855a60..1884a8d12bfa 100644 --- a/arch/x86/kernel/sys_i386_32.c +++ b/arch/x86/kernel/sys_i386_32.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
23 | #include <linux/unistd.h> | 23 | #include <linux/unistd.h> |
24 | 24 | ||
25 | #include <asm/syscalls.h> | ||
26 | |||
25 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | 27 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, |
26 | unsigned long prot, unsigned long flags, | 28 | unsigned long prot, unsigned long flags, |
27 | unsigned long fd, unsigned long pgoff) | 29 | unsigned long fd, unsigned long pgoff) |
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 3b360ef33817..6bc211accf08 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -13,15 +13,17 @@ | |||
13 | #include <linux/utsname.h> | 13 | #include <linux/utsname.h> |
14 | #include <linux/personality.h> | 14 | #include <linux/personality.h> |
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <linux/uaccess.h> | ||
16 | 17 | ||
17 | #include <asm/uaccess.h> | ||
18 | #include <asm/ia32.h> | 18 | #include <asm/ia32.h> |
19 | #include <asm/syscalls.h> | ||
19 | 20 | ||
20 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, | 21 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, |
21 | unsigned long fd, unsigned long off) | 22 | unsigned long prot, unsigned long flags, |
23 | unsigned long fd, unsigned long off) | ||
22 | { | 24 | { |
23 | long error; | 25 | long error; |
24 | struct file * file; | 26 | struct file *file; |
25 | 27 | ||
26 | error = -EINVAL; | 28 | error = -EINVAL; |
27 | if (off & ~PAGE_MASK) | 29 | if (off & ~PAGE_MASK) |
@@ -56,9 +58,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
56 | unmapped base down for this case. This can give | 58 | unmapped base down for this case. This can give |
57 | conflicts with the heap, but we assume that glibc | 59 | conflicts with the heap, but we assume that glibc |
58 | malloc knows how to fall back to mmap. Give it 1GB | 60 | malloc knows how to fall back to mmap. Give it 1GB |
59 | of playground for now. -AK */ | 61 | of playground for now. -AK */ |
60 | *begin = 0x40000000; | 62 | *begin = 0x40000000; |
61 | *end = 0x80000000; | 63 | *end = 0x80000000; |
62 | if (current->flags & PF_RANDOMIZE) { | 64 | if (current->flags & PF_RANDOMIZE) { |
63 | new_begin = randomize_range(*begin, *begin + 0x02000000, 0); | 65 | new_begin = randomize_range(*begin, *begin + 0x02000000, 0); |
64 | if (new_begin) | 66 | if (new_begin) |
@@ -66,9 +68,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
66 | } | 68 | } |
67 | } else { | 69 | } else { |
68 | *begin = TASK_UNMAPPED_BASE; | 70 | *begin = TASK_UNMAPPED_BASE; |
69 | *end = TASK_SIZE; | 71 | *end = TASK_SIZE; |
70 | } | 72 | } |
71 | } | 73 | } |
72 | 74 | ||
73 | unsigned long | 75 | unsigned long |
74 | arch_get_unmapped_area(struct file *filp, unsigned long addr, | 76 | arch_get_unmapped_area(struct file *filp, unsigned long addr, |
@@ -78,11 +80,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
78 | struct vm_area_struct *vma; | 80 | struct vm_area_struct *vma; |
79 | unsigned long start_addr; | 81 | unsigned long start_addr; |
80 | unsigned long begin, end; | 82 | unsigned long begin, end; |
81 | 83 | ||
82 | if (flags & MAP_FIXED) | 84 | if (flags & MAP_FIXED) |
83 | return addr; | 85 | return addr; |
84 | 86 | ||
85 | find_start_end(flags, &begin, &end); | 87 | find_start_end(flags, &begin, &end); |
86 | 88 | ||
87 | if (len > end) | 89 | if (len > end) |
88 | return -ENOMEM; | 90 | return -ENOMEM; |
@@ -96,12 +98,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
96 | } | 98 | } |
97 | if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) | 99 | if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) |
98 | && len <= mm->cached_hole_size) { | 100 | && len <= mm->cached_hole_size) { |
99 | mm->cached_hole_size = 0; | 101 | mm->cached_hole_size = 0; |
100 | mm->free_area_cache = begin; | 102 | mm->free_area_cache = begin; |
101 | } | 103 | } |
102 | addr = mm->free_area_cache; | 104 | addr = mm->free_area_cache; |
103 | if (addr < begin) | 105 | if (addr < begin) |
104 | addr = begin; | 106 | addr = begin; |
105 | start_addr = addr; | 107 | start_addr = addr; |
106 | 108 | ||
107 | full_search: | 109 | full_search: |
@@ -127,7 +129,7 @@ full_search: | |||
127 | return addr; | 129 | return addr; |
128 | } | 130 | } |
129 | if (addr + mm->cached_hole_size < vma->vm_start) | 131 | if (addr + mm->cached_hole_size < vma->vm_start) |
130 | mm->cached_hole_size = vma->vm_start - addr; | 132 | mm->cached_hole_size = vma->vm_start - addr; |
131 | 133 | ||
132 | addr = vma->vm_end; | 134 | addr = vma->vm_end; |
133 | } | 135 | } |
@@ -177,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
177 | vma = find_vma(mm, addr-len); | 179 | vma = find_vma(mm, addr-len); |
178 | if (!vma || addr <= vma->vm_start) | 180 | if (!vma || addr <= vma->vm_start) |
179 | /* remember the address as a hint for next time */ | 181 | /* remember the address as a hint for next time */ |
180 | return (mm->free_area_cache = addr-len); | 182 | return mm->free_area_cache = addr-len; |
181 | } | 183 | } |
182 | 184 | ||
183 | if (mm->mmap_base < len) | 185 | if (mm->mmap_base < len) |
@@ -194,7 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
194 | vma = find_vma(mm, addr); | 196 | vma = find_vma(mm, addr); |
195 | if (!vma || addr+len <= vma->vm_start) | 197 | if (!vma || addr+len <= vma->vm_start) |
196 | /* remember the address as a hint for next time */ | 198 | /* remember the address as a hint for next time */ |
197 | return (mm->free_area_cache = addr); | 199 | return mm->free_area_cache = addr; |
198 | 200 | ||
199 | /* remember the largest hole we saw so far */ | 201 | /* remember the largest hole we saw so far */ |
200 | if (addr + mm->cached_hole_size < vma->vm_start) | 202 | if (addr + mm->cached_hole_size < vma->vm_start) |
@@ -224,13 +226,13 @@ bottomup: | |||
224 | } | 226 | } |
225 | 227 | ||
226 | 228 | ||
227 | asmlinkage long sys_uname(struct new_utsname __user * name) | 229 | asmlinkage long sys_uname(struct new_utsname __user *name) |
228 | { | 230 | { |
229 | int err; | 231 | int err; |
230 | down_read(&uts_sem); | 232 | down_read(&uts_sem); |
231 | err = copy_to_user(name, utsname(), sizeof (*name)); | 233 | err = copy_to_user(name, utsname(), sizeof(*name)); |
232 | up_read(&uts_sem); | 234 | up_read(&uts_sem); |
233 | if (personality(current->personality) == PER_LINUX32) | 235 | if (personality(current->personality) == PER_LINUX32) |
234 | err |= copy_to_user(&name->machine, "i686", 5); | 236 | err |= copy_to_user(&name->machine, "i686", 5); |
235 | return err ? -EFAULT : 0; | 237 | return err ? -EFAULT : 0; |
236 | } | 238 | } |
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c index 170d43c17487..3d1be4f0fac5 100644 --- a/arch/x86/kernel/syscall_64.c +++ b/arch/x86/kernel/syscall_64.c | |||
@@ -8,12 +8,12 @@ | |||
8 | #define __NO_STUBS | 8 | #define __NO_STUBS |
9 | 9 | ||
10 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; | 10 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; |
11 | #undef _ASM_X86_64_UNISTD_H_ | 11 | #undef ASM_X86__UNISTD_64_H |
12 | #include <asm/unistd_64.h> | 12 | #include <asm/unistd_64.h> |
13 | 13 | ||
14 | #undef __SYSCALL | 14 | #undef __SYSCALL |
15 | #define __SYSCALL(nr, sym) [nr] = sym, | 15 | #define __SYSCALL(nr, sym) [nr] = sym, |
16 | #undef _ASM_X86_64_UNISTD_H_ | 16 | #undef ASM_X86__UNISTD_64_H |
17 | 17 | ||
18 | typedef void (*sys_call_ptr_t)(void); | 18 | typedef void (*sys_call_ptr_t)(void); |
19 | 19 | ||
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c index ffe3c664afc0..bbecf8b6bf96 100644 --- a/arch/x86/kernel/time_32.c +++ b/arch/x86/kernel/time_32.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <asm/arch_hooks.h> | 36 | #include <asm/arch_hooks.h> |
37 | #include <asm/hpet.h> | 37 | #include <asm/hpet.h> |
38 | #include <asm/time.h> | 38 | #include <asm/time.h> |
39 | #include <asm/timer.h> | ||
39 | 40 | ||
40 | #include "do_timer.h" | 41 | #include "do_timer.h" |
41 | 42 | ||
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index d0fbb7712ab0..8b8c0d6640fa 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/genapic.h> | 17 | #include <asm/genapic.h> |
18 | #include <asm/idle.h> | 18 | #include <asm/idle.h> |
19 | #include <asm/tsc.h> | 19 | #include <asm/tsc.h> |
20 | #include <asm/irq_vectors.h> | ||
20 | 21 | ||
21 | #include <mach_apic.h> | 22 | #include <mach_apic.h> |
22 | 23 | ||
@@ -783,7 +784,7 @@ static int __init uv_bau_init(void) | |||
783 | uv_init_blade(blade, node, cur_cpu); | 784 | uv_init_blade(blade, node, cur_cpu); |
784 | cur_cpu += uv_blade_nr_possible_cpus(blade); | 785 | cur_cpu += uv_blade_nr_possible_cpus(blade); |
785 | } | 786 | } |
786 | set_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); | 787 | alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); |
787 | uv_enable_timeouts(); | 788 | uv_enable_timeouts(); |
788 | 789 | ||
789 | return 0; | 790 | return 0; |
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index ab6bf375a307..6bb7b8579e70 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <asm/ldt.h> | 10 | #include <asm/ldt.h> |
11 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
12 | #include <asm/proto.h> | 12 | #include <asm/proto.h> |
13 | #include <asm/syscalls.h> | ||
13 | 14 | ||
14 | #include "tls.h" | 15 | #include "tls.h" |
15 | 16 | ||
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index 3f18d73f420c..7a31f104bef9 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c | |||
@@ -32,6 +32,8 @@ | |||
32 | #include <linux/bug.h> | 32 | #include <linux/bug.h> |
33 | #include <linux/nmi.h> | 33 | #include <linux/nmi.h> |
34 | #include <linux/mm.h> | 34 | #include <linux/mm.h> |
35 | #include <linux/smp.h> | ||
36 | #include <linux/io.h> | ||
35 | 37 | ||
36 | #if defined(CONFIG_EDAC) | 38 | #if defined(CONFIG_EDAC) |
37 | #include <linux/edac.h> | 39 | #include <linux/edac.h> |
@@ -45,9 +47,6 @@ | |||
45 | #include <asm/unwind.h> | 47 | #include <asm/unwind.h> |
46 | #include <asm/desc.h> | 48 | #include <asm/desc.h> |
47 | #include <asm/i387.h> | 49 | #include <asm/i387.h> |
48 | #include <asm/nmi.h> | ||
49 | #include <asm/smp.h> | ||
50 | #include <asm/io.h> | ||
51 | #include <asm/pgalloc.h> | 50 | #include <asm/pgalloc.h> |
52 | #include <asm/proto.h> | 51 | #include <asm/proto.h> |
53 | #include <asm/pda.h> | 52 | #include <asm/pda.h> |
@@ -85,7 +84,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) | |||
85 | 84 | ||
86 | void printk_address(unsigned long address, int reliable) | 85 | void printk_address(unsigned long address, int reliable) |
87 | { | 86 | { |
88 | printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address); | 87 | printk(" [<%016lx>] %s%pS\n", |
88 | address, reliable ? "" : "? ", (void *) address); | ||
89 | } | 89 | } |
90 | 90 | ||
91 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | 91 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, |
@@ -98,7 +98,8 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | |||
98 | [STACKFAULT_STACK - 1] = "#SS", | 98 | [STACKFAULT_STACK - 1] = "#SS", |
99 | [MCE_STACK - 1] = "#MC", | 99 | [MCE_STACK - 1] = "#MC", |
100 | #if DEBUG_STKSZ > EXCEPTION_STKSZ | 100 | #if DEBUG_STKSZ > EXCEPTION_STKSZ |
101 | [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]" | 101 | [N_EXCEPTION_STACKS ... |
102 | N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]" | ||
102 | #endif | 103 | #endif |
103 | }; | 104 | }; |
104 | unsigned k; | 105 | unsigned k; |
@@ -163,7 +164,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | |||
163 | } | 164 | } |
164 | 165 | ||
165 | /* | 166 | /* |
166 | * x86-64 can have up to three kernel stacks: | 167 | * x86-64 can have up to three kernel stacks: |
167 | * process stack | 168 | * process stack |
168 | * interrupt stack | 169 | * interrupt stack |
169 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack | 170 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack |
@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
219 | const struct stacktrace_ops *ops, void *data) | 220 | const struct stacktrace_ops *ops, void *data) |
220 | { | 221 | { |
221 | const unsigned cpu = get_cpu(); | 222 | const unsigned cpu = get_cpu(); |
222 | unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; | 223 | unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; |
223 | unsigned used = 0; | 224 | unsigned used = 0; |
224 | struct thread_info *tinfo; | 225 | struct thread_info *tinfo; |
225 | 226 | ||
@@ -237,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
237 | if (!bp) { | 238 | if (!bp) { |
238 | if (task == current) { | 239 | if (task == current) { |
239 | /* Grab bp right from our regs */ | 240 | /* Grab bp right from our regs */ |
240 | asm("movq %%rbp, %0" : "=r" (bp) :); | 241 | asm("movq %%rbp, %0" : "=r" (bp) : ); |
241 | } else { | 242 | } else { |
242 | /* bp is the last reg pushed by switch_to */ | 243 | /* bp is the last reg pushed by switch_to */ |
243 | bp = *(unsigned long *) task->thread.sp; | 244 | bp = *(unsigned long *) task->thread.sp; |
@@ -339,9 +340,8 @@ static void | |||
339 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, | 340 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, |
340 | unsigned long *stack, unsigned long bp, char *log_lvl) | 341 | unsigned long *stack, unsigned long bp, char *log_lvl) |
341 | { | 342 | { |
342 | printk("\nCall Trace:\n"); | 343 | printk("Call Trace:\n"); |
343 | dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); | 344 | dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); |
344 | printk("\n"); | ||
345 | } | 345 | } |
346 | 346 | ||
347 | void show_trace(struct task_struct *task, struct pt_regs *regs, | 347 | void show_trace(struct task_struct *task, struct pt_regs *regs, |
@@ -357,11 +357,15 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | |||
357 | unsigned long *stack; | 357 | unsigned long *stack; |
358 | int i; | 358 | int i; |
359 | const int cpu = smp_processor_id(); | 359 | const int cpu = smp_processor_id(); |
360 | unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr); | 360 | unsigned long *irqstack_end = |
361 | unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); | 361 | (unsigned long *) (cpu_pda(cpu)->irqstackptr); |
362 | unsigned long *irqstack = | ||
363 | (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); | ||
362 | 364 | ||
363 | // debugging aid: "show_stack(NULL, NULL);" prints the | 365 | /* |
364 | // back trace for this cpu. | 366 | * debugging aid: "show_stack(NULL, NULL);" prints the |
367 | * back trace for this cpu. | ||
368 | */ | ||
365 | 369 | ||
366 | if (sp == NULL) { | 370 | if (sp == NULL) { |
367 | if (task) | 371 | if (task) |
@@ -386,6 +390,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | |||
386 | printk(" %016lx", *stack++); | 390 | printk(" %016lx", *stack++); |
387 | touch_nmi_watchdog(); | 391 | touch_nmi_watchdog(); |
388 | } | 392 | } |
393 | printk("\n"); | ||
389 | show_trace_log_lvl(task, regs, sp, bp, log_lvl); | 394 | show_trace_log_lvl(task, regs, sp, bp, log_lvl); |
390 | } | 395 | } |
391 | 396 | ||
@@ -404,7 +409,7 @@ void dump_stack(void) | |||
404 | 409 | ||
405 | #ifdef CONFIG_FRAME_POINTER | 410 | #ifdef CONFIG_FRAME_POINTER |
406 | if (!bp) | 411 | if (!bp) |
407 | asm("movq %%rbp, %0" : "=r" (bp):); | 412 | asm("movq %%rbp, %0" : "=r" (bp) : ); |
408 | #endif | 413 | #endif |
409 | 414 | ||
410 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", | 415 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", |
@@ -414,7 +419,6 @@ void dump_stack(void) | |||
414 | init_utsname()->version); | 419 | init_utsname()->version); |
415 | show_trace(NULL, NULL, &stack, bp); | 420 | show_trace(NULL, NULL, &stack, bp); |
416 | } | 421 | } |
417 | |||
418 | EXPORT_SYMBOL(dump_stack); | 422 | EXPORT_SYMBOL(dump_stack); |
419 | 423 | ||
420 | void show_registers(struct pt_regs *regs) | 424 | void show_registers(struct pt_regs *regs) |
@@ -443,7 +447,6 @@ void show_registers(struct pt_regs *regs) | |||
443 | printk("Stack: "); | 447 | printk("Stack: "); |
444 | show_stack_log_lvl(NULL, regs, (unsigned long *)sp, | 448 | show_stack_log_lvl(NULL, regs, (unsigned long *)sp, |
445 | regs->bp, ""); | 449 | regs->bp, ""); |
446 | printk("\n"); | ||
447 | 450 | ||
448 | printk(KERN_EMERG "Code: "); | 451 | printk(KERN_EMERG "Code: "); |
449 | 452 | ||
@@ -493,7 +496,7 @@ unsigned __kprobes long oops_begin(void) | |||
493 | raw_local_irq_save(flags); | 496 | raw_local_irq_save(flags); |
494 | cpu = smp_processor_id(); | 497 | cpu = smp_processor_id(); |
495 | if (!__raw_spin_trylock(&die_lock)) { | 498 | if (!__raw_spin_trylock(&die_lock)) { |
496 | if (cpu == die_owner) | 499 | if (cpu == die_owner) |
497 | /* nested oops. should stop eventually */; | 500 | /* nested oops. should stop eventually */; |
498 | else | 501 | else |
499 | __raw_spin_lock(&die_lock); | 502 | __raw_spin_lock(&die_lock); |
@@ -638,7 +641,7 @@ kernel_trap: | |||
638 | } | 641 | } |
639 | 642 | ||
640 | #define DO_ERROR(trapnr, signr, str, name) \ | 643 | #define DO_ERROR(trapnr, signr, str, name) \ |
641 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | 644 | asmlinkage void do_##name(struct pt_regs *regs, long error_code) \ |
642 | { \ | 645 | { \ |
643 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 646 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
644 | == NOTIFY_STOP) \ | 647 | == NOTIFY_STOP) \ |
@@ -648,7 +651,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | |||
648 | } | 651 | } |
649 | 652 | ||
650 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ | 653 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ |
651 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | 654 | asmlinkage void do_##name(struct pt_regs *regs, long error_code) \ |
652 | { \ | 655 | { \ |
653 | siginfo_t info; \ | 656 | siginfo_t info; \ |
654 | info.si_signo = signr; \ | 657 | info.si_signo = signr; \ |
@@ -683,7 +686,7 @@ asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code) | |||
683 | preempt_conditional_cli(regs); | 686 | preempt_conditional_cli(regs); |
684 | } | 687 | } |
685 | 688 | ||
686 | asmlinkage void do_double_fault(struct pt_regs * regs, long error_code) | 689 | asmlinkage void do_double_fault(struct pt_regs *regs, long error_code) |
687 | { | 690 | { |
688 | static const char str[] = "double fault"; | 691 | static const char str[] = "double fault"; |
689 | struct task_struct *tsk = current; | 692 | struct task_struct *tsk = current; |
@@ -778,9 +781,10 @@ io_check_error(unsigned char reason, struct pt_regs *regs) | |||
778 | } | 781 | } |
779 | 782 | ||
780 | static notrace __kprobes void | 783 | static notrace __kprobes void |
781 | unknown_nmi_error(unsigned char reason, struct pt_regs * regs) | 784 | unknown_nmi_error(unsigned char reason, struct pt_regs *regs) |
782 | { | 785 | { |
783 | if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) | 786 | if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == |
787 | NOTIFY_STOP) | ||
784 | return; | 788 | return; |
785 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", | 789 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", |
786 | reason); | 790 | reason); |
@@ -882,7 +886,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) | |||
882 | else if (user_mode(eregs)) | 886 | else if (user_mode(eregs)) |
883 | regs = task_pt_regs(current); | 887 | regs = task_pt_regs(current); |
884 | /* Exception from kernel and interrupts are enabled. Move to | 888 | /* Exception from kernel and interrupts are enabled. Move to |
885 | kernel process stack. */ | 889 | kernel process stack. */ |
886 | else if (eregs->flags & X86_EFLAGS_IF) | 890 | else if (eregs->flags & X86_EFLAGS_IF) |
887 | regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); | 891 | regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); |
888 | if (eregs != regs) | 892 | if (eregs != regs) |
@@ -891,7 +895,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) | |||
891 | } | 895 | } |
892 | 896 | ||
893 | /* runs on IST stack. */ | 897 | /* runs on IST stack. */ |
894 | asmlinkage void __kprobes do_debug(struct pt_regs * regs, | 898 | asmlinkage void __kprobes do_debug(struct pt_regs *regs, |
895 | unsigned long error_code) | 899 | unsigned long error_code) |
896 | { | 900 | { |
897 | struct task_struct *tsk = current; | 901 | struct task_struct *tsk = current; |
@@ -1035,7 +1039,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs) | |||
1035 | 1039 | ||
1036 | asmlinkage void bad_intr(void) | 1040 | asmlinkage void bad_intr(void) |
1037 | { | 1041 | { |
1038 | printk("bad interrupt"); | 1042 | printk("bad interrupt"); |
1039 | } | 1043 | } |
1040 | 1044 | ||
1041 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | 1045 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) |
@@ -1047,7 +1051,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | |||
1047 | 1051 | ||
1048 | conditional_sti(regs); | 1052 | conditional_sti(regs); |
1049 | if (!user_mode(regs) && | 1053 | if (!user_mode(regs) && |
1050 | kernel_math_error(regs, "kernel simd math error", 19)) | 1054 | kernel_math_error(regs, "kernel simd math error", 19)) |
1051 | return; | 1055 | return; |
1052 | 1056 | ||
1053 | /* | 1057 | /* |
@@ -1092,7 +1096,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | |||
1092 | force_sig_info(SIGFPE, &info, task); | 1096 | force_sig_info(SIGFPE, &info, task); |
1093 | } | 1097 | } |
1094 | 1098 | ||
1095 | asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs) | 1099 | asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs) |
1096 | { | 1100 | { |
1097 | } | 1101 | } |
1098 | 1102 | ||
@@ -1131,7 +1135,14 @@ asmlinkage void math_state_restore(void) | |||
1131 | } | 1135 | } |
1132 | 1136 | ||
1133 | clts(); /* Allow maths ops (or we recurse) */ | 1137 | clts(); /* Allow maths ops (or we recurse) */ |
1134 | restore_fpu_checking(&me->thread.xstate->fxsave); | 1138 | /* |
1139 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. | ||
1140 | */ | ||
1141 | if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) { | ||
1142 | stts(); | ||
1143 | force_sig(SIGSEGV, me); | ||
1144 | return; | ||
1145 | } | ||
1135 | task_thread_info(me)->status |= TS_USEDFPU; | 1146 | task_thread_info(me)->status |= TS_USEDFPU; |
1136 | me->fpu_counter++; | 1147 | me->fpu_counter++; |
1137 | } | 1148 | } |
@@ -1142,8 +1153,10 @@ void __init trap_init(void) | |||
1142 | set_intr_gate(0, &divide_error); | 1153 | set_intr_gate(0, &divide_error); |
1143 | set_intr_gate_ist(1, &debug, DEBUG_STACK); | 1154 | set_intr_gate_ist(1, &debug, DEBUG_STACK); |
1144 | set_intr_gate_ist(2, &nmi, NMI_STACK); | 1155 | set_intr_gate_ist(2, &nmi, NMI_STACK); |
1145 | set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */ | 1156 | /* int3 can be called from all */ |
1146 | set_system_gate(4, &overflow); /* int4 can be called from all */ | 1157 | set_system_gate_ist(3, &int3, DEBUG_STACK); |
1158 | /* int4 can be called from all */ | ||
1159 | set_system_gate(4, &overflow); | ||
1147 | set_intr_gate(5, &bounds); | 1160 | set_intr_gate(5, &bounds); |
1148 | set_intr_gate(6, &invalid_op); | 1161 | set_intr_gate(6, &invalid_op); |
1149 | set_intr_gate(7, &device_not_available); | 1162 | set_intr_gate(7, &device_not_available); |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 7603c0553909..161bb850fc47 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -104,7 +104,7 @@ __setup("notsc", notsc_setup); | |||
104 | /* | 104 | /* |
105 | * Read TSC and the reference counters. Take care of SMI disturbance | 105 | * Read TSC and the reference counters. Take care of SMI disturbance |
106 | */ | 106 | */ |
107 | static u64 __init tsc_read_refs(u64 *pm, u64 *hpet) | 107 | static u64 tsc_read_refs(u64 *p, int hpet) |
108 | { | 108 | { |
109 | u64 t1, t2; | 109 | u64 t1, t2; |
110 | int i; | 110 | int i; |
@@ -112,9 +112,9 @@ static u64 __init tsc_read_refs(u64 *pm, u64 *hpet) | |||
112 | for (i = 0; i < MAX_RETRIES; i++) { | 112 | for (i = 0; i < MAX_RETRIES; i++) { |
113 | t1 = get_cycles(); | 113 | t1 = get_cycles(); |
114 | if (hpet) | 114 | if (hpet) |
115 | *hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF; | 115 | *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF; |
116 | else | 116 | else |
117 | *pm = acpi_pm_read_early(); | 117 | *p = acpi_pm_read_early(); |
118 | t2 = get_cycles(); | 118 | t2 = get_cycles(); |
119 | if ((t2 - t1) < SMI_TRESHOLD) | 119 | if ((t2 - t1) < SMI_TRESHOLD) |
120 | return t2; | 120 | return t2; |
@@ -122,80 +122,390 @@ static u64 __init tsc_read_refs(u64 *pm, u64 *hpet) | |||
122 | return ULLONG_MAX; | 122 | return ULLONG_MAX; |
123 | } | 123 | } |
124 | 124 | ||
125 | /** | 125 | /* |
126 | * native_calibrate_tsc - calibrate the tsc on boot | 126 | * Calculate the TSC frequency from HPET reference |
127 | */ | 127 | */ |
128 | unsigned long native_calibrate_tsc(void) | 128 | static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2) |
129 | { | 129 | { |
130 | unsigned long flags; | 130 | u64 tmp; |
131 | u64 tsc1, tsc2, tr1, tr2, delta, pm1, pm2, hpet1, hpet2; | ||
132 | int hpet = is_hpet_enabled(); | ||
133 | unsigned int tsc_khz_val = 0; | ||
134 | 131 | ||
135 | local_irq_save(flags); | 132 | if (hpet2 < hpet1) |
133 | hpet2 += 0x100000000ULL; | ||
134 | hpet2 -= hpet1; | ||
135 | tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); | ||
136 | do_div(tmp, 1000000); | ||
137 | do_div(deltatsc, tmp); | ||
138 | |||
139 | return (unsigned long) deltatsc; | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * Calculate the TSC frequency from PMTimer reference | ||
144 | */ | ||
145 | static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2) | ||
146 | { | ||
147 | u64 tmp; | ||
136 | 148 | ||
137 | tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL); | 149 | if (!pm1 && !pm2) |
150 | return ULONG_MAX; | ||
151 | |||
152 | if (pm2 < pm1) | ||
153 | pm2 += (u64)ACPI_PM_OVRRUN; | ||
154 | pm2 -= pm1; | ||
155 | tmp = pm2 * 1000000000LL; | ||
156 | do_div(tmp, PMTMR_TICKS_PER_SEC); | ||
157 | do_div(deltatsc, tmp); | ||
158 | |||
159 | return (unsigned long) deltatsc; | ||
160 | } | ||
161 | |||
162 | #define CAL_MS 10 | ||
163 | #define CAL_LATCH (CLOCK_TICK_RATE / (1000 / CAL_MS)) | ||
164 | #define CAL_PIT_LOOPS 1000 | ||
165 | |||
166 | #define CAL2_MS 50 | ||
167 | #define CAL2_LATCH (CLOCK_TICK_RATE / (1000 / CAL2_MS)) | ||
168 | #define CAL2_PIT_LOOPS 5000 | ||
169 | |||
170 | |||
171 | /* | ||
172 | * Try to calibrate the TSC against the Programmable | ||
173 | * Interrupt Timer and return the frequency of the TSC | ||
174 | * in kHz. | ||
175 | * | ||
176 | * Return ULONG_MAX on failure to calibrate. | ||
177 | */ | ||
178 | static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin) | ||
179 | { | ||
180 | u64 tsc, t1, t2, delta; | ||
181 | unsigned long tscmin, tscmax; | ||
182 | int pitcnt; | ||
138 | 183 | ||
184 | /* Set the Gate high, disable speaker */ | ||
139 | outb((inb(0x61) & ~0x02) | 0x01, 0x61); | 185 | outb((inb(0x61) & ~0x02) | 0x01, 0x61); |
140 | 186 | ||
187 | /* | ||
188 | * Setup CTC channel 2* for mode 0, (interrupt on terminal | ||
189 | * count mode), binary count. Set the latch register to 50ms | ||
190 | * (LSB then MSB) to begin countdown. | ||
191 | */ | ||
141 | outb(0xb0, 0x43); | 192 | outb(0xb0, 0x43); |
142 | outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42); | 193 | outb(latch & 0xff, 0x42); |
143 | outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42); | 194 | outb(latch >> 8, 0x42); |
144 | tr1 = get_cycles(); | 195 | |
145 | while ((inb(0x61) & 0x20) == 0); | 196 | tsc = t1 = t2 = get_cycles(); |
146 | tr2 = get_cycles(); | 197 | |
198 | pitcnt = 0; | ||
199 | tscmax = 0; | ||
200 | tscmin = ULONG_MAX; | ||
201 | while ((inb(0x61) & 0x20) == 0) { | ||
202 | t2 = get_cycles(); | ||
203 | delta = t2 - tsc; | ||
204 | tsc = t2; | ||
205 | if ((unsigned long) delta < tscmin) | ||
206 | tscmin = (unsigned int) delta; | ||
207 | if ((unsigned long) delta > tscmax) | ||
208 | tscmax = (unsigned int) delta; | ||
209 | pitcnt++; | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * Sanity checks: | ||
214 | * | ||
215 | * If we were not able to read the PIT more than loopmin | ||
216 | * times, then we have been hit by a massive SMI | ||
217 | * | ||
218 | * If the maximum is 10 times larger than the minimum, | ||
219 | * then we got hit by an SMI as well. | ||
220 | */ | ||
221 | if (pitcnt < loopmin || tscmax > 10 * tscmin) | ||
222 | return ULONG_MAX; | ||
223 | |||
224 | /* Calculate the PIT value */ | ||
225 | delta = t2 - t1; | ||
226 | do_div(delta, ms); | ||
227 | return delta; | ||
228 | } | ||
147 | 229 | ||
148 | tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL); | 230 | /* |
231 | * This reads the current MSB of the PIT counter, and | ||
232 | * checks if we are running on sufficiently fast and | ||
233 | * non-virtualized hardware. | ||
234 | * | ||
235 | * Our expectations are: | ||
236 | * | ||
237 | * - the PIT is running at roughly 1.19MHz | ||
238 | * | ||
239 | * - each IO is going to take about 1us on real hardware, | ||
240 | * but we allow it to be much faster (by a factor of 10) or | ||
241 | * _slightly_ slower (ie we allow up to a 2us read+counter | ||
242 | * update - anything else implies a unacceptably slow CPU | ||
243 | * or PIT for the fast calibration to work. | ||
244 | * | ||
245 | * - with 256 PIT ticks to read the value, we have 214us to | ||
246 | * see the same MSB (and overhead like doing a single TSC | ||
247 | * read per MSB value etc). | ||
248 | * | ||
249 | * - We're doing 2 reads per loop (LSB, MSB), and we expect | ||
250 | * them each to take about a microsecond on real hardware. | ||
251 | * So we expect a count value of around 100. But we'll be | ||
252 | * generous, and accept anything over 50. | ||
253 | * | ||
254 | * - if the PIT is stuck, and we see *many* more reads, we | ||
255 | * return early (and the next caller of pit_expect_msb() | ||
256 | * then consider it a failure when they don't see the | ||
257 | * next expected value). | ||
258 | * | ||
259 | * These expectations mean that we know that we have seen the | ||
260 | * transition from one expected value to another with a fairly | ||
261 | * high accuracy, and we didn't miss any events. We can thus | ||
262 | * use the TSC value at the transitions to calculate a pretty | ||
263 | * good value for the TSC frequency. | ||
264 | */ | ||
265 | static inline int pit_expect_msb(unsigned char val) | ||
266 | { | ||
267 | int count = 0; | ||
149 | 268 | ||
269 | for (count = 0; count < 50000; count++) { | ||
270 | /* Ignore LSB */ | ||
271 | inb(0x42); | ||
272 | if (inb(0x42) != val) | ||
273 | break; | ||
274 | } | ||
275 | return count > 50; | ||
276 | } | ||
277 | |||
278 | /* | ||
279 | * How many MSB values do we want to see? We aim for a | ||
280 | * 15ms calibration, which assuming a 2us counter read | ||
281 | * error should give us roughly 150 ppm precision for | ||
282 | * the calibration. | ||
283 | */ | ||
284 | #define QUICK_PIT_MS 15 | ||
285 | #define QUICK_PIT_ITERATIONS (QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256) | ||
286 | |||
287 | static unsigned long quick_pit_calibrate(void) | ||
288 | { | ||
289 | /* Set the Gate high, disable speaker */ | ||
290 | outb((inb(0x61) & ~0x02) | 0x01, 0x61); | ||
291 | |||
292 | /* | ||
293 | * Counter 2, mode 0 (one-shot), binary count | ||
294 | * | ||
295 | * NOTE! Mode 2 decrements by two (and then the | ||
296 | * output is flipped each time, giving the same | ||
297 | * final output frequency as a decrement-by-one), | ||
298 | * so mode 0 is much better when looking at the | ||
299 | * individual counts. | ||
300 | */ | ||
301 | outb(0xb0, 0x43); | ||
302 | |||
303 | /* Start at 0xffff */ | ||
304 | outb(0xff, 0x42); | ||
305 | outb(0xff, 0x42); | ||
306 | |||
307 | if (pit_expect_msb(0xff)) { | ||
308 | int i; | ||
309 | u64 t1, t2, delta; | ||
310 | unsigned char expect = 0xfe; | ||
311 | |||
312 | t1 = get_cycles(); | ||
313 | for (i = 0; i < QUICK_PIT_ITERATIONS; i++, expect--) { | ||
314 | if (!pit_expect_msb(expect)) | ||
315 | goto failed; | ||
316 | } | ||
317 | t2 = get_cycles(); | ||
318 | |||
319 | /* | ||
320 | * Make sure we can rely on the second TSC timestamp: | ||
321 | */ | ||
322 | if (!pit_expect_msb(expect)) | ||
323 | goto failed; | ||
324 | |||
325 | /* | ||
326 | * Ok, if we get here, then we've seen the | ||
327 | * MSB of the PIT decrement QUICK_PIT_ITERATIONS | ||
328 | * times, and each MSB had many hits, so we never | ||
329 | * had any sudden jumps. | ||
330 | * | ||
331 | * As a result, we can depend on there not being | ||
332 | * any odd delays anywhere, and the TSC reads are | ||
333 | * reliable. | ||
334 | * | ||
335 | * kHz = ticks / time-in-seconds / 1000; | ||
336 | * kHz = (t2 - t1) / (QPI * 256 / PIT_TICK_RATE) / 1000 | ||
337 | * kHz = ((t2 - t1) * PIT_TICK_RATE) / (QPI * 256 * 1000) | ||
338 | */ | ||
339 | delta = (t2 - t1)*PIT_TICK_RATE; | ||
340 | do_div(delta, QUICK_PIT_ITERATIONS*256*1000); | ||
341 | printk("Fast TSC calibration using PIT\n"); | ||
342 | return delta; | ||
343 | } | ||
344 | failed: | ||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | /** | ||
349 | * native_calibrate_tsc - calibrate the tsc on boot | ||
350 | */ | ||
351 | unsigned long native_calibrate_tsc(void) | ||
352 | { | ||
353 | u64 tsc1, tsc2, delta, ref1, ref2; | ||
354 | unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX; | ||
355 | unsigned long flags, latch, ms, fast_calibrate; | ||
356 | int hpet = is_hpet_enabled(), i, loopmin; | ||
357 | |||
358 | local_irq_save(flags); | ||
359 | fast_calibrate = quick_pit_calibrate(); | ||
150 | local_irq_restore(flags); | 360 | local_irq_restore(flags); |
361 | if (fast_calibrate) | ||
362 | return fast_calibrate; | ||
151 | 363 | ||
152 | /* | 364 | /* |
153 | * Preset the result with the raw and inaccurate PIT | 365 | * Run 5 calibration loops to get the lowest frequency value |
154 | * calibration value | 366 | * (the best estimate). We use two different calibration modes |
367 | * here: | ||
368 | * | ||
369 | * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and | ||
370 | * load a timeout of 50ms. We read the time right after we | ||
371 | * started the timer and wait until the PIT count down reaches | ||
372 | * zero. In each wait loop iteration we read the TSC and check | ||
373 | * the delta to the previous read. We keep track of the min | ||
374 | * and max values of that delta. The delta is mostly defined | ||
376 | * by the IO time of the PIT access, so we can detect when an | ||
377 | * SMI/SMM disturbance happened between the two reads. If the | ||
377 | * maximum time is significantly larger than the minimum time, | ||
378 | * then we discard the result and have another try. | ||
379 | * | ||
380 | * 2) Reference counter. If available we use the HPET or the | ||
381 | * PMTIMER as a reference to check the sanity of that value. | ||
382 | * We use separate TSC readouts and check inside of the | ||
383 | * reference read for a SMI/SMM disturbance. We dicard | ||
384 | * disturbed values here as well. We do that around the PIT | ||
385 | * calibration delay loop as we have to wait for a certain | ||
386 | * amount of time anyway. | ||
155 | */ | 387 | */ |
156 | delta = (tr2 - tr1); | 388 | |
157 | do_div(delta, 50); | 389 | /* Preset PIT loop values */ |
158 | tsc_khz_val = delta; | 390 | latch = CAL_LATCH; |
159 | 391 | ms = CAL_MS; | |
160 | /* hpet or pmtimer available ? */ | 392 | loopmin = CAL_PIT_LOOPS; |
161 | if (!hpet && !pm1 && !pm2) { | 393 | |
162 | printk(KERN_INFO "TSC calibrated against PIT\n"); | 394 | for (i = 0; i < 3; i++) { |
163 | goto out; | 395 | unsigned long tsc_pit_khz; |
396 | |||
397 | /* | ||
398 | * Read the start value and the reference count of | ||
399 | * hpet/pmtimer when available. Then do the PIT | ||
400 | * calibration, which will take at least 50ms, and | ||
401 | * read the end value. | ||
402 | */ | ||
403 | local_irq_save(flags); | ||
404 | tsc1 = tsc_read_refs(&ref1, hpet); | ||
405 | tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin); | ||
406 | tsc2 = tsc_read_refs(&ref2, hpet); | ||
407 | local_irq_restore(flags); | ||
408 | |||
409 | /* Pick the lowest PIT TSC calibration so far */ | ||
410 | tsc_pit_min = min(tsc_pit_min, tsc_pit_khz); | ||
411 | |||
412 | /* hpet or pmtimer available ? */ | ||
413 | if (!hpet && !ref1 && !ref2) | ||
414 | continue; | ||
415 | |||
416 | /* Check, whether the sampling was disturbed by an SMI */ | ||
417 | if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) | ||
418 | continue; | ||
419 | |||
420 | tsc2 = (tsc2 - tsc1) * 1000000LL; | ||
421 | if (hpet) | ||
422 | tsc2 = calc_hpet_ref(tsc2, ref1, ref2); | ||
423 | else | ||
424 | tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2); | ||
425 | |||
426 | tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2); | ||
427 | |||
428 | /* Check the reference deviation */ | ||
429 | delta = ((u64) tsc_pit_min) * 100; | ||
430 | do_div(delta, tsc_ref_min); | ||
431 | |||
432 | /* | ||
433 | * If both calibration results are inside a 10% window | ||
434 | * then we can be sure, that the calibration | ||
435 | * succeeded. We break out of the loop right away. We | ||
436 | * use the reference value, as it is more precise. | ||
437 | */ | ||
438 | if (delta >= 90 && delta <= 110) { | ||
439 | printk(KERN_INFO | ||
440 | "TSC: PIT calibration matches %s. %d loops\n", | ||
441 | hpet ? "HPET" : "PMTIMER", i + 1); | ||
442 | return tsc_ref_min; | ||
443 | } | ||
444 | |||
445 | /* | ||
446 | * Check whether PIT failed more than once. This | ||
447 | * happens in virtualized environments. We need to | ||
448 | * give the virtual PC a slightly longer timeframe for | ||
449 | * the HPET/PMTIMER to make the result precise. | ||
450 | */ | ||
451 | if (i == 1 && tsc_pit_min == ULONG_MAX) { | ||
452 | latch = CAL2_LATCH; | ||
453 | ms = CAL2_MS; | ||
454 | loopmin = CAL2_PIT_LOOPS; | ||
455 | } | ||
164 | } | 456 | } |
165 | 457 | ||
166 | /* Check, whether the sampling was disturbed by an SMI */ | 458 | /* |
167 | if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) { | 459 | * Now check the results. |
168 | printk(KERN_WARNING "TSC calibration disturbed by SMI, " | 460 | */ |
169 | "using PIT calibration result\n"); | 461 | if (tsc_pit_min == ULONG_MAX) { |
170 | goto out; | 462 | /* PIT gave no useful value */ |
463 | printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n"); | ||
464 | |||
465 | /* We don't have an alternative source, disable TSC */ | ||
466 | if (!hpet && !ref1 && !ref2) { | ||
467 | printk("TSC: No reference (HPET/PMTIMER) available\n"); | ||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | /* The alternative source failed as well, disable TSC */ | ||
472 | if (tsc_ref_min == ULONG_MAX) { | ||
473 | printk(KERN_WARNING "TSC: HPET/PMTIMER calibration " | ||
474 | "failed.\n"); | ||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | /* Use the alternative source */ | ||
479 | printk(KERN_INFO "TSC: using %s reference calibration\n", | ||
480 | hpet ? "HPET" : "PMTIMER"); | ||
481 | |||
482 | return tsc_ref_min; | ||
171 | } | 483 | } |
172 | 484 | ||
173 | tsc2 = (tsc2 - tsc1) * 1000000LL; | 485 | /* We don't have an alternative source, use the PIT calibration value */ |
174 | 486 | if (!hpet && !ref1 && !ref2) { | |
175 | if (hpet) { | 487 | printk(KERN_INFO "TSC: Using PIT calibration value\n"); |
176 | printk(KERN_INFO "TSC calibrated against HPET\n"); | 488 | return tsc_pit_min; |
177 | if (hpet2 < hpet1) | ||
178 | hpet2 += 0x100000000ULL; | ||
179 | hpet2 -= hpet1; | ||
180 | tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); | ||
181 | do_div(tsc1, 1000000); | ||
182 | } else { | ||
183 | printk(KERN_INFO "TSC calibrated against PM_TIMER\n"); | ||
184 | if (pm2 < pm1) | ||
185 | pm2 += (u64)ACPI_PM_OVRRUN; | ||
186 | pm2 -= pm1; | ||
187 | tsc1 = pm2 * 1000000000LL; | ||
188 | do_div(tsc1, PMTMR_TICKS_PER_SEC); | ||
189 | } | 489 | } |
190 | 490 | ||
191 | do_div(tsc2, tsc1); | 491 | /* The alternative source failed, use the PIT calibration value */ |
192 | tsc_khz_val = tsc2; | 492 | if (tsc_ref_min == ULONG_MAX) { |
493 | printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. " | ||
494 | "Using PIT calibration\n"); | ||
495 | return tsc_pit_min; | ||
496 | } | ||
193 | 497 | ||
194 | out: | 498 | /* |
195 | return tsc_khz_val; | 499 | * The calibration values differ too much. In doubt, we use |
500 | * the PIT value as we know that there are PMTIMERs around | ||
501 | * running at double speed. At least we let the user know: | ||
502 | */ | ||
503 | printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n", | ||
504 | hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min); | ||
505 | printk(KERN_INFO "TSC: Using PIT calibration value\n"); | ||
506 | return tsc_pit_min; | ||
196 | } | 507 | } |
197 | 508 | ||
198 | |||
199 | #ifdef CONFIG_X86_32 | 509 | #ifdef CONFIG_X86_32 |
200 | /* Only called from the Powernow K7 cpu freq driver */ | 510 | /* Only called from the Powernow K7 cpu freq driver */ |
201 | int recalibrate_cpu_khz(void) | 511 | int recalibrate_cpu_khz(void) |
@@ -314,7 +624,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | |||
314 | mark_tsc_unstable("cpufreq changes"); | 624 | mark_tsc_unstable("cpufreq changes"); |
315 | } | 625 | } |
316 | 626 | ||
317 | set_cyc2ns_scale(tsc_khz_ref, freq->cpu); | 627 | set_cyc2ns_scale(tsc_khz, freq->cpu); |
318 | 628 | ||
319 | return 0; | 629 | return 0; |
320 | } | 630 | } |
@@ -325,6 +635,10 @@ static struct notifier_block time_cpufreq_notifier_block = { | |||
325 | 635 | ||
326 | static int __init cpufreq_tsc(void) | 636 | static int __init cpufreq_tsc(void) |
327 | { | 637 | { |
638 | if (!cpu_has_tsc) | ||
639 | return 0; | ||
640 | if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) | ||
641 | return 0; | ||
328 | cpufreq_register_notifier(&time_cpufreq_notifier_block, | 642 | cpufreq_register_notifier(&time_cpufreq_notifier_block, |
329 | CPUFREQ_TRANSITION_NOTIFIER); | 643 | CPUFREQ_TRANSITION_NOTIFIER); |
330 | return 0; | 644 | return 0; |
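The tsc.c changes above cross-check the PIT calibration against an HPET/PMTIMER reference and accept the pair only when they agree within a 10% window, otherwise walking a fallback chain (reference only, PIT only, or give up). A minimal userspace sketch of that decision chain, with made-up kHz values and ULONG_MAX marking a failed source (function and variable names here are illustrative, not the kernel's):

    #include <limits.h>
    #include <stdio.h>

    /* Hypothetical calibration results in kHz; ULONG_MAX means "source failed". */
    static unsigned long pick_tsc_khz(unsigned long pit_khz, unsigned long ref_khz,
                                      int have_ref)
    {
        unsigned long delta;

        if (pit_khz == ULONG_MAX) {             /* PIT gave nothing useful */
            if (!have_ref || ref_khz == ULONG_MAX)
                return 0;                       /* no usable source at all */
            return ref_khz;                     /* fall back to HPET/PMTIMER */
        }
        if (!have_ref || ref_khz == ULONG_MAX)
            return pit_khz;                     /* reference unusable, keep PIT */

        delta = pit_khz * 100 / ref_khz;        /* PIT as a percentage of the reference */
        if (delta >= 90 && delta <= 110)
            return ref_khz;                     /* both agree: prefer the more precise one */
        return pit_khz;                         /* disagreement: distrust the reference */
    }

    int main(void)
    {
        printf("%lu\n", pick_tsc_khz(2393000, 2400123, 1)); /* agree -> 2400123 */
        printf("%lu\n", pick_tsc_khz(2393000, 4800000, 1)); /* 2x PMTIMER -> 2393000 */
        return 0;
    }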
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 0577825cf89b..9ffb01c31c40 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c | |||
@@ -88,11 +88,9 @@ static __cpuinit void check_tsc_warp(void) | |||
88 | __raw_spin_unlock(&sync_lock); | 88 | __raw_spin_unlock(&sync_lock); |
89 | } | 89 | } |
90 | } | 90 | } |
91 | if (!(now-start)) { | 91 | WARN(!(now-start), |
92 | printk("Warning: zero tsc calibration delta: %Ld [max: %Ld]\n", | 92 | "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n", |
93 | now-start, end-start); | 93 | now-start, end-start); |
94 | WARN_ON(1); | ||
95 | } | ||
96 | } | 94 | } |
97 | 95 | ||
98 | /* | 96 | /* |
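The tsc_sync.c hunk folds an open-coded if/printk/WARN_ON(1) sequence into a single WARN(cond, fmt, ...), which evaluates to the condition and prints only when it is true. A rough userspace mimic of that shape (GCC statement-expression extensions, no backtrace, purely illustrative):

    #include <stdio.h>

    /* Illustrative stand-in for the kernel's WARN(); the real one also dumps a backtrace. */
    #define WARN(cond, fmt, ...) ({                         \
        int __c = !!(cond);                                 \
        if (__c)                                            \
            fprintf(stderr, fmt, ##__VA_ARGS__);            \
        __c;                                                \
    })

    int main(void)
    {
        long long start = 100, now = 100, end = 150;

        WARN(!(now - start),
             "Warning: zero tsc calibration delta: %lld [max: %lld]\n",
             now - start, end - start);
        return 0;
    }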
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index 41e01b145c48..61a97e616f70 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c | |||
@@ -25,45 +25,31 @@ | |||
25 | #include <asm/visws/cobalt.h> | 25 | #include <asm/visws/cobalt.h> |
26 | #include <asm/visws/piix4.h> | 26 | #include <asm/visws/piix4.h> |
27 | #include <asm/arch_hooks.h> | 27 | #include <asm/arch_hooks.h> |
28 | #include <asm/io_apic.h> | ||
28 | #include <asm/fixmap.h> | 29 | #include <asm/fixmap.h> |
29 | #include <asm/reboot.h> | 30 | #include <asm/reboot.h> |
30 | #include <asm/setup.h> | 31 | #include <asm/setup.h> |
31 | #include <asm/e820.h> | 32 | #include <asm/e820.h> |
32 | #include <asm/smp.h> | ||
33 | #include <asm/io.h> | 33 | #include <asm/io.h> |
34 | 34 | ||
35 | #include <mach_ipi.h> | 35 | #include <mach_ipi.h> |
36 | 36 | ||
37 | #include "mach_apic.h" | 37 | #include "mach_apic.h" |
38 | 38 | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/smp.h> | ||
41 | |||
42 | #include <linux/kernel_stat.h> | 39 | #include <linux/kernel_stat.h> |
43 | #include <linux/interrupt.h> | ||
44 | #include <linux/init.h> | ||
45 | 40 | ||
46 | #include <asm/io.h> | ||
47 | #include <asm/apic.h> | ||
48 | #include <asm/i8259.h> | 41 | #include <asm/i8259.h> |
49 | #include <asm/irq_vectors.h> | 42 | #include <asm/irq_vectors.h> |
50 | #include <asm/visws/cobalt.h> | ||
51 | #include <asm/visws/lithium.h> | 43 | #include <asm/visws/lithium.h> |
52 | #include <asm/visws/piix4.h> | ||
53 | 44 | ||
54 | #include <linux/sched.h> | 45 | #include <linux/sched.h> |
55 | #include <linux/kernel.h> | 46 | #include <linux/kernel.h> |
56 | #include <linux/init.h> | ||
57 | #include <linux/pci.h> | 47 | #include <linux/pci.h> |
58 | #include <linux/pci_ids.h> | 48 | #include <linux/pci_ids.h> |
59 | 49 | ||
60 | extern int no_broadcast; | 50 | extern int no_broadcast; |
61 | 51 | ||
62 | #include <asm/io.h> | ||
63 | #include <asm/apic.h> | 52 | #include <asm/apic.h> |
64 | #include <asm/arch_hooks.h> | ||
65 | #include <asm/visws/cobalt.h> | ||
66 | #include <asm/visws/lithium.h> | ||
67 | 53 | ||
68 | char visws_board_type = -1; | 54 | char visws_board_type = -1; |
69 | char visws_board_rev = -1; | 55 | char visws_board_rev = -1; |
@@ -184,8 +170,6 @@ static int __init visws_get_smp_config(unsigned int early) | |||
184 | return 1; | 170 | return 1; |
185 | } | 171 | } |
186 | 172 | ||
187 | extern unsigned int __cpuinitdata maxcpus; | ||
188 | |||
189 | /* | 173 | /* |
190 | * The Visual Workstation is Intel MP compliant in the hardware | 174 | * The Visual Workstation is Intel MP compliant in the hardware |
191 | * sense, but it doesn't have a BIOS(-configuration table). | 175 | * sense, but it doesn't have a BIOS(-configuration table). |
@@ -244,8 +228,8 @@ static int __init visws_find_smp_config(unsigned int reserve) | |||
244 | ncpus = CO_CPU_MAX; | 228 | ncpus = CO_CPU_MAX; |
245 | } | 229 | } |
246 | 230 | ||
247 | if (ncpus > maxcpus) | 231 | if (ncpus > setup_max_cpus) |
248 | ncpus = maxcpus; | 232 | ncpus = setup_max_cpus; |
249 | 233 | ||
250 | #ifdef CONFIG_X86_LOCAL_APIC | 234 | #ifdef CONFIG_X86_LOCAL_APIC |
251 | smp_found_config = 1; | 235 | smp_found_config = 1; |
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 38f566fa27d2..4eeb5cf9720d 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <asm/io.h> | 46 | #include <asm/io.h> |
47 | #include <asm/tlbflush.h> | 47 | #include <asm/tlbflush.h> |
48 | #include <asm/irq.h> | 48 | #include <asm/irq.h> |
49 | #include <asm/syscalls.h> | ||
49 | 50 | ||
50 | /* | 51 | /* |
51 | * Known problems: | 52 | * Known problems: |
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 6ca515d6db54..8c9ad02af5a2 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c | |||
@@ -235,7 +235,7 @@ static void vmi_write_ldt_entry(struct desc_struct *dt, int entry, | |||
235 | const void *desc) | 235 | const void *desc) |
236 | { | 236 | { |
237 | u32 *ldt_entry = (u32 *)desc; | 237 | u32 *ldt_entry = (u32 *)desc; |
238 | vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[1]); | 238 | vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]); |
239 | } | 239 | } |
240 | 240 | ||
241 | static void vmi_load_sp0(struct tss_struct *tss, | 241 | static void vmi_load_sp0(struct tss_struct *tss, |
@@ -393,13 +393,13 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type) | |||
393 | } | 393 | } |
394 | #endif | 394 | #endif |
395 | 395 | ||
396 | static void vmi_allocate_pte(struct mm_struct *mm, u32 pfn) | 396 | static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn) |
397 | { | 397 | { |
398 | vmi_set_page_type(pfn, VMI_PAGE_L1); | 398 | vmi_set_page_type(pfn, VMI_PAGE_L1); |
399 | vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); | 399 | vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); |
400 | } | 400 | } |
401 | 401 | ||
402 | static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn) | 402 | static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn) |
403 | { | 403 | { |
404 | /* | 404 | /* |
405 | * This call comes in very early, before mem_map is setup. | 405 | * This call comes in very early, before mem_map is setup. |
@@ -410,20 +410,20 @@ static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn) | |||
410 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); | 410 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); |
411 | } | 411 | } |
412 | 412 | ||
413 | static void vmi_allocate_pmd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count) | 413 | static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count) |
414 | { | 414 | { |
415 | vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE); | 415 | vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE); |
416 | vmi_check_page_type(clonepfn, VMI_PAGE_L2); | 416 | vmi_check_page_type(clonepfn, VMI_PAGE_L2); |
417 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); | 417 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); |
418 | } | 418 | } |
419 | 419 | ||
420 | static void vmi_release_pte(u32 pfn) | 420 | static void vmi_release_pte(unsigned long pfn) |
421 | { | 421 | { |
422 | vmi_ops.release_page(pfn, VMI_PAGE_L1); | 422 | vmi_ops.release_page(pfn, VMI_PAGE_L1); |
423 | vmi_set_page_type(pfn, VMI_PAGE_NORMAL); | 423 | vmi_set_page_type(pfn, VMI_PAGE_NORMAL); |
424 | } | 424 | } |
425 | 425 | ||
426 | static void vmi_release_pmd(u32 pfn) | 426 | static void vmi_release_pmd(unsigned long pfn) |
427 | { | 427 | { |
428 | vmi_ops.release_page(pfn, VMI_PAGE_L2); | 428 | vmi_ops.release_page(pfn, VMI_PAGE_L2); |
429 | vmi_set_page_type(pfn, VMI_PAGE_NORMAL); | 429 | vmi_set_page_type(pfn, VMI_PAGE_NORMAL); |
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index cdb2363697d2..af5bdad84604 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S | |||
@@ -209,3 +209,11 @@ SECTIONS | |||
209 | 209 | ||
210 | DWARF_DEBUG | 210 | DWARF_DEBUG |
211 | } | 211 | } |
212 | |||
213 | #ifdef CONFIG_KEXEC | ||
214 | /* Link time checks */ | ||
215 | #include <asm/kexec.h> | ||
216 | |||
217 | ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, | ||
218 | "kexec control code size is too big") | ||
219 | #endif | ||
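The linker-script addition above turns an oversized kexec control blob into a link failure rather than a silent runtime overflow. A userspace analog of the same build-time size check, using a C11 static assertion on a hypothetical blob and slot size (both names are made up for the sketch):

    #include <assert.h>   /* static_assert (C11) */

    /* Hypothetical fixed slot and payload standing in for the kexec control page. */
    #define CONTROL_SLOT_SIZE 2048

    static const unsigned char control_blob[1500] = { 0x90 /* ... */ };

    /* Fails the build, not the boot, if the blob ever outgrows its slot. */
    static_assert(sizeof(control_blob) <= CONTROL_SLOT_SIZE,
                  "control blob is too big for its reserved slot");

    int main(void) { (void)control_blob; return 0; }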
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index 0c029e8959c7..7766d36983fc 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c | |||
@@ -61,7 +61,7 @@ static void vsmp_irq_enable(void) | |||
61 | native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); | 61 | native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); |
62 | } | 62 | } |
63 | 63 | ||
64 | static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf, | 64 | static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf, |
65 | unsigned long addr, unsigned len) | 65 | unsigned long addr, unsigned len) |
66 | { | 66 | { |
67 | switch (type) { | 67 | switch (type) { |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 0bfe2bd305eb..3da2508eb22a 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -711,6 +711,10 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp) | |||
711 | u64 *spte; | 711 | u64 *spte; |
712 | int young = 0; | 712 | int young = 0; |
713 | 713 | ||
714 | /* always return old for EPT */ | ||
715 | if (!shadow_accessed_mask) | ||
716 | return 0; | ||
717 | |||
714 | spte = rmap_next(kvm, rmapp, NULL); | 718 | spte = rmap_next(kvm, rmapp, NULL); |
715 | while (spte) { | 719 | while (spte) { |
716 | int _young; | 720 | int _young; |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index f72ac1fa35f0..4a814bff21f2 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -345,7 +345,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, | |||
345 | shadow_addr = __pa(shadow_page->spt); | 345 | shadow_addr = __pa(shadow_page->spt); |
346 | shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK | 346 | shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK |
347 | | PT_WRITABLE_MASK | PT_USER_MASK; | 347 | | PT_WRITABLE_MASK | PT_USER_MASK; |
348 | *shadow_ent = shadow_pte; | 348 | set_shadow_pte(shadow_ent, shadow_pte); |
349 | } | 349 | } |
350 | 350 | ||
351 | mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access, | 351 | mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access, |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e2ee264740c7..8233b86c778c 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -62,6 +62,7 @@ static int npt = 1; | |||
62 | module_param(npt, int, S_IRUGO); | 62 | module_param(npt, int, S_IRUGO); |
63 | 63 | ||
64 | static void kvm_reput_irq(struct vcpu_svm *svm); | 64 | static void kvm_reput_irq(struct vcpu_svm *svm); |
65 | static void svm_flush_tlb(struct kvm_vcpu *vcpu); | ||
65 | 66 | ||
66 | static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) | 67 | static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) |
67 | { | 68 | { |
@@ -878,6 +879,10 @@ set: | |||
878 | static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | 879 | static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
879 | { | 880 | { |
880 | unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE; | 881 | unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE; |
882 | unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4; | ||
883 | |||
884 | if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) | ||
885 | force_new_asid(vcpu); | ||
881 | 886 | ||
882 | vcpu->arch.cr4 = cr4; | 887 | vcpu->arch.cr4 = cr4; |
883 | if (!npt_enabled) | 888 | if (!npt_enabled) |
@@ -1027,6 +1032,13 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1027 | KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code, | 1032 | KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code, |
1028 | (u32)fault_address, (u32)(fault_address >> 32), | 1033 | (u32)fault_address, (u32)(fault_address >> 32), |
1029 | handler); | 1034 | handler); |
1035 | /* | ||
1036 | * FIXME: This shouldn't be necessary here, but there is a flush | ||
1037 | * missing in the MMU code. Until we find this bug, flush the | ||
1038 | * complete TLB here on an NPF | ||
1039 | */ | ||
1040 | if (npt_enabled) | ||
1041 | svm_flush_tlb(&svm->vcpu); | ||
1030 | 1042 | ||
1031 | if (event_injection) | 1043 | if (event_injection) |
1032 | kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); | 1044 | kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2a69773e3b26..7041cc52b562 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -3301,8 +3301,7 @@ static int __init vmx_init(void) | |||
3301 | kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | | 3301 | kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | |
3302 | VMX_EPT_WRITABLE_MASK | | 3302 | VMX_EPT_WRITABLE_MASK | |
3303 | VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); | 3303 | VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); |
3304 | kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK, | 3304 | kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, |
3305 | VMX_EPT_FAKE_DIRTY_MASK, 0ull, | ||
3306 | VMX_EPT_EXECUTABLE_MASK); | 3305 | VMX_EPT_EXECUTABLE_MASK); |
3307 | kvm_enable_tdp(); | 3306 | kvm_enable_tdp(); |
3308 | } else | 3307 | } else |
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h index 425a13436b3f..23e8373507ad 100644 --- a/arch/x86/kvm/vmx.h +++ b/arch/x86/kvm/vmx.h | |||
@@ -370,8 +370,6 @@ enum vmcs_field { | |||
370 | #define VMX_EPT_READABLE_MASK 0x1ull | 370 | #define VMX_EPT_READABLE_MASK 0x1ull |
371 | #define VMX_EPT_WRITABLE_MASK 0x2ull | 371 | #define VMX_EPT_WRITABLE_MASK 0x2ull |
372 | #define VMX_EPT_EXECUTABLE_MASK 0x4ull | 372 | #define VMX_EPT_EXECUTABLE_MASK 0x4ull |
373 | #define VMX_EPT_FAKE_ACCESSED_MASK (1ull << 62) | ||
374 | #define VMX_EPT_FAKE_DIRTY_MASK (1ull << 63) | ||
375 | 373 | ||
376 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul | 374 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul |
377 | 375 | ||
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c index d5a2b39f882b..321cf720dbb6 100644 --- a/arch/x86/lib/msr-on-cpu.c +++ b/arch/x86/lib/msr-on-cpu.c | |||
@@ -16,36 +16,46 @@ static void __rdmsr_on_cpu(void *info) | |||
16 | rdmsr(rv->msr_no, rv->l, rv->h); | 16 | rdmsr(rv->msr_no, rv->l, rv->h); |
17 | } | 17 | } |
18 | 18 | ||
19 | static void __rdmsr_safe_on_cpu(void *info) | 19 | static void __wrmsr_on_cpu(void *info) |
20 | { | 20 | { |
21 | struct msr_info *rv = info; | 21 | struct msr_info *rv = info; |
22 | 22 | ||
23 | rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h); | 23 | wrmsr(rv->msr_no, rv->l, rv->h); |
24 | } | 24 | } |
25 | 25 | ||
26 | static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) | 26 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) |
27 | { | 27 | { |
28 | int err = 0; | 28 | int err; |
29 | struct msr_info rv; | 29 | struct msr_info rv; |
30 | 30 | ||
31 | rv.msr_no = msr_no; | 31 | rv.msr_no = msr_no; |
32 | if (safe) { | 32 | err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); |
33 | smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); | ||
34 | err = rv.err; | ||
35 | } else { | ||
36 | smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); | ||
37 | } | ||
38 | *l = rv.l; | 33 | *l = rv.l; |
39 | *h = rv.h; | 34 | *h = rv.h; |
40 | 35 | ||
41 | return err; | 36 | return err; |
42 | } | 37 | } |
43 | 38 | ||
44 | static void __wrmsr_on_cpu(void *info) | 39 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) |
40 | { | ||
41 | int err; | ||
42 | struct msr_info rv; | ||
43 | |||
44 | rv.msr_no = msr_no; | ||
45 | rv.l = l; | ||
46 | rv.h = h; | ||
47 | err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); | ||
48 | |||
49 | return err; | ||
50 | } | ||
51 | |||
52 | /* These "safe" variants are slower and should be used when the target MSR | ||
53 | may not actually exist. */ | ||
54 | static void __rdmsr_safe_on_cpu(void *info) | ||
45 | { | 55 | { |
46 | struct msr_info *rv = info; | 56 | struct msr_info *rv = info; |
47 | 57 | ||
48 | wrmsr(rv->msr_no, rv->l, rv->h); | 58 | rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h); |
49 | } | 59 | } |
50 | 60 | ||
51 | static void __wrmsr_safe_on_cpu(void *info) | 61 | static void __wrmsr_safe_on_cpu(void *info) |
@@ -55,44 +65,30 @@ static void __wrmsr_safe_on_cpu(void *info) | |||
55 | rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h); | 65 | rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h); |
56 | } | 66 | } |
57 | 67 | ||
58 | static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) | 68 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) |
59 | { | 69 | { |
60 | int err = 0; | 70 | int err; |
61 | struct msr_info rv; | 71 | struct msr_info rv; |
62 | 72 | ||
63 | rv.msr_no = msr_no; | 73 | rv.msr_no = msr_no; |
64 | rv.l = l; | 74 | err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); |
65 | rv.h = h; | 75 | *l = rv.l; |
66 | if (safe) { | 76 | *h = rv.h; |
67 | smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); | ||
68 | err = rv.err; | ||
69 | } else { | ||
70 | smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); | ||
71 | } | ||
72 | |||
73 | return err; | ||
74 | } | ||
75 | |||
76 | void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | ||
77 | { | ||
78 | _wrmsr_on_cpu(cpu, msr_no, l, h, 0); | ||
79 | } | ||
80 | 77 | ||
81 | void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | 78 | return err ? err : rv.err; |
82 | { | ||
83 | _rdmsr_on_cpu(cpu, msr_no, l, h, 0); | ||
84 | } | 79 | } |
85 | 80 | ||
86 | /* These "safe" variants are slower and should be used when the target MSR | ||
87 | may not actually exist. */ | ||
88 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | 81 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) |
89 | { | 82 | { |
90 | return _wrmsr_on_cpu(cpu, msr_no, l, h, 1); | 83 | int err; |
91 | } | 84 | struct msr_info rv; |
92 | 85 | ||
93 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | 86 | rv.msr_no = msr_no; |
94 | { | 87 | rv.l = l; |
95 | return _rdmsr_on_cpu(cpu, msr_no, l, h, 1); | 88 | rv.h = h; |
89 | err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); | ||
90 | |||
91 | return err ? err : rv.err; | ||
96 | } | 92 | } |
97 | 93 | ||
98 | EXPORT_SYMBOL(rdmsr_on_cpu); | 94 | EXPORT_SYMBOL(rdmsr_on_cpu); |
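The msr-on-cpu.c rework keeps one pattern throughout: fill a struct msr_info, run the matching handler on the target CPU via smp_call_function_single(), and, for the _safe variants, also propagate the handler's error. The closest userspace analog is the msr character device, where the CPU is selected by path and the MSR by file offset; a minimal read sketch (assumes the msr driver is loaded and root privileges, helper name is illustrative):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Read one MSR on a given CPU via /dev/cpu/<n>/msr (the file offset selects the MSR). */
    static int rdmsr_on_cpu(unsigned int cpu, uint32_t msr_no, uint64_t *val)
    {
        char path[64];
        int fd;

        snprintf(path, sizeof(path), "/dev/cpu/%u/msr", cpu);
        fd = open(path, O_RDONLY);
        if (fd < 0)
            return -1;
        if (pread(fd, val, sizeof(*val), msr_no) != sizeof(*val)) {
            close(fd);
            return -1;
        }
        close(fd);
        return 0;
    }

    int main(void)
    {
        uint64_t tsc;

        if (rdmsr_on_cpu(0, 0x10, &tsc) == 0)   /* 0x10 = IA32_TIME_STAMP_COUNTER */
            printf("cpu0 TSC MSR: %llu\n", (unsigned long long)tsc);
        return 0;
    }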
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c index 94972e7c094d..82004d2bf05e 100644 --- a/arch/x86/lib/string_32.c +++ b/arch/x86/lib/string_32.c | |||
@@ -22,7 +22,7 @@ char *strcpy(char *dest, const char *src) | |||
22 | "testb %%al,%%al\n\t" | 22 | "testb %%al,%%al\n\t" |
23 | "jne 1b" | 23 | "jne 1b" |
24 | : "=&S" (d0), "=&D" (d1), "=&a" (d2) | 24 | : "=&S" (d0), "=&D" (d1), "=&a" (d2) |
25 | :"0" (src), "1" (dest) : "memory"); | 25 | : "0" (src), "1" (dest) : "memory"); |
26 | return dest; | 26 | return dest; |
27 | } | 27 | } |
28 | EXPORT_SYMBOL(strcpy); | 28 | EXPORT_SYMBOL(strcpy); |
@@ -42,7 +42,7 @@ char *strncpy(char *dest, const char *src, size_t count) | |||
42 | "stosb\n" | 42 | "stosb\n" |
43 | "2:" | 43 | "2:" |
44 | : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) | 44 | : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) |
45 | :"0" (src), "1" (dest), "2" (count) : "memory"); | 45 | : "0" (src), "1" (dest), "2" (count) : "memory"); |
46 | return dest; | 46 | return dest; |
47 | } | 47 | } |
48 | EXPORT_SYMBOL(strncpy); | 48 | EXPORT_SYMBOL(strncpy); |
@@ -60,7 +60,7 @@ char *strcat(char *dest, const char *src) | |||
60 | "testb %%al,%%al\n\t" | 60 | "testb %%al,%%al\n\t" |
61 | "jne 1b" | 61 | "jne 1b" |
62 | : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) | 62 | : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) |
63 | : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu): "memory"); | 63 | : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu) : "memory"); |
64 | return dest; | 64 | return dest; |
65 | } | 65 | } |
66 | EXPORT_SYMBOL(strcat); | 66 | EXPORT_SYMBOL(strcat); |
@@ -105,9 +105,9 @@ int strcmp(const char *cs, const char *ct) | |||
105 | "2:\tsbbl %%eax,%%eax\n\t" | 105 | "2:\tsbbl %%eax,%%eax\n\t" |
106 | "orb $1,%%al\n" | 106 | "orb $1,%%al\n" |
107 | "3:" | 107 | "3:" |
108 | :"=a" (res), "=&S" (d0), "=&D" (d1) | 108 | : "=a" (res), "=&S" (d0), "=&D" (d1) |
109 | :"1" (cs), "2" (ct) | 109 | : "1" (cs), "2" (ct) |
110 | :"memory"); | 110 | : "memory"); |
111 | return res; | 111 | return res; |
112 | } | 112 | } |
113 | EXPORT_SYMBOL(strcmp); | 113 | EXPORT_SYMBOL(strcmp); |
@@ -130,9 +130,9 @@ int strncmp(const char *cs, const char *ct, size_t count) | |||
130 | "3:\tsbbl %%eax,%%eax\n\t" | 130 | "3:\tsbbl %%eax,%%eax\n\t" |
131 | "orb $1,%%al\n" | 131 | "orb $1,%%al\n" |
132 | "4:" | 132 | "4:" |
133 | :"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) | 133 | : "=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) |
134 | :"1" (cs), "2" (ct), "3" (count) | 134 | : "1" (cs), "2" (ct), "3" (count) |
135 | :"memory"); | 135 | : "memory"); |
136 | return res; | 136 | return res; |
137 | } | 137 | } |
138 | EXPORT_SYMBOL(strncmp); | 138 | EXPORT_SYMBOL(strncmp); |
@@ -152,9 +152,9 @@ char *strchr(const char *s, int c) | |||
152 | "movl $1,%1\n" | 152 | "movl $1,%1\n" |
153 | "2:\tmovl %1,%0\n\t" | 153 | "2:\tmovl %1,%0\n\t" |
154 | "decl %0" | 154 | "decl %0" |
155 | :"=a" (res), "=&S" (d0) | 155 | : "=a" (res), "=&S" (d0) |
156 | :"1" (s), "0" (c) | 156 | : "1" (s), "0" (c) |
157 | :"memory"); | 157 | : "memory"); |
158 | return res; | 158 | return res; |
159 | } | 159 | } |
160 | EXPORT_SYMBOL(strchr); | 160 | EXPORT_SYMBOL(strchr); |
@@ -169,9 +169,9 @@ size_t strlen(const char *s) | |||
169 | "scasb\n\t" | 169 | "scasb\n\t" |
170 | "notl %0\n\t" | 170 | "notl %0\n\t" |
171 | "decl %0" | 171 | "decl %0" |
172 | :"=c" (res), "=&D" (d0) | 172 | : "=c" (res), "=&D" (d0) |
173 | :"1" (s), "a" (0), "0" (0xffffffffu) | 173 | : "1" (s), "a" (0), "0" (0xffffffffu) |
174 | :"memory"); | 174 | : "memory"); |
175 | return res; | 175 | return res; |
176 | } | 176 | } |
177 | EXPORT_SYMBOL(strlen); | 177 | EXPORT_SYMBOL(strlen); |
@@ -189,9 +189,9 @@ void *memchr(const void *cs, int c, size_t count) | |||
189 | "je 1f\n\t" | 189 | "je 1f\n\t" |
190 | "movl $1,%0\n" | 190 | "movl $1,%0\n" |
191 | "1:\tdecl %0" | 191 | "1:\tdecl %0" |
192 | :"=D" (res), "=&c" (d0) | 192 | : "=D" (res), "=&c" (d0) |
193 | :"a" (c), "0" (cs), "1" (count) | 193 | : "a" (c), "0" (cs), "1" (count) |
194 | :"memory"); | 194 | : "memory"); |
195 | return res; | 195 | return res; |
196 | } | 196 | } |
197 | EXPORT_SYMBOL(memchr); | 197 | EXPORT_SYMBOL(memchr); |
@@ -228,9 +228,9 @@ size_t strnlen(const char *s, size_t count) | |||
228 | "cmpl $-1,%1\n\t" | 228 | "cmpl $-1,%1\n\t" |
229 | "jne 1b\n" | 229 | "jne 1b\n" |
230 | "3:\tsubl %2,%0" | 230 | "3:\tsubl %2,%0" |
231 | :"=a" (res), "=&d" (d0) | 231 | : "=a" (res), "=&d" (d0) |
232 | :"c" (s), "1" (count) | 232 | : "c" (s), "1" (count) |
233 | :"memory"); | 233 | : "memory"); |
234 | return res; | 234 | return res; |
235 | } | 235 | } |
236 | EXPORT_SYMBOL(strnlen); | 236 | EXPORT_SYMBOL(strnlen); |
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c index 42e8a50303f3..8e2d55f754bf 100644 --- a/arch/x86/lib/strstr_32.c +++ b/arch/x86/lib/strstr_32.c | |||
@@ -23,9 +23,9 @@ __asm__ __volatile__( | |||
23 | "jne 1b\n\t" | 23 | "jne 1b\n\t" |
24 | "xorl %%eax,%%eax\n\t" | 24 | "xorl %%eax,%%eax\n\t" |
25 | "2:" | 25 | "2:" |
26 | :"=a" (__res), "=&c" (d0), "=&S" (d1) | 26 | : "=a" (__res), "=&c" (d0), "=&S" (d1) |
27 | :"0" (0), "1" (0xffffffff), "2" (cs), "g" (ct) | 27 | : "0" (0), "1" (0xffffffff), "2" (cs), "g" (ct) |
28 | :"dx", "di"); | 28 | : "dx", "di"); |
29 | return __res; | 29 | return __res; |
30 | } | 30 | } |
31 | 31 | ||
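The string_32.c and strstr_32.c hunks are whitespace-only, adding a space after the colons that separate the output, input and clobber lists of GCC extended asm. For readers less familiar with the syntax, a minimal standalone example in the same layout (GCC on x86, AT&T syntax; the "memory" clobber is only there to populate the third list):

    #include <stdio.h>

    int main(void)
    {
        unsigned long src = 42, dst;

        /* outputs : inputs : clobbers -- note the space after each colon */
        asm("mov %1, %0"
            : "=r" (dst)
            : "r" (src)
            : "memory");
        printf("%lu\n", dst);   /* prints 42 */
        return 0;
    }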
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c index 3d317836be9e..3f2cf11f201a 100644 --- a/arch/x86/mach-default/setup.c +++ b/arch/x86/mach-default/setup.c | |||
@@ -10,13 +10,15 @@ | |||
10 | #include <asm/e820.h> | 10 | #include <asm/e820.h> |
11 | #include <asm/setup.h> | 11 | #include <asm/setup.h> |
12 | 12 | ||
13 | #include <mach_ipi.h> | ||
14 | |||
13 | #ifdef CONFIG_HOTPLUG_CPU | 15 | #ifdef CONFIG_HOTPLUG_CPU |
14 | #define DEFAULT_SEND_IPI (1) | 16 | #define DEFAULT_SEND_IPI (1) |
15 | #else | 17 | #else |
16 | #define DEFAULT_SEND_IPI (0) | 18 | #define DEFAULT_SEND_IPI (0) |
17 | #endif | 19 | #endif |
18 | 20 | ||
19 | int no_broadcast=DEFAULT_SEND_IPI; | 21 | int no_broadcast = DEFAULT_SEND_IPI; |
20 | 22 | ||
21 | /** | 23 | /** |
22 | * pre_intr_init_hook - initialisation prior to setting up interrupt vectors | 24 | * pre_intr_init_hook - initialisation prior to setting up interrupt vectors |
diff --git a/arch/x86/mach-rdc321x/platform.c b/arch/x86/mach-rdc321x/platform.c index a037041817c7..4f4e50c3ad3b 100644 --- a/arch/x86/mach-rdc321x/platform.c +++ b/arch/x86/mach-rdc321x/platform.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/list.h> | 25 | #include <linux/list.h> |
26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
28 | #include <linux/version.h> | ||
29 | #include <linux/leds.h> | 28 | #include <linux/leds.h> |
30 | 29 | ||
31 | #include <asm/gpio.h> | 30 | #include <asm/gpio.h> |
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c index 62fa440678d8..847c164725f4 100644 --- a/arch/x86/mm/discontig_32.c +++ b/arch/x86/mm/discontig_32.c | |||
@@ -328,7 +328,7 @@ void __init initmem_init(unsigned long start_pfn, | |||
328 | 328 | ||
329 | get_memcfg_numa(); | 329 | get_memcfg_numa(); |
330 | 330 | ||
331 | kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE); | 331 | kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE); |
332 | 332 | ||
333 | kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); | 333 | kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); |
334 | do { | 334 | do { |
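Several hunks in this series (here, and in numa_64.c, init_64.c and pageattr.c below) replace round_up() with roundup(). The practical difference is that round_up() is a bitmask helper that only works for power-of-two alignments, whereas roundup() divides and multiplies and accepts any granularity. A standalone sketch with approximate definitions (reconstructed from memory of the kernel macros, so treat them as illustrative):

    #include <stdio.h>

    /* Approximate kernel definitions: round_up() needs a power-of-two 'y',
     * roundup() works for any non-zero multiple. */
    #define round_up(x, y)  (((x) + (y) - 1) & ~((y) - 1))
    #define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))

    int main(void)
    {
        printf("%lu\n", (unsigned long)roundup(1000UL, 384UL));   /* 1152 */
        printf("%lu\n", (unsigned long)round_up(1000UL, 1024UL)); /* 1024 */
        /* round_up(1000, 384) would silently give a wrong answer. */
        return 0;
    }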
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index a20d1fa64b4e..e7277cbcfb40 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c | |||
@@ -148,8 +148,8 @@ static void note_page(struct seq_file *m, struct pg_state *st, | |||
148 | * we have now. "break" is either changing perms, levels or | 148 | * we have now. "break" is either changing perms, levels or |
149 | * address space marker. | 149 | * address space marker. |
150 | */ | 150 | */ |
151 | prot = pgprot_val(new_prot) & ~(PTE_PFN_MASK); | 151 | prot = pgprot_val(new_prot) & PTE_FLAGS_MASK; |
152 | cur = pgprot_val(st->current_prot) & ~(PTE_PFN_MASK); | 152 | cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK; |
153 | 153 | ||
154 | if (!st->level) { | 154 | if (!st->level) { |
155 | /* First entry */ | 155 | /* First entry */ |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 455f3fe67b42..8f92cac4e6db 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/tlbflush.h> | 35 | #include <asm/tlbflush.h> |
36 | #include <asm/proto.h> | 36 | #include <asm/proto.h> |
37 | #include <asm-generic/sections.h> | 37 | #include <asm-generic/sections.h> |
38 | #include <asm/traps.h> | ||
38 | 39 | ||
39 | /* | 40 | /* |
40 | * Page fault error code bits | 41 | * Page fault error code bits |
@@ -357,8 +358,6 @@ static int is_errata100(struct pt_regs *regs, unsigned long address) | |||
357 | return 0; | 358 | return 0; |
358 | } | 359 | } |
359 | 360 | ||
360 | void do_invalid_op(struct pt_regs *, unsigned long); | ||
361 | |||
362 | static int is_f00f_bug(struct pt_regs *regs, unsigned long address) | 361 | static int is_f00f_bug(struct pt_regs *regs, unsigned long address) |
363 | { | 362 | { |
364 | #ifdef CONFIG_X86_F00F_BUG | 363 | #ifdef CONFIG_X86_F00F_BUG |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index d37f29376b0c..6b9a9358b330 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <asm/paravirt.h> | 47 | #include <asm/paravirt.h> |
48 | #include <asm/setup.h> | 48 | #include <asm/setup.h> |
49 | #include <asm/cacheflush.h> | 49 | #include <asm/cacheflush.h> |
50 | #include <asm/smp.h> | ||
50 | 51 | ||
51 | unsigned int __VMALLOC_RESERVE = 128 << 20; | 52 | unsigned int __VMALLOC_RESERVE = 128 << 20; |
52 | 53 | ||
@@ -458,11 +459,7 @@ static void __init pagetable_init(void) | |||
458 | { | 459 | { |
459 | pgd_t *pgd_base = swapper_pg_dir; | 460 | pgd_t *pgd_base = swapper_pg_dir; |
460 | 461 | ||
461 | paravirt_pagetable_setup_start(pgd_base); | ||
462 | |||
463 | permanent_kmaps_init(pgd_base); | 462 | permanent_kmaps_init(pgd_base); |
464 | |||
465 | paravirt_pagetable_setup_done(pgd_base); | ||
466 | } | 463 | } |
467 | 464 | ||
468 | #ifdef CONFIG_ACPI_SLEEP | 465 | #ifdef CONFIG_ACPI_SLEEP |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 129618ca0ea2..770536ebf7e9 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -60,7 +60,7 @@ static unsigned long dma_reserve __initdata; | |||
60 | 60 | ||
61 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 61 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
62 | 62 | ||
63 | int direct_gbpages __meminitdata | 63 | int direct_gbpages |
64 | #ifdef CONFIG_DIRECT_GBPAGES | 64 | #ifdef CONFIG_DIRECT_GBPAGES |
65 | = 1 | 65 | = 1 |
66 | #endif | 66 | #endif |
@@ -88,7 +88,11 @@ early_param("gbpages", parse_direct_gbpages_on); | |||
88 | 88 | ||
89 | int after_bootmem; | 89 | int after_bootmem; |
90 | 90 | ||
91 | static __init void *spp_getpage(void) | 91 | /* |
92 | * NOTE: This function is marked __ref because it calls the __init function | ||
93 | * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. | ||
94 | */ | ||
95 | static __ref void *spp_getpage(void) | ||
92 | { | 96 | { |
93 | void *ptr; | 97 | void *ptr; |
94 | 98 | ||
@@ -221,7 +225,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) | |||
221 | void __init cleanup_highmap(void) | 225 | void __init cleanup_highmap(void) |
222 | { | 226 | { |
223 | unsigned long vaddr = __START_KERNEL_map; | 227 | unsigned long vaddr = __START_KERNEL_map; |
224 | unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1; | 228 | unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1; |
225 | pmd_t *pmd = level2_kernel_pgt; | 229 | pmd_t *pmd = level2_kernel_pgt; |
226 | pmd_t *last_pmd = pmd + PTRS_PER_PMD; | 230 | pmd_t *last_pmd = pmd + PTRS_PER_PMD; |
227 | 231 | ||
@@ -237,7 +241,7 @@ static unsigned long __initdata table_start; | |||
237 | static unsigned long __meminitdata table_end; | 241 | static unsigned long __meminitdata table_end; |
238 | static unsigned long __meminitdata table_top; | 242 | static unsigned long __meminitdata table_top; |
239 | 243 | ||
240 | static __meminit void *alloc_low_page(unsigned long *phys) | 244 | static __ref void *alloc_low_page(unsigned long *phys) |
241 | { | 245 | { |
242 | unsigned long pfn = table_end++; | 246 | unsigned long pfn = table_end++; |
243 | void *adr; | 247 | void *adr; |
@@ -258,7 +262,7 @@ static __meminit void *alloc_low_page(unsigned long *phys) | |||
258 | return adr; | 262 | return adr; |
259 | } | 263 | } |
260 | 264 | ||
261 | static __meminit void unmap_low_page(void *adr) | 265 | static __ref void unmap_low_page(void *adr) |
262 | { | 266 | { |
263 | if (after_bootmem) | 267 | if (after_bootmem) |
264 | return; | 268 | return; |
@@ -314,6 +318,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, | |||
314 | { | 318 | { |
315 | unsigned long pages = 0; | 319 | unsigned long pages = 0; |
316 | unsigned long last_map_addr = end; | 320 | unsigned long last_map_addr = end; |
321 | unsigned long start = address; | ||
317 | 322 | ||
318 | int i = pmd_index(address); | 323 | int i = pmd_index(address); |
319 | 324 | ||
@@ -331,16 +336,24 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, | |||
331 | } | 336 | } |
332 | 337 | ||
333 | if (pmd_val(*pmd)) { | 338 | if (pmd_val(*pmd)) { |
334 | if (!pmd_large(*pmd)) | 339 | if (!pmd_large(*pmd)) { |
340 | spin_lock(&init_mm.page_table_lock); | ||
335 | last_map_addr = phys_pte_update(pmd, address, | 341 | last_map_addr = phys_pte_update(pmd, address, |
336 | end); | 342 | end); |
343 | spin_unlock(&init_mm.page_table_lock); | ||
344 | } | ||
345 | /* Count entries we're using from level2_ident_pgt */ | ||
346 | if (start == 0) | ||
347 | pages++; | ||
337 | continue; | 348 | continue; |
338 | } | 349 | } |
339 | 350 | ||
340 | if (page_size_mask & (1<<PG_LEVEL_2M)) { | 351 | if (page_size_mask & (1<<PG_LEVEL_2M)) { |
341 | pages++; | 352 | pages++; |
353 | spin_lock(&init_mm.page_table_lock); | ||
342 | set_pte((pte_t *)pmd, | 354 | set_pte((pte_t *)pmd, |
343 | pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); | 355 | pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); |
356 | spin_unlock(&init_mm.page_table_lock); | ||
344 | last_map_addr = (address & PMD_MASK) + PMD_SIZE; | 357 | last_map_addr = (address & PMD_MASK) + PMD_SIZE; |
345 | continue; | 358 | continue; |
346 | } | 359 | } |
@@ -349,7 +362,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, | |||
349 | last_map_addr = phys_pte_init(pte, address, end); | 362 | last_map_addr = phys_pte_init(pte, address, end); |
350 | unmap_low_page(pte); | 363 | unmap_low_page(pte); |
351 | 364 | ||
365 | spin_lock(&init_mm.page_table_lock); | ||
352 | pmd_populate_kernel(&init_mm, pmd, __va(pte_phys)); | 366 | pmd_populate_kernel(&init_mm, pmd, __va(pte_phys)); |
367 | spin_unlock(&init_mm.page_table_lock); | ||
353 | } | 368 | } |
354 | update_page_count(PG_LEVEL_2M, pages); | 369 | update_page_count(PG_LEVEL_2M, pages); |
355 | return last_map_addr; | 370 | return last_map_addr; |
@@ -362,9 +377,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end, | |||
362 | pmd_t *pmd = pmd_offset(pud, 0); | 377 | pmd_t *pmd = pmd_offset(pud, 0); |
363 | unsigned long last_map_addr; | 378 | unsigned long last_map_addr; |
364 | 379 | ||
365 | spin_lock(&init_mm.page_table_lock); | ||
366 | last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask); | 380 | last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask); |
367 | spin_unlock(&init_mm.page_table_lock); | ||
368 | __flush_tlb_all(); | 381 | __flush_tlb_all(); |
369 | return last_map_addr; | 382 | return last_map_addr; |
370 | } | 383 | } |
@@ -400,20 +413,21 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, | |||
400 | 413 | ||
401 | if (page_size_mask & (1<<PG_LEVEL_1G)) { | 414 | if (page_size_mask & (1<<PG_LEVEL_1G)) { |
402 | pages++; | 415 | pages++; |
416 | spin_lock(&init_mm.page_table_lock); | ||
403 | set_pte((pte_t *)pud, | 417 | set_pte((pte_t *)pud, |
404 | pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); | 418 | pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); |
419 | spin_unlock(&init_mm.page_table_lock); | ||
405 | last_map_addr = (addr & PUD_MASK) + PUD_SIZE; | 420 | last_map_addr = (addr & PUD_MASK) + PUD_SIZE; |
406 | continue; | 421 | continue; |
407 | } | 422 | } |
408 | 423 | ||
409 | pmd = alloc_low_page(&pmd_phys); | 424 | pmd = alloc_low_page(&pmd_phys); |
410 | |||
411 | spin_lock(&init_mm.page_table_lock); | ||
412 | last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask); | 425 | last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask); |
413 | unmap_low_page(pmd); | 426 | unmap_low_page(pmd); |
427 | |||
428 | spin_lock(&init_mm.page_table_lock); | ||
414 | pud_populate(&init_mm, pud, __va(pmd_phys)); | 429 | pud_populate(&init_mm, pud, __va(pmd_phys)); |
415 | spin_unlock(&init_mm.page_table_lock); | 430 | spin_unlock(&init_mm.page_table_lock); |
416 | |||
417 | } | 431 | } |
418 | __flush_tlb_all(); | 432 | __flush_tlb_all(); |
419 | update_page_count(PG_LEVEL_1G, pages); | 433 | update_page_count(PG_LEVEL_1G, pages); |
@@ -437,14 +451,14 @@ static void __init find_early_table_space(unsigned long end) | |||
437 | unsigned long puds, pmds, ptes, tables, start; | 451 | unsigned long puds, pmds, ptes, tables, start; |
438 | 452 | ||
439 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; | 453 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; |
440 | tables = round_up(puds * sizeof(pud_t), PAGE_SIZE); | 454 | tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); |
441 | if (direct_gbpages) { | 455 | if (direct_gbpages) { |
442 | unsigned long extra; | 456 | unsigned long extra; |
443 | extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); | 457 | extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); |
444 | pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; | 458 | pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; |
445 | } else | 459 | } else |
446 | pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; | 460 | pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; |
447 | tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE); | 461 | tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); |
448 | 462 | ||
449 | if (cpu_has_pse) { | 463 | if (cpu_has_pse) { |
450 | unsigned long extra; | 464 | unsigned long extra; |
@@ -452,7 +466,7 @@ static void __init find_early_table_space(unsigned long end) | |||
452 | ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; | 466 | ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; |
453 | } else | 467 | } else |
454 | ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; | 468 | ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; |
455 | tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE); | 469 | tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); |
456 | 470 | ||
457 | /* | 471 | /* |
458 | * RED-PEN putting page tables only on node 0 could | 472 | * RED-PEN putting page tables only on node 0 could |
@@ -505,16 +519,14 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start, | |||
505 | continue; | 519 | continue; |
506 | } | 520 | } |
507 | 521 | ||
508 | if (after_bootmem) | 522 | pud = alloc_low_page(&pud_phys); |
509 | pud = pud_offset(pgd, start & PGDIR_MASK); | ||
510 | else | ||
511 | pud = alloc_low_page(&pud_phys); | ||
512 | |||
513 | last_map_addr = phys_pud_init(pud, __pa(start), __pa(next), | 523 | last_map_addr = phys_pud_init(pud, __pa(start), __pa(next), |
514 | page_size_mask); | 524 | page_size_mask); |
515 | unmap_low_page(pud); | 525 | unmap_low_page(pud); |
516 | pgd_populate(&init_mm, pgd_offset_k(start), | 526 | |
517 | __va(pud_phys)); | 527 | spin_lock(&init_mm.page_table_lock); |
528 | pgd_populate(&init_mm, pgd, __va(pud_phys)); | ||
529 | spin_unlock(&init_mm.page_table_lock); | ||
518 | } | 530 | } |
519 | 531 | ||
520 | return last_map_addr; | 532 | return last_map_addr; |
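The init_64.c hunks narrow init_mm.page_table_lock so it is held only while a new entry is actually published (set_pte, pmd_populate_kernel, pud_populate, pgd_populate), not across allocation and the whole level walk. A tiny pthread sketch of that locking placement, with a hypothetical shared table standing in for a page-table level:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *table[512];        /* stands in for a shared page-table level */

    static void populate_entry(int idx)
    {
        /* Allocation and preparation stay outside the lock... */
        void *page = calloc(1, 4096);

        /* ...only publication of the shared entry is serialized. */
        pthread_mutex_lock(&table_lock);
        table[idx] = page;
        pthread_mutex_unlock(&table_lock);
    }

    int main(void)
    {
        populate_entry(0);
        return 0;
    }

Keeping the critical section down to the single store is what lets the rest of the mapping work proceed without holding the lock.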
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 016f335bbeea..cac6da54203b 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -170,7 +170,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, | |||
170 | phys_addr &= PAGE_MASK; | 170 | phys_addr &= PAGE_MASK; |
171 | size = PAGE_ALIGN(last_addr+1) - phys_addr; | 171 | size = PAGE_ALIGN(last_addr+1) - phys_addr; |
172 | 172 | ||
173 | retval = reserve_memtype(phys_addr, phys_addr + size, | 173 | retval = reserve_memtype(phys_addr, (u64)phys_addr + size, |
174 | prot_val, &new_prot_val); | 174 | prot_val, &new_prot_val); |
175 | if (retval) { | 175 | if (retval) { |
176 | pr_debug("Warning: reserve_memtype returned %d\n", retval); | 176 | pr_debug("Warning: reserve_memtype returned %d\n", retval); |
@@ -421,7 +421,7 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr) | |||
421 | return; | 421 | return; |
422 | } | 422 | } |
423 | 423 | ||
424 | int __initdata early_ioremap_debug; | 424 | static int __initdata early_ioremap_debug; |
425 | 425 | ||
426 | static int __init early_ioremap_debug_setup(char *str) | 426 | static int __init early_ioremap_debug_setup(char *str) |
427 | { | 427 | { |
@@ -547,19 +547,17 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx) | |||
547 | } | 547 | } |
548 | 548 | ||
549 | 549 | ||
550 | int __initdata early_ioremap_nested; | 550 | static int __initdata early_ioremap_nested; |
551 | 551 | ||
552 | static int __init check_early_ioremap_leak(void) | 552 | static int __init check_early_ioremap_leak(void) |
553 | { | 553 | { |
554 | if (!early_ioremap_nested) | 554 | if (!early_ioremap_nested) |
555 | return 0; | 555 | return 0; |
556 | 556 | WARN(1, KERN_WARNING | |
557 | printk(KERN_WARNING | ||
558 | "Debug warning: early ioremap leak of %d areas detected.\n", | 557 | "Debug warning: early ioremap leak of %d areas detected.\n", |
559 | early_ioremap_nested); | 558 | early_ioremap_nested); |
560 | printk(KERN_WARNING | 559 | printk(KERN_WARNING |
561 | "please boot with early_ioremap_debug and report the dmesg.\n"); | 560 | "please boot with early_ioremap_debug and report the dmesg.\n"); |
562 | WARN_ON(1); | ||
563 | 561 | ||
564 | return 1; | 562 | return 1; |
565 | } | 563 | } |
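The first ioremap.c hunk widens the end-address arithmetic: with a 32-bit resource_size_t, phys_addr + size can wrap for mappings near the top of physical memory and hand reserve_memtype() a bogus, smaller end. A short demonstration of the wrap and of the cast that avoids it (values are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t phys_addr = 0xfffff000u;   /* hypothetical high mapping */
        uint32_t size      = 0x2000u;

        printf("0x%x\n", phys_addr + size);                        /* wraps to 0x1000 */
        printf("0x%llx\n", (unsigned long long)phys_addr + size);  /* 0x100001000 */
        return 0;
    }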
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c index e7397e108beb..635b50e85581 100644 --- a/arch/x86/mm/mmio-mod.c +++ b/arch/x86/mm/mmio-mod.c | |||
@@ -430,7 +430,9 @@ static void enter_uniprocessor(void) | |||
430 | "may miss events.\n"); | 430 | "may miss events.\n"); |
431 | } | 431 | } |
432 | 432 | ||
433 | static void leave_uniprocessor(void) | 433 | /* __ref because leave_uniprocessor calls cpu_up which is __cpuinit, |
434 | but this whole function is ifdefed CONFIG_HOTPLUG_CPU */ | ||
435 | static void __ref leave_uniprocessor(void) | ||
434 | { | 436 | { |
435 | int cpu; | 437 | int cpu; |
436 | int err; | 438 | int err; |
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index a4dd793d6003..cebcbf152d46 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -79,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void) | |||
79 | return 0; | 79 | return 0; |
80 | 80 | ||
81 | addr = 0x8000; | 81 | addr = 0x8000; |
82 | nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); | 82 | nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); |
83 | nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT, | 83 | nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT, |
84 | nodemap_size, L1_CACHE_BYTES); | 84 | nodemap_size, L1_CACHE_BYTES); |
85 | if (nodemap_addr == -1UL) { | 85 | if (nodemap_addr == -1UL) { |
@@ -176,10 +176,10 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, | |||
176 | unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size; | 176 | unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size; |
177 | unsigned long bootmap_start, nodedata_phys; | 177 | unsigned long bootmap_start, nodedata_phys; |
178 | void *bootmap; | 178 | void *bootmap; |
179 | const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE); | 179 | const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE); |
180 | int nid; | 180 | int nid; |
181 | 181 | ||
182 | start = round_up(start, ZONE_ALIGN); | 182 | start = roundup(start, ZONE_ALIGN); |
183 | 183 | ||
184 | printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, | 184 | printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, |
185 | start, end); | 185 | start, end); |
@@ -210,9 +210,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, | |||
210 | bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn); | 210 | bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn); |
211 | nid = phys_to_nid(nodedata_phys); | 211 | nid = phys_to_nid(nodedata_phys); |
212 | if (nid == nodeid) | 212 | if (nid == nodeid) |
213 | bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE); | 213 | bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE); |
214 | else | 214 | else |
215 | bootmap_start = round_up(start, PAGE_SIZE); | 215 | bootmap_start = roundup(start, PAGE_SIZE); |
216 | /* | 216 | /* |
217 | * SMP_CACHE_BYTES could be enough, but init_bootmem_node likes | 217 | * SMP_CACHE_BYTES could be enough, but init_bootmem_node likes |
218 | * to use that to align to PAGE_SIZE | 218 | * to use that to align to PAGE_SIZE |
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index 0dcd42eb94e6..d4aa503caaa2 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c | |||
@@ -221,8 +221,7 @@ static int pageattr_test(void) | |||
221 | failed += print_split(&sc); | 221 | failed += print_split(&sc); |
222 | 222 | ||
223 | if (failed) { | 223 | if (failed) { |
224 | printk(KERN_ERR "NOT PASSED. Please report.\n"); | 224 | WARN(1, KERN_ERR "NOT PASSED. Please report.\n"); |
225 | WARN_ON(1); | ||
226 | return -EINVAL; | 225 | return -EINVAL; |
227 | } else { | 226 | } else { |
228 | if (print) | 227 | if (print) |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 65c6e46bf059..898fad617abe 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -55,13 +55,19 @@ static void split_page_count(int level) | |||
55 | 55 | ||
56 | int arch_report_meminfo(char *page) | 56 | int arch_report_meminfo(char *page) |
57 | { | 57 | { |
58 | int n = sprintf(page, "DirectMap4k: %8lu\n" | 58 | int n = sprintf(page, "DirectMap4k: %8lu kB\n", |
59 | "DirectMap2M: %8lu\n", | 59 | direct_pages_count[PG_LEVEL_4K] << 2); |
60 | direct_pages_count[PG_LEVEL_4K], | 60 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) |
61 | direct_pages_count[PG_LEVEL_2M]); | 61 | n += sprintf(page + n, "DirectMap2M: %8lu kB\n", |
62 | direct_pages_count[PG_LEVEL_2M] << 11); | ||
63 | #else | ||
64 | n += sprintf(page + n, "DirectMap4M: %8lu kB\n", | ||
65 | direct_pages_count[PG_LEVEL_2M] << 12); | ||
66 | #endif | ||
62 | #ifdef CONFIG_X86_64 | 67 | #ifdef CONFIG_X86_64 |
63 | n += sprintf(page + n, "DirectMap1G: %8lu\n", | 68 | if (direct_gbpages) |
64 | direct_pages_count[PG_LEVEL_1G]); | 69 | n += sprintf(page + n, "DirectMap1G: %8lu kB\n", |
70 | direct_pages_count[PG_LEVEL_1G] << 20); | ||
65 | #endif | 71 | #endif |
66 | return n; | 72 | return n; |
67 | } | 73 | } |
@@ -78,7 +84,7 @@ static inline unsigned long highmap_start_pfn(void) | |||
78 | 84 | ||
79 | static inline unsigned long highmap_end_pfn(void) | 85 | static inline unsigned long highmap_end_pfn(void) |
80 | { | 86 | { |
81 | return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT; | 87 | return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT; |
82 | } | 88 | } |
83 | 89 | ||
84 | #endif | 90 | #endif |
@@ -592,10 +598,9 @@ repeat: | |||
592 | if (!pte_val(old_pte)) { | 598 | if (!pte_val(old_pte)) { |
593 | if (!primary) | 599 | if (!primary) |
594 | return 0; | 600 | return 0; |
595 | printk(KERN_WARNING "CPA: called for zero pte. " | 601 | WARN(1, KERN_WARNING "CPA: called for zero pte. " |
596 | "vaddr = %lx cpa->vaddr = %lx\n", address, | 602 | "vaddr = %lx cpa->vaddr = %lx\n", address, |
597 | cpa->vaddr); | 603 | cpa->vaddr); |
598 | WARN_ON(1); | ||
599 | return -EINVAL; | 604 | return -EINVAL; |
600 | } | 605 | } |
601 | 606 | ||
@@ -844,7 +849,7 @@ int set_memory_uc(unsigned long addr, int numpages) | |||
844 | /* | 849 | /* |
845 | * for now UC MINUS. see comments in ioremap_nocache() | 850 | * for now UC MINUS. see comments in ioremap_nocache() |
846 | */ | 851 | */ |
847 | if (reserve_memtype(addr, addr + numpages * PAGE_SIZE, | 852 | if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
848 | _PAGE_CACHE_UC_MINUS, NULL)) | 853 | _PAGE_CACHE_UC_MINUS, NULL)) |
849 | return -EINVAL; | 854 | return -EINVAL; |
850 | 855 | ||
@@ -863,7 +868,7 @@ int set_memory_wc(unsigned long addr, int numpages) | |||
863 | if (!pat_enabled) | 868 | if (!pat_enabled) |
864 | return set_memory_uc(addr, numpages); | 869 | return set_memory_uc(addr, numpages); |
865 | 870 | ||
866 | if (reserve_memtype(addr, addr + numpages * PAGE_SIZE, | 871 | if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
867 | _PAGE_CACHE_WC, NULL)) | 872 | _PAGE_CACHE_WC, NULL)) |
868 | return -EINVAL; | 873 | return -EINVAL; |
869 | 874 | ||
@@ -879,7 +884,7 @@ int _set_memory_wb(unsigned long addr, int numpages) | |||
879 | 884 | ||
880 | int set_memory_wb(unsigned long addr, int numpages) | 885 | int set_memory_wb(unsigned long addr, int numpages) |
881 | { | 886 | { |
882 | free_memtype(addr, addr + numpages * PAGE_SIZE); | 887 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
883 | 888 | ||
884 | return _set_memory_wb(addr, numpages); | 889 | return _set_memory_wb(addr, numpages); |
885 | } | 890 | } |
@@ -901,11 +906,13 @@ int set_memory_ro(unsigned long addr, int numpages) | |||
901 | { | 906 | { |
902 | return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW)); | 907 | return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW)); |
903 | } | 908 | } |
909 | EXPORT_SYMBOL_GPL(set_memory_ro); | ||
904 | 910 | ||
905 | int set_memory_rw(unsigned long addr, int numpages) | 911 | int set_memory_rw(unsigned long addr, int numpages) |
906 | { | 912 | { |
907 | return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW)); | 913 | return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW)); |
908 | } | 914 | } |
915 | EXPORT_SYMBOL_GPL(set_memory_rw); | ||
909 | 916 | ||
910 | int set_memory_np(unsigned long addr, int numpages) | 917 | int set_memory_np(unsigned long addr, int numpages) |
911 | { | 918 | { |
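The arch_report_meminfo() hunk at the top of this file reports the DirectMap counters in kB instead of raw page counts, so each line shifts the count by log2(page size) - 10: <<2 for 4 KiB pages, <<11 for 2 MiB, <<12 for the 4 MiB non-PAE case, <<20 for 1 GiB. A quick arithmetic check with made-up counts:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pages_4k = 1000, pages_2m = 50, pages_1g = 2;

        /* page count -> kB: shift by log2(page size) - 10 */
        printf("DirectMap4k: %8lu kB\n", pages_4k << 2);   /*    4000 kB */
        printf("DirectMap2M: %8lu kB\n", pages_2m << 11);  /*  102400 kB */
        printf("DirectMap1G: %8lu kB\n", pages_1g << 20);  /* 2097152 kB */
        return 0;
    }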
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 2fe30916d4b6..2a50e0fa64a5 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -207,6 +207,9 @@ static int chk_conflict(struct memtype *new, struct memtype *entry, | |||
207 | return -EBUSY; | 207 | return -EBUSY; |
208 | } | 208 | } |
209 | 209 | ||
210 | static struct memtype *cached_entry; | ||
211 | static u64 cached_start; | ||
212 | |||
210 | /* | 213 | /* |
211 | * req_type typically has one of the: | 214 | * req_type typically has one of the: |
212 | * - _PAGE_CACHE_WB | 215 | * - _PAGE_CACHE_WB |
@@ -280,11 +283,17 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
280 | 283 | ||
281 | spin_lock(&memtype_lock); | 284 | spin_lock(&memtype_lock); |
282 | 285 | ||
286 | if (cached_entry && start >= cached_start) | ||
287 | entry = cached_entry; | ||
288 | else | ||
289 | entry = list_entry(&memtype_list, struct memtype, nd); | ||
290 | |||
283 | /* Search for existing mapping that overlaps the current range */ | 291 | /* Search for existing mapping that overlaps the current range */ |
284 | where = NULL; | 292 | where = NULL; |
285 | list_for_each_entry(entry, &memtype_list, nd) { | 293 | list_for_each_entry_continue(entry, &memtype_list, nd) { |
286 | if (end <= entry->start) { | 294 | if (end <= entry->start) { |
287 | where = entry->nd.prev; | 295 | where = entry->nd.prev; |
296 | cached_entry = list_entry(where, struct memtype, nd); | ||
288 | break; | 297 | break; |
289 | } else if (start <= entry->start) { /* end > entry->start */ | 298 | } else if (start <= entry->start) { /* end > entry->start */ |
290 | err = chk_conflict(new, entry, new_type); | 299 | err = chk_conflict(new, entry, new_type); |
@@ -292,6 +301,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
292 | dprintk("Overlap at 0x%Lx-0x%Lx\n", | 301 | dprintk("Overlap at 0x%Lx-0x%Lx\n", |
293 | entry->start, entry->end); | 302 | entry->start, entry->end); |
294 | where = entry->nd.prev; | 303 | where = entry->nd.prev; |
304 | cached_entry = list_entry(where, | ||
305 | struct memtype, nd); | ||
295 | } | 306 | } |
296 | break; | 307 | break; |
297 | } else if (start < entry->end) { /* start > entry->start */ | 308 | } else if (start < entry->end) { /* start > entry->start */ |
@@ -299,7 +310,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
299 | if (!err) { | 310 | if (!err) { |
300 | dprintk("Overlap at 0x%Lx-0x%Lx\n", | 311 | dprintk("Overlap at 0x%Lx-0x%Lx\n", |
301 | entry->start, entry->end); | 312 | entry->start, entry->end); |
302 | where = &entry->nd; | 313 | cached_entry = list_entry(entry->nd.prev, |
314 | struct memtype, nd); | ||
315 | |||
316 | /* | ||
317 | * Move to right position in the linked | ||
318 | * list to add this new entry | ||
319 | */ | ||
320 | list_for_each_entry_continue(entry, | ||
321 | &memtype_list, nd) { | ||
322 | if (start <= entry->start) { | ||
323 | where = entry->nd.prev; | ||
324 | break; | ||
325 | } | ||
326 | } | ||
303 | } | 327 | } |
304 | break; | 328 | break; |
305 | } | 329 | } |
@@ -314,6 +338,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
314 | return err; | 338 | return err; |
315 | } | 339 | } |
316 | 340 | ||
341 | cached_start = start; | ||
342 | |||
317 | if (where) | 343 | if (where) |
318 | list_add(&new->nd, where); | 344 | list_add(&new->nd, where); |
319 | else | 345 | else |
@@ -343,6 +369,9 @@ int free_memtype(u64 start, u64 end) | |||
343 | spin_lock(&memtype_lock); | 369 | spin_lock(&memtype_lock); |
344 | list_for_each_entry(entry, &memtype_list, nd) { | 370 | list_for_each_entry(entry, &memtype_list, nd) { |
345 | if (entry->start == start && entry->end == end) { | 371 | if (entry->start == start && entry->end == end) { |
372 | if (cached_entry == entry || cached_start == start) | ||
373 | cached_entry = NULL; | ||
374 | |||
346 | list_del(&entry->nd); | 375 | list_del(&entry->nd); |
347 | kfree(entry); | 376 | kfree(entry); |
348 | err = 0; | 377 | err = 0; |
@@ -361,14 +390,6 @@ int free_memtype(u64 start, u64 end) | |||
361 | } | 390 | } |
362 | 391 | ||
363 | 392 | ||
364 | /* | ||
365 | * /dev/mem mmap interface. The memtype used for mapping varies: | ||
366 | * - Use UC for mappings with O_SYNC flag | ||
367 | * - Without O_SYNC flag, if there is any conflict in reserve_memtype, | ||
368 | * inherit the memtype from existing mapping. | ||
369 | * - Else use UC_MINUS memtype (for backward compatibility with existing | ||
370 | * X drivers. | ||
371 | */ | ||
372 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | 393 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
373 | unsigned long size, pgprot_t vma_prot) | 394 | unsigned long size, pgprot_t vma_prot) |
374 | { | 395 | { |
@@ -406,14 +427,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, | |||
406 | unsigned long size, pgprot_t *vma_prot) | 427 | unsigned long size, pgprot_t *vma_prot) |
407 | { | 428 | { |
408 | u64 offset = ((u64) pfn) << PAGE_SHIFT; | 429 | u64 offset = ((u64) pfn) << PAGE_SHIFT; |
409 | unsigned long flags = _PAGE_CACHE_UC_MINUS; | 430 | unsigned long flags = -1; |
410 | int retval; | 431 | int retval; |
411 | 432 | ||
412 | if (!range_is_allowed(pfn, size)) | 433 | if (!range_is_allowed(pfn, size)) |
413 | return 0; | 434 | return 0; |
414 | 435 | ||
415 | if (file->f_flags & O_SYNC) { | 436 | if (file->f_flags & O_SYNC) { |
416 | flags = _PAGE_CACHE_UC; | 437 | flags = _PAGE_CACHE_UC_MINUS; |
417 | } | 438 | } |
418 | 439 | ||
419 | #ifdef CONFIG_X86_32 | 440 | #ifdef CONFIG_X86_32 |
@@ -436,13 +457,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, | |||
436 | #endif | 457 | #endif |
437 | 458 | ||
438 | /* | 459 | /* |
439 | * With O_SYNC, we can only take UC mapping. Fail if we cannot. | 460 | * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot. |
461 | * | ||
440 | * Without O_SYNC, we want to get | 462 | * Without O_SYNC, we want to get |
441 | * - WB for WB-able memory and no other conflicting mappings | 463 | * - WB for WB-able memory and no other conflicting mappings |
442 | * - UC_MINUS for non-WB-able memory with no other conflicting mappings | 464 | * - UC_MINUS for non-WB-able memory with no other conflicting mappings |
443 | * - Inherit from conflicting mappings otherwise | 465 | * - Inherit from conflicting mappings otherwise |
444 | */ | 466 | */ |
445 | if (flags != _PAGE_CACHE_UC_MINUS) { | 467 | if (flags != -1) { |
446 | retval = reserve_memtype(offset, offset + size, flags, NULL); | 468 | retval = reserve_memtype(offset, offset + size, flags, NULL); |
447 | } else { | 469 | } else { |
448 | retval = reserve_memtype(offset, offset + size, -1, &flags); | 470 | retval = reserve_memtype(offset, offset + size, -1, &flags); |
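
The pat.c hunks above do two things: reserve_memtype() now keeps a one-entry hint (cached_entry/cached_start) so callers that reserve ranges in increasing address order do not rescan the sorted memtype list from its head, and free_memtype() drops that hint whenever the cached node is deleted. A minimal user-space sketch of the hint idea, with conflict checking omitted and illustrative names (range_insert, cached) standing in for the kernel's list_head machinery:

#include <stdio.h>
#include <stdlib.h>

struct range {
	unsigned long long start, end;
	struct range *next;
};

static struct range *head;	/* sorted by start */
static struct range *cached;	/* last insertion point, may be NULL */

/* Insert [start, end) keeping the list sorted; resume the scan from the
 * cached node when the new range lies at or beyond it, mirroring the
 * cached_entry/cached_start optimisation in reserve_memtype(). */
static void range_insert(unsigned long long start, unsigned long long end)
{
	struct range *new = malloc(sizeof(*new));
	struct range *pos = head, *prev = NULL;

	new->start = start;
	new->end = end;

	if (cached && cached->start <= start) {	/* resume after the hint */
		prev = cached;
		pos = cached->next;
	}
	while (pos && pos->start < start) {
		prev = pos;
		pos = pos->next;
	}
	new->next = pos;
	if (prev)
		prev->next = new;
	else
		head = new;
	cached = new;				/* remember for next time */
}

/* Remove an exact match and invalidate the hint if it pointed there,
 * just as free_memtype() clears cached_entry. */
static void range_remove(unsigned long long start, unsigned long long end)
{
	struct range **pp = &head;

	while (*pp) {
		struct range *r = *pp;
		if (r->start == start && r->end == end) {
			if (cached == r)
				cached = NULL;
			*pp = r->next;
			free(r);
			return;
		}
		pp = &r->next;
	}
}

int main(void)
{
	range_insert(0x1000, 0x2000);
	range_insert(0x3000, 0x4000);	/* search resumes from the hint */
	range_remove(0x3000, 0x4000);
	printf("first range starts at %#llx\n", head->start);
	return 0;
}
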
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index d50302774fe2..86f2ffc43c3d 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -63,10 +63,8 @@ static inline void pgd_list_del(pgd_t *pgd) | |||
63 | #define UNSHARED_PTRS_PER_PGD \ | 63 | #define UNSHARED_PTRS_PER_PGD \ |
64 | (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) | 64 | (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) |
65 | 65 | ||
66 | static void pgd_ctor(void *p) | 66 | static void pgd_ctor(pgd_t *pgd) |
67 | { | 67 | { |
68 | pgd_t *pgd = p; | ||
69 | |||
70 | /* If the pgd points to a shared pagetable level (either the | 68 | /* If the pgd points to a shared pagetable level (either the |
71 | ptes in non-PAE, or shared PMD in PAE), then just copy the | 69 | ptes in non-PAE, or shared PMD in PAE), then just copy the |
72 | references from swapper_pg_dir. */ | 70 | references from swapper_pg_dir. */ |
@@ -87,7 +85,7 @@ static void pgd_ctor(void *p) | |||
87 | pgd_list_add(pgd); | 85 | pgd_list_add(pgd); |
88 | } | 86 | } |
89 | 87 | ||
90 | static void pgd_dtor(void *pgd) | 88 | static void pgd_dtor(pgd_t *pgd) |
91 | { | 89 | { |
92 | unsigned long flags; /* can be called from interrupt context */ | 90 | unsigned long flags; /* can be called from interrupt context */ |
93 | 91 | ||
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index cab0abbd1ebe..0951db9ee519 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c | |||
@@ -123,7 +123,8 @@ static int __init parse_vmalloc(char *arg) | |||
123 | if (!arg) | 123 | if (!arg) |
124 | return -EINVAL; | 124 | return -EINVAL; |
125 | 125 | ||
126 | __VMALLOC_RESERVE = memparse(arg, &arg); | 126 | /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/ |
127 | __VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET; | ||
127 | return 0; | 128 | return 0; |
128 | } | 129 | } |
129 | early_param("vmalloc", parse_vmalloc); | 130 | early_param("vmalloc", parse_vmalloc); |
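
The one-line pgtable_32.c change means the value passed via vmalloc= is now the usable size: the kernel adds the guard hole on top rather than carving it out of the user's request. A tiny worked example of the arithmetic; GUARD_HOLE stands in for VMALLOC_OFFSET (assumed 8 MB here), and this is not the kernel parser itself:

#include <stdio.h>

#define GUARD_HOLE (8UL << 20)		/* stand-in for VMALLOC_OFFSET */

int main(void)
{
	unsigned long requested = 128UL << 20;		 /* "vmalloc=128M" */
	unsigned long reserved = requested + GUARD_HOLE; /* what gets set aside */

	printf("user asked for %lu MB usable, kernel reserves %lu MB\n",
	       requested >> 20, reserved >> 20);
	return 0;
}
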
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c index 1eb2973a301c..16ae70fc57e7 100644 --- a/arch/x86/mm/srat_32.c +++ b/arch/x86/mm/srat_32.c | |||
@@ -178,7 +178,7 @@ void acpi_numa_arch_fixup(void) | |||
178 | * start of the node, and that the current "end" address is after | 178 | * start of the node, and that the current "end" address is after |
179 | * the previous one. | 179 | * the previous one. |
180 | */ | 180 | */ |
181 | static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk) | 181 | static __init int node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk) |
182 | { | 182 | { |
183 | /* | 183 | /* |
184 | * Only add present memory as told by the e820. | 184 | * Only add present memory as told by the e820. |
@@ -189,10 +189,10 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c | |||
189 | if (memory_chunk->start_pfn >= max_pfn) { | 189 | if (memory_chunk->start_pfn >= max_pfn) { |
190 | printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n", | 190 | printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n", |
191 | memory_chunk->start_pfn, memory_chunk->end_pfn); | 191 | memory_chunk->start_pfn, memory_chunk->end_pfn); |
192 | return; | 192 | return -1; |
193 | } | 193 | } |
194 | if (memory_chunk->nid != nid) | 194 | if (memory_chunk->nid != nid) |
195 | return; | 195 | return -1; |
196 | 196 | ||
197 | if (!node_has_online_mem(nid)) | 197 | if (!node_has_online_mem(nid)) |
198 | node_start_pfn[nid] = memory_chunk->start_pfn; | 198 | node_start_pfn[nid] = memory_chunk->start_pfn; |
@@ -202,6 +202,8 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c | |||
202 | 202 | ||
203 | if (node_end_pfn[nid] < memory_chunk->end_pfn) | 203 | if (node_end_pfn[nid] < memory_chunk->end_pfn) |
204 | node_end_pfn[nid] = memory_chunk->end_pfn; | 204 | node_end_pfn[nid] = memory_chunk->end_pfn; |
205 | |||
206 | return 0; | ||
205 | } | 207 | } |
206 | 208 | ||
207 | int __init get_memcfg_from_srat(void) | 209 | int __init get_memcfg_from_srat(void) |
@@ -259,7 +261,9 @@ int __init get_memcfg_from_srat(void) | |||
259 | printk(KERN_DEBUG | 261 | printk(KERN_DEBUG |
260 | "chunk %d nid %d start_pfn %08lx end_pfn %08lx\n", | 262 | "chunk %d nid %d start_pfn %08lx end_pfn %08lx\n", |
261 | j, chunk->nid, chunk->start_pfn, chunk->end_pfn); | 263 | j, chunk->nid, chunk->start_pfn, chunk->end_pfn); |
262 | node_read_chunk(chunk->nid, chunk); | 264 | if (node_read_chunk(chunk->nid, chunk)) |
265 | continue; | ||
266 | |||
263 | e820_register_active_regions(chunk->nid, chunk->start_pfn, | 267 | e820_register_active_regions(chunk->nid, chunk->start_pfn, |
264 | min(chunk->end_pfn, max_pfn)); | 268 | min(chunk->end_pfn, max_pfn)); |
265 | } | 269 | } |
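
node_read_chunk() used to be void, so chunks it rejected (beyond max_pfn, or belonging to another node) were still handed to e820_register_active_regions() by the caller. Returning a status lets the loop skip them. A compact sketch of the validate-then-register pattern; the chunk structure, MAX_PFN value and register_chunk() are made-up stand-ins for the SRAT code:

#include <stdio.h>

struct chunk { unsigned long start_pfn, end_pfn; int nid; };

#define MAX_PFN		0x100000UL	/* pretend end of usable memory */

/* Return 0 if the chunk should be registered for @nid, -1 to skip it. */
static int validate_chunk(int nid, const struct chunk *c)
{
	if (c->start_pfn >= MAX_PFN)
		return -1;		/* beyond usable memory */
	if (c->nid != nid)
		return -1;		/* belongs to another node */
	return 0;
}

static void register_chunk(const struct chunk *c)
{
	printf("registering pfns %#lx-%#lx for node %d\n",
	       c->start_pfn, c->end_pfn, c->nid);
}

int main(void)
{
	struct chunk chunks[] = {
		{ 0x00000,  0x40000,  0 },
		{ 0x200000, 0x240000, 0 },	/* above MAX_PFN: skipped */
		{ 0x40000,  0x80000,  1 },	/* wrong node: skipped */
	};
	for (unsigned i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
		if (validate_chunk(0, &chunks[i]))
			continue;	/* mirrors the new "continue" in get_memcfg_from_srat() */
		register_chunk(&chunks[i]);
	}
	return 0;
}
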
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 3f90289410e6..8a5f1614a3d5 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/moduleparam.h> | 16 | #include <linux/moduleparam.h> |
17 | #include <linux/kdebug.h> | 17 | #include <linux/kdebug.h> |
18 | #include <linux/cpu.h> | ||
18 | #include <asm/nmi.h> | 19 | #include <asm/nmi.h> |
19 | #include <asm/msr.h> | 20 | #include <asm/msr.h> |
20 | #include <asm/apic.h> | 21 | #include <asm/apic.h> |
@@ -28,23 +29,48 @@ static DEFINE_PER_CPU(unsigned long, saved_lvtpc); | |||
28 | 29 | ||
29 | static int nmi_start(void); | 30 | static int nmi_start(void); |
30 | static void nmi_stop(void); | 31 | static void nmi_stop(void); |
32 | static void nmi_cpu_start(void *dummy); | ||
33 | static void nmi_cpu_stop(void *dummy); | ||
31 | 34 | ||
32 | /* 0 == registered but off, 1 == registered and on */ | 35 | /* 0 == registered but off, 1 == registered and on */ |
33 | static int nmi_enabled = 0; | 36 | static int nmi_enabled = 0; |
34 | 37 | ||
38 | #ifdef CONFIG_SMP | ||
39 | static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action, | ||
40 | void *data) | ||
41 | { | ||
42 | int cpu = (unsigned long)data; | ||
43 | switch (action) { | ||
44 | case CPU_DOWN_FAILED: | ||
45 | case CPU_ONLINE: | ||
46 | smp_call_function_single(cpu, nmi_cpu_start, NULL, 0); | ||
47 | break; | ||
48 | case CPU_DOWN_PREPARE: | ||
49 | smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1); | ||
50 | break; | ||
51 | } | ||
52 | return NOTIFY_DONE; | ||
53 | } | ||
54 | |||
55 | static struct notifier_block oprofile_cpu_nb = { | ||
56 | .notifier_call = oprofile_cpu_notifier | ||
57 | }; | ||
58 | #endif | ||
59 | |||
35 | #ifdef CONFIG_PM | 60 | #ifdef CONFIG_PM |
36 | 61 | ||
37 | static int nmi_suspend(struct sys_device *dev, pm_message_t state) | 62 | static int nmi_suspend(struct sys_device *dev, pm_message_t state) |
38 | { | 63 | { |
64 | /* Only one CPU left, just stop that one */ | ||
39 | if (nmi_enabled == 1) | 65 | if (nmi_enabled == 1) |
40 | nmi_stop(); | 66 | nmi_cpu_stop(NULL); |
41 | return 0; | 67 | return 0; |
42 | } | 68 | } |
43 | 69 | ||
44 | static int nmi_resume(struct sys_device *dev) | 70 | static int nmi_resume(struct sys_device *dev) |
45 | { | 71 | { |
46 | if (nmi_enabled == 1) | 72 | if (nmi_enabled == 1) |
47 | nmi_start(); | 73 | nmi_cpu_start(NULL); |
48 | return 0; | 74 | return 0; |
49 | } | 75 | } |
50 | 76 | ||
@@ -269,10 +295,12 @@ static void nmi_cpu_shutdown(void *dummy) | |||
269 | 295 | ||
270 | static void nmi_shutdown(void) | 296 | static void nmi_shutdown(void) |
271 | { | 297 | { |
272 | struct op_msrs *msrs = &get_cpu_var(cpu_msrs); | 298 | struct op_msrs *msrs; |
299 | |||
273 | nmi_enabled = 0; | 300 | nmi_enabled = 0; |
274 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); | 301 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); |
275 | unregister_die_notifier(&profile_exceptions_nb); | 302 | unregister_die_notifier(&profile_exceptions_nb); |
303 | msrs = &get_cpu_var(cpu_msrs); | ||
276 | model->shutdown(msrs); | 304 | model->shutdown(msrs); |
277 | free_msrs(); | 305 | free_msrs(); |
278 | put_cpu_var(cpu_msrs); | 306 | put_cpu_var(cpu_msrs); |
@@ -463,6 +491,9 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
463 | } | 491 | } |
464 | 492 | ||
465 | init_sysfs(); | 493 | init_sysfs(); |
494 | #ifdef CONFIG_SMP | ||
495 | register_cpu_notifier(&oprofile_cpu_nb); | ||
496 | #endif | ||
466 | using_nmi = 1; | 497 | using_nmi = 1; |
467 | ops->create_files = nmi_create_files; | 498 | ops->create_files = nmi_create_files; |
468 | ops->setup = nmi_setup; | 499 | ops->setup = nmi_setup; |
@@ -476,6 +507,10 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
476 | 507 | ||
477 | void op_nmi_exit(void) | 508 | void op_nmi_exit(void) |
478 | { | 509 | { |
479 | if (using_nmi) | 510 | if (using_nmi) { |
480 | exit_sysfs(); | 511 | exit_sysfs(); |
512 | #ifdef CONFIG_SMP | ||
513 | unregister_cpu_notifier(&oprofile_cpu_nb); | ||
514 | #endif | ||
515 | } | ||
481 | } | 516 | } |
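
The nmi_int.c change hooks CPU hotplug so that a CPU brought online while profiling is running gets its counters started, and one about to go down gets them stopped first. The sketch below models only that dispatch in user space: a callback receives ONLINE/DOWN_PREPARE/DOWN_FAILED events and flips per-CPU state. The event enum and per-CPU array are illustrative, not the kernel notifier API:

#include <stdio.h>

#define NR_CPUS 4

enum hotplug_event { EV_ONLINE, EV_DOWN_PREPARE, EV_DOWN_FAILED };

static int profiling_enabled = 1;	/* mirrors nmi_enabled */
static int counters_running[NR_CPUS];

/* Analogue of oprofile_cpu_notifier(): start counters when a CPU comes
 * (back) online, stop them before it goes away. */
static void cpu_notifier(enum hotplug_event ev, int cpu)
{
	if (!profiling_enabled)
		return;
	switch (ev) {
	case EV_ONLINE:
	case EV_DOWN_FAILED:
		counters_running[cpu] = 1;
		printf("cpu%d: counters started\n", cpu);
		break;
	case EV_DOWN_PREPARE:
		counters_running[cpu] = 0;
		printf("cpu%d: counters stopped\n", cpu);
		break;
	}
}

int main(void)
{
	cpu_notifier(EV_ONLINE, 2);		/* hot-add while profiling */
	cpu_notifier(EV_DOWN_PREPARE, 2);	/* orderly offline */
	cpu_notifier(EV_DOWN_FAILED, 2);	/* offline aborted: restart */
	return 0;
}
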
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 56b4757a1f47..43ac5af338d8 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c | |||
@@ -10,11 +10,12 @@ | |||
10 | 10 | ||
11 | #include <linux/oprofile.h> | 11 | #include <linux/oprofile.h> |
12 | #include <linux/smp.h> | 12 | #include <linux/smp.h> |
13 | #include <linux/ptrace.h> | ||
14 | #include <linux/nmi.h> | ||
13 | #include <asm/msr.h> | 15 | #include <asm/msr.h> |
14 | #include <asm/ptrace.h> | ||
15 | #include <asm/fixmap.h> | 16 | #include <asm/fixmap.h> |
16 | #include <asm/apic.h> | 17 | #include <asm/apic.h> |
17 | #include <asm/nmi.h> | 18 | |
18 | 19 | ||
19 | #include "op_x86_model.h" | 20 | #include "op_x86_model.h" |
20 | #include "op_counter.h" | 21 | #include "op_counter.h" |
@@ -40,7 +41,7 @@ static unsigned int num_controls = NUM_CONTROLS_NON_HT; | |||
40 | static inline void setup_num_counters(void) | 41 | static inline void setup_num_counters(void) |
41 | { | 42 | { |
42 | #ifdef CONFIG_SMP | 43 | #ifdef CONFIG_SMP |
43 | if (smp_num_siblings == 2){ | 44 | if (smp_num_siblings == 2) { |
44 | num_counters = NUM_COUNTERS_HT2; | 45 | num_counters = NUM_COUNTERS_HT2; |
45 | num_controls = NUM_CONTROLS_HT2; | 46 | num_controls = NUM_CONTROLS_HT2; |
46 | } | 47 | } |
@@ -86,7 +87,7 @@ struct p4_event_binding { | |||
86 | #define CTR_FLAME_2 (1 << 6) | 87 | #define CTR_FLAME_2 (1 << 6) |
87 | #define CTR_IQ_5 (1 << 7) | 88 | #define CTR_IQ_5 (1 << 7) |
88 | 89 | ||
89 | static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = { | 90 | static struct p4_counter_binding p4_counters[NUM_COUNTERS_NON_HT] = { |
90 | { CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0 }, | 91 | { CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0 }, |
91 | { CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0 }, | 92 | { CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0 }, |
92 | { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 }, | 93 | { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 }, |
@@ -97,32 +98,32 @@ static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = { | |||
97 | { CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5 } | 98 | { CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5 } |
98 | }; | 99 | }; |
99 | 100 | ||
100 | #define NUM_UNUSED_CCCRS NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT | 101 | #define NUM_UNUSED_CCCRS (NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT) |
101 | 102 | ||
102 | /* p4 event codes in libop/op_event.h are indices into this table. */ | 103 | /* p4 event codes in libop/op_event.h are indices into this table. */ |
103 | 104 | ||
104 | static struct p4_event_binding p4_events[NUM_EVENTS] = { | 105 | static struct p4_event_binding p4_events[NUM_EVENTS] = { |
105 | 106 | ||
106 | { /* BRANCH_RETIRED */ | 107 | { /* BRANCH_RETIRED */ |
107 | 0x05, 0x06, | 108 | 0x05, 0x06, |
108 | { {CTR_IQ_4, MSR_P4_CRU_ESCR2}, | 109 | { {CTR_IQ_4, MSR_P4_CRU_ESCR2}, |
109 | {CTR_IQ_5, MSR_P4_CRU_ESCR3} } | 110 | {CTR_IQ_5, MSR_P4_CRU_ESCR3} } |
110 | }, | 111 | }, |
111 | 112 | ||
112 | { /* MISPRED_BRANCH_RETIRED */ | 113 | { /* MISPRED_BRANCH_RETIRED */ |
113 | 0x04, 0x03, | 114 | 0x04, 0x03, |
114 | { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, | 115 | { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, |
115 | { CTR_IQ_5, MSR_P4_CRU_ESCR1} } | 116 | { CTR_IQ_5, MSR_P4_CRU_ESCR1} } |
116 | }, | 117 | }, |
117 | 118 | ||
118 | { /* TC_DELIVER_MODE */ | 119 | { /* TC_DELIVER_MODE */ |
119 | 0x01, 0x01, | 120 | 0x01, 0x01, |
120 | { { CTR_MS_0, MSR_P4_TC_ESCR0}, | 121 | { { CTR_MS_0, MSR_P4_TC_ESCR0}, |
121 | { CTR_MS_2, MSR_P4_TC_ESCR1} } | 122 | { CTR_MS_2, MSR_P4_TC_ESCR1} } |
122 | }, | 123 | }, |
123 | 124 | ||
124 | { /* BPU_FETCH_REQUEST */ | 125 | { /* BPU_FETCH_REQUEST */ |
125 | 0x00, 0x03, | 126 | 0x00, 0x03, |
126 | { { CTR_BPU_0, MSR_P4_BPU_ESCR0}, | 127 | { { CTR_BPU_0, MSR_P4_BPU_ESCR0}, |
127 | { CTR_BPU_2, MSR_P4_BPU_ESCR1} } | 128 | { CTR_BPU_2, MSR_P4_BPU_ESCR1} } |
128 | }, | 129 | }, |
@@ -146,7 +147,7 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
146 | }, | 147 | }, |
147 | 148 | ||
148 | { /* LOAD_PORT_REPLAY */ | 149 | { /* LOAD_PORT_REPLAY */ |
149 | 0x02, 0x04, | 150 | 0x02, 0x04, |
150 | { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, | 151 | { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, |
151 | { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} } | 152 | { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} } |
152 | }, | 153 | }, |
@@ -170,43 +171,43 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
170 | }, | 171 | }, |
171 | 172 | ||
172 | { /* BSQ_CACHE_REFERENCE */ | 173 | { /* BSQ_CACHE_REFERENCE */ |
173 | 0x07, 0x0c, | 174 | 0x07, 0x0c, |
174 | { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, | 175 | { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, |
175 | { CTR_BPU_2, MSR_P4_BSU_ESCR1} } | 176 | { CTR_BPU_2, MSR_P4_BSU_ESCR1} } |
176 | }, | 177 | }, |
177 | 178 | ||
178 | { /* IOQ_ALLOCATION */ | 179 | { /* IOQ_ALLOCATION */ |
179 | 0x06, 0x03, | 180 | 0x06, 0x03, |
180 | { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, | 181 | { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, |
181 | { 0, 0 } } | 182 | { 0, 0 } } |
182 | }, | 183 | }, |
183 | 184 | ||
184 | { /* IOQ_ACTIVE_ENTRIES */ | 185 | { /* IOQ_ACTIVE_ENTRIES */ |
185 | 0x06, 0x1a, | 186 | 0x06, 0x1a, |
186 | { { CTR_BPU_2, MSR_P4_FSB_ESCR1}, | 187 | { { CTR_BPU_2, MSR_P4_FSB_ESCR1}, |
187 | { 0, 0 } } | 188 | { 0, 0 } } |
188 | }, | 189 | }, |
189 | 190 | ||
190 | { /* FSB_DATA_ACTIVITY */ | 191 | { /* FSB_DATA_ACTIVITY */ |
191 | 0x06, 0x17, | 192 | 0x06, 0x17, |
192 | { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, | 193 | { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, |
193 | { CTR_BPU_2, MSR_P4_FSB_ESCR1} } | 194 | { CTR_BPU_2, MSR_P4_FSB_ESCR1} } |
194 | }, | 195 | }, |
195 | 196 | ||
196 | { /* BSQ_ALLOCATION */ | 197 | { /* BSQ_ALLOCATION */ |
197 | 0x07, 0x05, | 198 | 0x07, 0x05, |
198 | { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, | 199 | { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, |
199 | { 0, 0 } } | 200 | { 0, 0 } } |
200 | }, | 201 | }, |
201 | 202 | ||
202 | { /* BSQ_ACTIVE_ENTRIES */ | 203 | { /* BSQ_ACTIVE_ENTRIES */ |
203 | 0x07, 0x06, | 204 | 0x07, 0x06, |
204 | { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */}, | 205 | { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */}, |
205 | { 0, 0 } } | 206 | { 0, 0 } } |
206 | }, | 207 | }, |
207 | 208 | ||
208 | { /* X87_ASSIST */ | 209 | { /* X87_ASSIST */ |
209 | 0x05, 0x03, | 210 | 0x05, 0x03, |
210 | { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, | 211 | { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, |
211 | { CTR_IQ_5, MSR_P4_CRU_ESCR3} } | 212 | { CTR_IQ_5, MSR_P4_CRU_ESCR3} } |
212 | }, | 213 | }, |
@@ -216,21 +217,21 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
216 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, | 217 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, |
217 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } | 218 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } |
218 | }, | 219 | }, |
219 | 220 | ||
220 | { /* PACKED_SP_UOP */ | 221 | { /* PACKED_SP_UOP */ |
221 | 0x01, 0x08, | 222 | 0x01, 0x08, |
222 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, | 223 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, |
223 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } | 224 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } |
224 | }, | 225 | }, |
225 | 226 | ||
226 | { /* PACKED_DP_UOP */ | 227 | { /* PACKED_DP_UOP */ |
227 | 0x01, 0x0c, | 228 | 0x01, 0x0c, |
228 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, | 229 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, |
229 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } | 230 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } |
230 | }, | 231 | }, |
231 | 232 | ||
232 | { /* SCALAR_SP_UOP */ | 233 | { /* SCALAR_SP_UOP */ |
233 | 0x01, 0x0a, | 234 | 0x01, 0x0a, |
234 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, | 235 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, |
235 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } | 236 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } |
236 | }, | 237 | }, |
@@ -242,31 +243,31 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
242 | }, | 243 | }, |
243 | 244 | ||
244 | { /* 64BIT_MMX_UOP */ | 245 | { /* 64BIT_MMX_UOP */ |
245 | 0x01, 0x02, | 246 | 0x01, 0x02, |
246 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, | 247 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, |
247 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } | 248 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } |
248 | }, | 249 | }, |
249 | 250 | ||
250 | { /* 128BIT_MMX_UOP */ | 251 | { /* 128BIT_MMX_UOP */ |
251 | 0x01, 0x1a, | 252 | 0x01, 0x1a, |
252 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, | 253 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, |
253 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } | 254 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } |
254 | }, | 255 | }, |
255 | 256 | ||
256 | { /* X87_FP_UOP */ | 257 | { /* X87_FP_UOP */ |
257 | 0x01, 0x04, | 258 | 0x01, 0x04, |
258 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, | 259 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, |
259 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } | 260 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } |
260 | }, | 261 | }, |
261 | 262 | ||
262 | { /* X87_SIMD_MOVES_UOP */ | 263 | { /* X87_SIMD_MOVES_UOP */ |
263 | 0x01, 0x2e, | 264 | 0x01, 0x2e, |
264 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, | 265 | { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, |
265 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } | 266 | { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } |
266 | }, | 267 | }, |
267 | 268 | ||
268 | { /* MACHINE_CLEAR */ | 269 | { /* MACHINE_CLEAR */ |
269 | 0x05, 0x02, | 270 | 0x05, 0x02, |
270 | { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, | 271 | { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, |
271 | { CTR_IQ_5, MSR_P4_CRU_ESCR3} } | 272 | { CTR_IQ_5, MSR_P4_CRU_ESCR3} } |
272 | }, | 273 | }, |
@@ -276,9 +277,9 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
276 | { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, | 277 | { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, |
277 | { CTR_BPU_2, MSR_P4_FSB_ESCR1} } | 278 | { CTR_BPU_2, MSR_P4_FSB_ESCR1} } |
278 | }, | 279 | }, |
279 | 280 | ||
280 | { /* TC_MS_XFER */ | 281 | { /* TC_MS_XFER */ |
281 | 0x00, 0x05, | 282 | 0x00, 0x05, |
282 | { { CTR_MS_0, MSR_P4_MS_ESCR0}, | 283 | { { CTR_MS_0, MSR_P4_MS_ESCR0}, |
283 | { CTR_MS_2, MSR_P4_MS_ESCR1} } | 284 | { CTR_MS_2, MSR_P4_MS_ESCR1} } |
284 | }, | 285 | }, |
@@ -308,7 +309,7 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
308 | }, | 309 | }, |
309 | 310 | ||
310 | { /* INSTR_RETIRED */ | 311 | { /* INSTR_RETIRED */ |
311 | 0x04, 0x02, | 312 | 0x04, 0x02, |
312 | { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, | 313 | { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, |
313 | { CTR_IQ_5, MSR_P4_CRU_ESCR1} } | 314 | { CTR_IQ_5, MSR_P4_CRU_ESCR1} } |
314 | }, | 315 | }, |
@@ -319,14 +320,14 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
319 | { CTR_IQ_5, MSR_P4_CRU_ESCR1} } | 320 | { CTR_IQ_5, MSR_P4_CRU_ESCR1} } |
320 | }, | 321 | }, |
321 | 322 | ||
322 | { /* UOP_TYPE */ | 323 | { /* UOP_TYPE */ |
323 | 0x02, 0x02, | 324 | 0x02, 0x02, |
324 | { { CTR_IQ_4, MSR_P4_RAT_ESCR0}, | 325 | { { CTR_IQ_4, MSR_P4_RAT_ESCR0}, |
325 | { CTR_IQ_5, MSR_P4_RAT_ESCR1} } | 326 | { CTR_IQ_5, MSR_P4_RAT_ESCR1} } |
326 | }, | 327 | }, |
327 | 328 | ||
328 | { /* RETIRED_MISPRED_BRANCH_TYPE */ | 329 | { /* RETIRED_MISPRED_BRANCH_TYPE */ |
329 | 0x02, 0x05, | 330 | 0x02, 0x05, |
330 | { { CTR_MS_0, MSR_P4_TBPU_ESCR0}, | 331 | { { CTR_MS_0, MSR_P4_TBPU_ESCR0}, |
331 | { CTR_MS_2, MSR_P4_TBPU_ESCR1} } | 332 | { CTR_MS_2, MSR_P4_TBPU_ESCR1} } |
332 | }, | 333 | }, |
@@ -349,8 +350,8 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
349 | #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) | 350 | #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) |
350 | #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) | 351 | #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) |
351 | #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) | 352 | #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) |
352 | #define ESCR_READ(escr,high,ev,i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) | 353 | #define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0) |
353 | #define ESCR_WRITE(escr,high,ev,i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) | 354 | #define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0) |
354 | 355 | ||
355 | #define CCCR_RESERVED_BITS 0x38030FFF | 356 | #define CCCR_RESERVED_BITS 0x38030FFF |
356 | #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) | 357 | #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) |
@@ -360,15 +361,15 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { | |||
360 | #define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) | 361 | #define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) |
361 | #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) | 362 | #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) |
362 | #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) | 363 | #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) |
363 | #define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) | 364 | #define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0) |
364 | #define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) | 365 | #define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0) |
365 | #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) | 366 | #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) |
366 | #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) | 367 | #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) |
367 | 368 | ||
368 | #define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0) | 369 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) |
369 | #define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0) | 370 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) |
370 | #define CTR_READ(l,h,i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0) | 371 | #define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0) |
371 | #define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0) | 372 | #define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0) |
372 | #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) | 373 | #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) |
373 | 374 | ||
374 | 375 | ||
@@ -380,7 +381,7 @@ static unsigned int get_stagger(void) | |||
380 | #ifdef CONFIG_SMP | 381 | #ifdef CONFIG_SMP |
381 | int cpu = smp_processor_id(); | 382 | int cpu = smp_processor_id(); |
382 | return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu))); | 383 | return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu))); |
383 | #endif | 384 | #endif |
384 | return 0; | 385 | return 0; |
385 | } | 386 | } |
386 | 387 | ||
@@ -395,25 +396,23 @@ static unsigned long reset_value[NUM_COUNTERS_NON_HT]; | |||
395 | 396 | ||
396 | static void p4_fill_in_addresses(struct op_msrs * const msrs) | 397 | static void p4_fill_in_addresses(struct op_msrs * const msrs) |
397 | { | 398 | { |
398 | unsigned int i; | 399 | unsigned int i; |
399 | unsigned int addr, cccraddr, stag; | 400 | unsigned int addr, cccraddr, stag; |
400 | 401 | ||
401 | setup_num_counters(); | 402 | setup_num_counters(); |
402 | stag = get_stagger(); | 403 | stag = get_stagger(); |
403 | 404 | ||
404 | /* initialize some registers */ | 405 | /* initialize some registers */ |
405 | for (i = 0; i < num_counters; ++i) { | 406 | for (i = 0; i < num_counters; ++i) |
406 | msrs->counters[i].addr = 0; | 407 | msrs->counters[i].addr = 0; |
407 | } | 408 | for (i = 0; i < num_controls; ++i) |
408 | for (i = 0; i < num_controls; ++i) { | ||
409 | msrs->controls[i].addr = 0; | 409 | msrs->controls[i].addr = 0; |
410 | } | 410 | |
411 | |||
412 | /* the counter & cccr registers we pay attention to */ | 411 | /* the counter & cccr registers we pay attention to */ |
413 | for (i = 0; i < num_counters; ++i) { | 412 | for (i = 0; i < num_counters; ++i) { |
414 | addr = p4_counters[VIRT_CTR(stag, i)].counter_address; | 413 | addr = p4_counters[VIRT_CTR(stag, i)].counter_address; |
415 | cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address; | 414 | cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address; |
416 | if (reserve_perfctr_nmi(addr)){ | 415 | if (reserve_perfctr_nmi(addr)) { |
417 | msrs->counters[i].addr = addr; | 416 | msrs->counters[i].addr = addr; |
418 | msrs->controls[i].addr = cccraddr; | 417 | msrs->controls[i].addr = cccraddr; |
419 | } | 418 | } |
@@ -447,22 +446,22 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs) | |||
447 | if (reserve_evntsel_nmi(addr)) | 446 | if (reserve_evntsel_nmi(addr)) |
448 | msrs->controls[i].addr = addr; | 447 | msrs->controls[i].addr = addr; |
449 | } | 448 | } |
450 | 449 | ||
451 | for (addr = MSR_P4_MS_ESCR0 + stag; | 450 | for (addr = MSR_P4_MS_ESCR0 + stag; |
452 | addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) { | 451 | addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) { |
453 | if (reserve_evntsel_nmi(addr)) | 452 | if (reserve_evntsel_nmi(addr)) |
454 | msrs->controls[i].addr = addr; | 453 | msrs->controls[i].addr = addr; |
455 | } | 454 | } |
456 | 455 | ||
457 | for (addr = MSR_P4_IX_ESCR0 + stag; | 456 | for (addr = MSR_P4_IX_ESCR0 + stag; |
458 | addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) { | 457 | addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) { |
459 | if (reserve_evntsel_nmi(addr)) | 458 | if (reserve_evntsel_nmi(addr)) |
460 | msrs->controls[i].addr = addr; | 459 | msrs->controls[i].addr = addr; |
461 | } | 460 | } |
462 | 461 | ||
463 | /* there are 2 remaining non-contiguously located ESCRs */ | 462 | /* there are 2 remaining non-contiguously located ESCRs */ |
464 | 463 | ||
465 | if (num_counters == NUM_COUNTERS_NON_HT) { | 464 | if (num_counters == NUM_COUNTERS_NON_HT) { |
466 | /* standard non-HT CPUs handle both remaining ESCRs*/ | 465 | /* standard non-HT CPUs handle both remaining ESCRs*/ |
467 | if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) | 466 | if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) |
468 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; | 467 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; |
@@ -498,20 +497,20 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
498 | unsigned int stag; | 497 | unsigned int stag; |
499 | 498 | ||
500 | stag = get_stagger(); | 499 | stag = get_stagger(); |
501 | 500 | ||
502 | /* convert from counter *number* to counter *bit* */ | 501 | /* convert from counter *number* to counter *bit* */ |
503 | counter_bit = 1 << VIRT_CTR(stag, ctr); | 502 | counter_bit = 1 << VIRT_CTR(stag, ctr); |
504 | 503 | ||
505 | /* find our event binding structure. */ | 504 | /* find our event binding structure. */ |
506 | if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) { | 505 | if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) { |
507 | printk(KERN_ERR | 506 | printk(KERN_ERR |
508 | "oprofile: P4 event code 0x%lx out of range\n", | 507 | "oprofile: P4 event code 0x%lx out of range\n", |
509 | counter_config[ctr].event); | 508 | counter_config[ctr].event); |
510 | return; | 509 | return; |
511 | } | 510 | } |
512 | 511 | ||
513 | ev = &(p4_events[counter_config[ctr].event - 1]); | 512 | ev = &(p4_events[counter_config[ctr].event - 1]); |
514 | 513 | ||
515 | for (i = 0; i < maxbind; i++) { | 514 | for (i = 0; i < maxbind; i++) { |
516 | if (ev->bindings[i].virt_counter & counter_bit) { | 515 | if (ev->bindings[i].virt_counter & counter_bit) { |
517 | 516 | ||
@@ -526,25 +525,24 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) | |||
526 | ESCR_SET_OS_1(escr, counter_config[ctr].kernel); | 525 | ESCR_SET_OS_1(escr, counter_config[ctr].kernel); |
527 | } | 526 | } |
528 | ESCR_SET_EVENT_SELECT(escr, ev->event_select); | 527 | ESCR_SET_EVENT_SELECT(escr, ev->event_select); |
529 | ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); | 528 | ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); |
530 | ESCR_WRITE(escr, high, ev, i); | 529 | ESCR_WRITE(escr, high, ev, i); |
531 | 530 | ||
532 | /* modify CCCR */ | 531 | /* modify CCCR */ |
533 | CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); | 532 | CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); |
534 | CCCR_CLEAR(cccr); | 533 | CCCR_CLEAR(cccr); |
535 | CCCR_SET_REQUIRED_BITS(cccr); | 534 | CCCR_SET_REQUIRED_BITS(cccr); |
536 | CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); | 535 | CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); |
537 | if (stag == 0) { | 536 | if (stag == 0) |
538 | CCCR_SET_PMI_OVF_0(cccr); | 537 | CCCR_SET_PMI_OVF_0(cccr); |
539 | } else { | 538 | else |
540 | CCCR_SET_PMI_OVF_1(cccr); | 539 | CCCR_SET_PMI_OVF_1(cccr); |
541 | } | ||
542 | CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); | 540 | CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); |
543 | return; | 541 | return; |
544 | } | 542 | } |
545 | } | 543 | } |
546 | 544 | ||
547 | printk(KERN_ERR | 545 | printk(KERN_ERR |
548 | "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n", | 546 | "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n", |
549 | counter_config[ctr].event, stag, ctr); | 547 | counter_config[ctr].event, stag, ctr); |
550 | } | 548 | } |
@@ -559,14 +557,14 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs) | |||
559 | stag = get_stagger(); | 557 | stag = get_stagger(); |
560 | 558 | ||
561 | rdmsr(MSR_IA32_MISC_ENABLE, low, high); | 559 | rdmsr(MSR_IA32_MISC_ENABLE, low, high); |
562 | if (! MISC_PMC_ENABLED_P(low)) { | 560 | if (!MISC_PMC_ENABLED_P(low)) { |
563 | printk(KERN_ERR "oprofile: P4 PMC not available\n"); | 561 | printk(KERN_ERR "oprofile: P4 PMC not available\n"); |
564 | return; | 562 | return; |
565 | } | 563 | } |
566 | 564 | ||
567 | /* clear the cccrs we will use */ | 565 | /* clear the cccrs we will use */ |
568 | for (i = 0 ; i < num_counters ; i++) { | 566 | for (i = 0 ; i < num_counters ; i++) { |
569 | if (unlikely(!CTRL_IS_RESERVED(msrs,i))) | 567 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) |
570 | continue; | 568 | continue; |
571 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); | 569 | rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); |
572 | CCCR_CLEAR(low); | 570 | CCCR_CLEAR(low); |
@@ -576,14 +574,14 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs) | |||
576 | 574 | ||
577 | /* clear all escrs (including those outside our concern) */ | 575 | /* clear all escrs (including those outside our concern) */ |
578 | for (i = num_counters; i < num_controls; i++) { | 576 | for (i = num_counters; i < num_controls; i++) { |
579 | if (unlikely(!CTRL_IS_RESERVED(msrs,i))) | 577 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) |
580 | continue; | 578 | continue; |
581 | wrmsr(msrs->controls[i].addr, 0, 0); | 579 | wrmsr(msrs->controls[i].addr, 0, 0); |
582 | } | 580 | } |
583 | 581 | ||
584 | /* setup all counters */ | 582 | /* setup all counters */ |
585 | for (i = 0 ; i < num_counters ; ++i) { | 583 | for (i = 0 ; i < num_counters ; ++i) { |
586 | if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs,i))) { | 584 | if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) { |
587 | reset_value[i] = counter_config[i].count; | 585 | reset_value[i] = counter_config[i].count; |
588 | pmc_setup_one_p4_counter(i); | 586 | pmc_setup_one_p4_counter(i); |
589 | CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); | 587 | CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); |
@@ -603,11 +601,11 @@ static int p4_check_ctrs(struct pt_regs * const regs, | |||
603 | stag = get_stagger(); | 601 | stag = get_stagger(); |
604 | 602 | ||
605 | for (i = 0; i < num_counters; ++i) { | 603 | for (i = 0; i < num_counters; ++i) { |
606 | 604 | ||
607 | if (!reset_value[i]) | 605 | if (!reset_value[i]) |
608 | continue; | 606 | continue; |
609 | 607 | ||
610 | /* | 608 | /* |
611 | * there is some eccentricity in the hardware which | 609 | * there is some eccentricity in the hardware which |
612 | * requires that we perform 2 extra corrections: | 610 | * requires that we perform 2 extra corrections: |
613 | * | 611 | * |
@@ -616,24 +614,24 @@ static int p4_check_ctrs(struct pt_regs * const regs, | |||
616 | * | 614 | * |
617 | * - write the counter back twice to ensure it gets | 615 | * - write the counter back twice to ensure it gets |
618 | * updated properly. | 616 | * updated properly. |
619 | * | 617 | * |
620 | * the former seems to be related to extra NMIs happening | 618 | * the former seems to be related to extra NMIs happening |
621 | * during the current NMI; the latter is reported as errata | 619 | * during the current NMI; the latter is reported as errata |
622 | * N15 in intel doc 249199-029, pentium 4 specification | 620 | * N15 in intel doc 249199-029, pentium 4 specification |
623 | * update, though their suggested work-around does not | 621 | * update, though their suggested work-around does not |
624 | * appear to solve the problem. | 622 | * appear to solve the problem. |
625 | */ | 623 | */ |
626 | 624 | ||
627 | real = VIRT_CTR(stag, i); | 625 | real = VIRT_CTR(stag, i); |
628 | 626 | ||
629 | CCCR_READ(low, high, real); | 627 | CCCR_READ(low, high, real); |
630 | CTR_READ(ctr, high, real); | 628 | CTR_READ(ctr, high, real); |
631 | if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { | 629 | if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { |
632 | oprofile_add_sample(regs, i); | 630 | oprofile_add_sample(regs, i); |
633 | CTR_WRITE(reset_value[i], real); | 631 | CTR_WRITE(reset_value[i], real); |
634 | CCCR_CLEAR_OVF(low); | 632 | CCCR_CLEAR_OVF(low); |
635 | CCCR_WRITE(low, high, real); | 633 | CCCR_WRITE(low, high, real); |
636 | CTR_WRITE(reset_value[i], real); | 634 | CTR_WRITE(reset_value[i], real); |
637 | } | 635 | } |
638 | } | 636 | } |
639 | 637 | ||
@@ -683,15 +681,16 @@ static void p4_shutdown(struct op_msrs const * const msrs) | |||
683 | int i; | 681 | int i; |
684 | 682 | ||
685 | for (i = 0 ; i < num_counters ; ++i) { | 683 | for (i = 0 ; i < num_counters ; ++i) { |
686 | if (CTR_IS_RESERVED(msrs,i)) | 684 | if (CTR_IS_RESERVED(msrs, i)) |
687 | release_perfctr_nmi(msrs->counters[i].addr); | 685 | release_perfctr_nmi(msrs->counters[i].addr); |
688 | } | 686 | } |
689 | /* some of the control registers are specially reserved in | 687 | /* |
688 | * some of the control registers are specially reserved in | ||
690 | * conjunction with the counter registers (hence the starting offset). | 689 | * conjunction with the counter registers (hence the starting offset). |
691 | * This saves a few bits. | 690 | * This saves a few bits. |
692 | */ | 691 | */ |
693 | for (i = num_counters ; i < num_controls ; ++i) { | 692 | for (i = num_counters ; i < num_controls ; ++i) { |
694 | if (CTRL_IS_RESERVED(msrs,i)) | 693 | if (CTRL_IS_RESERVED(msrs, i)) |
695 | release_evntsel_nmi(msrs->controls[i].addr); | 694 | release_evntsel_nmi(msrs->controls[i].addr); |
696 | } | 695 | } |
697 | } | 696 | } |
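
Most of the op_model_p4.c hunks are checkpatch cleanups, but the NUM_UNUSED_CCCRS change is a real correctness point: without the parentheses the macro expands as a bare subtraction, which breaks as soon as it is used inside a larger expression. A small demonstration of the difference; the numeric values here are placeholders, not necessarily the driver's:

#include <stdio.h>

#define NUM_CCCRS_NON_HT	18
#define NUM_COUNTERS_NON_HT	8

#define UNUSED_BAD	NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT	 /* old form */
#define UNUSED_GOOD	(NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT) /* new form */

int main(void)
{
	/* 2 * 18 - 8 = 28, not the intended 2 * (18 - 8) = 20 */
	printf("2 * UNUSED_BAD  = %d\n", 2 * UNUSED_BAD);
	printf("2 * UNUSED_GOOD = %d\n", 2 * UNUSED_GOOD);
	return 0;
}
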
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index dbf532369711..22e057665e55 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/pci.h> | 2 | #include <linux/pci.h> |
3 | #include <linux/topology.h> | 3 | #include <linux/topology.h> |
4 | #include <linux/cpu.h> | ||
4 | #include "pci.h" | 5 | #include "pci.h" |
5 | 6 | ||
6 | #ifdef CONFIG_X86_64 | 7 | #ifdef CONFIG_X86_64 |
@@ -555,15 +556,17 @@ static int __init early_fill_mp_bus_info(void) | |||
555 | return 0; | 556 | return 0; |
556 | } | 557 | } |
557 | 558 | ||
558 | postcore_initcall(early_fill_mp_bus_info); | 559 | #else /* !CONFIG_X86_64 */ |
559 | 560 | ||
560 | #endif | 561 | static int __init early_fill_mp_bus_info(void) { return 0; } |
562 | |||
563 | #endif /* !CONFIG_X86_64 */ | ||
561 | 564 | ||
562 | /* common 32/64 bit code */ | 565 | /* common 32/64 bit code */ |
563 | 566 | ||
564 | #define ENABLE_CF8_EXT_CFG (1ULL << 46) | 567 | #define ENABLE_CF8_EXT_CFG (1ULL << 46) |
565 | 568 | ||
566 | static void enable_pci_io_ecs_per_cpu(void *unused) | 569 | static void enable_pci_io_ecs(void *unused) |
567 | { | 570 | { |
568 | u64 reg; | 571 | u64 reg; |
569 | rdmsrl(MSR_AMD64_NB_CFG, reg); | 572 | rdmsrl(MSR_AMD64_NB_CFG, reg); |
@@ -573,14 +576,51 @@ static void enable_pci_io_ecs_per_cpu(void *unused) | |||
573 | } | 576 | } |
574 | } | 577 | } |
575 | 578 | ||
576 | static int __init enable_pci_io_ecs(void) | 579 | static int __cpuinit amd_cpu_notify(struct notifier_block *self, |
580 | unsigned long action, void *hcpu) | ||
577 | { | 581 | { |
582 | int cpu = (long)hcpu; | ||
583 | switch (action) { | ||
584 | case CPU_ONLINE: | ||
585 | case CPU_ONLINE_FROZEN: | ||
586 | smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0); | ||
587 | break; | ||
588 | default: | ||
589 | break; | ||
590 | } | ||
591 | return NOTIFY_OK; | ||
592 | } | ||
593 | |||
594 | static struct notifier_block __cpuinitdata amd_cpu_notifier = { | ||
595 | .notifier_call = amd_cpu_notify, | ||
596 | }; | ||
597 | |||
598 | static int __init pci_io_ecs_init(void) | ||
599 | { | ||
600 | int cpu; | ||
601 | |||
578 | /* assume all cpus from fam10h have IO ECS */ | 602 | /* assume all cpus from fam10h have IO ECS */ |
579 | if (boot_cpu_data.x86 < 0x10) | 603 | if (boot_cpu_data.x86 < 0x10) |
580 | return 0; | 604 | return 0; |
581 | on_each_cpu(enable_pci_io_ecs_per_cpu, NULL, 1); | 605 | |
606 | register_cpu_notifier(&amd_cpu_notifier); | ||
607 | for_each_online_cpu(cpu) | ||
608 | amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE, | ||
609 | (void *)(long)cpu); | ||
582 | pci_probe |= PCI_HAS_IO_ECS; | 610 | pci_probe |= PCI_HAS_IO_ECS; |
611 | |||
612 | return 0; | ||
613 | } | ||
614 | |||
615 | static int __init amd_postcore_init(void) | ||
616 | { | ||
617 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) | ||
618 | return 0; | ||
619 | |||
620 | early_fill_mp_bus_info(); | ||
621 | pci_io_ecs_init(); | ||
622 | |||
583 | return 0; | 623 | return 0; |
584 | } | 624 | } |
585 | 625 | ||
586 | postcore_initcall(enable_pci_io_ecs); | 626 | postcore_initcall(amd_postcore_init); |
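
enable_pci_io_ecs() used to run once on every CPU via on_each_cpu(); the rewrite registers a hotplug notifier and then replays the ONLINE case for each CPU that is already up, so late-arriving CPUs get the MSR bit as well. This register-then-replay idiom is useful whenever a per-object callback must also cover objects that existed before registration. A compact user-space model, where the callback registry and replay loop are illustrative names rather than kernel interfaces:

#include <stdio.h>

#define NR_CPUS 4

static int cpu_online[NR_CPUS] = { 1, 1, 0, 0 };	/* cpus 0,1 already up */
static void (*online_callback)(int cpu);

static void enable_io_ecs(int cpu)
{
	printf("cpu%d: IO ECS enabled\n", cpu);
}

/* Register a callback for future online events... */
static void register_online_callback(void (*cb)(int cpu))
{
	online_callback = cb;
	/* ...and replay it for every CPU that is already online, which is
	 * what the for_each_online_cpu() loop in pci_io_ecs_init() does. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online[cpu])
			cb(cpu);
}

static void cpu_comes_online(int cpu)
{
	cpu_online[cpu] = 1;
	if (online_callback)
		online_callback(cpu);
}

int main(void)
{
	register_online_callback(enable_io_ecs);	/* covers cpus 0 and 1 */
	cpu_comes_online(2);				/* hotplugged later */
	return 0;
}
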
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 5807d1bc73f7..8791fc55e715 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -128,8 +128,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) | |||
128 | pr = pci_find_parent_resource(dev, r); | 128 | pr = pci_find_parent_resource(dev, r); |
129 | if (!r->start || !pr || | 129 | if (!r->start || !pr || |
130 | request_resource(pr, r) < 0) { | 130 | request_resource(pr, r) < 0) { |
131 | dev_err(&dev->dev, "BAR %d: can't " | 131 | dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx); |
132 | "allocate resource\n", idx); | ||
133 | /* | 132 | /* |
134 | * Something is wrong with the region. | 133 | * Something is wrong with the region. |
135 | * Invalidate the resource to prevent | 134 | * Invalidate the resource to prevent |
@@ -164,15 +163,13 @@ static void __init pcibios_allocate_resources(int pass) | |||
164 | else | 163 | else |
165 | disabled = !(command & PCI_COMMAND_MEMORY); | 164 | disabled = !(command & PCI_COMMAND_MEMORY); |
166 | if (pass == disabled) { | 165 | if (pass == disabled) { |
167 | dev_dbg(&dev->dev, "resource %#08llx-%#08llx " | 166 | dev_dbg(&dev->dev, "resource %#08llx-%#08llx (f=%lx, d=%d, p=%d)\n", |
168 | "(f=%lx, d=%d, p=%d)\n", | ||
169 | (unsigned long long) r->start, | 167 | (unsigned long long) r->start, |
170 | (unsigned long long) r->end, | 168 | (unsigned long long) r->end, |
171 | r->flags, disabled, pass); | 169 | r->flags, disabled, pass); |
172 | pr = pci_find_parent_resource(dev, r); | 170 | pr = pci_find_parent_resource(dev, r); |
173 | if (!pr || request_resource(pr, r) < 0) { | 171 | if (!pr || request_resource(pr, r) < 0) { |
174 | dev_err(&dev->dev, "BAR %d: can't " | 172 | dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx); |
175 | "allocate resource\n", idx); | ||
176 | /* We'll assign a new address later */ | 173 | /* We'll assign a new address later */ |
177 | r->end -= r->start; | 174 | r->end -= r->start; |
178 | r->start = 0; | 175 | r->start = 0; |
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index fec0123b33a9..006599db0dc7 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -590,6 +590,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route | |||
590 | case PCI_DEVICE_ID_INTEL_ICH10_1: | 590 | case PCI_DEVICE_ID_INTEL_ICH10_1: |
591 | case PCI_DEVICE_ID_INTEL_ICH10_2: | 591 | case PCI_DEVICE_ID_INTEL_ICH10_2: |
592 | case PCI_DEVICE_ID_INTEL_ICH10_3: | 592 | case PCI_DEVICE_ID_INTEL_ICH10_3: |
593 | case PCI_DEVICE_ID_INTEL_PCH_0: | ||
594 | case PCI_DEVICE_ID_INTEL_PCH_1: | ||
593 | r->name = "PIIX/ICH"; | 595 | r->name = "PIIX/ICH"; |
594 | r->get = pirq_piix_get; | 596 | r->get = pirq_piix_get; |
595 | r->set = pirq_piix_set; | 597 | r->set = pirq_piix_set; |
@@ -1041,35 +1043,44 @@ static void __init pcibios_fixup_irqs(void) | |||
1041 | if (io_apic_assign_pci_irqs) { | 1043 | if (io_apic_assign_pci_irqs) { |
1042 | int irq; | 1044 | int irq; |
1043 | 1045 | ||
1044 | if (pin) { | 1046 | if (!pin) |
1045 | /* | 1047 | continue; |
1046 | * interrupt pins are numbered starting | 1048 | |
1047 | * from 1 | 1049 | /* |
1048 | */ | 1050 | * interrupt pins are numbered starting from 1 |
1049 | pin--; | 1051 | */ |
1050 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, | 1052 | pin--; |
1051 | PCI_SLOT(dev->devfn), pin); | 1053 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, |
1052 | /* | 1054 | PCI_SLOT(dev->devfn), pin); |
1053 | * Busses behind bridges are typically not listed in the MP-table. | 1055 | /* |
1054 | * In this case we have to look up the IRQ based on the parent bus, | 1056 | * Busses behind bridges are typically not listed in the |
1055 | * parent slot, and pin number. The SMP code detects such bridged | 1057 | * MP-table. In this case we have to look up the IRQ |
1056 | * busses itself so we should get into this branch reliably. | 1058 | * based on the parent bus, parent slot, and pin number. |
1057 | */ | 1059 | * The SMP code detects such bridged busses itself so we |
1058 | if (irq < 0 && dev->bus->parent) { /* go back to the bridge */ | 1060 | * should get into this branch reliably. |
1059 | struct pci_dev *bridge = dev->bus->self; | 1061 | */ |
1060 | 1062 | if (irq < 0 && dev->bus->parent) { | |
1061 | pin = (pin + PCI_SLOT(dev->devfn)) % 4; | 1063 | /* go back to the bridge */ |
1062 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, | 1064 | struct pci_dev *bridge = dev->bus->self; |
1063 | PCI_SLOT(bridge->devfn), pin); | 1065 | int bus; |
1064 | if (irq >= 0) | 1066 | |
1065 | dev_warn(&dev->dev, "using bridge %s INT %c to get IRQ %d\n", | 1067 | pin = (pin + PCI_SLOT(dev->devfn)) % 4; |
1066 | pci_name(bridge), | 1068 | bus = bridge->bus->number; |
1067 | 'A' + pin, irq); | 1069 | irq = IO_APIC_get_PCI_irq_vector(bus, |
1068 | } | 1070 | PCI_SLOT(bridge->devfn), pin); |
1069 | if (irq >= 0) { | 1071 | if (irq >= 0) |
1070 | dev_info(&dev->dev, "PCI->APIC IRQ transform: INT %c -> IRQ %d\n", 'A' + pin, irq); | 1072 | dev_warn(&dev->dev, |
1071 | dev->irq = irq; | 1073 | "using bridge %s INT %c to " |
1072 | } | 1074 | "get IRQ %d\n", |
1075 | pci_name(bridge), | ||
1076 | 'A' + pin, irq); | ||
1077 | } | ||
1078 | if (irq >= 0) { | ||
1079 | dev_info(&dev->dev, | ||
1080 | "PCI->APIC IRQ transform: INT %c " | ||
1081 | "-> IRQ %d\n", | ||
1082 | 'A' + pin, irq); | ||
1083 | dev->irq = irq; | ||
1073 | } | 1084 | } |
1074 | } | 1085 | } |
1075 | #endif | 1086 | #endif |
diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c index ec9ce35e44d6..b722dd481b39 100644 --- a/arch/x86/pci/legacy.c +++ b/arch/x86/pci/legacy.c | |||
@@ -14,7 +14,7 @@ static void __devinit pcibios_fixup_peer_bridges(void) | |||
14 | int n, devfn; | 14 | int n, devfn; |
15 | long node; | 15 | long node; |
16 | 16 | ||
17 | if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff) | 17 | if (pcibios_last_bus <= 0 || pcibios_last_bus > 0xff) |
18 | return; | 18 | return; |
19 | DBG("PCI: Peer bridge fixup\n"); | 19 | DBG("PCI: Peer bridge fixup\n"); |
20 | 20 | ||
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 23faaa890ffc..d9635764ce3d 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c | |||
@@ -293,7 +293,7 @@ static acpi_status __init find_mboard_resource(acpi_handle handle, u32 lvl, | |||
293 | return AE_OK; | 293 | return AE_OK; |
294 | } | 294 | } |
295 | 295 | ||
296 | static int __init is_acpi_reserved(unsigned long start, unsigned long end) | 296 | static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used) |
297 | { | 297 | { |
298 | struct resource mcfg_res; | 298 | struct resource mcfg_res; |
299 | 299 | ||
@@ -310,6 +310,41 @@ static int __init is_acpi_reserved(unsigned long start, unsigned long end) | |||
310 | return mcfg_res.flags; | 310 | return mcfg_res.flags; |
311 | } | 311 | } |
312 | 312 | ||
313 | typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type); | ||
314 | |||
315 | static int __init is_mmconf_reserved(check_reserved_t is_reserved, | ||
316 | u64 addr, u64 size, int i, | ||
317 | typeof(pci_mmcfg_config[0]) *cfg, int with_e820) | ||
318 | { | ||
319 | u64 old_size = size; | ||
320 | int valid = 0; | ||
321 | |||
322 | while (!is_reserved(addr, addr + size - 1, E820_RESERVED)) { | ||
323 | size >>= 1; | ||
324 | if (size < (16UL<<20)) | ||
325 | break; | ||
326 | } | ||
327 | |||
328 | if (size >= (16UL<<20) || size == old_size) { | ||
329 | printk(KERN_NOTICE | ||
330 | "PCI: MCFG area at %Lx reserved in %s\n", | ||
331 | addr, with_e820?"E820":"ACPI motherboard resources"); | ||
332 | valid = 1; | ||
333 | |||
334 | if (old_size != size) { | ||
335 | /* update end_bus_number */ | ||
336 | cfg->end_bus_number = cfg->start_bus_number + ((size>>20) - 1); | ||
337 | printk(KERN_NOTICE "PCI: updated MCFG configuration %d: base %lx " | ||
338 | "segment %hu buses %u - %u\n", | ||
339 | i, (unsigned long)cfg->address, cfg->pci_segment, | ||
340 | (unsigned int)cfg->start_bus_number, | ||
341 | (unsigned int)cfg->end_bus_number); | ||
342 | } | ||
343 | } | ||
344 | |||
345 | return valid; | ||
346 | } | ||
347 | |||
313 | static void __init pci_mmcfg_reject_broken(int early) | 348 | static void __init pci_mmcfg_reject_broken(int early) |
314 | { | 349 | { |
315 | typeof(pci_mmcfg_config[0]) *cfg; | 350 | typeof(pci_mmcfg_config[0]) *cfg; |
@@ -324,21 +359,22 @@ static void __init pci_mmcfg_reject_broken(int early) | |||
324 | 359 | ||
325 | for (i = 0; i < pci_mmcfg_config_num; i++) { | 360 | for (i = 0; i < pci_mmcfg_config_num; i++) { |
326 | int valid = 0; | 361 | int valid = 0; |
327 | u32 size = (cfg->end_bus_number + 1) << 20; | 362 | u64 addr, size; |
363 | |||
328 | cfg = &pci_mmcfg_config[i]; | 364 | cfg = &pci_mmcfg_config[i]; |
365 | addr = cfg->start_bus_number; | ||
366 | addr <<= 20; | ||
367 | addr += cfg->address; | ||
368 | size = cfg->end_bus_number + 1 - cfg->start_bus_number; | ||
369 | size <<= 20; | ||
329 | printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx " | 370 | printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx " |
330 | "segment %hu buses %u - %u\n", | 371 | "segment %hu buses %u - %u\n", |
331 | i, (unsigned long)cfg->address, cfg->pci_segment, | 372 | i, (unsigned long)cfg->address, cfg->pci_segment, |
332 | (unsigned int)cfg->start_bus_number, | 373 | (unsigned int)cfg->start_bus_number, |
333 | (unsigned int)cfg->end_bus_number); | 374 | (unsigned int)cfg->end_bus_number); |
334 | 375 | ||
335 | if (!early && | 376 | if (!early) |
336 | is_acpi_reserved(cfg->address, cfg->address + size - 1)) { | 377 | valid = is_mmconf_reserved(is_acpi_reserved, addr, size, i, cfg, 0); |
337 | printk(KERN_NOTICE "PCI: MCFG area at %Lx reserved " | ||
338 | "in ACPI motherboard resources\n", | ||
339 | cfg->address); | ||
340 | valid = 1; | ||
341 | } | ||
342 | 378 | ||
343 | if (valid) | 379 | if (valid) |
344 | continue; | 380 | continue; |
@@ -347,16 +383,11 @@ static void __init pci_mmcfg_reject_broken(int early) | |||
347 | printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not" | 383 | printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not" |
348 | " reserved in ACPI motherboard resources\n", | 384 | " reserved in ACPI motherboard resources\n", |
349 | cfg->address); | 385 | cfg->address); |
386 | |||
350 | /* Don't try to do this check unless configuration | 387 | /* Don't try to do this check unless configuration |
351 | type 1 is available. how about type 2 ?*/ | 388 | type 1 is available. how about type 2 ?*/ |
352 | if (raw_pci_ops && e820_all_mapped(cfg->address, | 389 | if (raw_pci_ops) |
353 | cfg->address + size - 1, | 390 | valid = is_mmconf_reserved(e820_all_mapped, addr, size, i, cfg, 1); |
354 | E820_RESERVED)) { | ||
355 | printk(KERN_NOTICE | ||
356 | "PCI: MCFG area at %Lx reserved in E820\n", | ||
357 | cfg->address); | ||
358 | valid = 1; | ||
359 | } | ||
360 | 391 | ||
361 | if (!valid) | 392 | if (!valid) |
362 | goto reject; | 393 | goto reject; |
@@ -365,7 +396,7 @@ static void __init pci_mmcfg_reject_broken(int early) | |||
365 | return; | 396 | return; |
366 | 397 | ||
367 | reject: | 398 | reject: |
368 | printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); | 399 | printk(KERN_INFO "PCI: Not using MMCONFIG.\n"); |
369 | pci_mmcfg_arch_free(); | 400 | pci_mmcfg_arch_free(); |
370 | kfree(pci_mmcfg_config); | 401 | kfree(pci_mmcfg_config); |
371 | pci_mmcfg_config = NULL; | 402 | pci_mmcfg_config = NULL; |
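
is_mmconf_reserved() factors out what the ACPI and E820 paths had in common: keep halving the claimed MMCONFIG window until some reservation check accepts it (never going below 16 MB, i.e. 16 buses at 1 MB each), then shrink end_bus_number if only part of the area turned out to be reserved. A standalone sketch of that shrink-until-reserved loop, with a toy predicate in place of the ACPI/E820 checks and without the exact validity rules of the kernel version:

#include <stdio.h>
#include <stdint.h>

typedef int (*check_reserved_t)(uint64_t start, uint64_t end);

/* Toy predicate: pretend only the first 32 MB at the base are reserved. */
static int toy_is_reserved(uint64_t start, uint64_t end)
{
	const uint64_t base = 0xe0000000ULL;
	return start >= base && end < base + (32ULL << 20);
}

/* Shrink [addr, addr+size) by halves until the predicate accepts it or it
 * drops below 16 MB; returns the usable size (0 if nothing is reserved). */
static uint64_t reserved_size(check_reserved_t is_reserved,
			      uint64_t addr, uint64_t size)
{
	while (!is_reserved(addr, addr + size - 1)) {
		size >>= 1;
		if (size < (16ULL << 20))
			return 0;
	}
	return size;
}

int main(void)
{
	uint64_t addr = 0xe0000000ULL;
	uint64_t size = 256ULL << 20;		/* claimed: 256 buses */
	uint64_t ok = reserved_size(toy_is_reserved, addr, size);

	if (ok)
		printf("usable MMCONFIG: %llu MB -> last bus %llu\n",
		       (unsigned long long)(ok >> 20),
		       (unsigned long long)((ok >> 20) - 1));
	else
		printf("area not reserved at all\n");
	return 0;
}
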
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c index 7dc5d5cf50a2..d3e083dea720 100644 --- a/arch/x86/power/cpu_32.c +++ b/arch/x86/power/cpu_32.c | |||
@@ -45,7 +45,7 @@ static void __save_processor_state(struct saved_context *ctxt) | |||
45 | ctxt->cr0 = read_cr0(); | 45 | ctxt->cr0 = read_cr0(); |
46 | ctxt->cr2 = read_cr2(); | 46 | ctxt->cr2 = read_cr2(); |
47 | ctxt->cr3 = read_cr3(); | 47 | ctxt->cr3 = read_cr3(); |
48 | ctxt->cr4 = read_cr4(); | 48 | ctxt->cr4 = read_cr4_safe(); |
49 | } | 49 | } |
50 | 50 | ||
51 | /* Needed by apm.c */ | 51 | /* Needed by apm.c */ |
@@ -98,7 +98,9 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
98 | /* | 98 | /* |
99 | * control registers | 99 | * control registers |
100 | */ | 100 | */ |
101 | write_cr4(ctxt->cr4); | 101 | /* cr4 was introduced in the Pentium CPU */ |
102 | if (ctxt->cr4) | ||
103 | write_cr4(ctxt->cr4); | ||
102 | write_cr3(ctxt->cr3); | 104 | write_cr3(ctxt->cr3); |
103 | write_cr2(ctxt->cr2); | 105 | write_cr2(ctxt->cr2); |
104 | write_cr0(ctxt->cr0); | 106 | write_cr0(ctxt->cr0); |
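
The cpu_32.c change matters on CPUs that have no CR4 at all: read_cr4_safe() reports 0 there instead of faulting, and the restore path only writes CR4 back when a non-zero value was saved. The guard can be modelled as a simple probe-once, restore-conditionally pattern; read_cr4_safe() below is a user-space stand-in, not the real accessor:

#include <stdio.h>

/* Stand-ins: on a CPU without CR4 the "safe" read reports 0 instead of
 * faulting, so the saved context encodes "no CR4" as a zero value. */
static int cpu_has_cr4;			/* 0: pretend 386/486-class CPU */
static unsigned long hw_cr4 = 0x690;	/* whatever was programmed */

static unsigned long read_cr4_safe(void)
{
	return cpu_has_cr4 ? hw_cr4 : 0;
}

int main(void)
{
	unsigned long saved_cr4 = read_cr4_safe();	/* __save_processor_state() */

	if (saved_cr4)					/* __restore_processor_state() */
		printf("restoring cr4 = %#lx\n", saved_cr4);
	else
		printf("no cr4 on this CPU, nothing to restore\n");
	return 0;
}
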
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index b95aa6cfe3cb..d1e9b53f9d33 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S | |||
@@ -1,5 +1,3 @@ | |||
1 | .text | ||
2 | |||
3 | /* | 1 | /* |
4 | * This may not use any stack, nor any variable that is not "NoSave": | 2 | * This may not use any stack, nor any variable that is not "NoSave": |
5 | * | 3 | * |
@@ -12,25 +10,26 @@ | |||
12 | #include <asm/segment.h> | 10 | #include <asm/segment.h> |
13 | #include <asm/page.h> | 11 | #include <asm/page.h> |
14 | #include <asm/asm-offsets.h> | 12 | #include <asm/asm-offsets.h> |
13 | #include <asm/processor-flags.h> | ||
15 | 14 | ||
16 | .text | 15 | .text |
17 | 16 | ||
18 | ENTRY(swsusp_arch_suspend) | 17 | ENTRY(swsusp_arch_suspend) |
19 | |||
20 | movl %esp, saved_context_esp | 18 | movl %esp, saved_context_esp |
21 | movl %ebx, saved_context_ebx | 19 | movl %ebx, saved_context_ebx |
22 | movl %ebp, saved_context_ebp | 20 | movl %ebp, saved_context_ebp |
23 | movl %esi, saved_context_esi | 21 | movl %esi, saved_context_esi |
24 | movl %edi, saved_context_edi | 22 | movl %edi, saved_context_edi |
25 | pushfl ; popl saved_context_eflags | 23 | pushfl |
24 | popl saved_context_eflags | ||
26 | 25 | ||
27 | call swsusp_save | 26 | call swsusp_save |
28 | ret | 27 | ret |
29 | 28 | ||
30 | ENTRY(restore_image) | 29 | ENTRY(restore_image) |
31 | movl resume_pg_dir, %ecx | 30 | movl resume_pg_dir, %eax |
32 | subl $__PAGE_OFFSET, %ecx | 31 | subl $__PAGE_OFFSET, %eax |
33 | movl %ecx, %cr3 | 32 | movl %eax, %cr3 |
34 | 33 | ||
35 | movl restore_pblist, %edx | 34 | movl restore_pblist, %edx |
36 | .p2align 4,,7 | 35 | .p2align 4,,7 |
@@ -52,17 +51,21 @@ copy_loop: | |||
52 | 51 | ||
53 | done: | 52 | done: |
54 | /* go back to the original page tables */ | 53 | /* go back to the original page tables */ |
55 | movl $swapper_pg_dir, %ecx | 54 | movl $swapper_pg_dir, %eax |
56 | subl $__PAGE_OFFSET, %ecx | 55 | subl $__PAGE_OFFSET, %eax |
57 | movl %ecx, %cr3 | 56 | movl %eax, %cr3 |
58 | /* Flush TLB, including "global" things (vmalloc) */ | 57 | /* Flush TLB, including "global" things (vmalloc) */ |
59 | movl mmu_cr4_features, %eax | 58 | movl mmu_cr4_features, %ecx |
60 | movl %eax, %edx | 59 | jecxz 1f # cr4 Pentium and higher, skip if zero |
61 | andl $~(1<<7), %edx; # PGE | 60 | movl %ecx, %edx |
61 | andl $~(X86_CR4_PGE), %edx | ||
62 | movl %edx, %cr4; # turn off PGE | 62 | movl %edx, %cr4; # turn off PGE |
63 | movl %cr3, %ecx; # flush TLB | 63 | 1: |
64 | movl %ecx, %cr3 | 64 | movl %cr3, %eax; # flush TLB |
65 | movl %eax, %cr4; # turn PGE back on | 65 | movl %eax, %cr3 |
66 | jecxz 1f # cr4 Pentium and higher, skip if zero | ||
67 | movl %ecx, %cr4; # turn PGE back on | ||
68 | 1: | ||
66 | 69 | ||
67 | movl saved_context_esp, %esp | 70 | movl saved_context_esp, %esp |
68 | movl saved_context_ebp, %ebp | 71 | movl saved_context_ebp, %ebp |
@@ -70,7 +73,8 @@ done: | |||
70 | movl saved_context_esi, %esi | 73 | movl saved_context_esi, %esi |
71 | movl saved_context_edi, %edi | 74 | movl saved_context_edi, %edi |
72 | 75 | ||
73 | pushl saved_context_eflags ; popfl | 76 | pushl saved_context_eflags |
77 | popfl | ||
74 | 78 | ||
75 | xorl %eax, %eax | 79 | xorl %eax, %eax |
76 | 80 | ||
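The restore_image rework keeps the full TLB flush but makes the CR4 handling conditional: jecxz branches around both CR4 writes when mmu_cr4_features is zero (a pre-Pentium CPU without CR4), and the bare (1<<7) magic number becomes X86_CR4_PGE from <asm/processor-flags.h>. Roughly the same flow expressed in C, with fake register variables replacing the real control registers (a sketch for illustration, not the kernel's flush code):

/* Illustrative user-space sketch of the restore_image flush logic. */
#include <stdio.h>

#define X86_CR4_PGE (1UL << 7)	/* the bit the old (1<<7) literal meant */

static unsigned long fake_cr3, fake_cr4;

static void fake_write_cr3(unsigned long val) { fake_cr3 = val; /* flushes non-global entries */ }
static void fake_write_cr4(unsigned long val) { fake_cr4 = val; }

/* mmu_cr4_features == 0 plays the role of the jecxz-taken case:
 * a CPU without CR4, whose PGE bit must never be touched. */
static void flush_tlb_sketch(unsigned long mmu_cr4_features)
{
	if (mmu_cr4_features)
		fake_write_cr4(mmu_cr4_features & ~X86_CR4_PGE);	/* turn off PGE */

	fake_write_cr3(fake_cr3);					/* flush TLB */

	if (mmu_cr4_features)
		fake_write_cr4(mmu_cr4_features);			/* turn PGE back on */
}

int main(void)
{
	flush_tlb_sketch(0);		/* 486-class: CR4 never written */
	flush_tlb_sketch(X86_CR4_PGE);	/* Pentium+: global entries flushed too */
	printf("final cr4: 0x%lx\n", fake_cr4);
	return 0;
}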
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 9ff6e3cbf08f..7dcd321a0508 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -812,7 +812,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) | |||
812 | 812 | ||
813 | /* Early in boot, while setting up the initial pagetable, assume | 813 | /* Early in boot, while setting up the initial pagetable, assume |
814 | everything is pinned. */ | 814 | everything is pinned. */ |
815 | static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn) | 815 | static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) |
816 | { | 816 | { |
817 | #ifdef CONFIG_FLATMEM | 817 | #ifdef CONFIG_FLATMEM |
818 | BUG_ON(mem_map); /* should only be used early */ | 818 | BUG_ON(mem_map); /* should only be used early */ |
@@ -822,7 +822,7 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn) | |||
822 | 822 | ||
823 | /* Early release_pte assumes that all pts are pinned, since there's | 823 | /* Early release_pte assumes that all pts are pinned, since there's |
824 | only init_mm and anything attached to that is pinned. */ | 824 | only init_mm and anything attached to that is pinned. */ |
825 | static void xen_release_pte_init(u32 pfn) | 825 | static void xen_release_pte_init(unsigned long pfn) |
826 | { | 826 | { |
827 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); | 827 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
828 | } | 828 | } |
@@ -838,7 +838,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) | |||
838 | 838 | ||
839 | /* This needs to make sure the new pte page is pinned iff its being | 839 | /* This needs to make sure the new pte page is pinned iff its being |
840 | attached to a pinned pagetable. */ | 840 | attached to a pinned pagetable. */ |
841 | static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level) | 841 | static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level) |
842 | { | 842 | { |
843 | struct page *page = pfn_to_page(pfn); | 843 | struct page *page = pfn_to_page(pfn); |
844 | 844 | ||
@@ -856,12 +856,12 @@ static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level) | |||
856 | } | 856 | } |
857 | } | 857 | } |
858 | 858 | ||
859 | static void xen_alloc_pte(struct mm_struct *mm, u32 pfn) | 859 | static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn) |
860 | { | 860 | { |
861 | xen_alloc_ptpage(mm, pfn, PT_PTE); | 861 | xen_alloc_ptpage(mm, pfn, PT_PTE); |
862 | } | 862 | } |
863 | 863 | ||
864 | static void xen_alloc_pmd(struct mm_struct *mm, u32 pfn) | 864 | static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn) |
865 | { | 865 | { |
866 | xen_alloc_ptpage(mm, pfn, PT_PMD); | 866 | xen_alloc_ptpage(mm, pfn, PT_PMD); |
867 | } | 867 | } |
@@ -909,7 +909,7 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
909 | } | 909 | } |
910 | 910 | ||
911 | /* This should never happen until we're OK to use struct page */ | 911 | /* This should never happen until we're OK to use struct page */ |
912 | static void xen_release_ptpage(u32 pfn, unsigned level) | 912 | static void xen_release_ptpage(unsigned long pfn, unsigned level) |
913 | { | 913 | { |
914 | struct page *page = pfn_to_page(pfn); | 914 | struct page *page = pfn_to_page(pfn); |
915 | 915 | ||
@@ -923,23 +923,23 @@ static void xen_release_ptpage(u32 pfn, unsigned level) | |||
923 | } | 923 | } |
924 | } | 924 | } |
925 | 925 | ||
926 | static void xen_release_pte(u32 pfn) | 926 | static void xen_release_pte(unsigned long pfn) |
927 | { | 927 | { |
928 | xen_release_ptpage(pfn, PT_PTE); | 928 | xen_release_ptpage(pfn, PT_PTE); |
929 | } | 929 | } |
930 | 930 | ||
931 | static void xen_release_pmd(u32 pfn) | 931 | static void xen_release_pmd(unsigned long pfn) |
932 | { | 932 | { |
933 | xen_release_ptpage(pfn, PT_PMD); | 933 | xen_release_ptpage(pfn, PT_PMD); |
934 | } | 934 | } |
935 | 935 | ||
936 | #if PAGETABLE_LEVELS == 4 | 936 | #if PAGETABLE_LEVELS == 4 |
937 | static void xen_alloc_pud(struct mm_struct *mm, u32 pfn) | 937 | static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) |
938 | { | 938 | { |
939 | xen_alloc_ptpage(mm, pfn, PT_PUD); | 939 | xen_alloc_ptpage(mm, pfn, PT_PUD); |
940 | } | 940 | } |
941 | 941 | ||
942 | static void xen_release_pud(u32 pfn) | 942 | static void xen_release_pud(unsigned long pfn) |
943 | { | 943 | { |
944 | xen_release_ptpage(pfn, PT_PUD); | 944 | xen_release_ptpage(pfn, PT_PUD); |
945 | } | 945 | } |
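The signature changes above switch every Xen page-table hook from a u32 page frame number to unsigned long, in line with how pfns are carried elsewhere in the mm code; on x86_64 a u32 parameter would also truncate any frame number above 2^32 before the callee ever saw it. A small sketch of that truncation hazard, assuming a 64-bit unsigned long (illustration only, with made-up function names):

/* Sketch of why a u32 pfn parameter is a problem on 64-bit: the value is
 * narrowed at the call site. Assumes LP64 (64-bit unsigned long). */
#include <stdint.h>
#include <stdio.h>

static void release_ptpage_u32(uint32_t pfn)
{
	printf("u32 callee sees pfn 0x%lx\n", (unsigned long)pfn);
}

static void release_ptpage_ulong(unsigned long pfn)
{
	printf("ulong callee sees pfn 0x%lx\n", pfn);
}

int main(void)
{
	/* A frame just above the 4G-frame boundary (more than 16 TB of RAM). */
	unsigned long pfn = (1UL << 32) + 5;

	release_ptpage_u32(pfn);	/* high bits dropped before the call */
	release_ptpage_ulong(pfn);	/* full value preserved */
	return 0;
}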
@@ -1324,7 +1324,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { | |||
1324 | .ptep_modify_prot_commit = __ptep_modify_prot_commit, | 1324 | .ptep_modify_prot_commit = __ptep_modify_prot_commit, |
1325 | 1325 | ||
1326 | .pte_val = xen_pte_val, | 1326 | .pte_val = xen_pte_val, |
1327 | .pte_flags = native_pte_val, | 1327 | .pte_flags = native_pte_flags, |
1328 | .pgd_val = xen_pgd_val, | 1328 | .pgd_val = xen_pgd_val, |
1329 | 1329 | ||
1330 | .make_pte = xen_make_pte, | 1330 | .make_pte = xen_make_pte, |
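The last enlighten.c hunk points the .pte_flags hook at native_pte_flags instead of native_pte_val: the flags accessor is meant to hand back only the attribute bits of a PTE, while pte_val returns the whole entry with the frame number still in it. A stripped-down illustration of that difference, using a simplified 32-bit non-PAE layout and a made-up mask constant rather than the kernel's real ones:

/* Simplified sketch: low 12 bits are flags, the rest is the frame number.
 * SKETCH_PTE_FLAGS_MASK is an assumption for illustration, not a kernel macro. */
#include <stdio.h>

#define SKETCH_PTE_FLAGS_MASK 0xfffUL	/* assumes 4K pages, no PAE */

static unsigned long sketch_pte_val(unsigned long pte)
{
	return pte;				/* raw value, pfn included */
}

static unsigned long sketch_pte_flags(unsigned long pte)
{
	return pte & SKETCH_PTE_FLAGS_MASK;	/* attribute bits only */
}

int main(void)
{
	unsigned long pte = 0x12345000UL | 0x063UL;	/* frame | present/rw/accessed/dirty */

	printf("pte_val:   0x%lx\n", sketch_pte_val(pte));
	printf("pte_flags: 0x%lx\n", sketch_pte_flags(pte));
	return 0;
}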
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index b6acc3a0af46..d67901083888 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -42,7 +42,7 @@ char * __init xen_memory_setup(void) | |||
42 | 42 | ||
43 | e820.nr_map = 0; | 43 | e820.nr_map = 0; |
44 | 44 | ||
45 | e820_add_region(0, PFN_PHYS(max_pfn), E820_RAM); | 45 | e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM); |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Even though this is normal, usable memory under Xen, reserve | 48 | * Even though this is normal, usable memory under Xen, reserve |
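The xen_memory_setup() change casts max_pfn to u64 before PFN_PHYS() shifts it by PAGE_SHIFT; on a 32-bit PAE kernel the shift would otherwise be performed in a 32-bit type and wrap, so the region handed to e820_add_region() could be silently clipped. A worked example of that overflow, assuming 4 KB pages and a 32-bit unsigned long (illustration only):

/* Sketch of the 32-bit overflow the (u64) cast avoids. Assumes 4 KB pages
 * and a 32-bit unsigned long, as on an i386 PAE kernel. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t max_pfn = 0x140000;	/* 0x140000 frames = 5 GB of RAM */

	uint32_t clipped = max_pfn << PAGE_SHIFT;		/* shift wraps past 4 GB */
	uint64_t full    = (uint64_t)max_pfn << PAGE_SHIFT;	/* PFN_PHYS((u64)max_pfn) */

	printf("without cast: 0x%08lx\n", (unsigned long)clipped);	/* 0x40000000, only 1 GB */
	printf("with cast:    0x%llx\n", (unsigned long long)full);	/* 0x140000000, 5 GB */
	return 0;
}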