Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                      6
-rw-r--r--  arch/x86/boot/Makefile               23
-rw-r--r--  arch/x86/boot/header.S               29
-rw-r--r--  arch/x86/boot/pm.c                   44
-rw-r--r--  arch/x86/boot/tools/build.c           9
-rw-r--r--  arch/x86/include/asm/boot.h           4
-rwxr-xr-x  arch/x86/include/asm/cpu_debug.h    193
-rw-r--r--  arch/x86/include/asm/desc.h           3
-rw-r--r--  arch/x86/include/asm/highmem.h        1
-rw-r--r--  arch/x86/kernel/cpu/Makefile          2
-rw-r--r--  arch/x86/kernel/cpu/common.c          6
-rwxr-xr-x  arch/x86/kernel/cpu/cpu_debug.c     785
-rw-r--r--  arch/x86/kernel/quirks.c              3
-rw-r--r--  arch/x86/mm/highmem_32.c             19
-rw-r--r--  arch/x86/mm/iomap_32.c               13
-rw-r--r--  arch/x86/mm/kmmio.c                   2
-rw-r--r--  arch/x86/mm/pageattr.c               11
17 files changed, 1037 insertions, 116 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 87717f3687d2..7fcf85182681 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -931,6 +931,12 @@ config X86_CPUID
 	  with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
 	  /dev/cpu/31/cpuid.
 
+config X86_CPU_DEBUG
+	tristate "/sys/kernel/debug/x86/cpu/* - CPU Debug support"
+	---help---
+	  If you select this option, this will provide various x86 CPUs
+	  information through debugfs.
+
 choice
 	prompt "High Memory Support"
 	default HIGHMEM4G if !X86_NUMAQ
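For orientation: the new option builds the cpu_debug module (added further down in this patch), which exposes per-CPU register dumps under /sys/kernel/debug/x86/cpu/. A minimal user-space sketch of reading one of those files follows; the path is an assumed example (MSR 0x1b, IA32_APIC_BASE, on cpu0) and depends on the processor and on debugfs being mounted at /sys/kernel/debug:

	/* Hypothetical reader for a cpu_debug "value" file; not part of the patch. */
	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/kernel/debug/x86/cpu/cpu0/apic/0x1b/value";
		char buf[64];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("IA32_APIC_BASE (MSR 0x1b): %s", buf);
		fclose(f);
		return 0;
	}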
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index c70eff69a1fb..57a29fecf6bb 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -6,26 +6,23 @@
 # for more details.
 #
 # Copyright (C) 1994 by Linus Torvalds
+# Changed by many, many contributors over the years.
 #
 
 # ROOT_DEV specifies the default root-device when making the image.
 # This can be either FLOPPY, CURRENT, /dev/xxxx or empty, in which case
 # the default of FLOPPY is used by 'build'.
 
 ROOT_DEV	:= CURRENT
 
 # If you want to preset the SVGA mode, uncomment the next line and
 # set SVGA_MODE to whatever number you want.
 # Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode.
 # The number is the same as you would ordinarily press at bootup.
 
 SVGA_MODE	:= -DSVGA_MODE=NORMAL_VGA
 
-# If you want the RAM disk device, define this to be the size in blocks.
-
-#RAMDISK := -DRAMDISK=512
-
-targets		:= vmlinux.bin setup.bin setup.elf zImage bzImage
+targets		:= vmlinux.bin setup.bin setup.elf bzImage
 subdir-		:= compressed
 
 setup-y		+= a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o
@@ -71,17 +68,13 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
 KBUILD_CFLAGS	+= $(call cc-option,-m32)
 KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
 
-$(obj)/zImage: asflags-y := $(SVGA_MODE) $(RAMDISK)
-$(obj)/bzImage: ccflags-y := -D__BIG_KERNEL__
-$(obj)/bzImage: asflags-y := $(SVGA_MODE) $(RAMDISK) -D__BIG_KERNEL__
-$(obj)/bzImage: BUILDFLAGS := -b
+$(obj)/bzImage: asflags-y := $(SVGA_MODE)
 
 quiet_cmd_image = BUILD   $@
-cmd_image = $(obj)/tools/build $(BUILDFLAGS) $(obj)/setup.bin \
-	    $(obj)/vmlinux.bin $(ROOT_DEV) > $@
+cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
+	    $(ROOT_DEV) > $@
 
-$(obj)/zImage $(obj)/bzImage: $(obj)/setup.bin \
-			      $(obj)/vmlinux.bin $(obj)/tools/build FORCE
+$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
 	$(call if_changed,image)
 	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
 
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 7ccff4884a23..5d84d1c74e4c 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -24,12 +24,8 @@
 #include "boot.h"
 #include "offsets.h"
 
-SETUPSECTS	= 4			/* default nr of setup-sectors */
 BOOTSEG		= 0x07C0		/* original address of boot-sector */
-SYSSEG		= DEF_SYSSEG		/* system loaded at 0x10000 (65536) */
-SYSSIZE		= DEF_SYSSIZE		/* system size: # of 16-byte clicks */
-					/* to be loaded */
-ROOT_DEV	= 0			/* ROOT_DEV is now written by "build" */
+SYSSEG		= 0x1000		/* historical load address >> 4 */
 
 #ifndef SVGA_MODE
 #define SVGA_MODE ASK_VGA
@@ -97,12 +93,12 @@ bugger_off_msg:
 	.section ".header", "a"
 	.globl	hdr
 hdr:
-setup_sects:	.byte	SETUPSECTS
+setup_sects:	.byte	0		/* Filled in by build.c */
 root_flags:	.word	ROOT_RDONLY
-syssize:	.long	SYSSIZE
-ram_size:	.word	RAMDISK
+syssize:	.long	0		/* Filled in by build.c */
+ram_size:	.word	0		/* Obsolete */
 vid_mode:	.word	SVGA_MODE
-root_dev:	.word	ROOT_DEV
+root_dev:	.word	0		/* Filled in by build.c */
 boot_flag:	.word	0xAA55
 
 	# offset 512, entry point
@@ -123,14 +119,15 @@ _start:
 					# or else old loadlin-1.5 will fail)
 		.globl	realmode_swtch
 realmode_swtch:	.word	0, 0		# default_switch, SETUPSEG
-start_sys_seg:	.word	SYSSEG
+start_sys_seg:	.word	SYSSEG		# obsolete and meaningless, but just
+					# in case something decided to "use" it
 		.word	kernel_version-512 # pointing to kernel version string
 					# above section of header is compatible
 					# with loadlin-1.5 (header v1.5). Don't
 					# change it.
 
-type_of_loader:	.byte	0		# = 0, old one (LILO, Loadlin,
-					#      Bootlin, SYSLX, bootsect...)
+type_of_loader:	.byte	0		# 0 means ancient bootloader, newer
+					# bootloaders know to change this.
 					# See Documentation/i386/boot.txt for
 					# assigned ids
 
@@ -142,11 +139,7 @@ CAN_USE_HEAP = 0x80 # If set, the loader also has set
 					# space behind setup.S can be used for
 					# heap purposes.
 					# Only the loader knows what is free
-#ifndef __BIG_KERNEL__
-		.byte	0
-#else
 		.byte	LOADED_HIGH
-#endif
 
 setup_move_size: .word  0x8000		# size to move, when setup is not
 					# loaded at 0x90000. We will move setup
@@ -157,11 +150,7 @@ setup_move_size: .word 0x8000 # size to move, when setup is not
 
 code32_start:				# here loaders can put a different
 					# start address for 32-bit code.
-#ifndef __BIG_KERNEL__
-		.long	0x1000		# 0x1000 = default for zImage
-#else
 		.long	0x100000	# 0x100000 = default for big kernel
-#endif
 
 ramdisk_image:	.long	0		# address of loaded ramdisk image
 					# Here the loader puts the 32-bit
diff --git a/arch/x86/boot/pm.c b/arch/x86/boot/pm.c
index 85a1cd8a8ff8..8062f8915250 100644
--- a/arch/x86/boot/pm.c
+++ b/arch/x86/boot/pm.c
@@ -33,47 +33,6 @@ static void realmode_switch_hook(void)
 }
 
 /*
- * A zImage kernel is loaded at 0x10000 but wants to run at 0x1000.
- * A bzImage kernel is loaded and runs at 0x100000.
- */
-static void move_kernel_around(void)
-{
-	/* Note: rely on the compile-time option here rather than
-	   the LOADED_HIGH flag.  The Qemu kernel loader unconditionally
-	   sets the loadflags to zero. */
-#ifndef __BIG_KERNEL__
-	u16 dst_seg, src_seg;
-	u32 syssize;
-
-	dst_seg =  0x1000 >> 4;
-	src_seg = 0x10000 >> 4;
-	syssize = boot_params.hdr.syssize; /* Size in 16-byte paragraphs */
-
-	while (syssize) {
-		int paras  = (syssize >= 0x1000) ? 0x1000 : syssize;
-		int dwords = paras << 2;
-
-		asm volatile("pushw %%es ; "
-			     "pushw %%ds ; "
-			     "movw %1,%%es ; "
-			     "movw %2,%%ds ; "
-			     "xorw %%di,%%di ; "
-			     "xorw %%si,%%si ; "
-			     "rep;movsl ; "
-			     "popw %%ds ; "
-			     "popw %%es"
-			     : "+c" (dwords)
-			     : "r" (dst_seg), "r" (src_seg)
-			     : "esi", "edi");
-
-		syssize -= paras;
-		dst_seg += paras;
-		src_seg += paras;
-	}
-#endif
-}
-
-/*
  * Disable all interrupts at the legacy PIC.
  */
 static void mask_all_interrupts(void)
@@ -147,9 +106,6 @@ void go_to_protected_mode(void)
 	/* Hook before leaving real mode, also disables interrupts */
 	realmode_switch_hook();
 
-	/* Move the kernel/setup to their final resting places */
-	move_kernel_around();
-
 	/* Enable the A20 gate */
 	if (enable_a20()) {
 		puts("A20 gate not responding, unable to boot...\n");
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 44dc1923c0e3..ee3a4ea923ac 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -130,7 +130,7 @@ static void die(const char * str, ...)
 
 static void usage(void)
 {
-	die("Usage: build [-b] setup system [rootdev] [> image]");
+	die("Usage: build setup system [rootdev] [> image]");
 }
 
 int main(int argc, char ** argv)
@@ -145,11 +145,6 @@ int main(int argc, char ** argv)
 	void *kernel;
 	u32 crc = 0xffffffffUL;
 
-	if (argc > 2 && !strcmp(argv[1], "-b"))
-	  {
-	    is_big_kernel = 1;
-	    argc--, argv++;
-	  }
 	if ((argc < 3) || (argc > 4))
 		usage();
 	if (argc > 3) {
@@ -216,8 +211,6 @@ int main(int argc, char ** argv)
 		die("Unable to mmap '%s': %m", argv[2]);
 	/* Number of 16-byte paragraphs, including space for a 4-byte CRC */
 	sys_size = (sz + 15 + 4) / 16;
-	if (!is_big_kernel && sys_size > DEF_SYSSIZE)
-		die("System is too big. Try using bzImage or modules.");
 
 	/* Patch the setup code with the appropriate size parameters */
 	buf[0x1f1] = setup_sectors-1;
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 6526cf08b0e4..6ba23dd9fc92 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -1,10 +1,6 @@
 #ifndef _ASM_X86_BOOT_H
 #define _ASM_X86_BOOT_H
 
-/* Don't touch these, unless you really know what you're doing. */
-#define DEF_SYSSEG	0x1000
-#define DEF_SYSSIZE	0x7F00
-
 /* Internal svga startup constants */
 #define NORMAL_VGA	0xffff		/* 80x25 mode */
 #define EXTENDED_VGA	0xfffe		/* 80x50 mode */
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
new file mode 100755
index 000000000000..d24d64fcee04
--- /dev/null
+++ b/arch/x86/include/asm/cpu_debug.h
@@ -0,0 +1,193 @@
1#ifndef _ASM_X86_CPU_DEBUG_H
2#define _ASM_X86_CPU_DEBUG_H
3
4/*
5 * CPU x86 architecture debug
6 *
7 * Copyright(C) 2009 Jaswinder Singh Rajput
8 */
9
10/* Register flags */
11enum cpu_debug_bit {
12/* Model Specific Registers (MSRs) */
13 CPU_MC_BIT, /* Machine Check */
14 CPU_MONITOR_BIT, /* Monitor */
15 CPU_TIME_BIT, /* Time */
16 CPU_PMC_BIT, /* Performance Monitor */
17 CPU_PLATFORM_BIT, /* Platform */
18 CPU_APIC_BIT, /* APIC */
19 CPU_POWERON_BIT, /* Power-on */
20 CPU_CONTROL_BIT, /* Control */
21 CPU_FEATURES_BIT, /* Features control */
22 CPU_LBRANCH_BIT, /* Last Branch */
23 CPU_BIOS_BIT, /* BIOS */
24 CPU_FREQ_BIT, /* Frequency */
25 CPU_MTTR_BIT, /* MTRR */
26 CPU_PERF_BIT, /* Performance */
27 CPU_CACHE_BIT, /* Cache */
28 CPU_SYSENTER_BIT, /* Sysenter */
29 CPU_THERM_BIT, /* Thermal */
30 CPU_MISC_BIT, /* Miscellaneous */
31 CPU_DEBUG_BIT, /* Debug */
32 CPU_PAT_BIT, /* PAT */
33 CPU_VMX_BIT, /* VMX */
34 CPU_CALL_BIT, /* System Call */
35 CPU_BASE_BIT, /* BASE Address */
36 CPU_SMM_BIT, /* System mgmt mode */
37 CPU_SVM_BIT, /*Secure Virtual Machine*/
38 CPU_OSVM_BIT, /* OS-Visible Workaround*/
39/* Standard Registers */
40	CPU_TSS_BIT,				/* Task State Segment	*/
41 CPU_CR_BIT, /* Control Registers */
42 CPU_DT_BIT, /* Descriptor Table */
43/* End of Registers flags */
44 CPU_REG_ALL_BIT, /* Select all Registers */
45};
46
47#define CPU_REG_ALL (~0) /* Select all Registers */
48
49#define CPU_MC (1 << CPU_MC_BIT)
50#define CPU_MONITOR (1 << CPU_MONITOR_BIT)
51#define CPU_TIME (1 << CPU_TIME_BIT)
52#define CPU_PMC (1 << CPU_PMC_BIT)
53#define CPU_PLATFORM (1 << CPU_PLATFORM_BIT)
54#define CPU_APIC (1 << CPU_APIC_BIT)
55#define CPU_POWERON (1 << CPU_POWERON_BIT)
56#define CPU_CONTROL (1 << CPU_CONTROL_BIT)
57#define CPU_FEATURES (1 << CPU_FEATURES_BIT)
58#define CPU_LBRANCH (1 << CPU_LBRANCH_BIT)
59#define CPU_BIOS (1 << CPU_BIOS_BIT)
60#define CPU_FREQ (1 << CPU_FREQ_BIT)
61#define CPU_MTRR (1 << CPU_MTTR_BIT)
62#define CPU_PERF (1 << CPU_PERF_BIT)
63#define CPU_CACHE (1 << CPU_CACHE_BIT)
64#define CPU_SYSENTER (1 << CPU_SYSENTER_BIT)
65#define CPU_THERM (1 << CPU_THERM_BIT)
66#define CPU_MISC (1 << CPU_MISC_BIT)
67#define CPU_DEBUG (1 << CPU_DEBUG_BIT)
68#define CPU_PAT (1 << CPU_PAT_BIT)
69#define CPU_VMX (1 << CPU_VMX_BIT)
70#define CPU_CALL (1 << CPU_CALL_BIT)
71#define CPU_BASE (1 << CPU_BASE_BIT)
72#define CPU_SMM (1 << CPU_SMM_BIT)
73#define CPU_SVM (1 << CPU_SVM_BIT)
74#define CPU_OSVM (1 << CPU_OSVM_BIT)
75#define CPU_TSS (1 << CPU_TSS_BIT)
76#define CPU_CR (1 << CPU_CR_BIT)
77#define CPU_DT (1 << CPU_DT_BIT)
78
79/* Register file flags */
80enum cpu_file_bit {
81 CPU_INDEX_BIT, /* index */
82 CPU_VALUE_BIT, /* value */
83};
84
85#define CPU_FILE_VALUE (1 << CPU_VALUE_BIT)
86
87/*
88 * DisplayFamily_DisplayModel Processor Families/Processor Number Series
89 * -------------------------- ------------------------------------------
90 * 05_01, 05_02, 05_04 Pentium, Pentium with MMX
91 *
92 * 06_01 Pentium Pro
93 * 06_03, 06_05 Pentium II Xeon, Pentium II
94 * 06_07, 06_08, 06_0A, 06_0B	Pentium III Xeon, Pentium III
95 *
96 * 06_09, 06_0D			Pentium M
97 *
98 * 06_0E Core Duo, Core Solo
99 *
100 * 06_0F Xeon 3000, 3200, 5100, 5300, 7300 series,
101 * Core 2 Quad, Core 2 Extreme, Core 2 Duo,
102 * Pentium dual-core
103 * 06_17 Xeon 5200, 5400 series, Core 2 Quad Q9650
104 *
105 * 06_1C Atom
106 *
107 * 0F_00, 0F_01, 0F_02 Xeon, Xeon MP, Pentium 4
108 * 0F_03, 0F_04 Xeon, Xeon MP, Pentium 4, Pentium D
109 *
110 * 0F_06 Xeon 7100, 5000 Series, Xeon MP,
111 * Pentium 4, Pentium D
112 */
113
114/* Register processors bits */
115enum cpu_processor_bit {
116 CPU_NONE,
117/* Intel */
118 CPU_INTEL_PENTIUM_BIT,
119 CPU_INTEL_P6_BIT,
120 CPU_INTEL_PENTIUM_M_BIT,
121 CPU_INTEL_CORE_BIT,
122 CPU_INTEL_CORE2_BIT,
123 CPU_INTEL_ATOM_BIT,
124 CPU_INTEL_XEON_P4_BIT,
125 CPU_INTEL_XEON_MP_BIT,
126};
127
128#define CPU_ALL (~0) /* Select all CPUs */
129
130#define CPU_INTEL_PENTIUM (1 << CPU_INTEL_PENTIUM_BIT)
131#define CPU_INTEL_P6 (1 << CPU_INTEL_P6_BIT)
132#define CPU_INTEL_PENTIUM_M (1 << CPU_INTEL_PENTIUM_M_BIT)
133#define CPU_INTEL_CORE (1 << CPU_INTEL_CORE_BIT)
134#define CPU_INTEL_CORE2 (1 << CPU_INTEL_CORE2_BIT)
135#define CPU_INTEL_ATOM (1 << CPU_INTEL_ATOM_BIT)
136#define CPU_INTEL_XEON_P4 (1 << CPU_INTEL_XEON_P4_BIT)
137#define CPU_INTEL_XEON_MP (1 << CPU_INTEL_XEON_MP_BIT)
138
139#define CPU_INTEL_PX (CPU_INTEL_P6 | CPU_INTEL_PENTIUM_M)
140#define CPU_INTEL_COREX (CPU_INTEL_CORE | CPU_INTEL_CORE2)
141#define CPU_INTEL_XEON (CPU_INTEL_XEON_P4 | CPU_INTEL_XEON_MP)
142#define CPU_CO_AT (CPU_INTEL_CORE | CPU_INTEL_ATOM)
143#define CPU_C2_AT (CPU_INTEL_CORE2 | CPU_INTEL_ATOM)
144#define CPU_CX_AT (CPU_INTEL_COREX | CPU_INTEL_ATOM)
145#define CPU_CX_XE (CPU_INTEL_COREX | CPU_INTEL_XEON)
146#define CPU_P6_XE (CPU_INTEL_P6 | CPU_INTEL_XEON)
147#define CPU_PM_CO_AT (CPU_INTEL_PENTIUM_M | CPU_CO_AT)
148#define CPU_C2_AT_XE (CPU_C2_AT | CPU_INTEL_XEON)
149#define CPU_CX_AT_XE (CPU_CX_AT | CPU_INTEL_XEON)
150#define CPU_P6_CX_AT (CPU_INTEL_P6 | CPU_CX_AT)
151#define CPU_P6_CX_XE (CPU_P6_XE | CPU_INTEL_COREX)
152#define CPU_P6_CX_AT_XE (CPU_INTEL_P6 | CPU_CX_AT_XE)
153#define CPU_PM_CX_AT_XE (CPU_INTEL_PENTIUM_M | CPU_CX_AT_XE)
154#define CPU_PM_CX_AT (CPU_INTEL_PENTIUM_M | CPU_CX_AT)
155#define CPU_PM_CX_XE (CPU_INTEL_PENTIUM_M | CPU_CX_XE)
156#define CPU_PX_CX_AT (CPU_INTEL_PX | CPU_CX_AT)
157#define CPU_PX_CX_AT_XE (CPU_INTEL_PX | CPU_CX_AT_XE)
158
159/* Select all Intel CPUs*/
160#define CPU_INTEL_ALL (CPU_INTEL_PENTIUM | CPU_PX_CX_AT_XE)
161
162#define MAX_CPU_FILES 512
163
164struct cpu_private {
165 unsigned cpu;
166 unsigned type;
167 unsigned reg;
168 unsigned file;
169};
170
171struct cpu_debug_base {
172 char *name; /* Register name */
173 unsigned flag; /* Register flag */
174};
175
176struct cpu_cpuX_base {
177 struct dentry *dentry; /* Register dentry */
178 int init; /* Register index file */
179};
180
181struct cpu_file_base {
182 char *name; /* Register file name */
183 unsigned flag; /* Register file flag */
184};
185
186struct cpu_debug_range {
187 unsigned min; /* Register range min */
188 unsigned max; /* Register range max */
189 unsigned flag; /* Supported flags */
190 unsigned model; /* Supported models */
191};
192
193#endif /* _ASM_X86_CPU_DEBUG_H */
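As a worked example of how the composite masks above combine (a stand-alone sketch re-deriving one value, not part of the patch): CPU_PM_CX_AT_XE is the OR of the Pentium M, Core, Core2, Atom and both Xeon bits, i.e. bits 3 through 8 of enum cpu_processor_bit, so an MSR range tagged with it matches any of those model flags.

	/* Recomputes CPU_PM_CX_AT_XE from the bit positions copied above. */
	#include <stdio.h>

	enum {
		PENTIUM_M_BIT = 3,	/* CPU_INTEL_PENTIUM_M_BIT */
		CORE_BIT,		/* CPU_INTEL_CORE_BIT */
		CORE2_BIT,		/* CPU_INTEL_CORE2_BIT */
		ATOM_BIT,		/* CPU_INTEL_ATOM_BIT */
		XEON_P4_BIT,		/* CPU_INTEL_XEON_P4_BIT */
		XEON_MP_BIT,		/* CPU_INTEL_XEON_MP_BIT */
	};

	int main(void)
	{
		unsigned pm_cx_at_xe = (1 << PENTIUM_M_BIT) | (1 << CORE_BIT) |
				       (1 << CORE2_BIT) | (1 << ATOM_BIT) |
				       (1 << XEON_P4_BIT) | (1 << XEON_MP_BIT);

		printf("CPU_PM_CX_AT_XE = 0x%03x\n", pm_cx_at_xe);	/* 0x1f8 */
		return 0;
	}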
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index dc27705f5443..5623c50d67b2 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -91,7 +91,6 @@ static inline int desc_empty(const void *ptr)
 #define store_gdt(dtr)			native_store_gdt(dtr)
 #define store_idt(dtr)			native_store_idt(dtr)
 #define store_tr(tr)			(tr = native_store_tr())
-#define store_ldt(ldt)			asm("sldt %0":"=m" (ldt))
 
 #define load_TLS(t, cpu)		native_load_tls(t, cpu)
 #define set_ldt				native_set_ldt
@@ -112,6 +111,8 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
 }
 #endif	/* CONFIG_PARAVIRT */
 
+#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
+
 static inline void native_write_idt_entry(gate_desc *idt, int entry,
 					   const gate_desc *gate)
 {
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index bf9276bea660..014c2b85ae45 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -63,6 +63,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
 void *kmap_atomic(struct page *page, enum km_type type);
 void kunmap_atomic(void *kvaddr, enum km_type type);
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #ifndef CONFIG_PARAVIRT
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 82db7f45e2de..d4356f8b7522 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,6 +14,8 @@ obj-y += vmware.o hypervisor.o
 obj-$(CONFIG_X86_32)	+= bugs.o cmpxchg.o
 obj-$(CONFIG_X86_64)	+= bugs_64.o
 
+obj-$(CONFIG_X86_CPU_DEBUG)		+= cpu_debug.o
+
 obj-$(CONFIG_CPU_SUP_INTEL)		+= intel.o
 obj-$(CONFIG_CPU_SUP_AMD)		+= amd.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 826d5c876278..f8869978bbb7 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1078,8 +1078,7 @@ void __cpuinit cpu_init(void)
 
 	atomic_inc(&init_mm.mm_count);
 	me->active_mm = &init_mm;
-	if (me->mm)
-		BUG();
+	BUG_ON(me->mm);
 	enter_lazy_tlb(&init_mm, me);
 
 	load_sp0(t, &current->thread);
@@ -1145,8 +1144,7 @@ void __cpuinit cpu_init(void)
 	 */
 	atomic_inc(&init_mm.mm_count);
 	curr->active_mm = &init_mm;
-	if (curr->mm)
-		BUG();
+	BUG_ON(curr->mm);
 	enter_lazy_tlb(&init_mm, curr);
 
 	load_sp0(t, thread);
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
new file mode 100755
index 000000000000..9abbcbd933cc
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -0,0 +1,785 @@
1/*
2 * CPU x86 architecture debug code
3 *
4 * Copyright(C) 2009 Jaswinder Singh Rajput
5 *
6 * For licensing details see kernel-base/COPYING
7 */
8
9#include <linux/interrupt.h>
10#include <linux/compiler.h>
11#include <linux/seq_file.h>
12#include <linux/debugfs.h>
13#include <linux/kprobes.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/percpu.h>
17#include <linux/signal.h>
18#include <linux/errno.h>
19#include <linux/sched.h>
20#include <linux/types.h>
21#include <linux/init.h>
22#include <linux/slab.h>
23#include <linux/smp.h>
24
25#include <asm/cpu_debug.h>
26#include <asm/paravirt.h>
27#include <asm/system.h>
28#include <asm/traps.h>
29#include <asm/apic.h>
30#include <asm/desc.h>
31
32static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
33static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
34static DEFINE_PER_CPU(unsigned, cpu_modelflag);
35static DEFINE_PER_CPU(int, cpu_priv_count);
36static DEFINE_PER_CPU(unsigned, cpu_model);
37
38static DEFINE_MUTEX(cpu_debug_lock);
39
40static struct dentry *cpu_debugfs_dir;
41
42static struct cpu_debug_base cpu_base[] = {
43 { "mc", CPU_MC }, /* Machine Check */
44 { "monitor", CPU_MONITOR }, /* Monitor */
45 { "time", CPU_TIME }, /* Time */
46 { "pmc", CPU_PMC }, /* Performance Monitor */
47 { "platform", CPU_PLATFORM }, /* Platform */
48 { "apic", CPU_APIC }, /* APIC */
49 { "poweron", CPU_POWERON }, /* Power-on */
50 { "control", CPU_CONTROL }, /* Control */
51 { "features", CPU_FEATURES }, /* Features control */
52 { "lastbranch", CPU_LBRANCH }, /* Last Branch */
53 { "bios", CPU_BIOS }, /* BIOS */
54 { "freq", CPU_FREQ }, /* Frequency */
55 { "mtrr", CPU_MTRR }, /* MTRR */
56 { "perf", CPU_PERF }, /* Performance */
57 { "cache", CPU_CACHE }, /* Cache */
58 { "sysenter", CPU_SYSENTER }, /* Sysenter */
59 { "therm", CPU_THERM }, /* Thermal */
60 { "misc", CPU_MISC }, /* Miscellaneous */
61 { "debug", CPU_DEBUG }, /* Debug */
62 { "pat", CPU_PAT }, /* PAT */
63 { "vmx", CPU_VMX }, /* VMX */
64 { "call", CPU_CALL }, /* System Call */
65 { "base", CPU_BASE }, /* BASE Address */
66 { "smm", CPU_SMM }, /* System mgmt mode */
67	{ "svm",	CPU_SVM		},	/*Secure Virtual Machine*/
68 { "osvm", CPU_OSVM }, /* OS-Visible Workaround*/
69	{ "tss",	CPU_TSS		},	/* Task State Segment	*/
70 { "cr", CPU_CR }, /* Control Registers */
71 { "dt", CPU_DT }, /* Descriptor Table */
72 { "registers", CPU_REG_ALL }, /* Select all Registers */
73};
74
75static struct cpu_file_base cpu_file[] = {
76 { "index", CPU_REG_ALL }, /* index */
77 { "value", CPU_REG_ALL }, /* value */
78};
79
80/* Intel Registers Range */
81static struct cpu_debug_range cpu_intel_range[] = {
82 { 0x00000000, 0x00000001, CPU_MC, CPU_INTEL_ALL },
83 { 0x00000006, 0x00000007, CPU_MONITOR, CPU_CX_AT_XE },
84 { 0x00000010, 0x00000010, CPU_TIME, CPU_INTEL_ALL },
85 { 0x00000011, 0x00000013, CPU_PMC, CPU_INTEL_PENTIUM },
86 { 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE },
87 { 0x0000001B, 0x0000001B, CPU_APIC, CPU_P6_CX_AT_XE },
88
89 { 0x0000002A, 0x0000002A, CPU_POWERON, CPU_PX_CX_AT_XE },
90 { 0x0000002B, 0x0000002B, CPU_POWERON, CPU_INTEL_XEON },
91 { 0x0000002C, 0x0000002C, CPU_FREQ, CPU_INTEL_XEON },
92 { 0x0000003A, 0x0000003A, CPU_CONTROL, CPU_CX_AT_XE },
93
94 { 0x00000040, 0x00000043, CPU_LBRANCH, CPU_PM_CX_AT_XE },
95 { 0x00000044, 0x00000047, CPU_LBRANCH, CPU_PM_CO_AT },
96 { 0x00000060, 0x00000063, CPU_LBRANCH, CPU_C2_AT },
97 { 0x00000064, 0x00000067, CPU_LBRANCH, CPU_INTEL_ATOM },
98
99 { 0x00000079, 0x00000079, CPU_BIOS, CPU_P6_CX_AT_XE },
100 { 0x00000088, 0x0000008A, CPU_CACHE, CPU_INTEL_P6 },
101 { 0x0000008B, 0x0000008B, CPU_BIOS, CPU_P6_CX_AT_XE },
102 { 0x0000009B, 0x0000009B, CPU_MONITOR, CPU_INTEL_XEON },
103
104 { 0x000000C1, 0x000000C2, CPU_PMC, CPU_P6_CX_AT },
105 { 0x000000CD, 0x000000CD, CPU_FREQ, CPU_CX_AT },
106 { 0x000000E7, 0x000000E8, CPU_PERF, CPU_CX_AT },
107 { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_P6_CX_XE },
108
109 { 0x00000116, 0x00000116, CPU_CACHE, CPU_INTEL_P6 },
110 { 0x00000118, 0x00000118, CPU_CACHE, CPU_INTEL_P6 },
111 { 0x00000119, 0x00000119, CPU_CACHE, CPU_INTEL_PX },
112 { 0x0000011A, 0x0000011B, CPU_CACHE, CPU_INTEL_P6 },
113 { 0x0000011E, 0x0000011E, CPU_CACHE, CPU_PX_CX_AT },
114
115 { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE },
116 { 0x00000179, 0x0000017A, CPU_MC, CPU_PX_CX_AT_XE },
117 { 0x0000017B, 0x0000017B, CPU_MC, CPU_P6_XE },
118 { 0x00000186, 0x00000187, CPU_PMC, CPU_P6_CX_AT },
119 { 0x00000198, 0x00000199, CPU_PERF, CPU_PM_CX_AT_XE },
120 { 0x0000019A, 0x0000019A, CPU_TIME, CPU_PM_CX_AT_XE },
121 { 0x0000019B, 0x0000019D, CPU_THERM, CPU_PM_CX_AT_XE },
122 { 0x000001A0, 0x000001A0, CPU_MISC, CPU_PM_CX_AT_XE },
123
124 { 0x000001C9, 0x000001C9, CPU_LBRANCH, CPU_PM_CX_AT },
125 { 0x000001D7, 0x000001D8, CPU_LBRANCH, CPU_INTEL_XEON },
126 { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_CX_AT_XE },
127 { 0x000001DA, 0x000001DA, CPU_LBRANCH, CPU_INTEL_XEON },
128 { 0x000001DB, 0x000001DB, CPU_LBRANCH, CPU_P6_XE },
129 { 0x000001DC, 0x000001DC, CPU_LBRANCH, CPU_INTEL_P6 },
130 { 0x000001DD, 0x000001DE, CPU_LBRANCH, CPU_PX_CX_AT_XE },
131 { 0x000001E0, 0x000001E0, CPU_LBRANCH, CPU_INTEL_P6 },
132
133 { 0x00000200, 0x0000020F, CPU_MTRR, CPU_P6_CX_XE },
134 { 0x00000250, 0x00000250, CPU_MTRR, CPU_P6_CX_XE },
135 { 0x00000258, 0x00000259, CPU_MTRR, CPU_P6_CX_XE },
136 { 0x00000268, 0x0000026F, CPU_MTRR, CPU_P6_CX_XE },
137 { 0x00000277, 0x00000277, CPU_PAT, CPU_C2_AT_XE },
138 { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_P6_CX_XE },
139
140 { 0x00000300, 0x00000308, CPU_PMC, CPU_INTEL_XEON },
141 { 0x00000309, 0x0000030B, CPU_PMC, CPU_C2_AT_XE },
142 { 0x0000030C, 0x00000311, CPU_PMC, CPU_INTEL_XEON },
143 { 0x00000345, 0x00000345, CPU_PMC, CPU_C2_AT },
144 { 0x00000360, 0x00000371, CPU_PMC, CPU_INTEL_XEON },
145 { 0x0000038D, 0x00000390, CPU_PMC, CPU_C2_AT },
146 { 0x000003A0, 0x000003BE, CPU_PMC, CPU_INTEL_XEON },
147 { 0x000003C0, 0x000003CD, CPU_PMC, CPU_INTEL_XEON },
148 { 0x000003E0, 0x000003E1, CPU_PMC, CPU_INTEL_XEON },
149 { 0x000003F0, 0x000003F0, CPU_PMC, CPU_INTEL_XEON },
150 { 0x000003F1, 0x000003F1, CPU_PMC, CPU_C2_AT_XE },
151 { 0x000003F2, 0x000003F2, CPU_PMC, CPU_INTEL_XEON },
152
153 { 0x00000400, 0x00000402, CPU_MC, CPU_PM_CX_AT_XE },
154 { 0x00000403, 0x00000403, CPU_MC, CPU_INTEL_XEON },
155 { 0x00000404, 0x00000406, CPU_MC, CPU_PM_CX_AT_XE },
156 { 0x00000407, 0x00000407, CPU_MC, CPU_INTEL_XEON },
157 { 0x00000408, 0x0000040A, CPU_MC, CPU_PM_CX_AT_XE },
158 { 0x0000040B, 0x0000040B, CPU_MC, CPU_INTEL_XEON },
159 { 0x0000040C, 0x0000040E, CPU_MC, CPU_PM_CX_XE },
160 { 0x0000040F, 0x0000040F, CPU_MC, CPU_INTEL_XEON },
161 { 0x00000410, 0x00000412, CPU_MC, CPU_PM_CX_AT_XE },
162 { 0x00000413, 0x00000417, CPU_MC, CPU_CX_AT_XE },
163 { 0x00000480, 0x0000048B, CPU_VMX, CPU_CX_AT_XE },
164
165 { 0x00000600, 0x00000600, CPU_DEBUG, CPU_PM_CX_AT_XE },
166 { 0x00000680, 0x0000068F, CPU_LBRANCH, CPU_INTEL_XEON },
167 { 0x000006C0, 0x000006CF, CPU_LBRANCH, CPU_INTEL_XEON },
168
169 { 0x000107CC, 0x000107D3, CPU_PMC, CPU_INTEL_XEON_MP },
170
171 { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON },
172 { 0xC0000081, 0xC0000082, CPU_CALL, CPU_INTEL_XEON },
173 { 0xC0000084, 0xC0000084, CPU_CALL, CPU_INTEL_XEON },
174 { 0xC0000100, 0xC0000102, CPU_BASE, CPU_INTEL_XEON },
175};
176
177/* AMD Registers Range */
178static struct cpu_debug_range cpu_amd_range[] = {
179 { 0x00000010, 0x00000010, CPU_TIME, CPU_ALL, },
180 { 0x0000001B, 0x0000001B, CPU_APIC, CPU_ALL, },
181 { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_ALL, },
182
183 { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_ALL, },
184 { 0x00000179, 0x0000017A, CPU_MC, CPU_ALL, },
185 { 0x0000017B, 0x0000017B, CPU_MC, CPU_ALL, },
186 { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_ALL, },
187 { 0x000001DB, 0x000001DE, CPU_LBRANCH, CPU_ALL, },
188
189 { 0x00000200, 0x0000020F, CPU_MTRR, CPU_ALL, },
190 { 0x00000250, 0x00000250, CPU_MTRR, CPU_ALL, },
191 { 0x00000258, 0x00000259, CPU_MTRR, CPU_ALL, },
192 { 0x00000268, 0x0000026F, CPU_MTRR, CPU_ALL, },
193 { 0x00000277, 0x00000277, CPU_PAT, CPU_ALL, },
194 { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_ALL, },
195
196 { 0x00000400, 0x00000417, CPU_MC, CPU_ALL, },
197
198 { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_ALL, },
199 { 0xC0000081, 0xC0000084, CPU_CALL, CPU_ALL, },
200 { 0xC0000100, 0xC0000102, CPU_BASE, CPU_ALL, },
201 { 0xC0000103, 0xC0000103, CPU_TIME, CPU_ALL, },
202
203 { 0xC0000408, 0xC000040A, CPU_MC, CPU_ALL, },
204
205 { 0xc0010000, 0xc0010007, CPU_PMC, CPU_ALL, },
206 { 0xc0010010, 0xc0010010, CPU_MTRR, CPU_ALL, },
207 { 0xc0010016, 0xc001001A, CPU_MTRR, CPU_ALL, },
208 { 0xc001001D, 0xc001001D, CPU_MTRR, CPU_ALL, },
209 { 0xc0010030, 0xc0010035, CPU_BIOS, CPU_ALL, },
210 { 0xc0010056, 0xc0010056, CPU_SMM, CPU_ALL, },
211 { 0xc0010061, 0xc0010063, CPU_SMM, CPU_ALL, },
212 { 0xc0010074, 0xc0010074, CPU_MC, CPU_ALL, },
213 { 0xc0010111, 0xc0010113, CPU_SMM, CPU_ALL, },
214 { 0xc0010114, 0xc0010118, CPU_SVM, CPU_ALL, },
215 { 0xc0010119, 0xc001011A, CPU_SMM, CPU_ALL, },
216 { 0xc0010140, 0xc0010141, CPU_OSVM, CPU_ALL, },
217 { 0xc0010156, 0xc0010156, CPU_SMM, CPU_ALL, },
218};
219
220
221static int get_cpu_modelflag(unsigned cpu)
222{
223 int flag;
224
225 switch (per_cpu(cpu_model, cpu)) {
226 /* Intel */
227 case 0x0501:
228 case 0x0502:
229 case 0x0504:
230 flag = CPU_INTEL_PENTIUM;
231 break;
232 case 0x0601:
233 case 0x0603:
234 case 0x0605:
235 case 0x0607:
236 case 0x0608:
237 case 0x060A:
238 case 0x060B:
239 flag = CPU_INTEL_P6;
240 break;
241 case 0x0609:
242 case 0x060D:
243 flag = CPU_INTEL_PENTIUM_M;
244 break;
245 case 0x060E:
246 flag = CPU_INTEL_CORE;
247 break;
248 case 0x060F:
249 case 0x0617:
250 flag = CPU_INTEL_CORE2;
251 break;
252 case 0x061C:
253 flag = CPU_INTEL_ATOM;
254 break;
255 case 0x0F00:
256 case 0x0F01:
257 case 0x0F02:
258 case 0x0F03:
259 case 0x0F04:
260 flag = CPU_INTEL_XEON_P4;
261 break;
262 case 0x0F06:
263 flag = CPU_INTEL_XEON_MP;
264 break;
265 default:
266 flag = CPU_NONE;
267 break;
268 }
269
270 return flag;
271}
272
273static int get_cpu_range_count(unsigned cpu)
274{
275 int index;
276
277 switch (per_cpu(cpu_model, cpu) >> 16) {
278 case X86_VENDOR_INTEL:
279 index = ARRAY_SIZE(cpu_intel_range);
280 break;
281 case X86_VENDOR_AMD:
282 index = ARRAY_SIZE(cpu_amd_range);
283 break;
284 default:
285 index = 0;
286 break;
287 }
288
289 return index;
290}
291
292static int is_typeflag_valid(unsigned cpu, unsigned flag)
293{
294 unsigned vendor, modelflag;
295 int i, index;
296
297 /* Standard Registers should be always valid */
298 if (flag >= CPU_TSS)
299 return 1;
300
301 modelflag = per_cpu(cpu_modelflag, cpu);
302 vendor = per_cpu(cpu_model, cpu) >> 16;
303 index = get_cpu_range_count(cpu);
304
305 for (i = 0; i < index; i++) {
306 switch (vendor) {
307 case X86_VENDOR_INTEL:
308 if ((cpu_intel_range[i].model & modelflag) &&
309 (cpu_intel_range[i].flag & flag))
310 return 1;
311 break;
312 case X86_VENDOR_AMD:
313 if (cpu_amd_range[i].flag & flag)
314 return 1;
315 break;
316 }
317 }
318
319 /* Invalid */
320 return 0;
321}
322
323static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
324 int index, unsigned flag)
325{
326 unsigned modelflag;
327
328 modelflag = per_cpu(cpu_modelflag, cpu);
329 *max = 0;
330 switch (per_cpu(cpu_model, cpu) >> 16) {
331 case X86_VENDOR_INTEL:
332 if ((cpu_intel_range[index].model & modelflag) &&
333 (cpu_intel_range[index].flag & flag)) {
334 *min = cpu_intel_range[index].min;
335 *max = cpu_intel_range[index].max;
336 }
337 break;
338 case X86_VENDOR_AMD:
339 if (cpu_amd_range[index].flag & flag) {
340 *min = cpu_amd_range[index].min;
341 *max = cpu_amd_range[index].max;
342 }
343 break;
344 }
345
346 return *max;
347}
348
349/* This function can also be called with seq = NULL for printk */
350static void print_cpu_data(struct seq_file *seq, unsigned type,
351 u32 low, u32 high)
352{
353 struct cpu_private *priv;
354 u64 val = high;
355
356 if (seq) {
357 priv = seq->private;
358 if (priv->file) {
359 val = (val << 32) | low;
360 seq_printf(seq, "0x%llx\n", val);
361 } else
362 seq_printf(seq, " %08x: %08x_%08x\n",
363 type, high, low);
364 } else
365 printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
366}
367
368/* This function can also be called with seq = NULL for printk */
369static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
370{
371 unsigned msr, msr_min, msr_max;
372 struct cpu_private *priv;
373 u32 low, high;
374 int i, range;
375
376 if (seq) {
377 priv = seq->private;
378 if (priv->file) {
379 if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
380 &low, &high))
381 print_cpu_data(seq, priv->reg, low, high);
382 return;
383 }
384 }
385
386 range = get_cpu_range_count(cpu);
387
388 for (i = 0; i < range; i++) {
389 if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
390 continue;
391
392 for (msr = msr_min; msr <= msr_max; msr++) {
393 if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
394 continue;
395 print_cpu_data(seq, msr, low, high);
396 }
397 }
398}
399
400static void print_tss(void *arg)
401{
402 struct pt_regs *regs = task_pt_regs(current);
403 struct seq_file *seq = arg;
404 unsigned int seg;
405
406 seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
407 seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
408 seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
409 seq_printf(seq, " RDX\t: %016lx\n", regs->dx);
410
411 seq_printf(seq, " RSI\t: %016lx\n", regs->si);
412 seq_printf(seq, " RDI\t: %016lx\n", regs->di);
413 seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
414 seq_printf(seq, " ESP\t: %016lx\n", regs->sp);
415
416#ifdef CONFIG_X86_64
417 seq_printf(seq, " R08\t: %016lx\n", regs->r8);
418 seq_printf(seq, " R09\t: %016lx\n", regs->r9);
419 seq_printf(seq, " R10\t: %016lx\n", regs->r10);
420 seq_printf(seq, " R11\t: %016lx\n", regs->r11);
421 seq_printf(seq, " R12\t: %016lx\n", regs->r12);
422 seq_printf(seq, " R13\t: %016lx\n", regs->r13);
423 seq_printf(seq, " R14\t: %016lx\n", regs->r14);
424 seq_printf(seq, " R15\t: %016lx\n", regs->r15);
425#endif
426
427 asm("movl %%cs,%0" : "=r" (seg));
428 seq_printf(seq, " CS\t: %04x\n", seg);
429 asm("movl %%ds,%0" : "=r" (seg));
430 seq_printf(seq, " DS\t: %04x\n", seg);
431 seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
432 asm("movl %%es,%0" : "=r" (seg));
433 seq_printf(seq, " ES\t: %04x\n", seg);
434 asm("movl %%fs,%0" : "=r" (seg));
435 seq_printf(seq, " FS\t: %04x\n", seg);
436 asm("movl %%gs,%0" : "=r" (seg));
437 seq_printf(seq, " GS\t: %04x\n", seg);
438
439 seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
440
441 seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
442}
443
444static void print_cr(void *arg)
445{
446 struct seq_file *seq = arg;
447
448 seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
449 seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
450 seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
451 seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
452#ifdef CONFIG_X86_64
453 seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
454#endif
455}
456
457static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
458{
459 seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
460}
461
462static void print_dt(void *seq)
463{
464 struct desc_ptr dt;
465 unsigned long ldt;
466
467 /* IDT */
468 store_idt((struct desc_ptr *)&dt);
469 print_desc_ptr("IDT", seq, dt);
470
471 /* GDT */
472 store_gdt((struct desc_ptr *)&dt);
473 print_desc_ptr("GDT", seq, dt);
474
475 /* LDT */
476 store_ldt(ldt);
477 seq_printf(seq, " LDT\t: %016lx\n", ldt);
478
479 /* TR */
480 store_tr(ldt);
481 seq_printf(seq, " TR\t: %016lx\n", ldt);
482}
483
484static void print_dr(void *arg)
485{
486 struct seq_file *seq = arg;
487 unsigned long dr;
488 int i;
489
490 for (i = 0; i < 8; i++) {
491 /* Ignore db4, db5 */
492 if ((i == 4) || (i == 5))
493 continue;
494 get_debugreg(dr, i);
495 seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
496 }
497
498 seq_printf(seq, "\n MSR\t:\n");
499}
500
501static void print_apic(void *arg)
502{
503 struct seq_file *seq = arg;
504
505#ifdef CONFIG_X86_LOCAL_APIC
506 seq_printf(seq, " LAPIC\t:\n");
507 seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
508 seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
509 seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
510 seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
511 seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
512 seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
513 seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
514 seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
515 seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
516 seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
517 seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
518 seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
519 seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
520 seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
521 seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
522 seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
523 seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
524 seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
525 seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
526 seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
527 seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
528#endif /* CONFIG_X86_LOCAL_APIC */
529
530 seq_printf(seq, "\n MSR\t:\n");
531}
532
533static int cpu_seq_show(struct seq_file *seq, void *v)
534{
535 struct cpu_private *priv = seq->private;
536
537 if (priv == NULL)
538 return -EINVAL;
539
540 switch (cpu_base[priv->type].flag) {
541 case CPU_TSS:
542 smp_call_function_single(priv->cpu, print_tss, seq, 1);
543 break;
544 case CPU_CR:
545 smp_call_function_single(priv->cpu, print_cr, seq, 1);
546 break;
547 case CPU_DT:
548 smp_call_function_single(priv->cpu, print_dt, seq, 1);
549 break;
550 case CPU_DEBUG:
551 if (priv->file == CPU_INDEX_BIT)
552 smp_call_function_single(priv->cpu, print_dr, seq, 1);
553 print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
554 break;
555 case CPU_APIC:
556 if (priv->file == CPU_INDEX_BIT)
557 smp_call_function_single(priv->cpu, print_apic, seq, 1);
558 print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
559 break;
560
561 default:
562 print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
563 break;
564 }
565 seq_printf(seq, "\n");
566
567 return 0;
568}
569
570static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
571{
572 if (*pos == 0) /* One time is enough ;-) */
573 return seq;
574
575 return NULL;
576}
577
578static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
579{
580 (*pos)++;
581
582 return cpu_seq_start(seq, pos);
583}
584
585static void cpu_seq_stop(struct seq_file *seq, void *v)
586{
587}
588
589static const struct seq_operations cpu_seq_ops = {
590 .start = cpu_seq_start,
591 .next = cpu_seq_next,
592 .stop = cpu_seq_stop,
593 .show = cpu_seq_show,
594};
595
596static int cpu_seq_open(struct inode *inode, struct file *file)
597{
598 struct cpu_private *priv = inode->i_private;
599 struct seq_file *seq;
600 int err;
601
602 err = seq_open(file, &cpu_seq_ops);
603 if (!err) {
604 seq = file->private_data;
605 seq->private = priv;
606 }
607
608 return err;
609}
610
611static const struct file_operations cpu_fops = {
612 .open = cpu_seq_open,
613 .read = seq_read,
614 .llseek = seq_lseek,
615 .release = seq_release,
616};
617
618static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
619 unsigned file, struct dentry *dentry)
620{
621 struct cpu_private *priv = NULL;
622
623	/* Already initialized */
624 if (file == CPU_INDEX_BIT)
625 if (per_cpu(cpu_arr[type].init, cpu))
626 return 0;
627
628 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
629 if (priv == NULL)
630 return -ENOMEM;
631
632 priv->cpu = cpu;
633 priv->type = type;
634 priv->reg = reg;
635 priv->file = file;
636 mutex_lock(&cpu_debug_lock);
637 per_cpu(priv_arr[type], cpu) = priv;
638 per_cpu(cpu_priv_count, cpu)++;
639 mutex_unlock(&cpu_debug_lock);
640
641 if (file)
642 debugfs_create_file(cpu_file[file].name, S_IRUGO,
643 dentry, (void *)priv, &cpu_fops);
644 else {
645 debugfs_create_file(cpu_base[type].name, S_IRUGO,
646 per_cpu(cpu_arr[type].dentry, cpu),
647 (void *)priv, &cpu_fops);
648 mutex_lock(&cpu_debug_lock);
649 per_cpu(cpu_arr[type].init, cpu) = 1;
650 mutex_unlock(&cpu_debug_lock);
651 }
652
653 return 0;
654}
655
656static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
657 struct dentry *dentry)
658{
659 unsigned file;
660 int err = 0;
661
662 for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
663 err = cpu_create_file(cpu, type, reg, file, dentry);
664 if (err)
665 return err;
666 }
667
668 return err;
669}
670
671static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
672{
673 struct dentry *cpu_dentry = NULL;
674 unsigned reg, reg_min, reg_max;
675 int i, range, err = 0;
676 char reg_dir[12];
677 u32 low, high;
678
679 range = get_cpu_range_count(cpu);
680
681 for (i = 0; i < range; i++) {
682 if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
683 cpu_base[type].flag))
684 continue;
685
686 for (reg = reg_min; reg <= reg_max; reg++) {
687 if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
688 continue;
689
690 sprintf(reg_dir, "0x%x", reg);
691 cpu_dentry = debugfs_create_dir(reg_dir, dentry);
692 err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
693 if (err)
694 return err;
695 }
696 }
697
698 return err;
699}
700
701static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
702{
703 struct dentry *cpu_dentry = NULL;
704 unsigned type;
705 int err = 0;
706
707 for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
708 if (!is_typeflag_valid(cpu, cpu_base[type].flag))
709 continue;
710 cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
711 per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
712
713 if (type < CPU_TSS_BIT)
714 err = cpu_init_msr(cpu, type, cpu_dentry);
715 else
716 err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
717 cpu_dentry);
718 if (err)
719 return err;
720 }
721
722 return err;
723}
724
725static int cpu_init_cpu(void)
726{
727 struct dentry *cpu_dentry = NULL;
728 struct cpuinfo_x86 *cpui;
729 char cpu_dir[12];
730 unsigned cpu;
731 int err = 0;
732
733 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
734 cpui = &cpu_data(cpu);
735 if (!cpu_has(cpui, X86_FEATURE_MSR))
736 continue;
737 per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
738 (cpui->x86 << 8) |
739 (cpui->x86_model));
740 per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);
741
742 sprintf(cpu_dir, "cpu%d", cpu);
743 cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
744 err = cpu_init_allreg(cpu, cpu_dentry);
745
746 pr_info("cpu%d(%d) debug files %d\n",
747 cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
748 if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
749 pr_err("Register files count %d exceeds limit %d\n",
750 per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
751 per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
752 err = -ENFILE;
753 }
754 if (err)
755 return err;
756 }
757
758 return err;
759}
760
761static int __init cpu_debug_init(void)
762{
763 cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);
764
765 return cpu_init_cpu();
766}
767
768static void __exit cpu_debug_exit(void)
769{
770 int i, cpu;
771
772 if (cpu_debugfs_dir)
773 debugfs_remove_recursive(cpu_debugfs_dir);
774
775 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
776 for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
777 kfree(per_cpu(priv_arr[i], cpu));
778}
779
780module_init(cpu_debug_init);
781module_exit(cpu_debug_exit);
782
783MODULE_AUTHOR("Jaswinder Singh Rajput");
784MODULE_DESCRIPTION("CPU Debug module");
785MODULE_LICENSE("GPL");
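To make the model matching in get_cpu_modelflag() concrete: cpu_init_cpu() packs vendor, family and model into one per-CPU word as (vendor << 16) | (family << 8) | model, and the Intel cases such as 0x061C (Atom, the 06_1C row in cpu_debug.h) work out because X86_VENDOR_INTEL is 0. A small stand-alone sketch of that packing, not part of the patch:

	#include <stdio.h>

	/* Same packing as cpu_init_cpu() above. */
	static unsigned pack_cpu_model(unsigned vendor, unsigned family, unsigned model)
	{
		return (vendor << 16) | (family << 8) | model;
	}

	int main(void)
	{
		unsigned intel = 0;	/* X86_VENDOR_INTEL */
		unsigned v = pack_cpu_model(intel, 0x06, 0x1C);

		/* get_cpu_modelflag() switches on this value and picks CPU_INTEL_ATOM. */
		printf("cpu_model = 0x%06x\n", v);	/* 0x00061c */
		return 0;
	}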
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 309949e9e1c1..6a5a2970f4c5 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -74,8 +74,7 @@ static void ich_force_hpet_resume(void)
 	if (!force_hpet_address)
 		return;
 
-	if (rcba_base == NULL)
-		BUG();
+	BUG_ON(rcba_base == NULL);
 
 	/* read the Function Disable register, dword mode only */
 	val = readl(rcba_base + 0x3404);
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index d11745334a67..f256e73542d7 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -121,23 +121,30 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	pagefault_enable();
 }
 
-/* This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
 	pagefault_disable();
 
-	idx = type + KM_TYPE_NR*smp_processor_id();
+	debug_kmap_atomic_prot(type);
+
+	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
 	arch_flush_lazy_mmu_mode();
 
 	return (void*) vaddr;
 }
+
+/* This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
+}
 EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
 
 struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 04102d42ff42..592984e5496b 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -18,6 +18,7 @@
 
 #include <asm/iomap.h>
 #include <asm/pat.h>
+#include <asm/highmem.h>
 #include <linux/module.h>
 
 int is_io_mapping_possible(resource_size_t base, unsigned long size)
@@ -36,11 +37,6 @@ EXPORT_SYMBOL_GPL(is_io_mapping_possible);
 void *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
-	enum fixed_addresses idx;
-	unsigned long vaddr;
-
-	pagefault_disable();
-
 	/*
 	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
 	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
@@ -50,12 +46,7 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
 		prot = PAGE_KERNEL_UC_MINUS;
 
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void*) vaddr;
+	return kmap_atomic_prot_pfn(pfn, type, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 6a518dd08a36..4f115e00486b 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -310,7 +310,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
 
 	if (!ctx->active) {
-		pr_warning("kmmio: spurious debug trap on CPU %d.\n",
+		pr_debug("kmmio: spurious debug trap on CPU %d.\n",
 			smp_processor_id());
 		goto out;
 	}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8253bc97587e..9c4294986af7 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -522,6 +522,17 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	 * primary protection behavior:
 	 */
 	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
+
+	/*
+	 * Intel Atom errata AAH41 workaround.
+	 *
+	 * The real fix should be in hw or in a microcode update, but
+	 * we also probabilistically try to reduce the window of having
+	 * a large TLB mixed with 4K TLBs while instruction fetches are
+	 * going on.
+	 */
+	__flush_tlb_all();
+
 	base = NULL;
 
 out_unlock: