diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-03-03 20:29:19 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-03-03 20:29:19 -0500 |
commit | 91d75e209bd59695f0708d66964d928d45b3b2f3 (patch) | |
tree | 32cab1359d951e4193bebb181a0f0319824a2b95 /arch/x86 | |
parent | 9976b39b5031bbf76f715893cf080b6a17683881 (diff) | |
parent | 8b0e5860cb099d7958d13b00ffbc35ad02735700 (diff) |
Merge branch 'x86/core' into core/percpu
Diffstat (limited to 'arch/x86')
57 files changed, 974 insertions, 1382 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8015641478bd..f5cef3fbf9a5 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -40,6 +40,9 @@ config X86 | |||
40 | select HAVE_GENERIC_DMA_COHERENT if X86_32 | 40 | select HAVE_GENERIC_DMA_COHERENT if X86_32 |
41 | select HAVE_EFFICIENT_UNALIGNED_ACCESS | 41 | select HAVE_EFFICIENT_UNALIGNED_ACCESS |
42 | select USER_STACKTRACE_SUPPORT | 42 | select USER_STACKTRACE_SUPPORT |
43 | select HAVE_KERNEL_GZIP | ||
44 | select HAVE_KERNEL_BZIP2 | ||
45 | select HAVE_KERNEL_LZMA | ||
43 | 46 | ||
44 | config ARCH_DEFCONFIG | 47 | config ARCH_DEFCONFIG |
45 | string | 48 | string |
@@ -1825,7 +1828,7 @@ config DMAR | |||
1825 | remapping devices. | 1828 | remapping devices. |
1826 | 1829 | ||
1827 | config DMAR_DEFAULT_ON | 1830 | config DMAR_DEFAULT_ON |
1828 | def_bool n | 1831 | def_bool y |
1829 | prompt "Enable DMA Remapping Devices by default" | 1832 | prompt "Enable DMA Remapping Devices by default" |
1830 | depends on DMAR | 1833 | depends on DMAR |
1831 | help | 1834 | help |
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 1771c804e02f..3ca4c194b8e5 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | # create a compressed vmlinux image from the original vmlinux | 4 | # create a compressed vmlinux image from the original vmlinux |
5 | # | 5 | # |
6 | 6 | ||
7 | targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o | 7 | targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o |
8 | 8 | ||
9 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 | 9 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 |
10 | KBUILD_CFLAGS += -fno-strict-aliasing -fPIC | 10 | KBUILD_CFLAGS += -fno-strict-aliasing -fPIC |
@@ -47,18 +47,35 @@ ifeq ($(CONFIG_X86_32),y) | |||
47 | ifdef CONFIG_RELOCATABLE | 47 | ifdef CONFIG_RELOCATABLE |
48 | $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE | 48 | $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE |
49 | $(call if_changed,gzip) | 49 | $(call if_changed,gzip) |
50 | $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE | ||
51 | $(call if_changed,bzip2) | ||
52 | $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE | ||
53 | $(call if_changed,lzma) | ||
50 | else | 54 | else |
51 | $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE | 55 | $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE |
52 | $(call if_changed,gzip) | 56 | $(call if_changed,gzip) |
57 | $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE | ||
58 | $(call if_changed,bzip2) | ||
59 | $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE | ||
60 | $(call if_changed,lzma) | ||
53 | endif | 61 | endif |
54 | LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T | 62 | LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T |
55 | 63 | ||
56 | else | 64 | else |
65 | |||
57 | $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE | 66 | $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE |
58 | $(call if_changed,gzip) | 67 | $(call if_changed,gzip) |
68 | $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE | ||
69 | $(call if_changed,bzip2) | ||
70 | $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE | ||
71 | $(call if_changed,lzma) | ||
59 | 72 | ||
60 | LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T | 73 | LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T |
61 | endif | 74 | endif |
62 | 75 | ||
63 | $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE | 76 | suffix_$(CONFIG_KERNEL_GZIP) = gz |
77 | suffix_$(CONFIG_KERNEL_BZIP2) = bz2 | ||
78 | suffix_$(CONFIG_KERNEL_LZMA) = lzma | ||
79 | |||
80 | $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE | ||
64 | $(call if_changed,ld) | 81 | $(call if_changed,ld) |
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index da062216948a..e45be73684ff 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c | |||
@@ -116,71 +116,13 @@ | |||
116 | /* | 116 | /* |
117 | * gzip declarations | 117 | * gzip declarations |
118 | */ | 118 | */ |
119 | |||
120 | #define OF(args) args | ||
121 | #define STATIC static | 119 | #define STATIC static |
122 | 120 | ||
123 | #undef memset | 121 | #undef memset |
124 | #undef memcpy | 122 | #undef memcpy |
125 | #define memzero(s, n) memset((s), 0, (n)) | 123 | #define memzero(s, n) memset((s), 0, (n)) |
126 | 124 | ||
127 | typedef unsigned char uch; | ||
128 | typedef unsigned short ush; | ||
129 | typedef unsigned long ulg; | ||
130 | |||
131 | /* | ||
132 | * Window size must be at least 32k, and a power of two. | ||
133 | * We don't actually have a window just a huge output buffer, | ||
134 | * so we report a 2G window size, as that should always be | ||
135 | * larger than our output buffer: | ||
136 | */ | ||
137 | #define WSIZE 0x80000000 | ||
138 | |||
139 | /* Input buffer: */ | ||
140 | static unsigned char *inbuf; | ||
141 | |||
142 | /* Sliding window buffer (and final output buffer): */ | ||
143 | static unsigned char *window; | ||
144 | |||
145 | /* Valid bytes in inbuf: */ | ||
146 | static unsigned insize; | ||
147 | |||
148 | /* Index of next byte to be processed in inbuf: */ | ||
149 | static unsigned inptr; | ||
150 | |||
151 | /* Bytes in output buffer: */ | ||
152 | static unsigned outcnt; | ||
153 | |||
154 | /* gzip flag byte */ | ||
155 | #define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ | ||
156 | #define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gz file */ | ||
157 | #define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ | ||
158 | #define ORIG_NAME 0x08 /* bit 3 set: original file name present */ | ||
159 | #define COMMENT 0x10 /* bit 4 set: file comment present */ | ||
160 | #define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ | ||
161 | #define RESERVED 0xC0 /* bit 6, 7: reserved */ | ||
162 | |||
163 | #define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) | ||
164 | |||
165 | /* Diagnostic functions */ | ||
166 | #ifdef DEBUG | ||
167 | # define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0) | ||
168 | # define Trace(x) do { fprintf x; } while (0) | ||
169 | # define Tracev(x) do { if (verbose) fprintf x ; } while (0) | ||
170 | # define Tracevv(x) do { if (verbose > 1) fprintf x ; } while (0) | ||
171 | # define Tracec(c, x) do { if (verbose && (c)) fprintf x ; } while (0) | ||
172 | # define Tracecv(c, x) do { if (verbose > 1 && (c)) fprintf x ; } while (0) | ||
173 | #else | ||
174 | # define Assert(cond, msg) | ||
175 | # define Trace(x) | ||
176 | # define Tracev(x) | ||
177 | # define Tracevv(x) | ||
178 | # define Tracec(c, x) | ||
179 | # define Tracecv(c, x) | ||
180 | #endif | ||
181 | 125 | ||
182 | static int fill_inbuf(void); | ||
183 | static void flush_window(void); | ||
184 | static void error(char *m); | 126 | static void error(char *m); |
185 | 127 | ||
186 | /* | 128 | /* |
@@ -189,13 +131,8 @@ static void error(char *m); | |||
189 | static struct boot_params *real_mode; /* Pointer to real-mode data */ | 131 | static struct boot_params *real_mode; /* Pointer to real-mode data */ |
190 | static int quiet; | 132 | static int quiet; |
191 | 133 | ||
192 | extern unsigned char input_data[]; | ||
193 | extern int input_len; | ||
194 | |||
195 | static long bytes_out; | ||
196 | |||
197 | static void *memset(void *s, int c, unsigned n); | 134 | static void *memset(void *s, int c, unsigned n); |
198 | static void *memcpy(void *dest, const void *src, unsigned n); | 135 | void *memcpy(void *dest, const void *src, unsigned n); |
199 | 136 | ||
200 | static void __putstr(int, const char *); | 137 | static void __putstr(int, const char *); |
201 | #define putstr(__x) __putstr(0, __x) | 138 | #define putstr(__x) __putstr(0, __x) |
@@ -213,7 +150,17 @@ static char *vidmem; | |||
213 | static int vidport; | 150 | static int vidport; |
214 | static int lines, cols; | 151 | static int lines, cols; |
215 | 152 | ||
216 | #include "../../../../lib/inflate.c" | 153 | #ifdef CONFIG_KERNEL_GZIP |
154 | #include "../../../../lib/decompress_inflate.c" | ||
155 | #endif | ||
156 | |||
157 | #ifdef CONFIG_KERNEL_BZIP2 | ||
158 | #include "../../../../lib/decompress_bunzip2.c" | ||
159 | #endif | ||
160 | |||
161 | #ifdef CONFIG_KERNEL_LZMA | ||
162 | #include "../../../../lib/decompress_unlzma.c" | ||
163 | #endif | ||
217 | 164 | ||
218 | static void scroll(void) | 165 | static void scroll(void) |
219 | { | 166 | { |
@@ -282,7 +229,7 @@ static void *memset(void *s, int c, unsigned n) | |||
282 | return s; | 229 | return s; |
283 | } | 230 | } |
284 | 231 | ||
285 | static void *memcpy(void *dest, const void *src, unsigned n) | 232 | void *memcpy(void *dest, const void *src, unsigned n) |
286 | { | 233 | { |
287 | int i; | 234 | int i; |
288 | const char *s = src; | 235 | const char *s = src; |
@@ -293,38 +240,6 @@ static void *memcpy(void *dest, const void *src, unsigned n) | |||
293 | return dest; | 240 | return dest; |
294 | } | 241 | } |
295 | 242 | ||
296 | /* =========================================================================== | ||
297 | * Fill the input buffer. This is called only when the buffer is empty | ||
298 | * and at least one byte is really needed. | ||
299 | */ | ||
300 | static int fill_inbuf(void) | ||
301 | { | ||
302 | error("ran out of input data"); | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | /* =========================================================================== | ||
307 | * Write the output window window[0..outcnt-1] and update crc and bytes_out. | ||
308 | * (Used for the decompressed data only.) | ||
309 | */ | ||
310 | static void flush_window(void) | ||
311 | { | ||
312 | /* With my window equal to my output buffer | ||
313 | * I only need to compute the crc here. | ||
314 | */ | ||
315 | unsigned long c = crc; /* temporary variable */ | ||
316 | unsigned n; | ||
317 | unsigned char *in, ch; | ||
318 | |||
319 | in = window; | ||
320 | for (n = 0; n < outcnt; n++) { | ||
321 | ch = *in++; | ||
322 | c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); | ||
323 | } | ||
324 | crc = c; | ||
325 | bytes_out += (unsigned long)outcnt; | ||
326 | outcnt = 0; | ||
327 | } | ||
328 | 243 | ||
329 | static void error(char *x) | 244 | static void error(char *x) |
330 | { | 245 | { |
@@ -407,12 +322,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, | |||
407 | lines = real_mode->screen_info.orig_video_lines; | 322 | lines = real_mode->screen_info.orig_video_lines; |
408 | cols = real_mode->screen_info.orig_video_cols; | 323 | cols = real_mode->screen_info.orig_video_cols; |
409 | 324 | ||
410 | window = output; /* Output buffer (Normally at 1M) */ | ||
411 | free_mem_ptr = heap; /* Heap */ | 325 | free_mem_ptr = heap; /* Heap */ |
412 | free_mem_end_ptr = heap + BOOT_HEAP_SIZE; | 326 | free_mem_end_ptr = heap + BOOT_HEAP_SIZE; |
413 | inbuf = input_data; /* Input buffer */ | ||
414 | insize = input_len; | ||
415 | inptr = 0; | ||
416 | 327 | ||
417 | #ifdef CONFIG_X86_64 | 328 | #ifdef CONFIG_X86_64 |
418 | if ((unsigned long)output & (__KERNEL_ALIGN - 1)) | 329 | if ((unsigned long)output & (__KERNEL_ALIGN - 1)) |
@@ -430,10 +341,9 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, | |||
430 | #endif | 341 | #endif |
431 | #endif | 342 | #endif |
432 | 343 | ||
433 | makecrc(); | ||
434 | if (!quiet) | 344 | if (!quiet) |
435 | putstr("\nDecompressing Linux... "); | 345 | putstr("\nDecompressing Linux... "); |
436 | gunzip(); | 346 | decompress(input_data, input_len, NULL, NULL, output, NULL, error); |
437 | parse_elf(output); | 347 | parse_elf(output); |
438 | if (!quiet) | 348 | if (!quiet) |
439 | putstr("done.\nBooting the kernel.\n"); | 349 | putstr("done.\nBooting the kernel.\n"); |
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 5c023f6f652c..235b81d0f6f2 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.29-rc4 | 3 | # Linux kernel version: 2.6.29-rc4 |
4 | # Thu Feb 12 12:57:57 2009 | 4 | # Tue Feb 24 15:50:58 2009 |
5 | # | 5 | # |
6 | # CONFIG_64BIT is not set | 6 | # CONFIG_64BIT is not set |
7 | CONFIG_X86_32=y | 7 | CONFIG_X86_32=y |
@@ -266,7 +266,9 @@ CONFIG_PREEMPT_VOLUNTARY=y | |||
266 | CONFIG_X86_LOCAL_APIC=y | 266 | CONFIG_X86_LOCAL_APIC=y |
267 | CONFIG_X86_IO_APIC=y | 267 | CONFIG_X86_IO_APIC=y |
268 | CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y | 268 | CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y |
269 | # CONFIG_X86_MCE is not set | 269 | CONFIG_X86_MCE=y |
270 | CONFIG_X86_MCE_NONFATAL=y | ||
271 | CONFIG_X86_MCE_P4THERMAL=y | ||
270 | CONFIG_VM86=y | 272 | CONFIG_VM86=y |
271 | # CONFIG_TOSHIBA is not set | 273 | # CONFIG_TOSHIBA is not set |
272 | # CONFIG_I8K is not set | 274 | # CONFIG_I8K is not set |
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 4157cc4a2bde..9fe5d212ab4c 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.29-rc4 | 3 | # Linux kernel version: 2.6.29-rc4 |
4 | # Thu Feb 12 12:57:29 2009 | 4 | # Tue Feb 24 15:44:16 2009 |
5 | # | 5 | # |
6 | CONFIG_64BIT=y | 6 | CONFIG_64BIT=y |
7 | # CONFIG_X86_32 is not set | 7 | # CONFIG_X86_32 is not set |
@@ -266,7 +266,9 @@ CONFIG_PREEMPT_VOLUNTARY=y | |||
266 | CONFIG_X86_LOCAL_APIC=y | 266 | CONFIG_X86_LOCAL_APIC=y |
267 | CONFIG_X86_IO_APIC=y | 267 | CONFIG_X86_IO_APIC=y |
268 | CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y | 268 | CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y |
269 | # CONFIG_X86_MCE is not set | 269 | CONFIG_X86_MCE=y |
270 | CONFIG_X86_MCE_INTEL=y | ||
271 | CONFIG_X86_MCE_AMD=y | ||
270 | # CONFIG_I8K is not set | 272 | # CONFIG_I8K is not set |
271 | CONFIG_MICROCODE=y | 273 | CONFIG_MICROCODE=y |
272 | CONFIG_MICROCODE_INTEL=y | 274 | CONFIG_MICROCODE_INTEL=y |
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index a6208dc74633..4ef949c1972e 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -75,7 +75,14 @@ static inline void default_inquire_remote_apic(int apicid) | |||
75 | #define setup_secondary_clock setup_secondary_APIC_clock | 75 | #define setup_secondary_clock setup_secondary_APIC_clock |
76 | #endif | 76 | #endif |
77 | 77 | ||
78 | #ifdef CONFIG_X86_VSMP | ||
78 | extern int is_vsmp_box(void); | 79 | extern int is_vsmp_box(void); |
80 | #else | ||
81 | static inline int is_vsmp_box(void) | ||
82 | { | ||
83 | return 0; | ||
84 | } | ||
85 | #endif | ||
79 | extern void xapic_wait_icr_idle(void); | 86 | extern void xapic_wait_icr_idle(void); |
80 | extern u32 safe_xapic_wait_icr_idle(void); | 87 | extern u32 safe_xapic_wait_icr_idle(void); |
81 | extern void xapic_icr_write(u32, u32); | 88 | extern void xapic_icr_write(u32, u32); |
@@ -306,7 +313,7 @@ struct apic { | |||
306 | void (*send_IPI_self)(int vector); | 313 | void (*send_IPI_self)(int vector); |
307 | 314 | ||
308 | /* wakeup_secondary_cpu */ | 315 | /* wakeup_secondary_cpu */ |
309 | int (*wakeup_cpu)(int apicid, unsigned long start_eip); | 316 | int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip); |
310 | 317 | ||
311 | int trampoline_phys_low; | 318 | int trampoline_phys_low; |
312 | int trampoline_phys_high; | 319 | int trampoline_phys_high; |
@@ -324,8 +331,21 @@ struct apic { | |||
324 | u32 (*safe_wait_icr_idle)(void); | 331 | u32 (*safe_wait_icr_idle)(void); |
325 | }; | 332 | }; |
326 | 333 | ||
334 | /* | ||
335 | * Pointer to the local APIC driver in use on this system (there's | ||
336 | * always just one such driver in use - the kernel decides via an | ||
337 | * early probing process which one it picks - and then sticks to it): | ||
338 | */ | ||
327 | extern struct apic *apic; | 339 | extern struct apic *apic; |
328 | 340 | ||
341 | /* | ||
342 | * APIC functionality to boot other CPUs - only used on SMP: | ||
343 | */ | ||
344 | #ifdef CONFIG_SMP | ||
345 | extern atomic_t init_deasserted; | ||
346 | extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); | ||
347 | #endif | ||
348 | |||
329 | static inline u32 apic_read(u32 reg) | 349 | static inline u32 apic_read(u32 reg) |
330 | { | 350 | { |
331 | return apic->read(reg); | 351 | return apic->read(reg); |
@@ -384,9 +404,7 @@ static inline unsigned default_get_apic_id(unsigned long x) | |||
384 | #define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467 | 404 | #define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467 |
385 | #define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469 | 405 | #define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469 |
386 | 406 | ||
387 | #ifdef CONFIG_X86_32 | 407 | #ifdef CONFIG_X86_64 |
388 | extern void es7000_update_apic_to_cluster(void); | ||
389 | #else | ||
390 | extern struct apic apic_flat; | 408 | extern struct apic apic_flat; |
391 | extern struct apic apic_physflat; | 409 | extern struct apic apic_physflat; |
392 | extern struct apic apic_x2apic_cluster; | 410 | extern struct apic apic_x2apic_cluster; |
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index dd61616cb73d..6526cf08b0e4 100644 --- a/arch/x86/include/asm/boot.h +++ b/arch/x86/include/asm/boot.h | |||
@@ -10,17 +10,31 @@ | |||
10 | #define EXTENDED_VGA 0xfffe /* 80x50 mode */ | 10 | #define EXTENDED_VGA 0xfffe /* 80x50 mode */ |
11 | #define ASK_VGA 0xfffd /* ask for it at bootup */ | 11 | #define ASK_VGA 0xfffd /* ask for it at bootup */ |
12 | 12 | ||
13 | #ifdef __KERNEL__ | ||
14 | |||
13 | /* Physical address where kernel should be loaded. */ | 15 | /* Physical address where kernel should be loaded. */ |
14 | #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ | 16 | #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ |
15 | + (CONFIG_PHYSICAL_ALIGN - 1)) \ | 17 | + (CONFIG_PHYSICAL_ALIGN - 1)) \ |
16 | & ~(CONFIG_PHYSICAL_ALIGN - 1)) | 18 | & ~(CONFIG_PHYSICAL_ALIGN - 1)) |
17 | 19 | ||
20 | #ifdef CONFIG_KERNEL_BZIP2 | ||
21 | #define BOOT_HEAP_SIZE 0x400000 | ||
22 | #else /* !CONFIG_KERNEL_BZIP2 */ | ||
23 | |||
18 | #ifdef CONFIG_X86_64 | 24 | #ifdef CONFIG_X86_64 |
19 | #define BOOT_HEAP_SIZE 0x7000 | 25 | #define BOOT_HEAP_SIZE 0x7000 |
20 | #define BOOT_STACK_SIZE 0x4000 | ||
21 | #else | 26 | #else |
22 | #define BOOT_HEAP_SIZE 0x4000 | 27 | #define BOOT_HEAP_SIZE 0x4000 |
28 | #endif | ||
29 | |||
30 | #endif /* !CONFIG_KERNEL_BZIP2 */ | ||
31 | |||
32 | #ifdef CONFIG_X86_64 | ||
33 | #define BOOT_STACK_SIZE 0x4000 | ||
34 | #else | ||
23 | #define BOOT_STACK_SIZE 0x1000 | 35 | #define BOOT_STACK_SIZE 0x1000 |
24 | #endif | 36 | #endif |
25 | 37 | ||
38 | #endif /* __KERNEL__ */ | ||
39 | |||
26 | #endif /* _ASM_X86_BOOT_H */ | 40 | #endif /* _ASM_X86_BOOT_H */ |
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 23696d44a0af..dca8f03da5b2 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h | |||
@@ -1,11 +1,155 @@ | |||
1 | /* | ||
2 | * fixmap.h: compile-time virtual memory allocation | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1998 Ingo Molnar | ||
9 | * | ||
10 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 | ||
11 | * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 | ||
12 | */ | ||
13 | |||
1 | #ifndef _ASM_X86_FIXMAP_H | 14 | #ifndef _ASM_X86_FIXMAP_H |
2 | #define _ASM_X86_FIXMAP_H | 15 | #define _ASM_X86_FIXMAP_H |
3 | 16 | ||
17 | #ifndef __ASSEMBLY__ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <asm/acpi.h> | ||
20 | #include <asm/apicdef.h> | ||
21 | #include <asm/page.h> | ||
22 | #ifdef CONFIG_X86_32 | ||
23 | #include <linux/threads.h> | ||
24 | #include <asm/kmap_types.h> | ||
25 | #else | ||
26 | #include <asm/vsyscall.h> | ||
27 | #ifdef CONFIG_EFI | ||
28 | #include <asm/efi.h> | ||
29 | #endif | ||
30 | #endif | ||
31 | |||
32 | /* | ||
33 | * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall | ||
34 | * uses fixmaps that relies on FIXADDR_TOP for proper address calculation. | ||
35 | * Because of this, FIXADDR_TOP x86 integration was left as later work. | ||
36 | */ | ||
37 | #ifdef CONFIG_X86_32 | ||
38 | /* used by vmalloc.c, vsyscall.lds.S. | ||
39 | * | ||
40 | * Leave one empty page between vmalloc'ed areas and | ||
41 | * the start of the fixmap. | ||
42 | */ | ||
43 | extern unsigned long __FIXADDR_TOP; | ||
44 | #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) | ||
45 | |||
46 | #define FIXADDR_USER_START __fix_to_virt(FIX_VDSO) | ||
47 | #define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1) | ||
48 | #else | ||
49 | #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) | ||
50 | |||
51 | /* Only covers 32bit vsyscalls currently. Need another set for 64bit. */ | ||
52 | #define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) | ||
53 | #define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) | ||
54 | #endif | ||
55 | |||
56 | |||
57 | /* | ||
58 | * Here we define all the compile-time 'special' virtual | ||
59 | * addresses. The point is to have a constant address at | ||
60 | * compile time, but to set the physical address only | ||
61 | * in the boot process. | ||
62 | * for x86_32: We allocate these special addresses | ||
63 | * from the end of virtual memory (0xfffff000) backwards. | ||
64 | * Also this lets us do fail-safe vmalloc(), we | ||
65 | * can guarantee that these special addresses and | ||
66 | * vmalloc()-ed addresses never overlap. | ||
67 | * | ||
68 | * These 'compile-time allocated' memory buffers are | ||
69 | * fixed-size 4k pages (or larger if used with an increment | ||
70 | * higher than 1). Use set_fixmap(idx,phys) to associate | ||
71 | * physical memory with fixmap indices. | ||
72 | * | ||
73 | * TLB entries of such buffers will not be flushed across | ||
74 | * task switches. | ||
75 | */ | ||
76 | enum fixed_addresses { | ||
4 | #ifdef CONFIG_X86_32 | 77 | #ifdef CONFIG_X86_32 |
5 | # include "fixmap_32.h" | 78 | FIX_HOLE, |
79 | FIX_VDSO, | ||
6 | #else | 80 | #else |
7 | # include "fixmap_64.h" | 81 | VSYSCALL_LAST_PAGE, |
82 | VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE | ||
83 | + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, | ||
84 | VSYSCALL_HPET, | ||
8 | #endif | 85 | #endif |
86 | FIX_DBGP_BASE, | ||
87 | FIX_EARLYCON_MEM_BASE, | ||
88 | #ifdef CONFIG_X86_LOCAL_APIC | ||
89 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ | ||
90 | #endif | ||
91 | #ifdef CONFIG_X86_IO_APIC | ||
92 | FIX_IO_APIC_BASE_0, | ||
93 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, | ||
94 | #endif | ||
95 | #ifdef CONFIG_X86_64 | ||
96 | #ifdef CONFIG_EFI | ||
97 | FIX_EFI_IO_MAP_LAST_PAGE, | ||
98 | FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE | ||
99 | + MAX_EFI_IO_PAGES - 1, | ||
100 | #endif | ||
101 | #endif | ||
102 | #ifdef CONFIG_X86_VISWS_APIC | ||
103 | FIX_CO_CPU, /* Cobalt timer */ | ||
104 | FIX_CO_APIC, /* Cobalt APIC Redirection Table */ | ||
105 | FIX_LI_PCIA, /* Lithium PCI Bridge A */ | ||
106 | FIX_LI_PCIB, /* Lithium PCI Bridge B */ | ||
107 | #endif | ||
108 | #ifdef CONFIG_X86_F00F_BUG | ||
109 | FIX_F00F_IDT, /* Virtual mapping for IDT */ | ||
110 | #endif | ||
111 | #ifdef CONFIG_X86_CYCLONE_TIMER | ||
112 | FIX_CYCLONE_TIMER, /*cyclone timer register*/ | ||
113 | #endif | ||
114 | #ifdef CONFIG_X86_32 | ||
115 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | ||
116 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | ||
117 | #ifdef CONFIG_PCI_MMCONFIG | ||
118 | FIX_PCIE_MCFG, | ||
119 | #endif | ||
120 | #endif | ||
121 | #ifdef CONFIG_PARAVIRT | ||
122 | FIX_PARAVIRT_BOOTMAP, | ||
123 | #endif | ||
124 | __end_of_permanent_fixed_addresses, | ||
125 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | ||
126 | FIX_OHCI1394_BASE, | ||
127 | #endif | ||
128 | /* | ||
129 | * 256 temporary boot-time mappings, used by early_ioremap(), | ||
130 | * before ioremap() is functional. | ||
131 | * | ||
132 | * We round it up to the next 256 pages boundary so that we | ||
133 | * can have a single pgd entry and a single pte table: | ||
134 | */ | ||
135 | #define NR_FIX_BTMAPS 64 | ||
136 | #define FIX_BTMAPS_SLOTS 4 | ||
137 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 - | ||
138 | (__end_of_permanent_fixed_addresses & 255), | ||
139 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, | ||
140 | #ifdef CONFIG_X86_32 | ||
141 | FIX_WP_TEST, | ||
142 | #endif | ||
143 | __end_of_fixed_addresses | ||
144 | }; | ||
145 | |||
146 | |||
147 | extern void reserve_top_address(unsigned long reserve); | ||
148 | |||
149 | #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) | ||
150 | #define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | ||
151 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | ||
152 | #define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE) | ||
9 | 153 | ||
10 | extern int fixmaps_set; | 154 | extern int fixmaps_set; |
11 | 155 | ||
@@ -69,4 +213,5 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) | |||
69 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | 213 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); |
70 | return __virt_to_fix(vaddr); | 214 | return __virt_to_fix(vaddr); |
71 | } | 215 | } |
216 | #endif /* !__ASSEMBLY__ */ | ||
72 | #endif /* _ASM_X86_FIXMAP_H */ | 217 | #endif /* _ASM_X86_FIXMAP_H */ |
diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h deleted file mode 100644 index 047d9bab2b31..000000000000 --- a/arch/x86/include/asm/fixmap_32.h +++ /dev/null | |||
@@ -1,115 +0,0 @@ | |||
1 | /* | ||
2 | * fixmap.h: compile-time virtual memory allocation | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1998 Ingo Molnar | ||
9 | * | ||
10 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_X86_FIXMAP_32_H | ||
14 | #define _ASM_X86_FIXMAP_32_H | ||
15 | |||
16 | |||
17 | /* used by vmalloc.c, vsyscall.lds.S. | ||
18 | * | ||
19 | * Leave one empty page between vmalloc'ed areas and | ||
20 | * the start of the fixmap. | ||
21 | */ | ||
22 | extern unsigned long __FIXADDR_TOP; | ||
23 | #define FIXADDR_USER_START __fix_to_virt(FIX_VDSO) | ||
24 | #define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1) | ||
25 | |||
26 | #ifndef __ASSEMBLY__ | ||
27 | #include <linux/kernel.h> | ||
28 | #include <asm/acpi.h> | ||
29 | #include <asm/apicdef.h> | ||
30 | #include <asm/page.h> | ||
31 | #include <linux/threads.h> | ||
32 | #include <asm/kmap_types.h> | ||
33 | |||
34 | /* | ||
35 | * Here we define all the compile-time 'special' virtual | ||
36 | * addresses. The point is to have a constant address at | ||
37 | * compile time, but to set the physical address only | ||
38 | * in the boot process. We allocate these special addresses | ||
39 | * from the end of virtual memory (0xfffff000) backwards. | ||
40 | * Also this lets us do fail-safe vmalloc(), we | ||
41 | * can guarantee that these special addresses and | ||
42 | * vmalloc()-ed addresses never overlap. | ||
43 | * | ||
44 | * these 'compile-time allocated' memory buffers are | ||
45 | * fixed-size 4k pages. (or larger if used with an increment | ||
46 | * highger than 1) use fixmap_set(idx,phys) to associate | ||
47 | * physical memory with fixmap indices. | ||
48 | * | ||
49 | * TLB entries of such buffers will not be flushed across | ||
50 | * task switches. | ||
51 | */ | ||
52 | enum fixed_addresses { | ||
53 | FIX_HOLE, | ||
54 | FIX_VDSO, | ||
55 | FIX_DBGP_BASE, | ||
56 | FIX_EARLYCON_MEM_BASE, | ||
57 | #ifdef CONFIG_X86_LOCAL_APIC | ||
58 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ | ||
59 | #endif | ||
60 | #ifdef CONFIG_X86_IO_APIC | ||
61 | FIX_IO_APIC_BASE_0, | ||
62 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, | ||
63 | #endif | ||
64 | #ifdef CONFIG_X86_VISWS_APIC | ||
65 | FIX_CO_CPU, /* Cobalt timer */ | ||
66 | FIX_CO_APIC, /* Cobalt APIC Redirection Table */ | ||
67 | FIX_LI_PCIA, /* Lithium PCI Bridge A */ | ||
68 | FIX_LI_PCIB, /* Lithium PCI Bridge B */ | ||
69 | #endif | ||
70 | #ifdef CONFIG_X86_F00F_BUG | ||
71 | FIX_F00F_IDT, /* Virtual mapping for IDT */ | ||
72 | #endif | ||
73 | #ifdef CONFIG_X86_CYCLONE_TIMER | ||
74 | FIX_CYCLONE_TIMER, /*cyclone timer register*/ | ||
75 | #endif | ||
76 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | ||
77 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | ||
78 | #ifdef CONFIG_PCI_MMCONFIG | ||
79 | FIX_PCIE_MCFG, | ||
80 | #endif | ||
81 | #ifdef CONFIG_PARAVIRT | ||
82 | FIX_PARAVIRT_BOOTMAP, | ||
83 | #endif | ||
84 | __end_of_permanent_fixed_addresses, | ||
85 | /* | ||
86 | * 256 temporary boot-time mappings, used by early_ioremap(), | ||
87 | * before ioremap() is functional. | ||
88 | * | ||
89 | * We round it up to the next 256 pages boundary so that we | ||
90 | * can have a single pgd entry and a single pte table: | ||
91 | */ | ||
92 | #define NR_FIX_BTMAPS 64 | ||
93 | #define FIX_BTMAPS_SLOTS 4 | ||
94 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 - | ||
95 | (__end_of_permanent_fixed_addresses & 255), | ||
96 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, | ||
97 | FIX_WP_TEST, | ||
98 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | ||
99 | FIX_OHCI1394_BASE, | ||
100 | #endif | ||
101 | __end_of_fixed_addresses | ||
102 | }; | ||
103 | |||
104 | extern void reserve_top_address(unsigned long reserve); | ||
105 | |||
106 | |||
107 | #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) | ||
108 | |||
109 | #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) | ||
110 | #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | ||
111 | #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) | ||
112 | #define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) | ||
113 | |||
114 | #endif /* !__ASSEMBLY__ */ | ||
115 | #endif /* _ASM_X86_FIXMAP_32_H */ | ||
diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h deleted file mode 100644 index 298d9ba3faeb..000000000000 --- a/arch/x86/include/asm/fixmap_64.h +++ /dev/null | |||
@@ -1,79 +0,0 @@ | |||
1 | /* | ||
2 | * fixmap.h: compile-time virtual memory allocation | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1998 Ingo Molnar | ||
9 | */ | ||
10 | |||
11 | #ifndef _ASM_X86_FIXMAP_64_H | ||
12 | #define _ASM_X86_FIXMAP_64_H | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <asm/acpi.h> | ||
16 | #include <asm/apicdef.h> | ||
17 | #include <asm/page.h> | ||
18 | #include <asm/vsyscall.h> | ||
19 | #include <asm/efi.h> | ||
20 | |||
21 | /* | ||
22 | * Here we define all the compile-time 'special' virtual | ||
23 | * addresses. The point is to have a constant address at | ||
24 | * compile time, but to set the physical address only | ||
25 | * in the boot process. | ||
26 | * | ||
27 | * These 'compile-time allocated' memory buffers are | ||
28 | * fixed-size 4k pages (or larger if used with an increment | ||
29 | * higher than 1). Use set_fixmap(idx,phys) to associate | ||
30 | * physical memory with fixmap indices. | ||
31 | * | ||
32 | * TLB entries of such buffers will not be flushed across | ||
33 | * task switches. | ||
34 | */ | ||
35 | |||
36 | enum fixed_addresses { | ||
37 | VSYSCALL_LAST_PAGE, | ||
38 | VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE | ||
39 | + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, | ||
40 | VSYSCALL_HPET, | ||
41 | FIX_DBGP_BASE, | ||
42 | FIX_EARLYCON_MEM_BASE, | ||
43 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ | ||
44 | FIX_IO_APIC_BASE_0, | ||
45 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, | ||
46 | FIX_EFI_IO_MAP_LAST_PAGE, | ||
47 | FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE | ||
48 | + MAX_EFI_IO_PAGES - 1, | ||
49 | #ifdef CONFIG_PARAVIRT | ||
50 | FIX_PARAVIRT_BOOTMAP, | ||
51 | #endif | ||
52 | __end_of_permanent_fixed_addresses, | ||
53 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | ||
54 | FIX_OHCI1394_BASE, | ||
55 | #endif | ||
56 | /* | ||
57 | * 256 temporary boot-time mappings, used by early_ioremap(), | ||
58 | * before ioremap() is functional. | ||
59 | * | ||
60 | * We round it up to the next 256 pages boundary so that we | ||
61 | * can have a single pgd entry and a single pte table: | ||
62 | */ | ||
63 | #define NR_FIX_BTMAPS 64 | ||
64 | #define FIX_BTMAPS_SLOTS 4 | ||
65 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 - | ||
66 | (__end_of_permanent_fixed_addresses & 255), | ||
67 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1, | ||
68 | __end_of_fixed_addresses | ||
69 | }; | ||
70 | |||
71 | #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) | ||
72 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | ||
73 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | ||
74 | |||
75 | /* Only covers 32bit vsyscalls currently. Need another set for 64bit. */ | ||
76 | #define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) | ||
77 | #define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) | ||
78 | |||
79 | #endif /* _ASM_X86_FIXMAP_64_H */ | ||
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h index c1f06289b14b..86af26091d6c 100644 --- a/arch/x86/include/asm/iomap.h +++ b/arch/x86/include/asm/iomap.h | |||
@@ -23,6 +23,9 @@ | |||
23 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
24 | #include <asm/tlbflush.h> | 24 | #include <asm/tlbflush.h> |
25 | 25 | ||
26 | int | ||
27 | is_io_mapping_possible(resource_size_t base, unsigned long size); | ||
28 | |||
26 | void * | 29 | void * |
27 | iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); | 30 | iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); |
28 | 31 | ||
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h index e9f5db796244..a37229011b56 100644 --- a/arch/x86/include/asm/numa_32.h +++ b/arch/x86/include/asm/numa_32.h | |||
@@ -4,8 +4,12 @@ | |||
4 | extern int pxm_to_nid(int pxm); | 4 | extern int pxm_to_nid(int pxm); |
5 | extern void numa_remove_cpu(int cpu); | 5 | extern void numa_remove_cpu(int cpu); |
6 | 6 | ||
7 | #ifdef CONFIG_NUMA | 7 | #ifdef CONFIG_HIGHMEM |
8 | extern void set_highmem_pages_init(void); | 8 | extern void set_highmem_pages_init(void); |
9 | #else | ||
10 | static inline void set_highmem_pages_init(void) | ||
11 | { | ||
12 | } | ||
9 | #endif | 13 | #endif |
10 | 14 | ||
11 | #endif /* _ASM_X86_NUMA_32_H */ | 15 | #endif /* _ASM_X86_NUMA_32_H */ |
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h index 9709fdff6615..b0e70056838e 100644 --- a/arch/x86/include/asm/pat.h +++ b/arch/x86/include/asm/pat.h | |||
@@ -15,4 +15,7 @@ extern int reserve_memtype(u64 start, u64 end, | |||
15 | unsigned long req_type, unsigned long *ret_type); | 15 | unsigned long req_type, unsigned long *ret_type); |
16 | extern int free_memtype(u64 start, u64 end); | 16 | extern int free_memtype(u64 start, u64 end); |
17 | 17 | ||
18 | extern int kernel_map_sync_memtype(u64 base, unsigned long size, | ||
19 | unsigned long flag); | ||
20 | |||
18 | #endif /* _ASM_X86_PAT_H */ | 21 | #endif /* _ASM_X86_PAT_H */ |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c7a98f738210..76139506c3e4 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -248,7 +248,6 @@ struct x86_hw_tss { | |||
248 | #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) | 248 | #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) |
249 | #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) | 249 | #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) |
250 | #define INVALID_IO_BITMAP_OFFSET 0x8000 | 250 | #define INVALID_IO_BITMAP_OFFSET 0x8000 |
251 | #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 | ||
252 | 251 | ||
253 | struct tss_struct { | 252 | struct tss_struct { |
254 | /* | 253 | /* |
@@ -263,11 +262,6 @@ struct tss_struct { | |||
263 | * be within the limit. | 262 | * be within the limit. |
264 | */ | 263 | */ |
265 | unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; | 264 | unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; |
266 | /* | ||
267 | * Cache the current maximum and the last task that used the bitmap: | ||
268 | */ | ||
269 | unsigned long io_bitmap_max; | ||
270 | struct thread_struct *io_bitmap_owner; | ||
271 | 265 | ||
272 | /* | 266 | /* |
273 | * .. and then another 0x100 bytes for the emergency kernel stack: | 267 | * .. and then another 0x100 bytes for the emergency kernel stack: |
diff --git a/arch/x86/include/asm/seccomp_32.h b/arch/x86/include/asm/seccomp_32.h index a6ad87b352c4..b811d6f5780c 100644 --- a/arch/x86/include/asm/seccomp_32.h +++ b/arch/x86/include/asm/seccomp_32.h | |||
@@ -1,12 +1,6 @@ | |||
1 | #ifndef _ASM_X86_SECCOMP_32_H | 1 | #ifndef _ASM_X86_SECCOMP_32_H |
2 | #define _ASM_X86_SECCOMP_32_H | 2 | #define _ASM_X86_SECCOMP_32_H |
3 | 3 | ||
4 | #include <linux/thread_info.h> | ||
5 | |||
6 | #ifdef TIF_32BIT | ||
7 | #error "unexpected TIF_32BIT on i386" | ||
8 | #endif | ||
9 | |||
10 | #include <linux/unistd.h> | 4 | #include <linux/unistd.h> |
11 | 5 | ||
12 | #define __NR_seccomp_read __NR_read | 6 | #define __NR_seccomp_read __NR_read |
diff --git a/arch/x86/include/asm/seccomp_64.h b/arch/x86/include/asm/seccomp_64.h index 4171bb794e9e..84ec1bd161a5 100644 --- a/arch/x86/include/asm/seccomp_64.h +++ b/arch/x86/include/asm/seccomp_64.h | |||
@@ -1,14 +1,6 @@ | |||
1 | #ifndef _ASM_X86_SECCOMP_64_H | 1 | #ifndef _ASM_X86_SECCOMP_64_H |
2 | #define _ASM_X86_SECCOMP_64_H | 2 | #define _ASM_X86_SECCOMP_64_H |
3 | 3 | ||
4 | #include <linux/thread_info.h> | ||
5 | |||
6 | #ifdef TIF_32BIT | ||
7 | #error "unexpected TIF_32BIT on x86_64" | ||
8 | #else | ||
9 | #define TIF_32BIT TIF_IA32 | ||
10 | #endif | ||
11 | |||
12 | #include <linux/unistd.h> | 4 | #include <linux/unistd.h> |
13 | #include <asm/ia32_unistd.h> | 5 | #include <asm/ia32_unistd.h> |
14 | 6 | ||
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 66801cb72f69..05c6f6b11fd5 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h | |||
@@ -31,7 +31,6 @@ struct x86_quirks { | |||
31 | void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, | 31 | void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, |
32 | unsigned short oemsize); | 32 | unsigned short oemsize); |
33 | int (*setup_ioapic_ids)(void); | 33 | int (*setup_ioapic_ids)(void); |
34 | int (*update_apic)(void); | ||
35 | }; | 34 | }; |
36 | 35 | ||
37 | extern void x86_quirk_pre_intr_init(void); | 36 | extern void x86_quirk_pre_intr_init(void); |
@@ -65,7 +64,11 @@ extern void x86_quirk_time_init(void); | |||
65 | #include <asm/bootparam.h> | 64 | #include <asm/bootparam.h> |
66 | 65 | ||
67 | /* Interrupt control for vSMPowered x86_64 systems */ | 66 | /* Interrupt control for vSMPowered x86_64 systems */ |
67 | #ifdef CONFIG_X86_VSMP | ||
68 | void vsmp_init(void); | 68 | void vsmp_init(void); |
69 | #else | ||
70 | static inline void vsmp_init(void) { } | ||
71 | #endif | ||
69 | 72 | ||
70 | void setup_bios_corruption_check(void); | 73 | void setup_bios_corruption_check(void); |
71 | 74 | ||
@@ -77,8 +80,6 @@ static inline void visws_early_detect(void) { } | |||
77 | static inline int is_visws_box(void) { return 0; } | 80 | static inline int is_visws_box(void) { return 0; } |
78 | #endif | 81 | #endif |
79 | 82 | ||
80 | extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); | ||
81 | extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip); | ||
82 | extern struct x86_quirks *x86_quirks; | 83 | extern struct x86_quirks *x86_quirks; |
83 | extern unsigned long saved_video_mode; | 84 | extern unsigned long saved_video_mode; |
84 | 85 | ||
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index c00bfdbdd456..643c59b4bc6e 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h | |||
@@ -20,6 +20,9 @@ | |||
20 | struct task_struct; /* one of the stranger aspects of C forward declarations */ | 20 | struct task_struct; /* one of the stranger aspects of C forward declarations */ |
21 | struct task_struct *__switch_to(struct task_struct *prev, | 21 | struct task_struct *__switch_to(struct task_struct *prev, |
22 | struct task_struct *next); | 22 | struct task_struct *next); |
23 | struct tss_struct; | ||
24 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | ||
25 | struct tss_struct *tss); | ||
23 | 26 | ||
24 | #ifdef CONFIG_X86_32 | 27 | #ifdef CONFIG_X86_32 |
25 | 28 | ||
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 987a2c10fe20..8cc687326eb8 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -188,30 +188,18 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) | |||
188 | extern long __copy_user_nocache(void *dst, const void __user *src, | 188 | extern long __copy_user_nocache(void *dst, const void __user *src, |
189 | unsigned size, int zerorest); | 189 | unsigned size, int zerorest); |
190 | 190 | ||
191 | static inline int __copy_from_user_nocache(void *dst, const void __user *src, | 191 | static inline int |
192 | unsigned size) | 192 | __copy_from_user_nocache(void *dst, const void __user *src, unsigned size) |
193 | { | 193 | { |
194 | might_sleep(); | 194 | might_sleep(); |
195 | /* | 195 | return __copy_user_nocache(dst, src, size, 1); |
196 | * In practice this limit means that large file write()s | ||
197 | * which get chunked to 4K copies get handled via | ||
198 | * non-temporal stores here. Smaller writes get handled | ||
199 | * via regular __copy_from_user(): | ||
200 | */ | ||
201 | if (likely(size >= PAGE_SIZE)) | ||
202 | return __copy_user_nocache(dst, src, size, 1); | ||
203 | else | ||
204 | return __copy_from_user(dst, src, size); | ||
205 | } | 196 | } |
206 | 197 | ||
207 | static inline int __copy_from_user_inatomic_nocache(void *dst, | 198 | static inline int |
208 | const void __user *src, | 199 | __copy_from_user_inatomic_nocache(void *dst, const void __user *src, |
209 | unsigned size) | 200 | unsigned size) |
210 | { | 201 | { |
211 | if (likely(size >= PAGE_SIZE)) | 202 | return __copy_user_nocache(dst, src, size, 0); |
212 | return __copy_user_nocache(dst, src, size, 0); | ||
213 | else | ||
214 | return __copy_from_user_inatomic(dst, src, size); | ||
215 | } | 203 | } |
216 | 204 | ||
217 | unsigned long | 205 | unsigned long |
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h index 8242bf965812..c0a01b5d985b 100644 --- a/arch/x86/include/asm/uv/uv.h +++ b/arch/x86/include/asm/uv/uv.h | |||
@@ -12,7 +12,6 @@ extern enum uv_system_type get_uv_system_type(void); | |||
12 | extern int is_uv_system(void); | 12 | extern int is_uv_system(void); |
13 | extern void uv_cpu_init(void); | 13 | extern void uv_cpu_init(void); |
14 | extern void uv_system_init(void); | 14 | extern void uv_system_init(void); |
15 | extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); | ||
16 | extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | 15 | extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, |
17 | struct mm_struct *mm, | 16 | struct mm_struct *mm, |
18 | unsigned long va, | 17 | unsigned long va, |
@@ -24,8 +23,6 @@ static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; } | |||
24 | static inline int is_uv_system(void) { return 0; } | 23 | static inline int is_uv_system(void) { return 0; } |
25 | static inline void uv_cpu_init(void) { } | 24 | static inline void uv_cpu_init(void) { } |
26 | static inline void uv_system_init(void) { } | 25 | static inline void uv_system_init(void) { } |
27 | static inline int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) | ||
28 | { return 1; } | ||
29 | static inline const struct cpumask * | 26 | static inline const struct cpumask * |
30 | uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, | 27 | uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, |
31 | unsigned long va, unsigned int cpu) | 28 | unsigned long va, unsigned int cpu) |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index de5657c039e9..95f216bbfaf1 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -70,7 +70,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | |||
70 | obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o | 70 | obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o |
71 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o | 71 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o |
72 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o | 72 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o |
73 | obj-y += vsmp_64.o | 73 | obj-$(CONFIG_X86_VSMP) += vsmp_64.o |
74 | obj-$(CONFIG_KPROBES) += kprobes.o | 74 | obj-$(CONFIG_KPROBES) += kprobes.o |
75 | obj-$(CONFIG_MODULES) += module_$(BITS).o | 75 | obj-$(CONFIG_MODULES) += module_$(BITS).o |
76 | obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o | 76 | obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index a84ac7b570e6..6907b8e85d52 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -498,12 +498,12 @@ void *text_poke_early(void *addr, const void *opcode, size_t len) | |||
498 | */ | 498 | */ |
499 | void *__kprobes text_poke(void *addr, const void *opcode, size_t len) | 499 | void *__kprobes text_poke(void *addr, const void *opcode, size_t len) |
500 | { | 500 | { |
501 | unsigned long flags; | ||
502 | char *vaddr; | 501 | char *vaddr; |
503 | int nr_pages = 2; | 502 | int nr_pages = 2; |
504 | struct page *pages[2]; | 503 | struct page *pages[2]; |
505 | int i; | 504 | int i; |
506 | 505 | ||
506 | might_sleep(); | ||
507 | if (!core_kernel_text((unsigned long)addr)) { | 507 | if (!core_kernel_text((unsigned long)addr)) { |
508 | pages[0] = vmalloc_to_page(addr); | 508 | pages[0] = vmalloc_to_page(addr); |
509 | pages[1] = vmalloc_to_page(addr + PAGE_SIZE); | 509 | pages[1] = vmalloc_to_page(addr + PAGE_SIZE); |
@@ -517,9 +517,9 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len) | |||
517 | nr_pages = 1; | 517 | nr_pages = 1; |
518 | vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); | 518 | vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); |
519 | BUG_ON(!vaddr); | 519 | BUG_ON(!vaddr); |
520 | local_irq_save(flags); | 520 | local_irq_disable(); |
521 | memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); | 521 | memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); |
522 | local_irq_restore(flags); | 522 | local_irq_enable(); |
523 | vunmap(vaddr); | 523 | vunmap(vaddr); |
524 | sync_core(); | 524 | sync_core(); |
525 | /* Could also do a CLFLUSH here to speed up CPU recovery; but | 525 | /* Could also do a CLFLUSH here to speed up CPU recovery; but |
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 3b002995e145..f933822dba18 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c | |||
@@ -222,7 +222,6 @@ struct apic apic_flat = { | |||
222 | .send_IPI_all = flat_send_IPI_all, | 222 | .send_IPI_all = flat_send_IPI_all, |
223 | .send_IPI_self = apic_send_IPI_self, | 223 | .send_IPI_self = apic_send_IPI_self, |
224 | 224 | ||
225 | .wakeup_cpu = NULL, | ||
226 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, | 225 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, |
227 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, | 226 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, |
228 | .wait_for_init_deassert = NULL, | 227 | .wait_for_init_deassert = NULL, |
@@ -373,7 +372,6 @@ struct apic apic_physflat = { | |||
373 | .send_IPI_all = physflat_send_IPI_all, | 372 | .send_IPI_all = physflat_send_IPI_all, |
374 | .send_IPI_self = apic_send_IPI_self, | 373 | .send_IPI_self = apic_send_IPI_self, |
375 | 374 | ||
376 | .wakeup_cpu = NULL, | ||
377 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, | 375 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, |
378 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, | 376 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, |
379 | .wait_for_init_deassert = NULL, | 377 | .wait_for_init_deassert = NULL, |
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index 0b1093394fdf..d806ecaa948f 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c | |||
@@ -16,17 +16,17 @@ | |||
16 | #include <asm/apic.h> | 16 | #include <asm/apic.h> |
17 | #include <asm/ipi.h> | 17 | #include <asm/ipi.h> |
18 | 18 | ||
19 | static inline unsigned bigsmp_get_apic_id(unsigned long x) | 19 | static unsigned bigsmp_get_apic_id(unsigned long x) |
20 | { | 20 | { |
21 | return (x >> 24) & 0xFF; | 21 | return (x >> 24) & 0xFF; |
22 | } | 22 | } |
23 | 23 | ||
24 | static inline int bigsmp_apic_id_registered(void) | 24 | static int bigsmp_apic_id_registered(void) |
25 | { | 25 | { |
26 | return 1; | 26 | return 1; |
27 | } | 27 | } |
28 | 28 | ||
29 | static inline const cpumask_t *bigsmp_target_cpus(void) | 29 | static const cpumask_t *bigsmp_target_cpus(void) |
30 | { | 30 | { |
31 | #ifdef CONFIG_SMP | 31 | #ifdef CONFIG_SMP |
32 | return &cpu_online_map; | 32 | return &cpu_online_map; |
@@ -35,13 +35,12 @@ static inline const cpumask_t *bigsmp_target_cpus(void) | |||
35 | #endif | 35 | #endif |
36 | } | 36 | } |
37 | 37 | ||
38 | static inline unsigned long | 38 | static unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid) |
39 | bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid) | ||
40 | { | 39 | { |
41 | return 0; | 40 | return 0; |
42 | } | 41 | } |
43 | 42 | ||
44 | static inline unsigned long bigsmp_check_apicid_present(int bit) | 43 | static unsigned long bigsmp_check_apicid_present(int bit) |
45 | { | 44 | { |
46 | return 1; | 45 | return 1; |
47 | } | 46 | } |
@@ -64,7 +63,7 @@ static inline unsigned long calculate_ldr(int cpu) | |||
64 | * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel | 63 | * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel |
65 | * document number 292116). So here it goes... | 64 | * document number 292116). So here it goes... |
66 | */ | 65 | */ |
67 | static inline void bigsmp_init_apic_ldr(void) | 66 | static void bigsmp_init_apic_ldr(void) |
68 | { | 67 | { |
69 | unsigned long val; | 68 | unsigned long val; |
70 | int cpu = smp_processor_id(); | 69 | int cpu = smp_processor_id(); |
@@ -74,19 +73,19 @@ static inline void bigsmp_init_apic_ldr(void) | |||
74 | apic_write(APIC_LDR, val); | 73 | apic_write(APIC_LDR, val); |
75 | } | 74 | } |
76 | 75 | ||
77 | static inline void bigsmp_setup_apic_routing(void) | 76 | static void bigsmp_setup_apic_routing(void) |
78 | { | 77 | { |
79 | printk(KERN_INFO | 78 | printk(KERN_INFO |
80 | "Enabling APIC mode: Physflat. Using %d I/O APICs\n", | 79 | "Enabling APIC mode: Physflat. Using %d I/O APICs\n", |
81 | nr_ioapics); | 80 | nr_ioapics); |
82 | } | 81 | } |
83 | 82 | ||
84 | static inline int bigsmp_apicid_to_node(int logical_apicid) | 83 | static int bigsmp_apicid_to_node(int logical_apicid) |
85 | { | 84 | { |
86 | return apicid_2_node[hard_smp_processor_id()]; | 85 | return apicid_2_node[hard_smp_processor_id()]; |
87 | } | 86 | } |
88 | 87 | ||
89 | static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) | 88 | static int bigsmp_cpu_present_to_apicid(int mps_cpu) |
90 | { | 89 | { |
91 | if (mps_cpu < nr_cpu_ids) | 90 | if (mps_cpu < nr_cpu_ids) |
92 | return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); | 91 | return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); |
@@ -94,7 +93,7 @@ static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) | |||
94 | return BAD_APICID; | 93 | return BAD_APICID; |
95 | } | 94 | } |
96 | 95 | ||
97 | static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) | 96 | static physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) |
98 | { | 97 | { |
99 | return physid_mask_of_physid(phys_apicid); | 98 | return physid_mask_of_physid(phys_apicid); |
100 | } | 99 | } |
@@ -107,29 +106,24 @@ static inline int bigsmp_cpu_to_logical_apicid(int cpu) | |||
107 | return cpu_physical_id(cpu); | 106 | return cpu_physical_id(cpu); |
108 | } | 107 | } |
109 | 108 | ||
110 | static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) | 109 | static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) |
111 | { | 110 | { |
112 | /* For clustered we don't have a good way to do this yet - hack */ | 111 | /* For clustered we don't have a good way to do this yet - hack */ |
113 | return physids_promote(0xFFL); | 112 | return physids_promote(0xFFL); |
114 | } | 113 | } |
115 | 114 | ||
116 | static inline void bigsmp_setup_portio_remap(void) | 115 | static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid) |
117 | { | ||
118 | } | ||
119 | |||
120 | static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid) | ||
121 | { | 116 | { |
122 | return 1; | 117 | return 1; |
123 | } | 118 | } |
124 | 119 | ||
125 | /* As we are using single CPU as destination, pick only one CPU here */ | 120 | /* As we are using single CPU as destination, pick only one CPU here */ |
126 | static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) | 121 | static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) |
127 | { | 122 | { |
128 | return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask)); | 123 | return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask)); |
129 | } | 124 | } |
130 | 125 | ||
131 | static inline unsigned int | 126 | static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, |
132 | bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
133 | const struct cpumask *andmask) | 127 | const struct cpumask *andmask) |
134 | { | 128 | { |
135 | int cpu; | 129 | int cpu; |
@@ -148,7 +142,7 @@ bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | |||
148 | return BAD_APICID; | 142 | return BAD_APICID; |
149 | } | 143 | } |
150 | 144 | ||
151 | static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) | 145 | static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) |
152 | { | 146 | { |
153 | return cpuid_apic >> index_msb; | 147 | return cpuid_apic >> index_msb; |
154 | } | 148 | } |
@@ -158,12 +152,12 @@ static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector) | |||
158 | default_send_IPI_mask_sequence_phys(mask, vector); | 152 | default_send_IPI_mask_sequence_phys(mask, vector); |
159 | } | 153 | } |
160 | 154 | ||
161 | static inline void bigsmp_send_IPI_allbutself(int vector) | 155 | static void bigsmp_send_IPI_allbutself(int vector) |
162 | { | 156 | { |
163 | default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); | 157 | default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); |
164 | } | 158 | } |
165 | 159 | ||
166 | static inline void bigsmp_send_IPI_all(int vector) | 160 | static void bigsmp_send_IPI_all(int vector) |
167 | { | 161 | { |
168 | bigsmp_send_IPI_mask(cpu_online_mask, vector); | 162 | bigsmp_send_IPI_mask(cpu_online_mask, vector); |
169 | } | 163 | } |
@@ -256,7 +250,6 @@ struct apic apic_bigsmp = { | |||
256 | .send_IPI_all = bigsmp_send_IPI_all, | 250 | .send_IPI_all = bigsmp_send_IPI_all, |
257 | .send_IPI_self = default_send_IPI_self, | 251 | .send_IPI_self = default_send_IPI_self, |
258 | 252 | ||
259 | .wakeup_cpu = NULL, | ||
260 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, | 253 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, |
261 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, | 254 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, |
262 | 255 | ||
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 320f2d2e4e54..19588f2770ee 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c | |||
@@ -163,22 +163,17 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) | |||
163 | return 0; | 163 | return 0; |
164 | } | 164 | } |
165 | 165 | ||
166 | static int __init es7000_update_apic(void) | 166 | static int es7000_apic_is_cluster(void) |
167 | { | 167 | { |
168 | apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; | ||
169 | |||
170 | /* MPENTIUMIII */ | 168 | /* MPENTIUMIII */ |
171 | if (boot_cpu_data.x86 == 6 && | 169 | if (boot_cpu_data.x86 == 6 && |
172 | (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) { | 170 | (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) |
173 | es7000_update_apic_to_cluster(); | 171 | return 1; |
174 | apic->wait_for_init_deassert = NULL; | ||
175 | apic->wakeup_cpu = wakeup_secondary_cpu_via_mip; | ||
176 | } | ||
177 | 172 | ||
178 | return 0; | 173 | return 0; |
179 | } | 174 | } |
180 | 175 | ||
181 | static void __init setup_unisys(void) | 176 | static void setup_unisys(void) |
182 | { | 177 | { |
183 | /* | 178 | /* |
184 | * Determine the generation of the ES7000 currently running. | 179 | * Determine the generation of the ES7000 currently running. |
@@ -192,14 +187,12 @@ static void __init setup_unisys(void) | |||
192 | else | 187 | else |
193 | es7000_plat = ES7000_CLASSIC; | 188 | es7000_plat = ES7000_CLASSIC; |
194 | ioapic_renumber_irq = es7000_rename_gsi; | 189 | ioapic_renumber_irq = es7000_rename_gsi; |
195 | |||
196 | x86_quirks->update_apic = es7000_update_apic; | ||
197 | } | 190 | } |
198 | 191 | ||
199 | /* | 192 | /* |
200 | * Parse the OEM Table: | 193 | * Parse the OEM Table: |
201 | */ | 194 | */ |
202 | static int __init parse_unisys_oem(char *oemptr) | 195 | static int parse_unisys_oem(char *oemptr) |
203 | { | 196 | { |
204 | int i; | 197 | int i; |
205 | int success = 0; | 198 | int success = 0; |
@@ -261,7 +254,7 @@ static int __init parse_unisys_oem(char *oemptr) | |||
261 | } | 254 | } |
262 | 255 | ||
263 | #ifdef CONFIG_ACPI | 256 | #ifdef CONFIG_ACPI |
264 | static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) | 257 | static int find_unisys_acpi_oem_table(unsigned long *oem_addr) |
265 | { | 258 | { |
266 | struct acpi_table_header *header = NULL; | 259 | struct acpi_table_header *header = NULL; |
267 | struct es7000_oem_table *table; | 260 | struct es7000_oem_table *table; |
@@ -292,7 +285,7 @@ static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) | |||
292 | return 0; | 285 | return 0; |
293 | } | 286 | } |
294 | 287 | ||
295 | static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) | 288 | static void unmap_unisys_acpi_oem_table(unsigned long oem_addr) |
296 | { | 289 | { |
297 | if (!oem_addr) | 290 | if (!oem_addr) |
298 | return; | 291 | return; |
@@ -310,8 +303,10 @@ static int es7000_check_dsdt(void) | |||
310 | return 0; | 303 | return 0; |
311 | } | 304 | } |
312 | 305 | ||
306 | static int es7000_acpi_ret; | ||
307 | |||
313 | /* Hook from generic ACPI tables.c */ | 308 | /* Hook from generic ACPI tables.c */ |
314 | static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 309 | static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
315 | { | 310 | { |
316 | unsigned long oem_addr = 0; | 311 | unsigned long oem_addr = 0; |
317 | int check_dsdt; | 312 | int check_dsdt; |
@@ -332,10 +327,26 @@ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
332 | */ | 327 | */ |
333 | unmap_unisys_acpi_oem_table(oem_addr); | 328 | unmap_unisys_acpi_oem_table(oem_addr); |
334 | } | 329 | } |
335 | return ret; | 330 | |
331 | es7000_acpi_ret = ret; | ||
332 | |||
333 | return ret && !es7000_apic_is_cluster(); | ||
336 | } | 334 | } |
335 | |||
336 | static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id) | ||
337 | { | ||
338 | int ret = es7000_acpi_ret; | ||
339 | |||
340 | return ret && es7000_apic_is_cluster(); | ||
341 | } | ||
342 | |||
337 | #else /* !CONFIG_ACPI: */ | 343 | #else /* !CONFIG_ACPI: */ |
338 | static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 344 | static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
345 | { | ||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id) | ||
339 | { | 350 | { |
340 | return 0; | 351 | return 0; |
341 | } | 352 | } |
@@ -349,8 +360,7 @@ static void es7000_spin(int n) | |||
349 | rep_nop(); | 360 | rep_nop(); |
350 | } | 361 | } |
351 | 362 | ||
352 | static int __init | 363 | static int es7000_mip_write(struct mip_reg *mip_reg) |
353 | es7000_mip_write(struct mip_reg *mip_reg) | ||
354 | { | 364 | { |
355 | int status = 0; | 365 | int status = 0; |
356 | int spin; | 366 | int spin; |
@@ -383,7 +393,7 @@ es7000_mip_write(struct mip_reg *mip_reg) | |||
383 | return status; | 393 | return status; |
384 | } | 394 | } |
385 | 395 | ||
386 | static void __init es7000_enable_apic_mode(void) | 396 | static void es7000_enable_apic_mode(void) |
387 | { | 397 | { |
388 | struct mip_reg es7000_mip_reg; | 398 | struct mip_reg es7000_mip_reg; |
389 | int mip_status; | 399 | int mip_status; |
@@ -416,11 +426,8 @@ static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) | |||
416 | 426 | ||
417 | static void es7000_wait_for_init_deassert(atomic_t *deassert) | 427 | static void es7000_wait_for_init_deassert(atomic_t *deassert) |
418 | { | 428 | { |
419 | #ifndef CONFIG_ES7000_CLUSTERED_APIC | ||
420 | while (!atomic_read(deassert)) | 429 | while (!atomic_read(deassert)) |
421 | cpu_relax(); | 430 | cpu_relax(); |
422 | #endif | ||
423 | return; | ||
424 | } | 431 | } |
425 | 432 | ||
426 | static unsigned int es7000_get_apic_id(unsigned long x) | 433 | static unsigned int es7000_get_apic_id(unsigned long x) |
@@ -565,72 +572,24 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid) | |||
565 | return 1; | 572 | return 1; |
566 | } | 573 | } |
567 | 574 | ||
568 | static unsigned int | ||
569 | es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) | ||
570 | { | ||
571 | int cpus_found = 0; | ||
572 | int num_bits_set; | ||
573 | int apicid; | ||
574 | int cpu; | ||
575 | |||
576 | num_bits_set = cpumask_weight(cpumask); | ||
577 | /* Return id to all */ | ||
578 | if (num_bits_set == nr_cpu_ids) | ||
579 | return 0xFF; | ||
580 | /* | ||
581 | * The cpus in the mask must all be on the apic cluster. If are not | ||
582 | * on the same apicid cluster return default value of target_cpus(): | ||
583 | */ | ||
584 | cpu = cpumask_first(cpumask); | ||
585 | apicid = es7000_cpu_to_logical_apicid(cpu); | ||
586 | |||
587 | while (cpus_found < num_bits_set) { | ||
588 | if (cpumask_test_cpu(cpu, cpumask)) { | ||
589 | int new_apicid = es7000_cpu_to_logical_apicid(cpu); | ||
590 | |||
591 | if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { | ||
592 | WARN(1, "Not a valid mask!"); | ||
593 | |||
594 | return 0xFF; | ||
595 | } | ||
596 | apicid = new_apicid; | ||
597 | cpus_found++; | ||
598 | } | ||
599 | cpu++; | ||
600 | } | ||
601 | return apicid; | ||
602 | } | ||
603 | |||
604 | static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) | 575 | static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) |
605 | { | 576 | { |
606 | int cpus_found = 0; | 577 | unsigned int round = 0; |
607 | int num_bits_set; | 578 | int cpu, uninitialized_var(apicid); |
608 | int apicid; | ||
609 | int cpu; | ||
610 | 579 | ||
611 | num_bits_set = cpus_weight(*cpumask); | ||
612 | /* Return id to all */ | ||
613 | if (num_bits_set == nr_cpu_ids) | ||
614 | return es7000_cpu_to_logical_apicid(0); | ||
615 | /* | 580 | /* |
616 | * The cpus in the mask must all be on the apic cluster. If are not | 581 | * The cpus in the mask must all be on the apic cluster. |
617 | * on the same apicid cluster return default value of target_cpus(): | ||
618 | */ | 582 | */ |
619 | cpu = first_cpu(*cpumask); | 583 | for_each_cpu(cpu, cpumask) { |
620 | apicid = es7000_cpu_to_logical_apicid(cpu); | 584 | int new_apicid = es7000_cpu_to_logical_apicid(cpu); |
621 | while (cpus_found < num_bits_set) { | ||
622 | if (cpu_isset(cpu, *cpumask)) { | ||
623 | int new_apicid = es7000_cpu_to_logical_apicid(cpu); | ||
624 | 585 | ||
625 | if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { | 586 | if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { |
626 | printk("%s: Not a valid mask!\n", __func__); | 587 | WARN(1, "Not a valid mask!"); |
627 | 588 | ||
628 | return es7000_cpu_to_logical_apicid(0); | 589 | return BAD_APICID; |
629 | } | ||
630 | apicid = new_apicid; | ||
631 | cpus_found++; | ||
632 | } | 590 | } |
633 | cpu++; | 591 | apicid = new_apicid; |
592 | round++; | ||
634 | } | 593 | } |
635 | return apicid; | 594 | return apicid; |
636 | } | 595 | } |
@@ -659,37 +618,103 @@ static int es7000_phys_pkg_id(int cpuid_apic, int index_msb) | |||
659 | return cpuid_apic >> index_msb; | 618 | return cpuid_apic >> index_msb; |
660 | } | 619 | } |
661 | 620 | ||
662 | void __init es7000_update_apic_to_cluster(void) | ||
663 | { | ||
664 | apic->target_cpus = target_cpus_cluster; | ||
665 | apic->irq_delivery_mode = dest_LowestPrio; | ||
666 | /* logical delivery broadcast to all procs: */ | ||
667 | apic->irq_dest_mode = 1; | ||
668 | |||
669 | apic->init_apic_ldr = es7000_init_apic_ldr_cluster; | ||
670 | |||
671 | apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster; | ||
672 | } | ||
673 | |||
674 | static int probe_es7000(void) | 621 | static int probe_es7000(void) |
675 | { | 622 | { |
676 | /* probed later in mptable/ACPI hooks */ | 623 | /* probed later in mptable/ACPI hooks */ |
677 | return 0; | 624 | return 0; |
678 | } | 625 | } |
679 | 626 | ||
680 | static __init int | 627 | static int es7000_mps_ret; |
681 | es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) | 628 | static int es7000_mps_oem_check(struct mpc_table *mpc, char *oem, |
629 | char *productid) | ||
682 | { | 630 | { |
631 | int ret = 0; | ||
632 | |||
683 | if (mpc->oemptr) { | 633 | if (mpc->oemptr) { |
684 | struct mpc_oemtable *oem_table = | 634 | struct mpc_oemtable *oem_table = |
685 | (struct mpc_oemtable *)mpc->oemptr; | 635 | (struct mpc_oemtable *)mpc->oemptr; |
686 | 636 | ||
687 | if (!strncmp(oem, "UNISYS", 6)) | 637 | if (!strncmp(oem, "UNISYS", 6)) |
688 | return parse_unisys_oem((char *)oem_table); | 638 | ret = parse_unisys_oem((char *)oem_table); |
689 | } | 639 | } |
690 | return 0; | 640 | |
641 | es7000_mps_ret = ret; | ||
642 | |||
643 | return ret && !es7000_apic_is_cluster(); | ||
691 | } | 644 | } |
692 | 645 | ||
646 | static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem, | ||
647 | char *productid) | ||
648 | { | ||
649 | int ret = es7000_mps_ret; | ||
650 | |||
651 | return ret && es7000_apic_is_cluster(); | ||
652 | } | ||
653 | |||
654 | struct apic apic_es7000_cluster = { | ||
655 | |||
656 | .name = "es7000", | ||
657 | .probe = probe_es7000, | ||
658 | .acpi_madt_oem_check = es7000_acpi_madt_oem_check_cluster, | ||
659 | .apic_id_registered = es7000_apic_id_registered, | ||
660 | |||
661 | .irq_delivery_mode = dest_LowestPrio, | ||
662 | /* logical delivery broadcast to all procs: */ | ||
663 | .irq_dest_mode = 1, | ||
664 | |||
665 | .target_cpus = target_cpus_cluster, | ||
666 | .disable_esr = 1, | ||
667 | .dest_logical = 0, | ||
668 | .check_apicid_used = es7000_check_apicid_used, | ||
669 | .check_apicid_present = es7000_check_apicid_present, | ||
670 | |||
671 | .vector_allocation_domain = es7000_vector_allocation_domain, | ||
672 | .init_apic_ldr = es7000_init_apic_ldr_cluster, | ||
673 | |||
674 | .ioapic_phys_id_map = es7000_ioapic_phys_id_map, | ||
675 | .setup_apic_routing = es7000_setup_apic_routing, | ||
676 | .multi_timer_check = NULL, | ||
677 | .apicid_to_node = es7000_apicid_to_node, | ||
678 | .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, | ||
679 | .cpu_present_to_apicid = es7000_cpu_present_to_apicid, | ||
680 | .apicid_to_cpu_present = es7000_apicid_to_cpu_present, | ||
681 | .setup_portio_remap = NULL, | ||
682 | .check_phys_apicid_present = es7000_check_phys_apicid_present, | ||
683 | .enable_apic_mode = es7000_enable_apic_mode, | ||
684 | .phys_pkg_id = es7000_phys_pkg_id, | ||
685 | .mps_oem_check = es7000_mps_oem_check_cluster, | ||
686 | |||
687 | .get_apic_id = es7000_get_apic_id, | ||
688 | .set_apic_id = NULL, | ||
689 | .apic_id_mask = 0xFF << 24, | ||
690 | |||
691 | .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid, | ||
692 | .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, | ||
693 | |||
694 | .send_IPI_mask = es7000_send_IPI_mask, | ||
695 | .send_IPI_mask_allbutself = NULL, | ||
696 | .send_IPI_allbutself = es7000_send_IPI_allbutself, | ||
697 | .send_IPI_all = es7000_send_IPI_all, | ||
698 | .send_IPI_self = default_send_IPI_self, | ||
699 | |||
700 | .wakeup_secondary_cpu = wakeup_secondary_cpu_via_mip, | ||
701 | |||
702 | .trampoline_phys_low = 0x467, | ||
703 | .trampoline_phys_high = 0x469, | ||
704 | |||
705 | .wait_for_init_deassert = NULL, | ||
706 | |||
707 | /* Nothing to do for most platforms, since cleared by the INIT cycle: */ | ||
708 | .smp_callin_clear_local_apic = NULL, | ||
709 | .inquire_remote_apic = default_inquire_remote_apic, | ||
710 | |||
711 | .read = native_apic_mem_read, | ||
712 | .write = native_apic_mem_write, | ||
713 | .icr_read = native_apic_icr_read, | ||
714 | .icr_write = native_apic_icr_write, | ||
715 | .wait_icr_idle = native_apic_wait_icr_idle, | ||
716 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | ||
717 | }; | ||
693 | 718 | ||
694 | struct apic apic_es7000 = { | 719 | struct apic apic_es7000 = { |
695 | 720 | ||
@@ -737,8 +762,6 @@ struct apic apic_es7000 = { | |||
737 | .send_IPI_all = es7000_send_IPI_all, | 762 | .send_IPI_all = es7000_send_IPI_all, |
738 | .send_IPI_self = default_send_IPI_self, | 763 | .send_IPI_self = default_send_IPI_self, |
739 | 764 | ||
740 | .wakeup_cpu = NULL, | ||
741 | |||
742 | .trampoline_phys_low = 0x467, | 765 | .trampoline_phys_low = 0x467, |
743 | .trampoline_phys_high = 0x469, | 766 | .trampoline_phys_high = 0x469, |
744 | 767 | ||
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index d9d6d61eed82..ba2fc6465534 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c | |||
@@ -69,7 +69,7 @@ struct mpc_trans { | |||
69 | /* x86_quirks member */ | 69 | /* x86_quirks member */ |
70 | static int mpc_record; | 70 | static int mpc_record; |
71 | 71 | ||
72 | static __cpuinitdata struct mpc_trans *translation_table[MAX_MPC_ENTRY]; | 72 | static struct mpc_trans *translation_table[MAX_MPC_ENTRY]; |
73 | 73 | ||
74 | int mp_bus_id_to_node[MAX_MP_BUSSES]; | 74 | int mp_bus_id_to_node[MAX_MP_BUSSES]; |
75 | int mp_bus_id_to_local[MAX_MP_BUSSES]; | 75 | int mp_bus_id_to_local[MAX_MP_BUSSES]; |
@@ -256,13 +256,6 @@ static int __init numaq_setup_ioapic_ids(void) | |||
256 | return 1; | 256 | return 1; |
257 | } | 257 | } |
258 | 258 | ||
259 | static int __init numaq_update_apic(void) | ||
260 | { | ||
261 | apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi; | ||
262 | |||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static struct x86_quirks numaq_x86_quirks __initdata = { | 259 | static struct x86_quirks numaq_x86_quirks __initdata = { |
267 | .arch_pre_time_init = numaq_pre_time_init, | 260 | .arch_pre_time_init = numaq_pre_time_init, |
268 | .arch_time_init = NULL, | 261 | .arch_time_init = NULL, |
@@ -278,7 +271,6 @@ static struct x86_quirks numaq_x86_quirks __initdata = { | |||
278 | .mpc_oem_pci_bus = mpc_oem_pci_bus, | 271 | .mpc_oem_pci_bus = mpc_oem_pci_bus, |
279 | .smp_read_mpc_oem = smp_read_mpc_oem, | 272 | .smp_read_mpc_oem = smp_read_mpc_oem, |
280 | .setup_ioapic_ids = numaq_setup_ioapic_ids, | 273 | .setup_ioapic_ids = numaq_setup_ioapic_ids, |
281 | .update_apic = numaq_update_apic, | ||
282 | }; | 274 | }; |
283 | 275 | ||
284 | static __init void early_check_numaq(void) | 276 | static __init void early_check_numaq(void) |
@@ -546,7 +538,7 @@ struct apic apic_numaq = { | |||
546 | .send_IPI_all = numaq_send_IPI_all, | 538 | .send_IPI_all = numaq_send_IPI_all, |
547 | .send_IPI_self = default_send_IPI_self, | 539 | .send_IPI_self = default_send_IPI_self, |
548 | 540 | ||
549 | .wakeup_cpu = NULL, | 541 | .wakeup_secondary_cpu = wakeup_secondary_cpu_via_nmi, |
550 | .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, | 542 | .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, |
551 | .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, | 543 | .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, |
552 | 544 | ||
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 3a730fa574bb..141c99a1c264 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c | |||
@@ -138,7 +138,6 @@ struct apic apic_default = { | |||
138 | .send_IPI_all = default_send_IPI_all, | 138 | .send_IPI_all = default_send_IPI_all, |
139 | .send_IPI_self = default_send_IPI_self, | 139 | .send_IPI_self = default_send_IPI_self, |
140 | 140 | ||
141 | .wakeup_cpu = NULL, | ||
142 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, | 141 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, |
143 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, | 142 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, |
144 | 143 | ||
@@ -159,6 +158,7 @@ extern struct apic apic_numaq; | |||
159 | extern struct apic apic_summit; | 158 | extern struct apic apic_summit; |
160 | extern struct apic apic_bigsmp; | 159 | extern struct apic apic_bigsmp; |
161 | extern struct apic apic_es7000; | 160 | extern struct apic apic_es7000; |
161 | extern struct apic apic_es7000_cluster; | ||
162 | extern struct apic apic_default; | 162 | extern struct apic apic_default; |
163 | 163 | ||
164 | struct apic *apic = &apic_default; | 164 | struct apic *apic = &apic_default; |
@@ -176,6 +176,7 @@ static struct apic *apic_probe[] __initdata = { | |||
176 | #endif | 176 | #endif |
177 | #ifdef CONFIG_X86_ES7000 | 177 | #ifdef CONFIG_X86_ES7000 |
178 | &apic_es7000, | 178 | &apic_es7000, |
179 | &apic_es7000_cluster, | ||
179 | #endif | 180 | #endif |
180 | &apic_default, /* must be last */ | 181 | &apic_default, /* must be last */ |
181 | NULL, | 182 | NULL, |
@@ -197,9 +198,6 @@ static int __init parse_apic(char *arg) | |||
197 | } | 198 | } |
198 | } | 199 | } |
199 | 200 | ||
200 | if (x86_quirks->update_apic) | ||
201 | x86_quirks->update_apic(); | ||
202 | |||
203 | /* Parsed again by __setup for debug/verbose */ | 201 | /* Parsed again by __setup for debug/verbose */ |
204 | return 0; | 202 | return 0; |
205 | } | 203 | } |
@@ -218,8 +216,6 @@ void __init generic_bigsmp_probe(void) | |||
218 | if (!cmdline_apic && apic == &apic_default) { | 216 | if (!cmdline_apic && apic == &apic_default) { |
219 | if (apic_bigsmp.probe()) { | 217 | if (apic_bigsmp.probe()) { |
220 | apic = &apic_bigsmp; | 218 | apic = &apic_bigsmp; |
221 | if (x86_quirks->update_apic) | ||
222 | x86_quirks->update_apic(); | ||
223 | printk(KERN_INFO "Overriding APIC driver with %s\n", | 219 | printk(KERN_INFO "Overriding APIC driver with %s\n", |
224 | apic->name); | 220 | apic->name); |
225 | } | 221 | } |
@@ -240,9 +236,6 @@ void __init generic_apic_probe(void) | |||
240 | /* Not visible without early console */ | 236 | /* Not visible without early console */ |
241 | if (!apic_probe[i]) | 237 | if (!apic_probe[i]) |
242 | panic("Didn't find an APIC driver"); | 238 | panic("Didn't find an APIC driver"); |
243 | |||
244 | if (x86_quirks->update_apic) | ||
245 | x86_quirks->update_apic(); | ||
246 | } | 239 | } |
247 | printk(KERN_INFO "Using APIC driver %s\n", apic->name); | 240 | printk(KERN_INFO "Using APIC driver %s\n", apic->name); |
248 | } | 241 | } |
@@ -262,8 +255,6 @@ generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) | |||
262 | 255 | ||
263 | if (!cmdline_apic) { | 256 | if (!cmdline_apic) { |
264 | apic = apic_probe[i]; | 257 | apic = apic_probe[i]; |
265 | if (x86_quirks->update_apic) | ||
266 | x86_quirks->update_apic(); | ||
267 | printk(KERN_INFO "Switched to APIC driver `%s'.\n", | 258 | printk(KERN_INFO "Switched to APIC driver `%s'.\n", |
268 | apic->name); | 259 | apic->name); |
269 | } | 260 | } |
@@ -284,8 +275,6 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
284 | 275 | ||
285 | if (!cmdline_apic) { | 276 | if (!cmdline_apic) { |
286 | apic = apic_probe[i]; | 277 | apic = apic_probe[i]; |
287 | if (x86_quirks->update_apic) | ||
288 | x86_quirks->update_apic(); | ||
289 | printk(KERN_INFO "Switched to APIC driver `%s'.\n", | 278 | printk(KERN_INFO "Switched to APIC driver `%s'.\n", |
290 | apic->name); | 279 | apic->name); |
291 | } | 280 | } |
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index e7c163661c77..8d7748efe6a8 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c | |||
@@ -68,9 +68,6 @@ void __init default_setup_apic_routing(void) | |||
68 | apic = &apic_physflat; | 68 | apic = &apic_physflat; |
69 | printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); | 69 | printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); |
70 | } | 70 | } |
71 | |||
72 | if (x86_quirks->update_apic) | ||
73 | x86_quirks->update_apic(); | ||
74 | } | 71 | } |
75 | 72 | ||
76 | /* Same for both flat and physical. */ | 73 | /* Same for both flat and physical. */ |
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index cfe7b09015d8..aac52fa873ff 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c | |||
@@ -48,7 +48,7 @@ | |||
48 | #include <linux/gfp.h> | 48 | #include <linux/gfp.h> |
49 | #include <linux/smp.h> | 49 | #include <linux/smp.h> |
50 | 50 | ||
51 | static inline unsigned summit_get_apic_id(unsigned long x) | 51 | static unsigned summit_get_apic_id(unsigned long x) |
52 | { | 52 | { |
53 | return (x >> 24) & 0xFF; | 53 | return (x >> 24) & 0xFF; |
54 | } | 54 | } |
@@ -58,7 +58,7 @@ static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector) | |||
58 | default_send_IPI_mask_sequence_logical(mask, vector); | 58 | default_send_IPI_mask_sequence_logical(mask, vector); |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline void summit_send_IPI_allbutself(int vector) | 61 | static void summit_send_IPI_allbutself(int vector) |
62 | { | 62 | { |
63 | cpumask_t mask = cpu_online_map; | 63 | cpumask_t mask = cpu_online_map; |
64 | cpu_clear(smp_processor_id(), mask); | 64 | cpu_clear(smp_processor_id(), mask); |
@@ -67,7 +67,7 @@ static inline void summit_send_IPI_allbutself(int vector) | |||
67 | summit_send_IPI_mask(&mask, vector); | 67 | summit_send_IPI_mask(&mask, vector); |
68 | } | 68 | } |
69 | 69 | ||
70 | static inline void summit_send_IPI_all(int vector) | 70 | static void summit_send_IPI_all(int vector) |
71 | { | 71 | { |
72 | summit_send_IPI_mask(&cpu_online_map, vector); | 72 | summit_send_IPI_mask(&cpu_online_map, vector); |
73 | } | 73 | } |
@@ -77,13 +77,13 @@ static inline void summit_send_IPI_all(int vector) | |||
77 | extern int use_cyclone; | 77 | extern int use_cyclone; |
78 | 78 | ||
79 | #ifdef CONFIG_X86_SUMMIT_NUMA | 79 | #ifdef CONFIG_X86_SUMMIT_NUMA |
80 | extern void setup_summit(void); | 80 | static void setup_summit(void); |
81 | #else | 81 | #else |
82 | #define setup_summit() {} | 82 | static inline void setup_summit(void) {} |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | static inline int | 85 | static int summit_mps_oem_check(struct mpc_table *mpc, char *oem, |
86 | summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) | 86 | char *productid) |
87 | { | 87 | { |
88 | if (!strncmp(oem, "IBM ENSW", 8) && | 88 | if (!strncmp(oem, "IBM ENSW", 8) && |
89 | (!strncmp(productid, "VIGIL SMP", 9) | 89 | (!strncmp(productid, "VIGIL SMP", 9) |
@@ -98,7 +98,7 @@ summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) | |||
98 | } | 98 | } |
99 | 99 | ||
100 | /* Hook from generic ACPI tables.c */ | 100 | /* Hook from generic ACPI tables.c */ |
101 | static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 101 | static int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
102 | { | 102 | { |
103 | if (!strncmp(oem_id, "IBM", 3) && | 103 | if (!strncmp(oem_id, "IBM", 3) && |
104 | (!strncmp(oem_table_id, "SERVIGIL", 8) | 104 | (!strncmp(oem_table_id, "SERVIGIL", 8) |
@@ -186,7 +186,7 @@ static inline int is_WPEG(struct rio_detail *rio){ | |||
186 | 186 | ||
187 | #define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) | 187 | #define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) |
188 | 188 | ||
189 | static inline const cpumask_t *summit_target_cpus(void) | 189 | static const cpumask_t *summit_target_cpus(void) |
190 | { | 190 | { |
191 | /* CPU_MASK_ALL (0xff) has undefined behaviour with | 191 | /* CPU_MASK_ALL (0xff) has undefined behaviour with |
192 | * dest_LowestPrio mode logical clustered apic interrupt routing | 192 | * dest_LowestPrio mode logical clustered apic interrupt routing |
@@ -195,19 +195,18 @@ static inline const cpumask_t *summit_target_cpus(void) | |||
195 | return &cpumask_of_cpu(0); | 195 | return &cpumask_of_cpu(0); |
196 | } | 196 | } |
197 | 197 | ||
198 | static inline unsigned long | 198 | static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid) |
199 | summit_check_apicid_used(physid_mask_t bitmap, int apicid) | ||
200 | { | 199 | { |
201 | return 0; | 200 | return 0; |
202 | } | 201 | } |
203 | 202 | ||
204 | /* we don't use the phys_cpu_present_map to indicate apicid presence */ | 203 | /* we don't use the phys_cpu_present_map to indicate apicid presence */ |
205 | static inline unsigned long summit_check_apicid_present(int bit) | 204 | static unsigned long summit_check_apicid_present(int bit) |
206 | { | 205 | { |
207 | return 1; | 206 | return 1; |
208 | } | 207 | } |
209 | 208 | ||
210 | static inline void summit_init_apic_ldr(void) | 209 | static void summit_init_apic_ldr(void) |
211 | { | 210 | { |
212 | unsigned long val, id; | 211 | unsigned long val, id; |
213 | int count = 0; | 212 | int count = 0; |
@@ -234,18 +233,18 @@ static inline void summit_init_apic_ldr(void) | |||
234 | apic_write(APIC_LDR, val); | 233 | apic_write(APIC_LDR, val); |
235 | } | 234 | } |
236 | 235 | ||
237 | static inline int summit_apic_id_registered(void) | 236 | static int summit_apic_id_registered(void) |
238 | { | 237 | { |
239 | return 1; | 238 | return 1; |
240 | } | 239 | } |
241 | 240 | ||
242 | static inline void summit_setup_apic_routing(void) | 241 | static void summit_setup_apic_routing(void) |
243 | { | 242 | { |
244 | printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", | 243 | printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", |
245 | nr_ioapics); | 244 | nr_ioapics); |
246 | } | 245 | } |
247 | 246 | ||
248 | static inline int summit_apicid_to_node(int logical_apicid) | 247 | static int summit_apicid_to_node(int logical_apicid) |
249 | { | 248 | { |
250 | #ifdef CONFIG_SMP | 249 | #ifdef CONFIG_SMP |
251 | return apicid_2_node[hard_smp_processor_id()]; | 250 | return apicid_2_node[hard_smp_processor_id()]; |
@@ -266,7 +265,7 @@ static inline int summit_cpu_to_logical_apicid(int cpu) | |||
266 | #endif | 265 | #endif |
267 | } | 266 | } |
268 | 267 | ||
269 | static inline int summit_cpu_present_to_apicid(int mps_cpu) | 268 | static int summit_cpu_present_to_apicid(int mps_cpu) |
270 | { | 269 | { |
271 | if (mps_cpu < nr_cpu_ids) | 270 | if (mps_cpu < nr_cpu_ids) |
272 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); | 271 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); |
@@ -274,64 +273,44 @@ static inline int summit_cpu_present_to_apicid(int mps_cpu) | |||
274 | return BAD_APICID; | 273 | return BAD_APICID; |
275 | } | 274 | } |
276 | 275 | ||
277 | static inline physid_mask_t | 276 | static physid_mask_t summit_ioapic_phys_id_map(physid_mask_t phys_id_map) |
278 | summit_ioapic_phys_id_map(physid_mask_t phys_id_map) | ||
279 | { | 277 | { |
280 | /* For clustered we don't have a good way to do this yet - hack */ | 278 | /* For clustered we don't have a good way to do this yet - hack */ |
281 | return physids_promote(0x0F); | 279 | return physids_promote(0x0F); |
282 | } | 280 | } |
283 | 281 | ||
284 | static inline physid_mask_t summit_apicid_to_cpu_present(int apicid) | 282 | static physid_mask_t summit_apicid_to_cpu_present(int apicid) |
285 | { | 283 | { |
286 | return physid_mask_of_physid(0); | 284 | return physid_mask_of_physid(0); |
287 | } | 285 | } |
288 | 286 | ||
289 | static inline void summit_setup_portio_remap(void) | 287 | static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) |
290 | { | ||
291 | } | ||
292 | |||
293 | static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) | ||
294 | { | 288 | { |
295 | return 1; | 289 | return 1; |
296 | } | 290 | } |
297 | 291 | ||
298 | static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) | 292 | static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) |
299 | { | 293 | { |
300 | int cpus_found = 0; | 294 | unsigned int round = 0; |
301 | int num_bits_set; | 295 | int cpu, apicid = 0; |
302 | int apicid; | 296 | |
303 | int cpu; | ||
304 | |||
305 | num_bits_set = cpus_weight(*cpumask); | ||
306 | /* Return id to all */ | ||
307 | if (num_bits_set >= nr_cpu_ids) | ||
308 | return 0xFF; | ||
309 | /* | 297 | /* |
310 | * The cpus in the mask must all be on the apic cluster. If are not | 298 | * The cpus in the mask must all be on the apic cluster. |
311 | * on the same apicid cluster return default value of target_cpus(): | ||
312 | */ | 299 | */ |
313 | cpu = first_cpu(*cpumask); | 300 | for_each_cpu(cpu, cpumask) { |
314 | apicid = summit_cpu_to_logical_apicid(cpu); | 301 | int new_apicid = summit_cpu_to_logical_apicid(cpu); |
315 | 302 | ||
316 | while (cpus_found < num_bits_set) { | 303 | if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { |
317 | if (cpu_isset(cpu, *cpumask)) { | 304 | printk("%s: Not a valid mask!\n", __func__); |
318 | int new_apicid = summit_cpu_to_logical_apicid(cpu); | 305 | return BAD_APICID; |
319 | |||
320 | if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { | ||
321 | printk ("%s: Not a valid mask!\n", __func__); | ||
322 | |||
323 | return 0xFF; | ||
324 | } | ||
325 | apicid = apicid | new_apicid; | ||
326 | cpus_found++; | ||
327 | } | 306 | } |
328 | cpu++; | 307 | apicid |= new_apicid; |
308 | round++; | ||
329 | } | 309 | } |
330 | return apicid; | 310 | return apicid; |
331 | } | 311 | } |
332 | 312 | ||
333 | static inline unsigned int | 313 | static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, |
334 | summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, | ||
335 | const struct cpumask *andmask) | 314 | const struct cpumask *andmask) |
336 | { | 315 | { |
337 | int apicid = summit_cpu_to_logical_apicid(0); | 316 | int apicid = summit_cpu_to_logical_apicid(0); |
@@ -356,7 +335,7 @@ summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, | |||
356 | * | 335 | * |
357 | * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. | 336 | * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. |
358 | */ | 337 | */ |
359 | static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb) | 338 | static int summit_phys_pkg_id(int cpuid_apic, int index_msb) |
360 | { | 339 | { |
361 | return hard_smp_processor_id() >> index_msb; | 340 | return hard_smp_processor_id() >> index_msb; |
362 | } | 341 | } |
@@ -381,15 +360,15 @@ static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask) | |||
381 | } | 360 | } |
382 | 361 | ||
383 | #ifdef CONFIG_X86_SUMMIT_NUMA | 362 | #ifdef CONFIG_X86_SUMMIT_NUMA |
384 | static struct rio_table_hdr *rio_table_hdr __initdata; | 363 | static struct rio_table_hdr *rio_table_hdr; |
385 | static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; | 364 | static struct scal_detail *scal_devs[MAX_NUMNODES]; |
386 | static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; | 365 | static struct rio_detail *rio_devs[MAX_NUMNODES*4]; |
387 | 366 | ||
388 | #ifndef CONFIG_X86_NUMAQ | 367 | #ifndef CONFIG_X86_NUMAQ |
389 | static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata; | 368 | static int mp_bus_id_to_node[MAX_MP_BUSSES]; |
390 | #endif | 369 | #endif |
391 | 370 | ||
392 | static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) | 371 | static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) |
393 | { | 372 | { |
394 | int twister = 0, node = 0; | 373 | int twister = 0, node = 0; |
395 | int i, bus, num_buses; | 374 | int i, bus, num_buses; |
@@ -451,7 +430,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) | |||
451 | return bus; | 430 | return bus; |
452 | } | 431 | } |
453 | 432 | ||
454 | static int __init build_detail_arrays(void) | 433 | static int build_detail_arrays(void) |
455 | { | 434 | { |
456 | unsigned long ptr; | 435 | unsigned long ptr; |
457 | int i, scal_detail_size, rio_detail_size; | 436 | int i, scal_detail_size, rio_detail_size; |
@@ -485,7 +464,7 @@ static int __init build_detail_arrays(void) | |||
485 | return 1; | 464 | return 1; |
486 | } | 465 | } |
487 | 466 | ||
488 | void __init setup_summit(void) | 467 | void setup_summit(void) |
489 | { | 468 | { |
490 | unsigned long ptr; | 469 | unsigned long ptr; |
491 | unsigned short offset; | 470 | unsigned short offset; |
@@ -583,7 +562,6 @@ struct apic apic_summit = { | |||
583 | .send_IPI_all = summit_send_IPI_all, | 562 | .send_IPI_all = summit_send_IPI_all, |
584 | .send_IPI_self = default_send_IPI_self, | 563 | .send_IPI_self = default_send_IPI_self, |
585 | 564 | ||
586 | .wakeup_cpu = NULL, | ||
587 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, | 565 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, |
588 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, | 566 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, |
589 | 567 | ||
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 354b9c45601d..8fb87b6dd633 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
@@ -224,7 +224,6 @@ struct apic apic_x2apic_cluster = { | |||
224 | .send_IPI_all = x2apic_send_IPI_all, | 224 | .send_IPI_all = x2apic_send_IPI_all, |
225 | .send_IPI_self = x2apic_send_IPI_self, | 225 | .send_IPI_self = x2apic_send_IPI_self, |
226 | 226 | ||
227 | .wakeup_cpu = NULL, | ||
228 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, | 227 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, |
229 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, | 228 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, |
230 | .wait_for_init_deassert = NULL, | 229 | .wait_for_init_deassert = NULL, |
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 5bcb174409bc..23625b9f98b2 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c | |||
@@ -213,7 +213,6 @@ struct apic apic_x2apic_phys = { | |||
213 | .send_IPI_all = x2apic_send_IPI_all, | 213 | .send_IPI_all = x2apic_send_IPI_all, |
214 | .send_IPI_self = x2apic_send_IPI_self, | 214 | .send_IPI_self = x2apic_send_IPI_self, |
215 | 215 | ||
216 | .wakeup_cpu = NULL, | ||
217 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, | 216 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, |
218 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, | 217 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, |
219 | .wait_for_init_deassert = NULL, | 218 | .wait_for_init_deassert = NULL, |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 20b4ad07c3a1..1bd6da1f8fad 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -7,28 +7,28 @@ | |||
7 | * | 7 | * |
8 | * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/threads.h> | ||
13 | #include <linux/cpu.h> | ||
14 | #include <linux/cpumask.h> | 10 | #include <linux/cpumask.h> |
11 | #include <linux/hardirq.h> | ||
12 | #include <linux/proc_fs.h> | ||
13 | #include <linux/threads.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
15 | #include <linux/string.h> | 16 | #include <linux/string.h> |
16 | #include <linux/ctype.h> | 17 | #include <linux/ctype.h> |
17 | #include <linux/init.h> | ||
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/module.h> | ||
20 | #include <linux/hardirq.h> | ||
21 | #include <linux/timer.h> | 19 | #include <linux/timer.h> |
22 | #include <linux/proc_fs.h> | 20 | #include <linux/cpu.h> |
23 | #include <asm/current.h> | 21 | #include <linux/init.h> |
24 | #include <asm/smp.h> | 22 | |
25 | #include <asm/apic.h> | ||
26 | #include <asm/ipi.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/uv/uv.h> | ||
29 | #include <asm/uv/uv_mmrs.h> | 23 | #include <asm/uv/uv_mmrs.h> |
30 | #include <asm/uv/uv_hub.h> | 24 | #include <asm/uv/uv_hub.h> |
25 | #include <asm/current.h> | ||
26 | #include <asm/pgtable.h> | ||
31 | #include <asm/uv/bios.h> | 27 | #include <asm/uv/bios.h> |
28 | #include <asm/uv/uv.h> | ||
29 | #include <asm/apic.h> | ||
30 | #include <asm/ipi.h> | ||
31 | #include <asm/smp.h> | ||
32 | 32 | ||
33 | DEFINE_PER_CPU(int, x2apic_extra_bits); | 33 | DEFINE_PER_CPU(int, x2apic_extra_bits); |
34 | 34 | ||
@@ -91,24 +91,28 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) | |||
91 | cpumask_set_cpu(cpu, retmask); | 91 | cpumask_set_cpu(cpu, retmask); |
92 | } | 92 | } |
93 | 93 | ||
94 | int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) | 94 | static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) |
95 | { | 95 | { |
96 | #ifdef CONFIG_SMP | ||
96 | unsigned long val; | 97 | unsigned long val; |
97 | int pnode; | 98 | int pnode; |
98 | 99 | ||
99 | pnode = uv_apicid_to_pnode(phys_apicid); | 100 | pnode = uv_apicid_to_pnode(phys_apicid); |
100 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | 101 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | |
101 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | | 102 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | |
102 | (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | | 103 | ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | |
103 | APIC_DM_INIT; | 104 | APIC_DM_INIT; |
104 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); | 105 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); |
105 | mdelay(10); | 106 | mdelay(10); |
106 | 107 | ||
107 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | 108 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | |
108 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | | 109 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | |
109 | (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | | 110 | ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | |
110 | APIC_DM_STARTUP; | 111 | APIC_DM_STARTUP; |
111 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); | 112 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); |
113 | |||
114 | atomic_set(&init_deasserted, 1); | ||
115 | #endif | ||
112 | return 0; | 116 | return 0; |
113 | } | 117 | } |
114 | 118 | ||
@@ -285,7 +289,7 @@ struct apic apic_x2apic_uv_x = { | |||
285 | .send_IPI_all = uv_send_IPI_all, | 289 | .send_IPI_all = uv_send_IPI_all, |
286 | .send_IPI_self = uv_send_IPI_self, | 290 | .send_IPI_self = uv_send_IPI_self, |
287 | 291 | ||
288 | .wakeup_cpu = NULL, | 292 | .wakeup_secondary_cpu = uv_wakeup_secondary, |
289 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, | 293 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, |
290 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, | 294 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, |
291 | .wait_for_init_deassert = NULL, | 295 | .wait_for_init_deassert = NULL, |
@@ -365,7 +369,7 @@ static __init void map_high(char *id, unsigned long base, int shift, | |||
365 | paddr = base << shift; | 369 | paddr = base << shift; |
366 | bytes = (1UL << shift) * (max_pnode + 1); | 370 | bytes = (1UL << shift) * (max_pnode + 1); |
367 | printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, | 371 | printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, |
368 | paddr + bytes); | 372 | paddr + bytes); |
369 | if (map_type == map_uc) | 373 | if (map_type == map_uc) |
370 | init_extra_mapping_uc(paddr, bytes); | 374 | init_extra_mapping_uc(paddr, bytes); |
371 | else | 375 | else |
@@ -528,7 +532,7 @@ late_initcall(uv_init_heartbeat); | |||
528 | 532 | ||
529 | /* | 533 | /* |
530 | * Called on each cpu to initialize the per_cpu UV data area. | 534 | * Called on each cpu to initialize the per_cpu UV data area. |
531 | * ZZZ hotplug not supported yet | 535 | * FIXME: hotplug not supported yet |
532 | */ | 536 | */ |
533 | void __cpuinit uv_cpu_init(void) | 537 | void __cpuinit uv_cpu_init(void) |
534 | { | 538 | { |
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 01b1244ef1c0..d67e0e48bc2d 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
@@ -7,11 +7,10 @@ | |||
7 | /* | 7 | /* |
8 | * Get CPU information for use by the procfs. | 8 | * Get CPU information for use by the procfs. |
9 | */ | 9 | */ |
10 | #ifdef CONFIG_X86_32 | ||
11 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | 10 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, |
12 | unsigned int cpu) | 11 | unsigned int cpu) |
13 | { | 12 | { |
14 | #ifdef CONFIG_X86_HT | 13 | #ifdef CONFIG_SMP |
15 | if (c->x86_max_cores * smp_num_siblings > 1) { | 14 | if (c->x86_max_cores * smp_num_siblings > 1) { |
16 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | 15 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); |
17 | seq_printf(m, "siblings\t: %d\n", | 16 | seq_printf(m, "siblings\t: %d\n", |
@@ -24,6 +23,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | |||
24 | #endif | 23 | #endif |
25 | } | 24 | } |
26 | 25 | ||
26 | #ifdef CONFIG_X86_32 | ||
27 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | 27 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) |
28 | { | 28 | { |
29 | /* | 29 | /* |
@@ -50,22 +50,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | |||
50 | c->wp_works_ok ? "yes" : "no"); | 50 | c->wp_works_ok ? "yes" : "no"); |
51 | } | 51 | } |
52 | #else | 52 | #else |
53 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | ||
54 | unsigned int cpu) | ||
55 | { | ||
56 | #ifdef CONFIG_SMP | ||
57 | if (c->x86_max_cores * smp_num_siblings > 1) { | ||
58 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | ||
59 | seq_printf(m, "siblings\t: %d\n", | ||
60 | cpus_weight(per_cpu(cpu_core_map, cpu))); | ||
61 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | ||
62 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | ||
63 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); | ||
64 | seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); | ||
65 | } | ||
66 | #endif | ||
67 | } | ||
68 | |||
69 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | 53 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) |
70 | { | 54 | { |
71 | seq_printf(m, | 55 | seq_printf(m, |
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index e85826829cf2..508bec1cee27 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -858,6 +858,9 @@ void __init reserve_early_overlap_ok(u64 start, u64 end, char *name) | |||
858 | */ | 858 | */ |
859 | void __init reserve_early(u64 start, u64 end, char *name) | 859 | void __init reserve_early(u64 start, u64 end, char *name) |
860 | { | 860 | { |
861 | if (start >= end) | ||
862 | return; | ||
863 | |||
861 | drop_overlaps_that_are_ok(start, end); | 864 | drop_overlaps_that_are_ok(start, end); |
862 | __reserve_early(start, end, name, 0); | 865 | __reserve_early(start, end, name, 0); |
863 | } | 866 | } |
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index e41980a373ab..99c4d308f16b 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c | |||
@@ -85,19 +85,8 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) | |||
85 | 85 | ||
86 | t->io_bitmap_max = bytes; | 86 | t->io_bitmap_max = bytes; |
87 | 87 | ||
88 | #ifdef CONFIG_X86_32 | ||
89 | /* | ||
90 | * Sets the lazy trigger so that the next I/O operation will | ||
91 | * reload the correct bitmap. | ||
92 | * Reset the owner so that a process switch will not set | ||
93 | * tss->io_bitmap_base to IO_BITMAP_OFFSET. | ||
94 | */ | ||
95 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY; | ||
96 | tss->io_bitmap_owner = NULL; | ||
97 | #else | ||
98 | /* Update the TSS: */ | 88 | /* Update the TSS: */ |
99 | memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated); | 89 | memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated); |
100 | #endif | ||
101 | 90 | ||
102 | put_cpu(); | 91 | put_cpu(); |
103 | 92 | ||
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 87b69d4fac16..6afa5232dbb7 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -1,8 +1,8 @@ | |||
1 | #include <linux/errno.h> | 1 | #include <linux/errno.h> |
2 | #include <linux/kernel.h> | 2 | #include <linux/kernel.h> |
3 | #include <linux/mm.h> | 3 | #include <linux/mm.h> |
4 | #include <asm/idle.h> | ||
5 | #include <linux/smp.h> | 4 | #include <linux/smp.h> |
5 | #include <linux/prctl.h> | ||
6 | #include <linux/slab.h> | 6 | #include <linux/slab.h> |
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
@@ -11,6 +11,9 @@ | |||
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <asm/system.h> | 12 | #include <asm/system.h> |
13 | #include <asm/apic.h> | 13 | #include <asm/apic.h> |
14 | #include <asm/idle.h> | ||
15 | #include <asm/uaccess.h> | ||
16 | #include <asm/i387.h> | ||
14 | 17 | ||
15 | unsigned long idle_halt; | 18 | unsigned long idle_halt; |
16 | EXPORT_SYMBOL(idle_halt); | 19 | EXPORT_SYMBOL(idle_halt); |
@@ -56,6 +59,192 @@ void arch_task_cache_init(void) | |||
56 | } | 59 | } |
57 | 60 | ||
58 | /* | 61 | /* |
62 | * Free current thread data structures etc.. | ||
63 | */ | ||
64 | void exit_thread(void) | ||
65 | { | ||
66 | struct task_struct *me = current; | ||
67 | struct thread_struct *t = &me->thread; | ||
68 | |||
69 | if (me->thread.io_bitmap_ptr) { | ||
70 | struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); | ||
71 | |||
72 | kfree(t->io_bitmap_ptr); | ||
73 | t->io_bitmap_ptr = NULL; | ||
74 | clear_thread_flag(TIF_IO_BITMAP); | ||
75 | /* | ||
76 | * Careful, clear this in the TSS too: | ||
77 | */ | ||
78 | memset(tss->io_bitmap, 0xff, t->io_bitmap_max); | ||
79 | t->io_bitmap_max = 0; | ||
80 | put_cpu(); | ||
81 | } | ||
82 | |||
83 | ds_exit_thread(current); | ||
84 | } | ||
85 | |||
86 | void flush_thread(void) | ||
87 | { | ||
88 | struct task_struct *tsk = current; | ||
89 | |||
90 | #ifdef CONFIG_X86_64 | ||
91 | if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) { | ||
92 | clear_tsk_thread_flag(tsk, TIF_ABI_PENDING); | ||
93 | if (test_tsk_thread_flag(tsk, TIF_IA32)) { | ||
94 | clear_tsk_thread_flag(tsk, TIF_IA32); | ||
95 | } else { | ||
96 | set_tsk_thread_flag(tsk, TIF_IA32); | ||
97 | current_thread_info()->status |= TS_COMPAT; | ||
98 | } | ||
99 | } | ||
100 | #endif | ||
101 | |||
102 | clear_tsk_thread_flag(tsk, TIF_DEBUG); | ||
103 | |||
104 | tsk->thread.debugreg0 = 0; | ||
105 | tsk->thread.debugreg1 = 0; | ||
106 | tsk->thread.debugreg2 = 0; | ||
107 | tsk->thread.debugreg3 = 0; | ||
108 | tsk->thread.debugreg6 = 0; | ||
109 | tsk->thread.debugreg7 = 0; | ||
110 | memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); | ||
111 | /* | ||
112 | * Forget coprocessor state.. | ||
113 | */ | ||
114 | tsk->fpu_counter = 0; | ||
115 | clear_fpu(tsk); | ||
116 | clear_used_math(); | ||
117 | } | ||
118 | |||
119 | static void hard_disable_TSC(void) | ||
120 | { | ||
121 | write_cr4(read_cr4() | X86_CR4_TSD); | ||
122 | } | ||
123 | |||
124 | void disable_TSC(void) | ||
125 | { | ||
126 | preempt_disable(); | ||
127 | if (!test_and_set_thread_flag(TIF_NOTSC)) | ||
128 | /* | ||
129 | * Must flip the CPU state synchronously with | ||
130 | * TIF_NOTSC in the current running context. | ||
131 | */ | ||
132 | hard_disable_TSC(); | ||
133 | preempt_enable(); | ||
134 | } | ||
135 | |||
136 | static void hard_enable_TSC(void) | ||
137 | { | ||
138 | write_cr4(read_cr4() & ~X86_CR4_TSD); | ||
139 | } | ||
140 | |||
141 | static void enable_TSC(void) | ||
142 | { | ||
143 | preempt_disable(); | ||
144 | if (test_and_clear_thread_flag(TIF_NOTSC)) | ||
145 | /* | ||
146 | * Must flip the CPU state synchronously with | ||
147 | * TIF_NOTSC in the current running context. | ||
148 | */ | ||
149 | hard_enable_TSC(); | ||
150 | preempt_enable(); | ||
151 | } | ||
152 | |||
153 | int get_tsc_mode(unsigned long adr) | ||
154 | { | ||
155 | unsigned int val; | ||
156 | |||
157 | if (test_thread_flag(TIF_NOTSC)) | ||
158 | val = PR_TSC_SIGSEGV; | ||
159 | else | ||
160 | val = PR_TSC_ENABLE; | ||
161 | |||
162 | return put_user(val, (unsigned int __user *)adr); | ||
163 | } | ||
164 | |||
165 | int set_tsc_mode(unsigned int val) | ||
166 | { | ||
167 | if (val == PR_TSC_SIGSEGV) | ||
168 | disable_TSC(); | ||
169 | else if (val == PR_TSC_ENABLE) | ||
170 | enable_TSC(); | ||
171 | else | ||
172 | return -EINVAL; | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | ||
178 | struct tss_struct *tss) | ||
179 | { | ||
180 | struct thread_struct *prev, *next; | ||
181 | |||
182 | prev = &prev_p->thread; | ||
183 | next = &next_p->thread; | ||
184 | |||
185 | if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) || | ||
186 | test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR)) | ||
187 | ds_switch_to(prev_p, next_p); | ||
188 | else if (next->debugctlmsr != prev->debugctlmsr) | ||
189 | update_debugctlmsr(next->debugctlmsr); | ||
190 | |||
191 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { | ||
192 | set_debugreg(next->debugreg0, 0); | ||
193 | set_debugreg(next->debugreg1, 1); | ||
194 | set_debugreg(next->debugreg2, 2); | ||
195 | set_debugreg(next->debugreg3, 3); | ||
196 | /* no 4 and 5 */ | ||
197 | set_debugreg(next->debugreg6, 6); | ||
198 | set_debugreg(next->debugreg7, 7); | ||
199 | } | ||
200 | |||
201 | if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ | ||
202 | test_tsk_thread_flag(next_p, TIF_NOTSC)) { | ||
203 | /* prev and next are different */ | ||
204 | if (test_tsk_thread_flag(next_p, TIF_NOTSC)) | ||
205 | hard_disable_TSC(); | ||
206 | else | ||
207 | hard_enable_TSC(); | ||
208 | } | ||
209 | |||
210 | if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { | ||
211 | /* | ||
212 | * Copy the relevant range of the IO bitmap. | ||
213 | * Normally this is 128 bytes or less: | ||
214 | */ | ||
215 | memcpy(tss->io_bitmap, next->io_bitmap_ptr, | ||
216 | max(prev->io_bitmap_max, next->io_bitmap_max)); | ||
217 | } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { | ||
218 | /* | ||
219 | * Clear any possible leftover bits: | ||
220 | */ | ||
221 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); | ||
222 | } | ||
223 | } | ||
224 | |||
225 | int sys_fork(struct pt_regs *regs) | ||
226 | { | ||
227 | return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); | ||
228 | } | ||
229 | |||
230 | /* | ||
231 | * This is trivial, and on the face of it looks like it | ||
232 | * could equally well be done in user mode. | ||
233 | * | ||
234 | * Not so, for quite unobvious reasons - register pressure. | ||
235 | * In user mode vfork() cannot have a stack frame, and if | ||
236 | * done by calling the "clone()" system call directly, you | ||
237 | * do not have enough call-clobbered registers to hold all | ||
238 | * the information you need. | ||
239 | */ | ||
240 | int sys_vfork(struct pt_regs *regs) | ||
241 | { | ||
242 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, | ||
243 | NULL, NULL); | ||
244 | } | ||
245 | |||
246 | |||
247 | /* | ||
59 | * Idle related variables and functions | 248 | * Idle related variables and functions |
60 | */ | 249 | */ |
61 | unsigned long boot_option_idle_override = 0; | 250 | unsigned long boot_option_idle_override = 0; |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 646da41a620a..14014d766cad 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -230,55 +230,6 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) | |||
230 | } | 230 | } |
231 | EXPORT_SYMBOL(kernel_thread); | 231 | EXPORT_SYMBOL(kernel_thread); |
232 | 232 | ||
233 | /* | ||
234 | * Free current thread data structures etc.. | ||
235 | */ | ||
236 | void exit_thread(void) | ||
237 | { | ||
238 | /* The process may have allocated an io port bitmap... nuke it. */ | ||
239 | if (unlikely(test_thread_flag(TIF_IO_BITMAP))) { | ||
240 | struct task_struct *tsk = current; | ||
241 | struct thread_struct *t = &tsk->thread; | ||
242 | int cpu = get_cpu(); | ||
243 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | ||
244 | |||
245 | kfree(t->io_bitmap_ptr); | ||
246 | t->io_bitmap_ptr = NULL; | ||
247 | clear_thread_flag(TIF_IO_BITMAP); | ||
248 | /* | ||
249 | * Careful, clear this in the TSS too: | ||
250 | */ | ||
251 | memset(tss->io_bitmap, 0xff, tss->io_bitmap_max); | ||
252 | t->io_bitmap_max = 0; | ||
253 | tss->io_bitmap_owner = NULL; | ||
254 | tss->io_bitmap_max = 0; | ||
255 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; | ||
256 | put_cpu(); | ||
257 | } | ||
258 | |||
259 | ds_exit_thread(current); | ||
260 | } | ||
261 | |||
262 | void flush_thread(void) | ||
263 | { | ||
264 | struct task_struct *tsk = current; | ||
265 | |||
266 | tsk->thread.debugreg0 = 0; | ||
267 | tsk->thread.debugreg1 = 0; | ||
268 | tsk->thread.debugreg2 = 0; | ||
269 | tsk->thread.debugreg3 = 0; | ||
270 | tsk->thread.debugreg6 = 0; | ||
271 | tsk->thread.debugreg7 = 0; | ||
272 | memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); | ||
273 | clear_tsk_thread_flag(tsk, TIF_DEBUG); | ||
274 | /* | ||
275 | * Forget coprocessor state.. | ||
276 | */ | ||
277 | tsk->fpu_counter = 0; | ||
278 | clear_fpu(tsk); | ||
279 | clear_used_math(); | ||
280 | } | ||
281 | |||
282 | void release_thread(struct task_struct *dead_task) | 233 | void release_thread(struct task_struct *dead_task) |
283 | { | 234 | { |
284 | BUG_ON(dead_task->mm); | 235 | BUG_ON(dead_task->mm); |
@@ -366,127 +317,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) | |||
366 | } | 317 | } |
367 | EXPORT_SYMBOL_GPL(start_thread); | 318 | EXPORT_SYMBOL_GPL(start_thread); |
368 | 319 | ||
369 | static void hard_disable_TSC(void) | ||
370 | { | ||
371 | write_cr4(read_cr4() | X86_CR4_TSD); | ||
372 | } | ||
373 | |||
374 | void disable_TSC(void) | ||
375 | { | ||
376 | preempt_disable(); | ||
377 | if (!test_and_set_thread_flag(TIF_NOTSC)) | ||
378 | /* | ||
379 | * Must flip the CPU state synchronously with | ||
380 | * TIF_NOTSC in the current running context. | ||
381 | */ | ||
382 | hard_disable_TSC(); | ||
383 | preempt_enable(); | ||
384 | } | ||
385 | |||
386 | static void hard_enable_TSC(void) | ||
387 | { | ||
388 | write_cr4(read_cr4() & ~X86_CR4_TSD); | ||
389 | } | ||
390 | |||
391 | static void enable_TSC(void) | ||
392 | { | ||
393 | preempt_disable(); | ||
394 | if (test_and_clear_thread_flag(TIF_NOTSC)) | ||
395 | /* | ||
396 | * Must flip the CPU state synchronously with | ||
397 | * TIF_NOTSC in the current running context. | ||
398 | */ | ||
399 | hard_enable_TSC(); | ||
400 | preempt_enable(); | ||
401 | } | ||
402 | |||
403 | int get_tsc_mode(unsigned long adr) | ||
404 | { | ||
405 | unsigned int val; | ||
406 | |||
407 | if (test_thread_flag(TIF_NOTSC)) | ||
408 | val = PR_TSC_SIGSEGV; | ||
409 | else | ||
410 | val = PR_TSC_ENABLE; | ||
411 | |||
412 | return put_user(val, (unsigned int __user *)adr); | ||
413 | } | ||
414 | |||
415 | int set_tsc_mode(unsigned int val) | ||
416 | { | ||
417 | if (val == PR_TSC_SIGSEGV) | ||
418 | disable_TSC(); | ||
419 | else if (val == PR_TSC_ENABLE) | ||
420 | enable_TSC(); | ||
421 | else | ||
422 | return -EINVAL; | ||
423 | |||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | static noinline void | ||
428 | __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | ||
429 | struct tss_struct *tss) | ||
430 | { | ||
431 | struct thread_struct *prev, *next; | ||
432 | |||
433 | prev = &prev_p->thread; | ||
434 | next = &next_p->thread; | ||
435 | |||
436 | if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) || | ||
437 | test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR)) | ||
438 | ds_switch_to(prev_p, next_p); | ||
439 | else if (next->debugctlmsr != prev->debugctlmsr) | ||
440 | update_debugctlmsr(next->debugctlmsr); | ||
441 | |||
442 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { | ||
443 | set_debugreg(next->debugreg0, 0); | ||
444 | set_debugreg(next->debugreg1, 1); | ||
445 | set_debugreg(next->debugreg2, 2); | ||
446 | set_debugreg(next->debugreg3, 3); | ||
447 | /* no 4 and 5 */ | ||
448 | set_debugreg(next->debugreg6, 6); | ||
449 | set_debugreg(next->debugreg7, 7); | ||
450 | } | ||
451 | |||
452 | if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ | ||
453 | test_tsk_thread_flag(next_p, TIF_NOTSC)) { | ||
454 | /* prev and next are different */ | ||
455 | if (test_tsk_thread_flag(next_p, TIF_NOTSC)) | ||
456 | hard_disable_TSC(); | ||
457 | else | ||
458 | hard_enable_TSC(); | ||
459 | } | ||
460 | |||
461 | if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { | ||
462 | /* | ||
463 | * Disable the bitmap via an invalid offset. We still cache | ||
464 | * the previous bitmap owner and the IO bitmap contents: | ||
465 | */ | ||
466 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; | ||
467 | return; | ||
468 | } | ||
469 | |||
470 | if (likely(next == tss->io_bitmap_owner)) { | ||
471 | /* | ||
472 | * Previous owner of the bitmap (hence the bitmap content) | ||
473 | * matches the next task, we dont have to do anything but | ||
474 | * to set a valid offset in the TSS: | ||
475 | */ | ||
476 | tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; | ||
477 | return; | ||
478 | } | ||
479 | /* | ||
480 | * Lazy TSS's I/O bitmap copy. We set an invalid offset here | ||
481 | * and we let the task to get a GPF in case an I/O instruction | ||
482 | * is performed. The handler of the GPF will verify that the | ||
483 | * faulting task has a valid I/O bitmap and, it true, does the | ||
484 | * real copy and restart the instruction. This will save us | ||
485 | * redundant copies when the currently switched task does not | ||
486 | * perform any I/O during its timeslice. | ||
487 | */ | ||
488 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY; | ||
489 | } | ||
490 | 320 | ||
491 | /* | 321 | /* |
492 | * switch_to(x,yn) should switch tasks from x to y. | 322 | * switch_to(x,yn) should switch tasks from x to y. |
@@ -600,11 +430,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
600 | return prev_p; | 430 | return prev_p; |
601 | } | 431 | } |
602 | 432 | ||
603 | int sys_fork(struct pt_regs *regs) | ||
604 | { | ||
605 | return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); | ||
606 | } | ||
607 | |||
608 | int sys_clone(struct pt_regs *regs) | 433 | int sys_clone(struct pt_regs *regs) |
609 | { | 434 | { |
610 | unsigned long clone_flags; | 435 | unsigned long clone_flags; |
@@ -621,21 +446,6 @@ int sys_clone(struct pt_regs *regs) | |||
621 | } | 446 | } |
622 | 447 | ||
623 | /* | 448 | /* |
624 | * This is trivial, and on the face of it looks like it | ||
625 | * could equally well be done in user mode. | ||
626 | * | ||
627 | * Not so, for quite unobvious reasons - register pressure. | ||
628 | * In user mode vfork() cannot have a stack frame, and if | ||
629 | * done by calling the "clone()" system call directly, you | ||
630 | * do not have enough call-clobbered registers to hold all | ||
631 | * the information you need. | ||
632 | */ | ||
633 | int sys_vfork(struct pt_regs *regs) | ||
634 | { | ||
635 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL); | ||
636 | } | ||
637 | |||
638 | /* | ||
639 | * sys_execve() executes a new program. | 449 | * sys_execve() executes a new program. |
640 | */ | 450 | */ |
641 | int sys_execve(struct pt_regs *regs) | 451 | int sys_execve(struct pt_regs *regs) |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 836ef6575f01..abb7e6a7f0c6 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -237,61 +237,6 @@ void show_regs(struct pt_regs *regs) | |||
237 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); | 237 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); |
238 | } | 238 | } |
239 | 239 | ||
240 | /* | ||
241 | * Free current thread data structures etc.. | ||
242 | */ | ||
243 | void exit_thread(void) | ||
244 | { | ||
245 | struct task_struct *me = current; | ||
246 | struct thread_struct *t = &me->thread; | ||
247 | |||
248 | if (me->thread.io_bitmap_ptr) { | ||
249 | struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); | ||
250 | |||
251 | kfree(t->io_bitmap_ptr); | ||
252 | t->io_bitmap_ptr = NULL; | ||
253 | clear_thread_flag(TIF_IO_BITMAP); | ||
254 | /* | ||
255 | * Careful, clear this in the TSS too: | ||
256 | */ | ||
257 | memset(tss->io_bitmap, 0xff, t->io_bitmap_max); | ||
258 | t->io_bitmap_max = 0; | ||
259 | put_cpu(); | ||
260 | } | ||
261 | |||
262 | ds_exit_thread(current); | ||
263 | } | ||
264 | |||
265 | void flush_thread(void) | ||
266 | { | ||
267 | struct task_struct *tsk = current; | ||
268 | |||
269 | if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) { | ||
270 | clear_tsk_thread_flag(tsk, TIF_ABI_PENDING); | ||
271 | if (test_tsk_thread_flag(tsk, TIF_IA32)) { | ||
272 | clear_tsk_thread_flag(tsk, TIF_IA32); | ||
273 | } else { | ||
274 | set_tsk_thread_flag(tsk, TIF_IA32); | ||
275 | current_thread_info()->status |= TS_COMPAT; | ||
276 | } | ||
277 | } | ||
278 | clear_tsk_thread_flag(tsk, TIF_DEBUG); | ||
279 | |||
280 | tsk->thread.debugreg0 = 0; | ||
281 | tsk->thread.debugreg1 = 0; | ||
282 | tsk->thread.debugreg2 = 0; | ||
283 | tsk->thread.debugreg3 = 0; | ||
284 | tsk->thread.debugreg6 = 0; | ||
285 | tsk->thread.debugreg7 = 0; | ||
286 | memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); | ||
287 | /* | ||
288 | * Forget coprocessor state.. | ||
289 | */ | ||
290 | tsk->fpu_counter = 0; | ||
291 | clear_fpu(tsk); | ||
292 | clear_used_math(); | ||
293 | } | ||
294 | |||
295 | void release_thread(struct task_struct *dead_task) | 240 | void release_thread(struct task_struct *dead_task) |
296 | { | 241 | { |
297 | if (dead_task->mm) { | 242 | if (dead_task->mm) { |
@@ -425,118 +370,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) | |||
425 | } | 370 | } |
426 | EXPORT_SYMBOL_GPL(start_thread); | 371 | EXPORT_SYMBOL_GPL(start_thread); |
427 | 372 | ||
428 | static void hard_disable_TSC(void) | ||
429 | { | ||
430 | write_cr4(read_cr4() | X86_CR4_TSD); | ||
431 | } | ||
432 | |||
433 | void disable_TSC(void) | ||
434 | { | ||
435 | preempt_disable(); | ||
436 | if (!test_and_set_thread_flag(TIF_NOTSC)) | ||
437 | /* | ||
438 | * Must flip the CPU state synchronously with | ||
439 | * TIF_NOTSC in the current running context. | ||
440 | */ | ||
441 | hard_disable_TSC(); | ||
442 | preempt_enable(); | ||
443 | } | ||
444 | |||
445 | static void hard_enable_TSC(void) | ||
446 | { | ||
447 | write_cr4(read_cr4() & ~X86_CR4_TSD); | ||
448 | } | ||
449 | |||
450 | static void enable_TSC(void) | ||
451 | { | ||
452 | preempt_disable(); | ||
453 | if (test_and_clear_thread_flag(TIF_NOTSC)) | ||
454 | /* | ||
455 | * Must flip the CPU state synchronously with | ||
456 | * TIF_NOTSC in the current running context. | ||
457 | */ | ||
458 | hard_enable_TSC(); | ||
459 | preempt_enable(); | ||
460 | } | ||
461 | |||
462 | int get_tsc_mode(unsigned long adr) | ||
463 | { | ||
464 | unsigned int val; | ||
465 | |||
466 | if (test_thread_flag(TIF_NOTSC)) | ||
467 | val = PR_TSC_SIGSEGV; | ||
468 | else | ||
469 | val = PR_TSC_ENABLE; | ||
470 | |||
471 | return put_user(val, (unsigned int __user *)adr); | ||
472 | } | ||
473 | |||
474 | int set_tsc_mode(unsigned int val) | ||
475 | { | ||
476 | if (val == PR_TSC_SIGSEGV) | ||
477 | disable_TSC(); | ||
478 | else if (val == PR_TSC_ENABLE) | ||
479 | enable_TSC(); | ||
480 | else | ||
481 | return -EINVAL; | ||
482 | |||
483 | return 0; | ||
484 | } | ||
485 | |||
486 | /* | ||
487 | * This special macro can be used to load a debugging register | ||
488 | */ | ||
489 | #define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r) | ||
490 | |||
491 | static inline void __switch_to_xtra(struct task_struct *prev_p, | ||
492 | struct task_struct *next_p, | ||
493 | struct tss_struct *tss) | ||
494 | { | ||
495 | struct thread_struct *prev, *next; | ||
496 | |||
497 | prev = &prev_p->thread, | ||
498 | next = &next_p->thread; | ||
499 | |||
500 | if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) || | ||
501 | test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR)) | ||
502 | ds_switch_to(prev_p, next_p); | ||
503 | else if (next->debugctlmsr != prev->debugctlmsr) | ||
504 | update_debugctlmsr(next->debugctlmsr); | ||
505 | |||
506 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { | ||
507 | loaddebug(next, 0); | ||
508 | loaddebug(next, 1); | ||
509 | loaddebug(next, 2); | ||
510 | loaddebug(next, 3); | ||
511 | /* no 4 and 5 */ | ||
512 | loaddebug(next, 6); | ||
513 | loaddebug(next, 7); | ||
514 | } | ||
515 | |||
516 | if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ | ||
517 | test_tsk_thread_flag(next_p, TIF_NOTSC)) { | ||
518 | /* prev and next are different */ | ||
519 | if (test_tsk_thread_flag(next_p, TIF_NOTSC)) | ||
520 | hard_disable_TSC(); | ||
521 | else | ||
522 | hard_enable_TSC(); | ||
523 | } | ||
524 | |||
525 | if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { | ||
526 | /* | ||
527 | * Copy the relevant range of the IO bitmap. | ||
528 | * Normally this is 128 bytes or less: | ||
529 | */ | ||
530 | memcpy(tss->io_bitmap, next->io_bitmap_ptr, | ||
531 | max(prev->io_bitmap_max, next->io_bitmap_max)); | ||
532 | } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { | ||
533 | /* | ||
534 | * Clear any possible leftover bits: | ||
535 | */ | ||
536 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); | ||
537 | } | ||
538 | } | ||
539 | |||
540 | /* | 373 | /* |
541 | * switch_to(x,y) should switch tasks from x to y. | 374 | * switch_to(x,y) should switch tasks from x to y. |
542 | * | 375 | * |
@@ -694,11 +527,6 @@ void set_personality_64bit(void) | |||
694 | current->personality &= ~READ_IMPLIES_EXEC; | 527 | current->personality &= ~READ_IMPLIES_EXEC; |
695 | } | 528 | } |
696 | 529 | ||
697 | asmlinkage long sys_fork(struct pt_regs *regs) | ||
698 | { | ||
699 | return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); | ||
700 | } | ||
701 | |||
702 | asmlinkage long | 530 | asmlinkage long |
703 | sys_clone(unsigned long clone_flags, unsigned long newsp, | 531 | sys_clone(unsigned long clone_flags, unsigned long newsp, |
704 | void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) | 532 | void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) |
@@ -708,22 +536,6 @@ sys_clone(unsigned long clone_flags, unsigned long newsp, | |||
708 | return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); | 536 | return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); |
709 | } | 537 | } |
710 | 538 | ||
711 | /* | ||
712 | * This is trivial, and on the face of it looks like it | ||
713 | * could equally well be done in user mode. | ||
714 | * | ||
715 | * Not so, for quite unobvious reasons - register pressure. | ||
716 | * In user mode vfork() cannot have a stack frame, and if | ||
717 | * done by calling the "clone()" system call directly, you | ||
718 | * do not have enough call-clobbered registers to hold all | ||
719 | * the information you need. | ||
720 | */ | ||
721 | asmlinkage long sys_vfork(struct pt_regs *regs) | ||
722 | { | ||
723 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, | ||
724 | NULL, NULL); | ||
725 | } | ||
726 | |||
727 | unsigned long get_wchan(struct task_struct *p) | 539 | unsigned long get_wchan(struct task_struct *p) |
728 | { | 540 | { |
729 | unsigned long stack; | 541 | unsigned long stack; |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index fb2159a5c817..3d9672e59c16 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -1383,7 +1383,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, | |||
1383 | #ifdef CONFIG_X86_32 | 1383 | #ifdef CONFIG_X86_32 |
1384 | # define IS_IA32 1 | 1384 | # define IS_IA32 1 |
1385 | #elif defined CONFIG_IA32_EMULATION | 1385 | #elif defined CONFIG_IA32_EMULATION |
1386 | # define IS_IA32 test_thread_flag(TIF_IA32) | 1386 | # define IS_IA32 is_compat_task() |
1387 | #else | 1387 | #else |
1388 | # define IS_IA32 0 | 1388 | # define IS_IA32 0 |
1389 | #endif | 1389 | #endif |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 5b85759e7972..4c54bc0d8ff3 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -600,19 +600,7 @@ static int __init setup_elfcorehdr(char *arg) | |||
600 | early_param("elfcorehdr", setup_elfcorehdr); | 600 | early_param("elfcorehdr", setup_elfcorehdr); |
601 | #endif | 601 | #endif |
602 | 602 | ||
603 | static int __init default_update_apic(void) | 603 | static struct x86_quirks default_x86_quirks __initdata; |
604 | { | ||
605 | #ifdef CONFIG_SMP | ||
606 | if (!apic->wakeup_cpu) | ||
607 | apic->wakeup_cpu = wakeup_secondary_cpu_via_init; | ||
608 | #endif | ||
609 | |||
610 | return 0; | ||
611 | } | ||
612 | |||
613 | static struct x86_quirks default_x86_quirks __initdata = { | ||
614 | .update_apic = default_update_apic, | ||
615 | }; | ||
616 | 604 | ||
617 | struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; | 605 | struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; |
618 | 606 | ||
@@ -875,9 +863,7 @@ void __init setup_arch(char **cmdline_p) | |||
875 | 863 | ||
876 | reserve_initrd(); | 864 | reserve_initrd(); |
877 | 865 | ||
878 | #ifdef CONFIG_X86_64 | ||
879 | vsmp_init(); | 866 | vsmp_init(); |
880 | #endif | ||
881 | 867 | ||
882 | io_delay_init(); | 868 | io_delay_init(); |
883 | 869 | ||
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 7cdcd16885ed..d2cc6428c587 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -187,40 +187,35 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, | |||
187 | /* | 187 | /* |
188 | * Set up a signal frame. | 188 | * Set up a signal frame. |
189 | */ | 189 | */ |
190 | #ifdef CONFIG_X86_32 | ||
191 | static const struct { | ||
192 | u16 poplmovl; | ||
193 | u32 val; | ||
194 | u16 int80; | ||
195 | } __attribute__((packed)) retcode = { | ||
196 | 0xb858, /* popl %eax; movl $..., %eax */ | ||
197 | __NR_sigreturn, | ||
198 | 0x80cd, /* int $0x80 */ | ||
199 | }; | ||
200 | |||
201 | static const struct { | ||
202 | u8 movl; | ||
203 | u32 val; | ||
204 | u16 int80; | ||
205 | u8 pad; | ||
206 | } __attribute__((packed)) rt_retcode = { | ||
207 | 0xb8, /* movl $..., %eax */ | ||
208 | __NR_rt_sigreturn, | ||
209 | 0x80cd, /* int $0x80 */ | ||
210 | 0 | ||
211 | }; | ||
212 | 190 | ||
213 | /* | 191 | /* |
214 | * Determine which stack to use.. | 192 | * Determine which stack to use.. |
215 | */ | 193 | */ |
194 | static unsigned long align_sigframe(unsigned long sp) | ||
195 | { | ||
196 | #ifdef CONFIG_X86_32 | ||
197 | /* | ||
198 | * Align the stack pointer according to the i386 ABI, | ||
199 | * i.e. so that on function entry ((sp + 4) & 15) == 0. | ||
200 | */ | ||
201 | sp = ((sp + 4) & -16ul) - 4; | ||
202 | #else /* !CONFIG_X86_32 */ | ||
203 | sp = round_down(sp, 16) - 8; | ||
204 | #endif | ||
205 | return sp; | ||
206 | } | ||
207 | |||
216 | static inline void __user * | 208 | static inline void __user * |
217 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, | 209 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, |
218 | void **fpstate) | 210 | void __user **fpstate) |
219 | { | 211 | { |
220 | unsigned long sp; | ||
221 | |||
222 | /* Default to using normal stack */ | 212 | /* Default to using normal stack */ |
223 | sp = regs->sp; | 213 | unsigned long sp = regs->sp; |
214 | |||
215 | #ifdef CONFIG_X86_64 | ||
216 | /* redzone */ | ||
217 | sp -= 128; | ||
218 | #endif /* CONFIG_X86_64 */ | ||
224 | 219 | ||
225 | /* | 220 | /* |
226 | * If we are on the alternate signal stack and would overflow it, don't. | 221 | * If we are on the alternate signal stack and would overflow it, don't. |
@@ -234,30 +229,52 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, | |||
234 | if (sas_ss_flags(sp) == 0) | 229 | if (sas_ss_flags(sp) == 0) |
235 | sp = current->sas_ss_sp + current->sas_ss_size; | 230 | sp = current->sas_ss_sp + current->sas_ss_size; |
236 | } else { | 231 | } else { |
232 | #ifdef CONFIG_X86_32 | ||
237 | /* This is the legacy signal stack switching. */ | 233 | /* This is the legacy signal stack switching. */ |
238 | if ((regs->ss & 0xffff) != __USER_DS && | 234 | if ((regs->ss & 0xffff) != __USER_DS && |
239 | !(ka->sa.sa_flags & SA_RESTORER) && | 235 | !(ka->sa.sa_flags & SA_RESTORER) && |
240 | ka->sa.sa_restorer) | 236 | ka->sa.sa_restorer) |
241 | sp = (unsigned long) ka->sa.sa_restorer; | 237 | sp = (unsigned long) ka->sa.sa_restorer; |
238 | #endif /* CONFIG_X86_32 */ | ||
242 | } | 239 | } |
243 | 240 | ||
244 | if (used_math()) { | 241 | if (used_math()) { |
245 | sp = sp - sig_xstate_size; | 242 | sp -= sig_xstate_size; |
246 | *fpstate = (struct _fpstate *) sp; | 243 | #ifdef CONFIG_X86_64 |
244 | sp = round_down(sp, 64); | ||
245 | #endif /* CONFIG_X86_64 */ | ||
246 | *fpstate = (void __user *)sp; | ||
247 | |||
247 | if (save_i387_xstate(*fpstate) < 0) | 248 | if (save_i387_xstate(*fpstate) < 0) |
248 | return (void __user *)-1L; | 249 | return (void __user *)-1L; |
249 | } | 250 | } |
250 | 251 | ||
251 | sp -= frame_size; | 252 | return (void __user *)align_sigframe(sp - frame_size); |
252 | /* | ||
253 | * Align the stack pointer according to the i386 ABI, | ||
254 | * i.e. so that on function entry ((sp + 4) & 15) == 0. | ||
255 | */ | ||
256 | sp = ((sp + 4) & -16ul) - 4; | ||
257 | |||
258 | return (void __user *) sp; | ||
259 | } | 253 | } |
260 | 254 | ||
255 | #ifdef CONFIG_X86_32 | ||
256 | static const struct { | ||
257 | u16 poplmovl; | ||
258 | u32 val; | ||
259 | u16 int80; | ||
260 | } __attribute__((packed)) retcode = { | ||
261 | 0xb858, /* popl %eax; movl $..., %eax */ | ||
262 | __NR_sigreturn, | ||
263 | 0x80cd, /* int $0x80 */ | ||
264 | }; | ||
265 | |||
266 | static const struct { | ||
267 | u8 movl; | ||
268 | u32 val; | ||
269 | u16 int80; | ||
270 | u8 pad; | ||
271 | } __attribute__((packed)) rt_retcode = { | ||
272 | 0xb8, /* movl $..., %eax */ | ||
273 | __NR_rt_sigreturn, | ||
274 | 0x80cd, /* int $0x80 */ | ||
275 | 0 | ||
276 | }; | ||
277 | |||
261 | static int | 278 | static int |
262 | __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | 279 | __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, |
263 | struct pt_regs *regs) | 280 | struct pt_regs *regs) |
@@ -388,24 +405,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
388 | return 0; | 405 | return 0; |
389 | } | 406 | } |
390 | #else /* !CONFIG_X86_32 */ | 407 | #else /* !CONFIG_X86_32 */ |
391 | /* | ||
392 | * Determine which stack to use.. | ||
393 | */ | ||
394 | static void __user * | ||
395 | get_stack(struct k_sigaction *ka, unsigned long sp, unsigned long size) | ||
396 | { | ||
397 | /* Default to using normal stack - redzone*/ | ||
398 | sp -= 128; | ||
399 | |||
400 | /* This is the X/Open sanctioned signal stack switching. */ | ||
401 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
402 | if (sas_ss_flags(sp) == 0) | ||
403 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
404 | } | ||
405 | |||
406 | return (void __user *)round_down(sp - size, 64); | ||
407 | } | ||
408 | |||
409 | static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 408 | static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
410 | sigset_t *set, struct pt_regs *regs) | 409 | sigset_t *set, struct pt_regs *regs) |
411 | { | 410 | { |
@@ -414,15 +413,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
414 | int err = 0; | 413 | int err = 0; |
415 | struct task_struct *me = current; | 414 | struct task_struct *me = current; |
416 | 415 | ||
417 | if (used_math()) { | 416 | frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp); |
418 | fp = get_stack(ka, regs->sp, sig_xstate_size); | ||
419 | frame = (void __user *)round_down( | ||
420 | (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; | ||
421 | |||
422 | if (save_i387_xstate(fp) < 0) | ||
423 | return -EFAULT; | ||
424 | } else | ||
425 | frame = get_stack(ka, regs->sp, sizeof(struct rt_sigframe)) - 8; | ||
426 | 417 | ||
427 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 418 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
428 | return -EFAULT; | 419 | return -EFAULT; |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9ce666387f37..249334f5080a 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -112,7 +112,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map); | |||
112 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); | 112 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); |
113 | EXPORT_PER_CPU_SYMBOL(cpu_info); | 113 | EXPORT_PER_CPU_SYMBOL(cpu_info); |
114 | 114 | ||
115 | static atomic_t init_deasserted; | 115 | atomic_t init_deasserted; |
116 | 116 | ||
117 | 117 | ||
118 | /* Set if we find a B stepping CPU */ | 118 | /* Set if we find a B stepping CPU */ |
@@ -614,12 +614,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) | |||
614 | unsigned long send_status, accept_status = 0; | 614 | unsigned long send_status, accept_status = 0; |
615 | int maxlvt, num_starts, j; | 615 | int maxlvt, num_starts, j; |
616 | 616 | ||
617 | if (get_uv_system_type() == UV_NON_UNIQUE_APIC) { | ||
618 | send_status = uv_wakeup_secondary(phys_apicid, start_eip); | ||
619 | atomic_set(&init_deasserted, 1); | ||
620 | return send_status; | ||
621 | } | ||
622 | |||
623 | maxlvt = lapic_get_maxlvt(); | 617 | maxlvt = lapic_get_maxlvt(); |
624 | 618 | ||
625 | /* | 619 | /* |
@@ -748,7 +742,8 @@ static void __cpuinit do_fork_idle(struct work_struct *work) | |||
748 | /* | 742 | /* |
749 | * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad | 743 | * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad |
750 | * (ie clustered apic addressing mode), this is a LOGICAL apic ID. | 744 | * (ie clustered apic addressing mode), this is a LOGICAL apic ID. |
751 | * Returns zero if CPU booted OK, else error code from ->wakeup_cpu. | 745 | * Returns zero if CPU booted OK, else error code from |
746 | * ->wakeup_secondary_cpu. | ||
752 | */ | 747 | */ |
753 | static int __cpuinit do_boot_cpu(int apicid, int cpu) | 748 | static int __cpuinit do_boot_cpu(int apicid, int cpu) |
754 | { | 749 | { |
@@ -835,9 +830,13 @@ do_rest: | |||
835 | } | 830 | } |
836 | 831 | ||
837 | /* | 832 | /* |
838 | * Starting actual IPI sequence... | 833 | * Kick the secondary CPU. Use the method in the APIC driver |
834 | * if it's defined - or use an INIT boot APIC message otherwise: | ||
839 | */ | 835 | */ |
840 | boot_error = apic->wakeup_cpu(apicid, start_ip); | 836 | if (apic->wakeup_secondary_cpu) |
837 | boot_error = apic->wakeup_secondary_cpu(apicid, start_ip); | ||
838 | else | ||
839 | boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip); | ||
841 | 840 | ||
842 | if (!boot_error) { | 841 | if (!boot_error) { |
843 | /* | 842 | /* |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index c05430ac1b44..a1d288327ff0 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -118,47 +118,6 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err) | |||
118 | if (!user_mode_vm(regs)) | 118 | if (!user_mode_vm(regs)) |
119 | die(str, regs, err); | 119 | die(str, regs, err); |
120 | } | 120 | } |
121 | |||
122 | /* | ||
123 | * Perform the lazy TSS's I/O bitmap copy. If the TSS has an | ||
124 | * invalid offset set (the LAZY one) and the faulting thread has | ||
125 | * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS, | ||
126 | * we set the offset field correctly and return 1. | ||
127 | */ | ||
128 | static int lazy_iobitmap_copy(void) | ||
129 | { | ||
130 | struct thread_struct *thread; | ||
131 | struct tss_struct *tss; | ||
132 | int cpu; | ||
133 | |||
134 | cpu = get_cpu(); | ||
135 | tss = &per_cpu(init_tss, cpu); | ||
136 | thread = ¤t->thread; | ||
137 | |||
138 | if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY && | ||
139 | thread->io_bitmap_ptr) { | ||
140 | memcpy(tss->io_bitmap, thread->io_bitmap_ptr, | ||
141 | thread->io_bitmap_max); | ||
142 | /* | ||
143 | * If the previously set map was extending to higher ports | ||
144 | * than the current one, pad extra space with 0xff (no access). | ||
145 | */ | ||
146 | if (thread->io_bitmap_max < tss->io_bitmap_max) { | ||
147 | memset((char *) tss->io_bitmap + | ||
148 | thread->io_bitmap_max, 0xff, | ||
149 | tss->io_bitmap_max - thread->io_bitmap_max); | ||
150 | } | ||
151 | tss->io_bitmap_max = thread->io_bitmap_max; | ||
152 | tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; | ||
153 | tss->io_bitmap_owner = thread; | ||
154 | put_cpu(); | ||
155 | |||
156 | return 1; | ||
157 | } | ||
158 | put_cpu(); | ||
159 | |||
160 | return 0; | ||
161 | } | ||
162 | #endif | 121 | #endif |
163 | 122 | ||
164 | static void __kprobes | 123 | static void __kprobes |
@@ -309,11 +268,6 @@ do_general_protection(struct pt_regs *regs, long error_code) | |||
309 | conditional_sti(regs); | 268 | conditional_sti(regs); |
310 | 269 | ||
311 | #ifdef CONFIG_X86_32 | 270 | #ifdef CONFIG_X86_32 |
312 | if (lazy_iobitmap_copy()) { | ||
313 | /* restart the faulting instruction */ | ||
314 | return; | ||
315 | } | ||
316 | |||
317 | if (regs->flags & X86_VM_MASK) | 271 | if (regs->flags & X86_VM_MASK) |
318 | goto gp_in_vm86; | 272 | goto gp_in_vm86; |
319 | #endif | 273 | #endif |
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index c609205df594..74de562812cc 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <asm/paravirt.h> | 22 | #include <asm/paravirt.h> |
23 | #include <asm/setup.h> | 23 | #include <asm/setup.h> |
24 | 24 | ||
25 | #if defined CONFIG_PCI && defined CONFIG_PARAVIRT | 25 | #ifdef CONFIG_PARAVIRT |
26 | /* | 26 | /* |
27 | * Interrupt control on vSMPowered systems: | 27 | * Interrupt control on vSMPowered systems: |
28 | * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' | 28 | * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' |
@@ -114,7 +114,6 @@ static void __init set_vsmp_pv_ops(void) | |||
114 | } | 114 | } |
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | #ifdef CONFIG_PCI | ||
118 | static int is_vsmp = -1; | 117 | static int is_vsmp = -1; |
119 | 118 | ||
120 | static void __init detect_vsmp_box(void) | 119 | static void __init detect_vsmp_box(void) |
@@ -139,15 +138,6 @@ int is_vsmp_box(void) | |||
139 | return 0; | 138 | return 0; |
140 | } | 139 | } |
141 | } | 140 | } |
142 | #else | ||
143 | static void __init detect_vsmp_box(void) | ||
144 | { | ||
145 | } | ||
146 | int is_vsmp_box(void) | ||
147 | { | ||
148 | return 0; | ||
149 | } | ||
150 | #endif | ||
151 | 141 | ||
152 | void __init vsmp_init(void) | 142 | void __init vsmp_init(void) |
153 | { | 143 | { |
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 2b938a384910..08537747cb58 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ | 1 | obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ |
2 | pat.o pgtable.o gup.o | 2 | pat.o pgtable.o gup.o |
3 | 3 | ||
4 | obj-$(CONFIG_SMP) += tlb.o | 4 | obj-$(CONFIG_SMP) += tlb.o |
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index bcc079c282dd..00f127c80b0e 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c | |||
@@ -1,5 +1,6 @@ | |||
1 | #include <linux/highmem.h> | 1 | #include <linux/highmem.h> |
2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
3 | #include <linux/swap.h> /* for totalram_pages */ | ||
3 | 4 | ||
4 | void *kmap(struct page *page) | 5 | void *kmap(struct page *page) |
5 | { | 6 | { |
@@ -156,3 +157,36 @@ EXPORT_SYMBOL(kmap); | |||
156 | EXPORT_SYMBOL(kunmap); | 157 | EXPORT_SYMBOL(kunmap); |
157 | EXPORT_SYMBOL(kmap_atomic); | 158 | EXPORT_SYMBOL(kmap_atomic); |
158 | EXPORT_SYMBOL(kunmap_atomic); | 159 | EXPORT_SYMBOL(kunmap_atomic); |
160 | |||
161 | #ifdef CONFIG_NUMA | ||
162 | void __init set_highmem_pages_init(void) | ||
163 | { | ||
164 | struct zone *zone; | ||
165 | int nid; | ||
166 | |||
167 | for_each_zone(zone) { | ||
168 | unsigned long zone_start_pfn, zone_end_pfn; | ||
169 | |||
170 | if (!is_highmem(zone)) | ||
171 | continue; | ||
172 | |||
173 | zone_start_pfn = zone->zone_start_pfn; | ||
174 | zone_end_pfn = zone_start_pfn + zone->spanned_pages; | ||
175 | |||
176 | nid = zone_to_nid(zone); | ||
177 | printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n", | ||
178 | zone->name, nid, zone_start_pfn, zone_end_pfn); | ||
179 | |||
180 | add_highpages_with_active_regions(nid, zone_start_pfn, | ||
181 | zone_end_pfn); | ||
182 | } | ||
183 | totalram_pages += totalhigh_pages; | ||
184 | } | ||
185 | #else | ||
186 | void __init set_highmem_pages_init(void) | ||
187 | { | ||
188 | add_highpages_with_active_regions(0, highstart_pfn, highend_pfn); | ||
189 | |||
190 | totalram_pages += totalhigh_pages; | ||
191 | } | ||
192 | #endif /* CONFIG_NUMA */ | ||
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c new file mode 100644 index 000000000000..ce6a722587d8 --- /dev/null +++ b/arch/x86/mm/init.c | |||
@@ -0,0 +1,49 @@ | |||
1 | #include <linux/swap.h> | ||
2 | #include <asm/cacheflush.h> | ||
3 | #include <asm/page.h> | ||
4 | #include <asm/sections.h> | ||
5 | #include <asm/system.h> | ||
6 | |||
7 | void free_init_pages(char *what, unsigned long begin, unsigned long end) | ||
8 | { | ||
9 | unsigned long addr = begin; | ||
10 | |||
11 | if (addr >= end) | ||
12 | return; | ||
13 | |||
14 | /* | ||
15 | * If debugging page accesses then do not free this memory but | ||
16 | * mark them not present - any buggy init-section access will | ||
17 | * create a kernel page fault: | ||
18 | */ | ||
19 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
20 | printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", | ||
21 | begin, PAGE_ALIGN(end)); | ||
22 | set_memory_np(begin, (end - begin) >> PAGE_SHIFT); | ||
23 | #else | ||
24 | /* | ||
25 | * We just marked the kernel text read only above, now that | ||
26 | * we are going to free part of that, we need to make that | ||
27 | * writeable first. | ||
28 | */ | ||
29 | set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); | ||
30 | |||
31 | printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); | ||
32 | |||
33 | for (; addr < end; addr += PAGE_SIZE) { | ||
34 | ClearPageReserved(virt_to_page(addr)); | ||
35 | init_page_count(virt_to_page(addr)); | ||
36 | memset((void *)(addr & ~(PAGE_SIZE-1)), | ||
37 | POISON_FREE_INITMEM, PAGE_SIZE); | ||
38 | free_page(addr); | ||
39 | totalram_pages++; | ||
40 | } | ||
41 | #endif | ||
42 | } | ||
43 | |||
44 | void free_initmem(void) | ||
45 | { | ||
46 | free_init_pages("unused kernel memory", | ||
47 | (unsigned long)(&__init_begin), | ||
48 | (unsigned long)(&__init_end)); | ||
49 | } | ||
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index ef0bb941cdf5..47df0e1bbeb9 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -50,8 +50,6 @@ | |||
50 | #include <asm/setup.h> | 50 | #include <asm/setup.h> |
51 | #include <asm/cacheflush.h> | 51 | #include <asm/cacheflush.h> |
52 | 52 | ||
53 | unsigned int __VMALLOC_RESERVE = 128 << 20; | ||
54 | |||
55 | unsigned long max_low_pfn_mapped; | 53 | unsigned long max_low_pfn_mapped; |
56 | unsigned long max_pfn_mapped; | 54 | unsigned long max_pfn_mapped; |
57 | 55 | ||
@@ -486,22 +484,10 @@ void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn, | |||
486 | work_with_active_regions(nid, add_highpages_work_fn, &data); | 484 | work_with_active_regions(nid, add_highpages_work_fn, &data); |
487 | } | 485 | } |
488 | 486 | ||
489 | #ifndef CONFIG_NUMA | ||
490 | static void __init set_highmem_pages_init(void) | ||
491 | { | ||
492 | add_highpages_with_active_regions(0, highstart_pfn, highend_pfn); | ||
493 | |||
494 | totalram_pages += totalhigh_pages; | ||
495 | } | ||
496 | #endif /* !CONFIG_NUMA */ | ||
497 | |||
498 | #else | 487 | #else |
499 | static inline void permanent_kmaps_init(pgd_t *pgd_base) | 488 | static inline void permanent_kmaps_init(pgd_t *pgd_base) |
500 | { | 489 | { |
501 | } | 490 | } |
502 | static inline void set_highmem_pages_init(void) | ||
503 | { | ||
504 | } | ||
505 | #endif /* CONFIG_HIGHMEM */ | 491 | #endif /* CONFIG_HIGHMEM */ |
506 | 492 | ||
507 | void __init native_pagetable_setup_start(pgd_t *base) | 493 | void __init native_pagetable_setup_start(pgd_t *base) |
@@ -864,10 +850,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse) | |||
864 | unsigned long puds, pmds, ptes, tables, start; | 850 | unsigned long puds, pmds, ptes, tables, start; |
865 | 851 | ||
866 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; | 852 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; |
867 | tables = PAGE_ALIGN(puds * sizeof(pud_t)); | 853 | tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); |
868 | 854 | ||
869 | pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; | 855 | pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; |
870 | tables += PAGE_ALIGN(pmds * sizeof(pmd_t)); | 856 | tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); |
871 | 857 | ||
872 | if (use_pse) { | 858 | if (use_pse) { |
873 | unsigned long extra; | 859 | unsigned long extra; |
@@ -878,10 +864,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse) | |||
878 | } else | 864 | } else |
879 | ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; | 865 | ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; |
880 | 866 | ||
881 | tables += PAGE_ALIGN(ptes * sizeof(pte_t)); | 867 | tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); |
882 | 868 | ||
883 | /* for fixmap */ | 869 | /* for fixmap */ |
884 | tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t)); | 870 | tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); |
885 | 871 | ||
886 | /* | 872 | /* |
887 | * RED-PEN putting page tables only on node 0 could | 873 | * RED-PEN putting page tables only on node 0 could |
@@ -1231,45 +1217,6 @@ void mark_rodata_ro(void) | |||
1231 | } | 1217 | } |
1232 | #endif | 1218 | #endif |
1233 | 1219 | ||
1234 | void free_init_pages(char *what, unsigned long begin, unsigned long end) | ||
1235 | { | ||
1236 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1237 | /* | ||
1238 | * If debugging page accesses then do not free this memory but | ||
1239 | * mark them not present - any buggy init-section access will | ||
1240 | * create a kernel page fault: | ||
1241 | */ | ||
1242 | printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", | ||
1243 | begin, PAGE_ALIGN(end)); | ||
1244 | set_memory_np(begin, (end - begin) >> PAGE_SHIFT); | ||
1245 | #else | ||
1246 | unsigned long addr; | ||
1247 | |||
1248 | /* | ||
1249 | * We just marked the kernel text read only above, now that | ||
1250 | * we are going to free part of that, we need to make that | ||
1251 | * writeable first. | ||
1252 | */ | ||
1253 | set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); | ||
1254 | |||
1255 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | ||
1256 | ClearPageReserved(virt_to_page(addr)); | ||
1257 | init_page_count(virt_to_page(addr)); | ||
1258 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); | ||
1259 | free_page(addr); | ||
1260 | totalram_pages++; | ||
1261 | } | ||
1262 | printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); | ||
1263 | #endif | ||
1264 | } | ||
1265 | |||
1266 | void free_initmem(void) | ||
1267 | { | ||
1268 | free_init_pages("unused kernel memory", | ||
1269 | (unsigned long)(&__init_begin), | ||
1270 | (unsigned long)(&__init_end)); | ||
1271 | } | ||
1272 | |||
1273 | #ifdef CONFIG_BLK_DEV_INITRD | 1220 | #ifdef CONFIG_BLK_DEV_INITRD |
1274 | void free_initrd_mem(unsigned long start, unsigned long end) | 1221 | void free_initrd_mem(unsigned long start, unsigned long end) |
1275 | { | 1222 | { |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 7d4e76da3368..11981fc8570a 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -748,6 +748,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
748 | pos = start_pfn << PAGE_SHIFT; | 748 | pos = start_pfn << PAGE_SHIFT; |
749 | end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT) | 749 | end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT) |
750 | << (PMD_SHIFT - PAGE_SHIFT); | 750 | << (PMD_SHIFT - PAGE_SHIFT); |
751 | if (end_pfn > (end >> PAGE_SHIFT)) | ||
752 | end_pfn = end >> PAGE_SHIFT; | ||
751 | if (start_pfn < end_pfn) { | 753 | if (start_pfn < end_pfn) { |
752 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); | 754 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); |
753 | pos = end_pfn << PAGE_SHIFT; | 755 | pos = end_pfn << PAGE_SHIFT; |
@@ -979,43 +981,6 @@ void __init mem_init(void) | |||
979 | initsize >> 10); | 981 | initsize >> 10); |
980 | } | 982 | } |
981 | 983 | ||
982 | void free_init_pages(char *what, unsigned long begin, unsigned long end) | ||
983 | { | ||
984 | unsigned long addr = begin; | ||
985 | |||
986 | if (addr >= end) | ||
987 | return; | ||
988 | |||
989 | /* | ||
990 | * If debugging page accesses then do not free this memory but | ||
991 | * mark them not present - any buggy init-section access will | ||
992 | * create a kernel page fault: | ||
993 | */ | ||
994 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
995 | printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", | ||
996 | begin, PAGE_ALIGN(end)); | ||
997 | set_memory_np(begin, (end - begin) >> PAGE_SHIFT); | ||
998 | #else | ||
999 | printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); | ||
1000 | |||
1001 | for (; addr < end; addr += PAGE_SIZE) { | ||
1002 | ClearPageReserved(virt_to_page(addr)); | ||
1003 | init_page_count(virt_to_page(addr)); | ||
1004 | memset((void *)(addr & ~(PAGE_SIZE-1)), | ||
1005 | POISON_FREE_INITMEM, PAGE_SIZE); | ||
1006 | free_page(addr); | ||
1007 | totalram_pages++; | ||
1008 | } | ||
1009 | #endif | ||
1010 | } | ||
1011 | |||
1012 | void free_initmem(void) | ||
1013 | { | ||
1014 | free_init_pages("unused kernel memory", | ||
1015 | (unsigned long)(&__init_begin), | ||
1016 | (unsigned long)(&__init_end)); | ||
1017 | } | ||
1018 | |||
1019 | #ifdef CONFIG_DEBUG_RODATA | 984 | #ifdef CONFIG_DEBUG_RODATA |
1020 | const int rodata_test_data = 0xC3; | 985 | const int rodata_test_data = 0xC3; |
1021 | EXPORT_SYMBOL_GPL(rodata_test_data); | 986 | EXPORT_SYMBOL_GPL(rodata_test_data); |
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index ca53224fc56c..04102d42ff42 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c | |||
@@ -20,6 +20,17 @@ | |||
20 | #include <asm/pat.h> | 20 | #include <asm/pat.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | 22 | ||
23 | int is_io_mapping_possible(resource_size_t base, unsigned long size) | ||
24 | { | ||
25 | #ifndef CONFIG_X86_PAE | ||
26 | /* There is no way to map greater than 1 << 32 address without PAE */ | ||
27 | if (base + size > 0x100000000ULL) | ||
28 | return 0; | ||
29 | #endif | ||
30 | return 1; | ||
31 | } | ||
32 | EXPORT_SYMBOL_GPL(is_io_mapping_possible); | ||
33 | |||
23 | /* Map 'pfn' using fixed map 'type' and protections 'prot' | 34 | /* Map 'pfn' using fixed map 'type' and protections 'prot' |
24 | */ | 35 | */ |
25 | void * | 36 | void * |
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c index 9cab18b0b857..0bcd7883d036 100644 --- a/arch/x86/mm/memtest.c +++ b/arch/x86/mm/memtest.c | |||
@@ -9,44 +9,44 @@ | |||
9 | 9 | ||
10 | #include <asm/e820.h> | 10 | #include <asm/e820.h> |
11 | 11 | ||
12 | static void __init memtest(unsigned long start_phys, unsigned long size, | 12 | static u64 patterns[] __initdata = { |
13 | unsigned pattern) | 13 | 0, |
14 | 0xffffffffffffffffULL, | ||
15 | 0x5555555555555555ULL, | ||
16 | 0xaaaaaaaaaaaaaaaaULL, | ||
17 | 0x1111111111111111ULL, | ||
18 | 0x2222222222222222ULL, | ||
19 | 0x4444444444444444ULL, | ||
20 | 0x8888888888888888ULL, | ||
21 | 0x3333333333333333ULL, | ||
22 | 0x6666666666666666ULL, | ||
23 | 0x9999999999999999ULL, | ||
24 | 0xccccccccccccccccULL, | ||
25 | 0x7777777777777777ULL, | ||
26 | 0xbbbbbbbbbbbbbbbbULL, | ||
27 | 0xddddddddddddddddULL, | ||
28 | 0xeeeeeeeeeeeeeeeeULL, | ||
29 | 0x7a6c7258554e494cULL, /* yeah ;-) */ | ||
30 | }; | ||
31 | |||
32 | static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad) | ||
14 | { | 33 | { |
15 | unsigned long i; | 34 | printk(KERN_INFO " %016llx bad mem addr %010llx - %010llx reserved\n", |
16 | unsigned long *start; | 35 | (unsigned long long) pattern, |
17 | unsigned long start_bad; | 36 | (unsigned long long) start_bad, |
18 | unsigned long last_bad; | 37 | (unsigned long long) end_bad); |
19 | unsigned long val; | 38 | reserve_early(start_bad, end_bad, "BAD RAM"); |
20 | unsigned long start_phys_aligned; | 39 | } |
21 | unsigned long count; | ||
22 | unsigned long incr; | ||
23 | |||
24 | switch (pattern) { | ||
25 | case 0: | ||
26 | val = 0UL; | ||
27 | break; | ||
28 | case 1: | ||
29 | val = -1UL; | ||
30 | break; | ||
31 | case 2: | ||
32 | #ifdef CONFIG_X86_64 | ||
33 | val = 0x5555555555555555UL; | ||
34 | #else | ||
35 | val = 0x55555555UL; | ||
36 | #endif | ||
37 | break; | ||
38 | case 3: | ||
39 | #ifdef CONFIG_X86_64 | ||
40 | val = 0xaaaaaaaaaaaaaaaaUL; | ||
41 | #else | ||
42 | val = 0xaaaaaaaaUL; | ||
43 | #endif | ||
44 | break; | ||
45 | default: | ||
46 | return; | ||
47 | } | ||
48 | 40 | ||
49 | incr = sizeof(unsigned long); | 41 | static void __init memtest(u64 pattern, u64 start_phys, u64 size) |
42 | { | ||
43 | u64 i, count; | ||
44 | u64 *start; | ||
45 | u64 start_bad, last_bad; | ||
46 | u64 start_phys_aligned; | ||
47 | size_t incr; | ||
48 | |||
49 | incr = sizeof(pattern); | ||
50 | start_phys_aligned = ALIGN(start_phys, incr); | 50 | start_phys_aligned = ALIGN(start_phys, incr); |
51 | count = (size - (start_phys_aligned - start_phys))/incr; | 51 | count = (size - (start_phys_aligned - start_phys))/incr; |
52 | start = __va(start_phys_aligned); | 52 | start = __va(start_phys_aligned); |
@@ -54,25 +54,42 @@ static void __init memtest(unsigned long start_phys, unsigned long size, | |||
54 | last_bad = 0; | 54 | last_bad = 0; |
55 | 55 | ||
56 | for (i = 0; i < count; i++) | 56 | for (i = 0; i < count; i++) |
57 | start[i] = val; | 57 | start[i] = pattern; |
58 | for (i = 0; i < count; i++, start++, start_phys_aligned += incr) { | 58 | for (i = 0; i < count; i++, start++, start_phys_aligned += incr) { |
59 | if (*start != val) { | 59 | if (*start == pattern) |
60 | if (start_phys_aligned == last_bad + incr) { | 60 | continue; |
61 | last_bad += incr; | 61 | if (start_phys_aligned == last_bad + incr) { |
62 | } else { | 62 | last_bad += incr; |
63 | if (start_bad) { | 63 | continue; |
64 | printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved", | ||
65 | val, start_bad, last_bad + incr); | ||
66 | reserve_early(start_bad, last_bad + incr, "BAD RAM"); | ||
67 | } | ||
68 | start_bad = last_bad = start_phys_aligned; | ||
69 | } | ||
70 | } | 64 | } |
65 | if (start_bad) | ||
66 | reserve_bad_mem(pattern, start_bad, last_bad + incr); | ||
67 | start_bad = last_bad = start_phys_aligned; | ||
71 | } | 68 | } |
72 | if (start_bad) { | 69 | if (start_bad) |
73 | printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved", | 70 | reserve_bad_mem(pattern, start_bad, last_bad + incr); |
74 | val, start_bad, last_bad + incr); | 71 | } |
75 | reserve_early(start_bad, last_bad + incr, "BAD RAM"); | 72 | |
73 | static void __init do_one_pass(u64 pattern, u64 start, u64 end) | ||
74 | { | ||
75 | u64 size = 0; | ||
76 | |||
77 | while (start < end) { | ||
78 | start = find_e820_area_size(start, &size, 1); | ||
79 | |||
80 | /* done ? */ | ||
81 | if (start >= end) | ||
82 | break; | ||
83 | if (start + size > end) | ||
84 | size = end - start; | ||
85 | |||
86 | printk(KERN_INFO " %010llx - %010llx pattern %016llx\n", | ||
87 | (unsigned long long) start, | ||
88 | (unsigned long long) start + size, | ||
89 | (unsigned long long) cpu_to_be64(pattern)); | ||
90 | memtest(pattern, start, size); | ||
91 | |||
92 | start += size; | ||
76 | } | 93 | } |
77 | } | 94 | } |
78 | 95 | ||
@@ -90,33 +107,22 @@ early_param("memtest", parse_memtest); | |||
90 | 107 | ||
91 | void __init early_memtest(unsigned long start, unsigned long end) | 108 | void __init early_memtest(unsigned long start, unsigned long end) |
92 | { | 109 | { |
93 | u64 t_start, t_size; | 110 | unsigned int i; |
94 | unsigned pattern; | 111 | unsigned int idx = 0; |
95 | 112 | ||
96 | if (!memtest_pattern) | 113 | if (!memtest_pattern) |
97 | return; | 114 | return; |
98 | 115 | ||
99 | printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern); | 116 | printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern); |
100 | for (pattern = 0; pattern < memtest_pattern; pattern++) { | 117 | for (i = 0; i < memtest_pattern; i++) { |
101 | t_start = start; | 118 | idx = i % ARRAY_SIZE(patterns); |
102 | t_size = 0; | 119 | do_one_pass(patterns[idx], start, end); |
103 | while (t_start < end) { | 120 | } |
104 | t_start = find_e820_area_size(t_start, &t_size, 1); | ||
105 | |||
106 | /* done ? */ | ||
107 | if (t_start >= end) | ||
108 | break; | ||
109 | if (t_start + t_size > end) | ||
110 | t_size = end - t_start; | ||
111 | |||
112 | printk(KERN_CONT "\n %010llx - %010llx pattern %d", | ||
113 | (unsigned long long)t_start, | ||
114 | (unsigned long long)t_start + t_size, pattern); | ||
115 | |||
116 | memtest(t_start, t_size, pattern); | ||
117 | 121 | ||
118 | t_start += t_size; | 122 | if (idx > 0) { |
119 | } | 123 | printk(KERN_INFO "early_memtest: wipe out " |
124 | "test pattern from memory\n"); | ||
125 | /* additional test with pattern 0 will do this */ | ||
126 | do_one_pass(0, start, end); | ||
120 | } | 127 | } |
121 | printk(KERN_CONT "\n"); | ||
122 | } | 128 | } |
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 3957cd6d6454..451fe95a0352 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c | |||
@@ -423,32 +423,6 @@ void __init initmem_init(unsigned long start_pfn, | |||
423 | setup_bootmem_allocator(); | 423 | setup_bootmem_allocator(); |
424 | } | 424 | } |
425 | 425 | ||
426 | void __init set_highmem_pages_init(void) | ||
427 | { | ||
428 | #ifdef CONFIG_HIGHMEM | ||
429 | struct zone *zone; | ||
430 | int nid; | ||
431 | |||
432 | for_each_zone(zone) { | ||
433 | unsigned long zone_start_pfn, zone_end_pfn; | ||
434 | |||
435 | if (!is_highmem(zone)) | ||
436 | continue; | ||
437 | |||
438 | zone_start_pfn = zone->zone_start_pfn; | ||
439 | zone_end_pfn = zone_start_pfn + zone->spanned_pages; | ||
440 | |||
441 | nid = zone_to_nid(zone); | ||
442 | printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n", | ||
443 | zone->name, nid, zone_start_pfn, zone_end_pfn); | ||
444 | |||
445 | add_highpages_with_active_regions(nid, zone_start_pfn, | ||
446 | zone_end_pfn); | ||
447 | } | ||
448 | totalram_pages += totalhigh_pages; | ||
449 | #endif | ||
450 | } | ||
451 | |||
452 | #ifdef CONFIG_MEMORY_HOTPLUG | 426 | #ifdef CONFIG_MEMORY_HOTPLUG |
453 | static int paddr_to_nid(u64 addr) | 427 | static int paddr_to_nid(u64 addr) |
454 | { | 428 | { |
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 05f9aef6818a..2ed37158012d 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/bootmem.h> | 11 | #include <linux/bootmem.h> |
12 | #include <linux/debugfs.h> | 12 | #include <linux/debugfs.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | ||
14 | #include <linux/gfp.h> | 15 | #include <linux/gfp.h> |
15 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
16 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
@@ -634,6 +635,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot) | |||
634 | } | 635 | } |
635 | 636 | ||
636 | /* | 637 | /* |
638 | * Change the memory type for the physial address range in kernel identity | ||
639 | * mapping space if that range is a part of identity map. | ||
640 | */ | ||
641 | int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) | ||
642 | { | ||
643 | unsigned long id_sz; | ||
644 | |||
645 | if (!pat_enabled || base >= __pa(high_memory)) | ||
646 | return 0; | ||
647 | |||
648 | id_sz = (__pa(high_memory) < base + size) ? | ||
649 | __pa(high_memory) - base : | ||
650 | size; | ||
651 | |||
652 | if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) { | ||
653 | printk(KERN_INFO | ||
654 | "%s:%d ioremap_change_attr failed %s " | ||
655 | "for %Lx-%Lx\n", | ||
656 | current->comm, current->pid, | ||
657 | cattr_name(flags), | ||
658 | base, (unsigned long long)(base + size)); | ||
659 | return -EINVAL; | ||
660 | } | ||
661 | return 0; | ||
662 | } | ||
663 | |||
664 | /* | ||
637 | * Internal interface to reserve a range of physical memory with prot. | 665 | * Internal interface to reserve a range of physical memory with prot. |
638 | * Reserved non RAM regions only and after successful reserve_memtype, | 666 | * Reserved non RAM regions only and after successful reserve_memtype, |
639 | * this func also keeps identity mapping (if any) in sync with this new prot. | 667 | * this func also keeps identity mapping (if any) in sync with this new prot. |
@@ -642,7 +670,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, | |||
642 | int strict_prot) | 670 | int strict_prot) |
643 | { | 671 | { |
644 | int is_ram = 0; | 672 | int is_ram = 0; |
645 | int id_sz, ret; | 673 | int ret; |
646 | unsigned long flags; | 674 | unsigned long flags; |
647 | unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); | 675 | unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); |
648 | 676 | ||
@@ -679,23 +707,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, | |||
679 | flags); | 707 | flags); |
680 | } | 708 | } |
681 | 709 | ||
682 | /* Need to keep identity mapping in sync */ | 710 | if (kernel_map_sync_memtype(paddr, size, flags) < 0) { |
683 | if (paddr >= __pa(high_memory)) | ||
684 | return 0; | ||
685 | |||
686 | id_sz = (__pa(high_memory) < paddr + size) ? | ||
687 | __pa(high_memory) - paddr : | ||
688 | size; | ||
689 | |||
690 | if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) { | ||
691 | free_memtype(paddr, paddr + size); | 711 | free_memtype(paddr, paddr + size); |
692 | printk(KERN_ERR | ||
693 | "%s:%d reserve_pfn_range ioremap_change_attr failed %s " | ||
694 | "for %Lx-%Lx\n", | ||
695 | current->comm, current->pid, | ||
696 | cattr_name(flags), | ||
697 | (unsigned long long)paddr, | ||
698 | (unsigned long long)(paddr + size)); | ||
699 | return -EINVAL; | 712 | return -EINVAL; |
700 | } | 713 | } |
701 | return 0; | 714 | return 0; |
@@ -877,6 +890,7 @@ pgprot_t pgprot_writecombine(pgprot_t prot) | |||
877 | else | 890 | else |
878 | return pgprot_noncached(prot); | 891 | return pgprot_noncached(prot); |
879 | } | 892 | } |
893 | EXPORT_SYMBOL_GPL(pgprot_writecombine); | ||
880 | 894 | ||
881 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) | 895 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) |
882 | 896 | ||
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 86f2ffc43c3d..5b7c7c8464fe 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -313,6 +313,24 @@ int ptep_clear_flush_young(struct vm_area_struct *vma, | |||
313 | return young; | 313 | return young; |
314 | } | 314 | } |
315 | 315 | ||
316 | /** | ||
317 | * reserve_top_address - reserves a hole in the top of kernel address space | ||
318 | * @reserve - size of hole to reserve | ||
319 | * | ||
320 | * Can be used to relocate the fixmap area and poke a hole in the top | ||
321 | * of kernel address space to make room for a hypervisor. | ||
322 | */ | ||
323 | void __init reserve_top_address(unsigned long reserve) | ||
324 | { | ||
325 | #ifdef CONFIG_X86_32 | ||
326 | BUG_ON(fixmaps_set > 0); | ||
327 | printk(KERN_INFO "Reserving virtual address space above 0x%08x\n", | ||
328 | (int)-reserve); | ||
329 | __FIXADDR_TOP = -reserve - PAGE_SIZE; | ||
330 | __VMALLOC_RESERVE += reserve; | ||
331 | #endif | ||
332 | } | ||
333 | |||
316 | int fixmaps_set; | 334 | int fixmaps_set; |
317 | 335 | ||
318 | void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) | 336 | void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) |
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 0951db9ee519..f2e477c91c1b 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c | |||
@@ -20,6 +20,8 @@ | |||
20 | #include <asm/tlb.h> | 20 | #include <asm/tlb.h> |
21 | #include <asm/tlbflush.h> | 21 | #include <asm/tlbflush.h> |
22 | 22 | ||
23 | unsigned int __VMALLOC_RESERVE = 128 << 20; | ||
24 | |||
23 | /* | 25 | /* |
24 | * Associate a virtual page frame with a given physical page frame | 26 | * Associate a virtual page frame with a given physical page frame |
25 | * and protection flags for that frame. | 27 | * and protection flags for that frame. |
@@ -97,22 +99,6 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) | |||
97 | unsigned long __FIXADDR_TOP = 0xfffff000; | 99 | unsigned long __FIXADDR_TOP = 0xfffff000; |
98 | EXPORT_SYMBOL(__FIXADDR_TOP); | 100 | EXPORT_SYMBOL(__FIXADDR_TOP); |
99 | 101 | ||
100 | /** | ||
101 | * reserve_top_address - reserves a hole in the top of kernel address space | ||
102 | * @reserve - size of hole to reserve | ||
103 | * | ||
104 | * Can be used to relocate the fixmap area and poke a hole in the top | ||
105 | * of kernel address space to make room for a hypervisor. | ||
106 | */ | ||
107 | void __init reserve_top_address(unsigned long reserve) | ||
108 | { | ||
109 | BUG_ON(fixmaps_set > 0); | ||
110 | printk(KERN_INFO "Reserving virtual address space above 0x%08x\n", | ||
111 | (int)-reserve); | ||
112 | __FIXADDR_TOP = -reserve - PAGE_SIZE; | ||
113 | __VMALLOC_RESERVE += reserve; | ||
114 | } | ||
115 | |||
116 | /* | 102 | /* |
117 | * vmalloc=size forces the vmalloc area to be exactly 'size' | 103 | * vmalloc=size forces the vmalloc area to be exactly 'size' |
118 | * bytes. This can be used to increase (or decrease) the | 104 | * bytes. This can be used to increase (or decrease) the |
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index e9f80c744cf3..10131fbdaada 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c | |||
@@ -78,8 +78,18 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) | |||
78 | if (cpu_has_arch_perfmon) { | 78 | if (cpu_has_arch_perfmon) { |
79 | union cpuid10_eax eax; | 79 | union cpuid10_eax eax; |
80 | eax.full = cpuid_eax(0xa); | 80 | eax.full = cpuid_eax(0xa); |
81 | if (counter_width < eax.split.bit_width) | 81 | |
82 | counter_width = eax.split.bit_width; | 82 | /* |
83 | * For Core2 (family 6, model 15), don't reset the | ||
84 | * counter width: | ||
85 | */ | ||
86 | if (!(eax.split.version_id == 0 && | ||
87 | current_cpu_data.x86 == 6 && | ||
88 | current_cpu_data.x86_model == 15)) { | ||
89 | |||
90 | if (counter_width < eax.split.bit_width) | ||
91 | counter_width = eax.split.bit_width; | ||
92 | } | ||
83 | } | 93 | } |
84 | 94 | ||
85 | /* clear all counters */ | 95 | /* clear all counters */ |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 352ea6830659..82cd39a6cbd3 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -942,6 +942,9 @@ asmlinkage void __init xen_start_kernel(void) | |||
942 | possible map and a non-dummy shared_info. */ | 942 | possible map and a non-dummy shared_info. */ |
943 | per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; | 943 | per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; |
944 | 944 | ||
945 | local_irq_disable(); | ||
946 | early_boot_irqs_off(); | ||
947 | |||
945 | xen_raw_console_write("mapping kernel into physical memory\n"); | 948 | xen_raw_console_write("mapping kernel into physical memory\n"); |
946 | pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); | 949 | pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); |
947 | 950 | ||