author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-10 02:29:57 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-10 02:29:57 -0500
commit    0b6ca82af83a79f3d1001c8a0701ed34ac38126e (patch)
tree      def8eb112c513b21e826e370f2f34249e97914eb /arch/x86/kernel
parent    bfc1de0c40a26c6daa46c297e28138aecb4c5664 (diff)
parent    fac84939609a683503947f41eb93e1917d026263 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (32 commits)
  x86: cpa, strict range check in try_preserve_large_page()
  x86: cpa, enable CONFIG_DEBUG_PAGEALLOC on 64-bit
  x86: cpa, use page pool
  x86: introduce page pool in cpa
  x86: DEBUG_PAGEALLOC: enable after mem_init()
  brk: help text typo fix
  lguest: accept guest _PAGE_PWT page table entries
  x86 PM: update stale comments
  x86 PM: consolidate suspend and hibernation code
  x86 PM: rename 32-bit files in arch/x86/power
  x86 PM: move 64-bit hibernation files to arch/x86/power
  x86: trivial printk optimizations
  x86: fix early_ioremap pagetable ops
  x86: construct 32-bit boot time page tables in native format.
  x86, core: remove CONFIG_FORCED_INLINING
  x86: avoid unused variable warning in mm/init_64.c
  x86: fixup more paravirt fallout
  brk: document randomize_va_space and CONFIG_COMPAT_BRK (was Re:
  x86: fix sparse warnings in acpi/bus.c
  x86: fix sparse warning in topology.c
  ...
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/Makefile                  |   2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c  |   2
-rw-r--r--  arch/x86/kernel/entry_32.S                |  15
-rw-r--r--  arch/x86/kernel/entry_64.S                |  18
-rw-r--r--  arch/x86/kernel/geode_32.c                |   5
-rw-r--r--  arch/x86/kernel/head_32.S                 | 151
-rw-r--r--  arch/x86/kernel/mfgpt_32.c                | 123
-rw-r--r--  arch/x86/kernel/setup_32.c                |   4
-rw-r--r--  arch/x86/kernel/suspend_64.c              | 320
-rw-r--r--  arch/x86/kernel/suspend_asm_64.S          | 141
-rw-r--r--  arch/x86/kernel/topology.c                |   2
11 files changed, 207 insertions, 576 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 21dc1a061bf1..76ec0f8f138a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -84,8 +84,6 @@ ifeq ($(CONFIG_X86_64),y)
 obj-y += genapic_64.o genapic_flat_64.o
 obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
 obj-$(CONFIG_AUDIT) += audit_64.o
-obj-$(CONFIG_PM) += suspend_64.o
-obj-$(CONFIG_HIBERNATION) += suspend_asm_64.o
 
 obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 24885be5c48c..9b7e01daa1ca 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -118,7 +118,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
 
 static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
 {
-	return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
+	sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
 }
 
 /* Mutex protecting device creation against CPU hotplug */
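A side note on the change above (the topology.c hunk at the bottom of this diff is the same fix): sysfs_remove_group() returns void, so "return sysfs_remove_group(...)" returns a void expression from a void function, which strict ISO C90 rejects (gcc accepts it quietly and only complains under -pedantic). A minimal stand-alone sketch of the pattern, with hypothetical function names:

#include <stdio.h>

static void remove_group(void)
{
	puts("group removed");
}

/* "return <void expression>;" - gcc accepts it, strict ISO C90 does not */
static void remove_dev_old(void)
{
	return remove_group();
}

/* the cleaned-up form: plain call, implicit return */
static void remove_dev_new(void)
{
	remove_group();
}

int main(void)
{
	remove_dev_old();
	remove_dev_new();
	return 0;
}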
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index be5c31d04884..824e21b80aad 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -409,7 +409,8 @@ restore_nocheck_notrace:
 	RESTORE_REGS
 	addl $4, %esp		# skip orig_eax/error_code
 	CFI_ADJUST_CFA_OFFSET -4
-1:	INTERRUPT_RETURN
+ENTRY(irq_return)
+	INTERRUPT_RETURN
 .section .fixup,"ax"
 iret_exc:
 	pushl $0		# no error code
@@ -418,7 +419,7 @@ iret_exc:
 .previous
 .section __ex_table,"a"
 	.align 4
-	.long 1b,iret_exc
+	.long irq_return,iret_exc
 .previous
 
 	CFI_RESTORE_STATE
@@ -865,20 +866,16 @@ nmi_espfix_stack:
 	RESTORE_REGS
 	lss 12+4(%esp), %esp	# back to espfix stack
 	CFI_ADJUST_CFA_OFFSET -24
-1:	INTERRUPT_RETURN
+	jmp irq_return
 	CFI_ENDPROC
-.section __ex_table,"a"
-	.align 4
-	.long 1b,iret_exc
-.previous
 KPROBE_END(nmi)
 
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_iret)
-1:	iret
+	iret
 .section __ex_table,"a"
 	.align 4
-	.long 1b,iret_exc
+	.long native_iret, iret_exc
 .previous
 END(native_iret)
 
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index c7341e81941c..6be39a387c5a 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -581,16 +581,24 @@ retint_restore_args: /* return to kernel space */
 	 */
 	TRACE_IRQS_IRETQ
 restore_args:
 	RESTORE_ARGS 0,8,0
-#ifdef CONFIG_PARAVIRT
+
+ENTRY(irq_return)
 	INTERRUPT_RETURN
-#endif
+
+	.section __ex_table, "a"
+	.quad irq_return, bad_iret
+	.previous
+
+#ifdef CONFIG_PARAVIRT
 ENTRY(native_iret)
 	iretq
 
 	.section __ex_table,"a"
 	.quad native_iret, bad_iret
 	.previous
+#endif
+
 	.section .fixup,"ax"
 bad_iret:
 	/*
@@ -804,7 +812,7 @@ paranoid_swapgs\trace:
 	SWAPGS_UNSAFE_STACK
 paranoid_restore\trace:
 	RESTORE_ALL 8
-	INTERRUPT_RETURN
+	jmp irq_return
 paranoid_userspace\trace:
 	GET_THREAD_INFO(%rcx)
 	movl threadinfo_flags(%rcx),%ebx
@@ -919,7 +927,7 @@ error_kernelspace:
 	   iret run with kernel gs again, so don't set the user space flag.
 	   B stepping K8s sometimes report an truncated RIP for IRET
 	   exceptions returning to compat mode. Check for these here too. */
-	leaq native_iret(%rip),%rbp
+	leaq irq_return(%rip),%rbp
 	cmpq %rbp,RIP(%rsp)
 	je error_swapgs
 	movl %ebp,%ebp	/* zero extend */
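Both the entry_32.S and entry_64.S hunks above funnel every iret/iretq path through one shared irq_return label, so a single __ex_table entry (irq_return -> iret_exc on 32-bit, irq_return -> bad_iret on 64-bit) covers all of them instead of one anonymous "1:" label per call site. Roughly, the exception table is an array of (possibly-faulting address, fixup address) pairs that the trap handler searches. A user-space sketch of that lookup idea, with made-up addresses and names (the real table is emitted by the assembler into the __ex_table section and searched at trap time):

#include <stddef.h>
#include <stdio.h>

/* One entry: address that may fault -> address of its fixup code */
struct exception_table_entry {
	unsigned long insn;
	unsigned long fixup;
};

static struct exception_table_entry extable[] = {
	{ 0x1000, 0x2000 },	/* e.g. irq_return  -> iret_exc */
	{ 0x1040, 0x2080 },	/* e.g. native_iret -> bad_iret */
};

/* On a fault, the trap handler looks the faulting PC up here */
static unsigned long search_exception_table(unsigned long pc)
{
	size_t i;

	for (i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
		if (extable[i].insn == pc)
			return extable[i].fixup;
	return 0;	/* no fixup: a genuine kernel fault */
}

int main(void)
{
	printf("fixup for 0x1000: 0x%lx\n", search_exception_table(0x1000));
	return 0;
}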
diff --git a/arch/x86/kernel/geode_32.c b/arch/x86/kernel/geode_32.c
index 9c7f7d395968..9dad6ca6cd70 100644
--- a/arch/x86/kernel/geode_32.c
+++ b/arch/x86/kernel/geode_32.c
@@ -163,14 +163,11 @@ EXPORT_SYMBOL_GPL(geode_gpio_setup_event);
 
 static int __init geode_southbridge_init(void)
 {
-	int timers;
-
 	if (!is_geode())
 		return -ENODEV;
 
 	init_lbars();
-	timers = geode_mfgpt_detect();
-	printk(KERN_INFO "geode: %d MFGPT timers available.\n", timers);
+	(void) mfgpt_timer_setup();
 	return 0;
 }
 
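The detection call removed here does not disappear: geode_mfgpt_detect() becomes static in mfgpt_32.c (further down in this diff) and runs lazily from geode_mfgpt_alloc_timer(), keyed off a "timers == -1" sentinel. A minimal sketch of that sentinel-based lazy-probe pattern, with hypothetical names and a faked probe result:

#include <stdio.h>

static int timers = -1;	/* -1: not probed yet; >= 0: free timers found */

static void detect(void)
{
	timers = 2;	/* pretend the probe found two free timers */
	printf("geode-mfgpt: %d MFGPT timers available.\n", timers);
}

static int alloc_timer(void)
{
	if (timers == -1)	/* first caller triggers the probe */
		detect();
	if (timers <= 0)
		return -1;
	return --timers;	/* hand out the highest free index */
}

int main(void)
{
	printf("got timer %d\n", alloc_timer());
	printf("got timer %d\n", alloc_timer());
	printf("got timer %d\n", alloc_timer());	/* exhausted: -1 */
	return 0;
}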
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 5d8c5730686b..74ef4a41f224 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -19,6 +19,10 @@
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 #include <asm/setup.h>
+#include <asm/processor-flags.h>
+
+/* Physical address */
+#define pa(X) ((X) - __PAGE_OFFSET)
 
 /*
  * References to members of the new_cpu_data structure.
@@ -80,10 +84,6 @@ INIT_MAP_BEYOND_END = BOOTBITMAP_SIZE + (PAGE_TABLE_SIZE + ALLOCATOR_SLOP)*PAGE_
  */
 .section .text.head,"ax",@progbits
 ENTRY(startup_32)
-	/* check to see if KEEP_SEGMENTS flag is meaningful */
-	cmpw $0x207, BP_version(%esi)
-	jb 1f
-
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	   us to not reload segments */
 	testb $(1<<6), BP_loadflags(%esi)
@@ -92,7 +92,7 @@ ENTRY(startup_32)
 /*
  * Set segments to known values.
  */
-1:	lgdt boot_gdt_descr - __PAGE_OFFSET
+	lgdt pa(boot_gdt_descr)
 	movl $(__BOOT_DS),%eax
 	movl %eax,%ds
 	movl %eax,%es
@@ -105,8 +105,8 @@ ENTRY(startup_32)
  */
 	cld
 	xorl %eax,%eax
-	movl $__bss_start - __PAGE_OFFSET,%edi
-	movl $__bss_stop - __PAGE_OFFSET,%ecx
+	movl $pa(__bss_start),%edi
+	movl $pa(__bss_stop),%ecx
 	subl %edi,%ecx
 	shrl $2,%ecx
 	rep ; stosl
@@ -118,31 +118,32 @@ ENTRY(startup_32)
  * (kexec on panic case). Hence copy out the parameters before initializing
  * page tables.
  */
-	movl $(boot_params - __PAGE_OFFSET),%edi
+	movl $pa(boot_params),%edi
 	movl $(PARAM_SIZE/4),%ecx
 	cld
 	rep
 	movsl
-	movl boot_params - __PAGE_OFFSET + NEW_CL_POINTER,%esi
+	movl pa(boot_params) + NEW_CL_POINTER,%esi
 	andl %esi,%esi
 	jz 1f			# No comand line
-	movl $(boot_command_line - __PAGE_OFFSET),%edi
+	movl $pa(boot_command_line),%edi
 	movl $(COMMAND_LINE_SIZE/4),%ecx
 	rep
 	movsl
 1:
 
 #ifdef CONFIG_PARAVIRT
-	cmpw $0x207, (boot_params + BP_version - __PAGE_OFFSET)
+	/* This is can only trip for a broken bootloader... */
+	cmpw $0x207, pa(boot_params + BP_version)
 	jb default_entry
 
 	/* Paravirt-compatible boot parameters.  Look to see what architecture
 	   we're booting under. */
-	movl (boot_params + BP_hardware_subarch - __PAGE_OFFSET), %eax
+	movl pa(boot_params + BP_hardware_subarch), %eax
 	cmpl $num_subarch_entries, %eax
 	jae bad_subarch
 
-	movl subarch_entries - __PAGE_OFFSET(,%eax,4), %eax
+	movl pa(subarch_entries)(,%eax,4), %eax
 	subl $__PAGE_OFFSET, %eax
 	jmp *%eax
 
@@ -170,17 +171,68 @@ num_subarch_entries = (. - subarch_entries) / 4
  * Mappings are created both at virtual address 0 (identity mapping)
  * and PAGE_OFFSET for up to _end+sizeof(page tables)+INIT_MAP_BEYOND_END.
  *
- * Warning: don't use %esi or the stack in this code.  However, %esp
- * can be used as a GPR if you really need it...
+ * Note that the stack is not yet set up!
  */
-page_pde_offset = (__PAGE_OFFSET >> 20);
+#define PTE_ATTR	0x007		/* PRESENT+RW+USER */
+#define PDE_ATTR	0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
+#define PGD_ATTR	0x001		/* PRESENT (no other attributes) */
 
 default_entry:
-	movl $(pg0 - __PAGE_OFFSET), %edi
-	movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
-	movl $0x007, %eax		/* 0x007 = PRESENT+RW+USER */
+#ifdef CONFIG_X86_PAE
+
+	/*
+	 * In PAE mode swapper_pg_dir is statically defined to contain enough
+	 * entries to cover the VMSPLIT option (that is the top 1, 2 or 3
+	 * entries). The identity mapping is handled by pointing two PGD
+	 * entries to the first kernel PMD.
+	 *
+	 * Note the upper half of each PMD or PTE are always zero at
+	 * this stage.
+	 */
+
+#define KPMDS ((0x100000000-__PAGE_OFFSET) >> 30) /* Number of kernel PMDs */
+
+	xorl %ebx,%ebx				/* %ebx is kept at zero */
+
+	movl $pa(pg0), %edi
+	movl $pa(swapper_pg_pmd), %edx
+	movl $PTE_ATTR, %eax
+10:
+	leal PDE_ATTR(%edi),%ecx		/* Create PMD entry */
+	movl %ecx,(%edx)			/* Store PMD entry */
+						/* Upper half already zero */
+	addl $8,%edx
+	movl $512,%ecx
+11:
+	stosl
+	xchgl %eax,%ebx
+	stosl
+	xchgl %eax,%ebx
+	addl $0x1000,%eax
+	loop 11b
+
+	/*
+	 * End condition: we must map up to and including INIT_MAP_BEYOND_END
+	 * bytes beyond the end of our own page tables.
+	 */
+	leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp
+	cmpl %ebp,%eax
+	jb 10b
+1:
+	movl %edi,pa(init_pg_tables_end)
+
+	/* Do early initialization of the fixmap area */
+	movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax
+	movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
+#else	/* Not PAE */
+
+page_pde_offset = (__PAGE_OFFSET >> 20);
+
+	movl $pa(pg0), %edi
+	movl $pa(swapper_pg_dir), %edx
+	movl $PTE_ATTR, %eax
 10:
-	leal 0x007(%edi),%ecx			/* Create PDE entry */
+	leal PDE_ATTR(%edi),%ecx		/* Create PDE entry */
 	movl %ecx,(%edx)			/* Store identity PDE entry */
 	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
 	addl $4,%edx
@@ -189,19 +241,20 @@ default_entry:
 	stosl
 	addl $0x1000,%eax
 	loop 11b
-	/* End condition: we must map up to and including INIT_MAP_BEYOND_END */
-	/* bytes beyond the end of our own page tables; the +0x007 is the attribute bits */
-	leal (INIT_MAP_BEYOND_END+0x007)(%edi),%ebp
+	/*
+	 * End condition: we must map up to and including INIT_MAP_BEYOND_END
+	 * bytes beyond the end of our own page tables; the +0x007 is
+	 * the attribute bits
+	 */
+	leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp
 	cmpl %ebp,%eax
 	jb 10b
-	movl %edi,(init_pg_tables_end - __PAGE_OFFSET)
-
-	/* Do an early initialization of the fixmap area */
-	movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
-	movl $(swapper_pg_pmd - __PAGE_OFFSET), %eax
-	addl $0x67, %eax		/* 0x67 == _PAGE_TABLE */
-	movl %eax, 4092(%edx)
+	movl %edi,pa(init_pg_tables_end)
 
+	/* Do early initialization of the fixmap area */
+	movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax
+	movl %eax,pa(swapper_pg_dir+0xffc)
+#endif
 	jmp 3f
 /*
  * Non-boot CPU entry point; entered from trampoline.S
@@ -241,7 +294,7 @@ ENTRY(startup_32_smp)
  * NOTE! We have to correct for the fact that we're
  * not yet offset PAGE_OFFSET..
  */
-#define cr4_bits mmu_cr4_features-__PAGE_OFFSET
+#define cr4_bits pa(mmu_cr4_features)
 	movl cr4_bits,%edx
 	andl %edx,%edx
 	jz 6f
@@ -276,10 +329,10 @@ ENTRY(startup_32_smp)
 /*
  * Enable paging
  */
-	movl $swapper_pg_dir-__PAGE_OFFSET,%eax
+	movl $pa(swapper_pg_dir),%eax
 	movl %eax,%cr3		/* set the page table pointer.. */
 	movl %cr0,%eax
-	orl $0x80000000,%eax
+	orl $X86_CR0_PG,%eax
 	movl %eax,%cr0		/* ..and set paging (PG) bit */
 	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
 1:
@@ -552,16 +605,44 @@ ENTRY(_stext)
  */
 .section ".bss.page_aligned","wa"
 	.align PAGE_SIZE_asm
+#ifdef CONFIG_X86_PAE
+ENTRY(swapper_pg_pmd)
+	.fill 1024*KPMDS,4,0
+#else
 ENTRY(swapper_pg_dir)
 	.fill 1024,4,0
-ENTRY(swapper_pg_pmd)
+#endif
+ENTRY(swapper_pg_fixmap)
 	.fill 1024,4,0
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
-
 /*
  * This starts the data section.
  */
+#ifdef CONFIG_X86_PAE
+.section ".data.page_aligned","wa"
+	/* Page-aligned for the benefit of paravirt? */
+	.align PAGE_SIZE_asm
+ENTRY(swapper_pg_dir)
+	.long	pa(swapper_pg_pmd+PGD_ATTR),0		/* low identity map */
+# if KPMDS == 3
+	.long	pa(swapper_pg_pmd+PGD_ATTR),0
+	.long	pa(swapper_pg_pmd+PGD_ATTR+0x1000),0
+	.long	pa(swapper_pg_pmd+PGD_ATTR+0x2000),0
+# elif KPMDS == 2
+	.long	0,0
+	.long	pa(swapper_pg_pmd+PGD_ATTR),0
+	.long	pa(swapper_pg_pmd+PGD_ATTR+0x1000),0
+# elif KPMDS == 1
+	.long	0,0
+	.long	0,0
+	.long	pa(swapper_pg_pmd+PGD_ATTR),0
+# else
+#  error "Kernel PMDs should be 1, 2 or 3"
+# endif
+	.align PAGE_SIZE_asm		/* needs to be page-sized too */
+#endif
+
 .data
 ENTRY(stack_start)
 	.long init_thread_union+THREAD_SIZE
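In the PAE branch above, KPMDS = (0x100000000 - __PAGE_OFFSET) >> 30 is the number of 1 GiB top-level slots between PAGE_OFFSET and the 4 GiB boundary, which is why the .data.page_aligned block insists on 1, 2 or 3. A small worked check of that arithmetic for the usual VMSPLIT settings (the PAGE_OFFSET values below are the standard 3G/2G/1G splits; this is an illustration, not kernel code):

#include <stdio.h>

/* Number of kernel PMDs for a given PAGE_OFFSET, as in head_32.S */
#define KPMDS(off) ((unsigned)((0x100000000ULL - (off)) >> 30))

int main(void)
{
	/* VMSPLIT_3G, VMSPLIT_2G, VMSPLIT_1G */
	unsigned long long offs[] = { 0xC0000000ULL, 0x80000000ULL, 0x40000000ULL };
	int i;

	for (i = 0; i < 3; i++)
		printf("PAGE_OFFSET=%#llx -> KPMDS=%u\n", offs[i], KPMDS(offs[i]));
	return 0;	/* prints 1, 2 and 3 respectively */
}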
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 219f86eb6123..027fc067b399 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -12,48 +12,37 @@
  */
 
 /*
- * We are using the 32Khz input clock - its the only one that has the
+ * We are using the 32.768kHz input clock - it's the only one that has the
  * ranges we find desirable.  The following table lists the suitable
- * divisors and the associated hz, minimum interval
- * and the maximum interval:
+ * divisors and the associated Hz, minimum interval and the maximum interval:
  *
- *  Divisor   Hz      Min Delta (S) Max Delta (S)
- *   1        32000     .0005          2.048
- *   2        16000     .001           4.096
- *   4         8000     .002           8.192
- *   8         4000     .004          16.384
- *   16        2000     .008          32.768
- *   32        1000     .016          65.536
- *   64         500     .032         131.072
- *  128         250     .064         262.144
- *  256         125     .128         524.288
+ *  Divisor   Hz      Min Delta (s)  Max Delta (s)
+ *   1        32768   .00048828125      2.000
+ *   2        16384   .0009765625       4.000
+ *   4         8192   .001953125        8.000
+ *   8         4096   .00390625        16.000
+ *   16        2048   .0078125         32.000
+ *   32        1024   .015625          64.000
+ *   64        512    .03125          128.000
+ *  128        256    .0625           256.000
+ *  256        128    .125            512.000
  */
 
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
-#include <linux/module.h>
 #include <asm/geode.h>
 
-#define F_AVAIL    0x01
-
 static struct mfgpt_timer_t {
-	int flags;
-	struct module *owner;
+	unsigned int avail:1;
 } mfgpt_timers[MFGPT_MAX_TIMERS];
 
 /* Selected from the table above */
 
 #define MFGPT_DIVISOR 16
 #define MFGPT_SCALE  4     /* divisor = 2^(scale) */
-#define MFGPT_HZ  (32000 / MFGPT_DIVISOR)
+#define MFGPT_HZ  (32768 / MFGPT_DIVISOR)
 #define MFGPT_PERIODIC (MFGPT_HZ / HZ)
 
-#ifdef CONFIG_GEODE_MFGPT_TIMER
-static int __init mfgpt_timer_setup(void);
-#else
-#define mfgpt_timer_setup() (0)
-#endif
-
 /* Allow for disabling of MFGPTs */
 static int disable;
 static int __init mfgpt_disable(char *s)
@@ -85,28 +74,37 @@ __setup("mfgptfix", mfgpt_fix);
  * In other cases (such as with VSAless OpenFirmware), the system firmware
  * leaves timers available for us to use.
  */
-int __init geode_mfgpt_detect(void)
+
+
+static int timers = -1;
+
+static void geode_mfgpt_detect(void)
 {
-	int count = 0, i;
+	int i;
 	u16 val;
 
+	timers = 0;
+
 	if (disable) {
-		printk(KERN_INFO "geode-mfgpt: Skipping MFGPT setup\n");
-		return 0;
+		printk(KERN_INFO "geode-mfgpt: MFGPT support is disabled\n");
+		goto done;
+	}
+
+	if (!geode_get_dev_base(GEODE_DEV_MFGPT)) {
+		printk(KERN_INFO "geode-mfgpt: MFGPT LBAR is not set up\n");
+		goto done;
 	}
 
 	for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
 		val = geode_mfgpt_read(i, MFGPT_REG_SETUP);
 		if (!(val & MFGPT_SETUP_SETUP)) {
-			mfgpt_timers[i].flags = F_AVAIL;
-			count++;
+			mfgpt_timers[i].avail = 1;
+			timers++;
 		}
 	}
 
-	/* set up clock event device, if desired */
-	i = mfgpt_timer_setup();
-
-	return count;
+done:
+	printk(KERN_INFO "geode-mfgpt: %d MFGPT timers available.\n", timers);
 }
 
 int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
@@ -183,36 +181,41 @@ int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable)
 	return 0;
 }
 
-static int mfgpt_get(int timer, struct module *owner)
+static int mfgpt_get(int timer)
 {
-	mfgpt_timers[timer].flags &= ~F_AVAIL;
-	mfgpt_timers[timer].owner = owner;
+	mfgpt_timers[timer].avail = 0;
 	printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer);
 	return timer;
 }
 
-int geode_mfgpt_alloc_timer(int timer, int domain, struct module *owner)
+int geode_mfgpt_alloc_timer(int timer, int domain)
 {
 	int i;
 
-	if (!geode_get_dev_base(GEODE_DEV_MFGPT))
-		return -ENODEV;
+	if (timers == -1) {
+		/* timers haven't been detected yet */
+		geode_mfgpt_detect();
+	}
+
+	if (!timers)
+		return -1;
+
 	if (timer >= MFGPT_MAX_TIMERS)
-		return -EIO;
+		return -1;
 
 	if (timer < 0) {
 		/* Try to find an available timer */
 		for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
-			if (mfgpt_timers[i].flags & F_AVAIL)
-				return mfgpt_get(i, owner);
+			if (mfgpt_timers[i].avail)
+				return mfgpt_get(i);
 
 			if (i == 5 && domain == MFGPT_DOMAIN_WORKING)
 				break;
 		}
 	} else {
 		/* If they requested a specific timer, try to honor that */
-		if (mfgpt_timers[timer].flags & F_AVAIL)
-			return mfgpt_get(timer, owner);
+		if (mfgpt_timers[timer].avail)
+			return mfgpt_get(timer);
 	}
 
 	/* No timers available - too bad */
@@ -244,10 +247,11 @@ static int __init mfgpt_setup(char *str)
 }
 __setup("mfgpt_irq=", mfgpt_setup);
 
-static inline void mfgpt_disable_timer(u16 clock)
+static void mfgpt_disable_timer(u16 clock)
 {
-	u16 val = geode_mfgpt_read(clock, MFGPT_REG_SETUP);
-	geode_mfgpt_write(clock, MFGPT_REG_SETUP, val & ~MFGPT_SETUP_CNTEN);
+	/* avoid races by clearing CMP1 and CMP2 unconditionally */
+	geode_mfgpt_write(clock, MFGPT_REG_SETUP, (u16) ~MFGPT_SETUP_CNTEN |
+			MFGPT_SETUP_CMP1 | MFGPT_SETUP_CMP2);
 }
 
 static int mfgpt_next_event(unsigned long, struct clock_event_device *);
@@ -263,7 +267,7 @@ static struct clock_event_device mfgpt_clockevent = {
 	.shift = 32
 };
 
-static inline void mfgpt_start_timer(u16 clock, u16 delta)
+static void mfgpt_start_timer(u16 delta)
 {
 	geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta);
 	geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
@@ -278,21 +282,25 @@ static void mfgpt_set_mode(enum clock_event_mode mode,
 		mfgpt_disable_timer(mfgpt_event_clock);
 
 	if (mode == CLOCK_EVT_MODE_PERIODIC)
-		mfgpt_start_timer(mfgpt_event_clock, MFGPT_PERIODIC);
+		mfgpt_start_timer(MFGPT_PERIODIC);
 
 	mfgpt_tick_mode = mode;
 }
 
 static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
 {
-	mfgpt_start_timer(mfgpt_event_clock, delta);
+	mfgpt_start_timer(delta);
 	return 0;
 }
 
-/* Assume (foolishly?), that this interrupt was due to our tick */
-
 static irqreturn_t mfgpt_tick(int irq, void *dev_id)
 {
+	u16 val = geode_mfgpt_read(mfgpt_event_clock, MFGPT_REG_SETUP);
+
+	/* See if the interrupt was for us */
+	if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1)))
+		return IRQ_NONE;
+
 	/* Turn off the clock (and clear the event) */
 	mfgpt_disable_timer(mfgpt_event_clock);
 
@@ -320,13 +328,12 @@ static struct irqaction mfgptirq = {
 	.name = "mfgpt-timer"
 };
 
-static int __init mfgpt_timer_setup(void)
+int __init mfgpt_timer_setup(void)
 {
 	int timer, ret;
 	u16 val;
 
-	timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING,
-			THIS_MODULE);
+	timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
 	if (timer < 0) {
 		printk(KERN_ERR
 			"mfgpt-timer: Could not allocate a MFPGT timer\n");
@@ -363,7 +370,7 @@ static int __init mfgpt_timer_setup(void)
 			&mfgpt_clockevent);
 
 	printk(KERN_INFO
-		"mfgpt-timer: registering the MFGT timer as a clock event.\n");
+		"mfgpt-timer: registering the MFGPT timer as a clock event.\n");
 	clockevents_register_device(&mfgpt_clockevent);
 
 	return 0;
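The corrected comment table in the first mfgpt_32.c hunk follows directly from the 32.768 kHz input clock: at divisor d the tick rate is 32768/d Hz, and the min/max columns correspond to 16 and 65536 counter ticks at that rate (the 16-tick minimum is inferred from the table values themselves, not stated in the source; the 65536-tick maximum is the 16-bit counter wrapping). A short program that reproduces the corrected table:

#include <stdio.h>

int main(void)
{
	int d;

	printf("Divisor      Hz  Min Delta (s)  Max Delta (s)\n");
	for (d = 1; d <= 256; d *= 2) {
		double hz = 32768.0 / d;

		/* 16 ticks minimum, 65536 ticks maximum at this rate */
		printf("%7d %7.0f  %.11f  %10.3f\n",
		       d, hz, 16.0 / hz, 65536.0 / hz);
	}
	return 0;
}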
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index d1d8c347cc0b..691ab4cb167b 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -154,7 +154,11 @@ struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
 struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
 EXPORT_SYMBOL(boot_cpu_data);
 
+#ifndef CONFIG_X86_PAE
 unsigned long mmu_cr4_features;
+#else
+unsigned long mmu_cr4_features = X86_CR4_PAE;
+#endif
 
 /* for MCA, but anyone else can use it if they want */
 unsigned int machine_id;
diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c
deleted file mode 100644
index 7ac7130022f1..000000000000
--- a/arch/x86/kernel/suspend_64.c
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * Suspend support specific for i386.
- *
- * Distribute under GPLv2
- *
- * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
- * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
- */
-
-#include <linux/smp.h>
-#include <linux/suspend.h>
-#include <asm/proto.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mtrr.h>
-
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
-static void fix_processor_context(void);
-
-struct saved_context saved_context;
-
-/**
- * __save_processor_state - save CPU registers before creating a
- *	hibernation image and before restoring the memory state from it
- * @ctxt - structure to store the registers contents in
- *
- * NOTE: If there is a CPU register the modification of which by the
- * boot kernel (ie. the kernel used for loading the hibernation image)
- * might affect the operations of the restored target kernel (ie. the one
- * saved in the hibernation image), then its contents must be saved by this
- * function.  In other words, if kernel A is hibernated and different
- * kernel B is used for loading the hibernation image into memory, the
- * kernel A's __save_processor_state() function must save all registers
- * needed by kernel A, so that it can operate correctly after the resume
- * regardless of what kernel B does in the meantime.
- */
-static void __save_processor_state(struct saved_context *ctxt)
-{
-	kernel_fpu_begin();
-
-	/*
-	 * descriptor tables
-	 */
-	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
-	store_idt((struct desc_ptr *)&ctxt->idt_limit);
-	store_tr(ctxt->tr);
-
-	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
-	/*
-	 * segment registers
-	 */
-	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
-	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
-	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
-	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
-	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
-
-	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
-	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
-	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
-	mtrr_save_fixed_ranges(NULL);
-
-	/*
-	 * control registers
-	 */
-	rdmsrl(MSR_EFER, ctxt->efer);
-	ctxt->cr0 = read_cr0();
-	ctxt->cr2 = read_cr2();
-	ctxt->cr3 = read_cr3();
-	ctxt->cr4 = read_cr4();
-	ctxt->cr8 = read_cr8();
-}
-
-void save_processor_state(void)
-{
-	__save_processor_state(&saved_context);
-}
-
-static void do_fpu_end(void)
-{
-	/*
-	 * Restore FPU regs if necessary
-	 */
-	kernel_fpu_end();
-}
-
-/**
- * __restore_processor_state - restore the contents of CPU registers saved
- *	by __save_processor_state()
- * @ctxt - structure to load the registers contents from
- */
-static void __restore_processor_state(struct saved_context *ctxt)
-{
-	/*
-	 * control registers
-	 */
-	wrmsrl(MSR_EFER, ctxt->efer);
-	write_cr8(ctxt->cr8);
-	write_cr4(ctxt->cr4);
-	write_cr3(ctxt->cr3);
-	write_cr2(ctxt->cr2);
-	write_cr0(ctxt->cr0);
-
-	/*
-	 * now restore the descriptor tables to their proper values
-	 * ltr is done i fix_processor_context().
-	 */
-	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
-	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
-
-
-	/*
-	 * segment registers
-	 */
-	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
-	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
-	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
-	load_gs_index(ctxt->gs);
-	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
-
-	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
-	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
-	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
-
-	fix_processor_context();
-
-	do_fpu_end();
-	mtrr_ap_init();
-}
-
-void restore_processor_state(void)
-{
-	__restore_processor_state(&saved_context);
-}
-
-static void fix_processor_context(void)
-{
-	int cpu = smp_processor_id();
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
-
-	/*
-	 * This just modifies memory; should not be necessary. But... This
-	 * is necessary, because 386 hardware has concept of busy TSS or some
-	 * similar stupidity.
-	 */
-	set_tss_desc(cpu, t);
-
-	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
-
-	syscall_init();				/* This sets MSR_*STAR and related */
-	load_TR_desc();				/* This does ltr */
-	load_LDT(&current->active_mm->context);	/* This does lldt */
-
-	/*
-	 * Now maybe reload the debug registers
-	 */
-	if (current->thread.debugreg7){
-		loaddebug(&current->thread, 0);
-		loaddebug(&current->thread, 1);
-		loaddebug(&current->thread, 2);
-		loaddebug(&current->thread, 3);
-		/* no 4 and 5 */
-		loaddebug(&current->thread, 6);
-		loaddebug(&current->thread, 7);
-	}
-}
-
-#ifdef CONFIG_HIBERNATION
-/* Defined in arch/x86_64/kernel/suspend_asm.S */
-extern int restore_image(void);
-
-/*
- * Address to jump to in the last phase of restore in order to get to the image
- * kernel's text (this value is passed in the image header).
- */
-unsigned long restore_jump_address;
-
-/*
- * Value of the cr3 register from before the hibernation (this value is passed
- * in the image header).
- */
-unsigned long restore_cr3;
-
-pgd_t *temp_level4_pgt;
-
-void *relocated_restore_code;
-
-static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
-{
-	long i, j;
-
-	i = pud_index(address);
-	pud = pud + i;
-	for (; i < PTRS_PER_PUD; pud++, i++) {
-		unsigned long paddr;
-		pmd_t *pmd;
-
-		paddr = address + i*PUD_SIZE;
-		if (paddr >= end)
-			break;
-
-		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
-		if (!pmd)
-			return -ENOMEM;
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
-		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
-			unsigned long pe;
-
-			if (paddr >= end)
-				break;
-			pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
-			pe &= __supported_pte_mask;
-			set_pmd(pmd, __pmd(pe));
-		}
-	}
-	return 0;
-}
-
-static int set_up_temporary_mappings(void)
-{
-	unsigned long start, end, next;
-	int error;
-
-	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
-	if (!temp_level4_pgt)
-		return -ENOMEM;
-
-	/* It is safe to reuse the original kernel mapping */
-	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
-		init_level4_pgt[pgd_index(__START_KERNEL_map)]);
-
-	/* Set up the direct mapping from scratch */
-	start = (unsigned long)pfn_to_kaddr(0);
-	end = (unsigned long)pfn_to_kaddr(end_pfn);
-
-	for (; start < end; start = next) {
-		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
-		if (!pud)
-			return -ENOMEM;
-		next = start + PGDIR_SIZE;
-		if (next > end)
-			next = end;
-		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
-			return error;
-		set_pgd(temp_level4_pgt + pgd_index(start),
-			mk_kernel_pgd(__pa(pud)));
-	}
-	return 0;
-}
-
-int swsusp_arch_resume(void)
-{
-	int error;
-
-	/* We have got enough memory and from now on we cannot recover */
-	if ((error = set_up_temporary_mappings()))
-		return error;
-
-	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
-	if (!relocated_restore_code)
-		return -ENOMEM;
-	memcpy(relocated_restore_code, &core_restore_code,
-	       &restore_registers - &core_restore_code);
-
-	restore_image();
-	return 0;
-}
-
-/*
- * pfn_is_nosave - check if given pfn is in the 'nosave' section
- */
-
-int pfn_is_nosave(unsigned long pfn)
-{
-	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
-	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
-	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
-}
-
-struct restore_data_record {
-	unsigned long jump_address;
-	unsigned long cr3;
-	unsigned long magic;
-};
-
-#define RESTORE_MAGIC	0x0123456789ABCDEFUL
-
-/**
- * arch_hibernation_header_save - populate the architecture specific part
- *	of a hibernation image header
- * @addr: address to save the data at
- */
-int arch_hibernation_header_save(void *addr, unsigned int max_size)
-{
-	struct restore_data_record *rdr = addr;
-
-	if (max_size < sizeof(struct restore_data_record))
-		return -EOVERFLOW;
-	rdr->jump_address = restore_jump_address;
-	rdr->cr3 = restore_cr3;
-	rdr->magic = RESTORE_MAGIC;
-	return 0;
-}
-
-/**
- * arch_hibernation_header_restore - read the architecture specific data
- *	from the hibernation image header
- * @addr: address to read the data from
- */
-int arch_hibernation_header_restore(void *addr)
-{
-	struct restore_data_record *rdr = addr;
-
-	restore_jump_address = rdr->jump_address;
-	restore_cr3 = rdr->cr3;
-	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
-}
-#endif /* CONFIG_HIBERNATION */
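This file is deleted here because the merge moves the 64-bit suspend/hibernation code to arch/x86/power (see "x86 PM: consolidate suspend and hibernation code" and the file-move commits in the merge log above). The header save/restore pair at the end is worth noting: a magic constant guards against restoring an image written by an incompatible kernel. A minimal user-space sketch of that round-trip, with made-up jump_address/cr3 values and hypothetical buffer handling:

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define RESTORE_MAGIC	0x0123456789ABCDEFUL

struct restore_data_record {
	unsigned long jump_address;
	unsigned long cr3;
	unsigned long magic;
};

static int header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record rdr = { 0xffffffff80200000UL, 0x1000UL,
					   RESTORE_MAGIC };

	if (max_size < sizeof(rdr))
		return -EOVERFLOW;	/* header does not fit */
	memcpy(addr, &rdr, sizeof(rdr));
	return 0;
}

static int header_restore(const void *addr)
{
	struct restore_data_record rdr;

	memcpy(&rdr, addr, sizeof(rdr));
	/* reject an image written with a different header layout */
	return rdr.magic == RESTORE_MAGIC ? 0 : -EINVAL;
}

int main(void)
{
	unsigned char buf[64];

	if (header_save(buf, sizeof(buf)) == 0)
		printf("restore ok: %d\n", header_restore(buf) == 0);
	return 0;
}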
diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S
deleted file mode 100644
index aeb9a4d7681e..000000000000
--- a/arch/x86/kernel/suspend_asm_64.S
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Copyright 2004,2005 Pavel Machek <pavel@suse.cz>, Andi Kleen <ak@suse.de>, Rafael J. Wysocki <rjw@sisk.pl>
- *
- * Distribute under GPLv2.
- *
- * swsusp_arch_resume must not use any stack or any nonlocal variables while
- * copying pages:
- *
- * Its rewriting one kernel image with another. What is stack in "old"
- * image could very well be data page in "new" image, and overwriting
- * your own stack under you is bad idea.
- */
-
-	.text
-#include <linux/linkage.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/asm-offsets.h>
-
-ENTRY(swsusp_arch_suspend)
-	movq	$saved_context, %rax
-	movq	%rsp, pt_regs_sp(%rax)
-	movq	%rbp, pt_regs_bp(%rax)
-	movq	%rsi, pt_regs_si(%rax)
-	movq	%rdi, pt_regs_di(%rax)
-	movq	%rbx, pt_regs_bx(%rax)
-	movq	%rcx, pt_regs_cx(%rax)
-	movq	%rdx, pt_regs_dx(%rax)
-	movq	%r8, pt_regs_r8(%rax)
-	movq	%r9, pt_regs_r9(%rax)
-	movq	%r10, pt_regs_r10(%rax)
-	movq	%r11, pt_regs_r11(%rax)
-	movq	%r12, pt_regs_r12(%rax)
-	movq	%r13, pt_regs_r13(%rax)
-	movq	%r14, pt_regs_r14(%rax)
-	movq	%r15, pt_regs_r15(%rax)
-	pushfq
-	popq	pt_regs_flags(%rax)
-
-	/* save the address of restore_registers */
-	movq	$restore_registers, %rax
-	movq	%rax, restore_jump_address(%rip)
-	/* save cr3 */
-	movq	%cr3, %rax
-	movq	%rax, restore_cr3(%rip)
-
-	call swsusp_save
-	ret
-
-ENTRY(restore_image)
-	/* switch to temporary page tables */
-	movq	$__PAGE_OFFSET, %rdx
-	movq	temp_level4_pgt(%rip), %rax
-	subq	%rdx, %rax
-	movq	%rax, %cr3
-	/* Flush TLB */
-	movq	mmu_cr4_features(%rip), %rax
-	movq	%rax, %rdx
-	andq	$~(1<<7), %rdx	# PGE
-	movq	%rdx, %cr4;	# turn off PGE
-	movq	%cr3, %rcx;	# flush TLB
-	movq	%rcx, %cr3;
-	movq	%rax, %cr4;	# turn PGE back on
-
-	/* prepare to jump to the image kernel */
-	movq	restore_jump_address(%rip), %rax
-	movq	restore_cr3(%rip), %rbx
-
-	/* prepare to copy image data to their original locations */
-	movq	restore_pblist(%rip), %rdx
-	movq	relocated_restore_code(%rip), %rcx
-	jmpq	*%rcx
-
-	/* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
-loop:
-	testq	%rdx, %rdx
-	jz	done
-
-	/* get addresses from the pbe and copy the page */
-	movq	pbe_address(%rdx), %rsi
-	movq	pbe_orig_address(%rdx), %rdi
-	movq	$(PAGE_SIZE >> 3), %rcx
-	rep
-	movsq
-
-	/* progress to the next pbe */
-	movq	pbe_next(%rdx), %rdx
-	jmp	loop
-done:
-	/* jump to the restore_registers address from the image header */
-	jmpq	*%rax
-	/*
-	 * NOTE: This assumes that the boot kernel's text mapping covers the
-	 * image kernel's page containing restore_registers and the address of
-	 * this page is the same as in the image kernel's text mapping (it
-	 * should always be true, because the text mapping is linear, starting
-	 * from 0, and is supposed to cover the entire kernel text for every
-	 * kernel).
-	 *
-	 * code below belongs to the image kernel
-	 */
-
-ENTRY(restore_registers)
-	/* go back to the original page tables */
-	movq	%rbx, %cr3
-
-	/* Flush TLB, including "global" things (vmalloc) */
-	movq	mmu_cr4_features(%rip), %rax
-	movq	%rax, %rdx
-	andq	$~(1<<7), %rdx;	# PGE
-	movq	%rdx, %cr4;	# turn off PGE
-	movq	%cr3, %rcx;	# flush TLB
-	movq	%rcx, %cr3
-	movq	%rax, %cr4;	# turn PGE back on
-
-	/* We don't restore %rax, it must be 0 anyway */
-	movq	$saved_context, %rax
-	movq	pt_regs_sp(%rax), %rsp
-	movq	pt_regs_bp(%rax), %rbp
-	movq	pt_regs_si(%rax), %rsi
-	movq	pt_regs_di(%rax), %rdi
-	movq	pt_regs_bx(%rax), %rbx
-	movq	pt_regs_cx(%rax), %rcx
-	movq	pt_regs_dx(%rax), %rdx
-	movq	pt_regs_r8(%rax), %r8
-	movq	pt_regs_r9(%rax), %r9
-	movq	pt_regs_r10(%rax), %r10
-	movq	pt_regs_r11(%rax), %r11
-	movq	pt_regs_r12(%rax), %r12
-	movq	pt_regs_r13(%rax), %r13
-	movq	pt_regs_r14(%rax), %r14
-	movq	pt_regs_r15(%rax), %r15
-	pushq	pt_regs_flags(%rax)
-	popfq
-
-	xorq	%rax, %rax
-
-	/* tell the hibernation core that we've just restored the memory */
-	movq	%rax, in_suspend(%rip)
-
-	ret
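restore_image and restore_registers in the file deleted above both flush the TLB, including global entries, by clearing CR4.PGE (bit 7), reloading CR3 and turning PGE back on. A hedged sketch of that sequence as GCC inline assembly; it compiles as a fragment, but CR3/CR4 access is privileged, so it is only meaningful in ring 0:

/* CR4.PGE is bit 7; clearing it invalidates global TLB entries */
#define X86_CR4_PGE	(1UL << 7)

static inline void flush_tlb_global_sketch(unsigned long cr4)
{
	unsigned long cr3;

	/* turn off PGE: this alone drops global translations */
	asm volatile("mov %0, %%cr4" : : "r" (cr4 & ~X86_CR4_PGE) : "memory");
	/* reloading CR3 flushes the non-global TLB entries */
	asm volatile("mov %%cr3, %0" : "=r" (cr3));
	asm volatile("mov %0, %%cr3" : : "r" (cr3) : "memory");
	/* turn PGE back on */
	asm volatile("mov %0, %%cr4" : : "r" (cr4) : "memory");
}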
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index e6757aaa202b..a40051b71d9b 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arch_register_cpu);
 
 void arch_unregister_cpu(int num)
 {
-	return unregister_cpu(&per_cpu(cpu_devices, num).cpu);
+	unregister_cpu(&per_cpu(cpu_devices, num).cpu);
 }
 EXPORT_SYMBOL(arch_unregister_cpu);
 #else