Diffstat (limited to 'arch/x86_64')

 -rw-r--r--  arch/x86_64/ia32/ia32_signal.c    |    6
 -rw-r--r--  arch/x86_64/kernel/head.S         |   40
 -rw-r--r--  arch/x86_64/kernel/kprobes.c      |    8
 -rw-r--r--  arch/x86_64/kernel/mce.c          |   10
 -rw-r--r--  arch/x86_64/kernel/setup.c        |   27
 -rw-r--r--  arch/x86_64/kernel/setup64.c      |    4
 -rw-r--r--  arch/x86_64/kernel/smpboot.c      |    6
 -rw-r--r--  arch/x86_64/kernel/suspend.c      |  127
 -rw-r--r--  arch/x86_64/kernel/suspend_asm.S  |   17
 -rw-r--r--  arch/x86_64/mm/numa.c             |   10
 -rw-r--r--  arch/x86_64/mm/pageattr.c         |    2

 11 files changed, 196 insertions(+), 61 deletions(-)
diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c
index 66e2821533db..0903cc1faef2 100644
--- a/arch/x86_64/ia32/ia32_signal.c
+++ b/arch/x86_64/ia32/ia32_signal.c
@@ -425,7 +425,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
 		rsp = (unsigned long) ka->sa.sa_restorer;
 	}
 
-	return (void __user *)((rsp - frame_size) & -8UL);
+	rsp -= frame_size;
+	/* Align the stack pointer according to the i386 ABI,
+	 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+	rsp = ((rsp + 4) & -16ul) - 4;
+	return (void __user *) rsp;
 }
 
 int ia32_setup_frame(int sig, struct k_sigaction *ka,
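
The alignment change above is easy to sanity-check in isolation: after subtracting the frame size, rounding (rsp + 4) down to a multiple of 16 leaves ((sp + 4) & 15) == 0, i.e. the stack is 16-byte aligned before the 4-byte return-address slot, which is what code compiled for the i386 ABI expects on function entry. A minimal userspace sketch (align_sigframe is a hypothetical name, not from the patch):

	#include <assert.h>
	#include <stdio.h>

	/* hypothetical helper mirroring the patched get_sigframe() math */
	static unsigned long align_sigframe(unsigned long rsp, unsigned long frame_size)
	{
		rsp -= frame_size;
		/* round (rsp + 4) down to a multiple of 16, then undo the +4 */
		return ((rsp + 4) & -16ul) - 4;
	}

	int main(void)
	{
		for (unsigned long rsp = 0xbfff1000ul; rsp < 0xbfff1010ul; rsp++) {
			unsigned long sp = align_sigframe(rsp, 108);
			assert(((sp + 4) & 15) == 0);	/* i386 ABI entry condition */
		}
		printf("all frames 16-byte aligned at entry\n");
		return 0;
	}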
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 4592bf21fcaf..b92e5f45ed46 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -270,26 +270,26 @@ ENTRY(level3_kernel_pgt)
 .org 0x4000
 ENTRY(level2_ident_pgt)
 	/* 40MB for bootup. */
-	.quad	0x0000000000000183
-	.quad	0x0000000000200183
-	.quad	0x0000000000400183
-	.quad	0x0000000000600183
-	.quad	0x0000000000800183
-	.quad	0x0000000000A00183
-	.quad	0x0000000000C00183
-	.quad	0x0000000000E00183
-	.quad	0x0000000001000183
-	.quad	0x0000000001200183
-	.quad	0x0000000001400183
-	.quad	0x0000000001600183
-	.quad	0x0000000001800183
-	.quad	0x0000000001A00183
-	.quad	0x0000000001C00183
-	.quad	0x0000000001E00183
-	.quad	0x0000000002000183
-	.quad	0x0000000002200183
-	.quad	0x0000000002400183
-	.quad	0x0000000002600183
+	.quad	0x0000000000000083
+	.quad	0x0000000000200083
+	.quad	0x0000000000400083
+	.quad	0x0000000000600083
+	.quad	0x0000000000800083
+	.quad	0x0000000000A00083
+	.quad	0x0000000000C00083
+	.quad	0x0000000000E00083
+	.quad	0x0000000001000083
+	.quad	0x0000000001200083
+	.quad	0x0000000001400083
+	.quad	0x0000000001600083
+	.quad	0x0000000001800083
+	.quad	0x0000000001A00083
+	.quad	0x0000000001C00083
+	.quad	0x0000000001E00083
+	.quad	0x0000000002000083
+	.quad	0x0000000002200083
+	.quad	0x0000000002400083
+	.quad	0x0000000002600083
 	/* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
 	.globl temp_boot_pmds
 temp_boot_pmds:
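
The only bit that changes in each entry above is bit 8, the global bit: 0x183 is present | writable | PSE | global, while 0x083 drops global. Global TLB entries survive CR3 reloads, so these early boot identity mappings must not be marked global or stale translations could outlive the page tables that created them. A small sketch decoding the flags (macro names are illustrative, not kernel headers):

	#include <stdio.h>

	/* illustrative names for the long-mode page-table bits involved */
	#define PTE_PRESENT 0x001UL	/* bit 0 */
	#define PTE_RW      0x002UL	/* bit 1 */
	#define PTE_PSE     0x080UL	/* bit 7: 2MB page */
	#define PTE_GLOBAL  0x100UL	/* bit 8: survives CR3 reloads */

	int main(void)
	{
		unsigned long before = 0x183UL, after = 0x083UL;

		printf("changed bits: %#lx\n", before ^ after);	/* prints 0x100 */
		printf("after keeps: present=%d rw=%d pse=%d global=%d\n",
		       !!(after & PTE_PRESENT), !!(after & PTE_RW),
		       !!(after & PTE_PSE), !!(after & PTE_GLOBAL));
		return 0;
	}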
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index df08c43276a0..76a28b007be9 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -77,9 +77,9 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	/* insn: must be on special executable page on x86_64. */
-	up(&kprobe_mutex);
-	p->ainsn.insn = get_insn_slot();
-	down(&kprobe_mutex);
+	down(&kprobe_mutex);
+	p->ainsn.insn = get_insn_slot();
+	up(&kprobe_mutex);
 	if (!p->ainsn.insn) {
 		return -ENOMEM;
 	}
@@ -231,9 +231,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	up(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
-	down(&kprobe_mutex);
+	down(&kprobe_mutex);
+	free_insn_slot(p->ainsn.insn);
+	up(&kprobe_mutex);
 }
 
 static inline void save_previous_kprobe(void)
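
Both hunks fix the same inversion: the old code released the semaphore (up()) before the critical section and acquired it (down()) afterwards, so the shared insn-slot lists were manipulated with no protection at all. A userspace pthread analogue of the corrected pattern (grab_slot and free_slots are illustrative names):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
	static int free_slots = 16;	/* stand-in for the shared insn-slot list */

	static int grab_slot(void)
	{
		int got = 0;

		pthread_mutex_lock(&slot_lock);		/* down(): acquire first */
		if (free_slots > 0) {			/* critical section */
			free_slots--;
			got = 1;
		}
		pthread_mutex_unlock(&slot_lock);	/* up(): release after */
		return got;
	}

	int main(void)
	{
		printf("slot acquired: %d, remaining: %d\n", grab_slot(), free_slots);
		return 0;
	}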
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 08203b07f4bd..69541db5ff2c 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -54,9 +54,12 @@ void mce_log(struct mce *mce)
 {
 	unsigned next, entry;
 	mce->finished = 0;
-	smp_wmb();
+	wmb();
 	for (;;) {
 		entry = rcu_dereference(mcelog.next);
+		/* The rmb forces the compiler to reload next in each
+		   iteration */
+		rmb();
 		for (;;) {
 			/* When the buffer fills up discard new entries. Assume
 			   that the earlier errors are the more interesting. */
@@ -69,6 +72,7 @@ void mce_log(struct mce *mce)
 				entry++;
 				continue;
 			}
+			break;
 		}
 		smp_rmb();
 		next = entry + 1;
@@ -76,9 +80,9 @@ void mce_log(struct mce *mce)
 			break;
 	}
 	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
-	smp_wmb();
+	wmb();
 	mcelog.entry[entry].finished = 1;
-	smp_wmb();
+	wmb();
 
 	if (!test_and_set_bit(0, &console_logged))
 		notify_user = 1;
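
The protocol here is a classic lock-free publish: write the record, issue a write barrier, then set ->finished, so any reader that observes finished == 1 is guaranteed to also see the completed record. wmb() replaces smp_wmb() because machine checks can interrupt the reader on the same CPU, where SMP-only barriers compile away. A userspace C11 sketch of the same publish/consume pairing (names are illustrative):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <string.h>

	struct record {
		char payload[64];
		atomic_int finished;
	};

	static struct record slot;

	static void publish(const char *msg)
	{
		strncpy(slot.payload, msg, sizeof slot.payload - 1);
		/* release store: payload writes are ordered before the flag,
		   like memcpy(); wmb(); ->finished = 1; in mce_log() */
		atomic_store_explicit(&slot.finished, 1, memory_order_release);
	}

	static int consume(char *out, size_t len)
	{
		/* acquire load: seeing finished == 1 implies the payload is
		   visible too, mirroring the reader's barrier */
		if (!atomic_load_explicit(&slot.finished, memory_order_acquire))
			return 0;
		strncpy(out, slot.payload, len - 1);
		return 1;
	}

	int main(void)
	{
		char buf[64] = "";

		publish("machine check: bank 4");
		printf("consumed=%d msg=\"%s\"\n", consume(buf, sizeof buf), buf);
		return 0;
	}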
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 238f73e1a834..cb28df14ff6f 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -831,8 +831,6 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 #endif
 }
 
-#define HWCR 0xc0010015
-
 static int __init init_amd(struct cpuinfo_x86 *c)
 {
 	int r;
@@ -841,14 +839,18 @@ static int __init init_amd(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SMP
 	unsigned long value;
 
-	// Disable TLB flush filter by setting HWCR.FFDIS:
-	// bit 6 of msr C001_0015
-	//
-	// Errata 63 for SH-B3 steppings
-	// Errata 122 for all(?) steppings
-	rdmsrl(HWCR, value);
-	value |= 1 << 6;
-	wrmsrl(HWCR, value);
+	/*
+	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
+	 * bit 6 of msr C001_0015
+	 *
+	 * Errata 63 for SH-B3 steppings
+	 * Errata 122 for all steppings (F+ have it disabled by default)
+	 */
+	if (c->x86 == 15) {
+		rdmsrl(MSR_K8_HWCR, value);
+		value |= 1 << 6;
+		wrmsrl(MSR_K8_HWCR, value);
+	}
 #endif
 
 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
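
The fix scopes the HWCR read-modify-write to family 0xF (K8), since later families have the TLB flush filter disabled by default, and switches to the shared MSR_K8_HWCR constant instead of a private #define. For reference, the same read-modify-write can be reproduced from userspace through the msr driver (sketch only; assumes root and the msr module loaded):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define MSR_K8_HWCR 0xc0010015
	#define HWCR_FFDIS  (1ULL << 6)	/* TLB flush filter disable */

	int main(void)
	{
		uint64_t v;
		int fd = open("/dev/cpu/0/msr", O_RDWR);

		/* the msr driver maps the file offset to the MSR number */
		if (fd < 0 || pread(fd, &v, 8, MSR_K8_HWCR) != 8)
			return perror("rdmsr"), 1;
		v |= HWCR_FFDIS;
		if (pwrite(fd, &v, 8, MSR_K8_HWCR) != 8)
			return perror("wrmsr"), 1;
		printf("HWCR now %#llx\n", (unsigned long long)v);
		return close(fd), 0;
	}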
@@ -965,13 +967,12 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
 static void srat_detect_node(void)
 {
 #ifdef CONFIG_NUMA
-	unsigned apicid, node;
+	unsigned node;
 	int cpu = smp_processor_id();
 
 	/* Don't do the funky fallback heuristics the AMD version employs
 	   for now. */
-	apicid = phys_proc_id[cpu];
-	node = apicid_to_node[apicid];
+	node = apicid_to_node[hard_smp_processor_id()];
 	if (node == NUMA_NO_NODE)
 		node = 0;
 	cpu_to_node[cpu] = node;
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index bd33be24a386..79190891fbc5 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -87,6 +87,10 @@ void __init setup_per_cpu_areas(void)
 	int i;
 	unsigned long size;
 
+#ifdef CONFIG_HOTPLUG_CPU
+	prefill_possible_map();
+#endif
+
 	/* Copy section for each CPU (we discard the original) */
 	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
 #ifdef CONFIG_MODULES
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index e12d7baeb33e..658a81b33f3b 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -892,7 +892,7 @@ static __init void disable_smp(void)
  * those NR_CPUS, hence cpu_possible_map represents entire NR_CPUS range.
  * - Ashok Raj
  */
-static void prefill_possible_map(void)
+__init void prefill_possible_map(void)
 {
 	int i;
 	for (i = 0; i < NR_CPUS; i++)
@@ -967,10 +967,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	current_cpu_data = boot_cpu_data;
 	current_thread_info()->cpu = 0;  /* needed? */
 
-#ifdef CONFIG_HOTPLUG_CPU
-	prefill_possible_map();
-#endif
-
 	if (smp_sanity_check(max_cpus) < 0) {
 		printk(KERN_INFO "SMP disabled\n");
 		disable_smp();
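
Together with the setup64.c hunk above, this moves the prefill_possible_map() call from smp_prepare_cpus() to just before the per-CPU areas are copied, so per-CPU memory gets reserved for every hot-pluggable CPU rather than only those present at boot (hence the function also loses its static and gains __init). The loop body is cut off by the hunk; a hedged sketch of what it plausibly does (the cpu_set()/cpu_possible_map usage is an assumption, not shown in the patch):

	/* hedged reconstruction -- the real body lies outside the hunk */
	__init void prefill_possible_map(void)
	{
		int i;

		for (i = 0; i < NR_CPUS; i++)
			cpu_set(i, cpu_possible_map);	/* assumption */
	}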
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index ebb9abf3ce6d..f066c6ab3618 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -11,6 +11,8 @@
 #include <linux/smp.h>
 #include <linux/suspend.h>
 #include <asm/proto.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
 
 struct saved_context saved_context;
 
@@ -140,4 +142,129 @@ void fix_processor_context(void)
 
 }
 
+#ifdef CONFIG_SOFTWARE_SUSPEND
+/* Defined in arch/x86_64/kernel/suspend_asm.S */
+extern int restore_image(void);
 
+pgd_t *temp_level4_pgt;
+
+static void **pages;
+
+static inline void *__add_page(void)
+{
+	void **c;
+
+	c = (void **)get_usable_page(GFP_ATOMIC);
+	if (c) {
+		*c = pages;
+		pages = c;
+	}
+	return c;
+}
+
+static inline void *__next_page(void)
+{
+	void **c;
+
+	c = pages;
+	if (c) {
+		pages = *c;
+		*c = NULL;
+	}
+	return c;
+}
+
+/*
+ * Try to allocate as many usable pages as needed and daisy chain them.
+ * If one allocation fails, free the pages allocated so far
+ */
+static int alloc_usable_pages(unsigned long n)
+{
+	void *p;
+
+	pages = NULL;
+	do
+		if (!__add_page())
+			break;
+	while (--n);
+	if (n) {
+		p = __next_page();
+		while (p) {
+			free_page((unsigned long)p);
+			p = __next_page();
+		}
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+{
+	long i, j;
+
+	i = pud_index(address);
+	pud = pud + i;
+	for (; i < PTRS_PER_PUD; pud++, i++) {
+		unsigned long paddr;
+		pmd_t *pmd;
+
+		paddr = address + i*PUD_SIZE;
+		if (paddr >= end)
+			break;
+
+		pmd = (pmd_t *)__next_page();
+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
+			unsigned long pe;
+
+			if (paddr >= end)
+				break;
+			pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | paddr;
+			pe &= __supported_pte_mask;
+			set_pmd(pmd, __pmd(pe));
+		}
+	}
+}
+
+static void set_up_temporary_mappings(void)
+{
+	unsigned long start, end, next;
+
+	temp_level4_pgt = (pgd_t *)__next_page();
+
+	/* It is safe to reuse the original kernel mapping */
+	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
+		init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+
+	/* Set up the direct mapping from scratch */
+	start = (unsigned long)pfn_to_kaddr(0);
+	end = (unsigned long)pfn_to_kaddr(end_pfn);
+
+	for (; start < end; start = next) {
+		pud_t *pud = (pud_t *)__next_page();
+		next = start + PGDIR_SIZE;
+		if (next > end)
+			next = end;
+		res_phys_pud_init(pud, __pa(start), __pa(next));
+		set_pgd(temp_level4_pgt + pgd_index(start),
+			mk_kernel_pgd(__pa(pud)));
+	}
+}
+
+int swsusp_arch_resume(void)
+{
+	unsigned long n;
+
+	n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;
+	n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;
+	pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n);
+	if (alloc_usable_pages(n)) {
+		free_eaten_memory();
+		return -ENOMEM;
+	}
+	/* We have got enough memory and from now on we cannot recover */
+	set_up_temporary_mappings();
+	restore_image();
+	return 0;
+}
+#endif /* CONFIG_SOFTWARE_SUSPEND */
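
The page budget computed in swsusp_arch_resume() is one PMD page per 1GB (PUD_SIZE) chunk of the direct mapping, plus the PUD pages that reference those PMD pages, plus one page for the temporary PGD itself. A worked example for a 4GB machine (standalone arithmetic only; constants hard-coded for x86_64 with 4KB pages):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_shift = 12;		/* 4KB pages */
		unsigned long pud_shift = 30;		/* PUD_SIZE = 1GB */
		unsigned long ptrs_per_pud = 512;
		unsigned long end_pfn = 1UL << 20;	/* 4GB of RAM */

		unsigned long bytes = end_pfn << page_shift;
		/* one PMD page per 1GB chunk of the direct mapping */
		unsigned long n = (bytes + (1UL << pud_shift) - 1) >> pud_shift;
		/* plus the PUD pages referencing them, plus one PGD page */
		n += (n + ptrs_per_pud - 1) / ptrs_per_pud + 1;
		printf("pages needed = %lu\n", n);	/* prints 6 */
		return 0;
	}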
diff --git a/arch/x86_64/kernel/suspend_asm.S b/arch/x86_64/kernel/suspend_asm.S
index 4d659e97df10..320b6fb00cca 100644
--- a/arch/x86_64/kernel/suspend_asm.S
+++ b/arch/x86_64/kernel/suspend_asm.S
@@ -39,12 +39,13 @@ ENTRY(swsusp_arch_suspend)
 	call	swsusp_save
 	ret
 
-ENTRY(swsusp_arch_resume)
-	/* set up cr3 */
-	leaq	init_level4_pgt(%rip), %rax
-	subq	$__START_KERNEL_map, %rax
-	movq	%rax, %cr3
-
+ENTRY(restore_image)
+	/* switch to temporary page tables */
+	movq	$__PAGE_OFFSET, %rdx
+	movq	temp_level4_pgt(%rip), %rax
+	subq	%rdx, %rax
+	movq	%rax, %cr3
+	/* Flush TLB */
 	movq	mmu_cr4_features(%rip), %rax
 	movq	%rax, %rdx
 	andq	$~(1<<7), %rdx	# PGE
@@ -69,6 +70,10 @@ loop:
 	movq	pbe_next(%rdx), %rdx
 	jmp	loop
 done:
+	/* go back to the original page tables */
+	leaq	init_level4_pgt(%rip), %rax
+	subq	$__START_KERNEL_map, %rax
+	movq	%rax, %cr3
 	/* Flush TLB, including "global" things (vmalloc) */
 	movq	mmu_cr4_features(%rip), %rax
 	movq	%rax, %rdx
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 80a49d9bd8a7..214803821001 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -167,18 +167,16 @@ void __init numa_init_array(void)
    mapping. To avoid this fill in the mapping for all possible
    CPUs, as the number of CPUs is not known yet.
    We round robin the existing nodes. */
-	rr = 0;
+	rr = first_node(node_online_map);
 	for (i = 0; i < NR_CPUS; i++) {
 		if (cpu_to_node[i] != NUMA_NO_NODE)
 			continue;
+		cpu_to_node[i] = rr;
 		rr = next_node(rr, node_online_map);
 		if (rr == MAX_NUMNODES)
 			rr = first_node(node_online_map);
-		cpu_to_node[i] = rr;
-		rr++;
 	}
 
-	set_bit(0, &node_to_cpumask[cpu_to_node(0)]);
 }
 
 #ifdef CONFIG_NUMA_EMU
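
The rewritten loop assigns the current round-robin node first and only then advances, wrapping inside the online map; the old code advanced before assigning and then did a stray rr++, which could step outside the set of valid node ids when node numbers are sparse. A userspace sketch of the corrected scheme (illustrative names):

	#include <stdio.h>

	int main(void)
	{
		int online[] = { 0, 2, 5 };	/* sparse online node ids */
		int nnodes = 3, idx = 0;

		for (int cpu = 0; cpu < 8; cpu++) {
			printf("cpu %d -> node %d\n", cpu, online[idx]);
			idx = (idx + 1) % nnodes;	/* next_node() + wrap */
		}
		return 0;
	}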
@@ -266,9 +264,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 
 __cpuinit void numa_add_cpu(int cpu)
 {
-	/* BP is initialized elsewhere */
-	if (cpu)
-		set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
+	set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
 }
 
 unsigned long __init numa_free_all_bootmem(void)
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 94862e1ec032..b90e8fe9eeb0 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -220,8 +220,6 @@ void global_flush_tlb(void)
 	down_read(&init_mm.mmap_sem);
 	df = xchg(&df_list, NULL);
 	up_read(&init_mm.mmap_sem);
-	if (!df)
-		return;
 	flush_map((df && !df->next) ? df->address : 0);
 	for (; df; df = next_df) {
 		next_df = df->next;
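
With the early return gone, flush_map() now runs even when the deferred list came back empty, so the global flush is no longer skipped in that case; the xchg() still detaches the whole list atomically before it is walked. A C11 userspace sketch of the detach-then-flush pattern (types and names are illustrative):

	#include <stdatomic.h>
	#include <stdio.h>

	struct deferred { struct deferred *next; unsigned long address; };

	static _Atomic(struct deferred *) df_list;

	static void flush_map(unsigned long addr)
	{
		printf("flush %#lx\n", addr);	/* 0 means flush everything */
	}

	static void drain_and_flush(void)
	{
		struct deferred *next, *df = atomic_exchange(&df_list, NULL);

		/* flush unconditionally -- the deleted early return skipped
		   this whenever the list happened to be empty */
		flush_map((df && !df->next) ? df->address : 0);
		for (; df; df = next)
			next = df->next;	/* real code frees df here */
	}

	int main(void)
	{
		drain_and_flush();	/* empty list: still flushes with 0 */
		return 0;
	}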