path: root/arch/x86_64/kernel
author    Tony Luck <tony.luck@intel.com>  2005-10-20 13:41:44 -0400
committer Tony Luck <tony.luck@intel.com>  2005-10-20 13:41:44 -0400
commit    9cec58dc138d6fcad9f447a19c8ff69f6540e667
tree      4fe1cca94fdba8b705c87615bee06d3346f687ce /arch/x86_64/kernel
parent    17e5ad6c0ce5a970e2830d0de8bdd60a2f077d38
parent    ac9b9c667c2e1194e22ebe0a441ae1c37aaa9b90
Update from upstream with manual merge of Yasunori Goto's changes to
swiotlb.c made in commit 281dd25cdc0d6903929b79183816d151ea626341,
since this file has been moved from arch/ia64/lib/swiotlb.c to
lib/swiotlb.c.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/x86_64/kernel')
 -rw-r--r--  arch/x86_64/kernel/e820.c         |   2
 -rw-r--r--  arch/x86_64/kernel/head.S         |  40
 -rw-r--r--  arch/x86_64/kernel/kprobes.c      |   8
 -rw-r--r--  arch/x86_64/kernel/mce.c          |  10
 -rw-r--r--  arch/x86_64/kernel/setup.c        |  22
 -rw-r--r--  arch/x86_64/kernel/setup64.c      |   4
 -rw-r--r--  arch/x86_64/kernel/smpboot.c      |   6
 -rw-r--r--  arch/x86_64/kernel/suspend.c      | 127
 -rw-r--r--  arch/x86_64/kernel/suspend_asm.S  |  17
 -rw-r--r--  arch/x86_64/kernel/time.c         |   3
 -rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c  |   4
 11 files changed, 195 insertions(+), 48 deletions(-)
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 4e34b0f9d613..ab3f87aaff70 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -17,6 +17,8 @@
 #include <linux/ioport.h>
 #include <linux/string.h>
 #include <linux/kexec.h>
+#include <linux/module.h>
+
 #include <asm/page.h>
 #include <asm/e820.h>
 #include <asm/proto.h>
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 4592bf21fcaf..b92e5f45ed46 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -270,26 +270,26 @@ ENTRY(level3_kernel_pgt)
 .org 0x4000
 ENTRY(level2_ident_pgt)
 	/* 40MB for bootup. */
-	.quad	0x0000000000000183
-	.quad	0x0000000000200183
-	.quad	0x0000000000400183
-	.quad	0x0000000000600183
-	.quad	0x0000000000800183
-	.quad	0x0000000000A00183
-	.quad	0x0000000000C00183
-	.quad	0x0000000000E00183
-	.quad	0x0000000001000183
-	.quad	0x0000000001200183
-	.quad	0x0000000001400183
-	.quad	0x0000000001600183
-	.quad	0x0000000001800183
-	.quad	0x0000000001A00183
-	.quad	0x0000000001C00183
-	.quad	0x0000000001E00183
-	.quad	0x0000000002000183
-	.quad	0x0000000002200183
-	.quad	0x0000000002400183
-	.quad	0x0000000002600183
+	.quad	0x0000000000000083
+	.quad	0x0000000000200083
+	.quad	0x0000000000400083
+	.quad	0x0000000000600083
+	.quad	0x0000000000800083
+	.quad	0x0000000000A00083
+	.quad	0x0000000000C00083
+	.quad	0x0000000000E00083
+	.quad	0x0000000001000083
+	.quad	0x0000000001200083
+	.quad	0x0000000001400083
+	.quad	0x0000000001600083
+	.quad	0x0000000001800083
+	.quad	0x0000000001A00083
+	.quad	0x0000000001C00083
+	.quad	0x0000000001E00083
+	.quad	0x0000000002000083
+	.quad	0x0000000002200083
+	.quad	0x0000000002400083
+	.quad	0x0000000002600083
 	/* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
 	.globl temp_boot_pmds
 temp_boot_pmds:
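
Note on the hunk above: the only bit that changes in each entry is bit 8 — 0x183 becomes 0x083, clearing _PAGE_GLOBAL from the 2 MB boot-time identity mappings so they no longer survive CR3 reloads in the TLB (which matters once swsusp switches page tables, below). A minimal standalone sketch — not kernel code, flag positions assumed from the usual x86-64 page-table layout — that decodes the two constants:

	#include <stdio.h>
	#include <stdint.h>

	#define _PAGE_PRESENT 0x001ULL
	#define _PAGE_RW      0x002ULL
	#define _PAGE_PSE     0x080ULL	/* 2 MB large page */
	#define _PAGE_GLOBAL  0x100ULL	/* entry survives CR3 reloads */

	static void decode(uint64_t pte)
	{
		printf("%#018llx:%s%s%s%s\n", (unsigned long long)pte,
		       pte & _PAGE_PRESENT ? " PRESENT" : "",
		       pte & _PAGE_RW      ? " RW"      : "",
		       pte & _PAGE_PSE     ? " PSE"     : "",
		       pte & _PAGE_GLOBAL  ? " GLOBAL"  : "");
	}

	int main(void)
	{
		decode(0x0000000000000183ULL);	/* old entry: PRESENT RW PSE GLOBAL */
		decode(0x0000000000000083ULL);	/* new entry: GLOBAL cleared */
		return 0;
	}

Bits 0/1 (present, writable) and bit 7 (PSE) are untouched; only the global bit drops.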
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index df08c43276a0..76a28b007be9 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -77,9 +77,9 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	/* insn: must be on special executable page on x86_64. */
-	up(&kprobe_mutex);
-	p->ainsn.insn = get_insn_slot();
-	down(&kprobe_mutex);
+	down(&kprobe_mutex);
+	p->ainsn.insn = get_insn_slot();
+	up(&kprobe_mutex);
 	if (!p->ainsn.insn) {
 		return -ENOMEM;
 	}
@@ -231,9 +231,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	up(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
-	down(&kprobe_mutex);
+	down(&kprobe_mutex);
+	free_insn_slot(p->ainsn.insn);
+	up(&kprobe_mutex);
 }
 
 static inline void save_previous_kprobe(void)
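
The two hunks above fix an inverted locking sequence: the old code called up(&kprobe_mutex) before down(), releasing a semaphore it never held, so get_insn_slot() and free_insn_slot() ran unserialized. The fix takes the lock first and releases it after the critical section. A user-space sketch of the corrected discipline, with a pthread mutex standing in for kprobe_mutex (all names here are hypothetical):

	#include <pthread.h>
	#include <stdlib.h>

	#define MAX_SLOTS 16

	static pthread_mutex_t slot_mutex = PTHREAD_MUTEX_INITIALIZER;
	static void *slots[MAX_SLOTS];
	static int next_slot;

	static void *get_slot(void)	/* caller must hold slot_mutex */
	{
		if (next_slot >= MAX_SLOTS)
			return NULL;
		slots[next_slot] = malloc(64);
		return slots[next_slot++];
	}

	void *prepare_probe(void)
	{
		void *insn;

		pthread_mutex_lock(&slot_mutex);	/* down(): take the lock first */
		insn = get_slot();
		pthread_mutex_unlock(&slot_mutex);	/* up(): release after the work */
		return insn;
	}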
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 08203b07f4bd..69541db5ff2c 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -54,9 +54,12 @@ void mce_log(struct mce *mce)
 {
 	unsigned next, entry;
 	mce->finished = 0;
-	smp_wmb();
+	wmb();
 	for (;;) {
 		entry = rcu_dereference(mcelog.next);
+		/* The rmb forces the compiler to reload next in each
+		   iteration */
+		rmb();
 		for (;;) {
 			/* When the buffer fills up discard new entries. Assume
 			   that the earlier errors are the more interesting. */
@@ -69,6 +72,7 @@ void mce_log(struct mce *mce)
 			entry++;
 			continue;
 		}
+		break;
 	}
 	smp_rmb();
 	next = entry + 1;
@@ -76,9 +80,9 @@ void mce_log(struct mce *mce)
 		break;
 	}
 	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
-	smp_wmb();
+	wmb();
 	mcelog.entry[entry].finished = 1;
-	smp_wmb();
+	wmb();
 
 	if (!test_and_set_bit(0, &console_logged))
 		notify_user = 1;
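
Two things happen in the mce.c hunks: a break now terminates the outer loop once a usable entry is found, and the smp_wmb()/new rmb() calls become mandatory barriers — wmb()/rmb() do not compile away on uniprocessor builds, which matters here presumably because a machine check can interrupt the same CPU mid-update. The ordering being protected is the classic publish pattern: fill the record, fence, then set its finished flag, so a reader never sees the flag before the payload. A standalone C11 sketch of that pattern (illustrative, not the kernel code):

	#include <stdatomic.h>
	#include <string.h>

	struct record { char data[48]; _Atomic int finished; };
	static struct record ring[32];
	static _Atomic unsigned next_entry;

	void publish(const char *payload)
	{
		unsigned entry = atomic_fetch_add(&next_entry, 1) % 32;

		ring[entry].finished = 0;
		atomic_thread_fence(memory_order_release);	/* ~ wmb() */
		strncpy(ring[entry].data, payload, sizeof ring[entry].data - 1);
		atomic_thread_fence(memory_order_release);	/* ~ wmb() */
		atomic_store_explicit(&ring[entry].finished, 1,
				      memory_order_relaxed);
	}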
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 351d8d64c2fb..cb28df14ff6f 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -836,6 +836,23 @@ static int __init init_amd(struct cpuinfo_x86 *c)
 	int r;
 	int level;
 
+#ifdef CONFIG_SMP
+	unsigned long value;
+
+	/*
+	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
+	 * bit 6 of msr C001_0015
+	 *
+	 * Errata 63 for SH-B3 steppings
+	 * Errata 122 for all steppings (F+ have it disabled by default)
+	 */
+	if (c->x86 == 15) {
+		rdmsrl(MSR_K8_HWCR, value);
+		value |= 1 << 6;
+		wrmsrl(MSR_K8_HWCR, value);
+	}
+#endif
+
 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
 	clear_bit(0*32+31, &c->x86_capability);
@@ -950,13 +967,12 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
 static void srat_detect_node(void)
 {
 #ifdef CONFIG_NUMA
-	unsigned apicid, node;
+	unsigned node;
 	int cpu = smp_processor_id();
 
 	/* Don't do the funky fallback heuristics the AMD version employs
 	   for now. */
-	apicid = phys_proc_id[cpu];
-	node = apicid_to_node[apicid];
+	node = apicid_to_node[hard_smp_processor_id()];
 	if (node == NUMA_NO_NODE)
 		node = 0;
 	cpu_to_node[cpu] = node;
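
The first setup.c hunk does a read-modify-write of MSR C001_0015 (K8 HWCR), setting bit 6 (FFDIS) on every family-15 part to disable the TLB flush filter per errata 63/122. If you want to inspect that bit from user space, a sketch using the standard msr module interface (assumes root and a loaded msr module; constants as in the hunk):

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>

	#define MSR_K8_HWCR 0xc0010015
	#define HWCR_FFDIS  (1ULL << 6)

	int main(void)
	{
		uint64_t hwcr;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		/* the msr device reads 8 bytes at offset == MSR index */
		if (fd < 0 || pread(fd, &hwcr, 8, MSR_K8_HWCR) != 8) {
			perror("msr");
			return 1;
		}
		printf("HWCR = %#llx, flush filter %s\n", (unsigned long long)hwcr,
		       hwcr & HWCR_FFDIS ? "disabled (FFDIS set)" : "enabled");
		close(fd);
		return 0;
	}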
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index bd33be24a386..79190891fbc5 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -87,6 +87,10 @@ void __init setup_per_cpu_areas(void)
 	int i;
 	unsigned long size;
 
+#ifdef CONFIG_HOTPLUG_CPU
+	prefill_possible_map();
+#endif
+
 	/* Copy section for each CPU (we discard the original) */
 	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
 #ifdef CONFIG_MODULES
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index e12d7baeb33e..658a81b33f3b 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -892,7 +892,7 @@ static __init void disable_smp(void)
  * those NR_CPUS, hence cpu_possible_map represents entire NR_CPUS range.
  * - Ashok Raj
  */
-static void prefill_possible_map(void)
+__init void prefill_possible_map(void)
 {
 	int i;
 	for (i = 0; i < NR_CPUS; i++)
@@ -967,10 +967,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	current_cpu_data = boot_cpu_data;
 	current_thread_info()->cpu = 0;  /* needed? */
 
-#ifdef CONFIG_HOTPLUG_CPU
-	prefill_possible_map();
-#endif
-
 	if (smp_sanity_check(max_cpus) < 0) {
 		printk(KERN_INFO "SMP disabled\n");
 		disable_smp();
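
The smpboot.c hunks un-static prefill_possible_map() and, together with the setup64.c hunk above, move its call from smp_prepare_cpus() into setup_per_cpu_areas(): with CPU hotplug, every possible CPU needs a per-CPU area, so the possible map has to be filled before those areas are carved out. A toy standalone sketch of that ordering constraint (not kernel code; names merely mirror the kernel's):

	#include <stdio.h>
	#include <stdlib.h>

	#define NR_CPUS 8

	static unsigned long possible_map;	/* toy stand-in for cpu_possible_map */
	static char *percpu_area[NR_CPUS];

	static void prefill_possible_map(void)	/* must run first... */
	{
		for (int i = 0; i < NR_CPUS; i++)
			possible_map |= 1UL << i;
	}

	static void setup_per_cpu_areas(void)	/* ...so this allocates for every CPU */
	{
		for (int i = 0; i < NR_CPUS; i++)
			if (possible_map & (1UL << i))
				percpu_area[i] = calloc(1, 4096);
	}

	int main(void)
	{
		prefill_possible_map();	/* the call the hunks move earlier */
		setup_per_cpu_areas();
		printf("cpu 7 area: %p\n", (void *)percpu_area[7]);	/* non-NULL */
		return 0;
	}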
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index ebb9abf3ce6d..f066c6ab3618 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -11,6 +11,8 @@
 #include <linux/smp.h>
 #include <linux/suspend.h>
 #include <asm/proto.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
 
 struct saved_context saved_context;
 
@@ -140,4 +142,129 @@ void fix_processor_context(void)
 
 }
 
+#ifdef CONFIG_SOFTWARE_SUSPEND
+/* Defined in arch/x86_64/kernel/suspend_asm.S */
+extern int restore_image(void);
 
+pgd_t *temp_level4_pgt;
+
+static void **pages;
+
+static inline void *__add_page(void)
+{
+	void **c;
+
+	c = (void **)get_usable_page(GFP_ATOMIC);
+	if (c) {
+		*c = pages;
+		pages = c;
+	}
+	return c;
+}
+
+static inline void *__next_page(void)
+{
+	void **c;
+
+	c = pages;
+	if (c) {
+		pages = *c;
+		*c = NULL;
+	}
+	return c;
+}
+
+/*
+ * Try to allocate as many usable pages as needed and daisy chain them.
+ * If one allocation fails, free the pages allocated so far
+ */
+static int alloc_usable_pages(unsigned long n)
+{
+	void *p;
+
+	pages = NULL;
+	do
+		if (!__add_page())
+			break;
+	while (--n);
+	if (n) {
+		p = __next_page();
+		while (p) {
+			free_page((unsigned long)p);
+			p = __next_page();
+		}
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+{
+	long i, j;
+
+	i = pud_index(address);
+	pud = pud + i;
+	for (; i < PTRS_PER_PUD; pud++, i++) {
+		unsigned long paddr;
+		pmd_t *pmd;
+
+		paddr = address + i*PUD_SIZE;
+		if (paddr >= end)
+			break;
+
+		pmd = (pmd_t *)__next_page();
+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
+			unsigned long pe;
+
+			if (paddr >= end)
+				break;
+			pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | paddr;
+			pe &= __supported_pte_mask;
+			set_pmd(pmd, __pmd(pe));
+		}
+	}
+}
+
+static void set_up_temporary_mappings(void)
+{
+	unsigned long start, end, next;
+
+	temp_level4_pgt = (pgd_t *)__next_page();
+
+	/* It is safe to reuse the original kernel mapping */
+	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
+		init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+
+	/* Set up the direct mapping from scratch */
+	start = (unsigned long)pfn_to_kaddr(0);
+	end = (unsigned long)pfn_to_kaddr(end_pfn);
+
+	for (; start < end; start = next) {
+		pud_t *pud = (pud_t *)__next_page();
+		next = start + PGDIR_SIZE;
+		if (next > end)
+			next = end;
+		res_phys_pud_init(pud, __pa(start), __pa(next));
+		set_pgd(temp_level4_pgt + pgd_index(start),
+			mk_kernel_pgd(__pa(pud)));
+	}
+}
+
+int swsusp_arch_resume(void)
+{
+	unsigned long n;
+
+	n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;
+	n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;
+	pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n);
+	if (alloc_usable_pages(n)) {
+		free_eaten_memory();
+		return -ENOMEM;
+	}
+	/* We have got enough memory and from now on we cannot recover */
+	set_up_temporary_mappings();
+	restore_image();
+	return 0;
+}
+#endif /* CONFIG_SOFTWARE_SUSPEND */
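
The page-count estimate in swsusp_arch_resume() above works out to: one page of PMDs per 1 GB of direct-mapped RAM, plus enough PUD pages to hold those PMD pointers, plus one top-level PGD page. A standalone re-run of that arithmetic under the usual x86-64 constants — for 4 GB of RAM it comes to 4 + 1 + 1 = 6 pages:

	#include <stdio.h>

	#define PAGE_SHIFT   12
	#define PUD_SHIFT    30			/* one page of PMDs maps 1 GB */
	#define PUD_SIZE     (1ULL << PUD_SHIFT)
	#define PTRS_PER_PUD 512

	int main(void)
	{
		unsigned long long end_pfn = 4ULL << (30 - PAGE_SHIFT);	/* 4 GB of RAM */
		unsigned long long n;

		n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;	/* PMD pages: 4 */
		n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;	/* + 1 PUD page + 1 PGD */
		printf("pages needed = %llu\n", n);	/* prints 6 */
		return 0;
	}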
diff --git a/arch/x86_64/kernel/suspend_asm.S b/arch/x86_64/kernel/suspend_asm.S
index 4d659e97df10..320b6fb00cca 100644
--- a/arch/x86_64/kernel/suspend_asm.S
+++ b/arch/x86_64/kernel/suspend_asm.S
@@ -39,12 +39,13 @@ ENTRY(swsusp_arch_suspend)
 	call	swsusp_save
 	ret
 
-ENTRY(swsusp_arch_resume)
-	/* set up cr3 */
-	leaq	init_level4_pgt(%rip),%rax
-	subq	$__START_KERNEL_map,%rax
-	movq	%rax,%cr3
-
+ENTRY(restore_image)
+	/* switch to temporary page tables */
+	movq	$__PAGE_OFFSET, %rdx
+	movq	temp_level4_pgt(%rip), %rax
+	subq	%rdx, %rax
+	movq	%rax, %cr3
+	/* Flush TLB */
 	movq	mmu_cr4_features(%rip), %rax
 	movq	%rax, %rdx
 	andq	$~(1<<7), %rdx	# PGE
@@ -69,6 +70,10 @@ loop:
 	movq	pbe_next(%rdx), %rdx
 	jmp	loop
 done:
+	/* go back to the original page tables */
+	leaq	init_level4_pgt(%rip), %rax
+	subq	$__START_KERNEL_map, %rax
+	movq	%rax, %cr3
 	/* Flush TLB, including "global" things (vmalloc) */
 	movq	mmu_cr4_features(%rip), %rax
 	movq	%rax, %rdx
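
restore_image now runs the copy loop on temp_level4_pgt — the safe page tables swsusp_arch_resume() built outside the image being restored — and only switches back to init_level4_pgt at done:, once the original tables have been copied back into memory. The flush around each CR3 switch clears and then restores CR4.PGE (bit 7), which invalidates even global TLB entries. A C model of that flush (illustrative only — CR4 is ring-0 state, so the stub here is hypothetical; the real code is the assembly above):

	#include <stdio.h>
	#include <stdint.h>

	#define X86_CR4_PGE (1ULL << 7)	/* global-page enable, the bit masked above */

	static uint64_t cr4;			/* stand-in for the real register */

	static void write_cr4(uint64_t v)	/* hypothetical stub */
	{
		cr4 = v;
	}

	static void flush_tlb_all(uint64_t mmu_cr4_features)
	{
		/* Clearing PGE invalidates every TLB entry, global ones included;
		   writing the saved value back re-enables global pages. */
		write_cr4(mmu_cr4_features & ~X86_CR4_PGE);
		write_cr4(mmu_cr4_features);
	}

	int main(void)
	{
		flush_tlb_all(X86_CR4_PGE | 0x30);
		printf("cr4 restored to %#llx\n", (unsigned long long)cr4);
		return 0;
	}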
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 2373cb8b8625..703acde2a1a5 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -959,9 +959,6 @@ static __init int unsynchronized_tsc(void)
 	   are handled in the OEM check above. */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 		return 0;
-	/* All in a single socket - should be synchronized */
-	if (cpus_weight(cpu_core_map[0]) == num_online_cpus())
-		return 0;
 #endif
 	/* Assume multi socket systems are not synchronized */
 	return num_online_cpus() > 1;
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 68ec03070e5a..fd99ddd009bc 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -178,10 +178,6 @@ EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
 
 EXPORT_SYMBOL(empty_zero_page);
 
-#ifdef CONFIG_HAVE_DEC_LOCK
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
-
 EXPORT_SYMBOL(die_chain);
 EXPORT_SYMBOL(register_die_notifier);
 