Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/kernel/kprobes.c |  8 ++++----
-rw-r--r--  arch/x86_64/kernel/mce.c     | 10 +++++++---
-rw-r--r--  arch/x86_64/kernel/setup.c   | 27 ++++++++++++++-------------
-rw-r--r--  arch/x86_64/kernel/time.c    |  3 ---
-rw-r--r--  arch/x86_64/mm/numa.c        | 10 +++-------
5 files changed, 28 insertions(+), 30 deletions(-)
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index df08c43276a0..76a28b007be9 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -77,9 +77,9 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	/* insn: must be on special executable page on x86_64. */
-	up(&kprobe_mutex);
+	down(&kprobe_mutex);
 	p->ainsn.insn = get_insn_slot();
-	down(&kprobe_mutex);
+	up(&kprobe_mutex);
 	if (!p->ainsn.insn) {
 		return -ENOMEM;
 	}
@@ -231,9 +231,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	up(&kprobe_mutex);
+	down(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
-	down(&kprobe_mutex);
+	up(&kprobe_mutex);
 }
 
 static inline void save_previous_kprobe(void)
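
The two kprobes hunks fix an inverted semaphore pairing: the old code called up(&kprobe_mutex) before entering the critical section and down() after it, so get_insn_slot() and free_insn_slot() ran with the lock released rather than held. A minimal userspace sketch of the corrected acquire/work/release shape, with a pthread mutex standing in for the kernel semaphore and a toy allocator standing in for get_insn_slot() (both stand-ins are illustrative, not kernel code):

#include <pthread.h>
#include <stddef.h>

#define NSLOTS 16

static pthread_mutex_t slot_mutex = PTHREAD_MUTEX_INITIALIZER;
static char slots[NSLOTS][16];
static int used[NSLOTS];

/* Toy allocator: like get_insn_slot(), it has no internal locking,
   so callers must hold slot_mutex across the call. */
static void *get_slot(void)
{
	for (int i = 0; i < NSLOTS; i++) {
		if (!used[i]) {
			used[i] = 1;
			return slots[i];
		}
	}
	return NULL;
}

void *prepare(void)
{
	void *insn;

	pthread_mutex_lock(&slot_mutex);	/* was wrongly up(): released first */
	insn = get_slot();			/* shared free list touched under the lock */
	pthread_mutex_unlock(&slot_mutex);	/* pairing now acquire -> work -> release */
	return insn;				/* NULL corresponds to the -ENOMEM path */
}
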
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 08203b07f4bd..69541db5ff2c 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -54,9 +54,12 @@ void mce_log(struct mce *mce)
 {
 	unsigned next, entry;
 	mce->finished = 0;
-	smp_wmb();
+	wmb();
 	for (;;) {
 		entry = rcu_dereference(mcelog.next);
+		/* The rmb forces the compiler to reload next in each
+		   iteration */
+		rmb();
 		for (;;) {
 			/* When the buffer fills up discard new entries. Assume
 			   that the earlier errors are the more interesting. */
@@ -69,6 +72,7 @@ void mce_log(struct mce *mce)
 			entry++;
 			continue;
 		}
+		break;
 	}
 	smp_rmb();
 	next = entry + 1;
@@ -76,9 +80,9 @@ void mce_log(struct mce *mce)
 			break;
 	}
 	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
-	smp_wmb();
+	wmb();
 	mcelog.entry[entry].finished = 1;
-	smp_wmb();
+	wmb();
 
 	if (!test_and_set_bit(0, &console_logged))
 		notify_user = 1;
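
Three related fixes in mce_log(): the plain wmb() replaces smp_wmb() so the store ordering survives on uniprocessor builds (where the smp_ variants reduce to compiler barriers), a new rmb() forces mcelog.next to be re-read on every retry of the outer loop, and the restored break lets the inner scan loop stop once a usable slot is found. The structure is a reserve-then-publish ring: claim a slot index first, write the payload, and only then set the entry's finished flag. A rough userspace model using C11 atomics in place of the kernel's raw barriers and cmpxchg (ring, log_append, and the sizes are invented for the sketch; the kernel's overflow handling is simplified to a drop):

#include <stdatomic.h>
#include <stdbool.h>
#include <string.h>

#define LOG_LEN 32

struct rec {
	char payload[64];
	atomic_int finished;		/* 0: slot reserved, 1: contents valid */
};

static struct rec ring[LOG_LEN];
static atomic_uint next_free;		/* index of the first unreserved slot */

bool log_append(const char *msg)
{
	unsigned entry;

	for (;;) {
		/* Re-read the index on every retry, as the new rmb() ensures. */
		entry = atomic_load_explicit(&next_free, memory_order_acquire);
		if (entry >= LOG_LEN)
			return false;	/* full: keep the earlier, more interesting records */
		/* Claim the slot; on a lost race, loop and reload. */
		if (atomic_compare_exchange_weak_explicit(&next_free, &entry,
				entry + 1, memory_order_acq_rel,
				memory_order_acquire))
			break;
	}
	strncpy(ring[entry].payload, msg, sizeof(ring[entry].payload) - 1);
	/* Publish: release ordering keeps the copy ahead of finished = 1,
	   playing the role of the wmb() before setting ->finished. */
	atomic_store_explicit(&ring[entry].finished, 1, memory_order_release);
	return true;
}
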
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 238f73e1a834..cb28df14ff6f 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -831,8 +831,6 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 #endif
 }
 
-#define HWCR 0xc0010015
-
 static int __init init_amd(struct cpuinfo_x86 *c)
 {
 	int r;
@@ -841,14 +839,18 @@ static int __init init_amd(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SMP
 	unsigned long value;
 
-	// Disable TLB flush filter by setting HWCR.FFDIS:
-	// bit 6 of msr C001_0015
-	//
-	// Errata 63 for SH-B3 steppings
-	// Errata 122 for all(?) steppings
-	rdmsrl(HWCR, value);
-	value |= 1 << 6;
-	wrmsrl(HWCR, value);
+	/*
+	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
+	 * bit 6 of msr C001_0015
+	 *
+	 * Errata 63 for SH-B3 steppings
+	 * Errata 122 for all steppings (F+ have it disabled by default)
+	 */
+	if (c->x86 == 15) {
+		rdmsrl(MSR_K8_HWCR, value);
+		value |= 1 << 6;
+		wrmsrl(MSR_K8_HWCR, value);
+	}
 #endif
 
 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
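
This rewrite scopes the flush-filter workaround to family 0xF (K8), matching the updated comment that later families have the filter disabled by default, and replaces the file-local HWCR define with the kernel's MSR_K8_HWCR constant. For illustration only, the same read-modify-write can be driven from userspace through the msr(4) character device; this assumes root, a loaded msr module, and stubs the family check that a real tool would do via cpuid:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_K8_HWCR	0xc0010015
#define HWCR_FFDIS	(1ULL << 6)	/* TLB flush filter disable */

/* Stub: assume a K8 for the sketch; real code would check cpuid. */
static int cpu_family(void) { return 15; }

int main(void)
{
	uint64_t value;
	int fd;

	if (cpu_family() != 15)		/* only family 0xF needs the erratum workaround */
		return 0;
	fd = open("/dev/cpu/0/msr", O_RDWR);
	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	if (pread(fd, &value, sizeof(value), MSR_K8_HWCR) != sizeof(value)) {
		perror("rdmsr");
		return 1;
	}
	value |= HWCR_FFDIS;		/* same read-modify-write as rdmsrl/wrmsrl */
	if (pwrite(fd, &value, sizeof(value), MSR_K8_HWCR) != sizeof(value)) {
		perror("wrmsr");
		return 1;
	}
	close(fd);
	return 0;
}
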
@@ -965,13 +967,12 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
 static void srat_detect_node(void)
 {
 #ifdef CONFIG_NUMA
-	unsigned apicid, node;
+	unsigned node;
 	int cpu = smp_processor_id();
 
 	/* Don't do the funky fallback heuristics the AMD version employs
 	   for now. */
-	apicid = phys_proc_id[cpu];
-	node = apicid_to_node[apicid];
+	node = apicid_to_node[hard_smp_processor_id()];
 	if (node == NUMA_NO_NODE)
 		node = 0;
 	cpu_to_node[cpu] = node;
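
srat_detect_node() previously indexed apicid_to_node[] with phys_proc_id[cpu], the physical package id, which need not match a per-CPU APIC id on multi-core parts; the fix uses hard_smp_processor_id(), the CPU's own APIC id, directly. A small model of the resulting lookup (array size and sentinel value are invented; in the kernel the table is filled from the ACPI SRAT at boot):

#include <assert.h>

#define MAX_APICS	256
#define NUMA_NO_NODE	0xffu

static unsigned apicid_to_node[MAX_APICS];	/* populated from SRAT in the kernel */

unsigned node_for_apicid(unsigned apicid)
{
	unsigned node;

	assert(apicid < MAX_APICS);
	node = apicid_to_node[apicid];	/* index by the CPU's own APIC id */
	if (node == NUMA_NO_NODE)
		node = 0;		/* no SRAT entry: fall back to node 0 */
	return node;
}
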
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 2373cb8b8625..703acde2a1a5 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -959,9 +959,6 @@ static __init int unsynchronized_tsc(void)
 	   are handled in the OEM check above. */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 		return 0;
-	/* All in a single socket - should be synchronized */
-	if (cpus_weight(cpu_core_map[0]) == num_online_cpus())
-		return 0;
 #endif
 	/* Assume multi socket systems are not synchronized */
 	return num_online_cpus() > 1;
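
With the single-socket shortcut gone, unsynchronized_tsc() no longer uses cpu_core_map to argue that one package implies synchronized TSCs; past the Intel check, any system with more than one online CPU is conservatively assumed unsynchronized. Boiled down to its final shape (the earlier vendor/OEM special cases are collapsed into one flag for the sketch):

#include <stdbool.h>

bool unsynchronized_tsc_model(bool intel_smp_exception, int online_cpus)
{
	if (intel_smp_exception)
		return false;		/* Intel SMP handled by the checks kept above */
	/* Assume multi socket systems are not synchronized. */
	return online_cpus > 1;
}
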
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 80a49d9bd8a7..214803821001 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -167,18 +167,16 @@ void __init numa_init_array(void)
 	   mapping. To avoid this fill in the mapping for all possible
 	   CPUs, as the number of CPUs is not known yet.
 	   We round robin the existing nodes. */
-	rr = 0;
+	rr = first_node(node_online_map);
 	for (i = 0; i < NR_CPUS; i++) {
 		if (cpu_to_node[i] != NUMA_NO_NODE)
 			continue;
+		cpu_to_node[i] = rr;
 		rr = next_node(rr, node_online_map);
 		if (rr == MAX_NUMNODES)
 			rr = first_node(node_online_map);
-		cpu_to_node[i] = rr;
-		rr++;
 	}
 
-	set_bit(0, &node_to_cpumask[cpu_to_node(0)]);
 }
 
 #ifdef CONFIG_NUMA_EMU
@@ -266,9 +264,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 
 __cpuinit void numa_add_cpu(int cpu)
 {
-	/* BP is initialized elsewhere */
-	if (cpu)
-		set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
+	set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
 }
 
 unsigned long __init numa_free_all_bootmem(void)
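
Two fixes in numa.c: numa_init_array() now starts the round robin at the first online node and assigns before advancing (in the old sequence the bare rr++ made the following next_node() call jump past nodes), and numa_add_cpu() now records the boot CPU as well, so the special-cased set_bit(0, ...) in numa_init_array() is dropped. A self-contained model of the corrected round robin, with first_node()/next_node() reimplemented over a plain bool array and invented sizes:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS		8
#define MAX_NUMNODES	4
#define NUMA_NO_NODE	(-1)

static bool node_online[MAX_NUMNODES] = { true, false, true, false };

static int first_node(void)
{
	for (int n = 0; n < MAX_NUMNODES; n++)
		if (node_online[n])
			return n;
	return MAX_NUMNODES;
}

static int next_node(int n)
{
	for (n++; n < MAX_NUMNODES; n++)
		if (node_online[n])
			return n;
	return MAX_NUMNODES;	/* caller wraps back to first_node() */
}

int main(void)
{
	int cpu_node[NR_CPUS];
	int rr = first_node();	/* start at an online node, not blindly at 0 */

	for (int i = 0; i < NR_CPUS; i++)
		cpu_node[i] = NUMA_NO_NODE;

	for (int i = 0; i < NR_CPUS; i++) {
		if (cpu_node[i] != NUMA_NO_NODE)
			continue;
		cpu_node[i] = rr;	/* assign first, then advance */
		rr = next_node(rr);
		if (rr == MAX_NUMNODES)
			rr = first_node();
	}
	for (int i = 0; i < NR_CPUS; i++)
		printf("cpu %d -> node %d\n", i, cpu_node[i]);
	return 0;
}

With nodes 0 and 2 online in the sample map, the CPUs alternate 0, 2, 0, 2, ... as intended.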