-rw-r--r--   arch/x86/include/asm/i387.h        |  9
-rw-r--r--   arch/x86/include/asm/xsave.h       | 10
-rw-r--r--   arch/x86/kernel/apic/es7000_32.c   |  1
-rw-r--r--   arch/x86/kernel/cpu/mtrr/cleanup.c |  6
-rw-r--r--   arch/x86/kernel/cpu/mtrr/main.c    | 56
-rw-r--r--   arch/x86/kernel/cpu/vmware.c       |  9
-rw-r--r--   arch/x86/kernel/smpboot.c          |  7
-rw-r--r--   arch/x86/kernel/xsave.c            | 12
-rw-r--r--   arch/x86/vdso/vma.c                |  1
9 files changed, 83 insertions(+), 28 deletions(-)
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 815c5b2b9f57..f1accc625beb 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -127,6 +127,15 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
         int err;
 
+        /*
+         * Clear the bytes not touched by the fxsave and reserved
+         * for the SW usage.
+         */
+        err = __clear_user(&fx->sw_reserved,
+                           sizeof(struct _fpx_sw_bytes));
+        if (unlikely(err))
+                return -EFAULT;
+
         asm volatile("1: rex64/fxsave (%[fx])\n\t"
                      "2:\n"
                      ".section .fixup,\"ax\"\n"
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 32c36668fa7b..06acdbd7570a 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -65,6 +65,16 @@ static inline int fpu_xrstor_checking(struct fpu *fpu)
 static inline int xsave_user(struct xsave_struct __user *buf)
 {
         int err;
+
+        /*
+         * Clear the xsave header first, so that reserved fields are
+         * initialized to zero.
+         */
+        err = __clear_user(&buf->xsave_hdr,
+                           sizeof(struct xsave_hdr_struct));
+        if (unlikely(err))
+                return -EFAULT;
+
         __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
                              "2:\n"
                              ".section .fixup,\"ax\"\n"
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 425e53a87feb..8593582d8022 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -129,7 +129,6 @@ int es7000_plat;
  * GSI override for ES7000 platforms.
  */
 
-static unsigned int base;
 
 static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
 {
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 06130b52f012..c5f59d071425 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -632,9 +632,9 @@ static void __init mtrr_print_out_one_result(int i)
         unsigned long gran_base, chunk_base, lose_base;
         char gran_factor, chunk_factor, lose_factor;
 
-        gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
-        chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
-        lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
+        gran_base = to_size_factor(result[i].gran_sizek, &gran_factor);
+        chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor);
+        lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor);
 
         pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t",
                 result[i].bad ? "*BAD*" : " ",
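Note: the three assignments above were previously terminated with the comma operator rather than semicolons, chaining them into a single expression statement. That happened to evaluate identically here, but the comma operator silently changes meaning once control flow is involved — a contrived standalone illustration:

    #include <stdio.h>

    int main(void)
    {
            int a = 0, b = 0;

            if (a == 0)
                    a = 1,  /* comma: the next line stays inside this branch */
                    b = 2;

            printf("a=%d b=%d\n", a, b); /* prints a=1 b=2 */
            return 0;
    }

With a semicolon after a = 1, the assignment to b would run unconditionally; with the comma it executes only when the if condition holds.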
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 79556bd9b602..01c0f3ee6cc3 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -35,6 +35,7 @@
 
 #include <linux/types.h> /* FIXME: kvm_para.h needs this */
 
+#include <linux/stop_machine.h>
 #include <linux/kvm_para.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
@@ -143,22 +144,28 @@ struct set_mtrr_data {
         mtrr_type       smp_type;
 };
 
+static DEFINE_PER_CPU(struct cpu_stop_work, mtrr_work);
+
 /**
- * ipi_handler - Synchronisation handler. Executed by "other" CPUs.
+ * mtrr_work_handler - Synchronisation handler. Executed by "other" CPUs.
  * @info: pointer to mtrr configuration data
  *
  * Returns nothing.
  */
-static void ipi_handler(void *info)
+static int mtrr_work_handler(void *info)
 {
 #ifdef CONFIG_SMP
         struct set_mtrr_data *data = info;
         unsigned long flags;
 
+        atomic_dec(&data->count);
+        while (!atomic_read(&data->gate))
+                cpu_relax();
+
         local_irq_save(flags);
 
         atomic_dec(&data->count);
-        while (!atomic_read(&data->gate))
+        while (atomic_read(&data->gate))
                 cpu_relax();
 
         /* The master has cleared me to execute */
@@ -173,12 +180,13 @@ static void ipi_handler(void *info)
         }
 
         atomic_dec(&data->count);
-        while (atomic_read(&data->gate))
+        while (!atomic_read(&data->gate))
                 cpu_relax();
 
         atomic_dec(&data->count);
         local_irq_restore(flags);
 #endif
+        return 0;
 }
 
 static inline int types_compatible(mtrr_type type1, mtrr_type type2)
@@ -198,7 +206,7 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
  *
  * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
  *
- * 1. Send IPI to do the following:
+ * 1. Queue work to do the following on all processors:
  * 2. Disable Interrupts
  * 3. Wait for all procs to do so
  * 4. Enter no-fill cache mode
@@ -215,14 +223,17 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
  * 15. Enable interrupts.
  *
  * What does that mean for us? Well, first we set data.count to the number
- * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
- * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
- * Meanwhile, they are waiting for that flag to be set. Once it's set, each
+ * of CPUs. As each CPU announces that it started the rendezvous handler by
+ * decrementing the count, We reset data.count and set the data.gate flag
+ * allowing all the cpu's to proceed with the work. As each cpu disables
+ * interrupts, it'll decrement data.count once. We wait until it hits 0 and
+ * proceed. We clear the data.gate flag and reset data.count. Meanwhile, they
+ * are waiting for that flag to be cleared. Once it's cleared, each
  * CPU goes through the transition of updating MTRRs.
  * The CPU vendors may each do it differently,
  * so we call mtrr_if->set() callback and let them take care of it.
  * When they're done, they again decrement data->count and wait for data.gate
- * to be reset.
+ * to be set.
  * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag
  * Everyone then enables interrupts and we all continue on.
  *
@@ -234,6 +245,9 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
 {
         struct set_mtrr_data data;
         unsigned long flags;
+        int cpu;
+
+        preempt_disable();
 
         data.smp_reg = reg;
         data.smp_base = base;
@@ -246,10 +260,15 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
         atomic_set(&data.gate, 0);
 
         /* Start the ball rolling on other CPUs */
-        if (smp_call_function(ipi_handler, &data, 0) != 0)
-                panic("mtrr: timed out waiting for other CPUs\n");
+        for_each_online_cpu(cpu) {
+                struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);
+
+                if (cpu == smp_processor_id())
+                        continue;
+
+                stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
+        }
 
-        local_irq_save(flags);
 
         while (atomic_read(&data.count))
                 cpu_relax();
@@ -259,6 +278,16 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
         smp_wmb();
         atomic_set(&data.gate, 1);
 
+        local_irq_save(flags);
+
+        while (atomic_read(&data.count))
+                cpu_relax();
+
+        /* Ok, reset count and toggle gate */
+        atomic_set(&data.count, num_booting_cpus() - 1);
+        smp_wmb();
+        atomic_set(&data.gate, 0);
+
         /* Do our MTRR business */
 
         /*
@@ -279,7 +308,7 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
 
         atomic_set(&data.count, num_booting_cpus() - 1);
         smp_wmb();
-        atomic_set(&data.gate, 0);
+        atomic_set(&data.gate, 1);
 
         /*
          * Wait here for everyone to have seen the gate change
@@ -289,6 +318,7 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
                 cpu_relax();
 
         local_irq_restore(flags);
+        preempt_enable();
 }
 
 /**
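Note on the main.c hunks: the rendezvous is no longer kicked off with smp_call_function() (whose failure was previously fatal) but queued onto each CPU's stopper task via stop_one_cpu_nowait(), so it runs in stop-machine context — which, per the smpboot.c hunk below, also covers CPUs that are just coming online. The handshake gains an extra opening phase: each handler first announces its arrival by decrementing data->count and spins until the master sets data->gate; the master mirrors every phase by draining count to zero, re-arming it, and toggling the gate. A standalone sketch of that count/gate protocol (pthreads and C11 atomics standing in for the kernel primitives; names are illustrative; compile with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NWORKERS 4

    static atomic_int count;
    static atomic_int gate;

    /* Master side: wait for all workers, re-arm count, flip the gate. */
    static void wait_and_flip(int new_gate)
    {
            while (atomic_load(&count))
                    ;                       /* cpu_relax() in the kernel */
            atomic_store(&count, NWORKERS);
            atomic_store(&gate, new_gate);
    }

    static void *worker(void *arg)
    {
            (void)arg;
            atomic_fetch_sub(&count, 1);    /* "I have arrived" */
            while (!atomic_load(&gate))
                    ;                       /* wait for gate = 1 */
            /* phase 1: irqs off, enter no-fill cache mode, ... */
            atomic_fetch_sub(&count, 1);
            while (atomic_load(&gate))
                    ;                       /* wait for gate = 0 */
            /* phase 2: write the MTRRs via mtrr_if->set() */
            atomic_fetch_sub(&count, 1);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[NWORKERS];
            int i;

            atomic_store(&count, NWORKERS);
            atomic_store(&gate, 0);
            for (i = 0; i < NWORKERS; i++)
                    pthread_create(&t[i], NULL, worker, NULL);

            wait_and_flip(1);       /* everyone checked in: start phase 1 */
            wait_and_flip(0);       /* phase 1 done: start phase 2 */
            while (atomic_load(&count))
                    ;               /* wait for phase 2 to finish */
            for (i = 0; i < NWORKERS; i++)
                    pthread_join(t[i], NULL);
            puts("rendezvous complete");
            return 0;
    }

The real handler additionally disables interrupts between its first and second decrement and restores them at the end, exactly as the updated comment block in the diff walks through.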
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index b9d1ff588445..227b0448960d 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -51,7 +51,7 @@ static inline int __vmware_platform(void)
 
 static unsigned long vmware_get_tsc_khz(void)
 {
-        uint64_t tsc_hz;
+        uint64_t tsc_hz, lpj;
         uint32_t eax, ebx, ecx, edx;
 
         VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
@@ -62,6 +62,13 @@ static unsigned long vmware_get_tsc_khz(void)
         printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
                (unsigned long) tsc_hz / 1000,
                (unsigned long) tsc_hz % 1000);
+
+        if (!preset_lpj) {
+                lpj = ((u64)tsc_hz * 1000);
+                do_div(lpj, HZ);
+                preset_lpj = lpj;
+        }
+
         return tsc_hz;
 }
 
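Note: since the hypervisor reports the exact TSC frequency, the kernel can skip delay-loop calibration by presetting loops-per-jiffy. Despite its name, tsc_hz holds the rate in kHz at this point (the printk above treats it that way), so lpj = tsc_hz * 1000 / HZ is cycles per second divided by timer ticks per second; do_div() is used because a plain 64-bit division is not available on 32-bit x86. The arithmetic as a worked standalone example (the HZ value is assumed for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define HZ 250 /* assumed kernel tick rate */

    int main(void)
    {
            uint64_t tsc_khz = 2496000;         /* e.g. a 2.496 GHz TSC */
            uint64_t lpj = tsc_khz * 1000 / HZ; /* what do_div() computes */

            printf("preset_lpj = %llu\n", (unsigned long long)lpj); /* 9984000 */
            return 0;
    }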
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c4f33b2e77d6..11015fd1abbc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -816,6 +816,13 @@ do_rest:
                         if (cpumask_test_cpu(cpu, cpu_callin_mask))
                                 break;  /* It has booted */
                         udelay(100);
+                        /*
+                         * Allow other tasks to run while we wait for the
+                         * AP to come online. This also gives a chance
+                         * for the MTRR work(triggered by the AP coming online)
+                         * to be completed in the stop machine context.
+                         */
+                        schedule();
                 }
 
         if (cpumask_test_cpu(cpu, cpu_callin_mask))
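Note: the boot CPU previously spun in this udelay() poll with no scheduling point. The added schedule() lets other tasks run while waiting — in particular, per the comment, the MTRR rendezvous queued when the AP comes online, which needs the boot CPU's stopper task to get CPU time. A userspace analogue of yielding while polling (illustrative only):

    #include <pthread.h>
    #include <sched.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int booted;

    static void *ap(void *arg)
    {
            (void)arg;
            atomic_store(&booted, 1); /* stands in for the AP's callin */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, ap, NULL);
            while (!atomic_load(&booted))
                    sched_yield();    /* the schedule() of the patch */
            pthread_join(t, NULL);
            puts("AP is online");
            return 0;
    }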
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 980149867a19..a4ae302f03aa 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -92,14 +92,6 @@ int save_i387_xstate(void __user *buf)
                 return 0;
 
         if (task_thread_info(tsk)->status & TS_USEDFPU) {
-                /*
-                 * Start with clearing the user buffer. This will present a
-                 * clean context for the bytes not touched by the fxsave/xsave.
-                 */
-                err = __clear_user(buf, sig_xstate_size);
-                if (err)
-                        return err;
-
                 if (use_xsave())
                         err = xsave_user(buf);
                 else
@@ -185,8 +177,8 @@ static int restore_user_xstate(void __user *buf)
          * init the state skipped by the user.
          */
         mask = pcntxt_mask & ~mask;
-
-        xrstor_state(init_xstate_buf, mask);
+        if (unlikely(mask))
+                xrstor_state(init_xstate_buf, mask);
 
         return 0;
 
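Note on the restore path: the states present in the user's signal frame are restored from it, and only the remainder (mask = pcntxt_mask & ~mask) is re-initialized from init_xstate_buf; the added unlikely() check skips that second xrstor entirely in the common case where the frame already covers every supported state. The mask arithmetic as a standalone example (feature bits shown with their conventional x86 values):

    #include <stdio.h>
    #include <stdint.h>

    #define XSTATE_FP  0x1
    #define XSTATE_SSE 0x2
    #define XSTATE_YMM 0x4

    int main(void)
    {
            uint64_t pcntxt_mask = XSTATE_FP | XSTATE_SSE | XSTATE_YMM; /* CPU supports */
            uint64_t user_mask   = XSTATE_FP | XSTATE_SSE;              /* frame contains */
            uint64_t init_mask   = pcntxt_mask & ~user_mask;

            if (init_mask) /* the new check: skip xrstor when nothing is left */
                    printf("re-init states: %#llx\n", (unsigned long long)init_mask);
            return 0;
    }

Here the user saved FP and SSE but not YMM, so only the YMM state (0x4) is reloaded from the init buffer.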
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 43456ee17692..4b5d26f108bb 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -67,6 +67,7 @@ static int __init init_vdso_vars(void)
                 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
 #include "vextern.h"
 #undef VEXTERN
+        vunmap(vbase);
         return 0;
 
 oom:
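Note: init_vdso_vars() vmap()s the vdso image so the VEXTERN variables can be patched through vbase, but the success path never tore that mapping down; the one-line fix pairs the vmap with a vunmap() once patching is done (the pages themselves stay in use — only the temporary kernel mapping was leaked). The same map/patch/unmap pairing, sketched with mmap/munmap as a userspace stand-in:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4096;
            unsigned char *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (base == MAP_FAILED)
                    return 1;
            base[0] = 0x42;    /* "patch" through the mapping */
            munmap(base, len); /* the vunmap() the patch adds */
            puts("mapping released");
            return 0;
    }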
