author     Dimitri Sivanich <sivanich@sgi.com>    2010-03-05 12:42:03 -0500
committer  Ingo Molnar <mingo@elte.hu>            2010-03-11 07:49:06 -0500
commit     938179b4f8cf8a4f11234ebf2dff2eb48400acfe (patch)
tree       3dd4f2f66b68bdf0f488856e567fae898b29d4f5
parent     522dba7134d6b2e5821d3457f7941ec34f668e6d (diff)
x86: Improve Intel microcode loader performance
We've noticed that on large SGI UV system configurations, running
microcode.ctl can take very long periods of time. This is due to the
large number of vmalloc/vfree calls made by the Intel
generic_load_microcode() logic.

By reusing allocated space, the following patch reduces the time to run
microcode.ctl on a 1024 cpu system from approximately 80 seconds down
to 1 or 2 seconds.

Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Acked-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Cc: Avi Kivity <avi@redhat.com>
Cc: Bill Davidsen <davidsen@tmr.com>
LKML-Reference: <20100305174203.GA19638@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--    arch/x86/kernel/microcode_intel.c    22
1 file changed, 16 insertions(+), 6 deletions(-)
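Before the patch itself, here is a minimal, self-contained userspace sketch of the buffer-reuse pattern the change introduces: keep one scratch buffer across loop iterations and reallocate it only when the next record is larger than the current capacity. This is illustrative only; malloc()/free() stand in for the kernel's vmalloc()/vfree(), and the record sizes and variable names are invented for the example.

/*
 * Userspace sketch of the buffer-reuse pattern applied by the patch
 * below.  malloc()/free() stand in for vmalloc()/vfree(); the record
 * sizes are made up.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t sizes[] = { 4096, 2048, 8192, 1024, 8192 };
	unsigned char *buf = NULL;	/* reusable scratch buffer */
	size_t curr_size = 0;		/* current capacity of buf */
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t need = sizes[i];

		/* Reallocate only when the existing buffer is too small. */
		if (!buf || need > curr_size) {
			free(buf);
			buf = malloc(need);
			if (!buf)
				return 1;
			curr_size = need;
		}

		/* Work with the buffer; here we just clear it. */
		memset(buf, 0, need);
		printf("record %zu: %zu bytes, buffer capacity %zu\n",
		       i, need, curr_size);

		/*
		 * If a record were kept (as the patch keeps new_mc), the
		 * pointer would be handed off and buf set to NULL so the
		 * next iteration allocates a fresh buffer.
		 */
	}

	free(buf);	/* free(NULL) is harmless */
	return 0;
}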
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 85a343e28937..356170262a93 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -343,10 +343,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 			 int (*get_ucode_data)(void *, const void *, size_t))
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-	u8 *ucode_ptr = data, *new_mc = NULL, *mc;
+	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
 	int new_rev = uci->cpu_sig.rev;
 	unsigned int leftover = size;
 	enum ucode_state state = UCODE_OK;
+	unsigned int curr_mc_size = 0;
 
 	while (leftover) {
 		struct microcode_header_intel mc_header;
@@ -361,9 +362,15 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 			break;
 		}
 
-		mc = vmalloc(mc_size);
-		if (!mc)
-			break;
+		/* For performance reasons, reuse mc area when possible */
+		if (!mc || mc_size > curr_mc_size) {
+			if (mc)
+				vfree(mc);
+			mc = vmalloc(mc_size);
+			if (!mc)
+				break;
+			curr_mc_size = mc_size;
+		}
 
 		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
 		    microcode_sanity_check(mc) < 0) {
@@ -376,13 +383,16 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 			vfree(new_mc);
 			new_rev = mc_header.rev;
 			new_mc  = mc;
-		} else
-			vfree(mc);
+			mc = NULL;	/* trigger new vmalloc */
+		}
 
 		ucode_ptr += mc_size;
 		leftover  -= mc_size;
 	}
 
+	if (mc)
+		vfree(mc);
+
 	if (leftover) {
 		if (new_mc)
 			vfree(new_mc);