-rw-r--r--	arch/powerpc/platforms/pseries/setup.c	190
-rw-r--r--	include/asm-powerpc/hvcall.h	1
2 files changed, 71 insertions, 120 deletions
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 44d5c7fdcd97..213bf983242f 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -81,8 +81,8 @@ extern void find_udbg_vterm(void);
 
 int fwnmi_active;	/* TRUE if an FWNMI handler is present */
 
-static void pseries_shared_idle(void);
-static void pseries_dedicated_idle(void);
+static void pseries_shared_idle_sleep(void);
+static void pseries_dedicated_idle_sleep(void);
 
 struct mpic *pSeries_mpic;
 
@@ -236,14 +236,13 @@ static void __init pSeries_setup_arch(void)
 		vpa_init(boot_cpuid);
 		if (get_lppaca()->shared_proc) {
 			printk(KERN_INFO "Using shared processor idle loop\n");
-			ppc_md.idle_loop = pseries_shared_idle;
+			ppc_md.power_save = pseries_shared_idle_sleep;
 		} else {
 			printk(KERN_INFO "Using dedicated idle loop\n");
-			ppc_md.idle_loop = pseries_dedicated_idle;
+			ppc_md.power_save = pseries_dedicated_idle_sleep;
 		}
 	} else {
 		printk(KERN_INFO "Using default idle loop\n");
-		ppc_md.idle_loop = default_idle;
 	}
 
 	if (firmware_has_feature(FW_FEATURE_LPAR))
@@ -393,136 +392,87 @@ static int __init pSeries_probe(int platform)
 
 DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
 
-static inline void dedicated_idle_sleep(unsigned int cpu)
-{
-	struct lppaca *plppaca = &lppaca[cpu ^ 1];
-
-	/* Only sleep if the other thread is not idle */
-	if (!(plppaca->idle)) {
-		local_irq_disable();
-
-		/*
-		 * We are about to sleep the thread and so wont be polling any
-		 * more.
-		 */
-		clear_thread_flag(TIF_POLLING_NRFLAG);
-		smp_mb__after_clear_bit();
-
-		/*
-		 * SMT dynamic mode. Cede will result in this thread going
-		 * dormant, if the partner thread is still doing work. Thread
-		 * wakes up if partner goes idle, an interrupt is presented, or
-		 * a prod occurs. Returning from the cede enables external
-		 * interrupts.
-		 */
-		if (!need_resched())
-			cede_processor();
-		else
-			local_irq_enable();
-		set_thread_flag(TIF_POLLING_NRFLAG);
-	} else {
-		/*
-		 * Give the HV an opportunity at the processor, since we are
-		 * not doing any work.
-		 */
-		poll_pending();
-	}
-}
-
-static void pseries_dedicated_idle(void)
+static void pseries_dedicated_idle_sleep(void)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned long start_snooze;
 	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
-	set_thread_flag(TIF_POLLING_NRFLAG);
-
-	while (1) {
-		/*
-		 * Indicate to the HV that we are idle. Now would be
-		 * a good time to find other work to dispatch.
-		 */
-		get_lppaca()->idle = 1;
-
-		if (!need_resched()) {
-			start_snooze = get_tb() +
-				*smt_snooze_delay * tb_ticks_per_usec;
-
-			while (!need_resched() && !cpu_is_offline(cpu)) {
-				ppc64_runlatch_off();
-
-				/*
-				 * Go into low thread priority and possibly
-				 * low power mode.
-				 */
-				HMT_low();
-				HMT_very_low();
-
-				if (*smt_snooze_delay != 0 &&
-				    get_tb() > start_snooze) {
-					HMT_medium();
-					dedicated_idle_sleep(cpu);
-				}
-
-			}
-
-			HMT_medium();
-		}
 
-		get_lppaca()->idle = 0;
-		ppc64_runlatch_on();
+	/*
+	 * Indicate to the HV that we are idle. Now would be
+	 * a good time to find other work to dispatch.
+	 */
+	get_lppaca()->idle = 1;
 
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
+	/*
+	 * We come in with interrupts disabled, and need_resched()
+	 * has been checked recently.  If we should poll for a little
+	 * while, do so.
+	 */
+	if (*smt_snooze_delay) {
+		start_snooze = get_tb() +
+			*smt_snooze_delay * tb_ticks_per_usec;
+		local_irq_enable();
+		set_thread_flag(TIF_POLLING_NRFLAG);
 
-		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
-			cpu_die();
+		while (get_tb() < start_snooze) {
+			if (need_resched() || cpu_is_offline(cpu))
+				goto out;
+			ppc64_runlatch_off();
+			HMT_low();
+			HMT_very_low();
+		}
+
+		HMT_medium();
+		clear_thread_flag(TIF_POLLING_NRFLAG);
+		smp_mb();
+		local_irq_disable();
+		if (need_resched() || cpu_is_offline(cpu))
+			goto out;
 	}
+
+	/*
+	 * Cede if the other thread is not idle, so that it can
+	 * go single-threaded.  If the other thread is idle,
+	 * we ask the hypervisor if it has pending work it
+	 * wants to do and cede if it does.  Otherwise we keep
+	 * polling in order to reduce interrupt latency.
+	 *
+	 * Doing the cede when the other thread is active will
+	 * result in this thread going dormant, meaning the other
+	 * thread gets to run in single-threaded (ST) mode, which
+	 * is slightly faster than SMT mode with this thread at
+	 * very low priority.  The cede enables interrupts, which
+	 * doesn't matter here.
+	 */
+	if (!lppaca[cpu ^ 1].idle || poll_pending() == H_Pending)
+		cede_processor();
+
+out:
+	HMT_medium();
+	get_lppaca()->idle = 0;
 }
 
-static void pseries_shared_idle(void)
+static void pseries_shared_idle_sleep(void)
 {
 	unsigned int cpu = smp_processor_id();
 
-	while (1) {
-		/*
-		 * Indicate to the HV that we are idle. Now would be
-		 * a good time to find other work to dispatch.
-		 */
-		get_lppaca()->idle = 1;
-
-		while (!need_resched() && !cpu_is_offline(cpu)) {
-			local_irq_disable();
-			ppc64_runlatch_off();
-
-			/*
-			 * Yield the processor to the hypervisor.  We return if
-			 * an external interrupt occurs (which are driven prior
-			 * to returning here) or if a prod occurs from another
-			 * processor. When returning here, external interrupts
-			 * are enabled.
-			 *
-			 * Check need_resched() again with interrupts disabled
-			 * to avoid a race.
-			 */
-			if (!need_resched())
-				cede_processor();
-			else
-				local_irq_enable();
-
-			HMT_medium();
-		}
-
-		get_lppaca()->idle = 0;
-		ppc64_runlatch_on();
+	/*
+	 * Indicate to the HV that we are idle. Now would be
+	 * a good time to find other work to dispatch.
+	 */
+	get_lppaca()->idle = 1;
 
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
+	/*
+	 * Yield the processor to the hypervisor.  We return if
+	 * an external interrupt occurs (which are driven prior
+	 * to returning here) or if a prod occurs from another
+	 * processor. When returning here, external interrupts
+	 * are enabled.
+	 */
+	cede_processor();
 
-		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
-			cpu_die();
-	}
+	get_lppaca()->idle = 0;
 }
 
 static int pSeries_pci_probe_mode(struct pci_bus *bus)
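Note (not part of the patch): the pseries functions above no longer implement a whole idle loop; they are registered via ppc_md.power_save and simply return after one polling/cede attempt. The sketch below illustrates the kind of generic idle loop they are written against, assuming the caller disables interrupts and checks need_resched() before invoking the hook, and treats a NULL power_save (the "default idle loop" case in pSeries_setup_arch) as a plain busy-wait. The function name example_cpu_idle and its exact structure are illustrative, not taken from this diff.

/*
 * Illustrative sketch only -- the generic loop is not shown in this diff.
 * It demonstrates the contract pseries_{shared,dedicated}_idle_sleep rely
 * on: interrupts are off and need_resched() was just checked when
 * ppc_md.power_save is called, and scheduling/CPU-offline handling stays
 * in the caller rather than in the platform callback.
 */
static void example_cpu_idle(void)
{
	for (;;) {
		while (!need_resched()) {
			local_irq_disable();
			if (!need_resched() && ppc_md.power_save)
				ppc_md.power_save();	/* may re-enable interrupts */
			local_irq_enable();
		}
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}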
diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h
index 38ca9ad6110d..b72c04f3f551 100644
--- a/include/asm-powerpc/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -9,6 +9,7 @@
 #define H_Closed	2	/* Resource closed */
 #define H_Constrained	4	/* Resource request constrained to max allowed */
 #define H_InProgress	14	/* Kind of like busy */
+#define H_Pending	17	/* returned from H_POLL_PENDING */
 #define H_Continue	18	/* Returned from H_Join on success */
 #define H_LongBusyStartRange	9900	/* Start of long busy range */
 #define H_LongBusyOrder1msec	9900	/* Long busy, hint that 1msec is a good time to retry */
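For context: H_Pending is the return value that pseries_dedicated_idle_sleep() checks via poll_pending() before deciding whether to cede (see the setup.c hunk above). The wrapper below is a sketch, assuming poll_pending() is a thin wrapper around the H_POLL_PENDING hcall; the real definition lives in the pseries plpar wrapper header, not in this diff.

/* Sketch of the wrapper assumed by the idle code above. */
static inline long poll_pending(void)
{
	return plpar_hcall_norets(H_POLL_PENDING);
}

/* Usage in pseries_dedicated_idle_sleep(), as shown in the setup.c hunk: */
if (!lppaca[cpu ^ 1].idle || poll_pending() == H_Pending)
	cede_processor();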