diff options
author | Anton Blanchard <anton@samba.org> | 2005-07-07 20:56:33 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-07-07 21:23:41 -0400 |
commit | 050a09389e045f37e5bf08718cf36909766e20d1 (patch) | |
tree | 55911d552e81729ddf825b44463805ade84eb1f0 /arch | |
parent | 3c57bb9f454e8fc7b3d815b991b0dec43c766641 (diff) |
[PATCH] ppc64: pSeries idle fixups
- separate out sleep logic in dedicated_idle, it was so far indented
that it got squashed against the right side of the screen.
- add runlatch support, looping on runlatch disable.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/ppc64/kernel/pSeries_setup.c | 113 |
1 file changed, 62 insertions, 51 deletions
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c index 3f3be8ae9351..5bec956e44a0 100644 --- a/arch/ppc64/kernel/pSeries_setup.c +++ b/arch/ppc64/kernel/pSeries_setup.c | |||
@@ -83,8 +83,8 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */ | |||
83 | extern void pSeries_system_reset_exception(struct pt_regs *regs); | 83 | extern void pSeries_system_reset_exception(struct pt_regs *regs); |
84 | extern int pSeries_machine_check_exception(struct pt_regs *regs); | 84 | extern int pSeries_machine_check_exception(struct pt_regs *regs); |
85 | 85 | ||
86 | static int shared_idle(void); | 86 | static int pseries_shared_idle(void); |
87 | static int dedicated_idle(void); | 87 | static int pseries_dedicated_idle(void); |
88 | 88 | ||
89 | static volatile void __iomem * chrp_int_ack_special; | 89 | static volatile void __iomem * chrp_int_ack_special; |
90 | struct mpic *pSeries_mpic; | 90 | struct mpic *pSeries_mpic; |
@@ -238,10 +238,10 @@ static void __init pSeries_setup_arch(void) | |||
238 | if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { | 238 | if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { |
239 | if (get_paca()->lppaca.shared_proc) { | 239 | if (get_paca()->lppaca.shared_proc) { |
240 | printk(KERN_INFO "Using shared processor idle loop\n"); | 240 | printk(KERN_INFO "Using shared processor idle loop\n"); |
241 | ppc_md.idle_loop = shared_idle; | 241 | ppc_md.idle_loop = pseries_shared_idle; |
242 | } else { | 242 | } else { |
243 | printk(KERN_INFO "Using dedicated idle loop\n"); | 243 | printk(KERN_INFO "Using dedicated idle loop\n"); |
244 | ppc_md.idle_loop = dedicated_idle; | 244 | ppc_md.idle_loop = pseries_dedicated_idle; |
245 | } | 245 | } |
246 | } else { | 246 | } else { |
247 | printk(KERN_INFO "Using default idle loop\n"); | 247 | printk(KERN_INFO "Using default idle loop\n"); |
@@ -438,15 +438,47 @@ static int __init pSeries_probe(int platform) | |||
438 | 438 | ||
439 | DECLARE_PER_CPU(unsigned long, smt_snooze_delay); | 439 | DECLARE_PER_CPU(unsigned long, smt_snooze_delay); |
440 | 440 | ||
441 | int dedicated_idle(void) | 441 | static inline void dedicated_idle_sleep(unsigned int cpu) |
442 | { | ||
443 | struct paca_struct *ppaca = &paca[cpu ^ 1]; | ||
444 | |||
445 | /* Only sleep if the other thread is not idle */ | ||
446 | if (!(ppaca->lppaca.idle)) { | ||
447 | local_irq_disable(); | ||
448 | |||
449 | /* | ||
450 | * We are about to sleep the thread and so wont be polling any | ||
451 | * more. | ||
452 | */ | ||
453 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
454 | |||
455 | /* | ||
456 | * SMT dynamic mode. Cede will result in this thread going | ||
457 | * dormant, if the partner thread is still doing work. Thread | ||
458 | * wakes up if partner goes idle, an interrupt is presented, or | ||
459 | * a prod occurs. Returning from the cede enables external | ||
460 | * interrupts. | ||
461 | */ | ||
462 | if (!need_resched()) | ||
463 | cede_processor(); | ||
464 | else | ||
465 | local_irq_enable(); | ||
466 | } else { | ||
467 | /* | ||
468 | * Give the HV an opportunity at the processor, since we are | ||
469 | * not doing any work. | ||
470 | */ | ||
471 | poll_pending(); | ||
472 | } | ||
473 | } | ||
474 | |||
475 | static int pseries_dedicated_idle(void) | ||
442 | { | 476 | { |
443 | long oldval; | 477 | long oldval; |
444 | struct paca_struct *lpaca = get_paca(), *ppaca; | 478 | struct paca_struct *lpaca = get_paca(); |
479 | unsigned int cpu = smp_processor_id(); | ||
445 | unsigned long start_snooze; | 480 | unsigned long start_snooze; |
446 | unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay); | 481 | unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay); |
447 | unsigned int cpu = smp_processor_id(); | ||
448 | |||
449 | ppaca = &paca[cpu ^ 1]; | ||
450 | 482 | ||
451 | while (1) { | 483 | while (1) { |
452 | /* | 484 | /* |
@@ -458,9 +490,13 @@ int dedicated_idle(void) | |||
458 | oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); | 490 | oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); |
459 | if (!oldval) { | 491 | if (!oldval) { |
460 | set_thread_flag(TIF_POLLING_NRFLAG); | 492 | set_thread_flag(TIF_POLLING_NRFLAG); |
493 | |||
461 | start_snooze = __get_tb() + | 494 | start_snooze = __get_tb() + |
462 | *smt_snooze_delay * tb_ticks_per_usec; | 495 | *smt_snooze_delay * tb_ticks_per_usec; |
496 | |||
463 | while (!need_resched() && !cpu_is_offline(cpu)) { | 497 | while (!need_resched() && !cpu_is_offline(cpu)) { |
498 | ppc64_runlatch_off(); | ||
499 | |||
464 | /* | 500 | /* |
465 | * Go into low thread priority and possibly | 501 | * Go into low thread priority and possibly |
466 | * low power mode. | 502 | * low power mode. |
@@ -468,60 +504,31 @@ int dedicated_idle(void) | |||
468 | HMT_low(); | 504 | HMT_low(); |
469 | HMT_very_low(); | 505 | HMT_very_low(); |
470 | 506 | ||
471 | if (*smt_snooze_delay == 0 || | 507 | if (*smt_snooze_delay != 0 && |
472 | __get_tb() < start_snooze) | 508 | __get_tb() > start_snooze) { |
473 | continue; | 509 | HMT_medium(); |
474 | 510 | dedicated_idle_sleep(cpu); | |
475 | HMT_medium(); | ||
476 | |||
477 | if (!(ppaca->lppaca.idle)) { | ||
478 | local_irq_disable(); | ||
479 | |||
480 | /* | ||
481 | * We are about to sleep the thread | ||
482 | * and so wont be polling any | ||
483 | * more. | ||
484 | */ | ||
485 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
486 | |||
487 | /* | ||
488 | * SMT dynamic mode. Cede will result | ||
489 | * in this thread going dormant, if the | ||
490 | * partner thread is still doing work. | ||
491 | * Thread wakes up if partner goes idle, | ||
492 | * an interrupt is presented, or a prod | ||
493 | * occurs. Returning from the cede | ||
494 | * enables external interrupts. | ||
495 | */ | ||
496 | if (!need_resched()) | ||
497 | cede_processor(); | ||
498 | else | ||
499 | local_irq_enable(); | ||
500 | } else { | ||
501 | /* | ||
502 | * Give the HV an opportunity at the | ||
503 | * processor, since we are not doing | ||
504 | * any work. | ||
505 | */ | ||
506 | poll_pending(); | ||
507 | } | 511 | } |
512 | |||
508 | } | 513 | } |
509 | 514 | ||
515 | HMT_medium(); | ||
510 | clear_thread_flag(TIF_POLLING_NRFLAG); | 516 | clear_thread_flag(TIF_POLLING_NRFLAG); |
511 | } else { | 517 | } else { |
512 | set_need_resched(); | 518 | set_need_resched(); |
513 | } | 519 | } |
514 | 520 | ||
515 | HMT_medium(); | ||
516 | lpaca->lppaca.idle = 0; | 521 | lpaca->lppaca.idle = 0; |
522 | ppc64_runlatch_on(); | ||
523 | |||
517 | schedule(); | 524 | schedule(); |
525 | |||
518 | if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) | 526 | if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) |
519 | cpu_die(); | 527 | cpu_die(); |
520 | } | 528 | } |
521 | return 0; | ||
522 | } | 529 | } |
523 | 530 | ||
524 | static int shared_idle(void) | 531 | static int pseries_shared_idle(void) |
525 | { | 532 | { |
526 | struct paca_struct *lpaca = get_paca(); | 533 | struct paca_struct *lpaca = get_paca(); |
527 | unsigned int cpu = smp_processor_id(); | 534 | unsigned int cpu = smp_processor_id(); |
@@ -535,6 +542,7 @@ static int shared_idle(void) | |||
535 | 542 | ||
536 | while (!need_resched() && !cpu_is_offline(cpu)) { | 543 | while (!need_resched() && !cpu_is_offline(cpu)) { |
537 | local_irq_disable(); | 544 | local_irq_disable(); |
545 | ppc64_runlatch_off(); | ||
538 | 546 | ||
539 | /* | 547 | /* |
540 | * Yield the processor to the hypervisor. We return if | 548 | * Yield the processor to the hypervisor. We return if |
@@ -550,13 +558,16 @@ static int shared_idle(void) | |||
550 | cede_processor(); | 558 | cede_processor(); |
551 | else | 559 | else |
552 | local_irq_enable(); | 560 | local_irq_enable(); |
561 | |||
562 | HMT_medium(); | ||
553 | } | 563 | } |
554 | 564 | ||
555 | HMT_medium(); | ||
556 | lpaca->lppaca.idle = 0; | 565 | lpaca->lppaca.idle = 0; |
566 | ppc64_runlatch_on(); | ||
567 | |||
557 | schedule(); | 568 | schedule(); |
558 | if (cpu_is_offline(smp_processor_id()) && | 569 | |
559 | system_state == SYSTEM_RUNNING) | 570 | if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) |
560 | cpu_die(); | 571 | cpu_die(); |
561 | } | 572 | } |
562 | 573 | ||