path: root/arch/powerpc/platforms/pseries/rtasd.c
author     Anton Blanchard <anton@samba.org>     2009-05-25 16:25:49 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>     2009-06-01 20:35:32 -0400
commit     f8729e8531cd92e892dd85a3d10669834ad0b23c (patch)
tree       8e44e0dadbc7b7dc0aa1be7d68aed111cd7dcd41 /arch/powerpc/platforms/pseries/rtasd.c
parent     0512a9a8e277a9de2820211eef964473b714ae65 (diff)
powerpc: Convert RTAS event scan from kernel thread to workqueue
RTAS event scan has to run across all cpus. Right now we use a kernel
thread and set_cpus_allowed, but in doing so we wake up the previous cpu
unnecessarily. Some ftrace output shows this:

previous cpu (2):

[002]     7.022331: sched_switch: task swapper:0 [140] ==> rtasd:194 [120]
[002]     7.022338: sched_switch: task rtasd:194 [120] ==> migration/2:9 [0]
[002]     7.022344: sched_switch: task migration/2:9 [0] ==> swapper:0 [140]

next cpu (3):

[003]     7.022345: sched_switch: task swapper:0 [140] ==> rtasd:194 [120]
[003]     7.022371: sched_switch: task rtasd:194 [120] ==> swapper:0 [140]

We can use schedule_delayed_work_on and avoid the unnecessary wakeup.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
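For readers less familiar with the delayed-workqueue API, the pattern the patch switches to can be sketched as a small, self-contained module. This is only an illustration, not code from the patch: the names (scan_fn, scan_work, scan_init) and the 1*HZ period are made up, but the helpers are the same ones the diff below uses (DECLARE_DELAYED_WORK, schedule_delayed_work_on, get_online_cpus/put_online_cpus, first_cpu/next_cpu on cpu_online_map, __round_jiffies_relative, all as of the 2009-era API). Each time the work runs it re-queues itself on the next online cpu, so the wakeup happens on the target cpu rather than bouncing through the previous one:

/*
 * Illustrative sketch only (hypothetical module, not part of this patch):
 * a delayed work item that round-robins itself across the online cpus.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static void scan_fn(struct work_struct *w);
static DECLARE_DELAYED_WORK(scan_work, scan_fn);

static void scan_fn(struct work_struct *w)
{
	unsigned int cpu;

	/* ... per-cpu work would go here ... */

	get_online_cpus();		/* keep the online map stable */

	/* pick the next online cpu after the one we just ran on */
	cpu = next_cpu(smp_processor_id(), cpu_online_map);
	if (cpu == NR_CPUS)		/* wrapped past the last cpu */
		cpu = first_cpu(cpu_online_map);

	/* re-arm on that cpu; the wakeup happens there, not here */
	schedule_delayed_work_on(cpu, &scan_work,
				 __round_jiffies_relative(HZ, cpu));

	put_online_cpus();
}

static int __init scan_init(void)
{
	schedule_delayed_work_on(first_cpu(cpu_online_map), &scan_work, HZ);
	return 0;
}

static void __exit scan_exit(void)
{
	cancel_delayed_work_sync(&scan_work);
}

module_init(scan_init);
module_exit(scan_exit);
MODULE_LICENSE("GPL");

Routing the delay through __round_jiffies_relative() also lines the timer up with other whole-second timers on the chosen cpu, so the periodic scan tends to piggyback on wakeups that were going to happen anyway.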
Diffstat (limited to 'arch/powerpc/platforms/pseries/rtasd.c')
-rw-r--r--    arch/powerpc/platforms/pseries/rtasd.c    76
1 file changed, 38 insertions(+), 38 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
index afad9f5ac0ac..b3cbac855924 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -19,7 +19,7 @@
 #include <linux/vmalloc.h>
 #include <linux/spinlock.h>
 #include <linux/cpu.h>
-#include <linux/delay.h>
+#include <linux/workqueue.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -387,36 +387,51 @@ static void do_event_scan(void)
 	} while(error == 0);
 }
 
-static void do_event_scan_all_cpus(long delay)
-{
-	int cpu;
+static void rtas_event_scan(struct work_struct *w);
+DECLARE_DELAYED_WORK(event_scan_work, rtas_event_scan);
+
+/*
+ * Delay should be at least one second since some machines have problems if
+ * we call event-scan too quickly.
+ */
+static unsigned long event_scan_delay = 1*HZ;
+static int first_pass = 1;
+
+static void rtas_event_scan(struct work_struct *w)
+{
+	unsigned int cpu;
+
+	do_event_scan();
 
 	get_online_cpus();
-	cpu = first_cpu(cpu_online_map);
-	for (;;) {
-		set_cpus_allowed(current, cpumask_of_cpu(cpu));
-		do_event_scan();
-		set_cpus_allowed(current, CPU_MASK_ALL);
-
-		/* Drop hotplug lock, and sleep for the specified delay */
-		put_online_cpus();
-		msleep_interruptible(delay);
-		get_online_cpus();
-
-		cpu = next_cpu(cpu, cpu_online_map);
-		if (cpu == NR_CPUS)
-			break;
-	}
+
+	cpu = next_cpu(smp_processor_id(), cpu_online_map);
+	if (cpu == NR_CPUS) {
+		cpu = first_cpu(cpu_online_map);
+
+		if (first_pass) {
+			first_pass = 0;
+			event_scan_delay = 30*HZ/rtas_event_scan_rate;
+
+			if (surveillance_timeout != -1) {
+				pr_debug("rtasd: enabling surveillance\n");
+				enable_surveillance(surveillance_timeout);
+				pr_debug("rtasd: surveillance enabled\n");
+			}
+		}
+	}
+
+	schedule_delayed_work_on(cpu, &event_scan_work,
+		__round_jiffies_relative(event_scan_delay, cpu));
+
 	put_online_cpus();
 }
 
-static int rtasd(void *unused)
+static void start_event_scan(void)
 {
 	unsigned int err_type;
 	int rc;
 
-	daemonize("rtasd");
-
 	printk(KERN_DEBUG "RTAS daemon started\n");
 	pr_debug("rtasd: will sleep for %d milliseconds\n",
 		 (30000 / rtas_event_scan_rate));
@@ -434,22 +449,8 @@ static int rtasd(void *unused)
 		}
 	}
 
-	/* First pass. */
-	do_event_scan_all_cpus(1000);
-
-	if (surveillance_timeout != -1) {
-		pr_debug("rtasd: enabling surveillance\n");
-		enable_surveillance(surveillance_timeout);
-		pr_debug("rtasd: surveillance enabled\n");
-	}
-
-	/* Delay should be at least one second since some
-	 * machines have problems if we call event-scan too
-	 * quickly. */
-	for (;;)
-		do_event_scan_all_cpus(30000/rtas_event_scan_rate);
-
-	return -EINVAL;
+	schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work,
+			event_scan_delay);
 }
 
 static int __init rtas_init(void)
@@ -487,8 +488,7 @@ static int __init rtas_init(void)
 	if (!entry)
 		printk(KERN_ERR "Failed to create error_log proc entry\n");
 
-	if (kernel_thread(rtasd, NULL, CLONE_FS) < 0)
-		printk(KERN_ERR "Failed to start RTAS daemon\n");
+	start_event_scan();
 
 	return 0;
 }