aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/oprofile
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@us.ibm.com>2006-01-08 04:01:35 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-01-08 23:13:40 -0500
commit4369ef3c3e9d3bd9b879580678778f558d481e90 (patch)
tree32b443b32c6de858c419f75dad07c46028359afe /drivers/oprofile
parenteafbaa94691f6a1fa67c3b076caa3ce4e2920100 (diff)
[PATCH] Make RCU task_struct safe for oprofile
Applying RCU to the task structure broke oprofile, because free_task_notify() can now be called from softirq. This means that the task_mortuary lock must be acquired with irq disabled in order to avoid intermittent self-deadlock. Since irq is now disabled, the critical section within process_task_mortuary() has been restructured to be O(1) in order to maximize scalability and minimize realtime latency degradation. Kudos to Wu Fengguang for finding this problem! CC: Wu Fengguang <wfg@mail.ustc.edu.cn> Cc: Philippe Elie <phil.el@wanadoo.fr> Cc: John Levon <levon@movementarian.org> Signed-off-by: "Paul E. McKenney" <paulmck@us.ibm.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/oprofile')
-rw-r--r--drivers/oprofile/buffer_sync.c30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 531b07313141..b2e8e49c8659 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -43,13 +43,16 @@ static void process_task_mortuary(void);
  * list for processing. Only after two full buffer syncs
  * does the task eventually get freed, because by then
  * we are sure we will not reference it again.
+ * Can be invoked from softirq via RCU callback due to
+ * call_rcu() of the task struct, hence the _irqsave.
  */
 static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
 {
+	unsigned long flags;
 	struct task_struct * task = data;
-	spin_lock(&task_mortuary);
+	spin_lock_irqsave(&task_mortuary, flags);
 	list_add(&task->tasks, &dying_tasks);
-	spin_unlock(&task_mortuary);
+	spin_unlock_irqrestore(&task_mortuary, flags);
 	return NOTIFY_OK;
 }
 
@@ -431,25 +434,22 @@ static void increment_tail(struct oprofile_cpu_buffer * b)
  */
 static void process_task_mortuary(void)
 {
-	struct list_head * pos;
-	struct list_head * pos2;
+	unsigned long flags;
+	LIST_HEAD(local_dead_tasks);
 	struct task_struct * task;
+	struct task_struct * ttask;
 
-	spin_lock(&task_mortuary);
+	spin_lock_irqsave(&task_mortuary, flags);
 
-	list_for_each_safe(pos, pos2, &dead_tasks) {
-		task = list_entry(pos, struct task_struct, tasks);
-		list_del(&task->tasks);
-		free_task(task);
-	}
+	list_splice_init(&dead_tasks, &local_dead_tasks);
+	list_splice_init(&dying_tasks, &dead_tasks);
 
-	list_for_each_safe(pos, pos2, &dying_tasks) {
-		task = list_entry(pos, struct task_struct, tasks);
+	spin_unlock_irqrestore(&task_mortuary, flags);
+
+	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
 		list_del(&task->tasks);
-		list_add_tail(&task->tasks, &dead_tasks);
-	}
-
-	spin_unlock(&task_mortuary);
+		free_task(task);
+	}
 }
 
 