about summary refs log tree commit diff stats
path: root/arch/s390/mm
diff options
context:
space:
mode:
authorHeiko Carstens <heiko.carstens@de.ibm.com>2012-05-09 03:37:30 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2012-05-16 08:42:42 -0400
commitd5e50a51ccbda36b379aba9d1131a852eb908dda (patch)
tree1c23ccc1e5836c2ca5a85b930b34c04bf69d4875 /arch/s390/mm
parent473e66baad1e83e6c5dfdca65aba03bf21727202 (diff)
s390/pfault: fix task state race
When setting the current task state to TASK_UNINTERRUPTIBLE this can race with a different cpu. The other cpu could set the task state after it inspected it (while it was still TASK_RUNNING) to TASK_RUNNING which would change the state from TASK_UNINTERRUPTIBLE to TASK_RUNNING again. This race was always present in the pfault interrupt code but didn't cause anything harmful before commit f2db2e6c "[S390] pfault: cpu hotplug vs missing completion interrupts" which relied on the fact that after setting the task state to TASK_UNINTERRUPTIBLE the task would really sleep. Since this is not necessarily the case the result may be a list corruption of the pfault_list or, as observed, a use-after-free bug while trying to access the task_struct of a task which terminated itself already. To fix this, we need to get a reference of the affected task when receiving the initial pfault interrupt and add special handling if we receive yet another initial pfault interrupt when the task is already enqueued in the pfault list. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: <stable@vger.kernel.org> # needed for v3.0 and newer Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r-- arch/s390/mm/fault.c | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index b9aeaca26d3a..67e2d4d14ae6 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -574,6 +574,7 @@ static void pfault_interrupt(struct ext_code ext_code,
574 tsk->thread.pfault_wait = 0; 574 tsk->thread.pfault_wait = 0;
575 list_del(&tsk->thread.list); 575 list_del(&tsk->thread.list);
576 wake_up_process(tsk); 576 wake_up_process(tsk);
577 put_task_struct(tsk);
577 } else { 578 } else {
578 /* Completion interrupt was faster than initial 579 /* Completion interrupt was faster than initial
579 * interrupt. Set pfault_wait to -1 so the initial 580 * interrupt. Set pfault_wait to -1 so the initial
@@ -588,14 +589,22 @@ static void pfault_interrupt(struct ext_code ext_code,
588 put_task_struct(tsk); 589 put_task_struct(tsk);
589 } else { 590 } else {
590 /* signal bit not set -> a real page is missing. */ 591 /* signal bit not set -> a real page is missing. */
591 if (tsk->thread.pfault_wait == -1) { 592 if (tsk->thread.pfault_wait == 1) {
593 /* Already on the list with a reference: put to sleep */
594 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
595 set_tsk_need_resched(tsk);
596 } else if (tsk->thread.pfault_wait == -1) {
592 /* Completion interrupt was faster than the initial 597 /* Completion interrupt was faster than the initial
593 * interrupt (pfault_wait == -1). Set pfault_wait 598 * interrupt (pfault_wait == -1). Set pfault_wait
594 * back to zero and exit. */ 599 * back to zero and exit. */
595 tsk->thread.pfault_wait = 0; 600 tsk->thread.pfault_wait = 0;
596 } else { 601 } else {
597 /* Initial interrupt arrived before completion 602 /* Initial interrupt arrived before completion
598 * interrupt. Let the task sleep. */ 603 * interrupt. Let the task sleep.
604 * An extra task reference is needed since a different
605 * cpu may set the task state to TASK_RUNNING again
606 * before the scheduler is reached. */
607 get_task_struct(tsk);
599 tsk->thread.pfault_wait = 1; 608 tsk->thread.pfault_wait = 1;
600 list_add(&tsk->thread.list, &pfault_list); 609 list_add(&tsk->thread.list, &pfault_list);
601 set_task_state(tsk, TASK_UNINTERRUPTIBLE); 610 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
@@ -620,6 +629,7 @@ static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
620 list_del(&thread->list); 629 list_del(&thread->list);
621 tsk = container_of(thread, struct task_struct, thread); 630 tsk = container_of(thread, struct task_struct, thread);
622 wake_up_process(tsk); 631 wake_up_process(tsk);
632 put_task_struct(tsk);
623 } 633 }
624 spin_unlock_irq(&pfault_lock); 634 spin_unlock_irq(&pfault_lock);
625 break; 635 break;