author	Linus Torvalds <torvalds@linux-foundation.org>	2009-10-29 11:20:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-10-29 11:20:00 -0400
commit	3242f9804ba992c867360e2b57efc268b8e4e175 (patch)
tree	96fbdbc1344aa67588ce26765f308c674b91a75f /kernel
parent	23756692147c5dfd3328afd42e16e9d943ff756c (diff)
parent	7456b0405d8fc063c49628f969cdb23be060fc80 (diff)
Merge branch 'hwpoison-2.6.32' of git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6
* 'hwpoison-2.6.32' of git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6:
  HWPOISON: fix invalid page count in printk output
  HWPOISON: Allow schedule_on_each_cpu() from keventd
  HWPOISON: fix /proc/meminfo alignment
  HWPOISON: fix oops on ksm pages
  HWPOISON: Fix page count leak in hwpoison late kill in do_swap_page
  HWPOISON: return early on non-LRU pages
  HWPOISON: Add brief hwpoison description to Documentation
  HWPOISON: Clean up PR_MCE_KILL interface
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sys.c	23
-rw-r--r--	kernel/workqueue.c	21
2 files changed, 37 insertions(+), 7 deletions(-)
diff --git a/kernel/sys.c b/kernel/sys.c
index 1828f8d10844..ce17760d9c51 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1548,24 +1548,37 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		if (arg4 | arg5)
 			return -EINVAL;
 		switch (arg2) {
-		case 0:
+		case PR_MCE_KILL_CLEAR:
 			if (arg3 != 0)
 				return -EINVAL;
 			current->flags &= ~PF_MCE_PROCESS;
 			break;
-		case 1:
+		case PR_MCE_KILL_SET:
 			current->flags |= PF_MCE_PROCESS;
-			if (arg3 != 0)
+			if (arg3 == PR_MCE_KILL_EARLY)
 				current->flags |= PF_MCE_EARLY;
-			else
+			else if (arg3 == PR_MCE_KILL_LATE)
 				current->flags &= ~PF_MCE_EARLY;
+			else if (arg3 == PR_MCE_KILL_DEFAULT)
+				current->flags &=
+						~(PF_MCE_EARLY|PF_MCE_PROCESS);
+			else
+				return -EINVAL;
 			break;
 		default:
 			return -EINVAL;
 		}
 		error = 0;
 		break;
-
+	case PR_MCE_KILL_GET:
+		if (arg2 | arg3 | arg4 | arg5)
+			return -EINVAL;
+		if (current->flags & PF_MCE_PROCESS)
+			error = (current->flags & PF_MCE_EARLY) ?
+				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
+		else
+			error = PR_MCE_KILL_DEFAULT;
+		break;
 	default:
 		error = -EINVAL;
 		break;
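
For context, a minimal userspace sketch of the PR_MCE_KILL interface cleaned up above. This is an illustration, not part of the merge: it assumes a kernel with this series applied, and the numeric fallbacks are only there in case the installed headers predate the PR_MCE_KILL_* names (they mirror include/linux/prctl.h from this series).

/* Sketch: set, query, and clear the per-process memory-failure kill policy. */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL		33	/* fallback values mirroring linux/prctl.h */
#define PR_MCE_KILL_CLEAR	0
#define PR_MCE_KILL_SET		1
#define PR_MCE_KILL_LATE	0
#define PR_MCE_KILL_EARLY	1
#define PR_MCE_KILL_DEFAULT	2
#define PR_MCE_KILL_GET		34
#endif

int main(void)
{
	/* Opt this process into early kill on hardware memory corruption. */
	if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0))
		perror("PR_MCE_KILL set");

	/* PR_MCE_KILL_GET returns the current policy; the other args must be 0. */
	printf("policy: %d\n", prctl(PR_MCE_KILL_GET, 0, 0, 0, 0));

	/* Drop back to the cleared state; arg3 must be 0 for PR_MCE_KILL_CLEAR. */
	if (prctl(PR_MCE_KILL, PR_MCE_KILL_CLEAR, 0, 0, 0))
		perror("PR_MCE_KILL clear");
	return 0;
}

Per the hunk above, PR_MCE_KILL_GET maps PF_MCE_PROCESS/PF_MCE_EARLY back to PR_MCE_KILL_EARLY, PR_MCE_KILL_LATE, or PR_MCE_KILL_DEFAULT, so the printed value reflects whatever was last set.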
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 47cdd7e76f2b..12328147132c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -685,21 +685,38 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
+	int orig = -1;
 	struct work_struct *works;
 
 	works = alloc_percpu(struct work_struct);
 	if (!works)
 		return -ENOMEM;
 
+	/*
+	 * when running in keventd don't schedule a work item on itself.
+	 * Can just call directly because the work queue is already bound.
+	 * This also is faster.
+	 * Make this a generic parameter for other workqueues?
+	 */
+	if (current_is_keventd()) {
+		orig = raw_smp_processor_id();
+		INIT_WORK(per_cpu_ptr(works, orig), func);
+		func(per_cpu_ptr(works, orig));
+	}
+
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
+		if (cpu == orig)
+			continue;
 		INIT_WORK(work, func);
 		schedule_work_on(cpu, work);
 	}
-	for_each_online_cpu(cpu)
-		flush_work(per_cpu_ptr(works, cpu));
+	for_each_online_cpu(cpu) {
+		if (cpu != orig)
+			flush_work(per_cpu_ptr(works, cpu));
+	}
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
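
For context, a hypothetical built-in snippet (the function and variable names are illustrative, not part of the merge) of the case this workqueue change enables: calling schedule_on_each_cpu() from a work item that is itself running in keventd, which previously could deadlock waiting on the current CPU's own work entry.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

/* Callback run once on every online CPU. */
static void per_cpu_probe(struct work_struct *unused)
{
	pr_info("probe on CPU %d\n", raw_smp_processor_id());
}

/*
 * Runs in keventd. With the patch above, schedule_on_each_cpu() invokes the
 * current CPU's callback directly instead of queueing it behind this work
 * item and flushing it, so the fan-out no longer blocks on itself.
 */
static void fanout_fn(struct work_struct *unused)
{
	schedule_on_each_cpu(per_cpu_probe);
}

static DECLARE_WORK(fanout_work, fanout_fn);

static int __init fanout_init(void)
{
	schedule_work(&fanout_work);	/* hand the fan-out off to keventd */
	return 0;
}
late_initcall(fanout_init);

The orig bookkeeping in the hunk above skips both scheduling and flushing the current CPU's entry precisely because that callback has already been run synchronously.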