author		Jeff Garzik <jeff@garzik.org>	2006-03-22 19:13:54 -0500
committer	Jeff Garzik <jeff@garzik.org>	2006-03-22 19:13:54 -0500
commit		f01c18456993bab43067b678f56c87ca954aa43b (patch)
tree		3e0cd0cdf1a57618202b46a7126125902e3ab832 /kernel
parent		949ec2c8e6b7b89179b85baf6309c009e1a1b951 (diff)
parent		1c2e02750b992703a8a18634e08b04353face243 (diff)
Merge branch 'master'
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/fork.c		6
-rw-r--r--	kernel/sched.c		6
-rw-r--r--	kernel/softirq.c	20
-rw-r--r--	kernel/workqueue.c	29
4 files changed, 55 insertions, 6 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index b373322ca497..9bd7b65ee418 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1534,6 +1534,12 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
 
 	check_unshare_flags(&unshare_flags);
 
+	/* Return -EINVAL for all unsupported flags */
+	err = -EINVAL;
+	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
+				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM))
+		goto bad_unshare_out;
+
 	if ((err = unshare_thread(unshare_flags)))
 		goto bad_unshare_out;
 	if ((err = unshare_fs(unshare_flags, &new_fs)))
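Note on the sys_unshare() change above: with this hunk applied, any flag outside the listed CLONE_* set is rejected with -EINVAL before any unsharing is attempted. A minimal userspace sketch of the resulting behaviour, assuming the glibc unshare(2) wrapper; CLONE_VFORK is just one example of a flag the whitelist does not accept:

#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	/* CLONE_VFORK is outside the whitelist, so the call fails early. */
	if (unshare(CLONE_VFORK) == -1 && errno == EINVAL)
		printf("unsupported flag rejected with EINVAL\n");

	/* CLONE_FS is in the whitelist, so this is expected to succeed. */
	if (unshare(CLONE_FS) == 0)
		printf("CLONE_FS unshared\n");
	return 0;
}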
diff --git a/kernel/sched.c b/kernel/sched.c
index 4d46e90f59c3..6b6e0d70eb30 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -707,12 +707,6 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
 					DEF_TIMESLICE);
 	} else {
 		/*
-		 * The lower the sleep avg a task has the more
-		 * rapidly it will rise with sleep time.
-		 */
-		sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
-
-		/*
 		 * Tasks waking from uninterruptible sleep are
 		 * limited in their sleep_avg rise as they
 		 * are likely to be waiting on I/O
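Aside on the lines removed from recalc_task_prio() above: the deleted statement used the GNU C shorthand "x ?: y", which evaluates to x when x is non-zero and to y otherwise, so sleep_time was scaled by (MAX_BONUS - CURRENT_BONUS(p)) with a fallback factor of 1. A standalone illustration of that operator, using made-up numbers rather than the scheduler's real constants:

#include <stdio.h>

int main(void)
{
	unsigned long sleep_time = 1000;	/* hypothetical value */
	int max_bonus = 10, current_bonus = 4;	/* hypothetical values */

	/* GNU extension: "x ?: y" means "x ? x : y" with x evaluated once. */
	sleep_time *= (max_bonus - current_bonus) ? : 1;

	printf("%lu\n", sleep_time);		/* prints 6000 */
	return 0;
}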
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ad3295cdded5..ec8fed42a86f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -16,6 +16,7 @@
 #include <linux/cpu.h>
 #include <linux/kthread.h>
 #include <linux/rcupdate.h>
+#include <linux/smp.h>
 
 #include <asm/irq.h>
 /*
@@ -495,3 +496,22 @@ __init int spawn_ksoftirqd(void)
 	register_cpu_notifier(&cpu_nfb);
 	return 0;
 }
+
+#ifdef CONFIG_SMP
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
+{
+	int ret = 0;
+
+	preempt_disable();
+	ret = smp_call_function(func, info, retry, wait);
+	local_irq_disable();
+	func(info);
+	local_irq_enable();
+	preempt_enable();
+	return ret;
+}
+EXPORT_SYMBOL(on_each_cpu);
+#endif
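Usage note for the on_each_cpu() helper added above: it invokes func on the other CPUs via smp_call_function() and then runs it on the local CPU with interrupts disabled, so the callback must be quick and must not sleep. A hedged in-kernel sketch against the four-argument signature shown in this hunk (module and function names are hypothetical):

#include <linux/module.h>
#include <linux/smp.h>

/* Runs once on every online CPU; keep it short and non-sleeping. */
static void reset_local_counters(void *info)
{
	/* ... touch only this CPU's private state here ... */
}

static int __init on_each_cpu_demo_init(void)
{
	/* retry = 0: do not retry the cross-CPU call; wait = 1: block
	 * until every CPU has finished running the function. */
	return on_each_cpu(reset_local_counters, NULL, 0, 1);
}

static void __exit on_each_cpu_demo_exit(void)
{
}

module_init(on_each_cpu_demo_init);
module_exit(on_each_cpu_demo_exit);
MODULE_LICENSE("GPL");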
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b052e2c4c710..e9e464a90376 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -27,6 +27,7 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
+#include <linux/hardirq.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -476,6 +477,34 @@ void cancel_rearming_delayed_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
+/**
+ * execute_in_process_context - reliably execute the routine with user context
+ * @fn:		the function to execute
+ * @data:	data to pass to the function
+ * @ew:		guaranteed storage for the execute work structure (must
+ *		be available when the work executes)
+ *
+ * Executes the function immediately if process context is available,
+ * otherwise schedules the function for delayed execution.
+ *
+ * Returns:	0 - function was executed
+ *		1 - function was scheduled for execution
+ */
+int execute_in_process_context(void (*fn)(void *data), void *data,
+			       struct execute_work *ew)
+{
+	if (!in_interrupt()) {
+		fn(data);
+		return 0;
+	}
+
+	INIT_WORK(&ew->work, fn, data);
+	schedule_work(&ew->work);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(execute_in_process_context);
+
 int keventd_up(void)
 {
 	return keventd_wq != NULL;
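Usage note for execute_in_process_context() added above: callers that may be in interrupt context embed the execute_work storage in the object whose teardown needs process context, and the helper either runs the routine immediately or defers it to keventd via schedule_work(). A hedged sketch of that pattern, assuming struct execute_work is declared in linux/workqueue.h alongside this helper; the my_gadget names are hypothetical:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_gadget {
	struct execute_work release_ew;
	/* ... */
};

/* May sleep, so it has to run in process context. */
static void my_gadget_release(void *data)
{
	struct my_gadget *g = data;

	/* ... sleeping cleanup (sysfs removal, blocking frees, ...) ... */
	kfree(g);
}

/* Final put; the caller may be running in interrupt context. */
static void my_gadget_put_last(struct my_gadget *g)
{
	/* Runs the release now when possible, otherwise queues it on
	 * keventd; &g->release_ew must stay valid until the work runs. */
	execute_in_process_context(my_gadget_release, g, &g->release_ew);
}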