Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/capability.c | 21
 -rw-r--r--  kernel/cpuset.c     | 24
 -rw-r--r--  kernel/hrtimer.c    |  8
 -rw-r--r--  kernel/kprobes.c    |  2
 -rw-r--r--  kernel/printk.c     |  2
 -rw-r--r--  kernel/rcuclassic.c | 16
 -rw-r--r--  kernel/rcupreempt.c | 20
 -rw-r--r--  kernel/sched.c      | 11
 -rw-r--r--  kernel/softlockup.c |  1
 -rw-r--r--  kernel/workqueue.c  |  2
 10 files changed, 82 insertions(+), 25 deletions(-)
diff --git a/kernel/capability.c b/kernel/capability.c
index cfbe44299488..901e0fdc3fff 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -121,6 +121,27 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
  * uninteresting and/or not to be changed.
  */
 
+/*
+ * Atomically modify the effective capabilities returning the original
+ * value. No permission check is performed here - it is assumed that the
+ * caller is permitted to set the desired effective capabilities.
+ */
+kernel_cap_t cap_set_effective(const kernel_cap_t pE_new)
+{
+        kernel_cap_t pE_old;
+
+        spin_lock(&task_capability_lock);
+
+        pE_old = current->cap_effective;
+        current->cap_effective = pE_new;
+
+        spin_unlock(&task_capability_lock);
+
+        return pE_old;
+}
+
+EXPORT_SYMBOL(cap_set_effective);
+
 /**
  * sys_capget - get the capabilities of a given process.
  * @header: pointer to struct that contains capability version and
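
The helper added above is the classic "swap a value under a lock and return the old one" pattern, so a caller can later restore the previous capability set. As a rough userspace sketch of the same pattern (hypothetical names; a pthread mutex standing in for task_capability_lock, a bitmask for kernel_cap_t):

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the kernel type: a capability set reduced to a bitmask. */
typedef unsigned long cap_t;

static pthread_mutex_t capability_lock = PTHREAD_MUTEX_INITIALIZER;
static cap_t current_effective;

/* Swap in a new value under the lock and hand back the old one,
 * so the caller can restore it later. */
static cap_t set_effective(cap_t new_caps)
{
        cap_t old;

        pthread_mutex_lock(&capability_lock);
        old = current_effective;
        current_effective = new_caps;
        pthread_mutex_unlock(&capability_lock);

        return old;
}

int main(void)
{
        cap_t saved = set_effective(0x3);       /* raise */
        printf("old=%#lx new=%#lx\n", saved, current_effective);
        set_effective(saved);                   /* restore */
        return 0;
}

Passing and returning the set by value matters for the same reason it does in the patch: the caller keeps a private copy of the old state that no concurrent update can mutate.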
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fceb97e989c..798b3ab054eb 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1882,7 +1882,7 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
  * in order to minimize text size.
  */
 
-static void common_cpu_mem_hotplug_unplug(void)
+static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
 {
         cgroup_lock();
 
@@ -1894,7 +1894,8 @@ static void common_cpu_mem_hotplug_unplug(void)
          * Scheduler destroys domains on hotplug events.
          * Rebuild them based on the current settings.
          */
-        rebuild_sched_domains();
+        if (rebuild_sd)
+                rebuild_sched_domains();
 
         cgroup_unlock();
 }
@@ -1912,11 +1913,22 @@ static void common_cpu_mem_hotplug_unplug(void)
 static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
                                 unsigned long phase, void *unused_cpu)
 {
-        if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
+        switch (phase) {
+        case CPU_UP_CANCELED:
+        case CPU_UP_CANCELED_FROZEN:
+        case CPU_DOWN_FAILED:
+        case CPU_DOWN_FAILED_FROZEN:
+        case CPU_ONLINE:
+        case CPU_ONLINE_FROZEN:
+        case CPU_DEAD:
+        case CPU_DEAD_FROZEN:
+                common_cpu_mem_hotplug_unplug(1);
+                break;
+        default:
                 return NOTIFY_DONE;
+        }
 
-        common_cpu_mem_hotplug_unplug();
-        return 0;
+        return NOTIFY_OK;
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -1929,7 +1941,7 @@ static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
 
 void cpuset_track_online_nodes(void)
 {
-        common_cpu_mem_hotplug_unplug();
+        common_cpu_mem_hotplug_unplug(0);
 }
 #endif
 
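
The rewritten notifier now distinguishes phases it acts on (returning NOTIFY_OK) from phases it ignores (NOTIFY_DONE) instead of returning a bare 0. A compilable sketch of that dispatch shape, with the notifier constants and a few phases stubbed out since this runs outside the kernel:

#include <stdio.h>

/* Userspace stand-ins; NOTIFY_DONE/NOTIFY_OK match the kernel values. */
#define NOTIFY_DONE     0x0000  /* don't care */
#define NOTIFY_OK       0x0001  /* handled */

enum { CPU_ONLINE, CPU_DEAD, CPU_DOWN_FAILED, CPU_DYING };

static int handle_cpuhp(unsigned long phase)
{
        switch (phase) {
        case CPU_ONLINE:
        case CPU_DEAD:
        case CPU_DOWN_FAILED:
                /* the real callback rebuilds sched domains here */
                return NOTIFY_OK;
        default:
                /* e.g. CPU_DYING: not a phase this notifier acts on */
                return NOTIFY_DONE;
        }
}

int main(void)
{
        printf("CPU_ONLINE -> %d\n", handle_cpuhp(CPU_ONLINE));
        printf("CPU_DYING  -> %d\n", handle_cpuhp(CPU_DYING));
        return 0;
}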
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 421be5fe5cc7..ab80515008f4 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1003,10 +1003,18 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
          */
         raise = timer->state == HRTIMER_STATE_PENDING;
 
+        /*
+         * We use preempt_disable to prevent this task from migrating after
+         * setting up the softirq and raising it. Otherwise, if we migrate
+         * we will raise the softirq on the wrong CPU.
+         */
+        preempt_disable();
+
         unlock_hrtimer_base(timer, &flags);
 
         if (raise)
                 hrtimer_raise_softirq();
+        preempt_enable();
 
         return ret;
 }
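
The window this patch closes sits between deciding that the softirq must be raised and actually raising it: if the task migrates in between, the softirq fires on the wrong CPU. A rough userspace illustration of that window for Linux/glibc (sched_getcpu() standing in for the per-CPU work; userspace has no preempt_disable(), which is exactly the point):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/*
 * Between reading "which CPU am I on" and acting on that CPU, the
 * scheduler may migrate the task. In the kernel the patch pins the task
 * with preempt_disable()/preempt_enable() for that critical window.
 */
int main(void)
{
        int before = sched_getcpu();    /* "set up the softirq" point */
        /* ... a preemption/migration could happen here ... */
        int after = sched_getcpu();     /* "raise the softirq" point */

        if (before != after)
                printf("migrated: %d -> %d (the bug window)\n", before, after);
        else
                printf("stayed on CPU %d this time\n", before);
        return 0;
}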
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d4998f81e229..1485ca8d0e00 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -79,7 +79,7 @@ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
  *
  * For such cases, we now have a blacklist
  */
-struct kprobe_blackpoint kprobe_blacklist[] = {
+static struct kprobe_blackpoint kprobe_blacklist[] = {
         {"preempt_schedule",},
         {NULL}    /* Terminator */
 };
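
This hunk, like the printk.c one below, only adds the static qualifier: the table is used in one file, so giving it internal linkage keeps it out of the kernel-wide symbol namespace. A minimal illustration of the distinction:

#include <stdio.h>

/* External linkage: every other translation unit sees this symbol, and
 * a second definition elsewhere is a link-time conflict. */
int exported_table[2] = { 1, 2 };

/* Internal linkage: private to this .c file, exactly what the patch
 * wants for a table only kprobes.c itself walks. */
static int private_table[2] = { 3, 4 };

int main(void)
{
        printf("%d %d\n", exported_table[0], private_table[0]);
        return 0;
}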
diff --git a/kernel/printk.c b/kernel/printk.c
index 8fb01c32aa3b..e2129e83fd75 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -666,7 +666,7 @@ static int acquire_console_semaphore_for_printk(unsigned int cpu)
         return retval;
 }
 
-const char printk_recursion_bug_msg [] =
+static const char printk_recursion_bug_msg [] =
         KERN_CRIT "BUG: recent printk recursion!\n";
 static int printk_recursion_bug;
 
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index f4ffbd0f306f..a38895a5b8e2 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -89,8 +89,22 @@ static void force_quiescent_state(struct rcu_data *rdp,
         /*
          * Don't send IPI to itself. With irqs disabled,
          * rdp->cpu is the current cpu.
+         *
+         * cpu_online_map is updated by the _cpu_down()
+         * using stop_machine_run(). Since we're in irqs disabled
+         * section, stop_machine_run() is not executing, hence
+         * the cpu_online_map is stable.
+         *
+         * However, a cpu might have been offlined _just_ before
+         * we disabled irqs while entering here.
+         * And rcu subsystem might not yet have handled the CPU_DEAD
+         * notification, leading to the offlined cpu's bit
+         * being set in the rcp->cpumask.
+         *
+         * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
+         * sending smp_send_reschedule() to an offlined CPU.
          */
-        cpumask = rcp->cpumask;
+        cpus_and(cpumask, rcp->cpumask, cpu_online_map);
         cpu_clear(rdp->cpu, cpumask);
         for_each_cpu_mask(cpu, cpumask)
                 smp_send_reschedule(cpu);
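
The fix boils down to masking the set of CPUs expected to respond against the set currently online before sending IPIs. A small runnable sketch with the cpumask reduced to one word (the values are made up for illustration):

#include <stdio.h>

/* Bits in `pending` may still reference a CPU that has just gone
 * offline, so AND with `online` before iterating, as cpus_and() does. */
int main(void)
{
        unsigned long pending = 0x0F;   /* CPUs 0-3 expected to respond */
        unsigned long online  = 0x0B;   /* CPU 2 was just hot-unplugged */
        unsigned long mask = pending & online;
        int self = 0;                   /* current CPU: don't IPI ourselves */

        mask &= ~(1UL << self);         /* cpu_clear(rdp->cpu, cpumask) */

        for (int cpu = 0; cpu < 8 * (int)sizeof(mask); cpu++)
                if (mask & (1UL << cpu))
                        printf("would IPI cpu %d\n", cpu);  /* 1 and 3 */
        return 0;
}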
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 5e02b7740702..41d275a81df5 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -925,26 +925,22 @@ void rcu_offline_cpu(int cpu)
         spin_unlock_irqrestore(&rdp->lock, flags);
 }
 
-void __devinit rcu_online_cpu(int cpu)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
-        cpu_set(cpu, rcu_cpu_online_map);
-        spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
-}
-
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
 
 void rcu_offline_cpu(int cpu)
 {
 }
 
-void __devinit rcu_online_cpu(int cpu)
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+
+void __cpuinit rcu_online_cpu(int cpu)
 {
-}
+        unsigned long flags;
 
-#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+        spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
+        cpu_set(cpu, rcu_cpu_online_map);
+        spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
+}
 
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
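
The restructuring gives rcu_online_cpu() a single real definition instead of one body inside #ifdef CONFIG_HOTPLUG_CPU and an empty stub in the #else branch. A compilable miniature of the new layout; build with or without -DCONFIG_HOTPLUG_CPU to see both configurations:

#include <stdio.h>

/* Only the offline path stays conditional after the patch. */
#ifdef CONFIG_HOTPLUG_CPU
void rcu_offline_cpu(int cpu) { printf("offlining cpu %d\n", cpu); }
#else
void rcu_offline_cpu(int cpu) { (void)cpu; /* nothing to do */ }
#endif

/* One definition of the online path, shared by both configs. */
void rcu_online_cpu(int cpu) { printf("onlining cpu %d\n", cpu); }

int main(void)
{
        rcu_online_cpu(1);
        rcu_offline_cpu(1);
        return 0;
}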
diff --git a/kernel/sched.c b/kernel/sched.c
index 3aaa5c8cb421..4e2f60335656 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5622,10 +5622,10 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
         double_rq_lock(rq_src, rq_dest);
         /* Already moved. */
         if (task_cpu(p) != src_cpu)
-                goto out;
+                goto done;
         /* Affinity changed (again). */
         if (!cpu_isset(dest_cpu, p->cpus_allowed))
-                goto out;
+                goto fail;
 
         on_rq = p->se.on_rq;
         if (on_rq)
@@ -5636,8 +5636,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
                 activate_task(rq_dest, p, 0);
                 check_preempt_curr(rq_dest, p);
         }
+done:
         ret = 1;
-out:
+fail:
         double_rq_unlock(rq_src, rq_dest);
         return ret;
 }
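
Before this change, both early exits jumped past ret = 1 to a single out: label, so a task that had already migrated was reported as a failed migration. Splitting the label lets each exit pick the right return value. A runnable miniature of the pattern (names and conditions are illustrative, not the scheduler's):

#include <stdio.h>

/* "Already done" exits report success; real failures skip the ret = 1. */
static int migrate(int already_moved, int affinity_ok)
{
        int ret = 0;

        /* lock(); */
        if (already_moved)
                goto done;              /* success: nothing left to do */
        if (!affinity_ok)
                goto fail;              /* genuine failure */

        /* ... perform the move ... */
done:
        ret = 1;
fail:
        /* unlock(); */
        return ret;
}

int main(void)
{
        printf("already moved -> %d\n", migrate(1, 0)); /* 1; was 0 pre-fix */
        printf("bad affinity  -> %d\n", migrate(0, 0)); /* 0 */
        printf("normal move   -> %d\n", migrate(0, 1)); /* 1 */
        return 0;
}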
@@ -5887,6 +5888,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
         next = pick_next_task(rq, rq->curr);
         if (!next)
                 break;
+        next->sched_class->put_prev_task(rq, next);
         migrate_dead(dead_cpu, next);
 
 }
@@ -8501,6 +8503,9 @@ int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
         rt_period = (u64)rt_period_us * NSEC_PER_USEC;
         rt_runtime = tg->rt_bandwidth.rt_runtime;
 
+        if (rt_period == 0)
+                return -EINVAL;
+
         return tg_set_bandwidth(tg, rt_period, rt_runtime);
 }
 
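
The new guard rejects a zero period before it reaches tg_set_bandwidth(); downstream of that call the period is used as a divisor when (runtime, period) is turned into a utilization ratio, which is our reading of the call chain rather than something spelled out in this hunk. A sketch of validate-before-divide under that assumption (names hypothetical):

#include <stdio.h>

#define EINVAL 22

static long set_bandwidth(unsigned long long period_ns,
                          unsigned long long runtime_ns)
{
        if (period_ns == 0)
                return -EINVAL;         /* the guard this patch adds */

        /* stand-in for the real ratio computation, scaled by 2^20 */
        unsigned long long ratio = (runtime_ns << 20) / period_ns;

        printf("ratio = %llu\n", ratio);
        return 0;
}

int main(void)
{
        set_bandwidth(1000000ULL, 950000ULL);   /* ok */
        printf("zero period -> %ld\n", set_bandwidth(0, 950000ULL));
        return 0;
}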
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index c828c2339cc9..a272d78185eb 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -120,6 +120,7 @@ void softlockup_tick(void)
                 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
                         this_cpu, now - touch_timestamp,
                         current->comm, task_pid_nr(current));
+        print_modules();
         if (regs)
                 show_regs(regs);
         else
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 29fc39f1029c..ce7799540c91 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -13,7 +13,7 @@
  * Kai Petzke <wpp@marie.physik.tu-berlin.de>
  * Theodore Ts'o <tytso@mit.edu>
  *
- * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
+ * Made to use alloc_percpu by Christoph Lameter.
  */
 
 #include <linux/module.h>