author		Arnd Bergmann <arnd@arndb.de>	2011-10-20 09:14:25 -0400
committer	Arnd Bergmann <arnd@arndb.de>	2011-10-20 09:14:25 -0400
commit		b4cbb8a4e602ea77b0525d06eff89c6a6070dab3 (patch)
tree		a5dd723679582505ef3905c90f0c2c032d191b94 /kernel
parent		526b264163068f77c5f2409031f5e25caf3900a9 (diff)
parent		c5d7a9230e5e277f262b6806b7f4d6b35de5a3fb (diff)
Merge branch 'imx-features-for-arnd' of git://git.pengutronix.de/git/imx/linux-2.6 into imx/devel
Conflicts:
	arch/arm/mach-mx5/clock-mx51-mx53.c
	arch/arm/mach-mx5/devices-imx53.h
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c		67
-rw-r--r--	kernel/irq/chip.c		2
-rw-r--r--	kernel/irq/irqdomain.c		6
-rw-r--r--	kernel/irq/manage.c		1
-rw-r--r--	kernel/posix-cpu-timers.c	5
-rw-r--r--	kernel/printk.c			2
-rw-r--r--	kernel/ptrace.c			23
-rw-r--r--	kernel/resource.c		7
-rw-r--r--	kernel/sched.c			67
-rw-r--r--	kernel/sched_rt.c		4
-rw-r--r--	kernel/sys.c			38
-rw-r--r--	kernel/sys_ni.c			1
-rw-r--r--	kernel/sysctl_binary.c		2
-rw-r--r--	kernel/sysctl_check.c		2
-rw-r--r--	kernel/taskstats.c		1
-rw-r--r--	kernel/time/alarmtimer.c	18
-rw-r--r--	kernel/tsacct.c			15
-rw-r--r--	kernel/workqueue.c		7
18 files changed, 178 insertions, 90 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b8785e26ee1c..0f857782d06f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 	local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/*
+	 * next is NULL when called from perf_event_enable_on_exec()
+	 * that will systematically cause a cgroup_switch()
+	 */
+	if (next)
+		cgrp2 = perf_cgroup_from_task(next);
+
+	/*
+	 * only schedule out current cgroup events if we know
+	 * that we are switching to a different cgroup. Otherwise,
+	 * do no touch the cgroup events.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/* prev can never be NULL */
+	cgrp2 = perf_cgroup_from_task(prev);
+
+	/*
+	 * only need to schedule in cgroup events if we are changing
+	 * cgroup during ctxsw. Cgroup events were not scheduled
+	 * out of ctxsw out if that was not the case.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
 }
 
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_out(task);
+		perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+				struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_in(task);
+		perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	 * ctxswin cgroup events which are already scheduled
 	 * in.
 	 */
-	perf_cgroup_sched_out(current);
+	perf_cgroup_sched_out(current, NULL);
 
 	raw_spin_lock(&ctx->lock);
 	task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
 }
 
 static void calc_timer_values(struct perf_event *event,
-				u64 *running,
-				u64 *enabled)
+				u64 *enabled,
+				u64 *running)
 {
 	u64 now, ctx_time;
 
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index d5a3009da71a..dc5114b4c16c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)
 	desc->depth = 1;
 	if (desc->irq_data.chip->irq_shutdown)
 		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-	if (desc->irq_data.chip->irq_disable)
+	else if (desc->irq_data.chip->irq_disable)
 		desc->irq_data.chip->irq_disable(&desc->irq_data);
 	else
 		desc->irq_data.chip->irq_mask(&desc->irq_data);
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index d5828da3fd38..b57a3776de44 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -29,7 +29,11 @@ void irq_domain_add(struct irq_domain *domain)
 	 */
 	for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
 		d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
-		if (d || d->domain) {
+		if (!d) {
+			WARN(1, "error: assigning domain to non existant irq_desc");
+			return;
+		}
+		if (d->domain) {
 			/* things are broken; just report, don't clean up */
 			WARN(1, "error: irq_desc already assigned to a domain");
 			return;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 2e9425889fa8..9b956fa20308 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1331,7 +1331,6 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 		if (!thread_fn)
 			return -EINVAL;
 		handler = irq_default_primary_handler;
-		irqflags |= IRQF_ONESHOT;
 	}
 
 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 58f405b581e7..c8008dd58ef2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -250,7 +250,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 	do {
 		times->utime = cputime_add(times->utime, t->utime);
 		times->stime = cputime_add(times->stime, t->stime);
-		times->sum_exec_runtime += t->se.sum_exec_runtime;
+		times->sum_exec_runtime += task_sched_runtime(t);
 	} while_each_thread(tsk, t);
 out:
 	rcu_read_unlock();
@@ -312,7 +312,8 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 		cpu->cpu = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = thread_group_sched_runtime(p);
+		thread_group_cputime(p, &cputime);
+		cpu->sched = cputime.sum_exec_runtime;
 		break;
 	}
 	return 0;
diff --git a/kernel/printk.c b/kernel/printk.c
index 836a2ae0ac31..28a40d8171b8 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1604,7 +1604,7 @@ static int __init printk_late_init(void)
 	struct console *con;
 
 	for_each_console(con) {
-		if (con->flags & CON_BOOT) {
+		if (!keep_bootcon && con->flags & CON_BOOT) {
 			printk(KERN_INFO "turn off boot console %s%d\n",
 				con->name, con->index);
 			unregister_console(con);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 9de3ecfd20f9..a70d2a5d8c7b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -744,20 +744,17 @@ int ptrace_request(struct task_struct *child, long request,
 		break;
 
 		si = child->last_siginfo;
-		if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP))
-			break;
-
-		child->jobctl |= JOBCTL_LISTENING;
-
-		/*
-		 * If NOTIFY is set, it means event happened between start
-		 * of this trap and now. Trigger re-trap immediately.
-		 */
-		if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-			signal_wake_up(child, true);
-
+		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
+			child->jobctl |= JOBCTL_LISTENING;
+			/*
+			 * If NOTIFY is set, it means event happened between
+			 * start of this trap and now. Trigger re-trap.
+			 */
+			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+				signal_wake_up(child, true);
+			ret = 0;
+		}
 		unlock_task_sighand(child, &flags);
-		ret = 0;
 		break;
 
 	case PTRACE_DETACH:	 /* detach a process that was attached. */
diff --git a/kernel/resource.c b/kernel/resource.c
index 3b3cedc52592..c8dc249da5ce 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -419,6 +419,9 @@ static int __find_resource(struct resource *root, struct resource *old,
 		else
 			tmp.end = root->end;
 
+		if (tmp.end < tmp.start)
+			goto next;
+
 		resource_clip(&tmp, constraint->min, constraint->max);
 		arch_remove_reservations(&tmp);
 
@@ -436,8 +439,10 @@ static int __find_resource(struct resource *root, struct resource *old,
 				return 0;
 			}
 		}
-		if (!this)
+
+next:		if (!this || this->end == root->end)
 			break;
+
 		if (this != old)
 			tmp.start = this->end + 1;
 		this = this->sibling;
diff --git a/kernel/sched.c b/kernel/sched.c
index ccacdbdecf45..b50b0f0c9aa9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-	perf_event_task_sched_in(current);
+	perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -3725,30 +3725,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 }
 
 /*
- * Return sum_exec_runtime for the thread group.
- * In case the task is currently running, return the sum plus current's
- * pending runtime that have not been accounted yet.
- *
- * Note that the thread group might have other running tasks as well,
- * so the return value not includes other pending runtime that other
- * running tasks might have.
- */
-unsigned long long thread_group_sched_runtime(struct task_struct *p)
-{
-	struct task_cputime totals;
-	unsigned long flags;
-	struct rq *rq;
-	u64 ns;
-
-	rq = task_rq_lock(p, &flags);
-	thread_group_cputime(p, &totals);
-	ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, p, &flags);
-
-	return ns;
-}
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in user space since the last update
@@ -4279,9 +4255,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -4322,16 +4298,6 @@ need_resched:
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
-
-			/*
-			 * If we are going to sleep and we have plugged IO
-			 * queued, make sure to submit it to avoid deadlocks.
-			 */
-			if (blk_needs_flush_plug(prev)) {
-				raw_spin_unlock(&rq->lock);
-				blk_schedule_flush_plug(prev);
-				raw_spin_lock(&rq->lock);
-			}
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4369,6 +4335,26 @@ need_resched:
 	if (need_resched())
 		goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void __sched schedule(void)
+{
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
+	__schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4421,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
 	do {
 		add_preempt_count_notrace(PREEMPT_ACTIVE);
-		schedule();
+		__schedule();
 		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
@@ -4463,7 +4449,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();
-		schedule();
+		__schedule();
 		local_irq_disable();
 		sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5588,7 +5574,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
 	add_preempt_count(PREEMPT_ACTIVE);
-	schedule();
+	__schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7443,6 +7429,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
 			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
 			if (sd && (sd->flags & SD_OVERLAP))
 				free_sched_groups(sd->groups, 0);
+			kfree(*per_cpu_ptr(sdd->sd, j));
 			kfree(*per_cpu_ptr(sdd->sg, j));
 			kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 97540f0c9e47..af1177858be3 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1050,7 +1050,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
 	    (curr->rt.nr_cpus_allowed < 2 ||
-	     curr->prio < p->prio) &&
+	     curr->prio <= p->prio) &&
 	    (p->rt.nr_cpus_allowed > 1)) {
 		int target = find_lowest_rq(p);
 
@@ -1581,7 +1581,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 	    p->rt.nr_cpus_allowed > 1 &&
 	    rt_task(rq->curr) &&
 	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio))
+	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
 
diff --git a/kernel/sys.c b/kernel/sys.c
index dd948a1fca4c..18ee1d2f6474 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -37,6 +37,8 @@
 #include <linux/fs_struct.h>
 #include <linux/gfp.h>
 #include <linux/syscore_ops.h>
+#include <linux/version.h>
+#include <linux/ctype.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -44,6 +46,8 @@
 #include <linux/user_namespace.h>
 
 #include <linux/kmsg_dump.h>
+/* Move somewhere else to avoid recompiling? */
+#include <generated/utsrelease.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -1161,6 +1165,34 @@ DECLARE_RWSEM(uts_sem);
 #define override_architecture(name)	0
 #endif
 
+/*
+ * Work around broken programs that cannot handle "Linux 3.0".
+ * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
+ */
+static int override_release(char __user *release, int len)
+{
+	int ret = 0;
+	char buf[len];
+
+	if (current->personality & UNAME26) {
+		char *rest = UTS_RELEASE;
+		int ndots = 0;
+		unsigned v;
+
+		while (*rest) {
+			if (*rest == '.' && ++ndots >= 3)
+				break;
+			if (!isdigit(*rest) && *rest != '.')
+				break;
+			rest++;
+		}
+		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+		snprintf(buf, len, "2.6.%u%s", v, rest);
+		ret = copy_to_user(release, buf, len);
+	}
+	return ret;
+}
+
 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 {
 	int errno = 0;
@@ -1170,6 +1202,8 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 		errno = -EFAULT;
 	up_read(&uts_sem);
 
+	if (!errno && override_release(name->release, sizeof(name->release)))
+		errno = -EFAULT;
 	if (!errno && override_architecture(name))
 		errno = -EFAULT;
 	return errno;
@@ -1191,6 +1225,8 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
 		error = -EFAULT;
 	up_read(&uts_sem);
 
+	if (!error && override_release(name->release, sizeof(name->release)))
+		error = -EFAULT;
 	if (!error && override_architecture(name))
 		error = -EFAULT;
 	return error;
@@ -1225,6 +1261,8 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
 
 	if (!error && override_architecture(name))
 		error = -EFAULT;
+	if (!error && override_release(name->release, sizeof(name->release)))
+		error = -EFAULT;
 	return error ? -EFAULT : 0;
 }
 #endif
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 62cbc8877fef..a9a5de07c4f1 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -16,7 +16,6 @@ asmlinkage long sys_ni_syscall(void)
 	return -ENOSYS;
 }
 
-cond_syscall(sys_nfsservctl);
 cond_syscall(sys_quotactl);
 cond_syscall(sys32_quotactl);
 cond_syscall(sys_acct);
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 3b8e028b9601..e8bffbe2ba4b 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1,6 +1,6 @@
 #include <linux/stat.h>
 #include <linux/sysctl.h>
-#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
+#include "../fs/xfs/xfs_sysctl.h"
 #include <linux/sunrpc/debug.h>
 #include <linux/string.h>
 #include <net/ip_vs.h>
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 4e4932a7b360..362da653813d 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1,6 +1,6 @@
 #include <linux/stat.h>
 #include <linux/sysctl.h>
-#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
+#include "../fs/xfs/xfs_sysctl.h"
 #include <linux/sunrpc/debug.h>
 #include <linux/string.h>
 #include <net/ip_vs.h>
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index e19ce1454ee1..e66046456f4f 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -655,6 +655,7 @@ static struct genl_ops taskstats_ops = {
 	.cmd		= TASKSTATS_CMD_GET,
 	.doit		= taskstats_user_cmd,
 	.policy		= taskstats_cmd_get_policy,
+	.flags		= GENL_ADMIN_PERM,
 };
 
 static struct genl_ops cgroupstats_ops = {
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 59f369f98a04..ea5e1a928d5b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -441,6 +441,8 @@ static int alarm_timer_create(struct k_itimer *new_timer)
 static void alarm_timer_get(struct k_itimer *timr,
 				struct itimerspec *cur_setting)
 {
+	memset(cur_setting, 0, sizeof(struct itimerspec));
+
 	cur_setting->it_interval =
 		ktime_to_timespec(timr->it.alarmtimer.period);
 	cur_setting->it_value =
@@ -479,11 +481,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
 	if (!rtcdev)
 		return -ENOTSUPP;
 
-	/* Save old values */
-	old_setting->it_interval =
-		ktime_to_timespec(timr->it.alarmtimer.period);
-	old_setting->it_value =
-		ktime_to_timespec(timr->it.alarmtimer.node.expires);
+	/*
+	 * XXX HACK! Currently we can DOS a system if the interval
+	 * period on alarmtimers is too small. Cap the interval here
+	 * to 100us and solve this properly in a future patch! -jstultz
+	 */
+	if ((new_setting->it_interval.tv_sec == 0) &&
+			(new_setting->it_interval.tv_nsec < 100000))
+		new_setting->it_interval.tv_nsec = 100000;
+
+	if (old_setting)
+		alarm_timer_get(timr, old_setting);
 
 	/* If the timer was already set, cancel it */
 	alarm_cancel(&timr->it.alarmtimer);
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 24dc60d9fa1f..5bbfac85866e 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -78,6 +78,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 
 #define KB 1024
 #define MB (1024*KB)
+#define KB_MASK (~(KB-1))
 /*
  * fill in extended accounting fields
  */
@@ -95,14 +96,14 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 		stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB;
 		mmput(mm);
 	}
-	stats->read_char = p->ioac.rchar;
-	stats->write_char = p->ioac.wchar;
-	stats->read_syscalls = p->ioac.syscr;
-	stats->write_syscalls = p->ioac.syscw;
+	stats->read_char = p->ioac.rchar & KB_MASK;
+	stats->write_char = p->ioac.wchar & KB_MASK;
+	stats->read_syscalls = p->ioac.syscr & KB_MASK;
+	stats->write_syscalls = p->ioac.syscw & KB_MASK;
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-	stats->read_bytes = p->ioac.read_bytes;
-	stats->write_bytes = p->ioac.write_bytes;
-	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes;
+	stats->read_bytes = p->ioac.read_bytes & KB_MASK;
+	stats->write_bytes = p->ioac.write_bytes & KB_MASK;
+	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;
 #else
 	stats->read_bytes = 0;
 	stats->write_bytes = 0;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 25fb1b0e53fa..1783aabc6128 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2412,8 +2412,13 @@ reflush:
 
 	for_each_cwq_cpu(cpu, wq) {
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+		bool drained;
 
-		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+		spin_lock_irq(&cwq->gcwq->lock);
+		drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
+		spin_unlock_irq(&cwq->gcwq->lock);
+
+		if (drained)
 			continue;
 
 		if (++flush_cnt == 10 ||