Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditfilter.c |  4
-rw-r--r--  kernel/exit.c        | 39
-rw-r--r--  kernel/irq/chip.c    |  5
-rw-r--r--  kernel/printk.c      |  2
-rw-r--r--  kernel/relay.c       | 17
-rw-r--r--  kernel/sched.c       |  7
-rw-r--r--  kernel/timer.c       |  7
-rw-r--r--  kernel/workqueue.c   | 12
8 files changed, 51 insertions(+), 42 deletions(-)
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 2e896f8ae29e..9c8c23227c7f 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -800,8 +800,8 @@ static inline int audit_dupe_selinux_field(struct audit_field *df,
 
         /* our own copy of se_str */
         se_str = kstrdup(sf->se_str, GFP_KERNEL);
-        if (unlikely(IS_ERR(se_str)))
+        if (unlikely(!se_str))
                 return -ENOMEM;
         df->se_str = se_str;
 
         /* our own (refreshed) copy of se_rule */
diff --git a/kernel/exit.c b/kernel/exit.c
index 122fadb972fc..46cf6b681460 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -468,7 +468,7 @@ void fastcall put_files_struct(struct files_struct *files)
                 fdt = files_fdtable(files);
                 if (fdt != &files->fdtab)
                         kmem_cache_free(files_cachep, files);
-                call_rcu(&fdt->rcu, free_fdtable_rcu);
+                free_fdtable(fdt);
         }
 }
 
@@ -597,14 +597,6 @@ choose_new_parent(struct task_struct *p, struct task_struct *reaper)
 static void
 reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 {
-        /* We don't want people slaying init. */
-        if (p->exit_signal != -1)
-                p->exit_signal = SIGCHLD;
-
-        if (p->pdeath_signal)
-                /* We already hold the tasklist_lock here. */
-                group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
-
         /* Move the child from its dying parent to the new one. */
         if (unlikely(traced)) {
                 /* Preserve ptrace links if someone else is tracing this child. */
@@ -620,13 +612,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
                 p->parent = p->real_parent;
                 add_parent(p);
 
-                /* If we'd notified the old parent about this child's death,
-                 * also notify the new parent.
-                 */
-                if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
-                    thread_group_empty(p))
-                        do_notify_parent(p, p->exit_signal);
-                else if (p->state == TASK_TRACED) {
+                if (p->state == TASK_TRACED) {
                         /*
                          * If it was at a trace stop, turn it into
                          * a normal stop since it's no longer being
@@ -636,6 +622,27 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
                 }
         }
 
+        /* If this is a threaded reparent there is no need to
+         * notify anyone anything has happened.
+         */
+        if (p->real_parent->group_leader == father->group_leader)
+                return;
+
+        /* We don't want people slaying init. */
+        if (p->exit_signal != -1)
+                p->exit_signal = SIGCHLD;
+
+        if (p->pdeath_signal)
+                /* We already hold the tasklist_lock here. */
+                group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
+
+        /* If we'd notified the old parent about this child's death,
+         * also notify the new parent.
+         */
+        if (!traced && p->exit_state == EXIT_ZOMBIE &&
+            p->exit_signal != -1 && thread_group_empty(p))
+                do_notify_parent(p, p->exit_signal);
+
         /*
          * process group orphan check
          * Case ii: Our child is in a different pgrp
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ebfd24a41858..d27b25855743 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -517,10 +517,9 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 
         if (!handle)
                 handle = handle_bad_irq;
-
-        if (desc->chip == &no_irq_chip) {
+        else if (desc->chip == &no_irq_chip) {
                 printk(KERN_WARNING "Trying to install %sinterrupt handler "
-                       "for IRQ%d\n", is_chained ? "chained " : " ", irq);
+                       "for IRQ%d\n", is_chained ? "chained " : "", irq);
                 /*
                  * Some ARM implementations install a handler for really dumb
                  * interrupt hardware without setting an irq_chip. This worked
diff --git a/kernel/printk.c b/kernel/printk.c
index 185bb45eacf7..c770e1a4e882 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -335,7 +335,7 @@ static void __call_console_drivers(unsigned long start, unsigned long end)
 
 static int __read_mostly ignore_loglevel;
 
-int __init ignore_loglevel_setup(char *str)
+static int __init ignore_loglevel_setup(char *str)
 {
         ignore_loglevel = 1;
         printk(KERN_INFO "debug: ignoring loglevel setting.\n");
diff --git a/kernel/relay.c b/kernel/relay.c
index a4701e7ba7d0..284e2e8b4eed 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -302,7 +302,7 @@ static struct rchan_callbacks default_channel_callbacks = {
 
 /**
  *      wakeup_readers - wake up readers waiting on a channel
- *      @private: the channel buffer
+ *      @work: work struct that contains the channel buffer
  *
  *      This is the work function used to defer reader waking. The
  *      reason waking is deferred is that calling directly from write
@@ -322,7 +322,7 @@ static void wakeup_readers(struct work_struct *work)
 *
 *      See relay_reset for description of effect.
 */
-static inline void __relay_reset(struct rchan_buf *buf, unsigned int init)
+static void __relay_reset(struct rchan_buf *buf, unsigned int init)
 {
         size_t i;
 
@@ -418,7 +418,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan,
 * The channel buffer and channel buffer data structure are then freed
 * automatically when the last reference is given up.
 */
-static inline void relay_close_buf(struct rchan_buf *buf)
+static void relay_close_buf(struct rchan_buf *buf)
 {
         buf->finalized = 1;
         cancel_delayed_work(&buf->wake_readers);
@@ -426,7 +426,7 @@ static inline void relay_close_buf(struct rchan_buf *buf)
         kref_put(&buf->kref, relay_remove_buf);
 }
 
-static inline void setup_callbacks(struct rchan *chan,
+static void setup_callbacks(struct rchan *chan,
                             struct rchan_callbacks *cb)
 {
         if (!cb) {
@@ -946,11 +946,10 @@ typedef int (*subbuf_actor_t) (size_t read_start,
 /*
  *      relay_file_read_subbufs - read count bytes, bridging subbuf boundaries
  */
-static inline ssize_t relay_file_read_subbufs(struct file *filp,
-                                              loff_t *ppos,
-                                              subbuf_actor_t subbuf_actor,
-                                              read_actor_t actor,
-                                              read_descriptor_t *desc)
+static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
+                                        subbuf_actor_t subbuf_actor,
+                                        read_actor_t actor,
+                                        read_descriptor_t *desc)
 {
         struct rchan_buf *buf = filp->private_data;
         size_t read_start, avail;
diff --git a/kernel/sched.c b/kernel/sched.c
index 5cd833bc2173..b515e3caad7f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1567,6 +1567,7 @@ int fastcall wake_up_state(struct task_struct *p, unsigned int state)
         return try_to_wake_up(p, state, 0);
 }
 
+static void task_running_tick(struct rq *rq, struct task_struct *p);
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
@@ -1627,7 +1628,7 @@ void fastcall sched_fork(struct task_struct *p, int clone_flags)
                  * runqueue lock is not a problem.
                  */
                 current->time_slice = 1;
-                scheduler_tick();
+                task_running_tick(cpu_rq(cpu), current);
         }
         local_irq_enable();
         put_cpu();
@@ -4618,8 +4619,10 @@ asmlinkage long sys_sched_yield(void)
 
 static inline int __resched_legal(int expected_preempt_count)
 {
+#ifdef CONFIG_PREEMPT
         if (unlikely(preempt_count() != expected_preempt_count))
                 return 0;
+#endif
         if (unlikely(system_state != SYSTEM_RUNNING))
                 return 0;
         return 1;
@@ -5607,7 +5610,7 @@ static void cpu_attach_domain(struct sched_domain *sd, int cpu)
 }
 
 /* cpus with isolated domains */
-static cpumask_t __cpuinitdata cpu_isolated_map = CPU_MASK_NONE;
+static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
 
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
diff --git a/kernel/timer.c b/kernel/timer.c
index feddf817baa5..c2a8ccfc2882 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1344,11 +1344,10 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
                  * should never happens anyway). You just have the printk()
                  * that will tell you if something is gone wrong and where.
                  */
-                if (timeout < 0)
-                {
+                if (timeout < 0) {
                         printk(KERN_ERR "schedule_timeout: wrong timeout "
-                                "value %lx from %p\n", timeout,
-                                __builtin_return_address(0));
+                                "value %lx\n", timeout);
+                        dump_stack();
                         current->state = TASK_RUNNING;
                         goto out;
                 }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 742cbbe49bdc..a3da07c5af28 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -233,7 +233,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: delayable work to queue
+ * @dwork: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * queue_delayed_work_on - queue work on specific CPU after delay
  * @cpu: CPU number to execute work on
  * @wq: workqueue to use
- * @work: work to queue
+ * @dwork: work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
@@ -637,9 +637,11 @@ int schedule_on_each_cpu(work_func_t func)
 
         mutex_lock(&workqueue_mutex);
         for_each_online_cpu(cpu) {
-                INIT_WORK(per_cpu_ptr(works, cpu), func);
-                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
-                                per_cpu_ptr(works, cpu));
+                struct work_struct *work = per_cpu_ptr(works, cpu);
+
+                INIT_WORK(work, func);
+                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
+                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
         }
         mutex_unlock(&workqueue_mutex);
         flush_workqueue(keventd_wq);