Diffstat (limited to 'kernel')
43 files changed, 311 insertions, 321 deletions
diff --git a/kernel/async.c b/kernel/async.c
index 968ef9457d4e..27235f5de198 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -92,19 +92,18 @@ extern int initcall_debug;
 static async_cookie_t __lowest_in_progress(struct list_head *running)
 {
 	struct async_entry *entry;
+
 	if (!list_empty(running)) {
 		entry = list_first_entry(running,
 			struct async_entry, list);
 		return entry->cookie;
-	} else if (!list_empty(&async_pending)) {
-		entry = list_first_entry(&async_pending,
-			struct async_entry, list);
-		return entry->cookie;
-	} else {
-		/* nothing in progress... next_cookie is "infinity" */
-		return next_cookie;
 	}
 
+	list_for_each_entry(entry, &async_pending, list)
+		if (entry->running == running)
+			return entry->cookie;
+
+	return next_cookie; /* "infinity" value */
 }
 
 static async_cookie_t lowest_in_progress(struct list_head *running)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 917ab9525568..6e7351739a82 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -734,9 +734,6 @@ int audit_tag_tree(char *old, char *new)
 		dentry = dget(path.dentry);
 		path_put(&path);
 
-		if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
-			follow_up(&mnt, &dentry);
-
 		list_add_tail(&list, &tagged->mnt_list);
 
 		mutex_lock(&audit_filter_mutex);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index a6fe71fd5d1b..713098ee5a02 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1028,7 +1028,7 @@ static void audit_update_watch(struct audit_parent *parent,
 
 		if (audit_enabled) {
 			struct audit_buffer *ab;
-			ab = audit_log_start(NULL, GFP_KERNEL,
+			ab = audit_log_start(NULL, GFP_NOFS,
 				AUDIT_CONFIG_CHANGE);
 			audit_log_format(ab, "auid=%u ses=%u",
 				audit_get_loginuid(current),
@@ -1067,7 +1067,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
 		e = container_of(r, struct audit_entry, rule);
 		if (audit_enabled) {
 			struct audit_buffer *ab;
-			ab = audit_log_start(NULL, GFP_KERNEL,
+			ab = audit_log_start(NULL, GFP_NOFS,
 				AUDIT_CONFIG_CHANGE);
 			audit_log_format(ab, "auid=%u ses=%u",
 				audit_get_loginuid(current),
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 382109b5baeb..a7267bfd3765 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1133,8 +1133,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
  free_cg_links:
 	free_cg_links(&tmp_cg_links);
  drop_new_super:
-	up_write(&sb->s_umount);
-	deactivate_super(sb);
+	deactivate_locked_super(sb);
 	return ret;
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index b9e2edd00726..875ffbdd96d0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1409,7 +1409,7 @@ long do_fork(unsigned long clone_flags,
 	}
 
 	audit_finish_fork(p);
-	tracehook_report_clone(trace, regs, clone_flags, nr, p);
+	tracehook_report_clone(regs, clone_flags, nr, p);
 
 	/*
 	 * We set PF_STARTING at creation in case tracing wants to
diff --git a/kernel/futex.c b/kernel/futex.c
index eef8cd26b5e5..d546b2d53a62 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -193,6 +193,7 @@ static void drop_futex_key_refs(union futex_key *key)
  * @uaddr: virtual address of the futex
  * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
  * @key: address where result is stored.
+ * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
  *
  * Returns a negative error code or 0
  * The key words are stored in *key on success.
@@ -203,7 +204,8 @@ static void drop_futex_key_refs(union futex_key *key)
  *
  * lock_page() might sleep, the caller should not hold a spinlock.
  */
-static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
+static int
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
@@ -226,7 +228,7 @@ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
 	 * but access_ok() should be faster than find_vma()
 	 */
 	if (!fshared) {
-		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
+		if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
 			return -EFAULT;
 		key->private.mm = mm;
 		key->private.address = address;
@@ -235,7 +237,7 @@ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
 	}
 
 again:
-	err = get_user_pages_fast(address, 1, 0, &page);
+	err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
 	if (err < 0)
 		return err;
 
@@ -677,7 +679,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 	if (!bitset)
 		return -EINVAL;
 
-	ret = get_futex_key(uaddr, fshared, &key);
+	ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -723,10 +725,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 	int ret, op_ret;
 
 retry:
-	ret = get_futex_key(uaddr1, fshared, &key1);
+	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
 	if (unlikely(ret != 0))
 		goto out;
-	ret = get_futex_key(uaddr2, fshared, &key2);
+	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
 	if (unlikely(ret != 0))
 		goto out_put_key1;
 
@@ -814,10 +816,10 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 	int ret, drop_count = 0;
 
 retry:
-	ret = get_futex_key(uaddr1, fshared, &key1);
+	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
 	if (unlikely(ret != 0))
 		goto out;
-	ret = get_futex_key(uaddr2, fshared, &key2);
+	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_READ);
 	if (unlikely(ret != 0))
 		goto out_put_key1;
 
@@ -1140,7 +1142,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 	q.bitset = bitset;
 retry:
 	q.key = FUTEX_KEY_INIT;
-	ret = get_futex_key(uaddr, fshared, &q.key);
+	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_READ);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -1330,7 +1332,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 	q.pi_state = NULL;
 retry:
 	q.key = FUTEX_KEY_INIT;
-	ret = get_futex_key(uaddr, fshared, &q.key);
+	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -1594,7 +1596,7 @@ retry:
 	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
 		return -EPERM;
 
-	ret = get_futex_key(uaddr, fshared, &key);
+	ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
 	if (unlikely(ret != 0))
 		goto out;
 
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 3394f8f52964..7d047808419d 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -3,5 +3,5 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
-obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
+obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o
 obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c687ba4363f2..13c68e71b726 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -359,7 +359,6 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 
 	spin_lock(&desc->lock);
 	mask_ack_irq(desc, irq);
-	desc = irq_remap_to_desc(irq, desc);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
@@ -438,7 +437,6 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	desc->status &= ~IRQ_INPROGRESS;
 out:
 	desc->chip->eoi(irq);
-	desc = irq_remap_to_desc(irq, desc);
 
 	spin_unlock(&desc->lock);
 }
@@ -475,7 +473,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		    !desc->action)) {
 		desc->status |= (IRQ_PENDING | IRQ_MASKED);
 		mask_ack_irq(desc, irq);
-		desc = irq_remap_to_desc(irq, desc);
 		goto out_unlock;
 	}
 	kstat_incr_irqs_this_cpu(irq, desc);
@@ -483,7 +480,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	/* Start handling the irq */
 	if (desc->chip->ack)
 		desc->chip->ack(irq);
-	desc = irq_remap_to_desc(irq, desc);
 
 	/* Mark the IRQ currently in progress.*/
 	desc->status |= IRQ_INPROGRESS;
@@ -544,10 +540,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	if (desc->chip->eoi) {
+	if (desc->chip->eoi)
 		desc->chip->eoi(irq);
-		desc = irq_remap_to_desc(irq, desc);
-	}
 }
 
 void
@@ -582,10 +576,8 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 
 	/* Uninstall? */
 	if (handle == handle_bad_irq) {
-		if (desc->chip != &no_irq_chip) {
+		if (desc->chip != &no_irq_chip)
 			mask_ack_irq(desc, irq);
-			desc = irq_remap_to_desc(irq, desc);
-		}
 		desc->status |= IRQ_DISABLED;
 		desc->depth = 1;
 	}
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index d82142be8dd2..18041a254d32 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
@@ -81,45 +82,48 @@ static struct irq_desc irq_desc_init = {
 	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
 };
 
-void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
 {
-	int node;
 	void *ptr;
 
-	node = cpu_to_node(cpu);
-	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
+	if (slab_is_available())
+		ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+				   GFP_ATOMIC, node);
+	else
+		ptr = alloc_bootmem_node(NODE_DATA(node),
+					 nr * sizeof(*desc->kstat_irqs));
 
 	/*
	 * don't overwite if can not get new one
	 * init_copy_kstat_irqs() could still use old one
	 */
 	if (ptr) {
-		printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n",
-			cpu, node);
+		printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
 		desc->kstat_irqs = ptr;
 	}
 }
 
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
 {
 	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
 
 	spin_lock_init(&desc->lock);
 	desc->irq = irq;
 #ifdef CONFIG_SMP
-	desc->cpu = cpu;
+	desc->node = node;
 #endif
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	init_kstat_irqs(desc, cpu, nr_cpu_ids);
+	init_kstat_irqs(desc, node, nr_cpu_ids);
 	if (!desc->kstat_irqs) {
 		printk(KERN_ERR "can not alloc kstat_irqs\n");
 		BUG_ON(1);
 	}
-	if (!init_alloc_desc_masks(desc, cpu, false)) {
+	if (!alloc_desc_masks(desc, node, false)) {
 		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
 		BUG_ON(1);
 	}
-	arch_init_chip_data(desc, cpu);
+	init_desc_masks(desc);
+	arch_init_chip_data(desc, node);
 }
 
 /*
@@ -169,7 +173,8 @@ int __init early_irq_init(void)
 		desc[i].irq = i;
 		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-		init_alloc_desc_masks(&desc[i], 0, true);
+		alloc_desc_masks(&desc[i], 0, true);
+		init_desc_masks(&desc[i]);
 		irq_desc_ptrs[i] = desc + i;
 	}
 
@@ -187,11 +192,10 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 	return NULL;
 }
 
-struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 {
 	struct irq_desc *desc;
 	unsigned long flags;
-	int node;
 
 	if (irq >= nr_irqs) {
 		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
@@ -210,15 +214,17 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 	if (desc)
 		goto out_unlock;
 
-	node = cpu_to_node(cpu);
-	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-	printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
-		irq, cpu, node);
+	if (slab_is_available())
+		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+	else
+		desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
+
+	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
 	if (!desc) {
 		printk(KERN_ERR "can not alloc irq_desc\n");
 		BUG_ON(1);
 	}
-	init_one_irq_desc(irq, desc, cpu);
+	init_one_irq_desc(irq, desc, node);
 
 	irq_desc_ptrs[irq] = desc;
 
@@ -256,7 +262,8 @@ int __init early_irq_init(void)
 
 	for (i = 0; i < count; i++) {
 		desc[i].irq = i;
-		init_alloc_desc_masks(&desc[i], 0, true);
+		alloc_desc_masks(&desc[i], 0, true);
+		init_desc_masks(&desc[i]);
 		desc[i].kstat_irqs = kstat_irqs_all[i];
 	}
 	return arch_early_irq_init();
@@ -267,7 +274,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
 }
 
-struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
 {
 	return irq_to_desc(irq);
 }
@@ -363,8 +370,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
-	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
-
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
 
@@ -455,11 +460,8 @@ unsigned int __do_IRQ(unsigned int irq)
 		/*
 		 * No locking required for CPU-local interrupts:
 		 */
-		if (desc->chip->ack) {
+		if (desc->chip->ack)
 			desc->chip->ack(irq);
-			/* get new one */
-			desc = irq_remap_to_desc(irq, desc);
-		}
 		if (likely(!(desc->status & IRQ_DISABLED))) {
 			action_ret = handle_IRQ_event(irq, desc->action);
 			if (!noirqdebug)
@@ -470,10 +472,8 @@ unsigned int __do_IRQ(unsigned int irq)
 	}
 
 	spin_lock(&desc->lock);
-	if (desc->chip->ack) {
+	if (desc->chip->ack)
 		desc->chip->ack(irq);
-		desc = irq_remap_to_desc(irq, desc);
-	}
 	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 01ce20eab38f..73468253143b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -16,7 +16,7 @@ extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 
 extern struct lock_class_key irq_desc_lock_class;
-extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 extern void clear_kstat_irqs(struct irq_desc *desc);
 extern spinlock_t sparse_irq_lock;
 
@@ -42,6 +42,9 @@ static inline void unregister_handler_proc(unsigned int irq,
 
 extern int irq_select_affinity_usr(unsigned int irq);
 
+extern void
+irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask);
+
 /*
  * Debugging printout:
  */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7e2e7dd4cd2f..aaf5c9d05770 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -80,7 +80,7 @@ int irq_can_set_affinity(unsigned int irq)
 	return 1;
 }
 
-static void
+void
 irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
 {
 	struct irqaction *action = desc->action;
@@ -109,18 +109,22 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(desc->affinity, cpumask);
-		desc->chip->set_affinity(irq, cpumask);
-	} else {
+	if (desc->status & IRQ_MOVE_PCNTXT) {
+		if (!desc->chip->set_affinity(irq, cpumask)) {
+			cpumask_copy(desc->affinity, cpumask);
+			irq_set_thread_affinity(desc, cpumask);
+		}
+	}
+	else {
 		desc->status |= IRQ_MOVE_PENDING;
 		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
-	cpumask_copy(desc->affinity, cpumask);
-	desc->chip->set_affinity(irq, cpumask);
+	if (!desc->chip->set_affinity(irq, cpumask)) {
+		cpumask_copy(desc->affinity, cpumask);
+		irq_set_thread_affinity(desc, cpumask);
+	}
 #endif
-	irq_set_thread_affinity(desc, cpumask);
 	desc->status |= IRQ_AFFINITY_SET;
 	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index e05ad9be43b7..cfe767ca1545 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,5 +1,8 @@
 
 #include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include "internals.h"
 
 void move_masked_irq(int irq)
 {
@@ -39,11 +42,12 @@ void move_masked_irq(int irq)
 	 * masking the irqs.
 	 */
 	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-		   < nr_cpu_ids)) {
-		cpumask_and(desc->affinity,
-			    desc->pending_mask, cpu_online_mask);
-		desc->chip->set_affinity(irq, desc->affinity);
-	}
+		   < nr_cpu_ids))
+		if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
+			cpumask_copy(desc->affinity, desc->pending_mask);
+			irq_set_thread_affinity(desc, desc->pending_mask);
+		}
+
 	cpumask_clear(desc->pending_mask);
 }
 
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 44bbdcbaf8d2..2f69bee57bf2 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -15,9 +15,9 @@
 
 static void init_copy_kstat_irqs(struct irq_desc *old_desc,
 				 struct irq_desc *desc,
-				 int cpu, int nr)
+				 int node, int nr)
 {
-	init_kstat_irqs(desc, cpu, nr);
+	init_kstat_irqs(desc, node, nr);
 
 	if (desc->kstat_irqs != old_desc->kstat_irqs)
 		memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
@@ -34,20 +34,20 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
 }
 
 static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
-				 struct irq_desc *desc, int cpu)
+				 struct irq_desc *desc, int node)
 {
 	memcpy(desc, old_desc, sizeof(struct irq_desc));
-	if (!init_alloc_desc_masks(desc, cpu, false)) {
+	if (!alloc_desc_masks(desc, node, false)) {
 		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
 				"for migration.\n", irq);
 		return false;
 	}
 	spin_lock_init(&desc->lock);
-	desc->cpu = cpu;
+	desc->node = node;
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+	init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
 	init_copy_desc_masks(old_desc, desc);
-	arch_init_copy_chip_data(old_desc, desc, cpu);
+	arch_init_copy_chip_data(old_desc, desc, node);
 	return true;
 }
 
@@ -59,12 +59,11 @@ static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
 }
 
 static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
-						int cpu)
+						int node)
 {
 	struct irq_desc *desc;
 	unsigned int irq;
 	unsigned long flags;
-	int node;
 
 	irq = old_desc->irq;
 
@@ -76,7 +75,6 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	if (desc && old_desc != desc)
 		goto out_unlock;
 
-	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
 	if (!desc) {
 		printk(KERN_ERR "irq %d: can not get new irq_desc "
@@ -85,7 +83,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 		desc = old_desc;
 		goto out_unlock;
 	}
-	if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
+	if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) {
 		/* still use old one */
 		kfree(desc);
 		desc = old_desc;
@@ -97,9 +95,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 
 	/* free the old one */
 	free_one_irq_desc(old_desc, desc);
-	spin_unlock(&old_desc->lock);
 	kfree(old_desc);
-	spin_lock(&desc->lock);
 
 	return desc;
 
@@ -109,24 +105,14 @@ out_unlock:
 	return desc;
 }
 
-struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu)
+struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 {
-	int old_cpu;
-	int node, old_node;
-
 	/* those all static, do move them */
 	if (desc->irq < NR_IRQS_LEGACY)
 		return desc;
 
-	old_cpu = desc->cpu;
-	if (old_cpu != cpu) {
-		node = cpu_to_node(cpu);
-		old_node = cpu_to_node(old_cpu);
-		if (old_node != node)
-			desc = __real_move_irq_desc(desc, cpu);
-		else
-			desc->cpu = cpu;
-	}
+	if (desc->node != node)
+		desc = __real_move_irq_desc(desc, node);
 
 	return desc;
 }
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 5a758c6e4950..e4983770913b 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1451,7 +1451,6 @@ int kernel_kexec(void)
 		error = device_suspend(PMSG_FREEZE);
 		if (error)
 			goto Resume_console;
-		device_pm_lock();
 		/* At this point, device_suspend() has been called,
 		 * but *not* device_power_down(). We *must*
 		 * device_power_down() now. Otherwise, drivers for
@@ -1489,7 +1488,6 @@ int kernel_kexec(void)
 		enable_nonboot_cpus();
 		device_power_up(PMSG_RESTORE);
  Resume_devices:
-		device_pm_unlock();
 		device_resume(PMSG_RESTORE);
  Resume_console:
 		resume_console();
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index e4dcfb2272a4..9147a3190c9d 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -1583,8 +1583,8 @@ static void sysrq_handle_gdb(int key, struct tty_struct *tty)
 
 static struct sysrq_key_op sysrq_gdb_op = {
 	.handler	= sysrq_handle_gdb,
-	.help_msg	= "Gdb",
-	.action_msg	= "GDB",
+	.help_msg	= "debug(G)",
+	.action_msg	= "DEBUG",
 };
 #endif
 
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b750675251e5..7e95bedb2bfc 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -370,8 +370,10 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
 	sub_info->argv = argv;
 	sub_info->envp = envp;
 	sub_info->cred = prepare_usermodehelper_creds();
-	if (!sub_info->cred)
+	if (!sub_info->cred) {
+		kfree(sub_info);
 		return NULL;
+	}
 
   out:
 	return sub_info;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a5e74ddee0e2..c0fa54b276d9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -319,6 +319,22 @@ struct kprobe __kprobes *get_kprobe(void *addr)
 	return NULL;
 }
 
+/* Arm a kprobe with text_mutex */
+static void __kprobes arm_kprobe(struct kprobe *kp)
+{
+	mutex_lock(&text_mutex);
+	arch_arm_kprobe(kp);
+	mutex_unlock(&text_mutex);
+}
+
+/* Disarm a kprobe with text_mutex */
+static void __kprobes disarm_kprobe(struct kprobe *kp)
+{
+	mutex_lock(&text_mutex);
+	arch_disarm_kprobe(kp);
+	mutex_unlock(&text_mutex);
+}
+
 /*
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
@@ -538,7 +554,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
 		if (!kprobes_all_disarmed)
 			/* Arm the breakpoint again. */
-			arch_arm_kprobe(ap);
+			arm_kprobe(ap);
 	}
 	return 0;
 }
@@ -789,11 +805,8 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 		 * enabled and not gone - otherwise, the breakpoint would
 		 * already have been removed. We save on flushing icache.
 		 */
-		if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
-			mutex_lock(&text_mutex);
-			arch_disarm_kprobe(p);
-			mutex_unlock(&text_mutex);
-		}
+		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
+			disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
 	} else {
 		if (p->break_handler && !kprobe_gone(p))
@@ -810,7 +823,7 @@ noclean:
 		if (!kprobe_disabled(old_p)) {
 			try_to_disable_aggr_kprobe(old_p);
 			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
-				arch_disarm_kprobe(old_p);
+				disarm_kprobe(old_p);
 		}
 	}
 	return 0;
@@ -1364,7 +1377,7 @@ int __kprobes disable_kprobe(struct kprobe *kp)
 	try_to_disable_aggr_kprobe(p);
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p))
-		arch_disarm_kprobe(p);
+		disarm_kprobe(p);
 out:
 	mutex_unlock(&kprobe_mutex);
 	return ret;
@@ -1393,7 +1406,7 @@ int __kprobes enable_kprobe(struct kprobe *kp)
 	}
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p))
-		arch_arm_kprobe(p);
+		arm_kprobe(p);
 
 	p->flags &= ~KPROBE_FLAG_DISABLED;
 	if (p != kp)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b0f011866969..accb40cdb12a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2490,13 +2490,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	if (unlikely(!debug_locks))
+	lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+	lock->cpu = raw_smp_processor_id();
+#endif
+
+	if (DEBUG_LOCKS_WARN_ON(!name)) {
+		lock->name = "NULL";
 		return;
+	}
+
+	lock->name = name;
 
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
-	if (DEBUG_LOCKS_WARN_ON(!name))
-		return;
 	/*
	 * Sanity check, the lock-class key must be persistent:
	 */
@@ -2505,12 +2512,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
-	lock->name = name;
 	lock->key = key;
-	lock->class_cache = NULL;
-#ifdef CONFIG_LOCK_STAT
-	lock->cpu = raw_smp_processor_id();
-#endif
+
+	if (unlikely(!debug_locks))
+		return;
+
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index a2cc7e9a6e84..699a2ac3a0d7 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -54,9 +54,9 @@ enum {
  * table (if it's not there yet), and we check it for lock order
  * conflicts and deadlocks.
  */
-#define MAX_LOCKDEP_ENTRIES	8192UL
+#define MAX_LOCKDEP_ENTRIES	16384UL
 
-#define MAX_LOCKDEP_CHAINS_BITS	14
+#define MAX_LOCKDEP_CHAINS_BITS	15
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
 
 #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
diff --git a/kernel/panic.c b/kernel/panic.c
index 934fb377f4b3..984b3ecbd72c 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -221,7 +221,7 @@ void add_taint(unsigned flag)
 	 * post-warning case.
 	 */
 	if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
-		printk(KERN_WARNING "Disabling lockdep due to kernel taint\n");
+		printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
 
 	set_bit(flag, &tainted_mask);
 }
@@ -340,34 +340,46 @@ void oops_exit(void)
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath(const char *file, int line, const char *fmt, ...)
-{
+struct slowpath_args {
+	const char *fmt;
 	va_list args;
-	char function[KSYM_SYMBOL_LEN];
-	unsigned long caller = (unsigned long)__builtin_return_address(0);
-	const char *board;
+};
 
-	sprint_symbol(function, caller);
+static void warn_slowpath_common(const char *file, int line, void *caller, struct slowpath_args *args)
+{
+	const char *board;
 
 	printk(KERN_WARNING "------------[ cut here ]------------\n");
-	printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
-		line, function);
+	printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
 	board = dmi_get_system_info(DMI_PRODUCT_NAME);
 	if (board)
 		printk(KERN_WARNING "Hardware name: %s\n", board);
 
-	if (fmt) {
-		va_start(args, fmt);
-		vprintk(fmt, args);
-		va_end(args);
-	}
+	if (args)
+		vprintk(args->fmt, args->args);
 
 	print_modules();
 	dump_stack();
 	print_oops_end_marker();
 	add_taint(TAINT_WARN);
 }
-EXPORT_SYMBOL(warn_slowpath);
+
+void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
+{
+	struct slowpath_args args;
+
+	args.fmt = fmt;
+	va_start(args.args, fmt);
+	warn_slowpath_common(file, line, __builtin_return_address(0), &args);
+	va_end(args.args);
+}
+EXPORT_SYMBOL(warn_slowpath_fmt);
+
+void warn_slowpath_null(const char *file, int line)
+{
+	warn_slowpath_common(file, line, __builtin_return_address(0), NULL);
+}
+EXPORT_SYMBOL(warn_slowpath_null);
 #endif
 
 #ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c9dcf98b4463..bece7c0b67b2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1420,19 +1420,19 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	 * timer call will interfere.
 	 */
 	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
-		int firing;
+		int cpu_firing;
+
 		spin_lock(&timer->it_lock);
 		list_del_init(&timer->it.cpu.entry);
-		firing = timer->it.cpu.firing;
+		cpu_firing = timer->it.cpu.firing;
 		timer->it.cpu.firing = 0;
 		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun. So don't generate an event.
		 */
-		if (likely(firing >= 0)) {
+		if (likely(cpu_firing >= 0))
 			cpu_timer_fire(timer);
-		}
 		spin_unlock(&timer->it_lock);
 	}
 }
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 0854770b63b9..5cb080e7eebd 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -215,8 +215,6 @@ static int create_image(int platform_mode)
 	if (error)
 		return error;
 
-	device_pm_lock();
-
 	/* At this point, device_suspend() has been called, but *not*
 	 * device_power_down(). We *must* call device_power_down() now.
 	 * Otherwise, drivers for some devices (e.g. interrupt controllers)
@@ -227,7 +225,7 @@ static int create_image(int platform_mode)
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down, "
 			"aborting hibernation\n");
-		goto Unlock;
+		return error;
 	}
 
 	error = platform_pre_snapshot(platform_mode);
@@ -241,9 +239,9 @@ static int create_image(int platform_mode)
 
 	local_irq_disable();
 
-	sysdev_suspend(PMSG_FREEZE);
+	error = sysdev_suspend(PMSG_FREEZE);
 	if (error) {
-		printk(KERN_ERR "PM: Some devices failed to power down, "
+		printk(KERN_ERR "PM: Some system devices failed to power down, "
 			"aborting hibernation\n");
 		goto Enable_irqs;
 	}
@@ -280,9 +278,6 @@ static int create_image(int platform_mode)
 	device_power_up(in_suspend ?
 		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
 
- Unlock:
-	device_pm_unlock();
-
 	return error;
 }
 
@@ -344,13 +339,11 @@ static int resume_target_kernel(bool platform_mode)
 {
 	int error;
 
-	device_pm_lock();
-
 	error = device_power_down(PMSG_QUIESCE);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down, "
 			"aborting resume\n");
-		goto Unlock;
+		return error;
 	}
 
 	error = platform_pre_restore(platform_mode);
@@ -403,9 +396,6 @@ static int resume_target_kernel(bool platform_mode)
 
 	device_power_up(PMSG_RECOVER);
 
- Unlock:
-	device_pm_unlock();
-
 	return error;
 }
 
@@ -464,11 +454,9 @@ int hibernation_platform_enter(void)
 		goto Resume_devices;
 	}
 
-	device_pm_lock();
-
 	error = device_power_down(PMSG_HIBERNATE);
 	if (error)
-		goto Unlock;
+		goto Resume_devices;
 
 	error = hibernation_ops->prepare();
 	if (error)
@@ -493,9 +481,6 @@ int hibernation_platform_enter(void)
 
 	device_power_up(PMSG_RESTORE);
 
- Unlock:
-	device_pm_unlock();
-
  Resume_devices:
 	entering_platform_hibernation = false;
 	device_resume(PMSG_RESTORE);
@@ -646,13 +631,6 @@ static int software_resume(void)
 		return 0;
 
 	/*
-	 * We can't depend on SCSI devices being available after loading one of
-	 * their modules if scsi_complete_async_scans() is not called and the
-	 * resume device usually is a SCSI one.
-	 */
-	scsi_complete_async_scans();
-
-	/*
 	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
 	 * is configured into the kernel. Since the regular hibernate
 	 * trigger path is via sysfs which takes a buffer mutex before
@@ -663,32 +641,42 @@ static int software_resume(void)
 	 * here to avoid lockdep complaining.
 	 */
 	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
+
+	if (swsusp_resume_device)
+		goto Check_image;
+
+	if (!strlen(resume_file)) {
+		error = -ENOENT;
+		goto Unlock;
+	}
+
+	pr_debug("PM: Checking image partition %s\n", resume_file);
+
+	/* Check if the device is there */
+	swsusp_resume_device = name_to_dev_t(resume_file);
 	if (!swsusp_resume_device) {
-		if (!strlen(resume_file)) {
-			mutex_unlock(&pm_mutex);
-			return -ENOENT;
-		}
 		/*
		 * Some device discovery might still be in progress; we need
		 * to wait for this to finish.
		 */
 		wait_for_device_probe();
+		/*
+		 * We can't depend on SCSI devices being available after loading
+		 * one of their modules until scsi_complete_async_scans() is
+		 * called and the resume device usually is a SCSI one.
+		 */
+		scsi_complete_async_scans();
+
 		swsusp_resume_device = name_to_dev_t(resume_file);
-		pr_debug("PM: Resume from partition %s\n", resume_file);
-	} else {
-		pr_debug("PM: Resume from partition %d:%d\n",
-			MAJOR(swsusp_resume_device),
-			MINOR(swsusp_resume_device));
+		if (!swsusp_resume_device) {
+			error = -ENODEV;
+			goto Unlock;
+		}
 	}
 
-	if (noresume) {
-		/**
-		 * FIXME: If noresume is specified, we need to find the
-		 * partition and reset it back to normal swap space.
-		 */
-		mutex_unlock(&pm_mutex);
-		return 0;
-	}
+ Check_image:
+	pr_debug("PM: Resume from partition %d:%d\n",
+		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
 
 	pr_debug("PM: Checking hibernation image.\n");
 	error = swsusp_check();
diff --git a/kernel/power/main.c b/kernel/power/main.c index f172f41858bb..868028280d13 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
| @@ -289,22 +289,26 @@ static int suspend_enter(suspend_state_t state) | |||
| 289 | { | 289 | { |
| 290 | int error; | 290 | int error; |
| 291 | 291 | ||
| 292 | device_pm_lock(); | 292 | if (suspend_ops->prepare) { |
| 293 | error = suspend_ops->prepare(); | ||
| 294 | if (error) | ||
| 295 | return error; | ||
| 296 | } | ||
| 293 | 297 | ||
| 294 | error = device_power_down(PMSG_SUSPEND); | 298 | error = device_power_down(PMSG_SUSPEND); |
| 295 | if (error) { | 299 | if (error) { |
| 296 | printk(KERN_ERR "PM: Some devices failed to power down\n"); | 300 | printk(KERN_ERR "PM: Some devices failed to power down\n"); |
| 297 | goto Done; | 301 | goto Platfrom_finish; |
| 298 | } | 302 | } |
| 299 | 303 | ||
| 300 | if (suspend_ops->prepare) { | 304 | if (suspend_ops->prepare_late) { |
| 301 | error = suspend_ops->prepare(); | 305 | error = suspend_ops->prepare_late(); |
| 302 | if (error) | 306 | if (error) |
| 303 | goto Power_up_devices; | 307 | goto Power_up_devices; |
| 304 | } | 308 | } |
| 305 | 309 | ||
| 306 | if (suspend_test(TEST_PLATFORM)) | 310 | if (suspend_test(TEST_PLATFORM)) |
| 307 | goto Platfrom_finish; | 311 | goto Platform_wake; |
| 308 | 312 | ||
| 309 | error = disable_nonboot_cpus(); | 313 | error = disable_nonboot_cpus(); |
| 310 | if (error || suspend_test(TEST_CPUS)) | 314 | if (error || suspend_test(TEST_CPUS)) |
| @@ -326,15 +330,16 @@ static int suspend_enter(suspend_state_t state) | |||
| 326 | Enable_cpus: | 330 | Enable_cpus: |
| 327 | enable_nonboot_cpus(); | 331 | enable_nonboot_cpus(); |
| 328 | 332 | ||
| 329 | Platfrom_finish: | 333 | Platform_wake: |
| 330 | if (suspend_ops->finish) | 334 | if (suspend_ops->wake) |
| 331 | suspend_ops->finish(); | 335 | suspend_ops->wake(); |
| 332 | 336 | ||
| 333 | Power_up_devices: | 337 | Power_up_devices: |
| 334 | device_power_up(PMSG_RESUME); | 338 | device_power_up(PMSG_RESUME); |
| 335 | 339 | ||
| 336 | Done: | 340 | Platfrom_finish: |
| 337 | device_pm_unlock(); | 341 | if (suspend_ops->finish) |
| 342 | suspend_ops->finish(); | ||
| 338 | 343 | ||
| 339 | return error; | 344 | return error; |
| 340 | } | 345 | } |
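
[Editor's note: the reordered suspend_enter() above pairs each new step with an unwind label. A hedged, self-contained C sketch of that ordering follows — every callback is a hypothetical stand-in for the suspend_ops hooks and device PM calls, and prepare_late() injects an error to show the unwind path that skips wake() but still powers devices up and runs finish().]

#include <stdio.h>

static int prepare(void)      { puts("prepare");      return 0; }
static int prepare_late(void) { puts("prepare_late"); return -5; /* inject -EIO */ }
static void wake(void)        { puts("wake"); }
static void finish(void)      { puts("finish"); }
static int device_power_down(void) { puts("device_power_down"); return 0; }
static void device_power_up(void)  { puts("device_power_up"); }

static int suspend_enter_flow(void)
{
	int error;

	error = prepare();
	if (error)
		return error;

	error = device_power_down();
	if (error)
		goto Platfrom_finish;	/* (sic: label spelling matches the patch) */

	error = prepare_late();
	if (error)
		goto Power_up_devices;

	/* ... disable CPUs, arch suspend entry, enable CPUs ... */

	/* Platform_wake: (success path falls through here) */
	wake();

Power_up_devices:
	device_power_up();

Platfrom_finish:
	finish();
	return error;
}

int main(void)
{
	printf("suspend_enter_flow() = %d\n", suspend_enter_flow());
	return 0;
}
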
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index dfcd83ceee3b..42c317874cfa 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
| @@ -188,7 +188,7 @@ int ptrace_attach(struct task_struct *task) | |||
| 188 | /* Protect exec's credential calculations against our interference; | 188 | /* Protect exec's credential calculations against our interference; |
| 189 | * SUID, SGID and LSM creds get determined differently under ptrace. | 189 | * SUID, SGID and LSM creds get determined differently under ptrace. |
| 190 | */ | 190 | */ |
| 191 | retval = mutex_lock_interruptible(¤t->cred_exec_mutex); | 191 | retval = mutex_lock_interruptible(&task->cred_exec_mutex); |
| 192 | if (retval < 0) | 192 | if (retval < 0) |
| 193 | goto out; | 193 | goto out; |
| 194 | 194 | ||
| @@ -232,7 +232,7 @@ repeat: | |||
| 232 | bad: | 232 | bad: |
| 233 | write_unlock_irqrestore(&tasklist_lock, flags); | 233 | write_unlock_irqrestore(&tasklist_lock, flags); |
| 234 | task_unlock(task); | 234 | task_unlock(task); |
| 235 | mutex_unlock(¤t->cred_exec_mutex); | 235 | mutex_unlock(&task->cred_exec_mutex); |
| 236 | out: | 236 | out: |
| 237 | return retval; | 237 | return retval; |
| 238 | } | 238 | } |
| @@ -304,6 +304,8 @@ int ptrace_detach(struct task_struct *child, unsigned int data) | |||
| 304 | if (child->ptrace) { | 304 | if (child->ptrace) { |
| 305 | child->exit_code = data; | 305 | child->exit_code = data; |
| 306 | dead = __ptrace_detach(current, child); | 306 | dead = __ptrace_detach(current, child); |
| 307 | if (!child->exit_state) | ||
| 308 | wake_up_process(child); | ||
| 307 | } | 309 | } |
| 308 | write_unlock_irq(&tasklist_lock); | 310 | write_unlock_irq(&tasklist_lock); |
| 309 | 311 | ||
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 2c7b8457d0d2..a967c9feb90a 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
| @@ -58,6 +58,10 @@ static DEFINE_MUTEX(rcu_barrier_mutex); | |||
| 58 | static struct completion rcu_barrier_completion; | 58 | static struct completion rcu_barrier_completion; |
| 59 | int rcu_scheduler_active __read_mostly; | 59 | int rcu_scheduler_active __read_mostly; |
| 60 | 60 | ||
| 61 | static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0); | ||
| 62 | static struct rcu_head rcu_migrate_head[3]; | ||
| 63 | static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq); | ||
| 64 | |||
| 61 | /* | 65 | /* |
| 62 | * Awaken the corresponding synchronize_rcu() instance now that a | 66 | * Awaken the corresponding synchronize_rcu() instance now that a |
| 63 | * grace period has elapsed. | 67 | * grace period has elapsed. |
| @@ -122,7 +126,10 @@ static void rcu_barrier_func(void *type) | |||
| 122 | } | 126 | } |
| 123 | } | 127 | } |
| 124 | 128 | ||
| 125 | static inline void wait_migrated_callbacks(void); | 129 | static inline void wait_migrated_callbacks(void) |
| 130 | { | ||
| 131 | wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); | ||
| 132 | } | ||
| 126 | 133 | ||
| 127 | /* | 134 | /* |
| 128 | * Orchestrate the specified type of RCU barrier, waiting for all | 135 | * Orchestrate the specified type of RCU barrier, waiting for all |
| @@ -179,21 +186,12 @@ void rcu_barrier_sched(void) | |||
| 179 | } | 186 | } |
| 180 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | 187 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); |
| 181 | 188 | ||
| 182 | static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0); | ||
| 183 | static struct rcu_head rcu_migrate_head[3]; | ||
| 184 | static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq); | ||
| 185 | |||
| 186 | static void rcu_migrate_callback(struct rcu_head *notused) | 189 | static void rcu_migrate_callback(struct rcu_head *notused) |
| 187 | { | 190 | { |
| 188 | if (atomic_dec_and_test(&rcu_migrate_type_count)) | 191 | if (atomic_dec_and_test(&rcu_migrate_type_count)) |
| 189 | wake_up(&rcu_migrate_wq); | 192 | wake_up(&rcu_migrate_wq); |
| 190 | } | 193 | } |
| 191 | 194 | ||
| 192 | static inline void wait_migrated_callbacks(void) | ||
| 193 | { | ||
| 194 | wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); | ||
| 195 | } | ||
| 196 | |||
| 197 | static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, | 195 | static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, |
| 198 | unsigned long action, void *hcpu) | 196 | unsigned long action, void *hcpu) |
| 199 | { | 197 | { |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 7f3266922572..d2a372fb0b9b 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
| @@ -530,8 +530,6 @@ static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 530 | rdp->qs_pending = 1; | 530 | rdp->qs_pending = 1; |
| 531 | rdp->passed_quiesc = 0; | 531 | rdp->passed_quiesc = 0; |
| 532 | rdp->gpnum = rsp->gpnum; | 532 | rdp->gpnum = rsp->gpnum; |
| 533 | rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + | ||
| 534 | RCU_JIFFIES_TILL_FORCE_QS; | ||
| 535 | } | 533 | } |
| 536 | 534 | ||
| 537 | /* | 535 | /* |
| @@ -578,8 +576,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
| 578 | rsp->gpnum++; | 576 | rsp->gpnum++; |
| 579 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ | 577 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ |
| 580 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | 578 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
| 581 | rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + | ||
| 582 | RCU_JIFFIES_TILL_FORCE_QS; | ||
| 583 | record_gp_stall_check_time(rsp); | 579 | record_gp_stall_check_time(rsp); |
| 584 | dyntick_record_completed(rsp, rsp->completed - 1); | 580 | dyntick_record_completed(rsp, rsp->completed - 1); |
| 585 | note_new_gpnum(rsp, rdp); | 581 | note_new_gpnum(rsp, rdp); |
| @@ -1055,7 +1051,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
| 1055 | { | 1051 | { |
| 1056 | unsigned long flags; | 1052 | unsigned long flags; |
| 1057 | long lastcomp; | 1053 | long lastcomp; |
| 1058 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; | ||
| 1059 | struct rcu_node *rnp = rcu_get_root(rsp); | 1054 | struct rcu_node *rnp = rcu_get_root(rsp); |
| 1060 | u8 signaled; | 1055 | u8 signaled; |
| 1061 | 1056 | ||
| @@ -1066,16 +1061,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
| 1066 | return; /* Someone else is already on the job. */ | 1061 | return; /* Someone else is already on the job. */ |
| 1067 | } | 1062 | } |
| 1068 | if (relaxed && | 1063 | if (relaxed && |
| 1069 | (long)(rsp->jiffies_force_qs - jiffies) >= 0 && | 1064 | (long)(rsp->jiffies_force_qs - jiffies) >= 0) |
| 1070 | (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0) | ||
| 1071 | goto unlock_ret; /* no emergency and done recently. */ | 1065 | goto unlock_ret; /* no emergency and done recently. */ |
| 1072 | rsp->n_force_qs++; | 1066 | rsp->n_force_qs++; |
| 1073 | spin_lock(&rnp->lock); | 1067 | spin_lock(&rnp->lock); |
| 1074 | lastcomp = rsp->completed; | 1068 | lastcomp = rsp->completed; |
| 1075 | signaled = rsp->signaled; | 1069 | signaled = rsp->signaled; |
| 1076 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | 1070 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
| 1077 | rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + | ||
| 1078 | RCU_JIFFIES_TILL_FORCE_QS; | ||
| 1079 | if (lastcomp == rsp->gpnum) { | 1071 | if (lastcomp == rsp->gpnum) { |
| 1080 | rsp->n_force_qs_ngp++; | 1072 | rsp->n_force_qs_ngp++; |
| 1081 | spin_unlock(&rnp->lock); | 1073 | spin_unlock(&rnp->lock); |
| @@ -1144,8 +1136,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 1144 | * If an RCU GP has gone long enough, go check for dyntick | 1136 | * If an RCU GP has gone long enough, go check for dyntick |
| 1145 | * idle CPUs and, if needed, send resched IPIs. | 1137 | * idle CPUs and, if needed, send resched IPIs. |
| 1146 | */ | 1138 | */ |
| 1147 | if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || | 1139 | if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) |
| 1148 | (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0) | ||
| 1149 | force_quiescent_state(rsp, 1); | 1140 | force_quiescent_state(rsp, 1); |
| 1150 | 1141 | ||
| 1151 | /* | 1142 | /* |
| @@ -1230,8 +1221,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
| 1230 | if (unlikely(++rdp->qlen > qhimark)) { | 1221 | if (unlikely(++rdp->qlen > qhimark)) { |
| 1231 | rdp->blimit = LONG_MAX; | 1222 | rdp->blimit = LONG_MAX; |
| 1232 | force_quiescent_state(rsp, 0); | 1223 | force_quiescent_state(rsp, 0); |
| 1233 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || | 1224 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) |
| 1234 | (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0) | ||
| 1235 | force_quiescent_state(rsp, 1); | 1225 | force_quiescent_state(rsp, 1); |
| 1236 | local_irq_restore(flags); | 1226 | local_irq_restore(flags); |
| 1237 | } | 1227 | } |
| @@ -1290,8 +1280,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 1290 | 1280 | ||
| 1291 | /* Has an RCU GP gone long enough to send resched IPIs &c? */ | 1281 | /* Has an RCU GP gone long enough to send resched IPIs &c? */ |
| 1292 | if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) && | 1282 | if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) && |
| 1293 | ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || | 1283 | ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) |
| 1294 | (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)) | ||
| 1295 | return 1; | 1284 | return 1; |
| 1296 | 1285 | ||
| 1297 | /* nothing to do */ | 1286 | /* nothing to do */ |
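
[Editor's note: with the per-CPU n_rcu_pending heuristic removed, the remaining force_quiescent_state checks rely entirely on the signed-difference jiffies idiom. A minimal standalone demonstration of why "(long)(deadline - now) < 0" survives counter wraparound where a plain comparison does not — the cast relies on two's-complement behavior, the same assumption the kernel makes.]

#include <stdio.h>
#include <limits.h>

/* Wraparound-safe "now is past deadline" test, as used with jiffies. */
static int time_is_past(unsigned long deadline, unsigned long now)
{
	return (long)(deadline - now) < 0;
}

int main(void)
{
	/* deadline set 10 ticks after a counter that is about to wrap */
	unsigned long now = ULONG_MAX - 4;
	unsigned long deadline = now + 10;	/* wraps to a small value */

	printf("plain compare: %s\n",
	       now > deadline ? "past (wrong)" : "not past");
	printf("signed diff:   %s\n",
	       time_is_past(deadline, now) ? "past" : "not past (correct)");

	now += 20;	/* advance beyond the deadline, across the wrap */
	printf("after advance: %s\n",
	       time_is_past(deadline, now) ? "past (correct)" : "not past");
	return 0;
}
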
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index 4ee954f6a8d5..4b1875ba9404 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c | |||
| @@ -49,14 +49,12 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | |||
| 49 | { | 49 | { |
| 50 | if (!rdp->beenonline) | 50 | if (!rdp->beenonline) |
| 51 | return; | 51 | return; |
| 52 | seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x", | 52 | seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d", |
| 53 | rdp->cpu, | 53 | rdp->cpu, |
| 54 | cpu_is_offline(rdp->cpu) ? '!' : ' ', | 54 | cpu_is_offline(rdp->cpu) ? '!' : ' ', |
| 55 | rdp->completed, rdp->gpnum, | 55 | rdp->completed, rdp->gpnum, |
| 56 | rdp->passed_quiesc, rdp->passed_quiesc_completed, | 56 | rdp->passed_quiesc, rdp->passed_quiesc_completed, |
| 57 | rdp->qs_pending, | 57 | rdp->qs_pending); |
| 58 | rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending, | ||
| 59 | (int)(rdp->n_rcu_pending & 0xffff)); | ||
| 60 | #ifdef CONFIG_NO_HZ | 58 | #ifdef CONFIG_NO_HZ |
| 61 | seq_printf(m, " dt=%d/%d dn=%d df=%lu", | 59 | seq_printf(m, " dt=%d/%d dn=%d df=%lu", |
| 62 | rdp->dynticks->dynticks, | 60 | rdp->dynticks->dynticks, |
| @@ -102,14 +100,12 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) | |||
| 102 | { | 100 | { |
| 103 | if (!rdp->beenonline) | 101 | if (!rdp->beenonline) |
| 104 | return; | 102 | return; |
| 105 | seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld", | 103 | seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d", |
| 106 | rdp->cpu, | 104 | rdp->cpu, |
| 107 | cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"", | 105 | cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"", |
| 108 | rdp->completed, rdp->gpnum, | 106 | rdp->completed, rdp->gpnum, |
| 109 | rdp->passed_quiesc, rdp->passed_quiesc_completed, | 107 | rdp->passed_quiesc, rdp->passed_quiesc_completed, |
| 110 | rdp->qs_pending, | 108 | rdp->qs_pending); |
| 111 | rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending, | ||
| 112 | rdp->n_rcu_pending); | ||
| 113 | #ifdef CONFIG_NO_HZ | 109 | #ifdef CONFIG_NO_HZ |
| 114 | seq_printf(m, ",%d,%d,%d,%lu", | 110 | seq_printf(m, ",%d,%d,%d,%lu", |
| 115 | rdp->dynticks->dynticks, | 111 | rdp->dynticks->dynticks, |
| @@ -123,7 +119,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) | |||
| 123 | 119 | ||
| 124 | static int show_rcudata_csv(struct seq_file *m, void *unused) | 120 | static int show_rcudata_csv(struct seq_file *m, void *unused) |
| 125 | { | 121 | { |
| 126 | seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",\"rpfq\",\"rp\","); | 122 | seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\","); |
| 127 | #ifdef CONFIG_NO_HZ | 123 | #ifdef CONFIG_NO_HZ |
| 128 | seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\","); | 124 | seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\","); |
| 129 | #endif /* #ifdef CONFIG_NO_HZ */ | 125 | #endif /* #ifdef CONFIG_NO_HZ */ |
diff --git a/kernel/resource.c b/kernel/resource.c index fd5d7d574bb9..ac5f3a36923f 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
| @@ -533,43 +533,21 @@ static void __init __reserve_region_with_split(struct resource *root, | |||
| 533 | res->end = end; | 533 | res->end = end; |
| 534 | res->flags = IORESOURCE_BUSY; | 534 | res->flags = IORESOURCE_BUSY; |
| 535 | 535 | ||
| 536 | for (;;) { | 536 | conflict = __request_resource(parent, res); |
| 537 | conflict = __request_resource(parent, res); | 537 | if (!conflict) |
| 538 | if (!conflict) | 538 | return; |
| 539 | break; | ||
| 540 | if (conflict != parent) { | ||
| 541 | parent = conflict; | ||
| 542 | if (!(conflict->flags & IORESOURCE_BUSY)) | ||
| 543 | continue; | ||
| 544 | } | ||
| 545 | |||
| 546 | /* Uhhuh, that didn't work out.. */ | ||
| 547 | kfree(res); | ||
| 548 | res = NULL; | ||
| 549 | break; | ||
| 550 | } | ||
| 551 | |||
| 552 | if (!res) { | ||
| 553 | /* failed, split and try again */ | ||
| 554 | |||
| 555 | /* conflict covered whole area */ | ||
| 556 | if (conflict->start <= start && conflict->end >= end) | ||
| 557 | return; | ||
| 558 | 539 | ||
| 559 | if (conflict->start > start) | 540 | /* failed, split and try again */ |
| 560 | __reserve_region_with_split(root, start, conflict->start-1, name); | 541 | kfree(res); |
| 561 | if (!(conflict->flags & IORESOURCE_BUSY)) { | ||
| 562 | resource_size_t common_start, common_end; | ||
| 563 | 542 | ||
| 564 | common_start = max(conflict->start, start); | 543 | /* conflict covered whole area */ |
| 565 | common_end = min(conflict->end, end); | 544 | if (conflict->start <= start && conflict->end >= end) |
| 566 | if (common_start < common_end) | 545 | return; |
| 567 | __reserve_region_with_split(root, common_start, common_end, name); | ||
| 568 | } | ||
| 569 | if (conflict->end < end) | ||
| 570 | __reserve_region_with_split(root, conflict->end+1, end, name); | ||
| 571 | } | ||
| 572 | 546 | ||
| 547 | if (conflict->start > start) | ||
| 548 | __reserve_region_with_split(root, start, conflict->start-1, name); | ||
| 549 | if (conflict->end < end) | ||
| 550 | __reserve_region_with_split(root, conflict->end+1, end, name); | ||
| 573 | } | 551 | } |
| 574 | 552 | ||
| 575 | void __init reserve_region_with_split(struct resource *root, | 553 | void __init reserve_region_with_split(struct resource *root, |
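
[Editor's note: the simplified __reserve_region_with_split() above is now purely recursive — reserve what fits, and on conflict recurse into the gaps on either side. Below is a small userspace model of that splitting strategy over plain inclusive intervals; the types and the flat busy[] array are illustrative stand-ins, not the kernel's resource tree.]

#include <stdio.h>

struct region { unsigned long start, end; };	/* inclusive, like struct resource */

static struct region busy[16];
static int nbusy;

/* Insert [start,end] if it overlaps nothing; else return the first conflict. */
static struct region *request_region(unsigned long start, unsigned long end)
{
	for (int i = 0; i < nbusy; i++)
		if (start <= busy[i].end && end >= busy[i].start)
			return &busy[i];
	busy[nbusy++] = (struct region){ start, end };
	return NULL;
}

static void reserve_with_split(unsigned long start, unsigned long end)
{
	struct region *conflict = request_region(start, end);

	if (!conflict)
		return;
	if (conflict->start <= start && conflict->end >= end)
		return;				/* conflict covers the whole request */
	if (conflict->start > start)		/* reserve the gap below the conflict */
		reserve_with_split(start, conflict->start - 1);
	if (conflict->end < end)		/* ...and the gap above it */
		reserve_with_split(conflict->end + 1, end);
}

int main(void)
{
	request_region(40, 60);			/* pre-existing busy region */
	reserve_with_split(0, 100);		/* splits into [0,39] and [61,100] */
	for (int i = 0; i < nbusy; i++)
		printf("[%lu, %lu]\n", busy[i].start, busy[i].end);
	return 0;
}
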
diff --git a/kernel/sched.c b/kernel/sched.c index 5724508c3b66..26efa475bdc1 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -4732,7 +4732,7 @@ void account_process_tick(struct task_struct *p, int user_tick) | |||
| 4732 | 4732 | ||
| 4733 | if (user_tick) | 4733 | if (user_tick) |
| 4734 | account_user_time(p, one_jiffy, one_jiffy_scaled); | 4734 | account_user_time(p, one_jiffy, one_jiffy_scaled); |
| 4735 | else if (p != rq->idle) | 4735 | else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) |
| 4736 | account_system_time(p, HARDIRQ_OFFSET, one_jiffy, | 4736 | account_system_time(p, HARDIRQ_OFFSET, one_jiffy, |
| 4737 | one_jiffy_scaled); | 4737 | one_jiffy_scaled); |
| 4738 | else | 4738 | else |
| @@ -4846,7 +4846,7 @@ void scheduler_tick(void) | |||
| 4846 | #endif | 4846 | #endif |
| 4847 | } | 4847 | } |
| 4848 | 4848 | ||
| 4849 | unsigned long get_parent_ip(unsigned long addr) | 4849 | notrace unsigned long get_parent_ip(unsigned long addr) |
| 4850 | { | 4850 | { |
| 4851 | if (in_lock_functions(addr)) { | 4851 | if (in_lock_functions(addr)) { |
| 4852 | addr = CALLER_ADDR2; | 4852 | addr = CALLER_ADDR2; |
| @@ -7367,8 +7367,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
| 7367 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); | 7367 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); |
| 7368 | 7368 | ||
| 7369 | cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); | 7369 | cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); |
| 7370 | printk(KERN_CONT " %s (__cpu_power = %d)", str, | 7370 | |
| 7371 | group->__cpu_power); | 7371 | printk(KERN_CONT " %s", str); |
| 7372 | if (group->__cpu_power != SCHED_LOAD_SCALE) { | ||
| 7373 | printk(KERN_CONT " (__cpu_power = %d)", | ||
| 7374 | group->__cpu_power); | ||
| 7375 | } | ||
| 7372 | 7376 | ||
| 7373 | group = group->next; | 7377 | group = group->next; |
| 7374 | } while (group != sd->groups); | 7378 | } while (group != sd->groups); |
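
[Editor's note: the account_process_tick() change above makes a tick that lands while the idle task is interrupted — irq_count() above a bare HARDIRQ_OFFSET, i.e. a nested hardirq or softirq context — count as system time rather than idle time. A tiny truth-table sketch of the dispatch, with the kernel macro mocked by an arbitrary value:]

#include <stdio.h>

#define HARDIRQ_OFFSET 0x10000		/* mocked: one hardirq level */

static const char *classify(int user_tick, int is_idle, int irq_count)
{
	if (user_tick)
		return "user";
	if (!is_idle || irq_count != HARDIRQ_OFFSET)
		return "system";	/* new: interrupted idle is system time */
	return "idle";
}

int main(void)
{
	/* idle task, plain timer tick -> idle */
	printf("%s\n", classify(0, 1, HARDIRQ_OFFSET));
	/* idle task, tick arrives inside another interrupt -> system */
	printf("%s\n", classify(0, 1, 2 * HARDIRQ_OFFSET));
	return 0;
}
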
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 819f17ac796e..e1d16c9a7680 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c | |||
| @@ -38,7 +38,8 @@ | |||
| 38 | */ | 38 | */ |
| 39 | unsigned long long __attribute__((weak)) sched_clock(void) | 39 | unsigned long long __attribute__((weak)) sched_clock(void) |
| 40 | { | 40 | { |
| 41 | return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); | 41 | return (unsigned long long)(jiffies - INITIAL_JIFFIES) |
| 42 | * (NSEC_PER_SEC / HZ); | ||
| 42 | } | 43 | } |
| 43 | 44 | ||
| 44 | static __read_mostly int sched_clock_running; | 45 | static __read_mostly int sched_clock_running; |
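
[Editor's note: jiffies deliberately starts a few minutes before wraparound (INITIAL_JIFFIES) to flush out wrap bugs early, so the fallback sched_clock() has to subtract that offset or it reports a huge value at boot. A standalone illustration, mirroring the kernel's INITIAL_JIFFIES definition and assuming a 64-bit unsigned long:]

#include <stdio.h>

#define HZ 100
#define NSEC_PER_SEC 1000000000ULL
/* Mirrors the kernel definition: boot starts ~5 minutes before jiffies wraps. */
#define INITIAL_JIFFIES ((unsigned long)(unsigned int)(-300 * HZ))

int main(void)
{
	unsigned long jiffies = INITIAL_JIFFIES + 250;	/* 2.5s after boot */

	unsigned long long old = (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
	unsigned long long new = (unsigned long long)(jiffies - INITIAL_JIFFIES)
				 * (NSEC_PER_SEC / HZ);

	printf("old sched_clock(): %llu ns (huge at boot)\n", old);
	printf("new sched_clock(): %llu ns (2.5s, as expected)\n", new);
	return 0;
}
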
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index cdd3c89574cd..344712a5e3ed 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c | |||
| @@ -165,7 +165,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem) | |||
| 165 | vec->count = 0; | 165 | vec->count = 0; |
| 166 | if (bootmem) | 166 | if (bootmem) |
| 167 | alloc_bootmem_cpumask_var(&vec->mask); | 167 | alloc_bootmem_cpumask_var(&vec->mask); |
| 168 | else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL)) | 168 | else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) |
| 169 | goto cleanup; | 169 | goto cleanup; |
| 170 | } | 170 | } |
| 171 | 171 | ||
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index f2c66f8f9712..9bf0d2a73045 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
| @@ -1591,7 +1591,7 @@ static inline void init_sched_rt_class(void) | |||
| 1591 | unsigned int i; | 1591 | unsigned int i; |
| 1592 | 1592 | ||
| 1593 | for_each_possible_cpu(i) | 1593 | for_each_possible_cpu(i) |
| 1594 | alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), | 1594 | zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), |
| 1595 | GFP_KERNEL, cpu_to_node(i)); | 1595 | GFP_KERNEL, cpu_to_node(i)); |
| 1596 | } | 1596 | } |
| 1597 | #endif /* CONFIG_SMP */ | 1597 | #endif /* CONFIG_SMP */ |
diff --git a/kernel/slow-work.c b/kernel/slow-work.c index cf2bc01186ef..b28d19135f43 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c | |||
| @@ -609,14 +609,14 @@ void slow_work_unregister_user(void) | |||
| 609 | if (slow_work_user_count == 0) { | 609 | if (slow_work_user_count == 0) { |
| 610 | printk(KERN_NOTICE "Slow work thread pool: Shutting down\n"); | 610 | printk(KERN_NOTICE "Slow work thread pool: Shutting down\n"); |
| 611 | slow_work_threads_should_exit = true; | 611 | slow_work_threads_should_exit = true; |
| 612 | del_timer_sync(&slow_work_cull_timer); | ||
| 613 | del_timer_sync(&slow_work_oom_timer); | ||
| 612 | wake_up_all(&slow_work_thread_wq); | 614 | wake_up_all(&slow_work_thread_wq); |
| 613 | wait_for_completion(&slow_work_last_thread_exited); | 615 | wait_for_completion(&slow_work_last_thread_exited); |
| 614 | printk(KERN_NOTICE "Slow work thread pool:" | 616 | printk(KERN_NOTICE "Slow work thread pool:" |
| 615 | " Shut down complete\n"); | 617 | " Shut down complete\n"); |
| 616 | } | 618 | } |
| 617 | 619 | ||
| 618 | del_timer_sync(&slow_work_cull_timer); | ||
| 619 | |||
| 620 | mutex_unlock(&slow_work_user_lock); | 620 | mutex_unlock(&slow_work_user_lock); |
| 621 | } | 621 | } |
| 622 | EXPORT_SYMBOL(slow_work_unregister_user); | 622 | EXPORT_SYMBOL(slow_work_unregister_user); |
diff --git a/kernel/smp.c b/kernel/smp.c index 858baac568ee..ad63d8501207 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -52,7 +52,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
| 52 | switch (action) { | 52 | switch (action) { |
| 53 | case CPU_UP_PREPARE: | 53 | case CPU_UP_PREPARE: |
| 54 | case CPU_UP_PREPARE_FROZEN: | 54 | case CPU_UP_PREPARE_FROZEN: |
| 55 | if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, | 55 | if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, |
| 56 | cpu_to_node(cpu))) | 56 | cpu_to_node(cpu))) |
| 57 | return NOTIFY_BAD; | 57 | return NOTIFY_BAD; |
| 58 | break; | 58 | break; |
diff --git a/kernel/softirq.c b/kernel/softirq.c index 2fecefacdc5b..f674f332a024 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -472,9 +472,9 @@ void tasklet_kill(struct tasklet_struct *t) | |||
| 472 | printk("Attempt to kill tasklet from interrupt\n"); | 472 | printk("Attempt to kill tasklet from interrupt\n"); |
| 473 | 473 | ||
| 474 | while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { | 474 | while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { |
| 475 | do | 475 | do { |
| 476 | yield(); | 476 | yield(); |
| 477 | while (test_bit(TASKLET_STATE_SCHED, &t->state)); | 477 | } while (test_bit(TASKLET_STATE_SCHED, &t->state)); |
| 478 | } | 478 | } |
| 479 | tasklet_unlock_wait(t); | 479 | tasklet_unlock_wait(t); |
| 480 | clear_bit(TASKLET_STATE_SCHED, &t->state); | 480 | clear_bit(TASKLET_STATE_SCHED, &t->state); |
| @@ -828,7 +828,7 @@ int __init __weak arch_early_irq_init(void) | |||
| 828 | return 0; | 828 | return 0; |
| 829 | } | 829 | } |
| 830 | 830 | ||
| 831 | int __weak arch_init_chip_data(struct irq_desc *desc, int cpu) | 831 | int __weak arch_init_chip_data(struct irq_desc *desc, int node) |
| 832 | { | 832 | { |
| 833 | return 0; | 833 | return 0; |
| 834 | } | 834 | } |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index cf91c9317b26..6a463716ecbf 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -101,7 +101,9 @@ static int __maybe_unused one = 1; | |||
| 101 | static int __maybe_unused two = 2; | 101 | static int __maybe_unused two = 2; |
| 102 | static unsigned long one_ul = 1; | 102 | static unsigned long one_ul = 1; |
| 103 | static int one_hundred = 100; | 103 | static int one_hundred = 100; |
| 104 | static int one_thousand = 1000; | 104 | |
| 105 | /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ | ||
| 106 | static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; | ||
| 105 | 107 | ||
| 106 | /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ | 108 | /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ |
| 107 | static int maxolduid = 65535; | 109 | static int maxolduid = 65535; |
| @@ -1014,7 +1016,7 @@ static struct ctl_table vm_table[] = { | |||
| 1014 | .mode = 0644, | 1016 | .mode = 0644, |
| 1015 | .proc_handler = &dirty_bytes_handler, | 1017 | .proc_handler = &dirty_bytes_handler, |
| 1016 | .strategy = &sysctl_intvec, | 1018 | .strategy = &sysctl_intvec, |
| 1017 | .extra1 = &one_ul, | 1019 | .extra1 = &dirty_bytes_min, |
| 1018 | }, | 1020 | }, |
| 1019 | { | 1021 | { |
| 1020 | .procname = "dirty_writeback_centisecs", | 1022 | .procname = "dirty_writeback_centisecs", |
| @@ -1039,28 +1041,6 @@ static struct ctl_table vm_table[] = { | |||
| 1039 | .proc_handler = &proc_dointvec, | 1041 | .proc_handler = &proc_dointvec, |
| 1040 | }, | 1042 | }, |
| 1041 | { | 1043 | { |
| 1042 | .ctl_name = CTL_UNNUMBERED, | ||
| 1043 | .procname = "nr_pdflush_threads_min", | ||
| 1044 | .data = &nr_pdflush_threads_min, | ||
| 1045 | .maxlen = sizeof nr_pdflush_threads_min, | ||
| 1046 | .mode = 0644 /* read-write */, | ||
| 1047 | .proc_handler = &proc_dointvec_minmax, | ||
| 1048 | .strategy = &sysctl_intvec, | ||
| 1049 | .extra1 = &one, | ||
| 1050 | .extra2 = &nr_pdflush_threads_max, | ||
| 1051 | }, | ||
| 1052 | { | ||
| 1053 | .ctl_name = CTL_UNNUMBERED, | ||
| 1054 | .procname = "nr_pdflush_threads_max", | ||
| 1055 | .data = &nr_pdflush_threads_max, | ||
| 1056 | .maxlen = sizeof nr_pdflush_threads_max, | ||
| 1057 | .mode = 0644 /* read-write */, | ||
| 1058 | .proc_handler = &proc_dointvec_minmax, | ||
| 1059 | .strategy = &sysctl_intvec, | ||
| 1060 | .extra1 = &nr_pdflush_threads_min, | ||
| 1061 | .extra2 = &one_thousand, | ||
| 1062 | }, | ||
| 1063 | { | ||
| 1064 | .ctl_name = VM_SWAPPINESS, | 1044 | .ctl_name = VM_SWAPPINESS, |
| 1065 | .procname = "swappiness", | 1045 | .procname = "swappiness", |
| 1066 | .data = &vm_swappiness, | 1046 | .data = &vm_swappiness, |
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index c46c931a7fe7..ecfd7b5187e0 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
| @@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data) | |||
| 181 | 181 | ||
| 182 | resumed = test_and_clear_bit(0, &watchdog_resumed); | 182 | resumed = test_and_clear_bit(0, &watchdog_resumed); |
| 183 | 183 | ||
| 184 | wdnow = watchdog->read(); | 184 | wdnow = watchdog->read(watchdog); |
| 185 | wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); | 185 | wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); |
| 186 | watchdog_last = wdnow; | 186 | watchdog_last = wdnow; |
| 187 | 187 | ||
| 188 | list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { | 188 | list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { |
| 189 | csnow = cs->read(); | 189 | csnow = cs->read(cs); |
| 190 | 190 | ||
| 191 | if (unlikely(resumed)) { | 191 | if (unlikely(resumed)) { |
| 192 | cs->wd_last = csnow; | 192 | cs->wd_last = csnow; |
| @@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs) | |||
| 247 | 247 | ||
| 248 | list_add(&cs->wd_list, &watchdog_list); | 248 | list_add(&cs->wd_list, &watchdog_list); |
| 249 | if (!started && watchdog) { | 249 | if (!started && watchdog) { |
| 250 | watchdog_last = watchdog->read(); | 250 | watchdog_last = watchdog->read(watchdog); |
| 251 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; | 251 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; |
| 252 | add_timer_on(&watchdog_timer, | 252 | add_timer_on(&watchdog_timer, |
| 253 | cpumask_first(cpu_online_mask)); | 253 | cpumask_first(cpu_online_mask)); |
| @@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs) | |||
| 268 | cse->flags &= ~CLOCK_SOURCE_WATCHDOG; | 268 | cse->flags &= ~CLOCK_SOURCE_WATCHDOG; |
| 269 | /* Start if list is not empty */ | 269 | /* Start if list is not empty */ |
| 270 | if (!list_empty(&watchdog_list)) { | 270 | if (!list_empty(&watchdog_list)) { |
| 271 | watchdog_last = watchdog->read(); | 271 | watchdog_last = watchdog->read(watchdog); |
| 272 | watchdog_timer.expires = | 272 | watchdog_timer.expires = |
| 273 | jiffies + WATCHDOG_INTERVAL; | 273 | jiffies + WATCHDOG_INTERVAL; |
| 274 | add_timer_on(&watchdog_timer, | 274 | add_timer_on(&watchdog_timer, |
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 06f197560f3b..c3f6c30816e3 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c | |||
| @@ -50,7 +50,7 @@ | |||
| 50 | */ | 50 | */ |
| 51 | #define JIFFIES_SHIFT 8 | 51 | #define JIFFIES_SHIFT 8 |
| 52 | 52 | ||
| 53 | static cycle_t jiffies_read(void) | 53 | static cycle_t jiffies_read(struct clocksource *cs) |
| 54 | { | 54 | { |
| 55 | return (cycle_t) jiffies; | 55 | return (cycle_t) jiffies; |
| 56 | } | 56 | } |
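
[Editor's note: passing the struct clocksource into its own read() callback — as jiffies_read() now does, and as the watchdog calls above expect — lets a driver recover its private state with container_of() instead of a global. A minimal sketch of that pattern; mmio_timer and its field names are hypothetical.]

#include <stdio.h>
#include <stddef.h>

typedef unsigned long long cycle_t;

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clocksource {
	const char *name;
	cycle_t (*read)(struct clocksource *cs);
};

/* Hypothetical driver embedding a clocksource next to its own state. */
struct mmio_timer {
	cycle_t fake_counter;		/* stands in for a hardware register */
	struct clocksource cs;
};

static cycle_t mmio_timer_read(struct clocksource *cs)
{
	struct mmio_timer *t = container_of(cs, struct mmio_timer, cs);
	return t->fake_counter++;	/* no global variable needed */
}

int main(void)
{
	struct mmio_timer t = {
		.fake_counter = 42,
		.cs = { .name = "mmio-timer", .read = mmio_timer_read },
	};
	printf("%s: %llu\n", t.cs.name, t.cs.read(&t.cs));
	return 0;
}
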
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 21a5ca849514..83c4417b6a3c 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
| @@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_event_device *dev) | |||
| 93 | for (;;) { | 93 | for (;;) { |
| 94 | if (!clockevents_program_event(dev, next, ktime_get())) | 94 | if (!clockevents_program_event(dev, next, ktime_get())) |
| 95 | return; | 95 | return; |
| 96 | tick_periodic(cpu); | 96 | /* |
| 97 | * Have to be careful here. If we're in oneshot mode, | ||
| 98 | * before we call tick_periodic() in a loop, we need | ||
| 99 | * to be sure we're using a real hardware clocksource. | ||
| 100 | * Otherwise we could get trapped in an infinite | ||
| 101 | * loop, as the tick_periodic() increments jiffies, | ||
| 102 | * which then will increment time, possibly causing | ||
| 103 | * the loop to trigger again and again. | ||
| 104 | */ | ||
| 105 | if (timekeeping_valid_for_hres()) | ||
| 106 | tick_periodic(cpu); | ||
| 97 | next = ktime_add(next, tick_period); | 107 | next = ktime_add(next, tick_period); |
| 98 | } | 108 | } |
| 99 | } | 109 | } |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 900f1b6598d1..687dff49f6e7 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
| @@ -182,7 +182,7 @@ EXPORT_SYMBOL(do_settimeofday); | |||
| 182 | */ | 182 | */ |
| 183 | static void change_clocksource(void) | 183 | static void change_clocksource(void) |
| 184 | { | 184 | { |
| 185 | struct clocksource *new; | 185 | struct clocksource *new, *old; |
| 186 | 186 | ||
| 187 | new = clocksource_get_next(); | 187 | new = clocksource_get_next(); |
| 188 | 188 | ||
| @@ -191,11 +191,16 @@ static void change_clocksource(void) | |||
| 191 | 191 | ||
| 192 | clocksource_forward_now(); | 192 | clocksource_forward_now(); |
| 193 | 193 | ||
| 194 | new->raw_time = clock->raw_time; | 194 | if (clocksource_enable(new)) |
| 195 | return; | ||
| 195 | 196 | ||
| 197 | new->raw_time = clock->raw_time; | ||
| 198 | old = clock; | ||
| 196 | clock = new; | 199 | clock = new; |
| 200 | clocksource_disable(old); | ||
| 201 | |||
| 197 | clock->cycle_last = 0; | 202 | clock->cycle_last = 0; |
| 198 | clock->cycle_last = clocksource_read(new); | 203 | clock->cycle_last = clocksource_read(clock); |
| 199 | clock->error = 0; | 204 | clock->error = 0; |
| 200 | clock->xtime_nsec = 0; | 205 | clock->xtime_nsec = 0; |
| 201 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); | 206 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); |
| @@ -292,6 +297,7 @@ void __init timekeeping_init(void) | |||
| 292 | ntp_init(); | 297 | ntp_init(); |
| 293 | 298 | ||
| 294 | clock = clocksource_get_next(); | 299 | clock = clocksource_get_next(); |
| 300 | clocksource_enable(clock); | ||
| 295 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); | 301 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); |
| 296 | clock->cycle_last = clocksource_read(clock); | 302 | clock->cycle_last = clocksource_read(clock); |
| 297 | 303 | ||
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1ce5dc6372b8..cda81ec58d9f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -2380,7 +2380,7 @@ static const char readme_msg[] = | |||
| 2380 | "# echo print-parent > /debug/tracing/trace_options\n" | 2380 | "# echo print-parent > /debug/tracing/trace_options\n" |
| 2381 | "# echo 1 > /debug/tracing/tracing_enabled\n" | 2381 | "# echo 1 > /debug/tracing/tracing_enabled\n" |
| 2382 | "# cat /debug/tracing/trace > /tmp/trace.txt\n" | 2382 | "# cat /debug/tracing/trace > /tmp/trace.txt\n" |
| 2383 | "echo 0 > /debug/tracing/tracing_enabled\n" | 2383 | "# echo 0 > /debug/tracing/tracing_enabled\n" |
| 2384 | ; | 2384 | ; |
| 2385 | 2385 | ||
| 2386 | static ssize_t | 2386 | static ssize_t |
| @@ -3448,6 +3448,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
| 3448 | if (!ref) | 3448 | if (!ref) |
| 3449 | break; | 3449 | break; |
| 3450 | 3450 | ||
| 3451 | ref->ref = 1; | ||
| 3451 | ref->buffer = info->tr->buffer; | 3452 | ref->buffer = info->tr->buffer; |
| 3452 | ref->page = ring_buffer_alloc_read_page(ref->buffer); | 3453 | ref->page = ring_buffer_alloc_read_page(ref->buffer); |
| 3453 | if (!ref->page) { | 3454 | if (!ref->page) { |
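
[Editor's note: the one-line ref->ref = 1 fix above initializes the reference count before the buffer ref is handed to the splice pipe; without it, the pipe's put could drop the count to zero and free a page the creator still holds. A toy refcount model of that failure mode, with hypothetical names:]

#include <stdio.h>
#include <stdlib.h>

struct buffer_ref {
	int ref;
	/* ... page pointer etc. ... */
};

static void buffer_ref_put(struct buffer_ref *r)
{
	if (--r->ref == 0) {
		printf("freeing ref %p\n", (void *)r);
		free(r);
	}
}

int main(void)
{
	struct buffer_ref *r = calloc(1, sizeof(*r));
	if (!r)
		return 1;

	r->ref = 1;		/* the added initialization: creator holds one ref */
	r->ref++;		/* pipe takes its own reference */

	buffer_ref_put(r);	/* pipe drops: ref 2 -> 1, page stays alive */
	buffer_ref_put(r);	/* creator drops: ref 1 -> 0, freed exactly once */
	return 0;
}
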
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index ad8c22efff41..8333715e4066 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
| @@ -155,6 +155,13 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter, | |||
| 155 | return TRACE_TYPE_HANDLED; | 155 | return TRACE_TYPE_HANDLED; |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | static void branch_print_header(struct seq_file *s) | ||
| 159 | { | ||
| 160 | seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT" | ||
| 161 | " FUNC:FILE:LINE\n"); | ||
| 162 | seq_puts(s, "# | | | | | " | ||
| 163 | " |\n"); | ||
| 164 | } | ||
| 158 | 165 | ||
| 159 | static struct trace_event trace_branch_event = { | 166 | static struct trace_event trace_branch_event = { |
| 160 | .type = TRACE_BRANCH, | 167 | .type = TRACE_BRANCH, |
| @@ -169,6 +176,7 @@ static struct tracer branch_trace __read_mostly = | |||
| 169 | #ifdef CONFIG_FTRACE_SELFTEST | 176 | #ifdef CONFIG_FTRACE_SELFTEST |
| 170 | .selftest = trace_selftest_startup_branch, | 177 | .selftest = trace_selftest_startup_branch, |
| 171 | #endif /* CONFIG_FTRACE_SELFTEST */ | 178 | #endif /* CONFIG_FTRACE_SELFTEST */ |
| 179 | .print_header = branch_print_header, | ||
| 172 | }; | 180 | }; |
| 173 | 181 | ||
| 174 | __init static int init_branch_tracer(void) | 182 | __init static int init_branch_tracer(void) |
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c index bae791ebcc51..118439709fb7 100644 --- a/kernel/trace/trace_power.c +++ b/kernel/trace/trace_power.c | |||
| @@ -186,6 +186,12 @@ static enum print_line_t power_print_line(struct trace_iterator *iter) | |||
| 186 | return TRACE_TYPE_UNHANDLED; | 186 | return TRACE_TYPE_UNHANDLED; |
| 187 | } | 187 | } |
| 188 | 188 | ||
| 189 | static void power_print_header(struct seq_file *s) | ||
| 190 | { | ||
| 191 | seq_puts(s, "# TIMESTAMP STATE EVENT\n"); | ||
| 192 | seq_puts(s, "# | | |\n"); | ||
| 193 | } | ||
| 194 | |||
| 189 | static struct tracer power_tracer __read_mostly = | 195 | static struct tracer power_tracer __read_mostly = |
| 190 | { | 196 | { |
| 191 | .name = "power", | 197 | .name = "power", |
| @@ -194,6 +200,7 @@ static struct tracer power_tracer __read_mostly = | |||
| 194 | .stop = stop_power_trace, | 200 | .stop = stop_power_trace, |
| 195 | .reset = power_trace_reset, | 201 | .reset = power_trace_reset, |
| 196 | .print_line = power_print_line, | 202 | .print_line = power_print_line, |
| 203 | .print_header = power_print_header, | ||
| 197 | }; | 204 | }; |
| 198 | 205 | ||
| 199 | static int init_power_trace(void) | 206 | static int init_power_trace(void) |
