Diffstat (limited to 'kernel')

-rw-r--r--  kernel/cgroup.c        |  34
-rw-r--r--  kernel/compat.c        |   5
-rw-r--r--  kernel/cpu.c           |   6
-rw-r--r--  kernel/cpuset.c        |  34
-rw-r--r--  kernel/dma-coherent.c  |  42
-rw-r--r--  kernel/exit.c          |  21
-rw-r--r--  kernel/fork.c          |  17
-rw-r--r--  kernel/futex.c         |  72
-rw-r--r--  kernel/kmod.c          |   4
-rw-r--r--  kernel/kprobes.c       | 281
-rw-r--r--  kernel/ksysfs.c        |   4
-rw-r--r--  kernel/module.c        |  92
-rw-r--r--  kernel/panic.c         |   2
-rw-r--r--  kernel/power/main.c    |   6
-rw-r--r--  kernel/profile.c       |   1
-rw-r--r--  kernel/rcupdate.c      |  11
-rw-r--r--  kernel/rcupreempt.c    |  11
-rw-r--r--  kernel/rcutorture.c    |  18
-rw-r--r--  kernel/rcutree.c       |  13
-rw-r--r--  kernel/signal.c        |   3
-rw-r--r--  kernel/stop_machine.c  |  55
-rw-r--r--  kernel/sys.c           |   2
-rw-r--r--  kernel/sysctl.c        |  27
-rw-r--r--  kernel/test_kprobes.c  | 210
-rw-r--r--  kernel/time.c          |   4
-rw-r--r--  kernel/tsacct.c        |   4

26 files changed, 676 insertions(+), 303 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 891a84eb9d30..f221446aa02d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
| @@ -116,7 +116,6 @@ static int root_count; | |||
| 116 | * be called. | 116 | * be called. |
| 117 | */ | 117 | */ |
| 118 | static int need_forkexit_callback __read_mostly; | 118 | static int need_forkexit_callback __read_mostly; |
| 119 | static int need_mm_owner_callback __read_mostly; | ||
| 120 | 119 | ||
| 121 | /* convenient tests for these bits */ | 120 | /* convenient tests for these bits */ |
| 122 | inline int cgroup_is_removed(const struct cgroup *cgrp) | 121 | inline int cgroup_is_removed(const struct cgroup *cgrp) |
| @@ -573,7 +572,6 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb) | |||
| 573 | inode->i_mode = mode; | 572 | inode->i_mode = mode; |
| 574 | inode->i_uid = current_fsuid(); | 573 | inode->i_uid = current_fsuid(); |
| 575 | inode->i_gid = current_fsgid(); | 574 | inode->i_gid = current_fsgid(); |
| 576 | inode->i_blocks = 0; | ||
| 577 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 575 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
| 578 | inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info; | 576 | inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info; |
| 579 | } | 577 | } |
| @@ -2540,7 +2538,6 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) | |||
| 2540 | init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id]; | 2538 | init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id]; |
| 2541 | 2539 | ||
| 2542 | need_forkexit_callback |= ss->fork || ss->exit; | 2540 | need_forkexit_callback |= ss->fork || ss->exit; |
| 2543 | need_mm_owner_callback |= !!ss->mm_owner_changed; | ||
| 2544 | 2541 | ||
| 2545 | /* At system boot, before all subsystems have been | 2542 | /* At system boot, before all subsystems have been |
| 2546 | * registered, no tasks have been forked, so we don't | 2543 | * registered, no tasks have been forked, so we don't |
| @@ -2790,37 +2787,6 @@ void cgroup_fork_callbacks(struct task_struct *child) | |||
| 2790 | } | 2787 | } |
| 2791 | } | 2788 | } |
| 2792 | 2789 | ||
| 2793 | #ifdef CONFIG_MM_OWNER | ||
| 2794 | /** | ||
| 2795 | * cgroup_mm_owner_callbacks - run callbacks when the mm->owner changes | ||
| 2796 | * @p: the new owner | ||
| 2797 | * | ||
| 2798 | * Called on every change to mm->owner. mm_init_owner() does not | ||
| 2799 | * invoke this routine, since it assigns the mm->owner the first time | ||
| 2800 | * and does not change it. | ||
| 2801 | * | ||
| 2802 | * The callbacks are invoked with mmap_sem held in read mode. | ||
| 2803 | */ | ||
| 2804 | void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new) | ||
| 2805 | { | ||
| 2806 | struct cgroup *oldcgrp, *newcgrp = NULL; | ||
| 2807 | |||
| 2808 | if (need_mm_owner_callback) { | ||
| 2809 | int i; | ||
| 2810 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
| 2811 | struct cgroup_subsys *ss = subsys[i]; | ||
| 2812 | oldcgrp = task_cgroup(old, ss->subsys_id); | ||
| 2813 | if (new) | ||
| 2814 | newcgrp = task_cgroup(new, ss->subsys_id); | ||
| 2815 | if (oldcgrp == newcgrp) | ||
| 2816 | continue; | ||
| 2817 | if (ss->mm_owner_changed) | ||
| 2818 | ss->mm_owner_changed(ss, oldcgrp, newcgrp, new); | ||
| 2819 | } | ||
| 2820 | } | ||
| 2821 | } | ||
| 2822 | #endif /* CONFIG_MM_OWNER */ | ||
| 2823 | |||
| 2824 | /** | 2790 | /** |
| 2825 | * cgroup_post_fork - called on a new task after adding it to the task list | 2791 | * cgroup_post_fork - called on a new task after adding it to the task list |
| 2826 | * @child: the task in question | 2792 | * @child: the task in question |
diff --git a/kernel/compat.c b/kernel/compat.c
index d52e2ec1deb5..42d56544460f 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/migrate.h> | 24 | #include <linux/migrate.h> |
| 25 | #include <linux/posix-timers.h> | 25 | #include <linux/posix-timers.h> |
| 26 | #include <linux/times.h> | 26 | #include <linux/times.h> |
| 27 | #include <linux/ptrace.h> | ||
| 27 | 28 | ||
| 28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
| 29 | 30 | ||
| @@ -229,6 +230,7 @@ asmlinkage long compat_sys_times(struct compat_tms __user *tbuf) | |||
| 229 | if (copy_to_user(tbuf, &tmp, sizeof(tmp))) | 230 | if (copy_to_user(tbuf, &tmp, sizeof(tmp))) |
| 230 | return -EFAULT; | 231 | return -EFAULT; |
| 231 | } | 232 | } |
| 233 | force_successful_syscall_return(); | ||
| 232 | return compat_jiffies_to_clock_t(jiffies); | 234 | return compat_jiffies_to_clock_t(jiffies); |
| 233 | } | 235 | } |
| 234 | 236 | ||
| @@ -894,8 +896,9 @@ asmlinkage long compat_sys_time(compat_time_t __user * tloc) | |||
| 894 | 896 | ||
| 895 | if (tloc) { | 897 | if (tloc) { |
| 896 | if (put_user(i,tloc)) | 898 | if (put_user(i,tloc)) |
| 897 | i = -EFAULT; | 899 | return -EFAULT; |
| 898 | } | 900 | } |
| 901 | force_successful_syscall_return(); | ||
| 899 | return i; | 902 | return i; |
| 900 | } | 903 | } |
| 901 | 904 | ||
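The two compat.c hunks matter on architectures (ia64, for example) that report syscall failure out of band, via a flag, rather than by the sign of the return value alone; there, a legitimate time or jiffies value could be mistaken for a -errno. The sketch below is an annotated rendering of the patched compat_sys_time() flow, not a drop-in replacement; how `i` is obtained is elided in the hunk, so get_seconds() is a stand-in.

```c
/*
 * Annotated sketch of compat_sys_time() after this patch. On most
 * architectures force_successful_syscall_return() is a no-op; on ones
 * that flag errors out of band it clears the error indication so a
 * value that merely *looks* like -errno passes through as data.
 */
asmlinkage long compat_sys_time(compat_time_t __user *tloc)
{
	compat_time_t i = get_seconds();	/* stand-in: hunk elides this */

	if (tloc) {
		if (put_user(i, tloc))
			return -EFAULT;		/* genuine error: report normally */
	}
	force_successful_syscall_return();	/* "i" is data, not an errno */
	return i;
}
```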
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 47fff3b63cbf..30e74dd6d01b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
| @@ -269,8 +269,11 @@ out_release: | |||
| 269 | 269 | ||
| 270 | int __ref cpu_down(unsigned int cpu) | 270 | int __ref cpu_down(unsigned int cpu) |
| 271 | { | 271 | { |
| 272 | int err = 0; | 272 | int err; |
| 273 | 273 | ||
| 274 | err = stop_machine_create(); | ||
| 275 | if (err) | ||
| 276 | return err; | ||
| 274 | cpu_maps_update_begin(); | 277 | cpu_maps_update_begin(); |
| 275 | 278 | ||
| 276 | if (cpu_hotplug_disabled) { | 279 | if (cpu_hotplug_disabled) { |
| @@ -297,6 +300,7 @@ int __ref cpu_down(unsigned int cpu) | |||
| 297 | 300 | ||
| 298 | out: | 301 | out: |
| 299 | cpu_maps_update_done(); | 302 | cpu_maps_update_done(); |
| 303 | stop_machine_destroy(); | ||
| 300 | return err; | 304 | return err; |
| 301 | } | 305 | } |
| 302 | EXPORT_SYMBOL(cpu_down); | 306 | EXPORT_SYMBOL(cpu_down); |
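The cpu.c change follows a preallocation pattern: stop_machine_create() can allocate and therefore fail, so it runs before any hotplug state is touched, and stop_machine_destroy() balances it on every exit path. A minimal sketch of the shape, with the actual offline work reduced to a hypothetical helper:

```c
/*
 * Minimal sketch of the pattern cpu_down() adopts above. The
 * stop_machine_create()/stop_machine_destroy() names come from the
 * diff; do_offline_locked() is a hypothetical stand-in for the body.
 */
int sketch_cpu_down(unsigned int cpu)
{
	int err;

	err = stop_machine_create();	/* may allocate and fail: do it first */
	if (err)
		return err;		/* nothing to unwind yet */

	cpu_maps_update_begin();
	err = do_offline_locked(cpu);	/* hypothetical: the real work */
	cpu_maps_update_done();

	stop_machine_destroy();		/* balanced on success and failure */
	return err;
}
```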
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 39c1a4c1c5a9..345ace5117de 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
| @@ -240,6 +240,17 @@ static struct cpuset top_cpuset = { | |||
| 240 | static DEFINE_MUTEX(callback_mutex); | 240 | static DEFINE_MUTEX(callback_mutex); |
| 241 | 241 | ||
| 242 | /* | 242 | /* |
| 243 | * cpuset_buffer_lock protects both the cpuset_name and cpuset_nodelist | ||
| 244 | * buffers. They are statically allocated to prevent using excess stack | ||
| 245 | * when calling cpuset_print_task_mems_allowed(). | ||
| 246 | */ | ||
| 247 | #define CPUSET_NAME_LEN (128) | ||
| 248 | #define CPUSET_NODELIST_LEN (256) | ||
| 249 | static char cpuset_name[CPUSET_NAME_LEN]; | ||
| 250 | static char cpuset_nodelist[CPUSET_NODELIST_LEN]; | ||
| 251 | static DEFINE_SPINLOCK(cpuset_buffer_lock); | ||
| 252 | |||
| 253 | /* | ||
| 243 | * This is ugly, but preserves the userspace API for existing cpuset | 254 | * This is ugly, but preserves the userspace API for existing cpuset |
| 244 | * users. If someone tries to mount the "cpuset" filesystem, we | 255 | * users. If someone tries to mount the "cpuset" filesystem, we |
| 245 | * silently switch it to mount "cgroup" instead | 256 | * silently switch it to mount "cgroup" instead |
| @@ -2356,6 +2367,29 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, | |||
| 2356 | return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); | 2367 | return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); |
| 2357 | } | 2368 | } |
| 2358 | 2369 | ||
| 2370 | /** | ||
| 2371 | * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed | ||
| 2372 | * @task: pointer to task_struct of some task. | ||
| 2373 | * | ||
| 2374 | * Description: Prints @task's name, cpuset name, and cached copy of its | ||
| 2375 | * mems_allowed to the kernel log. Must hold task_lock(task) to allow | ||
| 2376 | * dereferencing task_cs(task). | ||
| 2377 | */ | ||
| 2378 | void cpuset_print_task_mems_allowed(struct task_struct *tsk) | ||
| 2379 | { | ||
| 2380 | struct dentry *dentry; | ||
| 2381 | |||
| 2382 | dentry = task_cs(tsk)->css.cgroup->dentry; | ||
| 2383 | spin_lock(&cpuset_buffer_lock); | ||
| 2384 | snprintf(cpuset_name, CPUSET_NAME_LEN, | ||
| 2385 | dentry ? (const char *)dentry->d_name.name : "/"); | ||
| 2386 | nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, | ||
| 2387 | tsk->mems_allowed); | ||
| 2388 | printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n", | ||
| 2389 | tsk->comm, cpuset_name, cpuset_nodelist); | ||
| 2390 | spin_unlock(&cpuset_buffer_lock); | ||
| 2391 | } | ||
| 2392 | |||
| 2359 | /* | 2393 | /* |
| 2360 | * Collection of memory_pressure is suppressed unless | 2394 | * Collection of memory_pressure is suppressed unless |
| 2361 | * this flag is enabled by writing "1" to the special | 2395 | * this flag is enabled by writing "1" to the special |
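cpuset_print_task_mems_allowed() is meant to run on memory-pressure paths where stack is already scarce, which is why its two scratch buffers are static (.bss) and serialized by cpuset_buffer_lock instead of living on the stack. nodelist_scnprintf() renders the nodemask in list syntax, so a report comes out shaped like "myjob cpuset=batch mems_allowed=0,2-3". The sketch below restates the diff with that rationale spelled out; it is abridged, not the kernel source.

```c
/*
 * Sketch of the static-buffer idiom cpuset.c introduces: scratch
 * space lives in .bss, a spinlock serializes the writers. Names match
 * the diff; the name-formatting step is elided.
 */
void sketch_print_mems_allowed(struct task_struct *tsk)
{
	spin_lock(&cpuset_buffer_lock);		/* one report at a time */
	/* ... format tsk's cpuset name into cpuset_name ... */
	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
			   tsk->mems_allowed);	/* e.g. "0,2-3" */
	printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
	       tsk->comm, cpuset_name, cpuset_nodelist);
	spin_unlock(&cpuset_buffer_lock);	/* buffers reusable now */
}
```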
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index f013a0c2e111..038707404b76 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
| @@ -109,20 +109,40 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied); | |||
| 109 | int dma_alloc_from_coherent(struct device *dev, ssize_t size, | 109 | int dma_alloc_from_coherent(struct device *dev, ssize_t size, |
| 110 | dma_addr_t *dma_handle, void **ret) | 110 | dma_addr_t *dma_handle, void **ret) |
| 111 | { | 111 | { |
| 112 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | 112 | struct dma_coherent_mem *mem; |
| 113 | int order = get_order(size); | 113 | int order = get_order(size); |
| 114 | int pageno; | ||
| 114 | 115 | ||
| 115 | if (mem) { | 116 | if (!dev) |
| 116 | int page = bitmap_find_free_region(mem->bitmap, mem->size, | 117 | return 0; |
| 117 | order); | 118 | mem = dev->dma_mem; |
| 118 | if (page >= 0) { | 119 | if (!mem) |
| 119 | *dma_handle = mem->device_base + (page << PAGE_SHIFT); | 120 | return 0; |
| 120 | *ret = mem->virt_base + (page << PAGE_SHIFT); | 121 | if (unlikely(size > mem->size)) |
| 121 | memset(*ret, 0, size); | 122 | return 0; |
| 122 | } else if (mem->flags & DMA_MEMORY_EXCLUSIVE) | 123 | |
| 123 | *ret = NULL; | 124 | pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); |
| 125 | if (pageno >= 0) { | ||
| 126 | /* | ||
| 127 | * Memory was found in the per-device arena. | ||
| 128 | */ | ||
| 129 | *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); | ||
| 130 | *ret = mem->virt_base + (pageno << PAGE_SHIFT); | ||
| 131 | memset(*ret, 0, size); | ||
| 132 | } else if (mem->flags & DMA_MEMORY_EXCLUSIVE) { | ||
| 133 | /* | ||
| 134 | * The per-device arena is exhausted and we are not | ||
| 135 | * permitted to fall back to generic memory. | ||
| 136 | */ | ||
| 137 | *ret = NULL; | ||
| 138 | } else { | ||
| 139 | /* | ||
| 140 | * The per-device arena is exhausted and we are | ||
| 141 | * permitted to fall back to generic memory. | ||
| 142 | */ | ||
| 143 | return 0; | ||
| 124 | } | 144 | } |
| 125 | return (mem != NULL); | 145 | return 1; |
| 126 | } | 146 | } |
| 127 | EXPORT_SYMBOL(dma_alloc_from_coherent); | 147 | EXPORT_SYMBOL(dma_alloc_from_coherent); |
| 128 | 148 | ||
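The rewrite flattens the nested ifs into guard clauses and pins down the function's three-way contract: return 0 to tell the caller to fall back to the generic allocator (no device, no arena, or an over-size request); return 1 with *ret set when the arena satisfied the request; and return 1 with *ret == NULL when the arena is exhausted but flagged DMA_MEMORY_EXCLUSIVE. A hedged caller-side sketch, with generic_dma_alloc() as a made-up stand-in:

```c
/*
 * Caller-side sketch of the contract codified above. A return of 1
 * means "the per-device arena decided the outcome" (possibly with
 * *vaddr == NULL for an exhausted exclusive arena); 0 means "fall
 * back". generic_dma_alloc() is hypothetical, not a kernel symbol.
 */
void *sketch_alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *handle)
{
	void *vaddr;

	if (dma_alloc_from_coherent(dev, size, handle, &vaddr))
		return vaddr;	/* arena hit, or NULL if exclusive + full */

	return generic_dma_alloc(dev, size, handle);	/* hypothetical */
}
```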
diff --git a/kernel/exit.c b/kernel/exit.c
index c9e5a1c14e08..c7740fa3252c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
| @@ -642,35 +642,31 @@ retry: | |||
| 642 | /* | 642 | /* |
| 643 | * We found no owner yet mm_users > 1: this implies that we are | 643 | * We found no owner yet mm_users > 1: this implies that we are |
| 644 | * most likely racing with swapoff (try_to_unuse()) or /proc or | 644 | * most likely racing with swapoff (try_to_unuse()) or /proc or |
| 645 | * ptrace or page migration (get_task_mm()). Mark owner as NULL, | 645 | * ptrace or page migration (get_task_mm()). Mark owner as NULL. |
| 646 | * so that subsystems can understand the callback and take action. | ||
| 647 | */ | 646 | */ |
| 648 | down_write(&mm->mmap_sem); | ||
| 649 | cgroup_mm_owner_callbacks(mm->owner, NULL); | ||
| 650 | mm->owner = NULL; | 647 | mm->owner = NULL; |
| 651 | up_write(&mm->mmap_sem); | ||
| 652 | return; | 648 | return; |
| 653 | 649 | ||
| 654 | assign_new_owner: | 650 | assign_new_owner: |
| 655 | BUG_ON(c == p); | 651 | BUG_ON(c == p); |
| 656 | get_task_struct(c); | 652 | get_task_struct(c); |
| 657 | read_unlock(&tasklist_lock); | ||
| 658 | down_write(&mm->mmap_sem); | ||
| 659 | /* | 653 | /* |
| 660 | * The task_lock protects c->mm from changing. | 654 | * The task_lock protects c->mm from changing. |
| 661 | * We always want mm->owner->mm == mm | 655 | * We always want mm->owner->mm == mm |
| 662 | */ | 656 | */ |
| 663 | task_lock(c); | 657 | task_lock(c); |
| 658 | /* | ||
| 659 | * Delay read_unlock() till we have the task_lock() | ||
| 660 | * to ensure that c does not slip away underneath us | ||
| 661 | */ | ||
| 662 | read_unlock(&tasklist_lock); | ||
| 664 | if (c->mm != mm) { | 663 | if (c->mm != mm) { |
| 665 | task_unlock(c); | 664 | task_unlock(c); |
| 666 | up_write(&mm->mmap_sem); | ||
| 667 | put_task_struct(c); | 665 | put_task_struct(c); |
| 668 | goto retry; | 666 | goto retry; |
| 669 | } | 667 | } |
| 670 | cgroup_mm_owner_callbacks(mm->owner, c); | ||
| 671 | mm->owner = c; | 668 | mm->owner = c; |
| 672 | task_unlock(c); | 669 | task_unlock(c); |
| 673 | up_write(&mm->mmap_sem); | ||
| 674 | put_task_struct(c); | 670 | put_task_struct(c); |
| 675 | } | 671 | } |
| 676 | #endif /* CONFIG_MM_OWNER */ | 672 | #endif /* CONFIG_MM_OWNER */ |
| @@ -1055,10 +1051,7 @@ NORET_TYPE void do_exit(long code) | |||
| 1055 | preempt_count()); | 1051 | preempt_count()); |
| 1056 | 1052 | ||
| 1057 | acct_update_integrals(tsk); | 1053 | acct_update_integrals(tsk); |
| 1058 | if (tsk->mm) { | 1054 | |
| 1059 | update_hiwater_rss(tsk->mm); | ||
| 1060 | update_hiwater_vm(tsk->mm); | ||
| 1061 | } | ||
| 1062 | group_dead = atomic_dec_and_test(&tsk->signal->live); | 1055 | group_dead = atomic_dec_and_test(&tsk->signal->live); |
| 1063 | if (group_dead) { | 1056 | if (group_dead) { |
| 1064 | hrtimer_cancel(&tsk->signal->real_timer); | 1057 | hrtimer_cancel(&tsk->signal->real_timer); |
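The interesting part of the exit.c hunk is the lock handoff: previously read_unlock(&tasklist_lock) came before task_lock(c), leaving a window in which the chosen task could drop its mm; now task_lock is taken first, so the `c->mm != mm` re-check is authoritative. The down_write(&mm->mmap_sem) pair disappears along with the mm_owner callbacks it existed to protect. Stripped of the diff markers, the new ordering reads:

```c
/*
 * Abridged, annotated rendering of the new assign_new_owner path.
 * Not compilable on its own; it restates the diff to make the lock
 * ordering explicit.
 */
	get_task_struct(c);		/* keep the task_struct alive */
	task_lock(c);			/* freeze c->mm ... */
	read_unlock(&tasklist_lock);	/* ... before c can slip away */
	if (c->mm != mm) {		/* re-check under task_lock */
		task_unlock(c);
		put_task_struct(c);
		goto retry;		/* candidate lost its mm: pick again */
	}
	mm->owner = c;			/* safe: c->mm is pinned to mm */
	task_unlock(c);
	put_task_struct(c);
```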
diff --git a/kernel/fork.c b/kernel/fork.c
index 43cbf30669e6..7b8f2a78be3d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
| @@ -400,6 +400,18 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); | |||
| 400 | #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) | 400 | #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) |
| 401 | #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) | 401 | #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) |
| 402 | 402 | ||
| 403 | static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT; | ||
| 404 | |||
| 405 | static int __init coredump_filter_setup(char *s) | ||
| 406 | { | ||
| 407 | default_dump_filter = | ||
| 408 | (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & | ||
| 409 | MMF_DUMP_FILTER_MASK; | ||
| 410 | return 1; | ||
| 411 | } | ||
| 412 | |||
| 413 | __setup("coredump_filter=", coredump_filter_setup); | ||
| 414 | |||
| 403 | #include <linux/init_task.h> | 415 | #include <linux/init_task.h> |
| 404 | 416 | ||
| 405 | static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) | 417 | static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) |
| @@ -408,8 +420,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) | |||
| 408 | atomic_set(&mm->mm_count, 1); | 420 | atomic_set(&mm->mm_count, 1); |
| 409 | init_rwsem(&mm->mmap_sem); | 421 | init_rwsem(&mm->mmap_sem); |
| 410 | INIT_LIST_HEAD(&mm->mmlist); | 422 | INIT_LIST_HEAD(&mm->mmlist); |
| 411 | mm->flags = (current->mm) ? current->mm->flags | 423 | mm->flags = (current->mm) ? current->mm->flags : default_dump_filter; |
| 412 | : MMF_DUMP_FILTER_DEFAULT; | ||
| 413 | mm->core_state = NULL; | 424 | mm->core_state = NULL; |
| 414 | mm->nr_ptes = 0; | 425 | mm->nr_ptes = 0; |
| 415 | set_mm_counter(mm, file_rss, 0); | 426 | set_mm_counter(mm, file_rss, 0); |
| @@ -758,7 +769,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) | |||
| 758 | { | 769 | { |
| 759 | struct sighand_struct *sig; | 770 | struct sighand_struct *sig; |
| 760 | 771 | ||
| 761 | if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) { | 772 | if (clone_flags & CLONE_SIGHAND) { |
| 762 | atomic_inc(¤t->sighand->count); | 773 | atomic_inc(¤t->sighand->count); |
| 763 | return 0; | 774 | return 0; |
| 764 | } | 775 | } |
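Two independent changes land in fork.c. The copy_sighand() test narrows from (CLONE_SIGHAND | CLONE_THREAD) to CLONE_SIGHAND alone, which is equivalent in practice because CLONE_THREAD already requires CLONE_SIGHAND. The new coredump_filter= boot parameter seeds the dump filter used when a task without an mm creates one; every later fork inherits the parent's flags instead. A worked example of the bit arithmetic, assuming the era's MMF_DUMP_FILTER_SHIFT == 2 and a 7-bit filter field:

```c
/*
 * Worked example of coredump_filter= parsing (userspace demo). The
 * shift/mask values are assumptions matching 2.6.28-era headers; the
 * kernel uses simple_strtoul() where this uses strtoul().
 */
#include <stdio.h>
#include <stdlib.h>

#define MMF_DUMP_FILTER_SHIFT	2	/* assumed value */
#define MMF_DUMP_FILTER_BITS	7	/* assumed value */
#define MMF_DUMP_FILTER_MASK \
	(((1UL << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)

int main(void)
{
	/* booting with coredump_filter=0x23 ... */
	unsigned long val = strtoul("0x23", NULL, 0);
	unsigned long filter = (val << MMF_DUMP_FILTER_SHIFT) &
			       MMF_DUMP_FILTER_MASK;

	printf("default_dump_filter = %#lx\n", filter);	/* prints 0x8c */
	return 0;
}
```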
diff --git a/kernel/futex.c b/kernel/futex.c
index 7c6cbabe52b3..002aa189eb09 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
| @@ -170,8 +170,11 @@ static void get_futex_key_refs(union futex_key *key) | |||
| 170 | */ | 170 | */ |
| 171 | static void drop_futex_key_refs(union futex_key *key) | 171 | static void drop_futex_key_refs(union futex_key *key) |
| 172 | { | 172 | { |
| 173 | if (!key->both.ptr) | 173 | if (!key->both.ptr) { |
| 174 | /* If we're here then we tried to put a key we failed to get */ | ||
| 175 | WARN_ON_ONCE(1); | ||
| 174 | return; | 176 | return; |
| 177 | } | ||
| 175 | 178 | ||
| 176 | switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { | 179 | switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { |
| 177 | case FUT_OFF_INODE: | 180 | case FUT_OFF_INODE: |
| @@ -730,8 +733,8 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) | |||
| 730 | } | 733 | } |
| 731 | 734 | ||
| 732 | spin_unlock(&hb->lock); | 735 | spin_unlock(&hb->lock); |
| 733 | out: | ||
| 734 | put_futex_key(fshared, &key); | 736 | put_futex_key(fshared, &key); |
| 737 | out: | ||
| 735 | return ret; | 738 | return ret; |
| 736 | } | 739 | } |
| 737 | 740 | ||
| @@ -755,7 +758,7 @@ retryfull: | |||
| 755 | goto out; | 758 | goto out; |
| 756 | ret = get_futex_key(uaddr2, fshared, &key2); | 759 | ret = get_futex_key(uaddr2, fshared, &key2); |
| 757 | if (unlikely(ret != 0)) | 760 | if (unlikely(ret != 0)) |
| 758 | goto out; | 761 | goto out_put_key1; |
| 759 | 762 | ||
| 760 | hb1 = hash_futex(&key1); | 763 | hb1 = hash_futex(&key1); |
| 761 | hb2 = hash_futex(&key2); | 764 | hb2 = hash_futex(&key2); |
| @@ -777,12 +780,12 @@ retry: | |||
| 777 | * but we might get them from range checking | 780 | * but we might get them from range checking |
| 778 | */ | 781 | */ |
| 779 | ret = op_ret; | 782 | ret = op_ret; |
| 780 | goto out; | 783 | goto out_put_keys; |
| 781 | #endif | 784 | #endif |
| 782 | 785 | ||
| 783 | if (unlikely(op_ret != -EFAULT)) { | 786 | if (unlikely(op_ret != -EFAULT)) { |
| 784 | ret = op_ret; | 787 | ret = op_ret; |
| 785 | goto out; | 788 | goto out_put_keys; |
| 786 | } | 789 | } |
| 787 | 790 | ||
| 788 | /* | 791 | /* |
| @@ -796,7 +799,7 @@ retry: | |||
| 796 | ret = futex_handle_fault((unsigned long)uaddr2, | 799 | ret = futex_handle_fault((unsigned long)uaddr2, |
| 797 | attempt); | 800 | attempt); |
| 798 | if (ret) | 801 | if (ret) |
| 799 | goto out; | 802 | goto out_put_keys; |
| 800 | goto retry; | 803 | goto retry; |
| 801 | } | 804 | } |
| 802 | 805 | ||
| @@ -834,10 +837,11 @@ retry: | |||
| 834 | spin_unlock(&hb1->lock); | 837 | spin_unlock(&hb1->lock); |
| 835 | if (hb1 != hb2) | 838 | if (hb1 != hb2) |
| 836 | spin_unlock(&hb2->lock); | 839 | spin_unlock(&hb2->lock); |
| 837 | out: | 840 | out_put_keys: |
| 838 | put_futex_key(fshared, &key2); | 841 | put_futex_key(fshared, &key2); |
| 842 | out_put_key1: | ||
| 839 | put_futex_key(fshared, &key1); | 843 | put_futex_key(fshared, &key1); |
| 840 | 844 | out: | |
| 841 | return ret; | 845 | return ret; |
| 842 | } | 846 | } |
| 843 | 847 | ||
| @@ -854,13 +858,13 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, | |||
| 854 | struct futex_q *this, *next; | 858 | struct futex_q *this, *next; |
| 855 | int ret, drop_count = 0; | 859 | int ret, drop_count = 0; |
| 856 | 860 | ||
| 857 | retry: | 861 | retry: |
| 858 | ret = get_futex_key(uaddr1, fshared, &key1); | 862 | ret = get_futex_key(uaddr1, fshared, &key1); |
| 859 | if (unlikely(ret != 0)) | 863 | if (unlikely(ret != 0)) |
| 860 | goto out; | 864 | goto out; |
| 861 | ret = get_futex_key(uaddr2, fshared, &key2); | 865 | ret = get_futex_key(uaddr2, fshared, &key2); |
| 862 | if (unlikely(ret != 0)) | 866 | if (unlikely(ret != 0)) |
| 863 | goto out; | 867 | goto out_put_key1; |
| 864 | 868 | ||
| 865 | hb1 = hash_futex(&key1); | 869 | hb1 = hash_futex(&key1); |
| 866 | hb2 = hash_futex(&key2); | 870 | hb2 = hash_futex(&key2); |
| @@ -882,7 +886,7 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, | |||
| 882 | if (!ret) | 886 | if (!ret) |
| 883 | goto retry; | 887 | goto retry; |
| 884 | 888 | ||
| 885 | return ret; | 889 | goto out_put_keys; |
| 886 | } | 890 | } |
| 887 | if (curval != *cmpval) { | 891 | if (curval != *cmpval) { |
| 888 | ret = -EAGAIN; | 892 | ret = -EAGAIN; |
| @@ -927,9 +931,11 @@ out_unlock: | |||
| 927 | while (--drop_count >= 0) | 931 | while (--drop_count >= 0) |
| 928 | drop_futex_key_refs(&key1); | 932 | drop_futex_key_refs(&key1); |
| 929 | 933 | ||
| 930 | out: | 934 | out_put_keys: |
| 931 | put_futex_key(fshared, &key2); | 935 | put_futex_key(fshared, &key2); |
| 936 | out_put_key1: | ||
| 932 | put_futex_key(fshared, &key1); | 937 | put_futex_key(fshared, &key1); |
| 938 | out: | ||
| 933 | return ret; | 939 | return ret; |
| 934 | } | 940 | } |
| 935 | 941 | ||
| @@ -990,7 +996,7 @@ static int unqueue_me(struct futex_q *q) | |||
| 990 | int ret = 0; | 996 | int ret = 0; |
| 991 | 997 | ||
| 992 | /* In the common case we don't take the spinlock, which is nice. */ | 998 | /* In the common case we don't take the spinlock, which is nice. */ |
| 993 | retry: | 999 | retry: |
| 994 | lock_ptr = q->lock_ptr; | 1000 | lock_ptr = q->lock_ptr; |
| 995 | barrier(); | 1001 | barrier(); |
| 996 | if (lock_ptr != NULL) { | 1002 | if (lock_ptr != NULL) { |
| @@ -1172,11 +1178,11 @@ static int futex_wait(u32 __user *uaddr, int fshared, | |||
| 1172 | 1178 | ||
| 1173 | q.pi_state = NULL; | 1179 | q.pi_state = NULL; |
| 1174 | q.bitset = bitset; | 1180 | q.bitset = bitset; |
| 1175 | retry: | 1181 | retry: |
| 1176 | q.key = FUTEX_KEY_INIT; | 1182 | q.key = FUTEX_KEY_INIT; |
| 1177 | ret = get_futex_key(uaddr, fshared, &q.key); | 1183 | ret = get_futex_key(uaddr, fshared, &q.key); |
| 1178 | if (unlikely(ret != 0)) | 1184 | if (unlikely(ret != 0)) |
| 1179 | goto out_release_sem; | 1185 | goto out; |
| 1180 | 1186 | ||
| 1181 | hb = queue_lock(&q); | 1187 | hb = queue_lock(&q); |
| 1182 | 1188 | ||
| @@ -1204,6 +1210,7 @@ static int futex_wait(u32 __user *uaddr, int fshared, | |||
| 1204 | 1210 | ||
| 1205 | if (unlikely(ret)) { | 1211 | if (unlikely(ret)) { |
| 1206 | queue_unlock(&q, hb); | 1212 | queue_unlock(&q, hb); |
| 1213 | put_futex_key(fshared, &q.key); | ||
| 1207 | 1214 | ||
| 1208 | ret = get_user(uval, uaddr); | 1215 | ret = get_user(uval, uaddr); |
| 1209 | 1216 | ||
| @@ -1213,7 +1220,7 @@ static int futex_wait(u32 __user *uaddr, int fshared, | |||
| 1213 | } | 1220 | } |
| 1214 | ret = -EWOULDBLOCK; | 1221 | ret = -EWOULDBLOCK; |
| 1215 | if (uval != val) | 1222 | if (uval != val) |
| 1216 | goto out_unlock_release_sem; | 1223 | goto out_unlock_put_key; |
| 1217 | 1224 | ||
| 1218 | /* Only actually queue if *uaddr contained val. */ | 1225 | /* Only actually queue if *uaddr contained val. */ |
| 1219 | queue_me(&q, hb); | 1226 | queue_me(&q, hb); |
| @@ -1305,11 +1312,11 @@ static int futex_wait(u32 __user *uaddr, int fshared, | |||
| 1305 | return -ERESTART_RESTARTBLOCK; | 1312 | return -ERESTART_RESTARTBLOCK; |
| 1306 | } | 1313 | } |
| 1307 | 1314 | ||
| 1308 | out_unlock_release_sem: | 1315 | out_unlock_put_key: |
| 1309 | queue_unlock(&q, hb); | 1316 | queue_unlock(&q, hb); |
| 1310 | |||
| 1311 | out_release_sem: | ||
| 1312 | put_futex_key(fshared, &q.key); | 1317 | put_futex_key(fshared, &q.key); |
| 1318 | |||
| 1319 | out: | ||
| 1313 | return ret; | 1320 | return ret; |
| 1314 | } | 1321 | } |
| 1315 | 1322 | ||
| @@ -1358,16 +1365,16 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, | |||
| 1358 | } | 1365 | } |
| 1359 | 1366 | ||
| 1360 | q.pi_state = NULL; | 1367 | q.pi_state = NULL; |
| 1361 | retry: | 1368 | retry: |
| 1362 | q.key = FUTEX_KEY_INIT; | 1369 | q.key = FUTEX_KEY_INIT; |
| 1363 | ret = get_futex_key(uaddr, fshared, &q.key); | 1370 | ret = get_futex_key(uaddr, fshared, &q.key); |
| 1364 | if (unlikely(ret != 0)) | 1371 | if (unlikely(ret != 0)) |
| 1365 | goto out_release_sem; | 1372 | goto out; |
| 1366 | 1373 | ||
| 1367 | retry_unlocked: | 1374 | retry_unlocked: |
| 1368 | hb = queue_lock(&q); | 1375 | hb = queue_lock(&q); |
| 1369 | 1376 | ||
| 1370 | retry_locked: | 1377 | retry_locked: |
| 1371 | ret = lock_taken = 0; | 1378 | ret = lock_taken = 0; |
| 1372 | 1379 | ||
| 1373 | /* | 1380 | /* |
| @@ -1388,14 +1395,14 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, | |||
| 1388 | */ | 1395 | */ |
| 1389 | if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) { | 1396 | if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) { |
| 1390 | ret = -EDEADLK; | 1397 | ret = -EDEADLK; |
| 1391 | goto out_unlock_release_sem; | 1398 | goto out_unlock_put_key; |
| 1392 | } | 1399 | } |
| 1393 | 1400 | ||
| 1394 | /* | 1401 | /* |
| 1395 | * Surprise - we got the lock. Just return to userspace: | 1402 | * Surprise - we got the lock. Just return to userspace: |
| 1396 | */ | 1403 | */ |
| 1397 | if (unlikely(!curval)) | 1404 | if (unlikely(!curval)) |
| 1398 | goto out_unlock_release_sem; | 1405 | goto out_unlock_put_key; |
| 1399 | 1406 | ||
| 1400 | uval = curval; | 1407 | uval = curval; |
| 1401 | 1408 | ||
| @@ -1431,7 +1438,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, | |||
| 1431 | * We took the lock due to owner died take over. | 1438 | * We took the lock due to owner died take over. |
| 1432 | */ | 1439 | */ |
| 1433 | if (unlikely(lock_taken)) | 1440 | if (unlikely(lock_taken)) |
| 1434 | goto out_unlock_release_sem; | 1441 | goto out_unlock_put_key; |
| 1435 | 1442 | ||
| 1436 | /* | 1443 | /* |
| 1437 | * We dont have the lock. Look up the PI state (or create it if | 1444 | * We dont have the lock. Look up the PI state (or create it if |
| @@ -1470,7 +1477,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, | |||
| 1470 | goto retry_locked; | 1477 | goto retry_locked; |
| 1471 | } | 1478 | } |
| 1472 | default: | 1479 | default: |
| 1473 | goto out_unlock_release_sem; | 1480 | goto out_unlock_put_key; |
| 1474 | } | 1481 | } |
| 1475 | } | 1482 | } |
| 1476 | 1483 | ||
| @@ -1561,16 +1568,17 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, | |||
| 1561 | destroy_hrtimer_on_stack(&to->timer); | 1568 | destroy_hrtimer_on_stack(&to->timer); |
| 1562 | return ret != -EINTR ? ret : -ERESTARTNOINTR; | 1569 | return ret != -EINTR ? ret : -ERESTARTNOINTR; |
| 1563 | 1570 | ||
| 1564 | out_unlock_release_sem: | 1571 | out_unlock_put_key: |
| 1565 | queue_unlock(&q, hb); | 1572 | queue_unlock(&q, hb); |
| 1566 | 1573 | ||
| 1567 | out_release_sem: | 1574 | out_put_key: |
| 1568 | put_futex_key(fshared, &q.key); | 1575 | put_futex_key(fshared, &q.key); |
| 1576 | out: | ||
| 1569 | if (to) | 1577 | if (to) |
| 1570 | destroy_hrtimer_on_stack(&to->timer); | 1578 | destroy_hrtimer_on_stack(&to->timer); |
| 1571 | return ret; | 1579 | return ret; |
| 1572 | 1580 | ||
| 1573 | uaddr_faulted: | 1581 | uaddr_faulted: |
| 1574 | /* | 1582 | /* |
| 1575 | * We have to r/w *(int __user *)uaddr, and we have to modify it | 1583 | * We have to r/w *(int __user *)uaddr, and we have to modify it |
| 1576 | * atomically. Therefore, if we continue to fault after get_user() | 1584 | * atomically. Therefore, if we continue to fault after get_user() |
| @@ -1583,7 +1591,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, | |||
| 1583 | if (attempt++) { | 1591 | if (attempt++) { |
| 1584 | ret = futex_handle_fault((unsigned long)uaddr, attempt); | 1592 | ret = futex_handle_fault((unsigned long)uaddr, attempt); |
| 1585 | if (ret) | 1593 | if (ret) |
| 1586 | goto out_release_sem; | 1594 | goto out_put_key; |
| 1587 | goto retry_unlocked; | 1595 | goto retry_unlocked; |
| 1588 | } | 1596 | } |
| 1589 | 1597 | ||
| @@ -1675,9 +1683,9 @@ retry_unlocked: | |||
| 1675 | 1683 | ||
| 1676 | out_unlock: | 1684 | out_unlock: |
| 1677 | spin_unlock(&hb->lock); | 1685 | spin_unlock(&hb->lock); |
| 1678 | out: | ||
| 1679 | put_futex_key(fshared, &key); | 1686 | put_futex_key(fshared, &key); |
| 1680 | 1687 | ||
| 1688 | out: | ||
| 1681 | return ret; | 1689 | return ret; |
| 1682 | 1690 | ||
| 1683 | pi_faulted: | 1691 | pi_faulted: |
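Almost all of the futex.c churn is a relabeling of error exits so that each label releases exactly what is held when it is reached: key references are now taken and dropped symmetrically (note the new put_futex_key() after queue_unlock() in futex_wait's fault path), and the WARN_ON_ONCE in drop_futex_key_refs() catches any remaining put-without-get. The unwind idiom, reduced to a self-contained demo with stand-in functions:

```c
/*
 * Self-contained demo (userspace C) of the unwind idiom the futex
 * relabeling enforces: acquire in order, release in reverse, one
 * label per partially-held state. get_key()/put_key() are stand-ins,
 * not kernel functions.
 */
#include <stdio.h>

static int get_key(int *k, int fail)
{
	if (fail)
		return -1;	/* failed: caller holds nothing new */
	*k = 1;
	return 0;
}

static void put_key(int *k)
{
	*k = 0;
}

static int two_key_op(int fail1, int fail2, int fail_op)
{
	int k1 = 0, k2 = 0, ret;

	ret = get_key(&k1, fail1);
	if (ret)
		goto out;		/* nothing held yet */
	ret = get_key(&k2, fail2);
	if (ret)
		goto out_put_key1;	/* only k1 held */

	if (fail_op) {
		ret = -1;
		goto out_put_keys;	/* both keys held */
	}
	/* ... operate on both keys, then fall through to release ... */

out_put_keys:
	put_key(&k2);
out_put_key1:
	put_key(&k1);
out:
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", two_key_op(1, 0, 0),
	       two_key_op(0, 1, 0), two_key_op(0, 0, 0));
	return 0;
}
```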
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b46dbb908669..a27a5f64443d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
| @@ -51,8 +51,8 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe"; | |||
| 51 | 51 | ||
| 52 | /** | 52 | /** |
| 53 | * request_module - try to load a kernel module | 53 | * request_module - try to load a kernel module |
| 54 | * @fmt: printf style format string for the name of the module | 54 | * @fmt: printf style format string for the name of the module |
| 55 | * @varargs: arguements as specified in the format string | 55 | * @...: arguments as specified in the format string |
| 56 | * | 56 | * |
| 57 | * Load a module using the user mode module loader. The function returns | 57 | * Load a module using the user mode module loader. The function returns |
| 58 | * zero on success or a negative errno code on failure. Note that a | 58 | * zero on success or a negative errno code on failure. Note that a |
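The kmod.c hunk is purely a kernel-doc fix: variadic arguments are documented as `@...:` and the "arguements" typo goes away. For context, the printf-style interface is what lets callers build the module alias at request time; the classic use is resolving a protocol family in the networking core. The error handling below is a sketch, not copied kernel code:

```c
/*
 * Illustrative use of request_module()'s printf-style interface: the
 * alias "net-pf-<N>" is resolved by modprobe to a protocol module.
 */
if (request_module("net-pf-%d", family) < 0)
	printk(KERN_WARNING "protocol family %d not available\n", family);
```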
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9f8a3f25259a..1b9cbdc0127a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
| @@ -69,7 +69,7 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; | |||
| 69 | /* NOTE: change this value only with kprobe_mutex held */ | 69 | /* NOTE: change this value only with kprobe_mutex held */ |
| 70 | static bool kprobe_enabled; | 70 | static bool kprobe_enabled; |
| 71 | 71 | ||
| 72 | DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ | 72 | static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ |
| 73 | static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; | 73 | static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; |
| 74 | static struct { | 74 | static struct { |
| 75 | spinlock_t lock ____cacheline_aligned_in_smp; | 75 | spinlock_t lock ____cacheline_aligned_in_smp; |
| @@ -115,6 +115,7 @@ enum kprobe_slot_state { | |||
| 115 | SLOT_USED = 2, | 115 | SLOT_USED = 2, |
| 116 | }; | 116 | }; |
| 117 | 117 | ||
| 118 | static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */ | ||
| 118 | static struct hlist_head kprobe_insn_pages; | 119 | static struct hlist_head kprobe_insn_pages; |
| 119 | static int kprobe_garbage_slots; | 120 | static int kprobe_garbage_slots; |
| 120 | static int collect_garbage_slots(void); | 121 | static int collect_garbage_slots(void); |
| @@ -144,10 +145,10 @@ loop_end: | |||
| 144 | } | 145 | } |
| 145 | 146 | ||
| 146 | /** | 147 | /** |
| 147 | * get_insn_slot() - Find a slot on an executable page for an instruction. | 148 | * __get_insn_slot() - Find a slot on an executable page for an instruction. |
| 148 | * We allocate an executable page if there's no room on existing ones. | 149 | * We allocate an executable page if there's no room on existing ones. |
| 149 | */ | 150 | */ |
| 150 | kprobe_opcode_t __kprobes *get_insn_slot(void) | 151 | static kprobe_opcode_t __kprobes *__get_insn_slot(void) |
| 151 | { | 152 | { |
| 152 | struct kprobe_insn_page *kip; | 153 | struct kprobe_insn_page *kip; |
| 153 | struct hlist_node *pos; | 154 | struct hlist_node *pos; |
| @@ -196,6 +197,15 @@ kprobe_opcode_t __kprobes *get_insn_slot(void) | |||
| 196 | return kip->insns; | 197 | return kip->insns; |
| 197 | } | 198 | } |
| 198 | 199 | ||
| 200 | kprobe_opcode_t __kprobes *get_insn_slot(void) | ||
| 201 | { | ||
| 202 | kprobe_opcode_t *ret; | ||
| 203 | mutex_lock(&kprobe_insn_mutex); | ||
| 204 | ret = __get_insn_slot(); | ||
| 205 | mutex_unlock(&kprobe_insn_mutex); | ||
| 206 | return ret; | ||
| 207 | } | ||
| 208 | |||
| 199 | /* Return 1 if all garbages are collected, otherwise 0. */ | 209 | /* Return 1 if all garbages are collected, otherwise 0. */ |
| 200 | static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) | 210 | static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) |
| 201 | { | 211 | { |
| @@ -226,9 +236,13 @@ static int __kprobes collect_garbage_slots(void) | |||
| 226 | { | 236 | { |
| 227 | struct kprobe_insn_page *kip; | 237 | struct kprobe_insn_page *kip; |
| 228 | struct hlist_node *pos, *next; | 238 | struct hlist_node *pos, *next; |
| 239 | int safety; | ||
| 229 | 240 | ||
| 230 | /* Ensure no-one is preepmted on the garbages */ | 241 | /* Ensure no-one is preepmted on the garbages */ |
| 231 | if (check_safety() != 0) | 242 | mutex_unlock(&kprobe_insn_mutex); |
| 243 | safety = check_safety(); | ||
| 244 | mutex_lock(&kprobe_insn_mutex); | ||
| 245 | if (safety != 0) | ||
| 232 | return -EAGAIN; | 246 | return -EAGAIN; |
| 233 | 247 | ||
| 234 | hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) { | 248 | hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) { |
| @@ -251,6 +265,7 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) | |||
| 251 | struct kprobe_insn_page *kip; | 265 | struct kprobe_insn_page *kip; |
| 252 | struct hlist_node *pos; | 266 | struct hlist_node *pos; |
| 253 | 267 | ||
| 268 | mutex_lock(&kprobe_insn_mutex); | ||
| 254 | hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { | 269 | hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { |
| 255 | if (kip->insns <= slot && | 270 | if (kip->insns <= slot && |
| 256 | slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { | 271 | slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { |
| @@ -267,6 +282,8 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) | |||
| 267 | 282 | ||
| 268 | if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE) | 283 | if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE) |
| 269 | collect_garbage_slots(); | 284 | collect_garbage_slots(); |
| 285 | |||
| 286 | mutex_unlock(&kprobe_insn_mutex); | ||
| 270 | } | 287 | } |
| 271 | #endif | 288 | #endif |
| 272 | 289 | ||
| @@ -310,7 +327,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
| 310 | struct kprobe *kp; | 327 | struct kprobe *kp; |
| 311 | 328 | ||
| 312 | list_for_each_entry_rcu(kp, &p->list, list) { | 329 | list_for_each_entry_rcu(kp, &p->list, list) { |
| 313 | if (kp->pre_handler) { | 330 | if (kp->pre_handler && !kprobe_gone(kp)) { |
| 314 | set_kprobe_instance(kp); | 331 | set_kprobe_instance(kp); |
| 315 | if (kp->pre_handler(kp, regs)) | 332 | if (kp->pre_handler(kp, regs)) |
| 316 | return 1; | 333 | return 1; |
| @@ -326,7 +343,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs, | |||
| 326 | struct kprobe *kp; | 343 | struct kprobe *kp; |
| 327 | 344 | ||
| 328 | list_for_each_entry_rcu(kp, &p->list, list) { | 345 | list_for_each_entry_rcu(kp, &p->list, list) { |
| 329 | if (kp->post_handler) { | 346 | if (kp->post_handler && !kprobe_gone(kp)) { |
| 330 | set_kprobe_instance(kp); | 347 | set_kprobe_instance(kp); |
| 331 | kp->post_handler(kp, regs, flags); | 348 | kp->post_handler(kp, regs, flags); |
| 332 | reset_kprobe_instance(); | 349 | reset_kprobe_instance(); |
| @@ -393,7 +410,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, | |||
| 393 | hlist_add_head(&ri->hlist, head); | 410 | hlist_add_head(&ri->hlist, head); |
| 394 | } | 411 | } |
| 395 | 412 | ||
| 396 | void kretprobe_hash_lock(struct task_struct *tsk, | 413 | void __kprobes kretprobe_hash_lock(struct task_struct *tsk, |
| 397 | struct hlist_head **head, unsigned long *flags) | 414 | struct hlist_head **head, unsigned long *flags) |
| 398 | { | 415 | { |
| 399 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); | 416 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); |
| @@ -404,13 +421,15 @@ void kretprobe_hash_lock(struct task_struct *tsk, | |||
| 404 | spin_lock_irqsave(hlist_lock, *flags); | 421 | spin_lock_irqsave(hlist_lock, *flags); |
| 405 | } | 422 | } |
| 406 | 423 | ||
| 407 | static void kretprobe_table_lock(unsigned long hash, unsigned long *flags) | 424 | static void __kprobes kretprobe_table_lock(unsigned long hash, |
| 425 | unsigned long *flags) | ||
| 408 | { | 426 | { |
| 409 | spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); | 427 | spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); |
| 410 | spin_lock_irqsave(hlist_lock, *flags); | 428 | spin_lock_irqsave(hlist_lock, *flags); |
| 411 | } | 429 | } |
| 412 | 430 | ||
| 413 | void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags) | 431 | void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, |
| 432 | unsigned long *flags) | ||
| 414 | { | 433 | { |
| 415 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); | 434 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); |
| 416 | spinlock_t *hlist_lock; | 435 | spinlock_t *hlist_lock; |
| @@ -419,7 +438,7 @@ void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags) | |||
| 419 | spin_unlock_irqrestore(hlist_lock, *flags); | 438 | spin_unlock_irqrestore(hlist_lock, *flags); |
| 420 | } | 439 | } |
| 421 | 440 | ||
| 422 | void kretprobe_table_unlock(unsigned long hash, unsigned long *flags) | 441 | void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags) |
| 423 | { | 442 | { |
| 424 | spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); | 443 | spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); |
| 425 | spin_unlock_irqrestore(hlist_lock, *flags); | 444 | spin_unlock_irqrestore(hlist_lock, *flags); |
| @@ -526,9 +545,10 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) | |||
| 526 | ap->addr = p->addr; | 545 | ap->addr = p->addr; |
| 527 | ap->pre_handler = aggr_pre_handler; | 546 | ap->pre_handler = aggr_pre_handler; |
| 528 | ap->fault_handler = aggr_fault_handler; | 547 | ap->fault_handler = aggr_fault_handler; |
| 529 | if (p->post_handler) | 548 | /* We don't care the kprobe which has gone. */ |
| 549 | if (p->post_handler && !kprobe_gone(p)) | ||
| 530 | ap->post_handler = aggr_post_handler; | 550 | ap->post_handler = aggr_post_handler; |
| 531 | if (p->break_handler) | 551 | if (p->break_handler && !kprobe_gone(p)) |
| 532 | ap->break_handler = aggr_break_handler; | 552 | ap->break_handler = aggr_break_handler; |
| 533 | 553 | ||
| 534 | INIT_LIST_HEAD(&ap->list); | 554 | INIT_LIST_HEAD(&ap->list); |
| @@ -547,17 +567,41 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p, | |||
| 547 | int ret = 0; | 567 | int ret = 0; |
| 548 | struct kprobe *ap; | 568 | struct kprobe *ap; |
| 549 | 569 | ||
| 570 | if (kprobe_gone(old_p)) { | ||
| 571 | /* | ||
| 572 | * Attempting to insert new probe at the same location that | ||
| 573 | * had a probe in the module vaddr area which already | ||
| 574 | * freed. So, the instruction slot has already been | ||
| 575 | * released. We need a new slot for the new probe. | ||
| 576 | */ | ||
| 577 | ret = arch_prepare_kprobe(old_p); | ||
| 578 | if (ret) | ||
| 579 | return ret; | ||
| 580 | } | ||
| 550 | if (old_p->pre_handler == aggr_pre_handler) { | 581 | if (old_p->pre_handler == aggr_pre_handler) { |
| 551 | copy_kprobe(old_p, p); | 582 | copy_kprobe(old_p, p); |
| 552 | ret = add_new_kprobe(old_p, p); | 583 | ret = add_new_kprobe(old_p, p); |
| 584 | ap = old_p; | ||
| 553 | } else { | 585 | } else { |
| 554 | ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL); | 586 | ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL); |
| 555 | if (!ap) | 587 | if (!ap) { |
| 588 | if (kprobe_gone(old_p)) | ||
| 589 | arch_remove_kprobe(old_p); | ||
| 556 | return -ENOMEM; | 590 | return -ENOMEM; |
| 591 | } | ||
| 557 | add_aggr_kprobe(ap, old_p); | 592 | add_aggr_kprobe(ap, old_p); |
| 558 | copy_kprobe(ap, p); | 593 | copy_kprobe(ap, p); |
| 559 | ret = add_new_kprobe(ap, p); | 594 | ret = add_new_kprobe(ap, p); |
| 560 | } | 595 | } |
| 596 | if (kprobe_gone(old_p)) { | ||
| 597 | /* | ||
| 598 | * If the old_p has gone, its breakpoint has been disarmed. | ||
| 599 | * We have to arm it again after preparing real kprobes. | ||
| 600 | */ | ||
| 601 | ap->flags &= ~KPROBE_FLAG_GONE; | ||
| 602 | if (kprobe_enabled) | ||
| 603 | arch_arm_kprobe(ap); | ||
| 604 | } | ||
| 561 | return ret; | 605 | return ret; |
| 562 | } | 606 | } |
| 563 | 607 | ||
| @@ -600,8 +644,7 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) | |||
| 600 | return (kprobe_opcode_t *)(((char *)addr) + p->offset); | 644 | return (kprobe_opcode_t *)(((char *)addr) + p->offset); |
| 601 | } | 645 | } |
| 602 | 646 | ||
| 603 | static int __kprobes __register_kprobe(struct kprobe *p, | 647 | int __kprobes register_kprobe(struct kprobe *p) |
| 604 | unsigned long called_from) | ||
| 605 | { | 648 | { |
| 606 | int ret = 0; | 649 | int ret = 0; |
| 607 | struct kprobe *old_p; | 650 | struct kprobe *old_p; |
| @@ -620,28 +663,30 @@ static int __kprobes __register_kprobe(struct kprobe *p, | |||
| 620 | return -EINVAL; | 663 | return -EINVAL; |
| 621 | } | 664 | } |
| 622 | 665 | ||
| 623 | p->mod_refcounted = 0; | 666 | p->flags = 0; |
| 624 | |||
| 625 | /* | 667 | /* |
| 626 | * Check if are we probing a module. | 668 | * Check if are we probing a module. |
| 627 | */ | 669 | */ |
| 628 | probed_mod = __module_text_address((unsigned long) p->addr); | 670 | probed_mod = __module_text_address((unsigned long) p->addr); |
| 629 | if (probed_mod) { | 671 | if (probed_mod) { |
| 630 | struct module *calling_mod; | ||
| 631 | calling_mod = __module_text_address(called_from); | ||
| 632 | /* | 672 | /* |
| 633 | * We must allow modules to probe themself and in this case | 673 | * We must hold a refcount of the probed module while updating |
| 634 | * avoid incrementing the module refcount, so as to allow | 674 | * its code to prohibit unexpected unloading. |
| 635 | * unloading of self probing modules. | ||
| 636 | */ | 675 | */ |
| 637 | if (calling_mod && calling_mod != probed_mod) { | 676 | if (unlikely(!try_module_get(probed_mod))) { |
| 638 | if (unlikely(!try_module_get(probed_mod))) { | 677 | preempt_enable(); |
| 639 | preempt_enable(); | 678 | return -EINVAL; |
| 640 | return -EINVAL; | 679 | } |
| 641 | } | 680 | /* |
| 642 | p->mod_refcounted = 1; | 681 | * If the module freed .init.text, we couldn't insert |
| 643 | } else | 682 | * kprobes in there. |
| 644 | probed_mod = NULL; | 683 | */ |
| 684 | if (within_module_init((unsigned long)p->addr, probed_mod) && | ||
| 685 | probed_mod->state != MODULE_STATE_COMING) { | ||
| 686 | module_put(probed_mod); | ||
| 687 | preempt_enable(); | ||
| 688 | return -EINVAL; | ||
| 689 | } | ||
| 645 | } | 690 | } |
| 646 | preempt_enable(); | 691 | preempt_enable(); |
| 647 | 692 | ||
| @@ -668,8 +713,9 @@ static int __kprobes __register_kprobe(struct kprobe *p, | |||
| 668 | out: | 713 | out: |
| 669 | mutex_unlock(&kprobe_mutex); | 714 | mutex_unlock(&kprobe_mutex); |
| 670 | 715 | ||
| 671 | if (ret && probed_mod) | 716 | if (probed_mod) |
| 672 | module_put(probed_mod); | 717 | module_put(probed_mod); |
| 718 | |||
| 673 | return ret; | 719 | return ret; |
| 674 | } | 720 | } |
| 675 | 721 | ||
| @@ -697,16 +743,16 @@ valid_p: | |||
| 697 | list_is_singular(&old_p->list))) { | 743 | list_is_singular(&old_p->list))) { |
| 698 | /* | 744 | /* |
| 699 | * Only probe on the hash list. Disarm only if kprobes are | 745 | * Only probe on the hash list. Disarm only if kprobes are |
| 700 | * enabled - otherwise, the breakpoint would already have | 746 | * enabled and not gone - otherwise, the breakpoint would |
| 701 | * been removed. We save on flushing icache. | 747 | * already have been removed. We save on flushing icache. |
| 702 | */ | 748 | */ |
| 703 | if (kprobe_enabled) | 749 | if (kprobe_enabled && !kprobe_gone(old_p)) |
| 704 | arch_disarm_kprobe(p); | 750 | arch_disarm_kprobe(p); |
| 705 | hlist_del_rcu(&old_p->hlist); | 751 | hlist_del_rcu(&old_p->hlist); |
| 706 | } else { | 752 | } else { |
| 707 | if (p->break_handler) | 753 | if (p->break_handler && !kprobe_gone(p)) |
| 708 | old_p->break_handler = NULL; | 754 | old_p->break_handler = NULL; |
| 709 | if (p->post_handler) { | 755 | if (p->post_handler && !kprobe_gone(p)) { |
| 710 | list_for_each_entry_rcu(list_p, &old_p->list, list) { | 756 | list_for_each_entry_rcu(list_p, &old_p->list, list) { |
| 711 | if ((list_p != p) && (list_p->post_handler)) | 757 | if ((list_p != p) && (list_p->post_handler)) |
| 712 | goto noclean; | 758 | goto noclean; |
| @@ -721,39 +767,27 @@ noclean: | |||
| 721 | 767 | ||
| 722 | static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) | 768 | static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) |
| 723 | { | 769 | { |
| 724 | struct module *mod; | ||
| 725 | struct kprobe *old_p; | 770 | struct kprobe *old_p; |
| 726 | 771 | ||
| 727 | if (p->mod_refcounted) { | 772 | if (list_empty(&p->list)) |
| 728 | /* | ||
| 729 | * Since we've already incremented refcount, | ||
| 730 | * we don't need to disable preemption. | ||
| 731 | */ | ||
| 732 | mod = module_text_address((unsigned long)p->addr); | ||
| 733 | if (mod) | ||
| 734 | module_put(mod); | ||
| 735 | } | ||
| 736 | |||
| 737 | if (list_empty(&p->list) || list_is_singular(&p->list)) { | ||
| 738 | if (!list_empty(&p->list)) { | ||
| 739 | /* "p" is the last child of an aggr_kprobe */ | ||
| 740 | old_p = list_entry(p->list.next, struct kprobe, list); | ||
| 741 | list_del(&p->list); | ||
| 742 | kfree(old_p); | ||
| 743 | } | ||
| 744 | arch_remove_kprobe(p); | 773 | arch_remove_kprobe(p); |
| 774 | else if (list_is_singular(&p->list)) { | ||
| 775 | /* "p" is the last child of an aggr_kprobe */ | ||
| 776 | old_p = list_entry(p->list.next, struct kprobe, list); | ||
| 777 | list_del(&p->list); | ||
| 778 | arch_remove_kprobe(old_p); | ||
| 779 | kfree(old_p); | ||
| 745 | } | 780 | } |
| 746 | } | 781 | } |
| 747 | 782 | ||
| 748 | static int __register_kprobes(struct kprobe **kps, int num, | 783 | int __kprobes register_kprobes(struct kprobe **kps, int num) |
| 749 | unsigned long called_from) | ||
| 750 | { | 784 | { |
| 751 | int i, ret = 0; | 785 | int i, ret = 0; |
| 752 | 786 | ||
| 753 | if (num <= 0) | 787 | if (num <= 0) |
| 754 | return -EINVAL; | 788 | return -EINVAL; |
| 755 | for (i = 0; i < num; i++) { | 789 | for (i = 0; i < num; i++) { |
| 756 | ret = __register_kprobe(kps[i], called_from); | 790 | ret = register_kprobe(kps[i]); |
| 757 | if (ret < 0) { | 791 | if (ret < 0) { |
| 758 | if (i > 0) | 792 | if (i > 0) |
| 759 | unregister_kprobes(kps, i); | 793 | unregister_kprobes(kps, i); |
| @@ -763,26 +797,11 @@ static int __register_kprobes(struct kprobe **kps, int num, | |||
| 763 | return ret; | 797 | return ret; |
| 764 | } | 798 | } |
| 765 | 799 | ||
| 766 | /* | ||
| 767 | * Registration and unregistration functions for kprobe. | ||
| 768 | */ | ||
| 769 | int __kprobes register_kprobe(struct kprobe *p) | ||
| 770 | { | ||
| 771 | return __register_kprobes(&p, 1, | ||
| 772 | (unsigned long)__builtin_return_address(0)); | ||
| 773 | } | ||
| 774 | |||
| 775 | void __kprobes unregister_kprobe(struct kprobe *p) | 800 | void __kprobes unregister_kprobe(struct kprobe *p) |
| 776 | { | 801 | { |
| 777 | unregister_kprobes(&p, 1); | 802 | unregister_kprobes(&p, 1); |
| 778 | } | 803 | } |
| 779 | 804 | ||
| 780 | int __kprobes register_kprobes(struct kprobe **kps, int num) | ||
| 781 | { | ||
| 782 | return __register_kprobes(kps, num, | ||
| 783 | (unsigned long)__builtin_return_address(0)); | ||
| 784 | } | ||
| 785 | |||
| 786 | void __kprobes unregister_kprobes(struct kprobe **kps, int num) | 805 | void __kprobes unregister_kprobes(struct kprobe **kps, int num) |
| 787 | { | 806 | { |
| 788 | int i; | 807 | int i; |
| @@ -811,8 +830,7 @@ unsigned long __weak arch_deref_entry_point(void *entry) | |||
| 811 | return (unsigned long)entry; | 830 | return (unsigned long)entry; |
| 812 | } | 831 | } |
| 813 | 832 | ||
| 814 | static int __register_jprobes(struct jprobe **jps, int num, | 833 | int __kprobes register_jprobes(struct jprobe **jps, int num) |
| 815 | unsigned long called_from) | ||
| 816 | { | 834 | { |
| 817 | struct jprobe *jp; | 835 | struct jprobe *jp; |
| 818 | int ret = 0, i; | 836 | int ret = 0, i; |
| @@ -830,7 +848,7 @@ static int __register_jprobes(struct jprobe **jps, int num, | |||
| 830 | /* Todo: Verify probepoint is a function entry point */ | 848 | /* Todo: Verify probepoint is a function entry point */ |
| 831 | jp->kp.pre_handler = setjmp_pre_handler; | 849 | jp->kp.pre_handler = setjmp_pre_handler; |
| 832 | jp->kp.break_handler = longjmp_break_handler; | 850 | jp->kp.break_handler = longjmp_break_handler; |
| 833 | ret = __register_kprobe(&jp->kp, called_from); | 851 | ret = register_kprobe(&jp->kp); |
| 834 | } | 852 | } |
| 835 | if (ret < 0) { | 853 | if (ret < 0) { |
| 836 | if (i > 0) | 854 | if (i > 0) |
| @@ -843,8 +861,7 @@ static int __register_jprobes(struct jprobe **jps, int num, | |||
| 843 | 861 | ||
| 844 | int __kprobes register_jprobe(struct jprobe *jp) | 862 | int __kprobes register_jprobe(struct jprobe *jp) |
| 845 | { | 863 | { |
| 846 | return __register_jprobes(&jp, 1, | 864 | return register_jprobes(&jp, 1); |
| 847 | (unsigned long)__builtin_return_address(0)); | ||
| 848 | } | 865 | } |
| 849 | 866 | ||
| 850 | void __kprobes unregister_jprobe(struct jprobe *jp) | 867 | void __kprobes unregister_jprobe(struct jprobe *jp) |
| @@ -852,12 +869,6 @@ void __kprobes unregister_jprobe(struct jprobe *jp) | |||
| 852 | unregister_jprobes(&jp, 1); | 869 | unregister_jprobes(&jp, 1); |
| 853 | } | 870 | } |
| 854 | 871 | ||
| 855 | int __kprobes register_jprobes(struct jprobe **jps, int num) | ||
| 856 | { | ||
| 857 | return __register_jprobes(jps, num, | ||
| 858 | (unsigned long)__builtin_return_address(0)); | ||
| 859 | } | ||
| 860 | |||
| 861 | void __kprobes unregister_jprobes(struct jprobe **jps, int num) | 872 | void __kprobes unregister_jprobes(struct jprobe **jps, int num) |
| 862 | { | 873 | { |
| 863 | int i; | 874 | int i; |
| @@ -920,8 +931,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, | |||
| 920 | return 0; | 931 | return 0; |
| 921 | } | 932 | } |
| 922 | 933 | ||
| 923 | static int __kprobes __register_kretprobe(struct kretprobe *rp, | 934 | int __kprobes register_kretprobe(struct kretprobe *rp) |
| 924 | unsigned long called_from) | ||
| 925 | { | 935 | { |
| 926 | int ret = 0; | 936 | int ret = 0; |
| 927 | struct kretprobe_instance *inst; | 937 | struct kretprobe_instance *inst; |
| @@ -967,21 +977,20 @@ static int __kprobes __register_kretprobe(struct kretprobe *rp, | |||
| 967 | 977 | ||
| 968 | rp->nmissed = 0; | 978 | rp->nmissed = 0; |
| 969 | /* Establish function entry probe point */ | 979 | /* Establish function entry probe point */ |
| 970 | ret = __register_kprobe(&rp->kp, called_from); | 980 | ret = register_kprobe(&rp->kp); |
| 971 | if (ret != 0) | 981 | if (ret != 0) |
| 972 | free_rp_inst(rp); | 982 | free_rp_inst(rp); |
| 973 | return ret; | 983 | return ret; |
| 974 | } | 984 | } |
| 975 | 985 | ||
| 976 | static int __register_kretprobes(struct kretprobe **rps, int num, | 986 | int __kprobes register_kretprobes(struct kretprobe **rps, int num) |
| 977 | unsigned long called_from) | ||
| 978 | { | 987 | { |
| 979 | int ret = 0, i; | 988 | int ret = 0, i; |
| 980 | 989 | ||
| 981 | if (num <= 0) | 990 | if (num <= 0) |
| 982 | return -EINVAL; | 991 | return -EINVAL; |
| 983 | for (i = 0; i < num; i++) { | 992 | for (i = 0; i < num; i++) { |
| 984 | ret = __register_kretprobe(rps[i], called_from); | 993 | ret = register_kretprobe(rps[i]); |
| 985 | if (ret < 0) { | 994 | if (ret < 0) { |
| 986 | if (i > 0) | 995 | if (i > 0) |
| 987 | unregister_kretprobes(rps, i); | 996 | unregister_kretprobes(rps, i); |
| @@ -991,23 +1000,11 @@ static int __register_kretprobes(struct kretprobe **rps, int num, | |||
| 991 | return ret; | 1000 | return ret; |
| 992 | } | 1001 | } |
| 993 | 1002 | ||
| 994 | int __kprobes register_kretprobe(struct kretprobe *rp) | ||
| 995 | { | ||
| 996 | return __register_kretprobes(&rp, 1, | ||
| 997 | (unsigned long)__builtin_return_address(0)); | ||
| 998 | } | ||
| 999 | |||
| 1000 | void __kprobes unregister_kretprobe(struct kretprobe *rp) | 1003 | void __kprobes unregister_kretprobe(struct kretprobe *rp) |
| 1001 | { | 1004 | { |
| 1002 | unregister_kretprobes(&rp, 1); | 1005 | unregister_kretprobes(&rp, 1); |
| 1003 | } | 1006 | } |
| 1004 | 1007 | ||
| 1005 | int __kprobes register_kretprobes(struct kretprobe **rps, int num) | ||
| 1006 | { | ||
| 1007 | return __register_kretprobes(rps, num, | ||
| 1008 | (unsigned long)__builtin_return_address(0)); | ||
| 1009 | } | ||
| 1010 | |||
| 1011 | void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) | 1008 | void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) |
| 1012 | { | 1009 | { |
| 1013 | int i; | 1010 | int i; |
| @@ -1055,6 +1052,72 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, | |||
| 1055 | 1052 | ||
| 1056 | #endif /* CONFIG_KRETPROBES */ | 1053 | #endif /* CONFIG_KRETPROBES */ |
| 1057 | 1054 | ||
| 1055 | /* Set the kprobe gone and remove its instruction buffer. */ | ||
| 1056 | static void __kprobes kill_kprobe(struct kprobe *p) | ||
| 1057 | { | ||
| 1058 | struct kprobe *kp; | ||
| 1059 | p->flags |= KPROBE_FLAG_GONE; | ||
| 1060 | if (p->pre_handler == aggr_pre_handler) { | ||
| 1061 | /* | ||
| 1062 | * If this is an aggr_kprobe, we have to list all the | ||
| 1063 | * chained probes and mark them GONE. | ||
| 1064 | */ | ||
| 1065 | list_for_each_entry_rcu(kp, &p->list, list) | ||
| 1066 | kp->flags |= KPROBE_FLAG_GONE; | ||
| 1067 | p->post_handler = NULL; | ||
| 1068 | p->break_handler = NULL; | ||
| 1069 | } | ||
| 1070 | /* | ||
| 1071 | * Here, we can remove insn_slot safely, because no thread calls | ||
| 1072 | * the original probed function (which will be freed soon) any more. | ||
| 1073 | */ | ||
| 1074 | arch_remove_kprobe(p); | ||
| 1075 | } | ||
| 1076 | |||
| 1077 | /* Module notifier call back, checking kprobes on the module */ | ||
| 1078 | static int __kprobes kprobes_module_callback(struct notifier_block *nb, | ||
| 1079 | unsigned long val, void *data) | ||
| 1080 | { | ||
| 1081 | struct module *mod = data; | ||
| 1082 | struct hlist_head *head; | ||
| 1083 | struct hlist_node *node; | ||
| 1084 | struct kprobe *p; | ||
| 1085 | unsigned int i; | ||
| 1086 | int checkcore = (val == MODULE_STATE_GOING); | ||
| 1087 | |||
| 1088 | if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE) | ||
| 1089 | return NOTIFY_DONE; | ||
| 1090 | |||
| 1091 | /* | ||
| 1092 | * When MODULE_STATE_GOING was notified, both of module .text and | ||
| 1093 | * .init.text sections would be freed. When MODULE_STATE_LIVE was | ||
| 1094 | * notified, only .init.text section would be freed. We need to | ||
| 1095 | * disable kprobes which have been inserted in the sections. | ||
| 1096 | */ | ||
| 1097 | mutex_lock(&kprobe_mutex); | ||
| 1098 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | ||
| 1099 | head = &kprobe_table[i]; | ||
| 1100 | hlist_for_each_entry_rcu(p, node, head, hlist) | ||
| 1101 | if (within_module_init((unsigned long)p->addr, mod) || | ||
| 1102 | (checkcore && | ||
| 1103 | within_module_core((unsigned long)p->addr, mod))) { | ||
| 1104 | /* | ||
| 1105 | * The vaddr at which this probe is installed will soon | ||
| 1106 | * be vfreed but not synced to disk. Hence, | ||
| 1107 | * disarming the breakpoint isn't needed. | ||
| 1108 | */ | ||
| 1109 | kill_kprobe(p); | ||
| 1110 | } | ||
| 1111 | } | ||
| 1112 | mutex_unlock(&kprobe_mutex); | ||
| 1113 | return NOTIFY_DONE; | ||
| 1114 | } | ||
| 1115 | |||
| 1116 | static struct notifier_block kprobe_module_nb = { | ||
| 1117 | .notifier_call = kprobes_module_callback, | ||
| 1118 | .priority = 0 | ||
| 1119 | }; | ||
| 1120 | |||
| 1058 | static int __init init_kprobes(void) | 1121 | static int __init init_kprobes(void) |
| 1059 | { | 1122 | { |
| 1060 | int i, err = 0; | 1123 | int i, err = 0; |
| @@ -1111,6 +1174,9 @@ static int __init init_kprobes(void) | |||
| 1111 | err = arch_init_kprobes(); | 1174 | err = arch_init_kprobes(); |
| 1112 | if (!err) | 1175 | if (!err) |
| 1113 | err = register_die_notifier(&kprobe_exceptions_nb); | 1176 | err = register_die_notifier(&kprobe_exceptions_nb); |
| 1177 | if (!err) | ||
| 1178 | err = register_module_notifier(&kprobe_module_nb); | ||
| 1179 | |||
| 1114 | kprobes_initialized = (err == 0); | 1180 | kprobes_initialized = (err == 0); |
| 1115 | 1181 | ||
| 1116 | if (!err) | 1182 | if (!err) |
| @@ -1131,10 +1197,12 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, | |||
| 1131 | else | 1197 | else |
| 1132 | kprobe_type = "k"; | 1198 | kprobe_type = "k"; |
| 1133 | if (sym) | 1199 | if (sym) |
| 1134 | seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type, | 1200 | seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type, |
| 1135 | sym, offset, (modname ? modname : " ")); | 1201 | sym, offset, (modname ? modname : " "), |
| 1202 | (kprobe_gone(p) ? "[GONE]" : "")); | ||
| 1136 | else | 1203 | else |
| 1137 | seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr); | 1204 | seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr, |
| 1205 | (kprobe_gone(p) ? "[GONE]" : "")); | ||
| 1138 | } | 1206 | } |
| 1139 | 1207 | ||
| 1140 | static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) | 1208 | static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) |
| @@ -1215,7 +1283,8 @@ static void __kprobes enable_all_kprobes(void) | |||
| 1215 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 1283 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
| 1216 | head = &kprobe_table[i]; | 1284 | head = &kprobe_table[i]; |
| 1217 | hlist_for_each_entry_rcu(p, node, head, hlist) | 1285 | hlist_for_each_entry_rcu(p, node, head, hlist) |
| 1218 | arch_arm_kprobe(p); | 1286 | if (!kprobe_gone(p)) |
| 1287 | arch_arm_kprobe(p); | ||
| 1219 | } | 1288 | } |
| 1220 | 1289 | ||
| 1221 | kprobe_enabled = true; | 1290 | kprobe_enabled = true; |
| @@ -1244,7 +1313,7 @@ static void __kprobes disable_all_kprobes(void) | |||
| 1244 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 1313 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
| 1245 | head = &kprobe_table[i]; | 1314 | head = &kprobe_table[i]; |
| 1246 | hlist_for_each_entry_rcu(p, node, head, hlist) { | 1315 | hlist_for_each_entry_rcu(p, node, head, hlist) { |
| 1247 | if (!arch_trampoline_kprobe(p)) | 1316 | if (!arch_trampoline_kprobe(p) && !kprobe_gone(p)) |
| 1248 | arch_disarm_kprobe(p); | 1317 | arch_disarm_kprobe(p); |
| 1249 | } | 1318 | } |
| 1250 | } | 1319 | } |
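For context, a minimal sketch of the module-notifier pattern the kprobes hunk above relies on; this is an illustrative subscriber (the names example_module_cb and example_nb are made up), not code from this diff. MODULE_STATE_LIVE fires once a module's init has run and its .init.text is about to be discarded; MODULE_STATE_GOING fires before the whole module is freed:

        #include <linux/module.h>
        #include <linux/notifier.h>

        static int example_module_cb(struct notifier_block *nb,
                                     unsigned long val, void *data)
        {
                struct module *mod = data;

                if (val == MODULE_STATE_LIVE)
                        printk(KERN_INFO "%s: .init sections being freed\n",
                               mod->name);
                else if (val == MODULE_STATE_GOING)
                        printk(KERN_INFO "%s: module being freed\n",
                               mod->name);
                return NOTIFY_DONE;
        }

        static struct notifier_block example_nb = {
                .notifier_call = example_module_cb,
        };

        /* Pair register_module_notifier(&example_nb) in an initcall with
         * unregister_module_notifier(&example_nb) on teardown. */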
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index 08dd8ed86c77..528dd78e7e7e 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c | |||
| @@ -24,7 +24,7 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) | |||
| 24 | static struct kobj_attribute _name##_attr = \ | 24 | static struct kobj_attribute _name##_attr = \ |
| 25 | __ATTR(_name, 0644, _name##_show, _name##_store) | 25 | __ATTR(_name, 0644, _name##_show, _name##_store) |
| 26 | 26 | ||
| 27 | #if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET) | 27 | #if defined(CONFIG_HOTPLUG) |
| 28 | /* current uevent sequence number */ | 28 | /* current uevent sequence number */ |
| 29 | static ssize_t uevent_seqnum_show(struct kobject *kobj, | 29 | static ssize_t uevent_seqnum_show(struct kobject *kobj, |
| 30 | struct kobj_attribute *attr, char *buf) | 30 | struct kobj_attribute *attr, char *buf) |
| @@ -137,7 +137,7 @@ struct kobject *kernel_kobj; | |||
| 137 | EXPORT_SYMBOL_GPL(kernel_kobj); | 137 | EXPORT_SYMBOL_GPL(kernel_kobj); |
| 138 | 138 | ||
| 139 | static struct attribute * kernel_attrs[] = { | 139 | static struct attribute * kernel_attrs[] = { |
| 140 | #if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET) | 140 | #if defined(CONFIG_HOTPLUG) |
| 141 | &uevent_seqnum_attr.attr, | 141 | &uevent_seqnum_attr.attr, |
| 142 | &uevent_helper_attr.attr, | 142 | &uevent_helper_attr.attr, |
| 143 | #endif | 143 | #endif |
diff --git a/kernel/module.c b/kernel/module.c index dd2a54155b54..496dcb57b608 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -43,7 +43,6 @@ | |||
| 43 | #include <linux/device.h> | 43 | #include <linux/device.h> |
| 44 | #include <linux/string.h> | 44 | #include <linux/string.h> |
| 45 | #include <linux/mutex.h> | 45 | #include <linux/mutex.h> |
| 46 | #include <linux/unwind.h> | ||
| 47 | #include <linux/rculist.h> | 46 | #include <linux/rculist.h> |
| 48 | #include <asm/uaccess.h> | 47 | #include <asm/uaccess.h> |
| 49 | #include <asm/cacheflush.h> | 48 | #include <asm/cacheflush.h> |
| @@ -757,8 +756,16 @@ sys_delete_module(const char __user *name_user, unsigned int flags) | |||
| 757 | return -EFAULT; | 756 | return -EFAULT; |
| 758 | name[MODULE_NAME_LEN-1] = '\0'; | 757 | name[MODULE_NAME_LEN-1] = '\0'; |
| 759 | 758 | ||
| 760 | if (mutex_lock_interruptible(&module_mutex) != 0) | 759 | /* Create stop_machine threads since free_module relies on |
| 761 | return -EINTR; | 760 | * a non-failing stop_machine call. */ |
| 761 | ret = stop_machine_create(); | ||
| 762 | if (ret) | ||
| 763 | return ret; | ||
| 764 | |||
| 765 | if (mutex_lock_interruptible(&module_mutex) != 0) { | ||
| 766 | ret = -EINTR; | ||
| 767 | goto out_stop; | ||
| 768 | } | ||
| 762 | 769 | ||
| 763 | mod = find_module(name); | 770 | mod = find_module(name); |
| 764 | if (!mod) { | 771 | if (!mod) { |
| @@ -817,10 +824,12 @@ sys_delete_module(const char __user *name_user, unsigned int flags) | |||
| 817 | 824 | ||
| 818 | out: | 825 | out: |
| 819 | mutex_unlock(&module_mutex); | 826 | mutex_unlock(&module_mutex); |
| 827 | out_stop: | ||
| 828 | stop_machine_destroy(); | ||
| 820 | return ret; | 829 | return ret; |
| 821 | } | 830 | } |
| 822 | 831 | ||
| 823 | static void print_unload_info(struct seq_file *m, struct module *mod) | 832 | static inline void print_unload_info(struct seq_file *m, struct module *mod) |
| 824 | { | 833 | { |
| 825 | struct module_use *use; | 834 | struct module_use *use; |
| 826 | int printed_something = 0; | 835 | int printed_something = 0; |
| @@ -893,7 +902,7 @@ void module_put(struct module *module) | |||
| 893 | EXPORT_SYMBOL(module_put); | 902 | EXPORT_SYMBOL(module_put); |
| 894 | 903 | ||
| 895 | #else /* !CONFIG_MODULE_UNLOAD */ | 904 | #else /* !CONFIG_MODULE_UNLOAD */ |
| 896 | static void print_unload_info(struct seq_file *m, struct module *mod) | 905 | static inline void print_unload_info(struct seq_file *m, struct module *mod) |
| 897 | { | 906 | { |
| 898 | /* We don't know the usage count, or what modules are using. */ | 907 | /* We don't know the usage count, or what modules are using. */ |
| 899 | seq_printf(m, " - -"); | 908 | seq_printf(m, " - -"); |
| @@ -1439,8 +1448,6 @@ static void free_module(struct module *mod) | |||
| 1439 | remove_sect_attrs(mod); | 1448 | remove_sect_attrs(mod); |
| 1440 | mod_kobject_remove(mod); | 1449 | mod_kobject_remove(mod); |
| 1441 | 1450 | ||
| 1442 | unwind_remove_table(mod->unwind_info, 0); | ||
| 1443 | |||
| 1444 | /* Arch-specific cleanup. */ | 1451 | /* Arch-specific cleanup. */ |
| 1445 | module_arch_cleanup(mod); | 1452 | module_arch_cleanup(mod); |
| 1446 | 1453 | ||
| @@ -1578,11 +1585,21 @@ static int simplify_symbols(Elf_Shdr *sechdrs, | |||
| 1578 | return ret; | 1585 | return ret; |
| 1579 | } | 1586 | } |
| 1580 | 1587 | ||
| 1588 | /* Additional bytes needed by arch in front of individual sections */ | ||
| 1589 | unsigned int __weak arch_mod_section_prepend(struct module *mod, | ||
| 1590 | unsigned int section) | ||
| 1591 | { | ||
| 1592 | /* default implementation just returns zero */ | ||
| 1593 | return 0; | ||
| 1594 | } | ||
| 1595 | |||
| 1581 | /* Update size with this section: return offset. */ | 1596 | /* Update size with this section: return offset. */ |
| 1582 | static long get_offset(unsigned int *size, Elf_Shdr *sechdr) | 1597 | static long get_offset(struct module *mod, unsigned int *size, |
| 1598 | Elf_Shdr *sechdr, unsigned int section) | ||
| 1583 | { | 1599 | { |
| 1584 | long ret; | 1600 | long ret; |
| 1585 | 1601 | ||
| 1602 | *size += arch_mod_section_prepend(mod, section); | ||
| 1586 | ret = ALIGN(*size, sechdr->sh_addralign ?: 1); | 1603 | ret = ALIGN(*size, sechdr->sh_addralign ?: 1); |
| 1587 | *size = ret + sechdr->sh_size; | 1604 | *size = ret + sechdr->sh_size; |
| 1588 | return ret; | 1605 | return ret; |
| @@ -1622,7 +1639,7 @@ static void layout_sections(struct module *mod, | |||
| 1622 | || strncmp(secstrings + s->sh_name, | 1639 | || strncmp(secstrings + s->sh_name, |
| 1623 | ".init", 5) == 0) | 1640 | ".init", 5) == 0) |
| 1624 | continue; | 1641 | continue; |
| 1625 | s->sh_entsize = get_offset(&mod->core_size, s); | 1642 | s->sh_entsize = get_offset(mod, &mod->core_size, s, i); |
| 1626 | DEBUGP("\t%s\n", secstrings + s->sh_name); | 1643 | DEBUGP("\t%s\n", secstrings + s->sh_name); |
| 1627 | } | 1644 | } |
| 1628 | if (m == 0) | 1645 | if (m == 0) |
| @@ -1640,7 +1657,7 @@ static void layout_sections(struct module *mod, | |||
| 1640 | || strncmp(secstrings + s->sh_name, | 1657 | || strncmp(secstrings + s->sh_name, |
| 1641 | ".init", 5) != 0) | 1658 | ".init", 5) != 0) |
| 1642 | continue; | 1659 | continue; |
| 1643 | s->sh_entsize = (get_offset(&mod->init_size, s) | 1660 | s->sh_entsize = (get_offset(mod, &mod->init_size, s, i) |
| 1644 | | INIT_OFFSET_MASK); | 1661 | | INIT_OFFSET_MASK); |
| 1645 | DEBUGP("\t%s\n", secstrings + s->sh_name); | 1662 | DEBUGP("\t%s\n", secstrings + s->sh_name); |
| 1646 | } | 1663 | } |
| @@ -1725,15 +1742,15 @@ static const struct kernel_symbol *lookup_symbol(const char *name, | |||
| 1725 | return NULL; | 1742 | return NULL; |
| 1726 | } | 1743 | } |
| 1727 | 1744 | ||
| 1728 | static int is_exported(const char *name, const struct module *mod) | 1745 | static int is_exported(const char *name, unsigned long value, |
| 1746 | const struct module *mod) | ||
| 1729 | { | 1747 | { |
| 1730 | if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab)) | 1748 | const struct kernel_symbol *ks; |
| 1731 | return 1; | 1749 | if (!mod) |
| 1750 | ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab); | ||
| 1732 | else | 1751 | else |
| 1733 | if (mod && lookup_symbol(name, mod->syms, mod->syms + mod->num_syms)) | 1752 | ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms); |
| 1734 | return 1; | 1753 | return ks != NULL && ks->value == value; |
| 1735 | else | ||
| 1736 | return 0; | ||
| 1737 | } | 1754 | } |
| 1738 | 1755 | ||
| 1739 | /* As per nm */ | 1756 | /* As per nm */ |
| @@ -1847,7 +1864,6 @@ static noinline struct module *load_module(void __user *umod, | |||
| 1847 | unsigned int symindex = 0; | 1864 | unsigned int symindex = 0; |
| 1848 | unsigned int strindex = 0; | 1865 | unsigned int strindex = 0; |
| 1849 | unsigned int modindex, versindex, infoindex, pcpuindex; | 1866 | unsigned int modindex, versindex, infoindex, pcpuindex; |
| 1850 | unsigned int unwindex = 0; | ||
| 1851 | unsigned int num_kp, num_mcount; | 1867 | unsigned int num_kp, num_mcount; |
| 1852 | struct kernel_param *kp; | 1868 | struct kernel_param *kp; |
| 1853 | struct module *mod; | 1869 | struct module *mod; |
| @@ -1865,6 +1881,13 @@ static noinline struct module *load_module(void __user *umod, | |||
| 1865 | /* vmalloc barfs on "unusual" numbers. Check here */ | 1881 | /* vmalloc barfs on "unusual" numbers. Check here */ |
| 1866 | if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL) | 1882 | if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL) |
| 1867 | return ERR_PTR(-ENOMEM); | 1883 | return ERR_PTR(-ENOMEM); |
| 1884 | |||
| 1885 | /* Create stop_machine threads since the error path relies on | ||
| 1886 | * a non-failing stop_machine call. */ | ||
| 1887 | err = stop_machine_create(); | ||
| 1888 | if (err) | ||
| 1889 | goto free_hdr; | ||
| 1890 | |||
| 1868 | if (copy_from_user(hdr, umod, len) != 0) { | 1891 | if (copy_from_user(hdr, umod, len) != 0) { |
| 1869 | err = -EFAULT; | 1892 | err = -EFAULT; |
| 1870 | goto free_hdr; | 1893 | goto free_hdr; |
| @@ -1930,9 +1953,6 @@ static noinline struct module *load_module(void __user *umod, | |||
| 1930 | versindex = find_sec(hdr, sechdrs, secstrings, "__versions"); | 1953 | versindex = find_sec(hdr, sechdrs, secstrings, "__versions"); |
| 1931 | infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo"); | 1954 | infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo"); |
| 1932 | pcpuindex = find_pcpusec(hdr, sechdrs, secstrings); | 1955 | pcpuindex = find_pcpusec(hdr, sechdrs, secstrings); |
| 1933 | #ifdef ARCH_UNWIND_SECTION_NAME | ||
| 1934 | unwindex = find_sec(hdr, sechdrs, secstrings, ARCH_UNWIND_SECTION_NAME); | ||
| 1935 | #endif | ||
| 1936 | 1956 | ||
| 1937 | /* Don't keep modinfo and version sections. */ | 1957 | /* Don't keep modinfo and version sections. */ |
| 1938 | sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC; | 1958 | sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC; |
| @@ -1942,8 +1962,6 @@ static noinline struct module *load_module(void __user *umod, | |||
| 1942 | sechdrs[symindex].sh_flags |= SHF_ALLOC; | 1962 | sechdrs[symindex].sh_flags |= SHF_ALLOC; |
| 1943 | sechdrs[strindex].sh_flags |= SHF_ALLOC; | 1963 | sechdrs[strindex].sh_flags |= SHF_ALLOC; |
| 1944 | #endif | 1964 | #endif |
| 1945 | if (unwindex) | ||
| 1946 | sechdrs[unwindex].sh_flags |= SHF_ALLOC; | ||
| 1947 | 1965 | ||
| 1948 | /* Check module struct version now, before we try to use module. */ | 1966 | /* Check module struct version now, before we try to use module. */ |
| 1949 | if (!check_modstruct_version(sechdrs, versindex, mod)) { | 1967 | if (!check_modstruct_version(sechdrs, versindex, mod)) { |
| @@ -2240,14 +2258,10 @@ static noinline struct module *load_module(void __user *umod, | |||
| 2240 | add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs); | 2258 | add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs); |
| 2241 | add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs); | 2259 | add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs); |
| 2242 | 2260 | ||
| 2243 | /* Size of section 0 is 0, so this works well if no unwind info. */ | ||
| 2244 | mod->unwind_info = unwind_add_table(mod, | ||
| 2245 | (void *)sechdrs[unwindex].sh_addr, | ||
| 2246 | sechdrs[unwindex].sh_size); | ||
| 2247 | |||
| 2248 | /* Get rid of temporary copy */ | 2261 | /* Get rid of temporary copy */ |
| 2249 | vfree(hdr); | 2262 | vfree(hdr); |
| 2250 | 2263 | ||
| 2264 | stop_machine_destroy(); | ||
| 2251 | /* Done! */ | 2265 | /* Done! */ |
| 2252 | return mod; | 2266 | return mod; |
| 2253 | 2267 | ||
| @@ -2270,6 +2284,7 @@ static noinline struct module *load_module(void __user *umod, | |||
| 2270 | kfree(args); | 2284 | kfree(args); |
| 2271 | free_hdr: | 2285 | free_hdr: |
| 2272 | vfree(hdr); | 2286 | vfree(hdr); |
| 2287 | stop_machine_destroy(); | ||
| 2273 | return ERR_PTR(err); | 2288 | return ERR_PTR(err); |
| 2274 | 2289 | ||
| 2275 | truncated: | 2290 | truncated: |
| @@ -2337,11 +2352,12 @@ sys_init_module(void __user *umod, | |||
| 2337 | /* Now it's a first class citizen! Wake up anyone waiting for it. */ | 2352 | /* Now it's a first class citizen! Wake up anyone waiting for it. */ |
| 2338 | mod->state = MODULE_STATE_LIVE; | 2353 | mod->state = MODULE_STATE_LIVE; |
| 2339 | wake_up(&module_wq); | 2354 | wake_up(&module_wq); |
| 2355 | blocking_notifier_call_chain(&module_notify_list, | ||
| 2356 | MODULE_STATE_LIVE, mod); | ||
| 2340 | 2357 | ||
| 2341 | mutex_lock(&module_mutex); | 2358 | mutex_lock(&module_mutex); |
| 2342 | /* Drop initial reference. */ | 2359 | /* Drop initial reference. */ |
| 2343 | module_put(mod); | 2360 | module_put(mod); |
| 2344 | unwind_remove_table(mod->unwind_info, 1); | ||
| 2345 | module_free(mod, mod->module_init); | 2361 | module_free(mod, mod->module_init); |
| 2346 | mod->module_init = NULL; | 2362 | mod->module_init = NULL; |
| 2347 | mod->init_size = 0; | 2363 | mod->init_size = 0; |
| @@ -2376,7 +2392,7 @@ static const char *get_ksymbol(struct module *mod, | |||
| 2376 | unsigned long nextval; | 2392 | unsigned long nextval; |
| 2377 | 2393 | ||
| 2378 | /* At worst, next value is at end of module */ | 2394 | /* At worst, next value is at end of module */ |
| 2379 | if (within(addr, mod->module_init, mod->init_size)) | 2395 | if (within_module_init(addr, mod)) |
| 2380 | nextval = (unsigned long)mod->module_init+mod->init_text_size; | 2396 | nextval = (unsigned long)mod->module_init+mod->init_text_size; |
| 2381 | else | 2397 | else |
| 2382 | nextval = (unsigned long)mod->module_core+mod->core_text_size; | 2398 | nextval = (unsigned long)mod->module_core+mod->core_text_size; |
| @@ -2424,8 +2440,8 @@ const char *module_address_lookup(unsigned long addr, | |||
| 2424 | 2440 | ||
| 2425 | preempt_disable(); | 2441 | preempt_disable(); |
| 2426 | list_for_each_entry_rcu(mod, &modules, list) { | 2442 | list_for_each_entry_rcu(mod, &modules, list) { |
| 2427 | if (within(addr, mod->module_init, mod->init_size) | 2443 | if (within_module_init(addr, mod) || |
| 2428 | || within(addr, mod->module_core, mod->core_size)) { | 2444 | within_module_core(addr, mod)) { |
| 2429 | if (modname) | 2445 | if (modname) |
| 2430 | *modname = mod->name; | 2446 | *modname = mod->name; |
| 2431 | ret = get_ksymbol(mod, addr, size, offset); | 2447 | ret = get_ksymbol(mod, addr, size, offset); |
| @@ -2447,8 +2463,8 @@ int lookup_module_symbol_name(unsigned long addr, char *symname) | |||
| 2447 | 2463 | ||
| 2448 | preempt_disable(); | 2464 | preempt_disable(); |
| 2449 | list_for_each_entry_rcu(mod, &modules, list) { | 2465 | list_for_each_entry_rcu(mod, &modules, list) { |
| 2450 | if (within(addr, mod->module_init, mod->init_size) || | 2466 | if (within_module_init(addr, mod) || |
| 2451 | within(addr, mod->module_core, mod->core_size)) { | 2467 | within_module_core(addr, mod)) { |
| 2452 | const char *sym; | 2468 | const char *sym; |
| 2453 | 2469 | ||
| 2454 | sym = get_ksymbol(mod, addr, NULL, NULL); | 2470 | sym = get_ksymbol(mod, addr, NULL, NULL); |
| @@ -2471,8 +2487,8 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, | |||
| 2471 | 2487 | ||
| 2472 | preempt_disable(); | 2488 | preempt_disable(); |
| 2473 | list_for_each_entry_rcu(mod, &modules, list) { | 2489 | list_for_each_entry_rcu(mod, &modules, list) { |
| 2474 | if (within(addr, mod->module_init, mod->init_size) || | 2490 | if (within_module_init(addr, mod) || |
| 2475 | within(addr, mod->module_core, mod->core_size)) { | 2491 | within_module_core(addr, mod)) { |
| 2476 | const char *sym; | 2492 | const char *sym; |
| 2477 | 2493 | ||
| 2478 | sym = get_ksymbol(mod, addr, size, offset); | 2494 | sym = get_ksymbol(mod, addr, size, offset); |
| @@ -2504,7 +2520,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, | |||
| 2504 | strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, | 2520 | strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, |
| 2505 | KSYM_NAME_LEN); | 2521 | KSYM_NAME_LEN); |
| 2506 | strlcpy(module_name, mod->name, MODULE_NAME_LEN); | 2522 | strlcpy(module_name, mod->name, MODULE_NAME_LEN); |
| 2507 | *exported = is_exported(name, mod); | 2523 | *exported = is_exported(name, *value, mod); |
| 2508 | preempt_enable(); | 2524 | preempt_enable(); |
| 2509 | return 0; | 2525 | return 0; |
| 2510 | } | 2526 | } |
| @@ -2691,7 +2707,7 @@ int is_module_address(unsigned long addr) | |||
| 2691 | preempt_disable(); | 2707 | preempt_disable(); |
| 2692 | 2708 | ||
| 2693 | list_for_each_entry_rcu(mod, &modules, list) { | 2709 | list_for_each_entry_rcu(mod, &modules, list) { |
| 2694 | if (within(addr, mod->module_core, mod->core_size)) { | 2710 | if (within_module_core(addr, mod)) { |
| 2695 | preempt_enable(); | 2711 | preempt_enable(); |
| 2696 | return 1; | 2712 | return 1; |
| 2697 | } | 2713 | } |
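For context, the within_module_init()/within_module_core() helpers used above are introduced outside this file; assuming they simply package the old within() range checks they replace, their include/linux/module.h definitions would look roughly like:

        static inline int within_module_core(unsigned long addr,
                                             struct module *mod)
        {
                return (unsigned long)mod->module_core <= addr &&
                       addr < (unsigned long)mod->module_core + mod->core_size;
        }

        static inline int within_module_init(unsigned long addr,
                                             struct module *mod)
        {
                return (unsigned long)mod->module_init <= addr &&
                       addr < (unsigned long)mod->module_init + mod->init_size;
        }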
diff --git a/kernel/panic.c b/kernel/panic.c index 13f06349a786..2a2ff36ff44d 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
| @@ -299,6 +299,8 @@ static int init_oops_id(void) | |||
| 299 | { | 299 | { |
| 300 | if (!oops_id) | 300 | if (!oops_id) |
| 301 | get_random_bytes(&oops_id, sizeof(oops_id)); | 301 | get_random_bytes(&oops_id, sizeof(oops_id)); |
| 302 | else | ||
| 303 | oops_id++; | ||
| 302 | 304 | ||
| 303 | return 0; | 305 | return 0; |
| 304 | } | 306 | } |
diff --git a/kernel/power/main.c b/kernel/power/main.c index 613f16941b85..239988873971 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
| @@ -615,7 +615,7 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state) | |||
| 615 | /* this may fail if the RTC hasn't been initialized */ | 615 | /* this may fail if the RTC hasn't been initialized */ |
| 616 | status = rtc_read_time(rtc, &alm.time); | 616 | status = rtc_read_time(rtc, &alm.time); |
| 617 | if (status < 0) { | 617 | if (status < 0) { |
| 618 | printk(err_readtime, rtc->dev.bus_id, status); | 618 | printk(err_readtime, dev_name(&rtc->dev), status); |
| 619 | return; | 619 | return; |
| 620 | } | 620 | } |
| 621 | rtc_tm_to_time(&alm.time, &now); | 621 | rtc_tm_to_time(&alm.time, &now); |
| @@ -626,7 +626,7 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state) | |||
| 626 | 626 | ||
| 627 | status = rtc_set_alarm(rtc, &alm); | 627 | status = rtc_set_alarm(rtc, &alm); |
| 628 | if (status < 0) { | 628 | if (status < 0) { |
| 629 | printk(err_wakealarm, rtc->dev.bus_id, status); | 629 | printk(err_wakealarm, dev_name(&rtc->dev), status); |
| 630 | return; | 630 | return; |
| 631 | } | 631 | } |
| 632 | 632 | ||
| @@ -660,7 +660,7 @@ static int __init has_wakealarm(struct device *dev, void *name_ptr) | |||
| 660 | if (!device_may_wakeup(candidate->dev.parent)) | 660 | if (!device_may_wakeup(candidate->dev.parent)) |
| 661 | return 0; | 661 | return 0; |
| 662 | 662 | ||
| 663 | *(char **)name_ptr = dev->bus_id; | 663 | *(const char **)name_ptr = dev_name(dev); |
| 664 | return 1; | 664 | return 1; |
| 665 | } | 665 | } |
| 666 | 666 | ||
diff --git a/kernel/profile.c b/kernel/profile.c index d18e2d2654f2..784933acf5b8 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
| @@ -445,7 +445,6 @@ void profile_tick(int type) | |||
| 445 | #ifdef CONFIG_PROC_FS | 445 | #ifdef CONFIG_PROC_FS |
| 446 | #include <linux/proc_fs.h> | 446 | #include <linux/proc_fs.h> |
| 447 | #include <asm/uaccess.h> | 447 | #include <asm/uaccess.h> |
| 448 | #include <asm/ptrace.h> | ||
| 449 | 448 | ||
| 450 | static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, | 449 | static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, |
| 451 | int count, int *eof, void *data) | 450 | int count, int *eof, void *data) |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index ad63af8b2521..d92a76a881aa 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
| @@ -77,8 +77,15 @@ void wakeme_after_rcu(struct rcu_head *head) | |||
| 77 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | 77 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
| 78 | * and may be nested. | 78 | * and may be nested. |
| 79 | */ | 79 | */ |
| 80 | void synchronize_rcu(void); /* Makes kernel-doc tools happy */ | 80 | void synchronize_rcu(void) |
| 81 | synchronize_rcu_xxx(synchronize_rcu, call_rcu) | 81 | { |
| 82 | struct rcu_synchronize rcu; | ||
| 83 | init_completion(&rcu.completion); | ||
| 84 | /* Will wake us up after RCU has finished. */ | ||
| 85 | call_rcu(&rcu.head, wakeme_after_rcu); | ||
| 86 | /* Wait for it. */ | ||
| 87 | wait_for_completion(&rcu.completion); | ||
| 88 | } | ||
| 82 | EXPORT_SYMBOL_GPL(synchronize_rcu); | 89 | EXPORT_SYMBOL_GPL(synchronize_rcu); |
| 83 | 90 | ||
| 84 | static void rcu_barrier_callback(struct rcu_head *notused) | 91 | static void rcu_barrier_callback(struct rcu_head *notused) |
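For context, the now open-coded synchronize_rcu() implements the classic update-side wait. A hypothetical caller (struct foo, global_foo and update_foo() are made-up names) uses it to defer freeing until every pre-existing reader has left its critical section:

        struct foo { int a; };
        static struct foo *global_foo;  /* dereferenced under rcu_read_lock() */

        void update_foo(struct foo *newp)
        {
                struct foo *old = global_foo;

                rcu_assign_pointer(global_foo, newp);
                synchronize_rcu();      /* wait out all earlier readers */
                kfree(old);             /* safe: no reader can still see it */
        }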
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c index f9dc8f3720f6..33cfc50781f9 100644 --- a/kernel/rcupreempt.c +++ b/kernel/rcupreempt.c | |||
| @@ -1177,7 +1177,16 @@ EXPORT_SYMBOL_GPL(call_rcu_sched); | |||
| 1177 | * in -rt this does -not- necessarily result in all currently executing | 1177 | * in -rt this does -not- necessarily result in all currently executing |
| 1178 | * interrupt -handlers- having completed. | 1178 | * interrupt -handlers- having completed. |
| 1179 | */ | 1179 | */ |
| 1180 | synchronize_rcu_xxx(__synchronize_sched, call_rcu_sched) | 1180 | void __synchronize_sched(void) |
| 1181 | { | ||
| 1182 | struct rcu_synchronize rcu; | ||
| 1183 | |||
| 1184 | init_completion(&rcu.completion); | ||
| 1185 | /* Will wake us up after RCU has finished. */ | ||
| 1186 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | ||
| 1187 | /* Wait for it. */ | ||
| 1188 | wait_for_completion(&rcu.completion); | ||
| 1189 | } | ||
| 1181 | EXPORT_SYMBOL_GPL(__synchronize_sched); | 1190 | EXPORT_SYMBOL_GPL(__synchronize_sched); |
| 1182 | 1191 | ||
| 1183 | /* | 1192 | /* |
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 3245b40952c6..1cff28db56b6 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
| @@ -136,7 +136,7 @@ static int stutter_pause_test = 0; | |||
| 136 | #endif | 136 | #endif |
| 137 | int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; | 137 | int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; |
| 138 | 138 | ||
| 139 | #define FULLSTOP_SIGNALED 1 /* Bail due to signal. */ | 139 | #define FULLSTOP_SHUTDOWN 1 /* Bail due to system shutdown/panic. */ |
| 140 | #define FULLSTOP_CLEANUP 2 /* Orderly shutdown. */ | 140 | #define FULLSTOP_CLEANUP 2 /* Orderly shutdown. */ |
| 141 | static int fullstop; /* stop generating callbacks at test end. */ | 141 | static int fullstop; /* stop generating callbacks at test end. */ |
| 142 | DEFINE_MUTEX(fullstop_mutex); /* protect fullstop transitions and */ | 142 | DEFINE_MUTEX(fullstop_mutex); /* protect fullstop transitions and */ |
| @@ -151,12 +151,10 @@ rcutorture_shutdown_notify(struct notifier_block *unused1, | |||
| 151 | { | 151 | { |
| 152 | if (fullstop) | 152 | if (fullstop) |
| 153 | return NOTIFY_DONE; | 153 | return NOTIFY_DONE; |
| 154 | if (signal_pending(current)) { | 154 | mutex_lock(&fullstop_mutex); |
| 155 | mutex_lock(&fullstop_mutex); | 155 | if (!fullstop) |
| 156 | if (!ACCESS_ONCE(fullstop)) | 156 | fullstop = FULLSTOP_SHUTDOWN; |
| 157 | fullstop = FULLSTOP_SIGNALED; | 157 | mutex_unlock(&fullstop_mutex); |
| 158 | mutex_unlock(&fullstop_mutex); | ||
| 159 | } | ||
| 160 | return NOTIFY_DONE; | 158 | return NOTIFY_DONE; |
| 161 | } | 159 | } |
| 162 | 160 | ||
| @@ -624,7 +622,7 @@ rcu_torture_writer(void *arg) | |||
| 624 | rcu_stutter_wait(); | 622 | rcu_stutter_wait(); |
| 625 | } while (!kthread_should_stop() && !fullstop); | 623 | } while (!kthread_should_stop() && !fullstop); |
| 626 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); | 624 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); |
| 627 | while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED) | 625 | while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN) |
| 628 | schedule_timeout_uninterruptible(1); | 626 | schedule_timeout_uninterruptible(1); |
| 629 | return 0; | 627 | return 0; |
| 630 | } | 628 | } |
| @@ -649,7 +647,7 @@ rcu_torture_fakewriter(void *arg) | |||
| 649 | } while (!kthread_should_stop() && !fullstop); | 647 | } while (!kthread_should_stop() && !fullstop); |
| 650 | 648 | ||
| 651 | VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); | 649 | VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); |
| 652 | while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED) | 650 | while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN) |
| 653 | schedule_timeout_uninterruptible(1); | 651 | schedule_timeout_uninterruptible(1); |
| 654 | return 0; | 652 | return 0; |
| 655 | } | 653 | } |
| @@ -759,7 +757,7 @@ rcu_torture_reader(void *arg) | |||
| 759 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); | 757 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); |
| 760 | if (irqreader && cur_ops->irqcapable) | 758 | if (irqreader && cur_ops->irqcapable) |
| 761 | del_timer_sync(&t); | 759 | del_timer_sync(&t); |
| 762 | while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED) | 760 | while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN) |
| 763 | schedule_timeout_uninterruptible(1); | 761 | schedule_timeout_uninterruptible(1); |
| 764 | return 0; | 762 | return 0; |
| 765 | } | 763 | } |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index a342b032112c..f2d8638e6c60 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
| @@ -79,7 +79,10 @@ struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | |||
| 79 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 79 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
| 80 | 80 | ||
| 81 | #ifdef CONFIG_NO_HZ | 81 | #ifdef CONFIG_NO_HZ |
| 82 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks); | 82 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { |
| 83 | .dynticks_nesting = 1, | ||
| 84 | .dynticks = 1, | ||
| 85 | }; | ||
| 83 | #endif /* #ifdef CONFIG_NO_HZ */ | 86 | #endif /* #ifdef CONFIG_NO_HZ */ |
| 84 | 87 | ||
| 85 | static int blimit = 10; /* Maximum callbacks per softirq. */ | 88 | static int blimit = 10; /* Maximum callbacks per softirq. */ |
| @@ -572,6 +575,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
| 572 | /* Special-case the common single-level case. */ | 575 | /* Special-case the common single-level case. */ |
| 573 | if (NUM_RCU_NODES == 1) { | 576 | if (NUM_RCU_NODES == 1) { |
| 574 | rnp->qsmask = rnp->qsmaskinit; | 577 | rnp->qsmask = rnp->qsmaskinit; |
| 578 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ | ||
| 575 | spin_unlock_irqrestore(&rnp->lock, flags); | 579 | spin_unlock_irqrestore(&rnp->lock, flags); |
| 576 | return; | 580 | return; |
| 577 | } | 581 | } |
| @@ -1379,13 +1383,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
| 1379 | 1383 | ||
| 1380 | static void __cpuinit rcu_online_cpu(int cpu) | 1384 | static void __cpuinit rcu_online_cpu(int cpu) |
| 1381 | { | 1385 | { |
| 1382 | #ifdef CONFIG_NO_HZ | ||
| 1383 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | ||
| 1384 | |||
| 1385 | rdtp->dynticks_nesting = 1; | ||
| 1386 | rdtp->dynticks |= 1; /* need consecutive #s even for hotplug. */ | ||
| 1387 | rdtp->dynticks_nmi = (rdtp->dynticks_nmi + 1) & ~0x1; | ||
| 1388 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
| 1389 | rcu_init_percpu_data(cpu, &rcu_state); | 1386 | rcu_init_percpu_data(cpu, &rcu_state); |
| 1390 | rcu_init_percpu_data(cpu, &rcu_bh_state); | 1387 | rcu_init_percpu_data(cpu, &rcu_bh_state); |
| 1391 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 1388 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
diff --git a/kernel/signal.c b/kernel/signal.c index 8e95855ff3cf..3152ac3b62e2 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -858,7 +858,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t, | |||
| 858 | q->info.si_signo = sig; | 858 | q->info.si_signo = sig; |
| 859 | q->info.si_errno = 0; | 859 | q->info.si_errno = 0; |
| 860 | q->info.si_code = SI_USER; | 860 | q->info.si_code = SI_USER; |
| 861 | q->info.si_pid = task_pid_vnr(current); | 861 | q->info.si_pid = task_tgid_nr_ns(current, |
| 862 | task_active_pid_ns(t)); | ||
| 862 | q->info.si_uid = current_uid(); | 863 | q->info.si_uid = current_uid(); |
| 863 | break; | 864 | break; |
| 864 | case (unsigned long) SEND_SIG_PRIV: | 865 | case (unsigned long) SEND_SIG_PRIV: |
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 286c41722e8c..0cd415ee62a2 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
| @@ -38,7 +38,10 @@ struct stop_machine_data { | |||
| 38 | static unsigned int num_threads; | 38 | static unsigned int num_threads; |
| 39 | static atomic_t thread_ack; | 39 | static atomic_t thread_ack; |
| 40 | static DEFINE_MUTEX(lock); | 40 | static DEFINE_MUTEX(lock); |
| 41 | 41 | /* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */ | |
| 42 | static DEFINE_MUTEX(setup_lock); | ||
| 43 | /* Users of stop_machine. */ | ||
| 44 | static int refcount; | ||
| 42 | static struct workqueue_struct *stop_machine_wq; | 45 | static struct workqueue_struct *stop_machine_wq; |
| 43 | static struct stop_machine_data active, idle; | 46 | static struct stop_machine_data active, idle; |
| 44 | static const cpumask_t *active_cpus; | 47 | static const cpumask_t *active_cpus; |
| @@ -109,6 +112,43 @@ static int chill(void *unused) | |||
| 109 | return 0; | 112 | return 0; |
| 110 | } | 113 | } |
| 111 | 114 | ||
| 115 | int stop_machine_create(void) | ||
| 116 | { | ||
| 117 | mutex_lock(&setup_lock); | ||
| 118 | if (refcount) | ||
| 119 | goto done; | ||
| 120 | stop_machine_wq = create_rt_workqueue("kstop"); | ||
| 121 | if (!stop_machine_wq) | ||
| 122 | goto err_out; | ||
| 123 | stop_machine_work = alloc_percpu(struct work_struct); | ||
| 124 | if (!stop_machine_work) | ||
| 125 | goto err_out; | ||
| 126 | done: | ||
| 127 | refcount++; | ||
| 128 | mutex_unlock(&setup_lock); | ||
| 129 | return 0; | ||
| 130 | |||
| 131 | err_out: | ||
| 132 | if (stop_machine_wq) | ||
| 133 | destroy_workqueue(stop_machine_wq); | ||
| 134 | mutex_unlock(&setup_lock); | ||
| 135 | return -ENOMEM; | ||
| 136 | } | ||
| 137 | EXPORT_SYMBOL_GPL(stop_machine_create); | ||
| 138 | |||
| 139 | void stop_machine_destroy(void) | ||
| 140 | { | ||
| 141 | mutex_lock(&setup_lock); | ||
| 142 | refcount--; | ||
| 143 | if (refcount) | ||
| 144 | goto done; | ||
| 145 | destroy_workqueue(stop_machine_wq); | ||
| 146 | free_percpu(stop_machine_work); | ||
| 147 | done: | ||
| 148 | mutex_unlock(&setup_lock); | ||
| 149 | } | ||
| 150 | EXPORT_SYMBOL_GPL(stop_machine_destroy); | ||
| 151 | |||
| 112 | int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) | 152 | int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) |
| 113 | { | 153 | { |
| 114 | struct work_struct *sm_work; | 154 | struct work_struct *sm_work; |
| @@ -146,19 +186,14 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) | |||
| 146 | { | 186 | { |
| 147 | int ret; | 187 | int ret; |
| 148 | 188 | ||
| 189 | ret = stop_machine_create(); | ||
| 190 | if (ret) | ||
| 191 | return ret; | ||
| 149 | /* No CPUs can come up or down during this. */ | 192 | /* No CPUs can come up or down during this. */ |
| 150 | get_online_cpus(); | 193 | get_online_cpus(); |
| 151 | ret = __stop_machine(fn, data, cpus); | 194 | ret = __stop_machine(fn, data, cpus); |
| 152 | put_online_cpus(); | 195 | put_online_cpus(); |
| 153 | 196 | stop_machine_destroy(); | |
| 154 | return ret; | 197 | return ret; |
| 155 | } | 198 | } |
| 156 | EXPORT_SYMBOL_GPL(stop_machine); | 199 | EXPORT_SYMBOL_GPL(stop_machine); |
| 157 | |||
| 158 | static int __init stop_machine_init(void) | ||
| 159 | { | ||
| 160 | stop_machine_wq = create_rt_workqueue("kstop"); | ||
| 161 | stop_machine_work = alloc_percpu(struct work_struct); | ||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | core_initcall(stop_machine_init); | ||
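For context, a sketch of how a caller uses the new refcounted setup; frobnicate_under_stop_machine() is a made-up example, but it mirrors what stop_machine(), sys_delete_module() and load_module() do above: pin the worker infrastructure while an allocation failure can still be reported, so the later stop-machine call cannot fail with -ENOMEM:

        int frobnicate_under_stop_machine(int (*fn)(void *), void *data)
        {
                int err = stop_machine_create();        /* may sleep, may fail */

                if (err)
                        return err;
                get_online_cpus();
                err = __stop_machine(fn, data, NULL);   /* no allocations now */
                put_online_cpus();
                stop_machine_destroy();
                return err;
        }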
diff --git a/kernel/sys.c b/kernel/sys.c index d356d79e84ac..4a43617cd565 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/task_io_accounting_ops.h> | 33 | #include <linux/task_io_accounting_ops.h> |
| 34 | #include <linux/seccomp.h> | 34 | #include <linux/seccomp.h> |
| 35 | #include <linux/cpu.h> | 35 | #include <linux/cpu.h> |
| 36 | #include <linux/ptrace.h> | ||
| 36 | 37 | ||
| 37 | #include <linux/compat.h> | 38 | #include <linux/compat.h> |
| 38 | #include <linux/syscalls.h> | 39 | #include <linux/syscalls.h> |
| @@ -927,6 +928,7 @@ asmlinkage long sys_times(struct tms __user * tbuf) | |||
| 927 | if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) | 928 | if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) |
| 928 | return -EFAULT; | 929 | return -EFAULT; |
| 929 | } | 930 | } |
| 931 | force_successful_syscall_return(); | ||
| 930 | return (long) jiffies_64_to_clock_t(get_jiffies_64()); | 932 | return (long) jiffies_64_to_clock_t(get_jiffies_64()); |
| 931 | } | 933 | } |
| 932 | 934 | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ff6d45c7626f..92f6e5bc3c24 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -87,10 +87,6 @@ extern int rcutorture_runnable; | |||
| 87 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ | 87 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ |
| 88 | 88 | ||
| 89 | /* Constants used for minimum and maximum */ | 89 | /* Constants used for minimum and maximum */ |
| 90 | #if defined(CONFIG_HIGHMEM) || defined(CONFIG_DETECT_SOFTLOCKUP) | ||
| 91 | static int one = 1; | ||
| 92 | #endif | ||
| 93 | |||
| 94 | #ifdef CONFIG_DETECT_SOFTLOCKUP | 90 | #ifdef CONFIG_DETECT_SOFTLOCKUP |
| 95 | static int sixty = 60; | 91 | static int sixty = 60; |
| 96 | static int neg_one = -1; | 92 | static int neg_one = -1; |
| @@ -101,6 +97,7 @@ static int two = 2; | |||
| 101 | #endif | 97 | #endif |
| 102 | 98 | ||
| 103 | static int zero; | 99 | static int zero; |
| 100 | static int one = 1; | ||
| 104 | static int one_hundred = 100; | 101 | static int one_hundred = 100; |
| 105 | 102 | ||
| 106 | /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ | 103 | /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ |
| @@ -952,12 +949,22 @@ static struct ctl_table vm_table[] = { | |||
| 952 | .data = &dirty_background_ratio, | 949 | .data = &dirty_background_ratio, |
| 953 | .maxlen = sizeof(dirty_background_ratio), | 950 | .maxlen = sizeof(dirty_background_ratio), |
| 954 | .mode = 0644, | 951 | .mode = 0644, |
| 955 | .proc_handler = &proc_dointvec_minmax, | 952 | .proc_handler = &dirty_background_ratio_handler, |
| 956 | .strategy = &sysctl_intvec, | 953 | .strategy = &sysctl_intvec, |
| 957 | .extra1 = &zero, | 954 | .extra1 = &zero, |
| 958 | .extra2 = &one_hundred, | 955 | .extra2 = &one_hundred, |
| 959 | }, | 956 | }, |
| 960 | { | 957 | { |
| 958 | .ctl_name = CTL_UNNUMBERED, | ||
| 959 | .procname = "dirty_background_bytes", | ||
| 960 | .data = &dirty_background_bytes, | ||
| 961 | .maxlen = sizeof(dirty_background_bytes), | ||
| 962 | .mode = 0644, | ||
| 963 | .proc_handler = &dirty_background_bytes_handler, | ||
| 964 | .strategy = &sysctl_intvec, | ||
| 965 | .extra1 = &one, | ||
| 966 | }, | ||
| 967 | { | ||
| 961 | .ctl_name = VM_DIRTY_RATIO, | 968 | .ctl_name = VM_DIRTY_RATIO, |
| 962 | .procname = "dirty_ratio", | 969 | .procname = "dirty_ratio", |
| 963 | .data = &vm_dirty_ratio, | 970 | .data = &vm_dirty_ratio, |
| @@ -969,6 +976,16 @@ static struct ctl_table vm_table[] = { | |||
| 969 | .extra2 = &one_hundred, | 976 | .extra2 = &one_hundred, |
| 970 | }, | 977 | }, |
| 971 | { | 978 | { |
| 979 | .ctl_name = CTL_UNNUMBERED, | ||
| 980 | .procname = "dirty_bytes", | ||
| 981 | .data = &vm_dirty_bytes, | ||
| 982 | .maxlen = sizeof(vm_dirty_bytes), | ||
| 983 | .mode = 0644, | ||
| 984 | .proc_handler = &dirty_bytes_handler, | ||
| 985 | .strategy = &sysctl_intvec, | ||
| 986 | .extra1 = &one, | ||
| 987 | }, | ||
| 988 | { | ||
| 972 | .procname = "dirty_writeback_centisecs", | 989 | .procname = "dirty_writeback_centisecs", |
| 973 | .data = &dirty_writeback_interval, | 990 | .data = &dirty_writeback_interval, |
| 974 | .maxlen = sizeof(dirty_writeback_interval), | 991 | .maxlen = sizeof(dirty_writeback_interval), |
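For context, dirty_bytes and dirty_ratio (and the background pair) are two ways of expressing one threshold, so each handler is expected to zero its sibling on a successful write. Assuming the handlers live in mm/page-writeback.c, their shape would be roughly:

        int dirty_background_bytes_handler(struct ctl_table *table, int write,
                        struct file *filp, void __user *buffer,
                        size_t *lenp, loff_t *ppos)
        {
                int ret = proc_doulongvec_minmax(table, write, filp,
                                                 buffer, lenp, ppos);

                if (ret == 0 && write)
                        dirty_background_ratio = 0;     /* bytes now rule */
                return ret;
        }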
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c index 06b6395b45b2..4f104515a19b 100644 --- a/kernel/test_kprobes.c +++ b/kernel/test_kprobes.c | |||
| @@ -22,21 +22,11 @@ | |||
| 22 | 22 | ||
| 23 | static u32 rand1, preh_val, posth_val, jph_val; | 23 | static u32 rand1, preh_val, posth_val, jph_val; |
| 24 | static int errors, handler_errors, num_tests; | 24 | static int errors, handler_errors, num_tests; |
| 25 | static u32 (*target)(u32 value); | ||
| 26 | static u32 (*target2)(u32 value); | ||
| 25 | 27 | ||
| 26 | static noinline u32 kprobe_target(u32 value) | 28 | static noinline u32 kprobe_target(u32 value) |
| 27 | { | 29 | { |
| 28 | /* | ||
| 29 | * gcc ignores noinline on some architectures unless we stuff | ||
| 30 | * sufficient lard into the function. The get_kprobe() here is | ||
| 31 | * just for that. | ||
| 32 | * | ||
| 33 | * NOTE: We aren't concerned about the correctness of get_kprobe() | ||
| 34 | * here; hence, this call is neither under !preempt nor with the | ||
| 35 | * kprobe_mutex held. This is fine(tm) | ||
| 36 | */ | ||
| 37 | if (get_kprobe((void *)0xdeadbeef)) | ||
| 38 | printk(KERN_INFO "Kprobe smoke test: probe on 0xdeadbeef!\n"); | ||
| 39 | |||
| 40 | return (value / div_factor); | 30 | return (value / div_factor); |
| 41 | } | 31 | } |
| 42 | 32 | ||
| @@ -74,7 +64,7 @@ static int test_kprobe(void) | |||
| 74 | return ret; | 64 | return ret; |
| 75 | } | 65 | } |
| 76 | 66 | ||
| 77 | ret = kprobe_target(rand1); | 67 | ret = target(rand1); |
| 78 | unregister_kprobe(&kp); | 68 | unregister_kprobe(&kp); |
| 79 | 69 | ||
| 80 | if (preh_val == 0) { | 70 | if (preh_val == 0) { |
| @@ -92,6 +82,84 @@ static int test_kprobe(void) | |||
| 92 | return 0; | 82 | return 0; |
| 93 | } | 83 | } |
| 94 | 84 | ||
| 85 | static noinline u32 kprobe_target2(u32 value) | ||
| 86 | { | ||
| 87 | return (value / div_factor) + 1; | ||
| 88 | } | ||
| 89 | |||
| 90 | static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs) | ||
| 91 | { | ||
| 92 | preh_val = (rand1 / div_factor) + 1; | ||
| 93 | return 0; | ||
| 94 | } | ||
| 95 | |||
| 96 | static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs, | ||
| 97 | unsigned long flags) | ||
| 98 | { | ||
| 99 | if (preh_val != (rand1 / div_factor) + 1) { | ||
| 100 | handler_errors++; | ||
| 101 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 102 | "incorrect value in post_handler2\n"); | ||
| 103 | } | ||
| 104 | posth_val = preh_val + div_factor; | ||
| 105 | } | ||
| 106 | |||
| 107 | static struct kprobe kp2 = { | ||
| 108 | .symbol_name = "kprobe_target2", | ||
| 109 | .pre_handler = kp_pre_handler2, | ||
| 110 | .post_handler = kp_post_handler2 | ||
| 111 | }; | ||
| 112 | |||
| 113 | static int test_kprobes(void) | ||
| 114 | { | ||
| 115 | int ret; | ||
| 116 | struct kprobe *kps[2] = {&kp, &kp2}; | ||
| 117 | |||
| 118 | kp.addr = 0; /* addr should be cleared when reusing a kprobe. */ | ||
| 119 | ret = register_kprobes(kps, 2); | ||
| 120 | if (ret < 0) { | ||
| 121 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 122 | "register_kprobes returned %d\n", ret); | ||
| 123 | return ret; | ||
| 124 | } | ||
| 125 | |||
| 126 | preh_val = 0; | ||
| 127 | posth_val = 0; | ||
| 128 | ret = target(rand1); | ||
| 129 | |||
| 130 | if (preh_val == 0) { | ||
| 131 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 132 | "kprobe pre_handler not called\n"); | ||
| 133 | handler_errors++; | ||
| 134 | } | ||
| 135 | |||
| 136 | if (posth_val == 0) { | ||
| 137 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 138 | "kprobe post_handler not called\n"); | ||
| 139 | handler_errors++; | ||
| 140 | } | ||
| 141 | |||
| 142 | preh_val = 0; | ||
| 143 | posth_val = 0; | ||
| 144 | ret = target2(rand1); | ||
| 145 | |||
| 146 | if (preh_val == 0) { | ||
| 147 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 148 | "kprobe pre_handler2 not called\n"); | ||
| 149 | handler_errors++; | ||
| 150 | } | ||
| 151 | |||
| 152 | if (posth_val == 0) { | ||
| 153 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 154 | "kprobe post_handler2 not called\n"); | ||
| 155 | handler_errors++; | ||
| 156 | } | ||
| 157 | |||
| 158 | unregister_kprobes(kps, 2); | ||
| 159 | return 0; | ||
| 160 | |||
| 161 | } | ||
| 162 | |||
| 95 | static u32 j_kprobe_target(u32 value) | 163 | static u32 j_kprobe_target(u32 value) |
| 96 | { | 164 | { |
| 97 | if (value != rand1) { | 165 | if (value != rand1) { |
| @@ -121,7 +189,7 @@ static int test_jprobe(void) | |||
| 121 | return ret; | 189 | return ret; |
| 122 | } | 190 | } |
| 123 | 191 | ||
| 124 | ret = kprobe_target(rand1); | 192 | ret = target(rand1); |
| 125 | unregister_jprobe(&jp); | 193 | unregister_jprobe(&jp); |
| 126 | if (jph_val == 0) { | 194 | if (jph_val == 0) { |
| 127 | printk(KERN_ERR "Kprobe smoke test failed: " | 195 | printk(KERN_ERR "Kprobe smoke test failed: " |
| @@ -132,6 +200,43 @@ static int test_jprobe(void) | |||
| 132 | return 0; | 200 | return 0; |
| 133 | } | 201 | } |
| 134 | 202 | ||
| 203 | static struct jprobe jp2 = { | ||
| 204 | .entry = j_kprobe_target, | ||
| 205 | .kp.symbol_name = "kprobe_target2" | ||
| 206 | }; | ||
| 207 | |||
| 208 | static int test_jprobes(void) | ||
| 209 | { | ||
| 210 | int ret; | ||
| 211 | struct jprobe *jps[2] = {&jp, &jp2}; | ||
| 212 | |||
| 213 | jp.kp.addr = 0; /* addr should be cleared when reusing a kprobe. */ | ||
| 214 | ret = register_jprobes(jps, 2); | ||
| 215 | if (ret < 0) { | ||
| 216 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 217 | "register_jprobes returned %d\n", ret); | ||
| 218 | return ret; | ||
| 219 | } | ||
| 220 | |||
| 221 | jph_val = 0; | ||
| 222 | ret = target(rand1); | ||
| 223 | if (jph_val == 0) { | ||
| 224 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 225 | "jprobe handler not called\n"); | ||
| 226 | handler_errors++; | ||
| 227 | } | ||
| 228 | |||
| 229 | jph_val = 0; | ||
| 230 | ret = target2(rand1); | ||
| 231 | if (jph_val == 0) { | ||
| 232 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 233 | "jprobe handler2 not called\n"); | ||
| 234 | handler_errors++; | ||
| 235 | } | ||
| 236 | unregister_jprobes(jps, 2); | ||
| 237 | |||
| 238 | return 0; | ||
| 239 | } | ||
| 135 | #ifdef CONFIG_KRETPROBES | 240 | #ifdef CONFIG_KRETPROBES |
| 136 | static u32 krph_val; | 241 | static u32 krph_val; |
| 137 | 242 | ||
| @@ -177,7 +282,7 @@ static int test_kretprobe(void) | |||
| 177 | return ret; | 282 | return ret; |
| 178 | } | 283 | } |
| 179 | 284 | ||
| 180 | ret = kprobe_target(rand1); | 285 | ret = target(rand1); |
| 181 | unregister_kretprobe(&rp); | 286 | unregister_kretprobe(&rp); |
| 182 | if (krph_val != rand1) { | 287 | if (krph_val != rand1) { |
| 183 | printk(KERN_ERR "Kprobe smoke test failed: " | 288 | printk(KERN_ERR "Kprobe smoke test failed: " |
| @@ -187,12 +292,72 @@ static int test_kretprobe(void) | |||
| 187 | 292 | ||
| 188 | return 0; | 293 | return 0; |
| 189 | } | 294 | } |
| 295 | |||
| 296 | static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs) | ||
| 297 | { | ||
| 298 | unsigned long ret = regs_return_value(regs); | ||
| 299 | |||
| 300 | if (ret != (rand1 / div_factor) + 1) { | ||
| 301 | handler_errors++; | ||
| 302 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 303 | "incorrect value in kretprobe handler2\n"); | ||
| 304 | } | ||
| 305 | if (krph_val == 0) { | ||
| 306 | handler_errors++; | ||
| 307 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 308 | "call to kretprobe entry handler failed\n"); | ||
| 309 | } | ||
| 310 | |||
| 311 | krph_val = rand1; | ||
| 312 | return 0; | ||
| 313 | } | ||
| 314 | |||
| 315 | static struct kretprobe rp2 = { | ||
| 316 | .handler = return_handler2, | ||
| 317 | .entry_handler = entry_handler, | ||
| 318 | .kp.symbol_name = "kprobe_target2" | ||
| 319 | }; | ||
| 320 | |||
| 321 | static int test_kretprobes(void) | ||
| 322 | { | ||
| 323 | int ret; | ||
| 324 | struct kretprobe *rps[2] = {&rp, &rp2}; | ||
| 325 | |||
| 326 | rp.kp.addr = 0; /* addr should be cleared when reusing a kprobe. */ | ||
| 327 | ret = register_kretprobes(rps, 2); | ||
| 328 | if (ret < 0) { | ||
| 329 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 330 | "register_kretprobe returned %d\n", ret); | ||
| 331 | return ret; | ||
| 332 | } | ||
| 333 | |||
| 334 | krph_val = 0; | ||
| 335 | ret = target(rand1); | ||
| 336 | if (krph_val != rand1) { | ||
| 337 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 338 | "kretprobe handler not called\n"); | ||
| 339 | handler_errors++; | ||
| 340 | } | ||
| 341 | |||
| 342 | krph_val = 0; | ||
| 343 | ret = target2(rand1); | ||
| 344 | if (krph_val != rand1) { | ||
| 345 | printk(KERN_ERR "Kprobe smoke test failed: " | ||
| 346 | "kretprobe handler2 not called\n"); | ||
| 347 | handler_errors++; | ||
| 348 | } | ||
| 349 | unregister_kretprobes(rps, 2); | ||
| 350 | return 0; | ||
| 351 | } | ||
| 190 | #endif /* CONFIG_KRETPROBES */ | 352 | #endif /* CONFIG_KRETPROBES */ |
| 191 | 353 | ||
| 192 | int init_test_probes(void) | 354 | int init_test_probes(void) |
| 193 | { | 355 | { |
| 194 | int ret; | 356 | int ret; |
| 195 | 357 | ||
| 358 | target = kprobe_target; | ||
| 359 | target2 = kprobe_target2; | ||
| 360 | |||
| 196 | do { | 361 | do { |
| 197 | rand1 = random32(); | 362 | rand1 = random32(); |
| 198 | } while (rand1 <= div_factor); | 363 | } while (rand1 <= div_factor); |
| @@ -204,15 +369,30 @@ int init_test_probes(void) | |||
| 204 | errors++; | 369 | errors++; |
| 205 | 370 | ||
| 206 | num_tests++; | 371 | num_tests++; |
| 372 | ret = test_kprobes(); | ||
| 373 | if (ret < 0) | ||
| 374 | errors++; | ||
| 375 | |||
| 376 | num_tests++; | ||
| 207 | ret = test_jprobe(); | 377 | ret = test_jprobe(); |
| 208 | if (ret < 0) | 378 | if (ret < 0) |
| 209 | errors++; | 379 | errors++; |
| 210 | 380 | ||
| 381 | num_tests++; | ||
| 382 | ret = test_jprobes(); | ||
| 383 | if (ret < 0) | ||
| 384 | errors++; | ||
| 385 | |||
| 211 | #ifdef CONFIG_KRETPROBES | 386 | #ifdef CONFIG_KRETPROBES |
| 212 | num_tests++; | 387 | num_tests++; |
| 213 | ret = test_kretprobe(); | 388 | ret = test_kretprobe(); |
| 214 | if (ret < 0) | 389 | if (ret < 0) |
| 215 | errors++; | 390 | errors++; |
| 391 | |||
| 392 | num_tests++; | ||
| 393 | ret = test_kretprobes(); | ||
| 394 | if (ret < 0) | ||
| 395 | errors++; | ||
| 216 | #endif /* CONFIG_KRETPROBES */ | 396 | #endif /* CONFIG_KRETPROBES */ |
| 217 | 397 | ||
| 218 | if (errors) | 398 | if (errors) |
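For context, the indirection through target/target2 replaces the old trick of padding kprobe_target() with a get_kprobe() call: with a direct call the compiler may inline the function into its caller, and the breakpoint planted at its entry would then never be hit. Calling through pointers assigned at run time keeps the out-of-line copies as the only reachable ones:

        static u32 (*target)(u32 value);
        /* ... in init_test_probes(): */
        target = kprobe_target;         /* assigned once, at run time */
        ret = target(rand1);            /* indirect call; stays out of line */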
diff --git a/kernel/time.c b/kernel/time.c index d63a4336fad6..4886e3ce83a4 100644 --- a/kernel/time.c +++ b/kernel/time.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #include <linux/fs.h> | 37 | #include <linux/fs.h> |
| 38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
| 39 | #include <linux/math64.h> | 39 | #include <linux/math64.h> |
| 40 | #include <linux/ptrace.h> | ||
| 40 | 41 | ||
| 41 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
| 42 | #include <asm/unistd.h> | 43 | #include <asm/unistd.h> |
| @@ -65,8 +66,9 @@ asmlinkage long sys_time(time_t __user * tloc) | |||
| 65 | 66 | ||
| 66 | if (tloc) { | 67 | if (tloc) { |
| 67 | if (put_user(i,tloc)) | 68 | if (put_user(i,tloc)) |
| 68 | i = -EFAULT; | 69 | return -EFAULT; |
| 69 | } | 70 | } |
| 71 | force_successful_syscall_return(); | ||
| 70 | return i; | 72 | return i; |
| 71 | } | 73 | } |
| 72 | 74 | ||
diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 2dc06ab35716..43f891b05a4b 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c | |||
| @@ -92,8 +92,8 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) | |||
| 92 | mm = get_task_mm(p); | 92 | mm = get_task_mm(p); |
| 93 | if (mm) { | 93 | if (mm) { |
| 94 | /* adjust to KB unit */ | 94 | /* adjust to KB unit */ |
| 95 | stats->hiwater_rss = mm->hiwater_rss * PAGE_SIZE / KB; | 95 | stats->hiwater_rss = get_mm_hiwater_rss(mm) * PAGE_SIZE / KB; |
| 96 | stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB; | 96 | stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB; |
| 97 | mmput(mm); | 97 | mmput(mm); |
| 98 | } | 98 | } |
| 99 | stats->read_char = p->ioac.rchar; | 99 | stats->read_char = p->ioac.rchar; |
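For context, the get_mm_hiwater_*() accessors fold the current usage into the stored high-water mark, so a task whose usage never dropped still reports its true peak. Assuming the usual definitions in include/linux/mm.h, they amount to:

        #define get_mm_hiwater_rss(mm)  max((mm)->hiwater_rss, get_mm_rss(mm))
        #define get_mm_hiwater_vm(mm)   max((mm)->hiwater_vm, (mm)->total_vm)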
