Diffstat (limited to 'kernel')
56 files changed, 984 insertions(+), 627 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 2921d90ce32f..170a9213c1b6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -41,6 +41,9 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+ifneq ($(CONFIG_SMP),y)
+obj-y += up.o
+endif
 obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
diff --git a/kernel/acct.c b/kernel/acct.c
index d57b7cbb98b6..7afa31564162 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -277,7 +277,7 @@ static int acct_on(char *name)
  * should be written. If the filename is NULL, accounting will be
  * shutdown.
  */
-asmlinkage long sys_acct(const char __user *name)
+SYSCALL_DEFINE1(acct, const char __user *, name)
 {
         int error;
 
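A note on the recurring pattern: most syscall hunks in this commit are mechanical conversions from open-coded asmlinkage definitions to the SYSCALL_DEFINEn() macros, which take the syscall name followed by type/name argument pairs. A rough sketch of the idea only (simplified; the real macros in include/linux/syscalls.h also emit syscall metadata and, on some architectures, wrappers that sign-extend 32-bit arguments):

/* Simplified sketch, not the kernel's actual macro. */
#define SYSCALL_DEFINE1(name, t1, a1) \
        asmlinkage long sys_##name(t1 a1)

/* With that definition, the acct() conversion above still produces
 * the old symbol: */
SYSCALL_DEFINE1(acct, const char __user *, name)
{
        /* ... body unchanged ... */
}
/* expands to: asmlinkage long sys_acct(const char __user *name) { ... } */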
diff --git a/kernel/async.c b/kernel/async.c
index 64cc916299a5..608b32b42812 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -65,6 +65,8 @@ static LIST_HEAD(async_pending);
 static LIST_HEAD(async_running);
 static DEFINE_SPINLOCK(async_lock);
 
+static int async_enabled = 0;
+
 struct async_entry {
         struct list_head list;
         async_cookie_t cookie;
@@ -88,12 +90,12 @@ extern int initcall_debug;
 static async_cookie_t __lowest_in_progress(struct list_head *running)
 {
         struct async_entry *entry;
-        if (!list_empty(&async_pending)) {
-                entry = list_first_entry(&async_pending,
+        if (!list_empty(running)) {
+                entry = list_first_entry(running,
                         struct async_entry, list);
                 return entry->cookie;
-        } else if (!list_empty(running)) {
-                entry = list_first_entry(running,
+        } else if (!list_empty(&async_pending)) {
+                entry = list_first_entry(&async_pending,
                         struct async_entry, list);
                 return entry->cookie;
         } else {
@@ -102,6 +104,17 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
         }
 
 }
+
+static async_cookie_t lowest_in_progress(struct list_head *running)
+{
+        unsigned long flags;
+        async_cookie_t ret;
+
+        spin_lock_irqsave(&async_lock, flags);
+        ret = __lowest_in_progress(running);
+        spin_unlock_irqrestore(&async_lock, flags);
+        return ret;
+}
 /*
  * pick the first pending entry and run it
  */
@@ -169,7 +182,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
          * If we're out of memory or if there's too much work
          * pending already, we execute synchronously.
          */
-        if (!entry || atomic_read(&entry_count) > MAX_WORK) {
+        if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
                 kfree(entry);
                 spin_lock_irqsave(&async_lock, flags);
                 newcookie = next_cookie++;
@@ -227,7 +240,7 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running)
                 starttime = ktime_get();
         }
 
-        wait_event(async_done, __lowest_in_progress(running) >= cookie);
+        wait_event(async_done, lowest_in_progress(running) >= cookie);
 
         if (initcall_debug && system_state == SYSTEM_BOOTING) {
                 endtime = ktime_get();
@@ -316,8 +329,18 @@ static int async_manager_thread(void *unused)
 
 static int __init async_init(void)
 {
-        kthread_run(async_manager_thread, NULL, "async/mgr");
+        if (async_enabled)
+                kthread_run(async_manager_thread, NULL, "async/mgr");
         return 0;
 }
 
+static int __init setup_async(char *str)
+{
+        async_enabled = 1;
+        return 1;
+}
+
+__setup("fastboot", setup_async);
+
+
 core_initcall(async_init);
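The async.c hunks above split the list inspection into a raw __lowest_in_progress() (caller holds async_lock) and a locking wrapper, so the wait_event() condition is no longer evaluated without the lock. This double-underscore locked/unlocked split is a standard kernel idiom; a minimal sketch of the shape, with hypothetical names:

static DEFINE_SPINLOCK(demo_lock);
static int demo_state;

/* __demo_peek - caller must already hold demo_lock */
static int __demo_peek(void)
{
        return demo_state;
}

/* demo_peek - takes and releases the lock itself */
static int demo_peek(void)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&demo_lock, flags);
        ret = __demo_peek();
        spin_unlock_irqrestore(&demo_lock, flags);
        return ret;
}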
diff --git a/kernel/capability.c b/kernel/capability.c
index 688926e496be..4e17041963f5 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -161,7 +161,7 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
  *
  * Returns 0 on success and < 0 on error.
  */
-asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
+SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
 {
         int ret = 0;
         pid_t pid;
@@ -235,7 +235,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
  *
  * Returns 0 on success and < 0 on error.
  */
-asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
+SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
 {
         struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
         unsigned i, tocopy;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c29831076e7a..5a54ff42874e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1115,8 +1115,10 @@ static void cgroup_kill_sb(struct super_block *sb) {
         }
         write_unlock(&css_set_lock);
 
-        list_del(&root->root_list);
-        root_count--;
+        if (!list_empty(&root->root_list)) {
+                list_del(&root->root_list);
+                root_count--;
+        }
 
         mutex_unlock(&cgroup_mutex);
 
@@ -2434,7 +2436,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
 err_remove:
 
+        cgroup_lock_hierarchy(root);
         list_del(&cgrp->sibling);
+        cgroup_unlock_hierarchy(root);
         root->number_of_cgroups--;
 
 err_destroy:
@@ -2507,7 +2511,7 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp)
         for_each_subsys(cgrp->root, ss) {
                 struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
                 int refcnt;
-                do {
+                while (1) {
                         /* We can only remove a CSS with a refcnt==1 */
                         refcnt = atomic_read(&css->refcnt);
                         if (refcnt > 1) {
@@ -2521,7 +2525,10 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp)
                          * css_tryget() to spin until we set the
                          * CSS_REMOVED bits or abort
                          */
-                } while (atomic_cmpxchg(&css->refcnt, refcnt, 0) != refcnt);
+                        if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt)
+                                break;
+                        cpu_relax();
+                }
         }
 done:
         for_each_subsys(cgrp->root, ss) {
@@ -2991,20 +2998,21 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
                 mutex_unlock(&cgroup_mutex);
                 return 0;
         }
-        task_lock(tsk);
-        cg = tsk->cgroups;
-        parent = task_cgroup(tsk, subsys->subsys_id);
 
         /* Pin the hierarchy */
-        if (!atomic_inc_not_zero(&parent->root->sb->s_active)) {
+        if (!atomic_inc_not_zero(&root->sb->s_active)) {
                 /* We race with the final deactivate_super() */
                 mutex_unlock(&cgroup_mutex);
                 return 0;
         }
 
         /* Keep the cgroup alive */
+        task_lock(tsk);
+        parent = task_cgroup(tsk, subsys->subsys_id);
+        cg = tsk->cgroups;
         get_css_set(cg);
         task_unlock(tsk);
+
         mutex_unlock(&cgroup_mutex);
 
         /* Now do the VFS work to create a cgroup */
@@ -3043,7 +3051,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
         mutex_unlock(&inode->i_mutex);
         put_css_set(cg);
 
-        deactivate_super(parent->root->sb);
+        deactivate_super(root->sb);
         /* The cgroup is still accessible in the VFS, but
          * we're not going to try to rmdir() it at this
          * point. */
@@ -3069,7 +3077,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
         mutex_lock(&cgroup_mutex);
         put_css_set(cg);
         mutex_unlock(&cgroup_mutex);
-        deactivate_super(parent->root->sb);
+        deactivate_super(root->sb);
         return ret;
 }
 
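The cgroup_clear_css_refs() hunks replace a do { ... } while (cmpxchg != old) construction with an explicit while (1) loop, so the retry path can break on success and insert a cpu_relax() before spinning again. The shape of that loop in isolation (hypothetical helper, not cgroup code):

/* Atomically take a refcount from 1 to 0, failing if the object is
 * still shared; politely spin when racing with a concurrent get/put. */
static int demo_kill_ref(atomic_t *ref)
{
        int refcnt;

        while (1) {
                refcnt = atomic_read(ref);
                if (refcnt > 1)
                        return -EBUSY;          /* object still in use */
                if (atomic_cmpxchg(ref, refcnt, 0) == refcnt)
                        return 0;               /* we won the race */
                cpu_relax();                    /* lost the race; retry */
        }
}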
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 647c77a88fcb..a85678865c5e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -568,7 +568,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  * load balancing domains (sched domains) as specified by that partial
  * partition.
  *
- * See "What is sched_load_balance" in Documentation/cpusets.txt
+ * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
  * for a background explanation of this.
  *
  * Does not return errors, on the theory that the callers of this
diff --git a/kernel/cred.c b/kernel/cred.c
index ff7bc071991c..3a039189d707 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -372,7 +372,8 @@ int commit_creds(struct cred *new)
             old->fsuid != new->fsuid ||
             old->fsgid != new->fsgid ||
             !cap_issubset(new->cap_permitted, old->cap_permitted)) {
-                set_dumpable(task->mm, suid_dumpable);
+                if (task->mm)
+                        set_dumpable(task->mm, suid_dumpable);
                 task->pdeath_signal = 0;
                 smp_wmb();
         }
@@ -506,6 +507,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
         else
                 old = get_cred(&init_cred);
 
+        *new = *old;
         get_uid(new->user);
         get_group_info(new->group_info);
 
@@ -529,6 +531,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 
 error:
         put_cred(new);
+        put_cred(old);
         return NULL;
 }
 EXPORT_SYMBOL(prepare_kernel_cred);
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index 038707404b76..962a3b574f21 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
  * @size: size of requested memory area
  * @dma_handle: This will be filled with the correct dma handle
  * @ret: This pointer will be filled with the virtual address
- * to allocated area.
+ *       to allocated area.
  *
  * This function should be only called from per-arch dma_alloc_coherent()
  * to support allocation from per-device coherent memory pools.
@@ -118,31 +118,32 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
         mem = dev->dma_mem;
         if (!mem)
                 return 0;
-        if (unlikely(size > mem->size))
-                return 0;
+
+        *ret = NULL;
+
+        if (unlikely(size > (mem->size << PAGE_SHIFT)))
+                goto err;
 
         pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
-        if (pageno >= 0) {
-                /*
-                 * Memory was found in the per-device arena.
-                 */
-                *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
-                *ret = mem->virt_base + (pageno << PAGE_SHIFT);
-                memset(*ret, 0, size);
-        } else if (mem->flags & DMA_MEMORY_EXCLUSIVE) {
-                /*
-                 * The per-device arena is exhausted and we are not
-                 * permitted to fall back to generic memory.
-                 */
-                *ret = NULL;
-        } else {
-                /*
-                 * The per-device arena is exhausted and we are
-                 * permitted to fall back to generic memory.
-                 */
-                return 0;
-        }
+        if (unlikely(pageno < 0))
+                goto err;
+
+        /*
+         * Memory was found in the per-device area.
+         */
+        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+        *ret = mem->virt_base + (pageno << PAGE_SHIFT);
+        memset(*ret, 0, size);
+
         return 1;
+
+err:
+        /*
+         * In the case where the allocation can not be satisfied from the
+         * per-device area, try to fall back to generic memory if the
+         * constraints allow it.
+         */
+        return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
 EXPORT_SYMBOL(dma_alloc_from_coherent);
 
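After this rewrite, every failure in dma_alloc_from_coherent() funnels through the err: label, and the return value encodes the fallback policy: non-zero means the request was handled here (on exclusive pools *ret stays NULL, so the allocation fails outright), while zero means the arch wrapper may fall back to the generic allocator. A hedged sketch of a caller, with the fallback helper name invented for illustration:

void *example_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle)
{
        void *vaddr;

        /* Non-zero return: the per-device pool handled the request;
         * vaddr may still be NULL if the pool is exclusive and full. */
        if (dma_alloc_from_coherent(dev, size, handle, &vaddr))
                return vaddr;

        return generic_dma_alloc(dev, size, handle);    /* hypothetical fallback */
}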
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 0511716e9424..667c841c2952 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -209,8 +209,7 @@ static int __init proc_execdomains_init(void)
 module_init(proc_execdomains_init);
 #endif
 
-asmlinkage long
-sys_personality(u_long personality)
+SYSCALL_DEFINE1(personality, u_long, personality)
 {
         u_long old = current->personality;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index c7740fa3252c..f80dec3f1875 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1141,7 +1141,7 @@ NORET_TYPE void complete_and_exit(struct completion *comp, long code)
 
 EXPORT_SYMBOL(complete_and_exit);
 
-asmlinkage long sys_exit(int error_code)
+SYSCALL_DEFINE1(exit, int, error_code)
 {
         do_exit((error_code&0xff)<<8);
 }
@@ -1182,9 +1182,11 @@ do_group_exit(int exit_code)
  * wait4()-ing process will get the correct exit code - even if this
  * thread is not the thread group leader.
  */
-asmlinkage void sys_exit_group(int error_code)
+SYSCALL_DEFINE1(exit_group, int, error_code)
 {
         do_group_exit((error_code & 0xff) << 8);
+        /* NOTREACHED */
+        return 0;
 }
 
 static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
@@ -1752,9 +1754,8 @@ end:
         return retval;
 }
 
-asmlinkage long sys_waitid(int which, pid_t upid,
-                           struct siginfo __user *infop, int options,
-                           struct rusage __user *ru)
+SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
+                infop, int, options, struct rusage __user *, ru)
 {
         struct pid *pid = NULL;
         enum pid_type type;
@@ -1793,8 +1794,8 @@ asmlinkage long sys_waitid(int which, pid_t upid,
         return ret;
 }
 
-asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
-                          int options, struct rusage __user *ru)
+SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
+                int, options, struct rusage __user *, ru)
 {
         struct pid *pid = NULL;
         enum pid_type type;
@@ -1831,7 +1832,7 @@ asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
  * sys_waitpid() remains for compatibility. waitpid() should be
  * implemented by calling sys_wait4() from libc.a.
  */
-asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
+SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
 {
         return sys_wait4(pid, stat_addr, options, NULL);
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 4018308048cf..242a706e7721 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -817,17 +817,17 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
         struct signal_struct *sig;
-        int ret;
 
         if (clone_flags & CLONE_THREAD) {
-                ret = thread_group_cputime_clone_thread(current);
-                if (likely(!ret)) {
-                        atomic_inc(&current->signal->count);
-                        atomic_inc(&current->signal->live);
-                }
-                return ret;
+                atomic_inc(&current->signal->count);
+                atomic_inc(&current->signal->live);
+                return 0;
         }
         sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+
+        if (sig)
+                posix_cpu_timers_init_group(sig);
+
         tsk->signal = sig;
         if (!sig)
                 return -ENOMEM;
@@ -864,8 +864,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
         memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
         task_unlock(current->group_leader);
 
-        posix_cpu_timers_init_group(sig);
-
         acct_init_pacct(&sig->pacct);
 
         tty_audit_fork(sig);
@@ -901,7 +899,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
         clear_freeze_flag(p);
 }
 
-asmlinkage long sys_set_tid_address(int __user *tidptr)
+SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 {
         current->clear_child_tid = tidptr;
 
@@ -1481,12 +1479,10 @@ void __init proc_caches_init(void)
         fs_cachep = kmem_cache_create("fs_cache",
                         sizeof(struct fs_struct), 0,
                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
-        vm_area_cachep = kmem_cache_create("vm_area_struct",
-                        sizeof(struct vm_area_struct), 0,
-                        SLAB_PANIC, NULL);
         mm_cachep = kmem_cache_create("mm_struct",
                         sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+        mmap_init();
 }
 
 /*
@@ -1605,7 +1601,7 @@ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp
  * constructed. Here we are modifying the current, active,
  * task_struct.
  */
-asmlinkage long sys_unshare(unsigned long unshare_flags)
+SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 {
         int err = 0;
         struct fs_struct *fs, *new_fs = NULL;
diff --git a/kernel/futex.c b/kernel/futex.c
index 002aa189eb09..f89d373a9c6d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1733,9 +1733,8 @@ pi_faulted:
  * @head: pointer to the list-head
  * @len: length of the list-head, as userspace expects
  */
-asmlinkage long
-sys_set_robust_list(struct robust_list_head __user *head,
-                    size_t len)
+SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
+                size_t, len)
 {
         if (!futex_cmpxchg_enabled)
                 return -ENOSYS;
@@ -1756,9 +1755,9 @@ sys_set_robust_list(struct robust_list_head __user *head,
  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
  * @len_ptr: pointer to a length field, the kernel fills in the header size
  */
-asmlinkage long
-sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
-                    size_t __user *len_ptr)
+SYSCALL_DEFINE3(get_robust_list, int, pid,
+                struct robust_list_head __user * __user *, head_ptr,
+                size_t __user *, len_ptr)
 {
         struct robust_list_head __user *head;
         unsigned long ret;
@@ -1978,9 +1977,9 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 }
 
 
-asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
-                          struct timespec __user *utime, u32 __user *uaddr2,
-                          u32 val3)
+SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+                struct timespec __user *, utime, u32 __user *, uaddr2,
+                u32, val3)
 {
         struct timespec ts;
         ktime_t t, *tp = NULL;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1455b7651b6b..f394d2a42ca3 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -501,6 +501,13 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
                         continue;
                 timer = rb_entry(base->first, struct hrtimer, node);
                 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+                /*
+                 * clock_was_set() has changed base->offset so the
+                 * result might be negative. Fix it up to prevent a
+                 * false positive in clockevents_program_event()
+                 */
+                if (expires.tv64 < 0)
+                        expires.tv64 = 0;
                 if (expires.tv64 < cpu_base->expires_next.tv64)
                         cpu_base->expires_next = expires;
         }
@@ -614,7 +621,9 @@ void clock_was_set(void)
  */
 void hres_timers_resume(void)
 {
-        /* Retrigger the CPU local events: */
+        WARN_ONCE(!irqs_disabled(),
+                  KERN_INFO "hres_timers_resume() called with IRQs enabled!");
+
         retrigger_next_event(NULL);
 }
 
@@ -1156,6 +1165,29 @@ static void __run_hrtimer(struct hrtimer *timer)
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
+static int force_clock_reprogram;
+
+/*
+ * After 5 iteration's attempts, we consider that hrtimer_interrupt()
+ * is hanging, which could happen with something that slows the interrupt
+ * such as the tracing. Then we force the clock reprogramming for each future
+ * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
+ * threshold that we will overwrite.
+ * The next tick event will be scheduled to 3 times we currently spend on
+ * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
+ * 1/4 of their time to process the hrtimer interrupts. This is enough to
+ * let it running without serious starvation.
+ */
+
+static inline void
+hrtimer_interrupt_hanging(struct clock_event_device *dev,
+                          ktime_t try_time)
+{
+        force_clock_reprogram = 1;
+        dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
+        printk(KERN_WARNING "hrtimer: interrupt too slow, "
+                "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
+}
 /*
  * High resolution timer interrupt
  * Called with interrupts disabled
@@ -1165,6 +1197,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
         struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
         struct hrtimer_clock_base *base;
         ktime_t expires_next, now;
+        int nr_retries = 0;
         int i;
 
         BUG_ON(!cpu_base->hres_active);
@@ -1172,6 +1205,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
         dev->next_event.tv64 = KTIME_MAX;
 
 retry:
+        /* 5 retries is enough to notice a hang */
+        if (!(++nr_retries % 5))
+                hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
+
         now = ktime_get();
 
         expires_next.tv64 = KTIME_MAX;
@@ -1224,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
         /* Reprogramming necessary ? */
         if (expires_next.tv64 != KTIME_MAX) {
-                if (tick_program_event(expires_next, 0))
+                if (tick_program_event(expires_next, force_clock_reprogram))
                         goto retry;
         }
 }
@@ -1467,8 +1504,8 @@ out:
         return ret;
 }
 
-asmlinkage long
-sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
+SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
+                struct timespec __user *, rmtp)
 {
         struct timespec tu;
 
@@ -1578,6 +1615,10 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
                 break;
 
 #ifdef CONFIG_HOTPLUG_CPU
+        case CPU_DYING:
+        case CPU_DYING_FROZEN:
+                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
+                break;
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
         {
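The hrtimer_interrupt() hunks bound the retry loop: !(++nr_retries % 5) is true on every fifth pass, at which point hrtimer_interrupt_hanging() widens min_delta_ns instead of letting a slow handler loop forever. The counter idiom on its own, with hypothetical helpers:

static void demo_bounded_retry(void)
{
        int nr_retries = 0;

retry:
        /* fires when nr_retries reaches 5, 10, 15, ... */
        if (!(++nr_retries % 5))
                demo_escalate();        /* hypothetical recovery step */

        if (demo_try_once())            /* hypothetical; non-zero = must retry */
                goto retry;
}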
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f63c706d25e1..7de11bd64dfe 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -383,6 +383,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 out_unlock:
         spin_unlock(&desc->lock);
 }
+EXPORT_SYMBOL_GPL(handle_level_irq);
 
 /**
  * handle_fasteoi_irq - irq handler for transparent controllers
@@ -593,6 +594,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
         }
         spin_unlock_irqrestore(&desc->lock, flags);
 }
+EXPORT_SYMBOL_GPL(__set_irq_handler);
 
 void
 set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c20db0be9173..3aba8d12f328 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -39,6 +39,18 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
         ack_bad_irq(irq);
 }
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+static void __init init_irq_default_affinity(void)
+{
+        alloc_bootmem_cpumask_var(&irq_default_affinity);
+        cpumask_setall(irq_default_affinity);
+}
+#else
+static void __init init_irq_default_affinity(void)
+{
+}
+#endif
+
 /*
  * Linux has a controller-independent interrupt architecture.
  * Every controller has a 'controller-template', that is used
@@ -134,6 +146,8 @@ int __init early_irq_init(void)
         int legacy_count;
         int i;
 
+        init_irq_default_affinity();
+
         desc = irq_desc_legacy;
         legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
@@ -219,6 +233,8 @@ int __init early_irq_init(void)
         int count;
         int i;
 
+        init_irq_default_affinity();
+
         desc = irq_desc;
         count = ARRAY_SIZE(irq_desc);
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cd0cd8dcb345..291f03664552 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -15,17 +15,9 @@
 
 #include "internals.h"
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
 cpumask_var_t irq_default_affinity;
 
-static int init_irq_default_affinity(void)
-{
-        alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
-        cpumask_setall(irq_default_affinity);
-        return 0;
-}
-core_initcall(init_irq_default_affinity);
-
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
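Taken together, the two irq/ hunks move default-affinity setup from a core_initcall (which allocated with GFP_KERNEL, too late for early users) into early_irq_init() via a bootmem allocation, and give init_irq_default_affinity() an empty stub when the configuration does not need it, so callers stay free of #ifdefs. The real-function-or-stub pattern in miniature (hypothetical config symbol):

#ifdef CONFIG_DEMO_FEATURE
static void __init demo_feature_init(void)
{
        /* real early-boot setup here */
}
#else
static void __init demo_feature_init(void)
{
        /* nothing to do in this configuration */
}
#endif

/* callers need no #ifdef of their own: */
void __init demo_early_init(void)
{
        demo_feature_init();
}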
diff --git a/kernel/itimer.c b/kernel/itimer.c
index db7c358b9a02..6a5fe93dd8bd 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -100,7 +100,7 @@ int do_getitimer(int which, struct itimerval *value)
         return 0;
 }
 
-asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
+SYSCALL_DEFINE2(getitimer, int, which, struct itimerval __user *, value)
 {
         int error = -EFAULT;
         struct itimerval get_buffer;
@@ -260,9 +260,8 @@ unsigned int alarm_setitimer(unsigned int seconds)
         return it_old.it_value.tv_sec;
 }
 
-asmlinkage long sys_setitimer(int which,
-                              struct itimerval __user *value,
-                              struct itimerval __user *ovalue)
+SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
+                struct itimerval __user *, ovalue)
 {
         struct itimerval set_buffer, get_buffer;
         int error;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index e694afa0eb8c..7b8b0f21a5b1 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -30,19 +30,20 @@
 #define all_var 0
 #endif
 
-extern const unsigned long kallsyms_addresses[];
-extern const u8 kallsyms_names[];
+/* These will be re-linked against their real values during the second link stage */
+extern const unsigned long kallsyms_addresses[] __attribute__((weak));
+extern const u8 kallsyms_names[] __attribute__((weak));
 
 /* tell the compiler that the count isn't in the small data section if the arch
  * has one (eg: FRV)
  */
 extern const unsigned long kallsyms_num_syms
-__attribute__((__section__(".rodata")));
+__attribute__((weak, section(".rodata")));
 
-extern const u8 kallsyms_token_table[];
-extern const u16 kallsyms_token_index[];
+extern const u8 kallsyms_token_table[] __attribute__((weak));
+extern const u16 kallsyms_token_index[] __attribute__((weak));
 
-extern const unsigned long kallsyms_markers[];
+extern const unsigned long kallsyms_markers[] __attribute__((weak));
 
 static inline int is_kernel_inittext(unsigned long addr)
 {
@@ -167,6 +168,9 @@ static unsigned long get_symbol_pos(unsigned long addr,
         unsigned long symbol_start = 0, symbol_end = 0;
         unsigned long i, low, high, mid;
 
+        /* This kernel should never had been booted. */
+        BUG_ON(!kallsyms_addresses);
+
         /* do a binary search on the sorted kallsyms_addresses array */
         low = 0;
         high = kallsyms_num_syms;
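Marking the kallsyms tables __attribute__((weak)) lets the first-pass kernel link (before the tables have been generated) resolve them to NULL instead of failing; the second link stage supplies the strong definitions, and the new BUG_ON catches a kernel that somehow booted without them. The weak-array trick in isolation, with an illustrative name:

/* If no strong definition of demo_table is linked in, the weak
 * declaration resolves to a NULL address we can test at run time. */
extern const unsigned long demo_table[] __attribute__((weak));

static int demo_table_present(void)
{
        return demo_table != NULL;
}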
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 3fb855ad6aa0..8a6d7b08864e 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -934,9 +934,8 @@ struct kimage *kexec_crash_image;
 
 static DEFINE_MUTEX(kexec_mutex);
 
-asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
-                                struct kexec_segment __user *segments,
-                                unsigned long flags)
+SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+                struct kexec_segment __user *, segments, unsigned long, flags)
 {
         struct kimage **dest_image, *image;
         int result;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1b9cbdc0127a..7ba8cd9845cb 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -123,7 +123,7 @@ static int collect_garbage_slots(void);
 static int __kprobes check_safety(void)
 {
         int ret = 0;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
         ret = freeze_processes();
         if (ret == 0) {
                 struct task_struct *p, *q;
diff --git a/kernel/module.c b/kernel/module.c
index c9332c90d5a0..e8b51d41dd72 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -743,8 +743,8 @@ static void wait_for_zero_refcount(struct module *mod)
         mutex_lock(&module_mutex);
 }
 
-asmlinkage long
-sys_delete_module(const char __user *name_user, unsigned int flags)
+SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
+                unsigned int, flags)
 {
         struct module *mod;
         char name[MODULE_NAME_LEN];
@@ -2296,10 +2296,8 @@ static noinline struct module *load_module(void __user *umod,
 }
 
 /* This is where the real work happens */
-asmlinkage long
-sys_init_module(void __user *umod,
-                unsigned long len,
-                const char __user *uargs)
+SYSCALL_DEFINE3(init_module, void __user *, umod,
+                unsigned long, len, const char __user *, uargs)
 {
         struct module *mod;
         int ret = 0;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 157de3a47832..fa07da94d7be 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -10,76 +10,6 @@
 #include <linux/kernel_stat.h>
 
 /*
- * Allocate the thread_group_cputime structure appropriately and fill in the
- * current values of the fields. Called from copy_signal() via
- * thread_group_cputime_clone_thread() when adding a second or subsequent
- * thread to a thread group. Assumes interrupts are enabled when called.
- */
-int thread_group_cputime_alloc(struct task_struct *tsk)
-{
-        struct signal_struct *sig = tsk->signal;
-        struct task_cputime *cputime;
-
-        /*
-         * If we have multiple threads and we don't already have a
-         * per-CPU task_cputime struct (checked in the caller), allocate
-         * one and fill it in with the times accumulated so far. We may
-         * race with another thread so recheck after we pick up the sighand
-         * lock.
-         */
-        cputime = alloc_percpu(struct task_cputime);
-        if (cputime == NULL)
-                return -ENOMEM;
-        spin_lock_irq(&tsk->sighand->siglock);
-        if (sig->cputime.totals) {
-                spin_unlock_irq(&tsk->sighand->siglock);
-                free_percpu(cputime);
-                return 0;
-        }
-        sig->cputime.totals = cputime;
-        cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
-        cputime->utime = tsk->utime;
-        cputime->stime = tsk->stime;
-        cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
-        spin_unlock_irq(&tsk->sighand->siglock);
-        return 0;
-}
-
-/**
- * thread_group_cputime - Sum the thread group time fields across all CPUs.
- *
- * @tsk: The task we use to identify the thread group.
- * @times: task_cputime structure in which we return the summed fields.
- *
- * Walk the list of CPUs to sum the per-CPU time fields in the thread group
- * time structure.
- */
-void thread_group_cputime(
-        struct task_struct *tsk,
-        struct task_cputime *times)
-{
-        struct task_cputime *totals, *tot;
-        int i;
-
-        totals = tsk->signal->cputime.totals;
-        if (!totals) {
-                times->utime = tsk->utime;
-                times->stime = tsk->stime;
-                times->sum_exec_runtime = tsk->se.sum_exec_runtime;
-                return;
-        }
-
-        times->stime = times->utime = cputime_zero;
-        times->sum_exec_runtime = 0;
-        for_each_possible_cpu(i) {
-                tot = per_cpu_ptr(totals, i);
-                times->utime = cputime_add(times->utime, tot->utime);
-                times->stime = cputime_add(times->stime, tot->stime);
-                times->sum_exec_runtime += tot->sum_exec_runtime;
-        }
-}
-
-/*
  * Called after updating RLIMIT_CPU to set timer expiration if necessary.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 887c63787de6..052ec4d195c7 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -477,10 +477,9 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
 
 /* Create a POSIX.1b interval timer. */
 
-asmlinkage long
-sys_timer_create(const clockid_t which_clock,
-                 struct sigevent __user *timer_event_spec,
-                 timer_t __user * created_timer_id)
+SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+                struct sigevent __user *, timer_event_spec,
+                timer_t __user *, created_timer_id)
 {
         struct k_itimer *new_timer;
         int error, new_timer_id;
@@ -661,8 +660,8 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 }
 
 /* Get the time remaining on a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
+SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
+                struct itimerspec __user *, setting)
 {
         struct k_itimer *timr;
         struct itimerspec cur_setting;
@@ -691,8 +690,7 @@ sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
  * the call back to do_schedule_next_timer(). So all we need to do is
  * to pick up the frozen overrun.
  */
-asmlinkage long
-sys_timer_getoverrun(timer_t timer_id)
+SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
 {
         struct k_itimer *timr;
         int overrun;
@@ -760,10 +758,9 @@ common_timer_set(struct k_itimer *timr, int flags,
 }
 
 /* Set a POSIX.1b interval timer */
-asmlinkage long
-sys_timer_settime(timer_t timer_id, int flags,
-                  const struct itimerspec __user *new_setting,
-                  struct itimerspec __user *old_setting)
+SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+                const struct itimerspec __user *, new_setting,
+                struct itimerspec __user *, old_setting)
 {
         struct k_itimer *timr;
         struct itimerspec new_spec, old_spec;
@@ -816,8 +813,7 @@ static inline int timer_delete_hook(struct k_itimer *timer)
 }
 
 /* Delete a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_delete(timer_t timer_id)
+SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
 {
         struct k_itimer *timer;
         unsigned long flags;
@@ -903,8 +899,8 @@ int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
 }
 EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
 
-asmlinkage long sys_clock_settime(const clockid_t which_clock,
-                                  const struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+                const struct timespec __user *, tp)
 {
         struct timespec new_tp;
 
@@ -916,8 +912,8 @@ asmlinkage long sys_clock_settime(const clockid_t which_clock,
         return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
 }
 
-asmlinkage long
-sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
+                struct timespec __user *, tp)
 {
         struct timespec kernel_tp;
         int error;
@@ -933,8 +929,8 @@ sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
 
 }
 
-asmlinkage long
-sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
+                struct timespec __user *, tp)
 {
         struct timespec rtn_tp;
         int error;
@@ -963,10 +959,9 @@ static int common_nsleep(const clockid_t which_clock, int flags,
                          which_clock);
 }
 
-asmlinkage long
-sys_clock_nanosleep(const clockid_t which_clock, int flags,
-                    const struct timespec __user *rqtp,
-                    struct timespec __user *rmtp)
+SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+                const struct timespec __user *, rqtp,
+                struct timespec __user *, rmtp)
 {
         struct timespec t;
 
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 597823b5b700..d7a10167a25b 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -4,7 +4,8 @@ EXTRA_CFLAGS += -DDEBUG
 endif
 
 obj-y                           := main.o
-obj-$(CONFIG_PM_SLEEP)          += process.o console.o
+obj-$(CONFIG_PM_SLEEP)          += console.o
+obj-$(CONFIG_FREEZER)           += process.o
 obj-$(CONFIG_HIBERNATION)       += swsusp.o disk.o snapshot.o swap.o user.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)       += poweroff.o
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index f77d3819ef57..432ee575c9ee 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -71,6 +71,14 @@ void hibernation_set_ops(struct platform_hibernation_ops *ops)
         mutex_unlock(&pm_mutex);
 }
 
+static bool entering_platform_hibernation;
+
+bool system_entering_hibernation(void)
+{
+        return entering_platform_hibernation;
+}
+EXPORT_SYMBOL(system_entering_hibernation);
+
 #ifdef CONFIG_PM_DEBUG
 static void hibernation_debug_sleep(void)
 {
@@ -258,12 +266,12 @@ int hibernation_snapshot(int platform_mode)
 {
         int error;
 
-        /* Free memory before shutting down devices. */
-        error = swsusp_shrink_memory();
+        error = platform_begin(platform_mode);
         if (error)
                 return error;
 
-        error = platform_begin(platform_mode);
+        /* Free memory before shutting down devices. */
+        error = swsusp_shrink_memory();
         if (error)
                 goto Close;
 
@@ -411,6 +419,7 @@ int hibernation_platform_enter(void)
         if (error)
                 goto Close;
 
+        entering_platform_hibernation = true;
         suspend_console();
         error = device_suspend(PMSG_HIBERNATE);
         if (error) {
@@ -445,6 +454,7 @@ int hibernation_platform_enter(void)
  Finish:
         hibernation_ops->finish();
  Resume_devices:
+        entering_platform_hibernation = false;
         device_resume(PMSG_RESTORE);
         resume_console();
  Close:
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 5d2ab836e998..f5fc2d7680f2 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/syscalls.h> | 25 | #include <linux/syscalls.h> |
| 26 | #include <linux/console.h> | 26 | #include <linux/console.h> |
| 27 | #include <linux/highmem.h> | 27 | #include <linux/highmem.h> |
| 28 | #include <linux/list.h> | ||
| 28 | 29 | ||
| 29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
| 30 | #include <asm/mmu_context.h> | 31 | #include <asm/mmu_context.h> |
| @@ -192,12 +193,6 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size) | |||
| 192 | return ret; | 193 | return ret; |
| 193 | } | 194 | } |
| 194 | 195 | ||
| 195 | static void chain_free(struct chain_allocator *ca, int clear_page_nosave) | ||
| 196 | { | ||
| 197 | free_list_of_pages(ca->chain, clear_page_nosave); | ||
| 198 | memset(ca, 0, sizeof(struct chain_allocator)); | ||
| 199 | } | ||
| 200 | |||
| 201 | /** | 196 | /** |
| 202 | * Data types related to memory bitmaps. | 197 | * Data types related to memory bitmaps. |
| 203 | * | 198 | * |
| @@ -233,7 +228,7 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave) | |||
| 233 | #define BM_BITS_PER_BLOCK (PAGE_SIZE << 3) | 228 | #define BM_BITS_PER_BLOCK (PAGE_SIZE << 3) |
| 234 | 229 | ||
| 235 | struct bm_block { | 230 | struct bm_block { |
| 236 | struct bm_block *next; /* next element of the list */ | 231 | struct list_head hook; /* hook into a list of bitmap blocks */ |
| 237 | unsigned long start_pfn; /* pfn represented by the first bit */ | 232 | unsigned long start_pfn; /* pfn represented by the first bit */ |
| 238 | unsigned long end_pfn; /* pfn represented by the last bit plus 1 */ | 233 | unsigned long end_pfn; /* pfn represented by the last bit plus 1 */ |
| 239 | unsigned long *data; /* bitmap representing pages */ | 234 | unsigned long *data; /* bitmap representing pages */ |
| @@ -244,24 +239,15 @@ static inline unsigned long bm_block_bits(struct bm_block *bb) | |||
| 244 | return bb->end_pfn - bb->start_pfn; | 239 | return bb->end_pfn - bb->start_pfn; |
| 245 | } | 240 | } |
| 246 | 241 | ||
| 247 | struct zone_bitmap { | ||
| 248 | struct zone_bitmap *next; /* next element of the list */ | ||
| 249 | unsigned long start_pfn; /* minimal pfn in this zone */ | ||
| 250 | unsigned long end_pfn; /* maximal pfn in this zone plus 1 */ | ||
| 251 | struct bm_block *bm_blocks; /* list of bitmap blocks */ | ||
| 252 | struct bm_block *cur_block; /* recently used bitmap block */ | ||
| 253 | }; | ||
| 254 | |||
| 255 | /* struct bm_position is used for browsing memory bitmaps */ | 242 | /* struct bm_position is used for browsing memory bitmaps */ |
| 256 | 243 | ||
| 257 | struct bm_position { | 244 | struct bm_position { |
| 258 | struct zone_bitmap *zone_bm; | ||
| 259 | struct bm_block *block; | 245 | struct bm_block *block; |
| 260 | int bit; | 246 | int bit; |
| 261 | }; | 247 | }; |
| 262 | 248 | ||
| 263 | struct memory_bitmap { | 249 | struct memory_bitmap { |
| 264 | struct zone_bitmap *zone_bm_list; /* list of zone bitmaps */ | 250 | struct list_head blocks; /* list of bitmap blocks */ |
| 265 | struct linked_page *p_list; /* list of pages used to store zone | 251 | struct linked_page *p_list; /* list of pages used to store zone |
| 266 | * bitmap objects and bitmap block | 252 | * bitmap objects and bitmap block |
| 267 | * objects | 253 | * objects |
| @@ -273,11 +259,7 @@ struct memory_bitmap { | |||
| 273 | 259 | ||
| 274 | static void memory_bm_position_reset(struct memory_bitmap *bm) | 260 | static void memory_bm_position_reset(struct memory_bitmap *bm) |
| 275 | { | 261 | { |
| 276 | struct zone_bitmap *zone_bm; | 262 | bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook); |
| 277 | |||
| 278 | zone_bm = bm->zone_bm_list; | ||
| 279 | bm->cur.zone_bm = zone_bm; | ||
| 280 | bm->cur.block = zone_bm->bm_blocks; | ||
| 281 | bm->cur.bit = 0; | 263 | bm->cur.bit = 0; |
| 282 | } | 264 | } |
| 283 | 265 | ||
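
memory_bm_position_reset() now pulls the first block straight off the list head. list_entry() is container_of(): given a pointer to the embedded list_head it returns the enclosing object. A free-standing sketch of the idiom (illustrative names; assumes <linux/list.h> and a non-empty list, just as the reset function does):

        struct bm_block {
                struct list_head hook;          /* embedded link */
                unsigned long start_pfn;
        };

        LIST_HEAD(blocks);
        /* ... blocks populated elsewhere ... */

        struct bm_block *first = list_entry(blocks.next, struct bm_block, hook);
        /* identical to: container_of(blocks.next, struct bm_block, hook) */
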
| @@ -285,151 +267,184 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free); | |||
| 285 | 267 | ||
| 286 | /** | 268 | /** |
| 287 | * create_bm_block_list - create a list of block bitmap objects | 269 | * create_bm_block_list - create a list of block bitmap objects |
| 270 | * @nr_blocks - number of blocks to allocate | ||
| 271 | * @list - list to put the allocated blocks into | ||
| 272 | * @ca - chain allocator to be used for allocating memory | ||
| 288 | */ | 273 | */ |
| 289 | 274 | static int create_bm_block_list(unsigned long pages, | |
| 290 | static inline struct bm_block * | 275 | struct list_head *list, |
| 291 | create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca) | 276 | struct chain_allocator *ca) |
| 292 | { | 277 | { |
| 293 | struct bm_block *bblist = NULL; | 278 | unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK); |
| 294 | 279 | ||
| 295 | while (nr_blocks-- > 0) { | 280 | while (nr_blocks-- > 0) { |
| 296 | struct bm_block *bb; | 281 | struct bm_block *bb; |
| 297 | 282 | ||
| 298 | bb = chain_alloc(ca, sizeof(struct bm_block)); | 283 | bb = chain_alloc(ca, sizeof(struct bm_block)); |
| 299 | if (!bb) | 284 | if (!bb) |
| 300 | return NULL; | 285 | return -ENOMEM; |
| 301 | 286 | list_add(&bb->hook, list); | |
| 302 | bb->next = bblist; | ||
| 303 | bblist = bb; | ||
| 304 | } | 287 | } |
| 305 | return bblist; | 288 | |
| 289 | return 0; | ||
| 306 | } | 290 | } |
| 307 | 291 | ||
| 292 | struct mem_extent { | ||
| 293 | struct list_head hook; | ||
| 294 | unsigned long start; | ||
| 295 | unsigned long end; | ||
| 296 | }; | ||
| 297 | |||
| 308 | /** | 298 | /** |
| 309 | * create_zone_bm_list - create a list of zone bitmap objects | 299 | * free_mem_extents - free a list of memory extents |
| 300 | * @list - list of extents to empty | ||
| 310 | */ | 301 | */ |
| 302 | static void free_mem_extents(struct list_head *list) | ||
| 303 | { | ||
| 304 | struct mem_extent *ext, *aux; | ||
| 311 | 305 | ||
| 312 | static inline struct zone_bitmap * | 306 | list_for_each_entry_safe(ext, aux, list, hook) { |
| 313 | create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca) | 307 | list_del(&ext->hook); |
| 308 | kfree(ext); | ||
| 309 | } | ||
| 310 | } | ||
| 311 | |||
| 312 | /** | ||
| 313 | * create_mem_extents - create a list of memory extents representing | ||
| 314 | * contiguous ranges of PFNs | ||
| 315 | * @list - list to put the extents into | ||
| 316 | * @gfp_mask - mask to use for memory allocations | ||
| 317 | */ | ||
| 318 | static int create_mem_extents(struct list_head *list, gfp_t gfp_mask) | ||
| 314 | { | 319 | { |
| 315 | struct zone_bitmap *zbmlist = NULL; | 320 | struct zone *zone; |
| 316 | 321 | ||
| 317 | while (nr_zones-- > 0) { | 322 | INIT_LIST_HEAD(list); |
| 318 | struct zone_bitmap *zbm; | ||
| 319 | 323 | ||
| 320 | zbm = chain_alloc(ca, sizeof(struct zone_bitmap)); | 324 | for_each_zone(zone) { |
| 321 | if (!zbm) | 325 | unsigned long zone_start, zone_end; |
| 322 | return NULL; | 326 | struct mem_extent *ext, *cur, *aux; |
| 327 | |||
| 328 | if (!populated_zone(zone)) | ||
| 329 | continue; | ||
| 323 | 330 | ||
| 324 | zbm->next = zbmlist; | 331 | zone_start = zone->zone_start_pfn; |
| 325 | zbmlist = zbm; | 332 | zone_end = zone->zone_start_pfn + zone->spanned_pages; |
| 333 | |||
| 334 | list_for_each_entry(ext, list, hook) | ||
| 335 | if (zone_start <= ext->end) | ||
| 336 | break; | ||
| 337 | |||
| 338 | if (&ext->hook == list || zone_end < ext->start) { | ||
| 339 | /* New extent is necessary */ | ||
| 340 | struct mem_extent *new_ext; | ||
| 341 | |||
| 342 | new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask); | ||
| 343 | if (!new_ext) { | ||
| 344 | free_mem_extents(list); | ||
| 345 | return -ENOMEM; | ||
| 346 | } | ||
| 347 | new_ext->start = zone_start; | ||
| 348 | new_ext->end = zone_end; | ||
| 349 | list_add_tail(&new_ext->hook, &ext->hook); | ||
| 350 | continue; | ||
| 351 | } | ||
| 352 | |||
| 353 | /* Merge this zone's range of PFNs with the existing one */ | ||
| 354 | if (zone_start < ext->start) | ||
| 355 | ext->start = zone_start; | ||
| 356 | if (zone_end > ext->end) | ||
| 357 | ext->end = zone_end; | ||
| 358 | |||
| 359 | /* More merging may be possible */ | ||
| 360 | cur = ext; | ||
| 361 | list_for_each_entry_safe_continue(cur, aux, list, hook) { | ||
| 362 | if (zone_end < cur->start) | ||
| 363 | break; | ||
| 364 | if (zone_end < cur->end) | ||
| 365 | ext->end = cur->end; | ||
| 366 | list_del(&cur->hook); | ||
| 367 | kfree(cur); | ||
| 368 | } | ||
| 326 | } | 369 | } |
| 327 | return zbmlist; | 370 | |
| 371 | return 0; | ||
| 328 | } | 372 | } |
| 329 | 373 | ||
| 330 | /** | 374 | /** |
| 331 | * memory_bm_create - allocate memory for a memory bitmap | 375 | * memory_bm_create - allocate memory for a memory bitmap |
| 332 | */ | 376 | */ |
| 333 | |||
| 334 | static int | 377 | static int |
| 335 | memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed) | 378 | memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed) |
| 336 | { | 379 | { |
| 337 | struct chain_allocator ca; | 380 | struct chain_allocator ca; |
| 338 | struct zone *zone; | 381 | struct list_head mem_extents; |
| 339 | struct zone_bitmap *zone_bm; | 382 | struct mem_extent *ext; |
| 340 | struct bm_block *bb; | 383 | int error; |
| 341 | unsigned int nr; | ||
| 342 | 384 | ||
| 343 | chain_init(&ca, gfp_mask, safe_needed); | 385 | chain_init(&ca, gfp_mask, safe_needed); |
| 386 | INIT_LIST_HEAD(&bm->blocks); | ||
| 344 | 387 | ||
| 345 | /* Compute the number of zones */ | 388 | error = create_mem_extents(&mem_extents, gfp_mask); |
| 346 | nr = 0; | 389 | if (error) |
| 347 | for_each_zone(zone) | 390 | return error; |
| 348 | if (populated_zone(zone)) | ||
| 349 | nr++; | ||
| 350 | |||
| 351 | /* Allocate the list of zones bitmap objects */ | ||
| 352 | zone_bm = create_zone_bm_list(nr, &ca); | ||
| 353 | bm->zone_bm_list = zone_bm; | ||
| 354 | if (!zone_bm) { | ||
| 355 | chain_free(&ca, PG_UNSAFE_CLEAR); | ||
| 356 | return -ENOMEM; | ||
| 357 | } | ||
| 358 | |||
| 359 | /* Initialize the zone bitmap objects */ | ||
| 360 | for_each_zone(zone) { | ||
| 361 | unsigned long pfn; | ||
| 362 | 391 | ||
| 363 | if (!populated_zone(zone)) | 392 | list_for_each_entry(ext, &mem_extents, hook) { |
| 364 | continue; | 393 | struct bm_block *bb; |
| 394 | unsigned long pfn = ext->start; | ||
| 395 | unsigned long pages = ext->end - ext->start; | ||
| 365 | 396 | ||
| 366 | zone_bm->start_pfn = zone->zone_start_pfn; | 397 | bb = list_entry(bm->blocks.prev, struct bm_block, hook); |
| 367 | zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages; | ||
| 368 | /* Allocate the list of bitmap block objects */ | ||
| 369 | nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); | ||
| 370 | bb = create_bm_block_list(nr, &ca); | ||
| 371 | zone_bm->bm_blocks = bb; | ||
| 372 | zone_bm->cur_block = bb; | ||
| 373 | if (!bb) | ||
| 374 | goto Free; | ||
| 375 | 398 | ||
| 376 | nr = zone->spanned_pages; | 399 | error = create_bm_block_list(pages, bm->blocks.prev, &ca); |
| 377 | pfn = zone->zone_start_pfn; | 400 | if (error) |
| 378 | /* Initialize the bitmap block objects */ | 401 | goto Error; |
| 379 | while (bb) { | ||
| 380 | unsigned long *ptr; | ||
| 381 | 402 | ||
| 382 | ptr = get_image_page(gfp_mask, safe_needed); | 403 | list_for_each_entry_continue(bb, &bm->blocks, hook) { |
| 383 | bb->data = ptr; | 404 | bb->data = get_image_page(gfp_mask, safe_needed); |
| 384 | if (!ptr) | 405 | if (!bb->data) { |
| 385 | goto Free; | 406 | error = -ENOMEM; |
| 407 | goto Error; | ||
| 408 | } | ||
| 386 | 409 | ||
| 387 | bb->start_pfn = pfn; | 410 | bb->start_pfn = pfn; |
| 388 | if (nr >= BM_BITS_PER_BLOCK) { | 411 | if (pages >= BM_BITS_PER_BLOCK) { |
| 389 | pfn += BM_BITS_PER_BLOCK; | 412 | pfn += BM_BITS_PER_BLOCK; |
| 390 | nr -= BM_BITS_PER_BLOCK; | 413 | pages -= BM_BITS_PER_BLOCK; |
| 391 | } else { | 414 | } else { |
| 392 | /* This is executed only once in the loop */ | 415 | /* This is executed only once in the loop */ |
| 393 | pfn += nr; | 416 | pfn += pages; |
| 394 | } | 417 | } |
| 395 | bb->end_pfn = pfn; | 418 | bb->end_pfn = pfn; |
| 396 | bb = bb->next; | ||
| 397 | } | 419 | } |
| 398 | zone_bm = zone_bm->next; | ||
| 399 | } | 420 | } |
| 421 | |||
| 400 | bm->p_list = ca.chain; | 422 | bm->p_list = ca.chain; |
| 401 | memory_bm_position_reset(bm); | 423 | memory_bm_position_reset(bm); |
| 402 | return 0; | 424 | Exit: |
| 425 | free_mem_extents(&mem_extents); | ||
| 426 | return error; | ||
| 403 | 427 | ||
| 404 | Free: | 428 | Error: |
| 405 | bm->p_list = ca.chain; | 429 | bm->p_list = ca.chain; |
| 406 | memory_bm_free(bm, PG_UNSAFE_CLEAR); | 430 | memory_bm_free(bm, PG_UNSAFE_CLEAR); |
| 407 | return -ENOMEM; | 431 | goto Exit; |
| 408 | } | 432 | } |
| 409 | 433 | ||
| 410 | /** | 434 | /** |
| 411 | * memory_bm_free - free memory occupied by the memory bitmap @bm | 435 | * memory_bm_free - free memory occupied by the memory bitmap @bm |
| 412 | */ | 436 | */ |
| 413 | |||
| 414 | static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) | 437 | static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) |
| 415 | { | 438 | { |
| 416 | struct zone_bitmap *zone_bm; | 439 | struct bm_block *bb; |
| 417 | 440 | ||
| 418 | /* Free the list of bit blocks for each zone_bitmap object */ | 441 | list_for_each_entry(bb, &bm->blocks, hook) |
| 419 | zone_bm = bm->zone_bm_list; | 442 | if (bb->data) |
| 420 | while (zone_bm) { | 443 | free_image_page(bb->data, clear_nosave_free); |
| 421 | struct bm_block *bb; | ||
| 422 | 444 | ||
| 423 | bb = zone_bm->bm_blocks; | ||
| 424 | while (bb) { | ||
| 425 | if (bb->data) | ||
| 426 | free_image_page(bb->data, clear_nosave_free); | ||
| 427 | bb = bb->next; | ||
| 428 | } | ||
| 429 | zone_bm = zone_bm->next; | ||
| 430 | } | ||
| 431 | free_list_of_pages(bm->p_list, clear_nosave_free); | 445 | free_list_of_pages(bm->p_list, clear_nosave_free); |
| 432 | bm->zone_bm_list = NULL; | 446 | |
| 447 | INIT_LIST_HEAD(&bm->blocks); | ||
| 433 | } | 448 | } |
| 434 | 449 | ||
| 435 | /** | 450 | /** |
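
create_mem_extents() collapses possibly-overlapping, unsorted zone PFN ranges into a sorted list of disjoint extents, which is what lets memory_bm_create() drop the per-zone bitmaps entirely. A worked trace on a hypothetical zone layout:

        /* Zones seen, as (start_pfn, end_pfn):
         *   (0, 4096)  (3072, 8192)  (16384, 20480)
         *
         * zone (0, 4096):      list empty             -> { [0, 4096) }
         * zone (3072, 8192):   3072 <= 4096, merge    -> { [0, 8192) }
         * zone (16384, 20480): 16384 > 8192, insert   -> { [0, 8192), [16384, 20480) }
         *
         * memory_bm_create() then allocates
         * DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK) bm_block entries per
         * extent rather than per zone. */
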
| @@ -437,38 +452,33 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) | |||
| 437 | * to given pfn. The cur_zone_bm member of @bm and the cur_block member | 452 | * to given pfn. The cur_zone_bm member of @bm and the cur_block member |
| 438 | * of @bm->cur_zone_bm are updated. | 453 | * of @bm->cur_zone_bm are updated. |
| 439 | */ | 454 | */ |
| 440 | |||
| 441 | static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, | 455 | static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, |
| 442 | void **addr, unsigned int *bit_nr) | 456 | void **addr, unsigned int *bit_nr) |
| 443 | { | 457 | { |
| 444 | struct zone_bitmap *zone_bm; | ||
| 445 | struct bm_block *bb; | 458 | struct bm_block *bb; |
| 446 | 459 | ||
| 447 | /* Check if the pfn is from the current zone */ | 460 | /* |
| 448 | zone_bm = bm->cur.zone_bm; | 461 | * Check if the pfn corresponds to the current bitmap block and find |
| 449 | if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) { | 462 | * the block where it fits if this is not the case. |
| 450 | zone_bm = bm->zone_bm_list; | 463 | */ |
| 451 | /* We don't assume that the zones are sorted by pfns */ | 464 | bb = bm->cur.block; |
| 452 | while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) { | ||
| 453 | zone_bm = zone_bm->next; | ||
| 454 | |||
| 455 | if (!zone_bm) | ||
| 456 | return -EFAULT; | ||
| 457 | } | ||
| 458 | bm->cur.zone_bm = zone_bm; | ||
| 459 | } | ||
| 460 | /* Check if the pfn corresponds to the current bitmap block */ | ||
| 461 | bb = zone_bm->cur_block; | ||
| 462 | if (pfn < bb->start_pfn) | 465 | if (pfn < bb->start_pfn) |
| 463 | bb = zone_bm->bm_blocks; | 466 | list_for_each_entry_continue_reverse(bb, &bm->blocks, hook) |
| 467 | if (pfn >= bb->start_pfn) | ||
| 468 | break; | ||
| 464 | 469 | ||
| 465 | while (pfn >= bb->end_pfn) { | 470 | if (pfn >= bb->end_pfn) |
| 466 | bb = bb->next; | 471 | list_for_each_entry_continue(bb, &bm->blocks, hook) |
| 472 | if (pfn >= bb->start_pfn && pfn < bb->end_pfn) | ||
| 473 | break; | ||
| 467 | 474 | ||
| 468 | BUG_ON(!bb); | 475 | if (&bb->hook == &bm->blocks) |
| 469 | } | 476 | return -EFAULT; |
| 470 | zone_bm->cur_block = bb; | 477 | |
| 478 | /* The block has been found */ | ||
| 479 | bm->cur.block = bb; | ||
| 471 | pfn -= bb->start_pfn; | 480 | pfn -= bb->start_pfn; |
| 481 | bm->cur.bit = pfn + 1; | ||
| 472 | *bit_nr = pfn; | 482 | *bit_nr = pfn; |
| 473 | *addr = bb->data; | 483 | *addr = bb->data; |
| 474 | return 0; | 484 | return 0; |
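
memory_bm_find_bit() now scans from the cached cursor in either direction. With list_for_each_entry_continue() there is no NULL terminator; when the walk falls off the end, the cursor aliases the list head itself, which is exactly the &bb->hook == &bm->blocks test above. A reduced sketch of that idiom (illustrative types):

        struct item {
                struct list_head hook;
                int key;
        };

        /* Resume a search after @cur; NULL means the walk wrapped
         * around to @head without a match. */
        static struct item *find_after(struct item *cur,
                                       struct list_head *head, int key)
        {
                list_for_each_entry_continue(cur, head, hook)
                        if (cur->key == key)
                                break;
                if (&cur->hook == head)
                        return NULL;
                return cur;
        }
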
| @@ -519,6 +529,14 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) | |||
| 519 | return test_bit(bit, addr); | 529 | return test_bit(bit, addr); |
| 520 | } | 530 | } |
| 521 | 531 | ||
| 532 | static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn) | ||
| 533 | { | ||
| 534 | void *addr; | ||
| 535 | unsigned int bit; | ||
| 536 | |||
| 537 | return !memory_bm_find_bit(bm, pfn, &addr, &bit); | ||
| 538 | } | ||
| 539 | |||
| 522 | /** | 540 | /** |
| 523 | * memory_bm_next_pfn - find the pfn that corresponds to the next set bit | 541 | * memory_bm_next_pfn - find the pfn that corresponds to the next set bit |
| 524 | * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is | 542 | * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is |
| @@ -530,29 +548,21 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) | |||
| 530 | 548 | ||
| 531 | static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) | 549 | static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) |
| 532 | { | 550 | { |
| 533 | struct zone_bitmap *zone_bm; | ||
| 534 | struct bm_block *bb; | 551 | struct bm_block *bb; |
| 535 | int bit; | 552 | int bit; |
| 536 | 553 | ||
| 554 | bb = bm->cur.block; | ||
| 537 | do { | 555 | do { |
| 538 | bb = bm->cur.block; | 556 | bit = bm->cur.bit; |
| 539 | do { | 557 | bit = find_next_bit(bb->data, bm_block_bits(bb), bit); |
| 540 | bit = bm->cur.bit; | 558 | if (bit < bm_block_bits(bb)) |
| 541 | bit = find_next_bit(bb->data, bm_block_bits(bb), bit); | 559 | goto Return_pfn; |
| 542 | if (bit < bm_block_bits(bb)) | 560 | |
| 543 | goto Return_pfn; | 561 | bb = list_entry(bb->hook.next, struct bm_block, hook); |
| 544 | 562 | bm->cur.block = bb; | |
| 545 | bb = bb->next; | 563 | bm->cur.bit = 0; |
| 546 | bm->cur.block = bb; | 564 | } while (&bb->hook != &bm->blocks); |
| 547 | bm->cur.bit = 0; | 565 | |
| 548 | } while (bb); | ||
| 549 | zone_bm = bm->cur.zone_bm->next; | ||
| 550 | if (zone_bm) { | ||
| 551 | bm->cur.zone_bm = zone_bm; | ||
| 552 | bm->cur.block = zone_bm->bm_blocks; | ||
| 553 | bm->cur.bit = 0; | ||
| 554 | } | ||
| 555 | } while (zone_bm); | ||
| 556 | memory_bm_position_reset(bm); | 566 | memory_bm_position_reset(bm); |
| 557 | return BM_END_OF_MAP; | 567 | return BM_END_OF_MAP; |
| 558 | 568 | ||
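
memory_bm_next_pfn() stays a stateful iterator after the conversion: each call returns the PFN of the next set bit at or after the cursor and advances it, yielding BM_END_OF_MAP (and resetting the position) once the bitmap is exhausted. The usual consumption loop, sketched (handle_pfn() is a hypothetical consumer):

        unsigned long pfn;

        memory_bm_position_reset(bm);
        for (pfn = memory_bm_next_pfn(bm);
             pfn != BM_END_OF_MAP;
             pfn = memory_bm_next_pfn(bm))
                handle_pfn(pfn);                /* hypothetical */
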
| @@ -808,8 +818,7 @@ static unsigned int count_free_highmem_pages(void) | |||
| 808 | * We should save the page if it isn't Nosave or NosaveFree, or Reserved, | 818 | * We should save the page if it isn't Nosave or NosaveFree, or Reserved, |
| 809 | * and it isn't a part of a free chunk of pages. | 819 | * and it isn't a part of a free chunk of pages. |
| 810 | */ | 820 | */ |
| 811 | 821 | static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn) | |
| 812 | static struct page *saveable_highmem_page(unsigned long pfn) | ||
| 813 | { | 822 | { |
| 814 | struct page *page; | 823 | struct page *page; |
| 815 | 824 | ||
| @@ -817,6 +826,8 @@ static struct page *saveable_highmem_page(unsigned long pfn) | |||
| 817 | return NULL; | 826 | return NULL; |
| 818 | 827 | ||
| 819 | page = pfn_to_page(pfn); | 828 | page = pfn_to_page(pfn); |
| 829 | if (page_zone(page) != zone) | ||
| 830 | return NULL; | ||
| 820 | 831 | ||
| 821 | BUG_ON(!PageHighMem(page)); | 832 | BUG_ON(!PageHighMem(page)); |
| 822 | 833 | ||
| @@ -846,13 +857,16 @@ unsigned int count_highmem_pages(void) | |||
| 846 | mark_free_pages(zone); | 857 | mark_free_pages(zone); |
| 847 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | 858 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; |
| 848 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) | 859 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
| 849 | if (saveable_highmem_page(pfn)) | 860 | if (saveable_highmem_page(zone, pfn)) |
| 850 | n++; | 861 | n++; |
| 851 | } | 862 | } |
| 852 | return n; | 863 | return n; |
| 853 | } | 864 | } |
| 854 | #else | 865 | #else |
| 855 | static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; } | 866 | static inline void *saveable_highmem_page(struct zone *z, unsigned long p) |
| 867 | { | ||
| 868 | return NULL; | ||
| 869 | } | ||
| 856 | #endif /* CONFIG_HIGHMEM */ | 870 | #endif /* CONFIG_HIGHMEM */ |
| 857 | 871 | ||
| 858 | /** | 872 | /** |
| @@ -863,8 +877,7 @@ static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; } | |||
| 863 | * of pages statically defined as 'unsaveable', and it isn't a part of | 877 | * of pages statically defined as 'unsaveable', and it isn't a part of |
| 864 | * a free chunk of pages. | 878 | * a free chunk of pages. |
| 865 | */ | 879 | */ |
| 866 | 880 | static struct page *saveable_page(struct zone *zone, unsigned long pfn) | |
| 867 | static struct page *saveable_page(unsigned long pfn) | ||
| 868 | { | 881 | { |
| 869 | struct page *page; | 882 | struct page *page; |
| 870 | 883 | ||
| @@ -872,6 +885,8 @@ static struct page *saveable_page(unsigned long pfn) | |||
| 872 | return NULL; | 885 | return NULL; |
| 873 | 886 | ||
| 874 | page = pfn_to_page(pfn); | 887 | page = pfn_to_page(pfn); |
| 888 | if (page_zone(page) != zone) | ||
| 889 | return NULL; | ||
| 875 | 890 | ||
| 876 | BUG_ON(PageHighMem(page)); | 891 | BUG_ON(PageHighMem(page)); |
| 877 | 892 | ||
| @@ -903,7 +918,7 @@ unsigned int count_data_pages(void) | |||
| 903 | mark_free_pages(zone); | 918 | mark_free_pages(zone); |
| 904 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | 919 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; |
| 905 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) | 920 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
| 906 | if(saveable_page(pfn)) | 921 | if (saveable_page(zone, pfn)) |
| 907 | n++; | 922 | n++; |
| 908 | } | 923 | } |
| 909 | return n; | 924 | return n; |
| @@ -944,7 +959,7 @@ static inline struct page * | |||
| 944 | page_is_saveable(struct zone *zone, unsigned long pfn) | 959 | page_is_saveable(struct zone *zone, unsigned long pfn) |
| 945 | { | 960 | { |
| 946 | return is_highmem(zone) ? | 961 | return is_highmem(zone) ? |
| 947 | saveable_highmem_page(pfn) : saveable_page(pfn); | 962 | saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn); |
| 948 | } | 963 | } |
| 949 | 964 | ||
| 950 | static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) | 965 | static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) |
| @@ -966,7 +981,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) | |||
| 966 | * data modified by kmap_atomic() | 981 | * data modified by kmap_atomic() |
| 967 | */ | 982 | */ |
| 968 | safe_copy_page(buffer, s_page); | 983 | safe_copy_page(buffer, s_page); |
| 969 | dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0); | 984 | dst = kmap_atomic(d_page, KM_USER0); |
| 970 | memcpy(dst, buffer, PAGE_SIZE); | 985 | memcpy(dst, buffer, PAGE_SIZE); |
| 971 | kunmap_atomic(dst, KM_USER0); | 986 | kunmap_atomic(dst, KM_USER0); |
| 972 | } else { | 987 | } else { |
| @@ -975,7 +990,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) | |||
| 975 | } | 990 | } |
| 976 | } | 991 | } |
| 977 | #else | 992 | #else |
| 978 | #define page_is_saveable(zone, pfn) saveable_page(pfn) | 993 | #define page_is_saveable(zone, pfn) saveable_page(zone, pfn) |
| 979 | 994 | ||
| 980 | static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) | 995 | static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) |
| 981 | { | 996 | { |
| @@ -1459,9 +1474,7 @@ load_header(struct swsusp_info *info) | |||
| 1459 | * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set | 1474 | * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set |
| 1460 | * the corresponding bit in the memory bitmap @bm | 1475 | * the corresponding bit in the memory bitmap @bm |
| 1461 | */ | 1476 | */ |
| 1462 | 1477 | static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) | |
| 1463 | static inline void | ||
| 1464 | unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) | ||
| 1465 | { | 1478 | { |
| 1466 | int j; | 1479 | int j; |
| 1467 | 1480 | ||
| @@ -1469,8 +1482,13 @@ unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) | |||
| 1469 | if (unlikely(buf[j] == BM_END_OF_MAP)) | 1482 | if (unlikely(buf[j] == BM_END_OF_MAP)) |
| 1470 | break; | 1483 | break; |
| 1471 | 1484 | ||
| 1472 | memory_bm_set_bit(bm, buf[j]); | 1485 | if (memory_bm_pfn_present(bm, buf[j])) |
| 1486 | memory_bm_set_bit(bm, buf[j]); | ||
| 1487 | else | ||
| 1488 | return -EFAULT; | ||
| 1473 | } | 1489 | } |
| 1490 | |||
| 1491 | return 0; | ||
| 1474 | } | 1492 | } |
| 1475 | 1493 | ||
| 1476 | /* List of "safe" pages that may be used to store data loaded from the suspend | 1494 | /* List of "safe" pages that may be used to store data loaded from the suspend |
| @@ -1608,7 +1626,7 @@ get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) | |||
| 1608 | pbe = chain_alloc(ca, sizeof(struct highmem_pbe)); | 1626 | pbe = chain_alloc(ca, sizeof(struct highmem_pbe)); |
| 1609 | if (!pbe) { | 1627 | if (!pbe) { |
| 1610 | swsusp_free(); | 1628 | swsusp_free(); |
| 1611 | return NULL; | 1629 | return ERR_PTR(-ENOMEM); |
| 1612 | } | 1630 | } |
| 1613 | pbe->orig_page = page; | 1631 | pbe->orig_page = page; |
| 1614 | if (safe_highmem_pages > 0) { | 1632 | if (safe_highmem_pages > 0) { |
| @@ -1677,7 +1695,7 @@ prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p) | |||
| 1677 | static inline void * | 1695 | static inline void * |
| 1678 | get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) | 1696 | get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) |
| 1679 | { | 1697 | { |
| 1680 | return NULL; | 1698 | return ERR_PTR(-EINVAL); |
| 1681 | } | 1699 | } |
| 1682 | 1700 | ||
| 1683 | static inline void copy_last_highmem_page(void) {} | 1701 | static inline void copy_last_highmem_page(void) {} |
| @@ -1788,8 +1806,13 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) | |||
| 1788 | static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) | 1806 | static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) |
| 1789 | { | 1807 | { |
| 1790 | struct pbe *pbe; | 1808 | struct pbe *pbe; |
| 1791 | struct page *page = pfn_to_page(memory_bm_next_pfn(bm)); | 1809 | struct page *page; |
| 1810 | unsigned long pfn = memory_bm_next_pfn(bm); | ||
| 1792 | 1811 | ||
| 1812 | if (pfn == BM_END_OF_MAP) | ||
| 1813 | return ERR_PTR(-EFAULT); | ||
| 1814 | |||
| 1815 | page = pfn_to_page(pfn); | ||
| 1793 | if (PageHighMem(page)) | 1816 | if (PageHighMem(page)) |
| 1794 | return get_highmem_page_buffer(page, ca); | 1817 | return get_highmem_page_buffer(page, ca); |
| 1795 | 1818 | ||
| @@ -1805,7 +1828,7 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) | |||
| 1805 | pbe = chain_alloc(ca, sizeof(struct pbe)); | 1828 | pbe = chain_alloc(ca, sizeof(struct pbe)); |
| 1806 | if (!pbe) { | 1829 | if (!pbe) { |
| 1807 | swsusp_free(); | 1830 | swsusp_free(); |
| 1808 | return NULL; | 1831 | return ERR_PTR(-ENOMEM); |
| 1809 | } | 1832 | } |
| 1810 | pbe->orig_address = page_address(page); | 1833 | pbe->orig_address = page_address(page); |
| 1811 | pbe->address = safe_pages_list; | 1834 | pbe->address = safe_pages_list; |
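
get_buffer() and its helpers switch from returning NULL to the <linux/err.h> ERR_PTR() convention, so snapshot_write_next() can tell -ENOMEM from -EFAULT instead of reporting everything as -ENOMEM. A self-contained sketch of the convention (names invented):

        static char pool[PAGE_SIZE];

        static void *pick_buffer(bool bad_input)
        {
                if (bad_input)
                        return ERR_PTR(-EFAULT);  /* errno encoded in the pointer */
                return pool;                      /* an ordinary valid pointer */
        }

        static long use_buffer(void)
        {
                void *buf = pick_buffer(false);

                if (IS_ERR(buf))                  /* matches only ERR_PTR() values */
                        return PTR_ERR(buf);      /* recover the errno */
                /* ... use buf ... */
                return 0;
        }
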
| @@ -1868,7 +1891,10 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) | |||
| 1868 | return error; | 1891 | return error; |
| 1869 | 1892 | ||
| 1870 | } else if (handle->prev <= nr_meta_pages) { | 1893 | } else if (handle->prev <= nr_meta_pages) { |
| 1871 | unpack_orig_pfns(buffer, ©_bm); | 1894 | error = unpack_orig_pfns(buffer, ©_bm); |
| 1895 | if (error) | ||
| 1896 | return error; | ||
| 1897 | |||
| 1872 | if (handle->prev == nr_meta_pages) { | 1898 | if (handle->prev == nr_meta_pages) { |
| 1873 | error = prepare_image(&orig_bm, ©_bm); | 1899 | error = prepare_image(&orig_bm, ©_bm); |
| 1874 | if (error) | 1900 | if (error) |
| @@ -1879,12 +1905,14 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) | |||
| 1879 | restore_pblist = NULL; | 1905 | restore_pblist = NULL; |
| 1880 | handle->buffer = get_buffer(&orig_bm, &ca); | 1906 | handle->buffer = get_buffer(&orig_bm, &ca); |
| 1881 | handle->sync_read = 0; | 1907 | handle->sync_read = 0; |
| 1882 | if (!handle->buffer) | 1908 | if (IS_ERR(handle->buffer)) |
| 1883 | return -ENOMEM; | 1909 | return PTR_ERR(handle->buffer); |
| 1884 | } | 1910 | } |
| 1885 | } else { | 1911 | } else { |
| 1886 | copy_last_highmem_page(); | 1912 | copy_last_highmem_page(); |
| 1887 | handle->buffer = get_buffer(&orig_bm, &ca); | 1913 | handle->buffer = get_buffer(&orig_bm, &ca); |
| 1914 | if (IS_ERR(handle->buffer)) | ||
| 1915 | return PTR_ERR(handle->buffer); | ||
| 1888 | if (handle->buffer != buffer) | 1916 | if (handle->buffer != buffer) |
| 1889 | handle->sync_read = 0; | 1917 | handle->sync_read = 0; |
| 1890 | } | 1918 | } |
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 023ff2a31d89..a92c91451559 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c | |||
| @@ -262,3 +262,125 @@ int swsusp_shrink_memory(void) | |||
| 262 | 262 | ||
| 263 | return 0; | 263 | return 0; |
| 264 | } | 264 | } |
| 265 | |||
| 266 | /* | ||
| 267 | * Platforms, like ACPI, may want us to save some memory used by them during | ||
| 268 | * hibernation and to restore the contents of this memory during the subsequent | ||
| 269 | * resume. The code below implements a mechanism allowing us to do that. | ||
| 270 | */ | ||
| 271 | |||
| 272 | struct nvs_page { | ||
| 273 | unsigned long phys_start; | ||
| 274 | unsigned int size; | ||
| 275 | void *kaddr; | ||
| 276 | void *data; | ||
| 277 | struct list_head node; | ||
| 278 | }; | ||
| 279 | |||
| 280 | static LIST_HEAD(nvs_list); | ||
| 281 | |||
| 282 | /** | ||
| 283 | * hibernate_nvs_register - register platform NVS memory region to save | ||
| 284 | * @start - physical address of the region | ||
| 285 | * @size - size of the region | ||
| 286 | * | ||
| 287 | * The NVS region need not be page-aligned (both ends) and we arrange | ||
| 288 | * things so that the data from page-aligned addresses in this region will | ||
| 289 | * be copied into separate RAM pages. | ||
| 290 | */ | ||
| 291 | int hibernate_nvs_register(unsigned long start, unsigned long size) | ||
| 292 | { | ||
| 293 | struct nvs_page *entry, *next; | ||
| 294 | |||
| 295 | while (size > 0) { | ||
| 296 | unsigned int nr_bytes; | ||
| 297 | |||
| 298 | entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL); | ||
| 299 | if (!entry) | ||
| 300 | goto Error; | ||
| 301 | |||
| 302 | list_add_tail(&entry->node, &nvs_list); | ||
| 303 | entry->phys_start = start; | ||
| 304 | nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK); | ||
| 305 | entry->size = (size < nr_bytes) ? size : nr_bytes; | ||
| 306 | |||
| 307 | start += entry->size; | ||
| 308 | size -= entry->size; | ||
| 309 | } | ||
| 310 | return 0; | ||
| 311 | |||
| 312 | Error: | ||
| 313 | list_for_each_entry_safe(entry, next, &nvs_list, node) { | ||
| 314 | list_del(&entry->node); | ||
| 315 | kfree(entry); | ||
| 316 | } | ||
| 317 | return -ENOMEM; | ||
| 318 | } | ||
| 319 | |||
| 320 | /** | ||
| 321 | * hibernate_nvs_free - free data pages allocated for saving NVS regions | ||
| 322 | */ | ||
| 323 | void hibernate_nvs_free(void) | ||
| 324 | { | ||
| 325 | struct nvs_page *entry; | ||
| 326 | |||
| 327 | list_for_each_entry(entry, &nvs_list, node) | ||
| 328 | if (entry->data) { | ||
| 329 | free_page((unsigned long)entry->data); | ||
| 330 | entry->data = NULL; | ||
| 331 | if (entry->kaddr) { | ||
| 332 | iounmap(entry->kaddr); | ||
| 333 | entry->kaddr = NULL; | ||
| 334 | } | ||
| 335 | } | ||
| 336 | } | ||
| 337 | |||
| 338 | /** | ||
| 339 | * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions | ||
| 340 | */ | ||
| 341 | int hibernate_nvs_alloc(void) | ||
| 342 | { | ||
| 343 | struct nvs_page *entry; | ||
| 344 | |||
| 345 | list_for_each_entry(entry, &nvs_list, node) { | ||
| 346 | entry->data = (void *)__get_free_page(GFP_KERNEL); | ||
| 347 | if (!entry->data) { | ||
| 348 | hibernate_nvs_free(); | ||
| 349 | return -ENOMEM; | ||
| 350 | } | ||
| 351 | } | ||
| 352 | return 0; | ||
| 353 | } | ||
| 354 | |||
| 355 | /** | ||
| 356 | * hibernate_nvs_save - save NVS memory regions | ||
| 357 | */ | ||
| 358 | void hibernate_nvs_save(void) | ||
| 359 | { | ||
| 360 | struct nvs_page *entry; | ||
| 361 | |||
| 362 | printk(KERN_INFO "PM: Saving platform NVS memory\n"); | ||
| 363 | |||
| 364 | list_for_each_entry(entry, &nvs_list, node) | ||
| 365 | if (entry->data) { | ||
| 366 | entry->kaddr = ioremap(entry->phys_start, entry->size); | ||
| 367 | memcpy(entry->data, entry->kaddr, entry->size); | ||
| 368 | } | ||
| 369 | } | ||
| 370 | |||
| 371 | /** | ||
| 372 | * hibernate_nvs_restore - restore NVS memory regions | ||
| 373 | * | ||
| 374 | * This function is going to be called with interrupts disabled, so it | ||
| 375 | * cannot iounmap the virtual addresses used to access the NVS region. | ||
| 376 | */ | ||
| 377 | void hibernate_nvs_restore(void) | ||
| 378 | { | ||
| 379 | struct nvs_page *entry; | ||
| 380 | |||
| 381 | printk(KERN_INFO "PM: Restoring platform NVS memory\n"); | ||
| 382 | |||
| 383 | list_for_each_entry(entry, &nvs_list, node) | ||
| 384 | if (entry->data) | ||
| 385 | memcpy(entry->kaddr, entry->data, entry->size); | ||
| 386 | } | ||
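
Read together, the NVS helpers above form one save/restore lifecycle around a hibernation cycle. The intended call order, sketched from the code (the register call site and the address range are hypothetical; on real hardware the range would come from the firmware memory map, e.g. the ACPI NVS area):

        /* boot: describe the region to preserve (need not be page-aligned) */
        if (hibernate_nvs_register(0xdead0000, 0x3000))     /* hypothetical range */
                return -ENOMEM;

        /* entering hibernation */
        if (hibernate_nvs_alloc())          /* back each chunk with a RAM page */
                return -ENOMEM;
        hibernate_nvs_save();               /* ioremap() + copy NVS into RAM */

        /* ... image written, power off, resume kernel loads the image ... */

        hibernate_nvs_restore();            /* copy back; irqs off, no iounmap() */
        hibernate_nvs_free();               /* drop data pages and the mappings */
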
diff --git a/kernel/printk.c b/kernel/printk.c index 7015733793e8..69188f226a93 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
| @@ -382,7 +382,7 @@ out: | |||
| 382 | return error; | 382 | return error; |
| 383 | } | 383 | } |
| 384 | 384 | ||
| 385 | asmlinkage long sys_syslog(int type, char __user *buf, int len) | 385 | SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) |
| 386 | { | 386 | { |
| 387 | return do_syslog(type, buf, len); | 387 | return do_syslog(type, buf, len); |
| 388 | } | 388 | } |
| @@ -742,11 +742,6 @@ EXPORT_SYMBOL(vprintk); | |||
| 742 | 742 | ||
| 743 | #else | 743 | #else |
| 744 | 744 | ||
| 745 | asmlinkage long sys_syslog(int type, char __user *buf, int len) | ||
| 746 | { | ||
| 747 | return -ENOSYS; | ||
| 748 | } | ||
| 749 | |||
| 750 | static void call_console_drivers(unsigned start, unsigned end) | 745 | static void call_console_drivers(unsigned start, unsigned end) |
| 751 | { | 746 | { |
| 752 | } | 747 | } |
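
This and the other asmlinkage-to-SYSCALL_DEFINEn conversions in the series all read the same way: the macro takes the syscall name followed by alternating type/name pairs. On 64-bit architectures that enable syscall wrappers it roughly expands to a stub that receives every argument as long and casts it back, so 32-bit values from userspace are explicitly sign-extended before the body runs. A hedged sketch of the shape, not the verbatim expansion:

        asmlinkage long sys_syslog(int type, char __user *buf, int len);

        /* generated wrapper; sys_syslog is aliased to it */
        asmlinkage long SyS_syslog(long type, long buf, long len)
        {
                /* the explicit narrowing casts perform the sign extension */
                return SYSC_syslog((int)type, (char __user *)buf, (int)len);
        }

        /* SYSC_syslog() is a static inline carrying the original body */
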
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 29dc700e198c..c9cf48b21f05 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
| @@ -574,7 +574,7 @@ struct task_struct *ptrace_get_task_struct(pid_t pid) | |||
| 574 | #define arch_ptrace_attach(child) do { } while (0) | 574 | #define arch_ptrace_attach(child) do { } while (0) |
| 575 | #endif | 575 | #endif |
| 576 | 576 | ||
| 577 | asmlinkage long sys_ptrace(long request, long pid, long addr, long data) | 577 | SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data) |
| 578 | { | 578 | { |
| 579 | struct task_struct *child; | 579 | struct task_struct *child; |
| 580 | long ret; | 580 | long ret; |
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c index 490934fc7ac3..bd5a9003497c 100644 --- a/kernel/rcuclassic.c +++ b/kernel/rcuclassic.c | |||
| @@ -716,7 +716,7 @@ void rcu_check_callbacks(int cpu, int user) | |||
| 716 | raise_rcu_softirq(); | 716 | raise_rcu_softirq(); |
| 717 | } | 717 | } |
| 718 | 718 | ||
| 719 | static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, | 719 | static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, |
| 720 | struct rcu_data *rdp) | 720 | struct rcu_data *rdp) |
| 721 | { | 721 | { |
| 722 | unsigned long flags; | 722 | unsigned long flags; |
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 1cff28db56b6..7c4142a79f0a 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
| @@ -136,29 +136,47 @@ static int stutter_pause_test = 0; | |||
| 136 | #endif | 136 | #endif |
| 137 | int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; | 137 | int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; |
| 138 | 138 | ||
| 139 | #define FULLSTOP_SHUTDOWN 1 /* Bail due to system shutdown/panic. */ | 139 | /* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */ |
| 140 | #define FULLSTOP_CLEANUP 2 /* Orderly shutdown. */ | 140 | |
| 141 | static int fullstop; /* stop generating callbacks at test end. */ | 141 | #define FULLSTOP_DONTSTOP 0 /* Normal operation. */ |
| 142 | DEFINE_MUTEX(fullstop_mutex); /* protect fullstop transitions and */ | 142 | #define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */ |
| 143 | /* spawning of kthreads. */ | 143 | #define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */ |
| 144 | static int fullstop = FULLSTOP_RMMOD; | ||
| 145 | DEFINE_MUTEX(fullstop_mutex); /* Protect fullstop transitions and spawning */ | ||
| 146 | /* of kthreads. */ | ||
| 144 | 147 | ||
| 145 | /* | 148 | /* |
| 146 | * Detect and respond to a signal-based shutdown. | 149 | * Detect and respond to a system shutdown. |
| 147 | */ | 150 | */ |
| 148 | static int | 151 | static int |
| 149 | rcutorture_shutdown_notify(struct notifier_block *unused1, | 152 | rcutorture_shutdown_notify(struct notifier_block *unused1, |
| 150 | unsigned long unused2, void *unused3) | 153 | unsigned long unused2, void *unused3) |
| 151 | { | 154 | { |
| 152 | if (fullstop) | ||
| 153 | return NOTIFY_DONE; | ||
| 154 | mutex_lock(&fullstop_mutex); | 155 | mutex_lock(&fullstop_mutex); |
| 155 | if (!fullstop) | 156 | if (fullstop == FULLSTOP_DONTSTOP) |
| 156 | fullstop = FULLSTOP_SHUTDOWN; | 157 | fullstop = FULLSTOP_SHUTDOWN; |
| 158 | else | ||
| 159 | printk(KERN_WARNING /* but going down anyway, so... */ | ||
| 160 | "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); | ||
| 157 | mutex_unlock(&fullstop_mutex); | 161 | mutex_unlock(&fullstop_mutex); |
| 158 | return NOTIFY_DONE; | 162 | return NOTIFY_DONE; |
| 159 | } | 163 | } |
| 160 | 164 | ||
| 161 | /* | 165 | /* |
| 166 | * Absorb kthreads into a kernel function that won't return, so that | ||
| 167 | * they won't ever access module text or data again. | ||
| 168 | */ | ||
| 169 | static void rcutorture_shutdown_absorb(char *title) | ||
| 170 | { | ||
| 171 | if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { | ||
| 172 | printk(KERN_NOTICE | ||
| 173 | "rcutorture thread %s parking due to system shutdown\n", | ||
| 174 | title); | ||
| 175 | schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT); | ||
| 176 | } | ||
| 177 | } | ||
| 178 | |||
| 179 | /* | ||
| 162 | * Allocate an element from the rcu_tortures pool. | 180 | * Allocate an element from the rcu_tortures pool. |
| 163 | */ | 181 | */ |
| 164 | static struct rcu_torture * | 182 | static struct rcu_torture * |
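
rcutorture_shutdown_absorb() is the core of the new FULLSTOP handshake: once a system shutdown is observed, each torture kthread parks in the scheduler forever rather than keep touching module text or data that an rmmod could free. Every worker loop in the later hunks follows the same shape, sketched here (do_work() is a placeholder):

        do {
                do_work();                              /* placeholder */
                rcutorture_shutdown_absorb("worker");   /* parks on shutdown */
        } while (!kthread_should_stop() &&
                 fullstop == FULLSTOP_DONTSTOP);

        /* spin until kthread_stop() is actually delivered */
        rcutorture_shutdown_absorb("worker");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
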
| @@ -219,13 +237,14 @@ rcu_random(struct rcu_random_state *rrsp) | |||
| 219 | } | 237 | } |
| 220 | 238 | ||
| 221 | static void | 239 | static void |
| 222 | rcu_stutter_wait(void) | 240 | rcu_stutter_wait(char *title) |
| 223 | { | 241 | { |
| 224 | while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) { | 242 | while (stutter_pause_test || !rcutorture_runnable) { |
| 225 | if (rcutorture_runnable) | 243 | if (rcutorture_runnable) |
| 226 | schedule_timeout_interruptible(1); | 244 | schedule_timeout_interruptible(1); |
| 227 | else | 245 | else |
| 228 | schedule_timeout_interruptible(round_jiffies_relative(HZ)); | 246 | schedule_timeout_interruptible(round_jiffies_relative(HZ)); |
| 247 | rcutorture_shutdown_absorb(title); | ||
| 229 | } | 248 | } |
| 230 | } | 249 | } |
| 231 | 250 | ||
| @@ -287,7 +306,7 @@ rcu_torture_cb(struct rcu_head *p) | |||
| 287 | int i; | 306 | int i; |
| 288 | struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); | 307 | struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); |
| 289 | 308 | ||
| 290 | if (fullstop) { | 309 | if (fullstop != FULLSTOP_DONTSTOP) { |
| 291 | /* Test is ending, just drop callbacks on the floor. */ | 310 | /* Test is ending, just drop callbacks on the floor. */ |
| 292 | /* The next initialization will pick up the pieces. */ | 311 | /* The next initialization will pick up the pieces. */ |
| 293 | return; | 312 | return; |
| @@ -619,10 +638,11 @@ rcu_torture_writer(void *arg) | |||
| 619 | } | 638 | } |
| 620 | rcu_torture_current_version++; | 639 | rcu_torture_current_version++; |
| 621 | oldbatch = cur_ops->completed(); | 640 | oldbatch = cur_ops->completed(); |
| 622 | rcu_stutter_wait(); | 641 | rcu_stutter_wait("rcu_torture_writer"); |
| 623 | } while (!kthread_should_stop() && !fullstop); | 642 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); |
| 624 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); | 643 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); |
| 625 | while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN) | 644 | rcutorture_shutdown_absorb("rcu_torture_writer"); |
| 645 | while (!kthread_should_stop()) | ||
| 626 | schedule_timeout_uninterruptible(1); | 646 | schedule_timeout_uninterruptible(1); |
| 627 | return 0; | 647 | return 0; |
| 628 | } | 648 | } |
| @@ -643,11 +663,12 @@ rcu_torture_fakewriter(void *arg) | |||
| 643 | schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10); | 663 | schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10); |
| 644 | udelay(rcu_random(&rand) & 0x3ff); | 664 | udelay(rcu_random(&rand) & 0x3ff); |
| 645 | cur_ops->sync(); | 665 | cur_ops->sync(); |
| 646 | rcu_stutter_wait(); | 666 | rcu_stutter_wait("rcu_torture_fakewriter"); |
| 647 | } while (!kthread_should_stop() && !fullstop); | 667 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); |
| 648 | 668 | ||
| 649 | VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); | 669 | VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); |
| 650 | while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN) | 670 | rcutorture_shutdown_absorb("rcu_torture_fakewriter"); |
| 671 | while (!kthread_should_stop()) | ||
| 651 | schedule_timeout_uninterruptible(1); | 672 | schedule_timeout_uninterruptible(1); |
| 652 | return 0; | 673 | return 0; |
| 653 | } | 674 | } |
| @@ -752,12 +773,13 @@ rcu_torture_reader(void *arg) | |||
| 752 | preempt_enable(); | 773 | preempt_enable(); |
| 753 | cur_ops->readunlock(idx); | 774 | cur_ops->readunlock(idx); |
| 754 | schedule(); | 775 | schedule(); |
| 755 | rcu_stutter_wait(); | 776 | rcu_stutter_wait("rcu_torture_reader"); |
| 756 | } while (!kthread_should_stop() && !fullstop); | 777 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); |
| 757 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); | 778 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); |
| 779 | rcutorture_shutdown_absorb("rcu_torture_reader"); | ||
| 758 | if (irqreader && cur_ops->irqcapable) | 780 | if (irqreader && cur_ops->irqcapable) |
| 759 | del_timer_sync(&t); | 781 | del_timer_sync(&t); |
| 760 | while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN) | 782 | while (!kthread_should_stop()) |
| 761 | schedule_timeout_uninterruptible(1); | 783 | schedule_timeout_uninterruptible(1); |
| 762 | return 0; | 784 | return 0; |
| 763 | } | 785 | } |
| @@ -854,7 +876,8 @@ rcu_torture_stats(void *arg) | |||
| 854 | do { | 876 | do { |
| 855 | schedule_timeout_interruptible(stat_interval * HZ); | 877 | schedule_timeout_interruptible(stat_interval * HZ); |
| 856 | rcu_torture_stats_print(); | 878 | rcu_torture_stats_print(); |
| 857 | } while (!kthread_should_stop() && !fullstop); | 879 | rcutorture_shutdown_absorb("rcu_torture_stats"); |
| 880 | } while (!kthread_should_stop()); | ||
| 858 | VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping"); | 881 | VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping"); |
| 859 | return 0; | 882 | return 0; |
| 860 | } | 883 | } |
| @@ -866,52 +889,49 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */ | |||
| 866 | */ | 889 | */ |
| 867 | static void rcu_torture_shuffle_tasks(void) | 890 | static void rcu_torture_shuffle_tasks(void) |
| 868 | { | 891 | { |
| 869 | cpumask_var_t tmp_mask; | 892 | cpumask_t tmp_mask; |
| 870 | int i; | 893 | int i; |
| 871 | 894 | ||
| 872 | if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)) | 895 | cpus_setall(tmp_mask); |
| 873 | BUG(); | ||
| 874 | |||
| 875 | cpumask_setall(tmp_mask); | ||
| 876 | get_online_cpus(); | 896 | get_online_cpus(); |
| 877 | 897 | ||
| 878 | /* No point in shuffling if there is only one online CPU (ex: UP) */ | 898 | /* No point in shuffling if there is only one online CPU (ex: UP) */ |
| 879 | if (num_online_cpus() == 1) | 899 | if (num_online_cpus() == 1) { |
| 880 | goto out; | 900 | put_online_cpus(); |
| 901 | return; | ||
| 902 | } | ||
| 881 | 903 | ||
| 882 | if (rcu_idle_cpu != -1) | 904 | if (rcu_idle_cpu != -1) |
| 883 | cpumask_clear_cpu(rcu_idle_cpu, tmp_mask); | 905 | cpu_clear(rcu_idle_cpu, tmp_mask); |
| 884 | 906 | ||
| 885 | set_cpus_allowed_ptr(current, tmp_mask); | 907 | set_cpus_allowed_ptr(current, &tmp_mask); |
| 886 | 908 | ||
| 887 | if (reader_tasks) { | 909 | if (reader_tasks) { |
| 888 | for (i = 0; i < nrealreaders; i++) | 910 | for (i = 0; i < nrealreaders; i++) |
| 889 | if (reader_tasks[i]) | 911 | if (reader_tasks[i]) |
| 890 | set_cpus_allowed_ptr(reader_tasks[i], | 912 | set_cpus_allowed_ptr(reader_tasks[i], |
| 891 | tmp_mask); | 913 | &tmp_mask); |
| 892 | } | 914 | } |
| 893 | 915 | ||
| 894 | if (fakewriter_tasks) { | 916 | if (fakewriter_tasks) { |
| 895 | for (i = 0; i < nfakewriters; i++) | 917 | for (i = 0; i < nfakewriters; i++) |
| 896 | if (fakewriter_tasks[i]) | 918 | if (fakewriter_tasks[i]) |
| 897 | set_cpus_allowed_ptr(fakewriter_tasks[i], | 919 | set_cpus_allowed_ptr(fakewriter_tasks[i], |
| 898 | tmp_mask); | 920 | &tmp_mask); |
| 899 | } | 921 | } |
| 900 | 922 | ||
| 901 | if (writer_task) | 923 | if (writer_task) |
| 902 | set_cpus_allowed_ptr(writer_task, tmp_mask); | 924 | set_cpus_allowed_ptr(writer_task, &tmp_mask); |
| 903 | 925 | ||
| 904 | if (stats_task) | 926 | if (stats_task) |
| 905 | set_cpus_allowed_ptr(stats_task, tmp_mask); | 927 | set_cpus_allowed_ptr(stats_task, &tmp_mask); |
| 906 | 928 | ||
| 907 | if (rcu_idle_cpu == -1) | 929 | if (rcu_idle_cpu == -1) |
| 908 | rcu_idle_cpu = num_online_cpus() - 1; | 930 | rcu_idle_cpu = num_online_cpus() - 1; |
| 909 | else | 931 | else |
| 910 | rcu_idle_cpu--; | 932 | rcu_idle_cpu--; |
| 911 | 933 | ||
| 912 | out: | ||
| 913 | put_online_cpus(); | 934 | put_online_cpus(); |
| 914 | free_cpumask_var(tmp_mask); | ||
| 915 | } | 935 | } |
| 916 | 936 | ||
| 917 | /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the | 937 | /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the |
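
This hunk moves rcu_torture_shuffle_tasks() back from the heap-backed cpumask_var_t to an on-stack cpumask_t, apparently to drop the allocation (and its BUG() on failure) from this path; the cost is NR_CPUS/8 bytes of stack on large configurations. The two idioms side by side, as a sketch:

        /* heap-backed (what the old code did) */
        cpumask_var_t v;
        if (!alloc_cpumask_var(&v, GFP_KERNEL))     /* can fail */
                return;
        cpumask_setall(v);
        set_cpus_allowed_ptr(current, v);
        free_cpumask_var(v);

        /* on-stack (what this hunk restores); no failure path */
        cpumask_t m;
        cpus_setall(m);
        set_cpus_allowed_ptr(current, &m);
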
| @@ -925,7 +945,8 @@ rcu_torture_shuffle(void *arg) | |||
| 925 | do { | 945 | do { |
| 926 | schedule_timeout_interruptible(shuffle_interval * HZ); | 946 | schedule_timeout_interruptible(shuffle_interval * HZ); |
| 927 | rcu_torture_shuffle_tasks(); | 947 | rcu_torture_shuffle_tasks(); |
| 928 | } while (!kthread_should_stop() && !fullstop); | 948 | rcutorture_shutdown_absorb("rcu_torture_shuffle"); |
| 949 | } while (!kthread_should_stop()); | ||
| 929 | VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping"); | 950 | VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping"); |
| 930 | return 0; | 951 | return 0; |
| 931 | } | 952 | } |
| @@ -940,10 +961,11 @@ rcu_torture_stutter(void *arg) | |||
| 940 | do { | 961 | do { |
| 941 | schedule_timeout_interruptible(stutter * HZ); | 962 | schedule_timeout_interruptible(stutter * HZ); |
| 942 | stutter_pause_test = 1; | 963 | stutter_pause_test = 1; |
| 943 | if (!kthread_should_stop() && !fullstop) | 964 | if (!kthread_should_stop()) |
| 944 | schedule_timeout_interruptible(stutter * HZ); | 965 | schedule_timeout_interruptible(stutter * HZ); |
| 945 | stutter_pause_test = 0; | 966 | stutter_pause_test = 0; |
| 946 | } while (!kthread_should_stop() && !fullstop); | 967 | rcutorture_shutdown_absorb("rcu_torture_stutter"); |
| 968 | } while (!kthread_should_stop()); | ||
| 947 | VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping"); | 969 | VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping"); |
| 948 | return 0; | 970 | return 0; |
| 949 | } | 971 | } |
| @@ -970,15 +992,16 @@ rcu_torture_cleanup(void) | |||
| 970 | int i; | 992 | int i; |
| 971 | 993 | ||
| 972 | mutex_lock(&fullstop_mutex); | 994 | mutex_lock(&fullstop_mutex); |
| 973 | if (!fullstop) { | 995 | if (fullstop == FULLSTOP_SHUTDOWN) { |
| 974 | /* If being signaled, let it happen, then exit. */ | 996 | printk(KERN_WARNING /* but going down anyway, so... */ |
| 997 | "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); | ||
| 975 | mutex_unlock(&fullstop_mutex); | 998 | mutex_unlock(&fullstop_mutex); |
| 976 | schedule_timeout_interruptible(10 * HZ); | 999 | schedule_timeout_uninterruptible(10); |
| 977 | if (cur_ops->cb_barrier != NULL) | 1000 | if (cur_ops->cb_barrier != NULL) |
| 978 | cur_ops->cb_barrier(); | 1001 | cur_ops->cb_barrier(); |
| 979 | return; | 1002 | return; |
| 980 | } | 1003 | } |
| 981 | fullstop = FULLSTOP_CLEANUP; | 1004 | fullstop = FULLSTOP_RMMOD; |
| 982 | mutex_unlock(&fullstop_mutex); | 1005 | mutex_unlock(&fullstop_mutex); |
| 983 | unregister_reboot_notifier(&rcutorture_nb); | 1006 | unregister_reboot_notifier(&rcutorture_nb); |
| 984 | if (stutter_task) { | 1007 | if (stutter_task) { |
| @@ -1078,7 +1101,7 @@ rcu_torture_init(void) | |||
| 1078 | else | 1101 | else |
| 1079 | nrealreaders = 2 * num_online_cpus(); | 1102 | nrealreaders = 2 * num_online_cpus(); |
| 1080 | rcu_torture_print_module_parms("Start of test"); | 1103 | rcu_torture_print_module_parms("Start of test"); |
| 1081 | fullstop = 0; | 1104 | fullstop = FULLSTOP_DONTSTOP; |
| 1082 | 1105 | ||
| 1083 | /* Set up the freelist. */ | 1106 | /* Set up the freelist. */ |
| 1084 | 1107 | ||
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index f2d8638e6c60..b2fd602a6f6f 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
| @@ -1314,7 +1314,7 @@ int rcu_needs_cpu(int cpu) | |||
| 1314 | * access due to the fact that this CPU cannot possibly have any RCU | 1314 | * access due to the fact that this CPU cannot possibly have any RCU |
| 1315 | * callbacks in flight yet. | 1315 | * callbacks in flight yet. |
| 1316 | */ | 1316 | */ |
| 1317 | static void | 1317 | static void __cpuinit |
| 1318 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp) | 1318 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp) |
| 1319 | { | 1319 | { |
| 1320 | unsigned long flags; | 1320 | unsigned long flags; |
diff --git a/kernel/relay.c b/kernel/relay.c index 09ac2008f77b..9d79b7854fa6 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
| @@ -663,8 +663,10 @@ int relay_late_setup_files(struct rchan *chan, | |||
| 663 | 663 | ||
| 664 | mutex_lock(&relay_channels_mutex); | 664 | mutex_lock(&relay_channels_mutex); |
| 665 | /* Is chan already set up? */ | 665 | /* Is chan already set up? */ |
| 666 | if (unlikely(chan->has_base_filename)) | 666 | if (unlikely(chan->has_base_filename)) { |
| 667 | mutex_unlock(&relay_channels_mutex); | ||
| 667 | return -EEXIST; | 668 | return -EEXIST; |
| 669 | } | ||
| 668 | chan->has_base_filename = 1; | 670 | chan->has_base_filename = 1; |
| 669 | chan->parent = parent; | 671 | chan->parent = parent; |
| 670 | curr_cpu = get_cpu(); | 672 | curr_cpu = get_cpu(); |
diff --git a/kernel/resource.c b/kernel/resource.c index ca6a1536b205..fd5d7d574bb9 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
| @@ -620,6 +620,7 @@ resource_size_t resource_alignment(struct resource *res) | |||
| 620 | * @start: resource start address | 620 | * @start: resource start address |
| 621 | * @n: resource region size | 621 | * @n: resource region size |
| 622 | * @name: reserving caller's ID string | 622 | * @name: reserving caller's ID string |
| 623 | * @flags: IO resource flags | ||
| 623 | */ | 624 | */ |
| 624 | struct resource * __request_region(struct resource *parent, | 625 | struct resource * __request_region(struct resource *parent, |
| 625 | resource_size_t start, resource_size_t n, | 626 | resource_size_t start, resource_size_t n, |
diff --git a/kernel/sched.c b/kernel/sched.c index deb5ac8c12f3..52bbf1c842a8 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -125,6 +125,9 @@ DEFINE_TRACE(sched_switch); | |||
| 125 | DEFINE_TRACE(sched_migrate_task); | 125 | DEFINE_TRACE(sched_migrate_task); |
| 126 | 126 | ||
| 127 | #ifdef CONFIG_SMP | 127 | #ifdef CONFIG_SMP |
| 128 | |||
| 129 | static void double_rq_lock(struct rq *rq1, struct rq *rq2); | ||
| 130 | |||
| 128 | /* | 131 | /* |
| 129 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) | 132 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) |
| 130 | * Since cpu_power is a 'constant', we can use a reciprocal divide. | 133 | * Since cpu_power is a 'constant', we can use a reciprocal divide. |
| @@ -1320,8 +1323,8 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec) | |||
| 1320 | * slice expiry etc. | 1323 | * slice expiry etc. |
| 1321 | */ | 1324 | */ |
| 1322 | 1325 | ||
| 1323 | #define WEIGHT_IDLEPRIO 2 | 1326 | #define WEIGHT_IDLEPRIO 3 |
| 1324 | #define WMULT_IDLEPRIO (1 << 31) | 1327 | #define WMULT_IDLEPRIO 1431655765 |
| 1325 | 1328 | ||
| 1326 | /* | 1329 | /* |
| 1327 | * Nice levels are multiplicative, with a gentle 10% change for every | 1330 | * Nice levels are multiplicative, with a gentle 10% change for every |
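
WEIGHT_IDLEPRIO and WMULT_IDLEPRIO must change together: wmult is precomputed as roughly 2^32 / weight so that the load math can use a multiply-and-shift instead of a division. Checking both pairs (a verification note, not kernel code):

        /* wmult ~= 2^32 / weight, so (delta * wmult) >> 32 ~= delta / weight
         *   old: weight 2 -> 4294967296 / 2 = 2147483648 = 1 << 31
         *   new: weight 3 -> 4294967296 / 3 = 1431655765 (rounded down)
         * Each pair is internally consistent. */
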
| @@ -4437,7 +4440,7 @@ void __kprobes sub_preempt_count(int val) | |||
| 4437 | /* | 4440 | /* |
| 4438 | * Underflow? | 4441 | * Underflow? |
| 4439 | */ | 4442 | */ |
| 4440 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked()))) | 4443 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) |
| 4441 | return; | 4444 | return; |
| 4442 | /* | 4445 | /* |
| 4443 | * Is the spinlock portion underflowing? | 4446 | * Is the spinlock portion underflowing? |
| @@ -5123,7 +5126,7 @@ int can_nice(const struct task_struct *p, const int nice) | |||
| 5123 | * sys_setpriority is a more generic, but much slower function that | 5126 | * sys_setpriority is a more generic, but much slower function that |
| 5124 | * does similar things. | 5127 | * does similar things. |
| 5125 | */ | 5128 | */ |
| 5126 | asmlinkage long sys_nice(int increment) | 5129 | SYSCALL_DEFINE1(nice, int, increment) |
| 5127 | { | 5130 | { |
| 5128 | long nice, retval; | 5131 | long nice, retval; |
| 5129 | 5132 | ||
| @@ -5430,8 +5433,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | |||
| 5430 | * @policy: new policy. | 5433 | * @policy: new policy. |
| 5431 | * @param: structure containing the new RT priority. | 5434 | * @param: structure containing the new RT priority. |
| 5432 | */ | 5435 | */ |
| 5433 | asmlinkage long | 5436 | SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, |
| 5434 | sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | 5437 | struct sched_param __user *, param) |
| 5435 | { | 5438 | { |
| 5436 | /* negative values for policy are not valid */ | 5439 | /* negative values for policy are not valid */ |
| 5437 | if (policy < 0) | 5440 | if (policy < 0) |
| @@ -5445,7 +5448,7 @@ sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | |||
| 5445 | * @pid: the pid in question. | 5448 | * @pid: the pid in question. |
| 5446 | * @param: structure containing the new RT priority. | 5449 | * @param: structure containing the new RT priority. |
| 5447 | */ | 5450 | */ |
| 5448 | asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param) | 5451 | SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) |
| 5449 | { | 5452 | { |
| 5450 | return do_sched_setscheduler(pid, -1, param); | 5453 | return do_sched_setscheduler(pid, -1, param); |
| 5451 | } | 5454 | } |
| @@ -5454,7 +5457,7 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param) | |||
| 5454 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread | 5457 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread |
| 5455 | * @pid: the pid in question. | 5458 | * @pid: the pid in question. |
| 5456 | */ | 5459 | */ |
| 5457 | asmlinkage long sys_sched_getscheduler(pid_t pid) | 5460 | SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) |
| 5458 | { | 5461 | { |
| 5459 | struct task_struct *p; | 5462 | struct task_struct *p; |
| 5460 | int retval; | 5463 | int retval; |
| @@ -5479,7 +5482,7 @@ asmlinkage long sys_sched_getscheduler(pid_t pid) | |||
| 5479 | * @pid: the pid in question. | 5482 | * @pid: the pid in question. |
| 5480 | * @param: structure containing the RT priority. | 5483 | * @param: structure containing the RT priority. |
| 5481 | */ | 5484 | */ |
| 5482 | asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) | 5485 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) |
| 5483 | { | 5486 | { |
| 5484 | struct sched_param lp; | 5487 | struct sched_param lp; |
| 5485 | struct task_struct *p; | 5488 | struct task_struct *p; |
| @@ -5597,8 +5600,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | |||
| 5597 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr | 5600 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
| 5598 | * @user_mask_ptr: user-space pointer to the new cpu mask | 5601 | * @user_mask_ptr: user-space pointer to the new cpu mask |
| 5599 | */ | 5602 | */ |
| 5600 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, | 5603 | SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, |
| 5601 | unsigned long __user *user_mask_ptr) | 5604 | unsigned long __user *, user_mask_ptr) |
| 5602 | { | 5605 | { |
| 5603 | cpumask_var_t new_mask; | 5606 | cpumask_var_t new_mask; |
| 5604 | int retval; | 5607 | int retval; |
| @@ -5645,8 +5648,8 @@ out_unlock: | |||
| 5645 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr | 5648 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
| 5646 | * @user_mask_ptr: user-space pointer to hold the current cpu mask | 5649 | * @user_mask_ptr: user-space pointer to hold the current cpu mask |
| 5647 | */ | 5650 | */ |
| 5648 | asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, | 5651 | SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, |
| 5649 | unsigned long __user *user_mask_ptr) | 5652 | unsigned long __user *, user_mask_ptr) |
| 5650 | { | 5653 | { |
| 5651 | int ret; | 5654 | int ret; |
| 5652 | cpumask_var_t mask; | 5655 | cpumask_var_t mask; |
| @@ -5675,7 +5678,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, | |||
| 5675 | * This function yields the current CPU to other tasks. If there are no | 5678 | * This function yields the current CPU to other tasks. If there are no |
| 5676 | * other threads running on this CPU then this function will return. | 5679 | * other threads running on this CPU then this function will return. |
| 5677 | */ | 5680 | */ |
| 5678 | asmlinkage long sys_sched_yield(void) | 5681 | SYSCALL_DEFINE0(sched_yield) |
| 5679 | { | 5682 | { |
| 5680 | struct rq *rq = this_rq_lock(); | 5683 | struct rq *rq = this_rq_lock(); |
| 5681 | 5684 | ||
| @@ -5816,7 +5819,7 @@ long __sched io_schedule_timeout(long timeout) | |||
| 5816 | * this syscall returns the maximum rt_priority that can be used | 5819 | * this syscall returns the maximum rt_priority that can be used |
| 5817 | * by a given scheduling class. | 5820 | * by a given scheduling class. |
| 5818 | */ | 5821 | */ |
| 5819 | asmlinkage long sys_sched_get_priority_max(int policy) | 5822 | SYSCALL_DEFINE1(sched_get_priority_max, int, policy) |
| 5820 | { | 5823 | { |
| 5821 | int ret = -EINVAL; | 5824 | int ret = -EINVAL; |
| 5822 | 5825 | ||
| @@ -5841,7 +5844,7 @@ asmlinkage long sys_sched_get_priority_max(int policy) | |||
| 5841 | * this syscall returns the minimum rt_priority that can be used | 5844 | * this syscall returns the minimum rt_priority that can be used |
| 5842 | * by a given scheduling class. | 5845 | * by a given scheduling class. |
| 5843 | */ | 5846 | */ |
| 5844 | asmlinkage long sys_sched_get_priority_min(int policy) | 5847 | SYSCALL_DEFINE1(sched_get_priority_min, int, policy) |
| 5845 | { | 5848 | { |
| 5846 | int ret = -EINVAL; | 5849 | int ret = -EINVAL; |
| 5847 | 5850 | ||
| @@ -5866,8 +5869,8 @@ asmlinkage long sys_sched_get_priority_min(int policy) | |||
| 5866 | * this syscall writes the default timeslice value of a given process | 5869 | * this syscall writes the default timeslice value of a given process |
| 5867 | * into the user-space timespec buffer. A value of '0' means infinity. | 5870 | * into the user-space timespec buffer. A value of '0' means infinity. |
| 5868 | */ | 5871 | */ |
| 5869 | asmlinkage | 5872 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, |
| 5870 | long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) | 5873 | struct timespec __user *, interval) |
| 5871 | { | 5874 | { |
| 5872 | struct task_struct *p; | 5875 | struct task_struct *p; |
| 5873 | unsigned int time_slice; | 5876 | unsigned int time_slice; |
| @@ -7282,10 +7285,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, | |||
| 7282 | * groups, so roll our own. Now each node has its own list of groups which | 7285 | * groups, so roll our own. Now each node has its own list of groups which |
| 7283 | * gets dynamically allocated. | 7286 | * gets dynamically allocated. |
| 7284 | */ | 7287 | */ |
| 7285 | static DEFINE_PER_CPU(struct sched_domain, node_domains); | 7288 | static DEFINE_PER_CPU(struct static_sched_domain, node_domains); |
| 7286 | static struct sched_group ***sched_group_nodes_bycpu; | 7289 | static struct sched_group ***sched_group_nodes_bycpu; |
| 7287 | 7290 | ||
| 7288 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); | 7291 | static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains); |
| 7289 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); | 7292 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); |
| 7290 | 7293 | ||
| 7291 | static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, | 7294 | static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, |
| @@ -7560,7 +7563,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7560 | #ifdef CONFIG_NUMA | 7563 | #ifdef CONFIG_NUMA |
| 7561 | if (cpumask_weight(cpu_map) > | 7564 | if (cpumask_weight(cpu_map) > |
| 7562 | SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) { | 7565 | SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) { |
| 7563 | sd = &per_cpu(allnodes_domains, i); | 7566 | sd = &per_cpu(allnodes_domains, i).sd; |
| 7564 | SD_INIT(sd, ALLNODES); | 7567 | SD_INIT(sd, ALLNODES); |
| 7565 | set_domain_attribute(sd, attr); | 7568 | set_domain_attribute(sd, attr); |
| 7566 | cpumask_copy(sched_domain_span(sd), cpu_map); | 7569 | cpumask_copy(sched_domain_span(sd), cpu_map); |
| @@ -7570,7 +7573,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7570 | } else | 7573 | } else |
| 7571 | p = NULL; | 7574 | p = NULL; |
| 7572 | 7575 | ||
| 7573 | sd = &per_cpu(node_domains, i); | 7576 | sd = &per_cpu(node_domains, i).sd; |
| 7574 | SD_INIT(sd, NODE); | 7577 | SD_INIT(sd, NODE); |
| 7575 | set_domain_attribute(sd, attr); | 7578 | set_domain_attribute(sd, attr); |
| 7576 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); | 7579 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); |
| @@ -7688,7 +7691,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7688 | for_each_cpu(j, nodemask) { | 7691 | for_each_cpu(j, nodemask) { |
| 7689 | struct sched_domain *sd; | 7692 | struct sched_domain *sd; |
| 7690 | 7693 | ||
| 7691 | sd = &per_cpu(node_domains, j); | 7694 | sd = &per_cpu(node_domains, j).sd; |
| 7692 | sd->groups = sg; | 7695 | sd->groups = sg; |
| 7693 | } | 7696 | } |
| 7694 | sg->__cpu_power = 0; | 7697 | sg->__cpu_power = 0; |
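The new .sd dereferences in the three hunks above mean node_domains and allnodes_domains are no longer bare per-cpu struct sched_domain variables but a wrapper type. The wrapper itself is not shown in this diff; judging from the static_sched_group naming already visible here, its shape is presumably along these lines (a reconstruction, not quoted from the patch):

/* assumed definition: embed the span bitmap with the domain so
 * NR_CPUS-sized cpumasks need no separate allocation */
struct static_sched_domain {
	struct sched_domain	sd;
	DECLARE_BITMAP(span, CONFIG_NR_CPUS);
};

/* which is why each access becomes, e.g.:
 *	sd = &per_cpu(node_domains, i).sd;
 */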
| @@ -9047,6 +9050,13 @@ static int tg_schedulable(struct task_group *tg, void *data) | |||
| 9047 | runtime = d->rt_runtime; | 9050 | runtime = d->rt_runtime; |
| 9048 | } | 9051 | } |
| 9049 | 9052 | ||
| 9053 | #ifdef CONFIG_USER_SCHED | ||
| 9054 | if (tg == &root_task_group) { | ||
| 9055 | period = global_rt_period(); | ||
| 9056 | runtime = global_rt_runtime(); | ||
| 9057 | } | ||
| 9058 | #endif | ||
| 9059 | |||
| 9050 | /* | 9060 | /* |
| 9051 | * Cannot have more runtime than the period. | 9061 | * Cannot have more runtime than the period. |
| 9052 | */ | 9062 | */ |
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 4293cfa9681d..16eeba4e4169 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c | |||
| @@ -145,6 +145,19 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) | |||
| 145 | read_unlock_irqrestore(&tasklist_lock, flags); | 145 | read_unlock_irqrestore(&tasklist_lock, flags); |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | #if defined(CONFIG_CGROUP_SCHED) && \ | ||
| 149 | (defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)) | ||
| 150 | static void task_group_path(struct task_group *tg, char *buf, int buflen) | ||
| 151 | { | ||
| 152 | /* may be NULL if the underlying cgroup isn't fully-created yet */ | ||
| 153 | if (!tg->css.cgroup) { | ||
| 154 | buf[0] = '\0'; | ||
| 155 | return; | ||
| 156 | } | ||
| 157 | cgroup_path(tg->css.cgroup, buf, buflen); | ||
| 158 | } | ||
| 159 | #endif | ||
| 160 | |||
| 148 | void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | 161 | void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) |
| 149 | { | 162 | { |
| 150 | s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1, | 163 | s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1, |
| @@ -154,10 +167,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
| 154 | unsigned long flags; | 167 | unsigned long flags; |
| 155 | 168 | ||
| 156 | #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED) | 169 | #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED) |
| 157 | char path[128] = ""; | 170 | char path[128]; |
| 158 | struct task_group *tg = cfs_rq->tg; | 171 | struct task_group *tg = cfs_rq->tg; |
| 159 | 172 | ||
| 160 | cgroup_path(tg->css.cgroup, path, sizeof(path)); | 173 | task_group_path(tg, path, sizeof(path)); |
| 161 | 174 | ||
| 162 | SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path); | 175 | SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path); |
| 163 | #elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED) | 176 | #elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED) |
| @@ -208,10 +221,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
| 208 | void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) | 221 | void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) |
| 209 | { | 222 | { |
| 210 | #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED) | 223 | #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED) |
| 211 | char path[128] = ""; | 224 | char path[128]; |
| 212 | struct task_group *tg = rt_rq->tg; | 225 | struct task_group *tg = rt_rq->tg; |
| 213 | 226 | ||
| 214 | cgroup_path(tg->css.cgroup, path, sizeof(path)); | 227 | task_group_path(tg, path, sizeof(path)); |
| 215 | 228 | ||
| 216 | SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path); | 229 | SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path); |
| 217 | #else | 230 | #else |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 8e1352c75557..5cc1c162044f 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
| @@ -283,7 +283,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq) | |||
| 283 | struct sched_entity, | 283 | struct sched_entity, |
| 284 | run_node); | 284 | run_node); |
| 285 | 285 | ||
| 286 | if (vruntime == cfs_rq->min_vruntime) | 286 | if (!cfs_rq->curr) |
| 287 | vruntime = se->vruntime; | 287 | vruntime = se->vruntime; |
| 288 | else | 288 | else |
| 289 | vruntime = min_vruntime(vruntime, se->vruntime); | 289 | vruntime = min_vruntime(vruntime, se->vruntime); |
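The rewritten test makes the decision depend on whether the runqueue currently has a running entity: with no curr, the leftmost waiting entity's vruntime is taken outright; otherwise the smaller of the two candidates wins. For reference, the min_vruntime() helper called here is, as I remember it, the wraparound-safe signed comparison:

/* smaller of two u64 virtual runtimes, safe across 64-bit wrap:
 * the signed difference decides, not the raw magnitudes */
static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);

	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}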
| @@ -429,7 +429,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
| 429 | u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); | 429 | u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); |
| 430 | 430 | ||
| 431 | for_each_sched_entity(se) { | 431 | for_each_sched_entity(se) { |
| 432 | struct load_weight *load = &cfs_rq->load; | 432 | struct load_weight *load; |
| 433 | |||
| 434 | cfs_rq = cfs_rq_of(se); | ||
| 435 | load = &cfs_rq->load; | ||
| 433 | 436 | ||
| 434 | if (unlikely(!se->on_rq)) { | 437 | if (unlikely(!se->on_rq)) { |
| 435 | struct load_weight lw = cfs_rq->load; | 438 | struct load_weight lw = cfs_rq->load; |
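The bug this hunk fixes is specific to group scheduling: for_each_sched_entity() walks se upward through its parent entities, but cfs_rq (and therefore load) was captured once at the leaf, so every level of the hierarchy was scaled by the leaf runqueue's weight. Schematically, under the usual hierarchy macros (the rescaling itself goes through what I recall is calc_delta_mine(), elided in the hunk):

/* sketch only; with CONFIG_FAIR_GROUP_SCHED,
 * for_each_sched_entity(se) is roughly: for (; se; se = se->parent) */
for_each_sched_entity(se) {
	struct load_weight *load;

	cfs_rq = cfs_rq_of(se);	/* this level's runqueue ...  */
	load   = &cfs_rq->load;	/* ... and this level's load  */

	/* each level rescales the slice by its own weight ratio */
	slice = calc_delta_mine(slice, se->load.weight, load);
}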
| @@ -677,9 +680,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) | |||
| 677 | unsigned long thresh = sysctl_sched_latency; | 680 | unsigned long thresh = sysctl_sched_latency; |
| 678 | 681 | ||
| 679 | /* | 682 | /* |
| 680 | * convert the sleeper threshold into virtual time | 683 | * Convert the sleeper threshold into virtual time. |
| 684 | * SCHED_IDLE is a special sub-class. We care about | ||
| 685 | * fairness only relative to other SCHED_IDLE tasks, | ||
| 686 | * all of which have the same weight. | ||
| 681 | */ | 687 | */ |
| 682 | if (sched_feat(NORMALIZED_SLEEPER)) | 688 | if (sched_feat(NORMALIZED_SLEEPER) && |
| 689 | task_of(se)->policy != SCHED_IDLE) | ||
| 683 | thresh = calc_delta_fair(thresh, se); | 690 | thresh = calc_delta_fair(thresh, se); |
| 684 | 691 | ||
| 685 | vruntime -= thresh; | 692 | vruntime -= thresh; |
| @@ -1340,14 +1347,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) | |||
| 1340 | 1347 | ||
| 1341 | static void set_last_buddy(struct sched_entity *se) | 1348 | static void set_last_buddy(struct sched_entity *se) |
| 1342 | { | 1349 | { |
| 1343 | for_each_sched_entity(se) | 1350 | if (likely(task_of(se)->policy != SCHED_IDLE)) { |
| 1344 | cfs_rq_of(se)->last = se; | 1351 | for_each_sched_entity(se) |
| 1352 | cfs_rq_of(se)->last = se; | ||
| 1353 | } | ||
| 1345 | } | 1354 | } |
| 1346 | 1355 | ||
| 1347 | static void set_next_buddy(struct sched_entity *se) | 1356 | static void set_next_buddy(struct sched_entity *se) |
| 1348 | { | 1357 | { |
| 1349 | for_each_sched_entity(se) | 1358 | if (likely(task_of(se)->policy != SCHED_IDLE)) { |
| 1350 | cfs_rq_of(se)->next = se; | 1359 | for_each_sched_entity(se) |
| 1360 | cfs_rq_of(se)->next = se; | ||
| 1361 | } | ||
| 1351 | } | 1362 | } |
| 1352 | 1363 | ||
| 1353 | /* | 1364 | /* |
| @@ -1393,12 +1404,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) | |||
| 1393 | return; | 1404 | return; |
| 1394 | 1405 | ||
| 1395 | /* | 1406 | /* |
| 1396 | * Batch tasks do not preempt (their preemption is driven by | 1407 | * Batch and idle tasks do not preempt (their preemption is driven by |
| 1397 | * the tick): | 1408 | * the tick): |
| 1398 | */ | 1409 | */ |
| 1399 | if (unlikely(p->policy == SCHED_BATCH)) | 1410 | if (unlikely(p->policy != SCHED_NORMAL)) |
| 1400 | return; | 1411 | return; |
| 1401 | 1412 | ||
| 1413 | /* Idle tasks are by definition preempted by everybody. */ | ||
| 1414 | if (unlikely(curr->policy == SCHED_IDLE)) { | ||
| 1415 | resched_task(curr); | ||
| 1416 | return; | ||
| 1417 | } | ||
| 1418 | |||
| 1402 | if (!sched_feat(WAKEUP_PREEMPT)) | 1419 | if (!sched_feat(WAKEUP_PREEMPT)) |
| 1403 | return; | 1420 | return; |
| 1404 | 1421 | ||
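Taken together, the sched_fair.c hunks above give SCHED_IDLE a consistent wakeup story: idle tasks are never cached as last/next buddies, never wakeup-preempt anyone (they now fall into the policy != SCHED_NORMAL early return together with SCHED_BATCH), and are themselves rescheduled as soon as a normal task wakes. A condensed, purely illustrative decision helper:

#define SCHED_NORMAL	0	/* values as in linux/sched.h */
#define SCHED_BATCH	3
#define SCHED_IDLE	5

/* 1 = preempt curr now, 0 = never (tick-driven only),
 * -1 = fall through to the buddy/vruntime checks */
static int wakeup_preempt_policy(int curr_policy, int waker_policy)
{
	if (waker_policy != SCHED_NORMAL)
		return 0;		/* batch and idle wakers */
	if (curr_policy == SCHED_IDLE)
		return 1;		/* idle yields to everybody */
	return -1;
}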
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index f2773b5d1226..8ab0cef8ecab 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h | |||
| @@ -296,6 +296,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next) | |||
| 296 | static inline void account_group_user_time(struct task_struct *tsk, | 296 | static inline void account_group_user_time(struct task_struct *tsk, |
| 297 | cputime_t cputime) | 297 | cputime_t cputime) |
| 298 | { | 298 | { |
| 299 | struct task_cputime *times; | ||
| 299 | struct signal_struct *sig; | 300 | struct signal_struct *sig; |
| 300 | 301 | ||
| 301 | /* tsk == current, ensure it is safe to use ->signal */ | 302 | /* tsk == current, ensure it is safe to use ->signal */ |
| @@ -303,13 +304,11 @@ static inline void account_group_user_time(struct task_struct *tsk, | |||
| 303 | return; | 304 | return; |
| 304 | 305 | ||
| 305 | sig = tsk->signal; | 306 | sig = tsk->signal; |
| 306 | if (sig->cputime.totals) { | 307 | times = &sig->cputime.totals; |
| 307 | struct task_cputime *times; | ||
| 308 | 308 | ||
| 309 | times = per_cpu_ptr(sig->cputime.totals, get_cpu()); | 309 | spin_lock(×->lock); |
| 310 | times->utime = cputime_add(times->utime, cputime); | 310 | times->utime = cputime_add(times->utime, cputime); |
| 311 | put_cpu_no_resched(); | 311 | spin_unlock(×->lock); |
| 312 | } | ||
| 313 | } | 312 | } |
| 314 | 313 | ||
| 315 | /** | 314 | /** |
| @@ -325,6 +324,7 @@ static inline void account_group_user_time(struct task_struct *tsk, | |||
| 325 | static inline void account_group_system_time(struct task_struct *tsk, | 324 | static inline void account_group_system_time(struct task_struct *tsk, |
| 326 | cputime_t cputime) | 325 | cputime_t cputime) |
| 327 | { | 326 | { |
| 327 | struct task_cputime *times; | ||
| 328 | struct signal_struct *sig; | 328 | struct signal_struct *sig; |
| 329 | 329 | ||
| 330 | /* tsk == current, ensure it is safe to use ->signal */ | 330 | /* tsk == current, ensure it is safe to use ->signal */ |
| @@ -332,13 +332,11 @@ static inline void account_group_system_time(struct task_struct *tsk, | |||
| 332 | return; | 332 | return; |
| 333 | 333 | ||
| 334 | sig = tsk->signal; | 334 | sig = tsk->signal; |
| 335 | if (sig->cputime.totals) { | 335 | times = &sig->cputime.totals; |
| 336 | struct task_cputime *times; | ||
| 337 | 336 | ||
| 338 | times = per_cpu_ptr(sig->cputime.totals, get_cpu()); | 337 | spin_lock(×->lock); |
| 339 | times->stime = cputime_add(times->stime, cputime); | 338 | times->stime = cputime_add(times->stime, cputime); |
| 340 | put_cpu_no_resched(); | 339 | spin_unlock(×->lock); |
| 341 | } | ||
| 342 | } | 340 | } |
| 343 | 341 | ||
| 344 | /** | 342 | /** |
| @@ -354,6 +352,7 @@ static inline void account_group_system_time(struct task_struct *tsk, | |||
| 354 | static inline void account_group_exec_runtime(struct task_struct *tsk, | 352 | static inline void account_group_exec_runtime(struct task_struct *tsk, |
| 355 | unsigned long long ns) | 353 | unsigned long long ns) |
| 356 | { | 354 | { |
| 355 | struct task_cputime *times; | ||
| 357 | struct signal_struct *sig; | 356 | struct signal_struct *sig; |
| 358 | 357 | ||
| 359 | sig = tsk->signal; | 358 | sig = tsk->signal; |
| @@ -362,11 +361,9 @@ static inline void account_group_exec_runtime(struct task_struct *tsk, | |||
| 362 | if (unlikely(!sig)) | 361 | if (unlikely(!sig)) |
| 363 | return; | 362 | return; |
| 364 | 363 | ||
| 365 | if (sig->cputime.totals) { | 364 | times = &sig->cputime.totals; |
| 366 | struct task_cputime *times; | ||
| 367 | 365 | ||
| 368 | times = per_cpu_ptr(sig->cputime.totals, get_cpu()); | 366 | spin_lock(×->lock); |
| 369 | times->sum_exec_runtime += ns; | 367 | times->sum_exec_runtime += ns; |
| 370 | put_cpu_no_resched(); | 368 | spin_unlock(×->lock); |
| 371 | } | ||
| 372 | } | 369 | } |
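All three accounting helpers above switch from a dynamically allocated per-cpu totals array (hence the vanished NULL check and the get_cpu()/put_cpu_no_resched() pair) to a single embedded structure guarded by its own spinlock. The companion structure change is not part of this diff; my reconstruction of the include/linux/sched.h side is roughly:

/* reconstruction, not quoted from the patch; cputime_t is the
 * arch-defined cputime type */
struct task_cputime {
	cputime_t		utime;
	cputime_t		stime;
	unsigned long long	sum_exec_runtime;
	spinlock_t		lock;	/* guards all three fields */
};

struct thread_group_cputime {
	struct task_cputime	totals;	/* was: a per-cpu pointer */
};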
diff --git a/kernel/signal.c b/kernel/signal.c index 3152ac3b62e2..b6b36768b758 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -909,7 +909,9 @@ static void print_fatal_signal(struct pt_regs *regs, int signr) | |||
| 909 | } | 909 | } |
| 910 | #endif | 910 | #endif |
| 911 | printk("\n"); | 911 | printk("\n"); |
| 912 | preempt_disable(); | ||
| 912 | show_regs(regs); | 913 | show_regs(regs); |
| 914 | preempt_enable(); | ||
| 913 | } | 915 | } |
| 914 | 916 | ||
| 915 | static int __init setup_print_fatal_signals(char *str) | 917 | static int __init setup_print_fatal_signals(char *str) |
| @@ -1961,7 +1963,7 @@ EXPORT_SYMBOL(unblock_all_signals); | |||
| 1961 | * System call entry points. | 1963 | * System call entry points. |
| 1962 | */ | 1964 | */ |
| 1963 | 1965 | ||
| 1964 | asmlinkage long sys_restart_syscall(void) | 1966 | SYSCALL_DEFINE0(restart_syscall) |
| 1965 | { | 1967 | { |
| 1966 | struct restart_block *restart = ¤t_thread_info()->restart_block; | 1968 | struct restart_block *restart = ¤t_thread_info()->restart_block; |
| 1967 | return restart->fn(restart); | 1969 | return restart->fn(restart); |
| @@ -2014,8 +2016,8 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) | |||
| 2014 | return error; | 2016 | return error; |
| 2015 | } | 2017 | } |
| 2016 | 2018 | ||
| 2017 | asmlinkage long | 2019 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set, |
| 2018 | sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize) | 2020 | sigset_t __user *, oset, size_t, sigsetsize) |
| 2019 | { | 2021 | { |
| 2020 | int error = -EINVAL; | 2022 | int error = -EINVAL; |
| 2021 | sigset_t old_set, new_set; | 2023 | sigset_t old_set, new_set; |
| @@ -2074,8 +2076,7 @@ out: | |||
| 2074 | return error; | 2076 | return error; |
| 2075 | } | 2077 | } |
| 2076 | 2078 | ||
| 2077 | asmlinkage long | 2079 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) |
| 2078 | sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize) | ||
| 2079 | { | 2080 | { |
| 2080 | return do_sigpending(set, sigsetsize); | 2081 | return do_sigpending(set, sigsetsize); |
| 2081 | } | 2082 | } |
| @@ -2146,11 +2147,9 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) | |||
| 2146 | 2147 | ||
| 2147 | #endif | 2148 | #endif |
| 2148 | 2149 | ||
| 2149 | asmlinkage long | 2150 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, |
| 2150 | sys_rt_sigtimedwait(const sigset_t __user *uthese, | 2151 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, |
| 2151 | siginfo_t __user *uinfo, | 2152 | size_t, sigsetsize) |
| 2152 | const struct timespec __user *uts, | ||
| 2153 | size_t sigsetsize) | ||
| 2154 | { | 2153 | { |
| 2155 | int ret, sig; | 2154 | int ret, sig; |
| 2156 | sigset_t these; | 2155 | sigset_t these; |
| @@ -2223,8 +2222,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese, | |||
| 2223 | return ret; | 2222 | return ret; |
| 2224 | } | 2223 | } |
| 2225 | 2224 | ||
| 2226 | asmlinkage long | 2225 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) |
| 2227 | sys_kill(pid_t pid, int sig) | ||
| 2228 | { | 2226 | { |
| 2229 | struct siginfo info; | 2227 | struct siginfo info; |
| 2230 | 2228 | ||
| @@ -2283,7 +2281,7 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig) | |||
| 2283 | * exists but it no longer belongs to the target process. This | 2281 | * exists but it no longer belongs to the target process. This |
| 2284 | * method solves the problem of threads exiting and PIDs getting reused. | 2282 | * method solves the problem of threads exiting and PIDs getting reused. |
| 2285 | */ | 2283 | */ |
| 2286 | asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig) | 2284 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) |
| 2287 | { | 2285 | { |
| 2288 | /* This is only valid for single tasks */ | 2286 | /* This is only valid for single tasks */ |
| 2289 | if (pid <= 0 || tgid <= 0) | 2287 | if (pid <= 0 || tgid <= 0) |
| @@ -2295,8 +2293,7 @@ asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig) | |||
| 2295 | /* | 2293 | /* |
| 2296 | * Send a signal to only one task, even if it's a CLONE_THREAD task. | 2294 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
| 2297 | */ | 2295 | */ |
| 2298 | asmlinkage long | 2296 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
| 2299 | sys_tkill(pid_t pid, int sig) | ||
| 2300 | { | 2297 | { |
| 2301 | /* This is only valid for single tasks */ | 2298 | /* This is only valid for single tasks */ |
| 2302 | if (pid <= 0) | 2299 | if (pid <= 0) |
| @@ -2305,8 +2302,8 @@ sys_tkill(pid_t pid, int sig) | |||
| 2305 | return do_tkill(0, pid, sig); | 2302 | return do_tkill(0, pid, sig); |
| 2306 | } | 2303 | } |
| 2307 | 2304 | ||
| 2308 | asmlinkage long | 2305 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, |
| 2309 | sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo) | 2306 | siginfo_t __user *, uinfo) |
| 2310 | { | 2307 | { |
| 2311 | siginfo_t info; | 2308 | siginfo_t info; |
| 2312 | 2309 | ||
| @@ -2434,8 +2431,7 @@ out: | |||
| 2434 | 2431 | ||
| 2435 | #ifdef __ARCH_WANT_SYS_SIGPENDING | 2432 | #ifdef __ARCH_WANT_SYS_SIGPENDING |
| 2436 | 2433 | ||
| 2437 | asmlinkage long | 2434 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) |
| 2438 | sys_sigpending(old_sigset_t __user *set) | ||
| 2439 | { | 2435 | { |
| 2440 | return do_sigpending(set, sizeof(*set)); | 2436 | return do_sigpending(set, sizeof(*set)); |
| 2441 | } | 2437 | } |
| @@ -2446,8 +2442,8 @@ sys_sigpending(old_sigset_t __user *set) | |||
| 2446 | /* Some platforms have their own version with special arguments; others | 2442 | /* Some platforms have their own version with special arguments; others |
| 2447 | support only sys_rt_sigprocmask. */ | 2443 | support only sys_rt_sigprocmask. */ |
| 2448 | 2444 | ||
| 2449 | asmlinkage long | 2445 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set, |
| 2450 | sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset) | 2446 | old_sigset_t __user *, oset) |
| 2451 | { | 2447 | { |
| 2452 | int error; | 2448 | int error; |
| 2453 | old_sigset_t old_set, new_set; | 2449 | old_sigset_t old_set, new_set; |
| @@ -2497,11 +2493,10 @@ out: | |||
| 2497 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ | 2493 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ |
| 2498 | 2494 | ||
| 2499 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION | 2495 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION |
| 2500 | asmlinkage long | 2496 | SYSCALL_DEFINE4(rt_sigaction, int, sig, |
| 2501 | sys_rt_sigaction(int sig, | 2497 | const struct sigaction __user *, act, |
| 2502 | const struct sigaction __user *act, | 2498 | struct sigaction __user *, oact, |
| 2503 | struct sigaction __user *oact, | 2499 | size_t, sigsetsize) |
| 2504 | size_t sigsetsize) | ||
| 2505 | { | 2500 | { |
| 2506 | struct k_sigaction new_sa, old_sa; | 2501 | struct k_sigaction new_sa, old_sa; |
| 2507 | int ret = -EINVAL; | 2502 | int ret = -EINVAL; |
| @@ -2531,15 +2526,13 @@ out: | |||
| 2531 | /* | 2526 | /* |
| 2532 | * For backwards compatibility. Functionality superseded by sigprocmask. | 2527 | * For backwards compatibility. Functionality superseded by sigprocmask. |
| 2533 | */ | 2528 | */ |
| 2534 | asmlinkage long | 2529 | SYSCALL_DEFINE0(sgetmask) |
| 2535 | sys_sgetmask(void) | ||
| 2536 | { | 2530 | { |
| 2537 | /* SMP safe */ | 2531 | /* SMP safe */ |
| 2538 | return current->blocked.sig[0]; | 2532 | return current->blocked.sig[0]; |
| 2539 | } | 2533 | } |
| 2540 | 2534 | ||
| 2541 | asmlinkage long | 2535 | SYSCALL_DEFINE1(ssetmask, int, newmask) |
| 2542 | sys_ssetmask(int newmask) | ||
| 2543 | { | 2536 | { |
| 2544 | int old; | 2537 | int old; |
| 2545 | 2538 | ||
| @@ -2559,8 +2552,7 @@ sys_ssetmask(int newmask) | |||
| 2559 | /* | 2552 | /* |
| 2560 | * For backwards compatibility. Functionality superseded by sigaction. | 2553 | * For backwards compatibility. Functionality superseded by sigaction. |
| 2561 | */ | 2554 | */ |
| 2562 | asmlinkage unsigned long | 2555 | SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) |
| 2563 | sys_signal(int sig, __sighandler_t handler) | ||
| 2564 | { | 2556 | { |
| 2565 | struct k_sigaction new_sa, old_sa; | 2557 | struct k_sigaction new_sa, old_sa; |
| 2566 | int ret; | 2558 | int ret; |
| @@ -2577,8 +2569,7 @@ sys_signal(int sig, __sighandler_t handler) | |||
| 2577 | 2569 | ||
| 2578 | #ifdef __ARCH_WANT_SYS_PAUSE | 2570 | #ifdef __ARCH_WANT_SYS_PAUSE |
| 2579 | 2571 | ||
| 2580 | asmlinkage long | 2572 | SYSCALL_DEFINE0(pause) |
| 2581 | sys_pause(void) | ||
| 2582 | { | 2573 | { |
| 2583 | current->state = TASK_INTERRUPTIBLE; | 2574 | current->state = TASK_INTERRUPTIBLE; |
| 2584 | schedule(); | 2575 | schedule(); |
| @@ -2588,7 +2579,7 @@ sys_pause(void) | |||
| 2588 | #endif | 2579 | #endif |
| 2589 | 2580 | ||
| 2590 | #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND | 2581 | #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND |
| 2591 | asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize) | 2582 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) |
| 2592 | { | 2583 | { |
| 2593 | sigset_t newset; | 2584 | sigset_t newset; |
| 2594 | 2585 | ||
diff --git a/kernel/smp.c b/kernel/smp.c index 5cfa0e5e3e88..bbedbb7efe32 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -18,6 +18,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock); | |||
| 18 | enum { | 18 | enum { |
| 19 | CSD_FLAG_WAIT = 0x01, | 19 | CSD_FLAG_WAIT = 0x01, |
| 20 | CSD_FLAG_ALLOC = 0x02, | 20 | CSD_FLAG_ALLOC = 0x02, |
| 21 | CSD_FLAG_LOCK = 0x04, | ||
| 21 | }; | 22 | }; |
| 22 | 23 | ||
| 23 | struct call_function_data { | 24 | struct call_function_data { |
| @@ -186,6 +187,9 @@ void generic_smp_call_function_single_interrupt(void) | |||
| 186 | if (data_flags & CSD_FLAG_WAIT) { | 187 | if (data_flags & CSD_FLAG_WAIT) { |
| 187 | smp_wmb(); | 188 | smp_wmb(); |
| 188 | data->flags &= ~CSD_FLAG_WAIT; | 189 | data->flags &= ~CSD_FLAG_WAIT; |
| 190 | } else if (data_flags & CSD_FLAG_LOCK) { | ||
| 191 | smp_wmb(); | ||
| 192 | data->flags &= ~CSD_FLAG_LOCK; | ||
| 189 | } else if (data_flags & CSD_FLAG_ALLOC) | 193 | } else if (data_flags & CSD_FLAG_ALLOC) |
| 190 | kfree(data); | 194 | kfree(data); |
| 191 | } | 195 | } |
| @@ -196,6 +200,8 @@ void generic_smp_call_function_single_interrupt(void) | |||
| 196 | } | 200 | } |
| 197 | } | 201 | } |
| 198 | 202 | ||
| 203 | static DEFINE_PER_CPU(struct call_single_data, csd_data); | ||
| 204 | |||
| 199 | /* | 205 | /* |
| 200 | * smp_call_function_single - Run a function on a specific CPU | 206 | * smp_call_function_single - Run a function on a specific CPU |
| 201 | * @func: The function to run. This must be fast and non-blocking. | 207 | * @func: The function to run. This must be fast and non-blocking. |
| @@ -224,14 +230,38 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
| 224 | func(info); | 230 | func(info); |
| 225 | local_irq_restore(flags); | 231 | local_irq_restore(flags); |
| 226 | } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { | 232 | } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { |
| 227 | struct call_single_data *data = NULL; | 233 | struct call_single_data *data; |
| 228 | 234 | ||
| 229 | if (!wait) { | 235 | if (!wait) { |
| 236 | /* | ||
| 237 | * We are calling a function on a single CPU | ||
| 238 | * and we are not going to wait for it to finish. | ||
| 239 | * We first try to allocate the data, but if we | ||
| 240 | * fail, we fall back to use a per cpu data to pass | ||
| 241 | * the information to that CPU. Since all callers | ||
| 242 | * of this code will use the same data, we must | ||
| 243 | * synchronize the callers to prevent a new caller | ||
| 244 | * from corrupting the data before the callee | ||
| 245 | * can access it. | ||
| 246 | * | ||
| 247 | * The CSD_FLAG_LOCK is used to let us know when | ||
| 248 | * the IPI handler is done with the data. | ||
| 249 | * The first caller will set it, and the callee | ||
| 250 | * will clear it. The next caller must wait for | ||
| 251 | * it to clear before we set it again. This | ||
| 252 | * will make sure the callee is done with the | ||
| 253 | * data before a new caller will use it. | ||
| 254 | */ | ||
| 230 | data = kmalloc(sizeof(*data), GFP_ATOMIC); | 255 | data = kmalloc(sizeof(*data), GFP_ATOMIC); |
| 231 | if (data) | 256 | if (data) |
| 232 | data->flags = CSD_FLAG_ALLOC; | 257 | data->flags = CSD_FLAG_ALLOC; |
| 233 | } | 258 | else { |
| 234 | if (!data) { | 259 | data = &per_cpu(csd_data, me); |
| 260 | while (data->flags & CSD_FLAG_LOCK) | ||
| 261 | cpu_relax(); | ||
| 262 | data->flags = CSD_FLAG_LOCK; | ||
| 263 | } | ||
| 264 | } else { | ||
| 235 | data = &d; | 265 | data = &d; |
| 236 | data->flags = CSD_FLAG_WAIT; | 266 | data->flags = CSD_FLAG_WAIT; |
| 237 | } | 267 | } |
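From the caller's side the contract is unchanged but sturdier: a wait == 0 call that previously degraded to synchronous on-stack behaviour when the GFP_ATOMIC allocation failed (the old data = &d fallback visible above) now queues through the per-cpu csd_data slot instead. A hedged usage sketch; the callback and counter are hypothetical, only the smp_call_function_single() signature comes from this file:

/* hypothetical callback; runs from IPI context on the target CPU,
 * so it must be fast and must not sleep */
static void bump_counter(void *info)
{
	atomic_inc((atomic_t *)info);
}

static atomic_t hits = ATOMIC_INIT(0);

static void poke_cpu(int cpu)
{
	/* wait == 0: serviced via kmalloc'd data or, after this
	 * patch, the per-cpu csd_data slot if the allocation fails */
	smp_call_function_single(cpu, bump_counter, &hits, 0);
}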
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index d9188c66278a..85d5a2455103 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/lockdep.h> | 16 | #include <linux/lockdep.h> |
| 17 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
| 18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 19 | #include <linux/sysctl.h> | ||
| 19 | 20 | ||
| 20 | #include <asm/irq_regs.h> | 21 | #include <asm/irq_regs.h> |
| 21 | 22 | ||
| @@ -88,6 +89,14 @@ void touch_all_softlockup_watchdogs(void) | |||
| 88 | } | 89 | } |
| 89 | EXPORT_SYMBOL(touch_all_softlockup_watchdogs); | 90 | EXPORT_SYMBOL(touch_all_softlockup_watchdogs); |
| 90 | 91 | ||
| 92 | int proc_dosoftlockup_thresh(struct ctl_table *table, int write, | ||
| 93 | struct file *filp, void __user *buffer, | ||
| 94 | size_t *lenp, loff_t *ppos) | ||
| 95 | { | ||
| 96 | touch_all_softlockup_watchdogs(); | ||
| 97 | return proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); | ||
| 98 | } | ||
| 99 | |||
| 91 | /* | 100 | /* |
| 92 | * This callback runs from the timer interrupt, and checks | 101 | * This callback runs from the timer interrupt, and checks |
| 93 | * whether the watchdog thread has hung or not: | 102 | * whether the watchdog thread has hung or not: |
diff --git a/kernel/sys.c b/kernel/sys.c index 763c3c17ded3..e7dc0e10a485 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -143,7 +143,7 @@ out: | |||
| 143 | return error; | 143 | return error; |
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | asmlinkage long sys_setpriority(int which, int who, int niceval) | 146 | SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) |
| 147 | { | 147 | { |
| 148 | struct task_struct *g, *p; | 148 | struct task_struct *g, *p; |
| 149 | struct user_struct *user; | 149 | struct user_struct *user; |
| @@ -208,7 +208,7 @@ out: | |||
| 208 | * has been offset by 20 (ie it returns 40..1 instead of -20..19) | 208 | * has been offset by 20 (ie it returns 40..1 instead of -20..19) |
| 209 | * to stay compatible. | 209 | * to stay compatible. |
| 210 | */ | 210 | */ |
| 211 | asmlinkage long sys_getpriority(int which, int who) | 211 | SYSCALL_DEFINE2(getpriority, int, which, int, who) |
| 212 | { | 212 | { |
| 213 | struct task_struct *g, *p; | 213 | struct task_struct *g, *p; |
| 214 | struct user_struct *user; | 214 | struct user_struct *user; |
| @@ -355,7 +355,8 @@ EXPORT_SYMBOL_GPL(kernel_power_off); | |||
| 355 | * | 355 | * |
| 356 | * reboot doesn't sync: do that yourself before calling this. | 356 | * reboot doesn't sync: do that yourself before calling this. |
| 357 | */ | 357 | */ |
| 358 | asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg) | 358 | SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, |
| 359 | void __user *, arg) | ||
| 359 | { | 360 | { |
| 360 | char buffer[256]; | 361 | char buffer[256]; |
| 361 | 362 | ||
| @@ -478,7 +479,7 @@ void ctrl_alt_del(void) | |||
| 478 | * SMP: There are no races, the GIDs are checked only by filesystem | 479 | * SMP: There are no races, the GIDs are checked only by filesystem |
| 479 | * operations (as far as semantic preservation is concerned). | 480 | * operations (as far as semantic preservation is concerned). |
| 480 | */ | 481 | */ |
| 481 | asmlinkage long sys_setregid(gid_t rgid, gid_t egid) | 482 | SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) |
| 482 | { | 483 | { |
| 483 | const struct cred *old; | 484 | const struct cred *old; |
| 484 | struct cred *new; | 485 | struct cred *new; |
| @@ -529,7 +530,7 @@ error: | |||
| 529 | * | 530 | * |
| 530 | * SMP: Same implicit races as above. | 531 | * SMP: Same implicit races as above. |
| 531 | */ | 532 | */ |
| 532 | asmlinkage long sys_setgid(gid_t gid) | 533 | SYSCALL_DEFINE1(setgid, gid_t, gid) |
| 533 | { | 534 | { |
| 534 | const struct cred *old; | 535 | const struct cred *old; |
| 535 | struct cred *new; | 536 | struct cred *new; |
| @@ -597,7 +598,7 @@ static int set_user(struct cred *new) | |||
| 597 | * 100% compatible with BSD. A program which uses just setuid() will be | 598 | * 100% compatible with BSD. A program which uses just setuid() will be |
| 598 | * 100% compatible with POSIX with saved IDs. | 599 | * 100% compatible with POSIX with saved IDs. |
| 599 | */ | 600 | */ |
| 600 | asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) | 601 | SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) |
| 601 | { | 602 | { |
| 602 | const struct cred *old; | 603 | const struct cred *old; |
| 603 | struct cred *new; | 604 | struct cred *new; |
| @@ -661,7 +662,7 @@ error: | |||
| 661 | * will allow a root program to temporarily drop privileges and be able to | 662 | * will allow a root program to temporarily drop privileges and be able to |
| 662 | * regain them by swapping the real and effective uid. | 663 | * regain them by swapping the real and effective uid. |
| 663 | */ | 664 | */ |
| 664 | asmlinkage long sys_setuid(uid_t uid) | 665 | SYSCALL_DEFINE1(setuid, uid_t, uid) |
| 665 | { | 666 | { |
| 666 | const struct cred *old; | 667 | const struct cred *old; |
| 667 | struct cred *new; | 668 | struct cred *new; |
| @@ -705,7 +706,7 @@ error: | |||
| 705 | * This function implements a generic ability to update ruid, euid, | 706 | * This function implements a generic ability to update ruid, euid, |
| 706 | * and suid. This allows you to implement the 4.4 compatible seteuid(). | 707 | * and suid. This allows you to implement the 4.4 compatible seteuid(). |
| 707 | */ | 708 | */ |
| 708 | asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) | 709 | SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) |
| 709 | { | 710 | { |
| 710 | const struct cred *old; | 711 | const struct cred *old; |
| 711 | struct cred *new; | 712 | struct cred *new; |
| @@ -756,7 +757,7 @@ error: | |||
| 756 | return retval; | 757 | return retval; |
| 757 | } | 758 | } |
| 758 | 759 | ||
| 759 | asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid) | 760 | SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid) |
| 760 | { | 761 | { |
| 761 | const struct cred *cred = current_cred(); | 762 | const struct cred *cred = current_cred(); |
| 762 | int retval; | 763 | int retval; |
| @@ -771,7 +772,7 @@ asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __us | |||
| 771 | /* | 772 | /* |
| 772 | * Same as above, but for rgid, egid, sgid. | 773 | * Same as above, but for rgid, egid, sgid. |
| 773 | */ | 774 | */ |
| 774 | asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) | 775 | SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) |
| 775 | { | 776 | { |
| 776 | const struct cred *old; | 777 | const struct cred *old; |
| 777 | struct cred *new; | 778 | struct cred *new; |
| @@ -814,7 +815,7 @@ error: | |||
| 814 | return retval; | 815 | return retval; |
| 815 | } | 816 | } |
| 816 | 817 | ||
| 817 | asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid) | 818 | SYSCALL_DEFINE3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid) |
| 818 | { | 819 | { |
| 819 | const struct cred *cred = current_cred(); | 820 | const struct cred *cred = current_cred(); |
| 820 | int retval; | 821 | int retval; |
| @@ -833,7 +834,7 @@ asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __us | |||
| 833 | * whatever uid it wants to). It normally shadows "euid", except when | 834 | * whatever uid it wants to). It normally shadows "euid", except when |
| 834 | * explicitly set by setfsuid() or for access.. | 835 | * explicitly set by setfsuid() or for access.. |
| 835 | */ | 836 | */ |
| 836 | asmlinkage long sys_setfsuid(uid_t uid) | 837 | SYSCALL_DEFINE1(setfsuid, uid_t, uid) |
| 837 | { | 838 | { |
| 838 | const struct cred *old; | 839 | const struct cred *old; |
| 839 | struct cred *new; | 840 | struct cred *new; |
| @@ -870,7 +871,7 @@ change_okay: | |||
| 870 | /* | 871 | /* |
| 871 | * Samma på svenska.. | 872 | * Samma på svenska.. |
| 872 | */ | 873 | */ |
| 873 | asmlinkage long sys_setfsgid(gid_t gid) | 874 | SYSCALL_DEFINE1(setfsgid, gid_t, gid) |
| 874 | { | 875 | { |
| 875 | const struct cred *old; | 876 | const struct cred *old; |
| 876 | struct cred *new; | 877 | struct cred *new; |
| @@ -919,7 +920,7 @@ void do_sys_times(struct tms *tms) | |||
| 919 | tms->tms_cstime = cputime_to_clock_t(cstime); | 920 | tms->tms_cstime = cputime_to_clock_t(cstime); |
| 920 | } | 921 | } |
| 921 | 922 | ||
| 922 | asmlinkage long sys_times(struct tms __user * tbuf) | 923 | SYSCALL_DEFINE1(times, struct tms __user *, tbuf) |
| 923 | { | 924 | { |
| 924 | if (tbuf) { | 925 | if (tbuf) { |
| 925 | struct tms tmp; | 926 | struct tms tmp; |
| @@ -944,7 +945,7 @@ asmlinkage long sys_times(struct tms __user * tbuf) | |||
| 944 | * Auch. Had to add the 'did_exec' flag to conform completely to POSIX. | 945 | * Auch. Had to add the 'did_exec' flag to conform completely to POSIX. |
| 945 | * LBT 04.03.94 | 946 | * LBT 04.03.94 |
| 946 | */ | 947 | */ |
| 947 | asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) | 948 | SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) |
| 948 | { | 949 | { |
| 949 | struct task_struct *p; | 950 | struct task_struct *p; |
| 950 | struct task_struct *group_leader = current->group_leader; | 951 | struct task_struct *group_leader = current->group_leader; |
| @@ -1015,7 +1016,7 @@ out: | |||
| 1015 | return err; | 1016 | return err; |
| 1016 | } | 1017 | } |
| 1017 | 1018 | ||
| 1018 | asmlinkage long sys_getpgid(pid_t pid) | 1019 | SYSCALL_DEFINE1(getpgid, pid_t, pid) |
| 1019 | { | 1020 | { |
| 1020 | struct task_struct *p; | 1021 | struct task_struct *p; |
| 1021 | struct pid *grp; | 1022 | struct pid *grp; |
| @@ -1045,14 +1046,14 @@ out: | |||
| 1045 | 1046 | ||
| 1046 | #ifdef __ARCH_WANT_SYS_GETPGRP | 1047 | #ifdef __ARCH_WANT_SYS_GETPGRP |
| 1047 | 1048 | ||
| 1048 | asmlinkage long sys_getpgrp(void) | 1049 | SYSCALL_DEFINE0(getpgrp) |
| 1049 | { | 1050 | { |
| 1050 | return sys_getpgid(0); | 1051 | return sys_getpgid(0); |
| 1051 | } | 1052 | } |
| 1052 | 1053 | ||
| 1053 | #endif | 1054 | #endif |
| 1054 | 1055 | ||
| 1055 | asmlinkage long sys_getsid(pid_t pid) | 1056 | SYSCALL_DEFINE1(getsid, pid_t, pid) |
| 1056 | { | 1057 | { |
| 1057 | struct task_struct *p; | 1058 | struct task_struct *p; |
| 1058 | struct pid *sid; | 1059 | struct pid *sid; |
| @@ -1080,7 +1081,7 @@ out: | |||
| 1080 | return retval; | 1081 | return retval; |
| 1081 | } | 1082 | } |
| 1082 | 1083 | ||
| 1083 | asmlinkage long sys_setsid(void) | 1084 | SYSCALL_DEFINE0(setsid) |
| 1084 | { | 1085 | { |
| 1085 | struct task_struct *group_leader = current->group_leader; | 1086 | struct task_struct *group_leader = current->group_leader; |
| 1086 | struct pid *sid = task_pid(group_leader); | 1087 | struct pid *sid = task_pid(group_leader); |
| @@ -1311,7 +1312,7 @@ int set_current_groups(struct group_info *group_info) | |||
| 1311 | 1312 | ||
| 1312 | EXPORT_SYMBOL(set_current_groups); | 1313 | EXPORT_SYMBOL(set_current_groups); |
| 1313 | 1314 | ||
| 1314 | asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) | 1315 | SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist) |
| 1315 | { | 1316 | { |
| 1316 | const struct cred *cred = current_cred(); | 1317 | const struct cred *cred = current_cred(); |
| 1317 | int i; | 1318 | int i; |
| @@ -1340,7 +1341,7 @@ out: | |||
| 1340 | * without another task interfering. | 1341 | * without another task interfering. |
| 1341 | */ | 1342 | */ |
| 1342 | 1343 | ||
| 1343 | asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist) | 1344 | SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist) |
| 1344 | { | 1345 | { |
| 1345 | struct group_info *group_info; | 1346 | struct group_info *group_info; |
| 1346 | int retval; | 1347 | int retval; |
| @@ -1394,7 +1395,7 @@ EXPORT_SYMBOL(in_egroup_p); | |||
| 1394 | 1395 | ||
| 1395 | DECLARE_RWSEM(uts_sem); | 1396 | DECLARE_RWSEM(uts_sem); |
| 1396 | 1397 | ||
| 1397 | asmlinkage long sys_newuname(struct new_utsname __user * name) | 1398 | SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) |
| 1398 | { | 1399 | { |
| 1399 | int errno = 0; | 1400 | int errno = 0; |
| 1400 | 1401 | ||
| @@ -1405,7 +1406,7 @@ asmlinkage long sys_newuname(struct new_utsname __user * name) | |||
| 1405 | return errno; | 1406 | return errno; |
| 1406 | } | 1407 | } |
| 1407 | 1408 | ||
| 1408 | asmlinkage long sys_sethostname(char __user *name, int len) | 1409 | SYSCALL_DEFINE2(sethostname, char __user *, name, int, len) |
| 1409 | { | 1410 | { |
| 1410 | int errno; | 1411 | int errno; |
| 1411 | char tmp[__NEW_UTS_LEN]; | 1412 | char tmp[__NEW_UTS_LEN]; |
| @@ -1429,7 +1430,7 @@ asmlinkage long sys_sethostname(char __user *name, int len) | |||
| 1429 | 1430 | ||
| 1430 | #ifdef __ARCH_WANT_SYS_GETHOSTNAME | 1431 | #ifdef __ARCH_WANT_SYS_GETHOSTNAME |
| 1431 | 1432 | ||
| 1432 | asmlinkage long sys_gethostname(char __user *name, int len) | 1433 | SYSCALL_DEFINE2(gethostname, char __user *, name, int, len) |
| 1433 | { | 1434 | { |
| 1434 | int i, errno; | 1435 | int i, errno; |
| 1435 | struct new_utsname *u; | 1436 | struct new_utsname *u; |
| @@ -1454,7 +1455,7 @@ asmlinkage long sys_gethostname(char __user *name, int len) | |||
| 1454 | * Only setdomainname; getdomainname can be implemented by calling | 1455 | * Only setdomainname; getdomainname can be implemented by calling |
| 1455 | * uname() | 1456 | * uname() |
| 1456 | */ | 1457 | */ |
| 1457 | asmlinkage long sys_setdomainname(char __user *name, int len) | 1458 | SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len) |
| 1458 | { | 1459 | { |
| 1459 | int errno; | 1460 | int errno; |
| 1460 | char tmp[__NEW_UTS_LEN]; | 1461 | char tmp[__NEW_UTS_LEN]; |
| @@ -1477,7 +1478,7 @@ asmlinkage long sys_setdomainname(char __user *name, int len) | |||
| 1477 | return errno; | 1478 | return errno; |
| 1478 | } | 1479 | } |
| 1479 | 1480 | ||
| 1480 | asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim) | 1481 | SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim) |
| 1481 | { | 1482 | { |
| 1482 | if (resource >= RLIM_NLIMITS) | 1483 | if (resource >= RLIM_NLIMITS) |
| 1483 | return -EINVAL; | 1484 | return -EINVAL; |
| @@ -1496,7 +1497,8 @@ asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim) | |||
| 1496 | * Back compatibility for getrlimit. Needed for some apps. | 1497 | * Back compatibility for getrlimit. Needed for some apps. |
| 1497 | */ | 1498 | */ |
| 1498 | 1499 | ||
| 1499 | asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim) | 1500 | SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, |
| 1501 | struct rlimit __user *, rlim) | ||
| 1500 | { | 1502 | { |
| 1501 | struct rlimit x; | 1503 | struct rlimit x; |
| 1502 | if (resource >= RLIM_NLIMITS) | 1504 | if (resource >= RLIM_NLIMITS) |
| @@ -1514,7 +1516,7 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r | |||
| 1514 | 1516 | ||
| 1515 | #endif | 1517 | #endif |
| 1516 | 1518 | ||
| 1517 | asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) | 1519 | SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) |
| 1518 | { | 1520 | { |
| 1519 | struct rlimit new_rlim, *old_rlim; | 1521 | struct rlimit new_rlim, *old_rlim; |
| 1520 | int retval; | 1522 | int retval; |
| @@ -1687,7 +1689,7 @@ int getrusage(struct task_struct *p, int who, struct rusage __user *ru) | |||
| 1687 | return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; | 1689 | return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; |
| 1688 | } | 1690 | } |
| 1689 | 1691 | ||
| 1690 | asmlinkage long sys_getrusage(int who, struct rusage __user *ru) | 1692 | SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru) |
| 1691 | { | 1693 | { |
| 1692 | if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN && | 1694 | if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN && |
| 1693 | who != RUSAGE_THREAD) | 1695 | who != RUSAGE_THREAD) |
| @@ -1695,14 +1697,14 @@ asmlinkage long sys_getrusage(int who, struct rusage __user *ru) | |||
| 1695 | return getrusage(current, who, ru); | 1697 | return getrusage(current, who, ru); |
| 1696 | } | 1698 | } |
| 1697 | 1699 | ||
| 1698 | asmlinkage long sys_umask(int mask) | 1700 | SYSCALL_DEFINE1(umask, int, mask) |
| 1699 | { | 1701 | { |
| 1700 | mask = xchg(¤t->fs->umask, mask & S_IRWXUGO); | 1702 | mask = xchg(¤t->fs->umask, mask & S_IRWXUGO); |
| 1701 | return mask; | 1703 | return mask; |
| 1702 | } | 1704 | } |
| 1703 | 1705 | ||
| 1704 | asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, | 1706 | SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, |
| 1705 | unsigned long arg4, unsigned long arg5) | 1707 | unsigned long, arg4, unsigned long, arg5) |
| 1706 | { | 1708 | { |
| 1707 | struct task_struct *me = current; | 1709 | struct task_struct *me = current; |
| 1708 | unsigned char comm[sizeof(me->comm)]; | 1710 | unsigned char comm[sizeof(me->comm)]; |
| @@ -1815,8 +1817,8 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
| 1815 | return error; | 1817 | return error; |
| 1816 | } | 1818 | } |
| 1817 | 1819 | ||
| 1818 | asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep, | 1820 | SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, |
| 1819 | struct getcpu_cache __user *unused) | 1821 | struct getcpu_cache __user *, unused) |
| 1820 | { | 1822 | { |
| 1821 | int err = 0; | 1823 | int err = 0; |
| 1822 | int cpu = raw_smp_processor_id(); | 1824 | int cpu = raw_smp_processor_id(); |
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index e14a23281707..27dad2967387 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c | |||
| @@ -131,6 +131,7 @@ cond_syscall(sys_io_destroy); | |||
| 131 | cond_syscall(sys_io_submit); | 131 | cond_syscall(sys_io_submit); |
| 132 | cond_syscall(sys_io_cancel); | 132 | cond_syscall(sys_io_cancel); |
| 133 | cond_syscall(sys_io_getevents); | 133 | cond_syscall(sys_io_getevents); |
| 134 | cond_syscall(sys_syslog); | ||
| 134 | 135 | ||
| 135 | /* arch-specific weak syscall entries */ | 136 | /* arch-specific weak syscall entries */ |
| 136 | cond_syscall(sys_pciconfig_read); | 137 | cond_syscall(sys_pciconfig_read); |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 92f6e5bc3c24..790f9d785663 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -82,6 +82,9 @@ extern int percpu_pagelist_fraction; | |||
| 82 | extern int compat_log; | 82 | extern int compat_log; |
| 83 | extern int latencytop_enabled; | 83 | extern int latencytop_enabled; |
| 84 | extern int sysctl_nr_open_min, sysctl_nr_open_max; | 84 | extern int sysctl_nr_open_min, sysctl_nr_open_max; |
| 85 | #ifndef CONFIG_MMU | ||
| 86 | extern int sysctl_nr_trim_pages; | ||
| 87 | #endif | ||
| 85 | #ifdef CONFIG_RCU_TORTURE_TEST | 88 | #ifdef CONFIG_RCU_TORTURE_TEST |
| 86 | extern int rcutorture_runnable; | 89 | extern int rcutorture_runnable; |
| 87 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ | 90 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ |
| @@ -141,6 +144,7 @@ extern int acct_parm[]; | |||
| 141 | 144 | ||
| 142 | #ifdef CONFIG_IA64 | 145 | #ifdef CONFIG_IA64 |
| 143 | extern int no_unaligned_warning; | 146 | extern int no_unaligned_warning; |
| 147 | extern int unaligned_dump_stack; | ||
| 144 | #endif | 148 | #endif |
| 145 | 149 | ||
| 146 | #ifdef CONFIG_RT_MUTEXES | 150 | #ifdef CONFIG_RT_MUTEXES |
| @@ -778,6 +782,14 @@ static struct ctl_table kern_table[] = { | |||
| 778 | .mode = 0644, | 782 | .mode = 0644, |
| 779 | .proc_handler = &proc_dointvec, | 783 | .proc_handler = &proc_dointvec, |
| 780 | }, | 784 | }, |
| 785 | { | ||
| 786 | .ctl_name = CTL_UNNUMBERED, | ||
| 787 | .procname = "unaligned-dump-stack", | ||
| 788 | .data = &unaligned_dump_stack, | ||
| 789 | .maxlen = sizeof (int), | ||
| 790 | .mode = 0644, | ||
| 791 | .proc_handler = &proc_dointvec, | ||
| 792 | }, | ||
| 781 | #endif | 793 | #endif |
| 782 | #ifdef CONFIG_DETECT_SOFTLOCKUP | 794 | #ifdef CONFIG_DETECT_SOFTLOCKUP |
| 783 | { | 795 | { |
| @@ -797,7 +809,7 @@ static struct ctl_table kern_table[] = { | |||
| 797 | .data = &softlockup_thresh, | 809 | .data = &softlockup_thresh, |
| 798 | .maxlen = sizeof(int), | 810 | .maxlen = sizeof(int), |
| 799 | .mode = 0644, | 811 | .mode = 0644, |
| 800 | .proc_handler = &proc_dointvec_minmax, | 812 | .proc_handler = &proc_dosoftlockup_thresh, |
| 801 | .strategy = &sysctl_intvec, | 813 | .strategy = &sysctl_intvec, |
| 802 | .extra1 = &neg_one, | 814 | .extra1 = &neg_one, |
| 803 | .extra2 = &sixty, | 815 | .extra2 = &sixty, |
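The handler swap on this entry is the consumer of the proc_dosoftlockup_thresh() helper added in kernel/softlockup.c above: every CPU's watchdog timestamp is touched before the write is stored, so lowering the threshold cannot trip an instant false positive off a stale timestamp. Seen from user space (a sketch; the /proc path follows the usual sysctl mapping, and the -1..60 range comes from the extra1/extra2 bounds here):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/softlockup_thresh", "w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", 30);	/* seconds, clamped to -1..60 */
	fclose(f);
	return 0;
}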
| @@ -1102,6 +1114,17 @@ static struct ctl_table vm_table[] = { | |||
| 1102 | .mode = 0644, | 1114 | .mode = 0644, |
| 1103 | .proc_handler = &proc_dointvec | 1115 | .proc_handler = &proc_dointvec |
| 1104 | }, | 1116 | }, |
| 1117 | #else | ||
| 1118 | { | ||
| 1119 | .ctl_name = CTL_UNNUMBERED, | ||
| 1120 | .procname = "nr_trim_pages", | ||
| 1121 | .data = &sysctl_nr_trim_pages, | ||
| 1122 | .maxlen = sizeof(sysctl_nr_trim_pages), | ||
| 1123 | .mode = 0644, | ||
| 1124 | .proc_handler = &proc_dointvec_minmax, | ||
| 1125 | .strategy = &sysctl_intvec, | ||
| 1126 | .extra1 = &zero, | ||
| 1127 | }, | ||
| 1105 | #endif | 1128 | #endif |
| 1106 | { | 1129 | { |
| 1107 | .ctl_name = VM_LAPTOP_MODE, | 1130 | .ctl_name = VM_LAPTOP_MODE, |
| @@ -1674,7 +1697,7 @@ int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *ol | |||
| 1674 | return error; | 1697 | return error; |
| 1675 | } | 1698 | } |
| 1676 | 1699 | ||
| 1677 | asmlinkage long sys_sysctl(struct __sysctl_args __user *args) | 1700 | SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args) |
| 1678 | { | 1701 | { |
| 1679 | struct __sysctl_args tmp; | 1702 | struct __sysctl_args tmp; |
| 1680 | int error; | 1703 | int error; |
| @@ -2975,7 +2998,7 @@ int sysctl_ms_jiffies(struct ctl_table *table, | |||
| 2975 | #else /* CONFIG_SYSCTL_SYSCALL */ | 2998 | #else /* CONFIG_SYSCTL_SYSCALL */ |
| 2976 | 2999 | ||
| 2977 | 3000 | ||
| 2978 | asmlinkage long sys_sysctl(struct __sysctl_args __user *args) | 3001 | SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args) |
| 2979 | { | 3002 | { |
| 2980 | struct __sysctl_args tmp; | 3003 | struct __sysctl_args tmp; |
| 2981 | int error; | 3004 | int error; |
diff --git a/kernel/time.c b/kernel/time.c index 4886e3ce83a4..29511943871a 100644 --- a/kernel/time.c +++ b/kernel/time.c | |||
| @@ -60,7 +60,7 @@ EXPORT_SYMBOL(sys_tz); | |||
| 60 | * why not move it into the appropriate arch directory (for those | 60 | * why not move it into the appropriate arch directory (for those |
| 61 | * architectures that need it). | 61 | * architectures that need it). |
| 62 | */ | 62 | */ |
| 63 | asmlinkage long sys_time(time_t __user * tloc) | 63 | SYSCALL_DEFINE1(time, time_t __user *, tloc) |
| 64 | { | 64 | { |
| 65 | time_t i = get_seconds(); | 65 | time_t i = get_seconds(); |
| 66 | 66 | ||
| @@ -79,7 +79,7 @@ asmlinkage long sys_time(time_t __user * tloc) | |||
| 79 | * architectures that need it). | 79 | * architectures that need it). |
| 80 | */ | 80 | */ |
| 81 | 81 | ||
| 82 | asmlinkage long sys_stime(time_t __user *tptr) | 82 | SYSCALL_DEFINE1(stime, time_t __user *, tptr) |
| 83 | { | 83 | { |
| 84 | struct timespec tv; | 84 | struct timespec tv; |
| 85 | int err; | 85 | int err; |
| @@ -99,8 +99,8 @@ asmlinkage long sys_stime(time_t __user *tptr) | |||
| 99 | 99 | ||
| 100 | #endif /* __ARCH_WANT_SYS_TIME */ | 100 | #endif /* __ARCH_WANT_SYS_TIME */ |
| 101 | 101 | ||
| 102 | asmlinkage long sys_gettimeofday(struct timeval __user *tv, | 102 | SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv, |
| 103 | struct timezone __user *tz) | 103 | struct timezone __user *, tz) |
| 104 | { | 104 | { |
| 105 | if (likely(tv != NULL)) { | 105 | if (likely(tv != NULL)) { |
| 106 | struct timeval ktv; | 106 | struct timeval ktv; |
| @@ -184,8 +184,8 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz) | |||
| 184 | return 0; | 184 | return 0; |
| 185 | } | 185 | } |
| 186 | 186 | ||
| 187 | asmlinkage long sys_settimeofday(struct timeval __user *tv, | 187 | SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv, |
| 188 | struct timezone __user *tz) | 188 | struct timezone __user *, tz) |
| 189 | { | 189 | { |
| 190 | struct timeval user_tv; | 190 | struct timeval user_tv; |
| 191 | struct timespec new_ts; | 191 | struct timespec new_ts; |
| @@ -205,7 +205,7 @@ asmlinkage long sys_settimeofday(struct timeval __user *tv, | |||
| 205 | return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL); | 205 | return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL); |
| 206 | } | 206 | } |
| 207 | 207 | ||
| 208 | asmlinkage long sys_adjtimex(struct timex __user *txc_p) | 208 | SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p) |
| 209 | { | 209 | { |
| 210 | struct timex txc; /* Local copy of parameter */ | 210 | struct timex txc; /* Local copy of parameter */ |
| 211 | int ret; | 211 | int ret; |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 63e05d423a09..21a5ca849514 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
| @@ -274,6 +274,21 @@ out_bc: | |||
| 274 | } | 274 | } |
| 275 | 275 | ||
| 276 | /* | 276 | /* |
| 277 | * Transfer the do_timer job away from a dying cpu. | ||
| 278 | * | ||
| 279 | * Called with interrupts disabled. | ||
| 280 | */ | ||
| 281 | static void tick_handover_do_timer(int *cpup) | ||
| 282 | { | ||
| 283 | if (*cpup == tick_do_timer_cpu) { | ||
| 284 | int cpu = cpumask_first(cpu_online_mask); | ||
| 285 | |||
| 286 | tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : | ||
| 287 | TICK_DO_TIMER_NONE; | ||
| 288 | } | ||
| 289 | } | ||
| 290 | |||
| 291 | /* | ||
| 277 | * Shutdown an event device on a given cpu: | 292 | * Shutdown an event device on a given cpu: |
| 278 | * | 293 | * |
| 279 | * This is called on a live CPU, when a CPU is dead. So we cannot | 294 | * This is called on a live CPU, when a CPU is dead. So we cannot |
| @@ -297,13 +312,6 @@ static void tick_shutdown(unsigned int *cpup) | |||
| 297 | clockevents_exchange_device(dev, NULL); | 312 | clockevents_exchange_device(dev, NULL); |
| 298 | td->evtdev = NULL; | 313 | td->evtdev = NULL; |
| 299 | } | 314 | } |
| 300 | /* Transfer the do_timer job away from this cpu */ | ||
| 301 | if (*cpup == tick_do_timer_cpu) { | ||
| 302 | int cpu = cpumask_first(cpu_online_mask); | ||
| 303 | |||
| 304 | tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : | ||
| 305 | TICK_DO_TIMER_NONE; | ||
| 306 | } | ||
| 307 | spin_unlock_irqrestore(&tick_device_lock, flags); | 315 | spin_unlock_irqrestore(&tick_device_lock, flags); |
| 308 | } | 316 | } |
| 309 | 317 | ||
| @@ -357,6 +365,10 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason, | |||
| 357 | tick_broadcast_oneshot_control(reason); | 365 | tick_broadcast_oneshot_control(reason); |
| 358 | break; | 366 | break; |
| 359 | 367 | ||
| 368 | case CLOCK_EVT_NOTIFY_CPU_DYING: | ||
| 369 | tick_handover_do_timer(dev); | ||
| 370 | break; | ||
| 371 | |||
| 360 | case CLOCK_EVT_NOTIFY_CPU_DEAD: | 372 | case CLOCK_EVT_NOTIFY_CPU_DEAD: |
| 361 | tick_shutdown_broadcast_oneshot(dev); | 373 | tick_shutdown_broadcast_oneshot(dev); |
| 362 | tick_shutdown_broadcast(dev); | 374 | tick_shutdown_broadcast(dev); |
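
The two tick-common.c hunks above split the do_timer handover out of tick_shutdown(): it now runs at CLOCK_EVT_NOTIFY_CPU_DYING time, on the dying CPU with interrupts disabled, rather than later at CPU_DEAD, so jiffies updating is never left assigned to a CPU that has already stopped ticking. A minimal sketch of the selection policy, with a plain bitmask standing in for the kernel's struct cpumask and cpumask_first():

#include <stdio.h>

#define TICK_DO_TIMER_NONE      (-1)

static int tick_do_timer_cpu = 2;       /* illustrative starting owner */

/* First set bit in the mask, or the NONE sentinel if the mask is empty. */
static int mask_first(unsigned long mask)
{
        for (int cpu = 0; cpu < (int)(8 * sizeof(mask)); cpu++)
                if (mask & (1UL << cpu))
                        return cpu;
        return TICK_DO_TIMER_NONE;
}

/* If the dying CPU owns do_timer, pass it to the first remaining CPU. */
static void handover(int dying_cpu, unsigned long online_mask)
{
        if (dying_cpu == tick_do_timer_cpu)
                tick_do_timer_cpu = mask_first(online_mask);
}

int main(void)
{
        handover(2, 0x9UL);     /* CPUs 0 and 3 still online, CPU 2 dying */
        printf("do_timer now on CPU %d\n", tick_do_timer_cpu);  /* CPU 0 */
        return 0;
}
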
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 1b6c05bd0d0a..d3f1ef4d5cbe 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -134,7 +134,7 @@ __setup("nohz=", setup_tick_nohz); | |||
| 134 | * value. We do this unconditionally on any cpu, as we don't know whether the | 134 | * value. We do this unconditionally on any cpu, as we don't know whether the |
| 135 | * cpu which has the update task assigned is in a long sleep. | 135 | * cpu which has the update task assigned is in a long sleep. |
| 136 | */ | 136 | */ |
| 137 | void tick_nohz_update_jiffies(void) | 137 | static void tick_nohz_update_jiffies(void) |
| 138 | { | 138 | { |
| 139 | int cpu = smp_processor_id(); | 139 | int cpu = smp_processor_id(); |
| 140 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 140 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
diff --git a/kernel/timer.c b/kernel/timer.c index dee3f641a7a7..13dd64fe143d 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -1129,7 +1129,7 @@ void do_timer(unsigned long ticks) | |||
| 1129 | * For backwards compatibility? This can be done in libc so Alpha | 1129 | * For backwards compatibility? This can be done in libc so Alpha |
| 1130 | * and all newer ports shouldn't need it. | 1130 | * and all newer ports shouldn't need it. |
| 1131 | */ | 1131 | */ |
| 1132 | asmlinkage unsigned long sys_alarm(unsigned int seconds) | 1132 | SYSCALL_DEFINE1(alarm, unsigned int, seconds) |
| 1133 | { | 1133 | { |
| 1134 | return alarm_setitimer(seconds); | 1134 | return alarm_setitimer(seconds); |
| 1135 | } | 1135 | } |
| @@ -1152,7 +1152,7 @@ asmlinkage unsigned long sys_alarm(unsigned int seconds) | |||
| 1152 | * | 1152 | * |
| 1153 | * This is SMP safe as current->tgid does not change. | 1153 | * This is SMP safe as current->tgid does not change. |
| 1154 | */ | 1154 | */ |
| 1155 | asmlinkage long sys_getpid(void) | 1155 | SYSCALL_DEFINE0(getpid) |
| 1156 | { | 1156 | { |
| 1157 | return task_tgid_vnr(current); | 1157 | return task_tgid_vnr(current); |
| 1158 | } | 1158 | } |
| @@ -1163,7 +1163,7 @@ asmlinkage long sys_getpid(void) | |||
| 1163 | * value of ->real_parent under rcu_read_lock(), see | 1163 | * value of ->real_parent under rcu_read_lock(), see |
| 1164 | * release_task()->call_rcu(delayed_put_task_struct). | 1164 | * release_task()->call_rcu(delayed_put_task_struct). |
| 1165 | */ | 1165 | */ |
| 1166 | asmlinkage long sys_getppid(void) | 1166 | SYSCALL_DEFINE0(getppid) |
| 1167 | { | 1167 | { |
| 1168 | int pid; | 1168 | int pid; |
| 1169 | 1169 | ||
| @@ -1174,25 +1174,25 @@ asmlinkage long sys_getppid(void) | |||
| 1174 | return pid; | 1174 | return pid; |
| 1175 | } | 1175 | } |
| 1176 | 1176 | ||
| 1177 | asmlinkage long sys_getuid(void) | 1177 | SYSCALL_DEFINE0(getuid) |
| 1178 | { | 1178 | { |
| 1179 | /* Only we change this so SMP safe */ | 1179 | /* Only we change this so SMP safe */ |
| 1180 | return current_uid(); | 1180 | return current_uid(); |
| 1181 | } | 1181 | } |
| 1182 | 1182 | ||
| 1183 | asmlinkage long sys_geteuid(void) | 1183 | SYSCALL_DEFINE0(geteuid) |
| 1184 | { | 1184 | { |
| 1185 | /* Only we change this so SMP safe */ | 1185 | /* Only we change this so SMP safe */ |
| 1186 | return current_euid(); | 1186 | return current_euid(); |
| 1187 | } | 1187 | } |
| 1188 | 1188 | ||
| 1189 | asmlinkage long sys_getgid(void) | 1189 | SYSCALL_DEFINE0(getgid) |
| 1190 | { | 1190 | { |
| 1191 | /* Only we change this so SMP safe */ | 1191 | /* Only we change this so SMP safe */ |
| 1192 | return current_gid(); | 1192 | return current_gid(); |
| 1193 | } | 1193 | } |
| 1194 | 1194 | ||
| 1195 | asmlinkage long sys_getegid(void) | 1195 | SYSCALL_DEFINE0(getegid) |
| 1196 | { | 1196 | { |
| 1197 | /* Only we change this so SMP safe */ | 1197 | /* Only we change this so SMP safe */ |
| 1198 | return current_egid(); | 1198 | return current_egid(); |
| @@ -1308,7 +1308,7 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout) | |||
| 1308 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | 1308 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); |
| 1309 | 1309 | ||
| 1310 | /* Thread ID - the internal kernel "pid" */ | 1310 | /* Thread ID - the internal kernel "pid" */ |
| 1311 | asmlinkage long sys_gettid(void) | 1311 | SYSCALL_DEFINE0(gettid) |
| 1312 | { | 1312 | { |
| 1313 | return task_pid_vnr(current); | 1313 | return task_pid_vnr(current); |
| 1314 | } | 1314 | } |
| @@ -1400,7 +1400,7 @@ out: | |||
| 1400 | return 0; | 1400 | return 0; |
| 1401 | } | 1401 | } |
| 1402 | 1402 | ||
| 1403 | asmlinkage long sys_sysinfo(struct sysinfo __user *info) | 1403 | SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info) |
| 1404 | { | 1404 | { |
| 1405 | struct sysinfo val; | 1405 | struct sysinfo val; |
| 1406 | 1406 | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 2f32969c09df..7dcf6e9f2b04 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/clocksource.h> | 17 | #include <linux/clocksource.h> |
| 18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
| 19 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
| 20 | #include <linux/suspend.h> | ||
| 20 | #include <linux/debugfs.h> | 21 | #include <linux/debugfs.h> |
| 21 | #include <linux/hardirq.h> | 22 | #include <linux/hardirq.h> |
| 22 | #include <linux/kthread.h> | 23 | #include <linux/kthread.h> |
| @@ -1965,6 +1966,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
| 1965 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1966 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 1966 | 1967 | ||
| 1967 | static atomic_t ftrace_graph_active; | 1968 | static atomic_t ftrace_graph_active; |
| 1969 | static struct notifier_block ftrace_suspend_notifier; | ||
| 1968 | 1970 | ||
| 1969 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | 1971 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
| 1970 | { | 1972 | { |
| @@ -2043,6 +2045,27 @@ static int start_graph_tracing(void) | |||
| 2043 | return ret; | 2045 | return ret; |
| 2044 | } | 2046 | } |
| 2045 | 2047 | ||
| 2048 | /* | ||
| 2049 | * Hibernation protection. | ||
| 2050 | * The state of the current task is too unstable during | ||
| 2051 | * suspend/restore to disk. We want to protect against that. | ||
| 2052 | */ | ||
| 2053 | static int | ||
| 2054 | ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, | ||
| 2055 | void *unused) | ||
| 2056 | { | ||
| 2057 | switch (state) { | ||
| 2058 | case PM_HIBERNATION_PREPARE: | ||
| 2059 | pause_graph_tracing(); | ||
| 2060 | break; | ||
| 2061 | |||
| 2062 | case PM_POST_HIBERNATION: | ||
| 2063 | unpause_graph_tracing(); | ||
| 2064 | break; | ||
| 2065 | } | ||
| 2066 | return NOTIFY_DONE; | ||
| 2067 | } | ||
| 2068 | |||
| 2046 | int register_ftrace_graph(trace_func_graph_ret_t retfunc, | 2069 | int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
| 2047 | trace_func_graph_ent_t entryfunc) | 2070 | trace_func_graph_ent_t entryfunc) |
| 2048 | { | 2071 | { |
| @@ -2050,6 +2073,9 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
| 2050 | 2073 | ||
| 2051 | mutex_lock(&ftrace_sysctl_lock); | 2074 | mutex_lock(&ftrace_sysctl_lock); |
| 2052 | 2075 | ||
| 2076 | ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; | ||
| 2077 | register_pm_notifier(&ftrace_suspend_notifier); | ||
| 2078 | |||
| 2053 | atomic_inc(&ftrace_graph_active); | 2079 | atomic_inc(&ftrace_graph_active); |
| 2054 | ret = start_graph_tracing(); | 2080 | ret = start_graph_tracing(); |
| 2055 | if (ret) { | 2081 | if (ret) { |
| @@ -2075,6 +2101,7 @@ void unregister_ftrace_graph(void) | |||
| 2075 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | 2101 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
| 2076 | ftrace_graph_entry = ftrace_graph_entry_stub; | 2102 | ftrace_graph_entry = ftrace_graph_entry_stub; |
| 2077 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); | 2103 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); |
| 2104 | unregister_pm_notifier(&ftrace_suspend_notifier); | ||
| 2078 | 2105 | ||
| 2079 | mutex_unlock(&ftrace_sysctl_lock); | 2106 | mutex_unlock(&ftrace_sysctl_lock); |
| 2080 | } | 2107 | } |
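
register_ftrace_graph() now hooks the power-management notifier chain so graph tracing is paused across hibernation; per the comment in the hunk, the current task's state is too unstable during suspend/restore to disk to keep patching return addresses. A userspace sketch of the callback's shape (the event names here are illustrative, not the kernel's PM_* constants):

#include <stdio.h>

enum pm_event { HIBERNATION_PREPARE, POST_HIBERNATION };

static int graph_paused;

/* Pause before the image is written, resume once it has been restored. */
static int suspend_notifier_call(enum pm_event state)
{
        switch (state) {
        case HIBERNATION_PREPARE:
                graph_paused = 1;       /* pause_graph_tracing() analogue */
                break;
        case POST_HIBERNATION:
                graph_paused = 0;       /* unpause_graph_tracing() analogue */
                break;
        }
        return 0;       /* NOTIFY_DONE analogue */
}

int main(void)
{
        suspend_notifier_call(HIBERNATION_PREPARE);
        printf("paused=%d\n", graph_paused);    /* 1 */
        suspend_notifier_call(POST_HIBERNATION);
        printf("paused=%d\n", graph_paused);    /* 0 */
        return 0;
}

Note the matching unregister_pm_notifier() in unregister_ftrace_graph() above, so the notifier's lifetime tracks the tracer's.
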
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index a9d9760dc7b6..bd38c5cfd8ad 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -168,7 +168,13 @@ rb_event_length(struct ring_buffer_event *event) | |||
| 168 | */ | 168 | */ |
| 169 | unsigned ring_buffer_event_length(struct ring_buffer_event *event) | 169 | unsigned ring_buffer_event_length(struct ring_buffer_event *event) |
| 170 | { | 170 | { |
| 171 | return rb_event_length(event); | 171 | unsigned length = rb_event_length(event); |
| 172 | if (event->type != RINGBUF_TYPE_DATA) | ||
| 173 | return length; | ||
| 174 | length -= RB_EVNT_HDR_SIZE; | ||
| 175 | if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) | ||
| 176 | length -= sizeof(event->array[0]); | ||
| 177 | return length; | ||
| 172 | } | 178 | } |
| 173 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); | 179 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); |
| 174 | 180 | ||
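
The ring_buffer_event_length() fix above makes the accessor report the payload the consumer can actually read instead of the raw event size: for data events it strips the event header, and for events big enough that the first array[] slot stores an explicit length, it strips that slot as well. The arithmetic, with illustrative constants (in the kernel both values derive from the packed event-header layout):

#include <stdint.h>
#include <stdio.h>

#define RB_EVNT_HDR_SIZE        4U      /* illustrative header size */
#define RB_MAX_SMALL_DATA       28U     /* illustrative inline-length limit */

/* Payload bytes visible to the reader, given the total event length. */
static unsigned data_length(unsigned event_len)
{
        unsigned len = event_len - RB_EVNT_HDR_SIZE;

        /* Larger events spend the first array[] word on the length. */
        if (len > RB_MAX_SMALL_DATA + sizeof(uint32_t))
                len -= sizeof(uint32_t);
        return len;
}

int main(void)
{
        printf("%u\n", data_length(20));        /* small event: 16 */
        printf("%u\n", data_length(100));       /* large event: 92 */
        return 0;
}
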
| @@ -240,7 +246,7 @@ static inline int test_time_stamp(u64 delta) | |||
| 240 | return 0; | 246 | return 0; |
| 241 | } | 247 | } |
| 242 | 248 | ||
| 243 | #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page)) | 249 | #define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data)) |
| 244 | 250 | ||
| 245 | /* | 251 | /* |
| 246 | * head_page == tail_page && head == tail then buffer is empty. | 252 | * head_page == tail_page && head == tail then buffer is empty. |
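
The BUF_PAGE_SIZE change in the hunk above is subtler than it looks: sizeof(struct buffer_data_page) includes whatever tail padding the compiler appends after the header fields, while offsetof(..., data) is exactly where the payload begins, so the sizeof form could under-count the usable bytes per page. The difference shows up with any header struct that ends in a flexible array member:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for buffer_data_page: an 8-byte stamp, a 4-byte
 * counter, then the flexible payload. */
struct page_header {
        unsigned long long time_stamp;
        int commit;
        unsigned char data[];
};

int main(void)
{
        /* With 8-byte alignment, sizeof rounds up to 16 while the payload
         * starts at offset 12, so sizeof-based accounting loses four bytes
         * per page to padding. */
        printf("sizeof=%zu offsetof(data)=%zu\n",
               sizeof(struct page_header),
               offsetof(struct page_header, data));
        return 0;
}
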
| @@ -1019,12 +1025,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
| 1019 | } | 1025 | } |
| 1020 | 1026 | ||
| 1021 | if (next_page == head_page) { | 1027 | if (next_page == head_page) { |
| 1022 | if (!(buffer->flags & RB_FL_OVERWRITE)) { | 1028 | if (!(buffer->flags & RB_FL_OVERWRITE)) |
| 1023 | /* reset write */ | ||
| 1024 | if (tail <= BUF_PAGE_SIZE) | ||
| 1025 | local_set(&tail_page->write, tail); | ||
| 1026 | goto out_unlock; | 1029 | goto out_unlock; |
| 1027 | } | ||
| 1028 | 1030 | ||
| 1029 | /* tail_page has not moved yet? */ | 1031 | /* tail_page has not moved yet? */ |
| 1030 | if (tail_page == cpu_buffer->tail_page) { | 1032 | if (tail_page == cpu_buffer->tail_page) { |
| @@ -1099,6 +1101,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
| 1099 | return event; | 1101 | return event; |
| 1100 | 1102 | ||
| 1101 | out_unlock: | 1103 | out_unlock: |
| 1104 | /* reset write */ | ||
| 1105 | if (tail <= BUF_PAGE_SIZE) | ||
| 1106 | local_set(&tail_page->write, tail); | ||
| 1107 | |||
| 1102 | __raw_spin_unlock(&cpu_buffer->lock); | 1108 | __raw_spin_unlock(&cpu_buffer->lock); |
| 1103 | local_irq_restore(flags); | 1109 | local_irq_restore(flags); |
| 1104 | return NULL; | 1110 | return NULL; |
| @@ -2168,6 +2174,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | |||
| 2168 | 2174 | ||
| 2169 | cpu_buffer->overrun = 0; | 2175 | cpu_buffer->overrun = 0; |
| 2170 | cpu_buffer->entries = 0; | 2176 | cpu_buffer->entries = 0; |
| 2177 | |||
| 2178 | cpu_buffer->write_stamp = 0; | ||
| 2179 | cpu_buffer->read_stamp = 0; | ||
| 2171 | } | 2180 | } |
| 2172 | 2181 | ||
| 2173 | /** | 2182 | /** |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c580233add95..17bb88d86ac2 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | 40 | ||
| 41 | #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) | 41 | #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) |
| 42 | 42 | ||
| 43 | unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; | 43 | unsigned long __read_mostly tracing_max_latency; |
| 44 | unsigned long __read_mostly tracing_thresh; | 44 | unsigned long __read_mostly tracing_thresh; |
| 45 | 45 | ||
| 46 | /* | 46 | /* |
| @@ -3736,7 +3736,7 @@ static struct notifier_block trace_die_notifier = { | |||
| 3736 | * it if we decide to change what log level the ftrace dump | 3736 | * it if we decide to change what log level the ftrace dump |
| 3737 | * should be at. | 3737 | * should be at. |
| 3738 | */ | 3738 | */ |
| 3739 | #define KERN_TRACE KERN_INFO | 3739 | #define KERN_TRACE KERN_EMERG |
| 3740 | 3740 | ||
| 3741 | static void | 3741 | static void |
| 3742 | trace_printk_seq(struct trace_seq *s) | 3742 | trace_printk_seq(struct trace_seq *s) |
| @@ -3770,6 +3770,7 @@ void ftrace_dump(void) | |||
| 3770 | dump_ran = 1; | 3770 | dump_ran = 1; |
| 3771 | 3771 | ||
| 3772 | /* No turning back! */ | 3772 | /* No turning back! */ |
| 3773 | tracing_off(); | ||
| 3773 | ftrace_kill(); | 3774 | ftrace_kill(); |
| 3774 | 3775 | ||
| 3775 | for_each_tracing_cpu(cpu) { | 3776 | for_each_tracing_cpu(cpu) { |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 7c2e326bbc8b..62a78d943534 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -380,6 +380,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr) | |||
| 380 | 380 | ||
| 381 | static void __irqsoff_tracer_init(struct trace_array *tr) | 381 | static void __irqsoff_tracer_init(struct trace_array *tr) |
| 382 | { | 382 | { |
| 383 | tracing_max_latency = 0; | ||
| 383 | irqsoff_trace = tr; | 384 | irqsoff_trace = tr; |
| 384 | /* make sure that the tracer is visible */ | 385 | /* make sure that the tracer is visible */ |
| 385 | smp_wmb(); | 386 | smp_wmb(); |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 43586b689e31..42ae1e77b6b3 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
| @@ -333,6 +333,7 @@ static void stop_wakeup_tracer(struct trace_array *tr) | |||
| 333 | 333 | ||
| 334 | static int wakeup_tracer_init(struct trace_array *tr) | 334 | static int wakeup_tracer_init(struct trace_array *tr) |
| 335 | { | 335 | { |
| 336 | tracing_max_latency = 0; | ||
| 336 | wakeup_trace = tr; | 337 | wakeup_trace = tr; |
| 337 | start_wakeup_tracer(tr); | 338 | start_wakeup_tracer(tr); |
| 338 | return 0; | 339 | return 0; |
diff --git a/kernel/uid16.c b/kernel/uid16.c index 2460c3199b5a..0314501688b9 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
| 19 | 19 | ||
| 20 | asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gid_t group) | 20 | SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group) |
| 21 | { | 21 | { |
| 22 | long ret = sys_chown(filename, low2highuid(user), low2highgid(group)); | 22 | long ret = sys_chown(filename, low2highuid(user), low2highgid(group)); |
| 23 | /* avoid REGPARM breakage on x86: */ | 23 | /* avoid REGPARM breakage on x86: */ |
| @@ -25,7 +25,7 @@ asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gi | |||
| 25 | return ret; | 25 | return ret; |
| 26 | } | 26 | } |
| 27 | 27 | ||
| 28 | asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_gid_t group) | 28 | SYSCALL_DEFINE3(lchown16, const char __user *, filename, old_uid_t, user, old_gid_t, group) |
| 29 | { | 29 | { |
| 30 | long ret = sys_lchown(filename, low2highuid(user), low2highgid(group)); | 30 | long ret = sys_lchown(filename, low2highuid(user), low2highgid(group)); |
| 31 | /* avoid REGPARM breakage on x86: */ | 31 | /* avoid REGPARM breakage on x86: */ |
| @@ -33,7 +33,7 @@ asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_g | |||
| 33 | return ret; | 33 | return ret; |
| 34 | } | 34 | } |
| 35 | 35 | ||
| 36 | asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group) | 36 | SYSCALL_DEFINE3(fchown16, unsigned int, fd, old_uid_t, user, old_gid_t, group) |
| 37 | { | 37 | { |
| 38 | long ret = sys_fchown(fd, low2highuid(user), low2highgid(group)); | 38 | long ret = sys_fchown(fd, low2highuid(user), low2highgid(group)); |
| 39 | /* avoid REGPARM breakage on x86: */ | 39 | /* avoid REGPARM breakage on x86: */ |
| @@ -41,7 +41,7 @@ asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group) | |||
| 41 | return ret; | 41 | return ret; |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid) | 44 | SYSCALL_DEFINE2(setregid16, old_gid_t, rgid, old_gid_t, egid) |
| 45 | { | 45 | { |
| 46 | long ret = sys_setregid(low2highgid(rgid), low2highgid(egid)); | 46 | long ret = sys_setregid(low2highgid(rgid), low2highgid(egid)); |
| 47 | /* avoid REGPARM breakage on x86: */ | 47 | /* avoid REGPARM breakage on x86: */ |
| @@ -49,7 +49,7 @@ asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid) | |||
| 49 | return ret; | 49 | return ret; |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | asmlinkage long sys_setgid16(old_gid_t gid) | 52 | SYSCALL_DEFINE1(setgid16, old_gid_t, gid) |
| 53 | { | 53 | { |
| 54 | long ret = sys_setgid(low2highgid(gid)); | 54 | long ret = sys_setgid(low2highgid(gid)); |
| 55 | /* avoid REGPARM breakage on x86: */ | 55 | /* avoid REGPARM breakage on x86: */ |
| @@ -57,7 +57,7 @@ asmlinkage long sys_setgid16(old_gid_t gid) | |||
| 57 | return ret; | 57 | return ret; |
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid) | 60 | SYSCALL_DEFINE2(setreuid16, old_uid_t, ruid, old_uid_t, euid) |
| 61 | { | 61 | { |
| 62 | long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid)); | 62 | long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid)); |
| 63 | /* avoid REGPARM breakage on x86: */ | 63 | /* avoid REGPARM breakage on x86: */ |
| @@ -65,7 +65,7 @@ asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid) | |||
| 65 | return ret; | 65 | return ret; |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | asmlinkage long sys_setuid16(old_uid_t uid) | 68 | SYSCALL_DEFINE1(setuid16, old_uid_t, uid) |
| 69 | { | 69 | { |
| 70 | long ret = sys_setuid(low2highuid(uid)); | 70 | long ret = sys_setuid(low2highuid(uid)); |
| 71 | /* avoid REGPARM breakage on x86: */ | 71 | /* avoid REGPARM breakage on x86: */ |
| @@ -73,7 +73,7 @@ asmlinkage long sys_setuid16(old_uid_t uid) | |||
| 73 | return ret; | 73 | return ret; |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid) | 76 | SYSCALL_DEFINE3(setresuid16, old_uid_t, ruid, old_uid_t, euid, old_uid_t, suid) |
| 77 | { | 77 | { |
| 78 | long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid), | 78 | long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid), |
| 79 | low2highuid(suid)); | 79 | low2highuid(suid)); |
| @@ -82,7 +82,7 @@ asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid) | |||
| 82 | return ret; | 82 | return ret; |
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid) | 85 | SYSCALL_DEFINE3(getresuid16, old_uid_t __user *, ruid, old_uid_t __user *, euid, old_uid_t __user *, suid) |
| 86 | { | 86 | { |
| 87 | const struct cred *cred = current_cred(); | 87 | const struct cred *cred = current_cred(); |
| 88 | int retval; | 88 | int retval; |
| @@ -94,7 +94,7 @@ asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, | |||
| 94 | return retval; | 94 | return retval; |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid) | 97 | SYSCALL_DEFINE3(setresgid16, old_gid_t, rgid, old_gid_t, egid, old_gid_t, sgid) |
| 98 | { | 98 | { |
| 99 | long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid), | 99 | long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid), |
| 100 | low2highgid(sgid)); | 100 | low2highgid(sgid)); |
| @@ -103,7 +103,8 @@ asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid) | |||
| 103 | return ret; | 103 | return ret; |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid) | 106 | |
| 107 | SYSCALL_DEFINE3(getresgid16, old_gid_t __user *, rgid, old_gid_t __user *, egid, old_gid_t __user *, sgid) | ||
| 107 | { | 108 | { |
| 108 | const struct cred *cred = current_cred(); | 109 | const struct cred *cred = current_cred(); |
| 109 | int retval; | 110 | int retval; |
| @@ -115,7 +116,7 @@ asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, | |||
| 115 | return retval; | 116 | return retval; |
| 116 | } | 117 | } |
| 117 | 118 | ||
| 118 | asmlinkage long sys_setfsuid16(old_uid_t uid) | 119 | SYSCALL_DEFINE1(setfsuid16, old_uid_t, uid) |
| 119 | { | 120 | { |
| 120 | long ret = sys_setfsuid(low2highuid(uid)); | 121 | long ret = sys_setfsuid(low2highuid(uid)); |
| 121 | /* avoid REGPARM breakage on x86: */ | 122 | /* avoid REGPARM breakage on x86: */ |
| @@ -123,7 +124,7 @@ asmlinkage long sys_setfsuid16(old_uid_t uid) | |||
| 123 | return ret; | 124 | return ret; |
| 124 | } | 125 | } |
| 125 | 126 | ||
| 126 | asmlinkage long sys_setfsgid16(old_gid_t gid) | 127 | SYSCALL_DEFINE1(setfsgid16, old_gid_t, gid) |
| 127 | { | 128 | { |
| 128 | long ret = sys_setfsgid(low2highgid(gid)); | 129 | long ret = sys_setfsgid(low2highgid(gid)); |
| 129 | /* avoid REGPARM breakage on x86: */ | 130 | /* avoid REGPARM breakage on x86: */ |
| @@ -161,7 +162,7 @@ static int groups16_from_user(struct group_info *group_info, | |||
| 161 | return 0; | 162 | return 0; |
| 162 | } | 163 | } |
| 163 | 164 | ||
| 164 | asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist) | 165 | SYSCALL_DEFINE2(getgroups16, int, gidsetsize, old_gid_t __user *, grouplist) |
| 165 | { | 166 | { |
| 166 | const struct cred *cred = current_cred(); | 167 | const struct cred *cred = current_cred(); |
| 167 | int i; | 168 | int i; |
| @@ -184,7 +185,7 @@ out: | |||
| 184 | return i; | 185 | return i; |
| 185 | } | 186 | } |
| 186 | 187 | ||
| 187 | asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist) | 188 | SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist) |
| 188 | { | 189 | { |
| 189 | struct group_info *group_info; | 190 | struct group_info *group_info; |
| 190 | int retval; | 191 | int retval; |
| @@ -209,22 +210,22 @@ asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist) | |||
| 209 | return retval; | 210 | return retval; |
| 210 | } | 211 | } |
| 211 | 212 | ||
| 212 | asmlinkage long sys_getuid16(void) | 213 | SYSCALL_DEFINE0(getuid16) |
| 213 | { | 214 | { |
| 214 | return high2lowuid(current_uid()); | 215 | return high2lowuid(current_uid()); |
| 215 | } | 216 | } |
| 216 | 217 | ||
| 217 | asmlinkage long sys_geteuid16(void) | 218 | SYSCALL_DEFINE0(geteuid16) |
| 218 | { | 219 | { |
| 219 | return high2lowuid(current_euid()); | 220 | return high2lowuid(current_euid()); |
| 220 | } | 221 | } |
| 221 | 222 | ||
| 222 | asmlinkage long sys_getgid16(void) | 223 | SYSCALL_DEFINE0(getgid16) |
| 223 | { | 224 | { |
| 224 | return high2lowgid(current_gid()); | 225 | return high2lowgid(current_gid()); |
| 225 | } | 226 | } |
| 226 | 227 | ||
| 227 | asmlinkage long sys_getegid16(void) | 228 | SYSCALL_DEFINE0(getegid16) |
| 228 | { | 229 | { |
| 229 | return high2lowgid(current_egid()); | 230 | return high2lowgid(current_egid()); |
| 230 | } | 231 | } |
diff --git a/kernel/up.c b/kernel/up.c new file mode 100644 index 000000000000..1ff27a28bb7d --- /dev/null +++ b/kernel/up.c | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | /* | ||
| 2 | * Uniprocessor-only support functions. The counterpart to kernel/smp.c | ||
| 3 | */ | ||
| 4 | |||
| 5 | #include <linux/interrupt.h> | ||
| 6 | #include <linux/kernel.h> | ||
| 7 | #include <linux/module.h> | ||
| 8 | #include <linux/smp.h> | ||
| 9 | |||
| 10 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
| 11 | int wait) | ||
| 12 | { | ||
| 13 | WARN_ON(cpu != 0); | ||
| 14 | |||
| 15 | local_irq_disable(); | ||
| 16 | (func)(info); | ||
| 17 | local_irq_enable(); | ||
| 18 | |||
| 19 | return 0; | ||
| 20 | } | ||
| 21 | EXPORT_SYMBOL(smp_call_function_single); | ||
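
The new kernel/up.c keeps smp_call_function_single()'s contract intact on uniprocessor builds: func still runs with interrupts disabled, just as it would inside the IPI handler on SMP, so callers need no UP/SMP special-casing. A userspace mock of the stub and a caller (irq disable/enable are stubbed out, since there is nothing to mask here):

#include <stdio.h>

static void local_irq_disable(void) { }
static void local_irq_enable(void) { }

/* UP mock: no other CPUs exist, so the cross-call is a direct call. */
static int smp_call_function_single(int cpu, void (*func)(void *), void *info,
                                    int wait)
{
        (void)cpu; (void)wait;
        local_irq_disable();
        func(info);
        local_irq_enable();
        return 0;
}

static void bump(void *info)
{
        ++*(int *)info;
}

int main(void)
{
        int counter = 0;

        smp_call_function_single(0, bump, &counter, 1);
        printf("counter=%d\n", counter);        /* 1 */
        return 0;
}
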
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2f445833ae37..1f0c509b40d3 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -971,6 +971,8 @@ undo: | |||
| 971 | } | 971 | } |
| 972 | 972 | ||
| 973 | #ifdef CONFIG_SMP | 973 | #ifdef CONFIG_SMP |
| 974 | static struct workqueue_struct *work_on_cpu_wq __read_mostly; | ||
| 975 | |||
| 974 | struct work_for_cpu { | 976 | struct work_for_cpu { |
| 975 | struct work_struct work; | 977 | struct work_struct work; |
| 976 | long (*fn)(void *); | 978 | long (*fn)(void *); |
| @@ -991,8 +993,8 @@ static void do_work_for_cpu(struct work_struct *w) | |||
| 991 | * @fn: the function to run | 993 | * @fn: the function to run |
| 992 | * @arg: the function arg | 994 | * @arg: the function arg |
| 993 | * | 995 | * |
| 994 | * This will return -EINVAL in the cpu is not online, or the return value | 996 | * This will return the value @fn returns. |
| 995 | * of @fn otherwise. | 997 | * It is up to the caller to ensure that the cpu doesn't go offline. |
| 996 | */ | 998 | */ |
| 997 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | 999 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) |
| 998 | { | 1000 | { |
| @@ -1001,14 +1003,8 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | |||
| 1001 | INIT_WORK(&wfc.work, do_work_for_cpu); | 1003 | INIT_WORK(&wfc.work, do_work_for_cpu); |
| 1002 | wfc.fn = fn; | 1004 | wfc.fn = fn; |
| 1003 | wfc.arg = arg; | 1005 | wfc.arg = arg; |
| 1004 | get_online_cpus(); | 1006 | queue_work_on(cpu, work_on_cpu_wq, &wfc.work); |
| 1005 | if (unlikely(!cpu_online(cpu))) | 1007 | flush_work(&wfc.work); |
| 1006 | wfc.ret = -EINVAL; | ||
| 1007 | else { | ||
| 1008 | schedule_work_on(cpu, &wfc.work); | ||
| 1009 | flush_work(&wfc.work); | ||
| 1010 | } | ||
| 1011 | put_online_cpus(); | ||
| 1012 | 1008 | ||
| 1013 | return wfc.ret; | 1009 | return wfc.ret; |
| 1014 | } | 1010 | } |
| @@ -1025,4 +1021,8 @@ void __init init_workqueues(void) | |||
| 1025 | hotcpu_notifier(workqueue_cpu_callback, 0); | 1021 | hotcpu_notifier(workqueue_cpu_callback, 0); |
| 1026 | keventd_wq = create_workqueue("events"); | 1022 | keventd_wq = create_workqueue("events"); |
| 1027 | BUG_ON(!keventd_wq); | 1023 | BUG_ON(!keventd_wq); |
| 1024 | #ifdef CONFIG_SMP | ||
| 1025 | work_on_cpu_wq = create_workqueue("work_on_cpu"); | ||
| 1026 | BUG_ON(!work_on_cpu_wq); | ||
| 1027 | #endif | ||
| 1028 | } | 1028 | } |
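
The workqueue.c changes above give work_on_cpu() its own dedicated queue instead of riding on keventd via schedule_work_on(), which avoids deadlocking against other users of the shared keventd queue; the cost, as the updated comment says, is that keeping the target CPU online is now the caller's problem. Stripped of the CPU binding, the synchronous dispatch pattern looks like this (a pthread sketch, with pthread_join standing in for flush_work()):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Package fn + arg + return slot, as struct work_for_cpu does above. */
struct work_for_cpu {
        long (*fn)(void *);
        void *arg;
        long ret;
};

static void *worker(void *w_)
{
        struct work_for_cpu *w = w_;

        w->ret = w->fn(w->arg);
        return NULL;
}

static long run_on_worker(long (*fn)(void *), void *arg)
{
        struct work_for_cpu wfc = { .fn = fn, .arg = arg };
        pthread_t t;

        pthread_create(&t, NULL, worker, &wfc); /* queue_work_on() analogue */
        pthread_join(t, NULL);                  /* flush_work() analogue */
        return wfc.ret;
}

static long triple(void *p)
{
        return 3 * (long)(intptr_t)p;
}

int main(void)
{
        printf("%ld\n", run_on_worker(triple, (void *)(intptr_t)14)); /* 42 */
        return 0;
}
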
