Diffstat (limited to 'kernel')
52 files changed, 1135 insertions, 674 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 0b5ff083fa22..353d3fe8ba33 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+obj-$(CONFIG_SMP) += smp.o
 ifneq ($(CONFIG_SMP),y)
 obj-y += up.o
 endif
@@ -100,6 +100,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
+obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
@@ -121,7 +122,7 @@ $(obj)/configs.o: $(obj)/config_data.h
 # config_data.h contains the same information as ikconfig.h but gzipped.
 # Info from config_data can be extracted from /proc/config*
 targets += config_data.gz
-$(obj)/config_data.gz: .config FORCE
+$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
	$(call if_changed,gzip)
 
 quiet_cmd_ikconfiggz = IKCFG   $@
diff --git a/kernel/audit.c b/kernel/audit.c
index 77770a034d59..e4956244ae50 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -400,7 +400,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
 	if (err < 0) {
 		BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
 		printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
-		audit_log_lost("auditd dissapeared\n");
+		audit_log_lost("auditd disappeared\n");
 		audit_pid = 0;
 		/* we might get lucky and get this in the next auditd */
 		audit_hold_skb(skb);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 51cddc11cd85..b24d7027b83c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -763,9 +763,8 @@ EXPORT_SYMBOL_GPL(cgroup_unlock);
  * -> cgroup_mkdir.
  */
 
-static struct dentry *cgroup_lookup(struct inode *dir,
-		struct dentry *dentry, struct nameidata *nd);
 static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp);
 static const struct inode_operations cgroup_dir_inode_operations;
@@ -862,6 +861,11 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
 	iput(inode);
 }
 
+static int cgroup_delete(const struct dentry *d)
+{
+	return 1;
+}
+
 static void remove_dir(struct dentry *d)
 {
 	struct dentry *parent = dget(d->d_parent);
@@ -912,7 +916,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
 
 	parent = dentry->d_parent;
 	spin_lock(&parent->d_lock);
-	spin_lock(&dentry->d_lock);
+	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 	list_del_init(&dentry->d_u.d_child);
 	spin_unlock(&dentry->d_lock);
 	spin_unlock(&parent->d_lock);
@@ -1451,6 +1455,11 @@ static int cgroup_set_super(struct super_block *sb, void *data)
 
 static int cgroup_get_rootdir(struct super_block *sb)
 {
+	static const struct dentry_operations cgroup_dops = {
+		.d_iput = cgroup_diput,
+		.d_delete = cgroup_delete,
+	};
+
 	struct inode *inode =
 		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
 	struct dentry *dentry;
@@ -1468,6 +1477,8 @@ static int cgroup_get_rootdir(struct super_block *sb)
 		return -ENOMEM;
 	}
 	sb->s_root = dentry;
+	/* for everything else we want ->d_op set */
+	sb->s_d_op = &cgroup_dops;
 	return 0;
 }
 
@@ -2197,6 +2208,14 @@ static const struct inode_operations cgroup_dir_inode_operations = {
 	.rename = cgroup_rename,
 };
 
+static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	if (dentry->d_name.len > NAME_MAX)
+		return ERR_PTR(-ENAMETOOLONG);
+	d_add(dentry, NULL);
+	return NULL;
+}
+
 /*
  * Check if a file is a control file
  */
@@ -2207,26 +2226,6 @@ static inline struct cftype *__file_cft(struct file *file)
 	return __d_cft(file->f_dentry);
 }
 
-static int cgroup_delete_dentry(const struct dentry *dentry)
-{
-	return 1;
-}
-
-static struct dentry *cgroup_lookup(struct inode *dir,
-		struct dentry *dentry, struct nameidata *nd)
-{
-	static const struct dentry_operations cgroup_dentry_operations = {
-		.d_delete = cgroup_delete_dentry,
-		.d_iput = cgroup_diput,
-	};
-
-	if (dentry->d_name.len > NAME_MAX)
-		return ERR_PTR(-ENAMETOOLONG);
-	d_set_d_op(dentry, &cgroup_dentry_operations);
-	d_add(dentry, NULL);
-	return NULL;
-}
-
 static int cgroup_create_file(struct dentry *dentry, mode_t mode,
			      struct super_block *sb)
 {
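The cgroup.c hunks above follow the 2.6.38 dcache change that lets a filesystem install default dentry operations once on the superblock (sb->s_d_op, which d_alloc() then copies into every new dentry) instead of calling d_set_d_op() from every ->lookup(). A minimal sketch of the pattern, assuming the same era's VFS interfaces; the names below are illustrative, not from the patch:

#include <linux/dcache.h>
#include <linux/fs.h>

/* never keep unused dentries cached, mirroring cgroup_delete() */
static int example_d_delete(const struct dentry *dentry)
{
	return 1;
}

static const struct dentry_operations example_dops = {
	.d_delete = example_d_delete,
};

/* run once at mount time: from here on d_alloc() stamps example_dops
 * onto every dentry created on this superblock, so no per-lookup
 * d_set_d_op() is needed */
static void example_install_dops(struct super_block *sb)
{
	sb->s_d_op = &example_dops;
}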
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index a6e729766821..bd3e8e29caa3 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2914,7 +2914,7 @@ static void __init kdb_cmd_init(void)
 	}
 }
 
-/* Intialize kdb_printf, breakpoint tables and kdb state */
+/* Initialize kdb_printf, breakpoint tables and kdb state */
 void __init kdb_init(int lvl)
 {
 	static int kdb_init_lvl = KDB_NOT_INITIALIZED;
diff --git a/kernel/exit.c b/kernel/exit.c
index 89c74861a3da..f9a45ebcc7b1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -994,6 +994,15 @@ NORET_TYPE void do_exit(long code)
 	exit_fs(tsk);
 	check_stack_usage();
 	exit_thread();
+
+	/*
+	 * Flush inherited counters to the parent - before the parent
+	 * gets woken up by child-exit notifications.
+	 *
+	 * because of cgroup mode, must be called before cgroup_exit()
+	 */
+	perf_event_exit_task(tsk);
+
 	cgroup_exit(tsk, 1);
 
 	if (group_dead)
@@ -1007,11 +1016,6 @@ NORET_TYPE void do_exit(long code)
 	 * FIXME: do that only when needed, using sched_exit tracepoint
 	 */
 	flush_ptrace_hw_breakpoint(tsk);
-	/*
-	 * Flush inherited counters to the parent - before the parent
-	 * gets woken up by child-exit notifications.
-	 */
-	perf_event_exit_task(tsk);
 
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
diff --git a/kernel/fork.c b/kernel/fork.c
index d9b44f20b6b0..25e429152ddc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -66,6 +66,7 @@
 #include <linux/posix-timers.h>
 #include <linux/user-return-notifier.h>
 #include <linux/oom.h>
+#include <linux/khugepaged.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -330,6 +331,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	retval = ksm_fork(mm, oldmm);
 	if (retval)
 		goto out;
+	retval = khugepaged_fork(mm, oldmm);
+	if (retval)
+		goto out;
 
 	prev = NULL;
 	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
@@ -529,6 +533,9 @@ void __mmdrop(struct mm_struct *mm)
 	mm_free_pgd(mm);
 	destroy_context(mm);
 	mmu_notifier_mm_destroy(mm);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	VM_BUG_ON(mm->pmd_huge_pte);
+#endif
 	free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
@@ -543,6 +550,7 @@ void mmput(struct mm_struct *mm)
 	if (atomic_dec_and_test(&mm->mm_users)) {
 		exit_aio(mm);
 		ksm_exit(mm);
+		khugepaged_exit(mm); /* must run before exit_mmap */
 		exit_mmap(mm);
 		set_mm_exe_file(mm, NULL);
 		if (!list_empty(&mm->mmlist)) {
@@ -669,6 +677,10 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
 	mm->token_priority = 0;
 	mm->last_interval = 0;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	mm->pmd_huge_pte = NULL;
+#endif
+
 	if (!mm_init(mm, tsk))
 		goto fail_nomem;
 
@@ -910,6 +922,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 
 	sig->oom_adj = current->signal->oom_adj;
 	sig->oom_score_adj = current->signal->oom_score_adj;
+	sig->oom_score_adj_min = current->signal->oom_score_adj_min;
 
 	mutex_init(&sig->cred_guard_mutex);
 
@@ -1410,23 +1423,6 @@ long do_fork(unsigned long clone_flags,
 	}
 
 	/*
-	 * We hope to recycle these flags after 2.6.26
-	 */
-	if (unlikely(clone_flags & CLONE_STOPPED)) {
-		static int __read_mostly count = 100;
-
-		if (count > 0 && printk_ratelimit()) {
-			char comm[TASK_COMM_LEN];
-
-			count--;
-			printk(KERN_INFO "fork(): process `%s' used deprecated "
-					"clone flags 0x%lx\n",
-				get_task_comm(comm, current),
-				clone_flags & CLONE_STOPPED);
-		}
-	}
-
-	/*
 	 * When called from kernel_thread, don't do user tracing stuff.
 	 */
 	if (likely(user_mode(regs)))
@@ -1464,16 +1460,7 @@ long do_fork(unsigned long clone_flags,
 		 */
 		p->flags &= ~PF_STARTING;
 
-		if (unlikely(clone_flags & CLONE_STOPPED)) {
-			/*
-			 * We'll start up with an immediate SIGSTOP.
-			 */
-			sigaddset(&p->pending.signal, SIGSTOP);
-			set_tsk_thread_flag(p, TIF_SIGPENDING);
-			__set_task_state(p, TASK_STOPPED);
-		} else {
-			wake_up_new_task(p, clone_flags);
-		}
+		wake_up_new_task(p, clone_flags);
 
 		tracehook_report_clone_complete(trace, regs,
 						clone_flags, nr, p);
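The fork.c hunks wire transparent hugepages into the mm lifecycle the same way KSM already is: register at fork, reset the per-mm field in dup_mm(), unregister before teardown. A condensed sketch of the ordering the diff establishes (the "before exit_mmap" constraint is from the in-line comment above; the rationale clause is an inference):

	/* dup_mmap(): register the new mm with both daemons */
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	/* mmput(): unregister before exit_mmap(), presumably so
	 * khugepaged cannot operate on an address space that is
	 * being torn down */
	ksm_exit(mm);
	khugepaged_exit(mm);
	exit_mmap(mm);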
diff --git a/kernel/freezer.c b/kernel/freezer.c
index bd1d42b17cb2..66ecd2ead215 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -104,8 +104,13 @@ bool freeze_task(struct task_struct *p, bool sig_only)
 	}
 
 	if (should_send_signal(p)) {
-		if (!signal_pending(p))
-			fake_signal_wake_up(p);
+		fake_signal_wake_up(p);
+		/*
+		 * fake_signal_wake_up() goes through p's scheduler
+		 * lock and guarantees that TASK_STOPPED/TRACED ->
+		 * TASK_RUNNING transition can't race with task state
+		 * testing in try_to_freeze_tasks().
+		 */
 	} else if (sig_only) {
 		return false;
 	} else {
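The dropped signal_pending() check had made the wakeup conditional; the fix makes it unconditional because the wakeup itself is the synchronization: try_to_wake_up() takes the task's scheduler lock, so the STOPPED/TRACED -> RUNNING transition and the freezer's state test are serialized. A hedged userspace analogue of that guarantee (pthreads; the kernel uses the scheduler lock, not a mutex):

#include <pthread.h>

enum task_state { RUNNING, STOPPED };

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static enum task_state state = STOPPED;

/* fake_signal_wake_up() side: the transition happens under the lock */
static void wake(void)
{
	pthread_mutex_lock(&state_lock);
	state = RUNNING;
	pthread_mutex_unlock(&state_lock);
}

/* try_to_freeze_tasks() side: testing under the same lock can never
 * observe the transition half-made */
static int is_stopped(void)
{
	int ret;

	pthread_mutex_lock(&state_lock);
	ret = (state == STOPPED);
	pthread_mutex_unlock(&state_lock);
	return ret;
}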
diff --git a/kernel/futex.c b/kernel/futex.c
index 3019b92e6917..b766d28accd6 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -233,7 +233,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
-	struct page *page;
+	struct page *page, *page_head;
 	int err;
 
 	/*
@@ -265,11 +265,46 @@ again:
 	if (err < 0)
 		return err;
 
-	page = compound_head(page);
-	lock_page(page);
-	if (!page->mapping) {
-		unlock_page(page);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	page_head = page;
+	if (unlikely(PageTail(page))) {
 		put_page(page);
+		/* serialize against __split_huge_page_splitting() */
+		local_irq_disable();
+		if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
+			page_head = compound_head(page);
+			/*
+			 * page_head is valid pointer but we must pin
+			 * it before taking the PG_lock and/or
+			 * PG_compound_lock. The moment we re-enable
+			 * irqs __split_huge_page_splitting() can
+			 * return and the head page can be freed from
+			 * under us. We can't take the PG_lock and/or
+			 * PG_compound_lock on a page that could be
+			 * freed from under us.
+			 */
+			if (page != page_head) {
+				get_page(page_head);
+				put_page(page);
+			}
+			local_irq_enable();
+		} else {
+			local_irq_enable();
+			goto again;
+		}
+	}
+#else
+	page_head = compound_head(page);
+	if (page != page_head) {
+		get_page(page_head);
+		put_page(page);
+	}
+#endif
+
+	lock_page(page_head);
+	if (!page_head->mapping) {
+		unlock_page(page_head);
+		put_page(page_head);
 		goto again;
 	}
 
@@ -280,20 +315,20 @@ again:
 	 * it's a read-only handle, it's expected that futexes attach to
 	 * the object not the particular process.
 	 */
-	if (PageAnon(page)) {
+	if (PageAnon(page_head)) {
 		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
 		key->private.mm = mm;
 		key->private.address = address;
 	} else {
 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
-		key->shared.inode = page->mapping->host;
-		key->shared.pgoff = page->index;
+		key->shared.inode = page_head->mapping->host;
+		key->shared.pgoff = page_head->index;
 	}
 
 	get_futex_key_refs(key);
 
-	unlock_page(page);
-	put_page(page);
+	unlock_page(page_head);
+	put_page(page_head);
 	return 0;
 }
 
@@ -791,10 +826,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
 	/*
-	 * This happens when we have stolen the lock and the original
-	 * pending owner did not enqueue itself back on the rt_mutex.
-	 * Thats not a tragedy. We know that way, that a lock waiter
-	 * is on the fly. We make the futex_q waiter the pending owner.
+	 * It is possible that the next waiter (the one that brought
+	 * this owner to the kernel) timed out and is no longer
+	 * waiting on the lock.
 	 */
 	if (!new_owner)
 		new_owner = this->task;
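The get_futex_key() rework is the THP-safe pinning idiom: the page returned by the gup may be the tail of a huge page that is concurrently being split, while the fields the key needs (->mapping, ->index) live in the head page. Disabling irqs serializes against the splitter because, on the configurations this targets, __split_huge_page_splitting() has to complete an IPI-delivered TLB flush before it can return, and a CPU running with irqs off holds that IPI back -- the same property __get_user_pages_fast() itself depends on. Distilled, the non-THP branch above is the general "stabilize a compound page" pattern:

	/* pin the head before using head-page fields */
	page_head = compound_head(page);
	if (page != page_head) {
		get_page(page_head);	/* take a reference on the head... */
		put_page(page);		/* ...then drop the tail reference */
	}
	/* page_head->mapping and page_head->index are now safe to use
	 * under lock_page(page_head) */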
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 45da2b6920ab..0c8d7c048615 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1745,7 +1745,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
 	}
 
 	/*
-	 * A NULL parameter means "inifinte"
+	 * A NULL parameter means "infinite"
 	 */
 	if (!expires) {
 		schedule();
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 31d766bf5d2e..8e42fec7686d 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -9,9 +9,6 @@ menu "IRQ subsystem"
 config GENERIC_HARDIRQS
	def_bool y
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 # Select this to disable the deprecated stuff
 config GENERIC_HARDIRQS_NO_DEPRECATED
	def_bool n
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e2347eb63306..3540a7190122 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -118,114 +118,3 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 
 	return retval;
 }
-
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-
-#ifdef CONFIG_ENABLE_WARN_DEPRECATED
-# warning __do_IRQ is deprecated. Please convert to proper flow handlers
-#endif
-
-/**
- * __do_IRQ - original all in one highlevel IRQ handler
- * @irq: the interrupt number
- *
- * __do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- *
- * This is the original x86 implementation which is used for every
- * interrupt type.
- */
-unsigned int __do_IRQ(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irqaction *action;
-	unsigned int status;
-
-	kstat_incr_irqs_this_cpu(irq, desc);
-
-	if (CHECK_IRQ_PER_CPU(desc->status)) {
-		irqreturn_t action_ret;
-
-		/*
-		 * No locking required for CPU-local interrupts:
-		 */
-		if (desc->irq_data.chip->ack)
-			desc->irq_data.chip->ack(irq);
-		if (likely(!(desc->status & IRQ_DISABLED))) {
-			action_ret = handle_IRQ_event(irq, desc->action);
-			if (!noirqdebug)
-				note_interrupt(irq, desc, action_ret);
-		}
-		desc->irq_data.chip->end(irq);
-		return 1;
-	}
-
-	raw_spin_lock(&desc->lock);
-	if (desc->irq_data.chip->ack)
-		desc->irq_data.chip->ack(irq);
-	/*
-	 * REPLAY is when Linux resends an IRQ that was dropped earlier
-	 * WAITING is used by probe to mark irqs that are being tested
-	 */
-	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-	status |= IRQ_PENDING; /* we _want_ to handle it */
-
-	/*
-	 * If the IRQ is disabled for whatever reason, we cannot
-	 * use the action we have.
-	 */
-	action = NULL;
-	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
-		action = desc->action;
-		status &= ~IRQ_PENDING; /* we commit to handling */
-		status |= IRQ_INPROGRESS; /* we are handling it */
-	}
-	desc->status = status;
-
-	/*
-	 * If there is no IRQ handler or it was disabled, exit early.
-	 * Since we set PENDING, if another processor is handling
-	 * a different instance of this same irq, the other processor
-	 * will take care of it.
-	 */
-	if (unlikely(!action))
-		goto out;
-
-	/*
-	 * Edge triggered interrupts need to remember
-	 * pending events.
-	 * This applies to any hw interrupts that allow a second
-	 * instance of the same irq to arrive while we are in do_IRQ
-	 * or in the handler. But the code here only handles the _second_
-	 * instance of the irq, not the third or fourth. So it is mostly
-	 * useful for irq hardware that does not mask cleanly in an
-	 * SMP environment.
-	 */
-	for (;;) {
-		irqreturn_t action_ret;
-
-		raw_spin_unlock(&desc->lock);
-
-		action_ret = handle_IRQ_event(irq, action);
-		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret);
-
-		raw_spin_lock(&desc->lock);
-		if (likely(!(desc->status & IRQ_PENDING)))
-			break;
-		desc->status &= ~IRQ_PENDING;
-	}
-	desc->status &= ~IRQ_INPROGRESS;
-
-out:
-	/*
-	 * The ->end() handler has to deal with interrupts which got
-	 * disabled while the handler was running.
-	 */
-	desc->irq_data.chip->end(irq);
-	raw_spin_unlock(&desc->lock);
-
-	return 1;
-}
-#endif
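With the last architectures converted, __do_IRQ() and its Kconfig escape hatch (removed from kernel/irq/Kconfig above) are gone: every interrupt now runs through a flow handler matched to its trigger type. For reference, the replacement pattern on the 2.6.37-era genirq API -- a sketch with an illustrative chip, not code from this patch:

#include <linux/irq.h>

static struct irq_chip example_chip;	/* illustrative; normally filled
					 * in with mask/unmask/ack ops */

static void example_map_irq(unsigned int irq)
{
	/* a level-triggered line pairs its chip with the generic level
	 * flow handler instead of funnelling through __do_IRQ() */
	set_irq_chip_and_handler(irq, &example_chip, handle_level_irq);
}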
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 9988d03797f5..282f20230e67 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -72,6 +72,8 @@ static inline int desc_node(struct irq_desc *desc) { return 0; }
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
 {
+	int cpu;
+
 	desc->irq_data.irq = irq;
 	desc->irq_data.chip = &no_irq_chip;
 	desc->irq_data.chip_data = NULL;
@@ -83,7 +85,8 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 	desc->name = NULL;
-	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
 	desc_smp_init(desc, node);
 }
 
@@ -133,8 +136,7 @@ static struct irq_desc *alloc_desc(int irq, int node)
 	if (!desc)
 		return NULL;
 	/* allocate based on nr_cpu_ids */
-	desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
-					gfp, node);
+	desc->kstat_irqs = alloc_percpu(unsigned int);
 	if (!desc->kstat_irqs)
 		goto err_desc;
 
@@ -149,7 +151,7 @@ static struct irq_desc *alloc_desc(int irq, int node)
 	return desc;
 
 err_kstat:
-	kfree(desc->kstat_irqs);
+	free_percpu(desc->kstat_irqs);
 err_desc:
 	kfree(desc);
 	return NULL;
@@ -166,7 +168,7 @@ static void free_desc(unsigned int irq)
 	mutex_unlock(&sparse_irq_lock);
 
 	free_masks(desc);
-	kfree(desc->kstat_irqs);
+	free_percpu(desc->kstat_irqs);
 	kfree(desc);
 }
 
@@ -234,7 +236,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	}
 };
 
-static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
 int __init early_irq_init(void)
 {
 	int count, i, node = first_online_node;
@@ -250,7 +251,8 @@ int __init early_irq_init(void)
 	for (i = 0; i < count; i++) {
 		desc[i].irq_data.irq = i;
 		desc[i].irq_data.chip = &no_irq_chip;
-		desc[i].kstat_irqs = kstat_irqs_all[i];
+		/* TODO : do this allocation on-demand ... */
+		desc[i].kstat_irqs = alloc_percpu(unsigned int);
 		alloc_masks(desc + i, GFP_KERNEL, node);
 		desc_smp_init(desc + i, node);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
@@ -275,6 +277,22 @@ static void free_desc(unsigned int irq)
 
 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
 {
+#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
+	struct irq_desc *desc;
+	unsigned int i;
+
+	for (i = 0; i < cnt; i++) {
+		desc = irq_to_desc(start + i);
+		if (desc && !desc->kstat_irqs) {
+			unsigned int __percpu *stats = alloc_percpu(unsigned int);
+
+			if (!stats)
+				return -1;
+			if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
+				free_percpu(stats);
+		}
+	}
+#endif
 	return start;
 }
 #endif /* !CONFIG_SPARSE_IRQ */
@@ -391,7 +409,9 @@ void dynamic_irq_cleanup(unsigned int irq)
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	return desc ? desc->kstat_irqs[cpu] : 0;
+
+	return desc && desc->kstat_irqs ?
+			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
 }
 
 #ifdef CONFIG_GENERIC_HARDIRQS
@@ -401,10 +421,10 @@ unsigned int kstat_irqs(unsigned int irq)
 	int cpu;
 	int sum = 0;
 
-	if (!desc)
+	if (!desc || !desc->kstat_irqs)
 		return 0;
 	for_each_possible_cpu(cpu)
-		sum += desc->kstat_irqs[cpu];
+		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
 	return sum;
 }
 #endif /* CONFIG_GENERIC_HARDIRQS */
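Two idioms are worth calling out in the irqdesc.c conversion. First, the statically sized kstat_irqs_all[NR_IRQS][NR_CPUS] matrix becomes a per-descriptor percpu allocation, so the interrupt hot path touches only the local CPU's counter and only readers fold across CPUs. Second, the cmpxchg() in alloc_descs() is the standard lock-free publish: racing allocators may both call alloc_percpu(), exactly one wins the cmpxchg, and the loser frees its copy. The counter pattern in isolation (a sketch using the same percpu API as the patch):

#include <linux/percpu.h>

static unsigned int __percpu *example_alloc(void)
{
	/* one counter per possible CPU, sized at runtime */
	return alloc_percpu(unsigned int);
}

static void example_hit(unsigned int __percpu *stats)
{
	this_cpu_inc(*stats);	/* hot path: no shared cacheline */
}

static unsigned int example_sum(unsigned int __percpu *stats)
{
	unsigned int cpu, sum = 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(stats, cpu);	/* slow path */
	return sum;
}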
diff --git a/kernel/kexec.c b/kernel/kexec.c
index b55045bc7563..ec19b92c7ebd 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -163,7 +163,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 	 * just verifies it is an address we can use.
 	 *
 	 * Since the kernel does everything in page size chunks ensure
-	 * the destination addreses are page aligned. Too many
+	 * the destination addresses are page aligned. Too many
 	 * special cases crop of when we don't do this. The most
 	 * insidious is getting overlapping destination addresses
 	 * simply because addresses are changed to page size
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 17110a4a4fc2..ee74b35e528d 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -241,24 +241,19 @@ static int lstats_show(struct seq_file *m, void *v)
 	seq_puts(m, "Latency Top version : v0.1\n");
 
 	for (i = 0; i < MAXLR; i++) {
-		if (latency_record[i].backtrace[0]) {
+		struct latency_record *lr = &latency_record[i];
+
+		if (lr->backtrace[0]) {
 			int q;
-			seq_printf(m, "%i %lu %lu ",
-				latency_record[i].count,
-				latency_record[i].time,
-				latency_record[i].max);
+			seq_printf(m, "%i %lu %lu",
+				   lr->count, lr->time, lr->max);
 			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
-				char sym[KSYM_SYMBOL_LEN];
-				char *c;
-				if (!latency_record[i].backtrace[q])
+				unsigned long bt = lr->backtrace[q];
+				if (!bt)
 					break;
-				if (latency_record[i].backtrace[q] == ULONG_MAX)
+				if (bt == ULONG_MAX)
 					break;
-				sprint_symbol(sym, latency_record[i].backtrace[q]);
-				c = strchr(sym, '+');
-				if (c)
-					*c = 0;
-				seq_printf(m, "%s ", sym);
+				seq_printf(m, " %ps", (void *)bt);
 			}
 			seq_printf(m, "\n");
 		}
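The lstats_show() rewrite leans on the vsprintf %p extensions instead of hand-formatting: %ps prints the bare symbol name for a kernel text address, which is exactly what the old sprint_symbol()-then-truncate-at-'+' dance produced, while %pS would keep the offset. Side by side (illustrative helper):

static void example_print(struct seq_file *m, unsigned long addr)
{
	seq_printf(m, " %ps", (void *)addr);	/* "schedule_timeout" */
	seq_printf(m, " %pS", (void *)addr);	/* "schedule_timeout+0x1f/0x2c0" */
}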
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 42ba65dff7d9..0d2058da80f5 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2292,22 +2292,6 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 }
 
 /*
- * Debugging helper: via this flag we know that we are in
- * 'early bootup code', and will warn about any invalid irqs-on event:
- */
-static int early_boot_irqs_enabled;
-
-void early_boot_irqs_off(void)
-{
-	early_boot_irqs_enabled = 0;
-}
-
-void early_boot_irqs_on(void)
-{
-	early_boot_irqs_enabled = 1;
-}
-
-/*
  * Hardirqs will be enabled:
  */
 void trace_hardirqs_on_caller(unsigned long ip)
@@ -2319,7 +2303,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
-	if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
+	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
 		return;
 
 	if (unlikely(curr->hardirqs_enabled)) {
diff --git a/kernel/panic.c b/kernel/panic.c
index 4c13b1a88ebb..991bb87a1704 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -34,6 +34,7 @@ static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
 
 int panic_timeout;
+EXPORT_SYMBOL_GPL(panic_timeout);
 
 ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
diff --git a/kernel/params.c b/kernel/params.c
index 08107d181758..0da1411222b9 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -719,9 +719,7 @@ void destroy_params(const struct kernel_param *params, unsigned num)
 			params[i].ops->free(params[i].arg);
 }
 
-static void __init kernel_add_sysfs_param(const char *name,
-					  struct kernel_param *kparam,
-					  unsigned int name_skip)
+static struct module_kobject * __init locate_module_kobject(const char *name)
 {
 	struct module_kobject *mk;
 	struct kobject *kobj;
@@ -729,10 +727,7 @@ static void __init kernel_add_sysfs_param(const char *name,
 
 	kobj = kset_find_obj(module_kset, name);
 	if (kobj) {
-		/* We already have one. Remove params so we can add more. */
 		mk = to_module_kobject(kobj);
-		/* We need to remove it before adding parameters. */
-		sysfs_remove_group(&mk->kobj, &mk->mp->grp);
 	} else {
 		mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
 		BUG_ON(!mk);
@@ -743,15 +738,36 @@
 			   "%s", name);
 		if (err) {
 			kobject_put(&mk->kobj);
-			printk(KERN_ERR "Module '%s' failed add to sysfs, "
-			       "error number %d\n", name, err);
-			printk(KERN_ERR "The system will be unstable now.\n");
-			return;
+			printk(KERN_ERR
+				"Module '%s' failed add to sysfs, error number %d\n",
+				name, err);
+			printk(KERN_ERR
+				"The system will be unstable now.\n");
+			return NULL;
 		}
-		/* So that exit path is even. */
+
+		/* So that we hold reference in both cases. */
 		kobject_get(&mk->kobj);
 	}
 
+	return mk;
+}
+
+static void __init kernel_add_sysfs_param(const char *name,
+					  struct kernel_param *kparam,
+					  unsigned int name_skip)
+{
+	struct module_kobject *mk;
+	int err;
+
+	mk = locate_module_kobject(name);
+	if (!mk)
+		return;
+
+	/* We need to remove old parameters before adding more. */
+	if (mk->mp)
+		sysfs_remove_group(&mk->kobj, &mk->mp->grp);
+
 	/* These should not fail at boot. */
 	err = add_sysfs_param(mk, kparam, kparam->name + name_skip);
 	BUG_ON(err);
@@ -796,6 +812,32 @@ static void __init param_sysfs_builtin(void)
 	}
 }
 
+ssize_t __modver_version_show(struct module_attribute *mattr,
+			      struct module *mod, char *buf)
+{
+	struct module_version_attribute *vattr =
+		container_of(mattr, struct module_version_attribute, mattr);
+
+	return sprintf(buf, "%s\n", vattr->version);
+}
+
+extern struct module_version_attribute __start___modver[], __stop___modver[];
+
+static void __init version_sysfs_builtin(void)
+{
+	const struct module_version_attribute *vattr;
+	struct module_kobject *mk;
+	int err;
+
+	for (vattr = __start___modver; vattr < __stop___modver; vattr++) {
+		mk = locate_module_kobject(vattr->module_name);
+		if (mk) {
+			err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
+			kobject_uevent(&mk->kobj, KOBJ_ADD);
+			kobject_put(&mk->kobj);
+		}
+	}
+}
 
 /* module-related sysfs stuff */
 
@@ -875,6 +917,7 @@ static int __init param_sysfs_init(void)
 	}
 	module_sysfs_initialized = 1;
 
+	version_sysfs_builtin();
 	param_sysfs_builtin();
 
 	return 0;
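version_sysfs_builtin() walks a linker-built table: each MODULE_VERSION() in built-in code is expected to drop a struct module_version_attribute into a __modver section, bounded by the __start___modver/__stop___modver symbols declared above. The same ELF idiom is easy to demonstrate in plain C (illustrative userspace analogue; GCC/clang on ELF synthesize __start_/__stop_ symbols for any section whose name is a valid C identifier):

#include <stdio.h>

struct entry {
	const char *module;
	const char *version;
};

/* each use emits one struct into the "modver" section */
#define DECLARE_VERSION(mod, ver)				\
	static const struct entry __entry_##mod		\
	__attribute__((used, section("modver"))) = { #mod, ver }

DECLARE_VERSION(foo, "1.0");
DECLARE_VERSION(bar, "2.3");

extern const struct entry __start_modver[], __stop_modver[];

int main(void)
{
	const struct entry *e;

	for (e = __start_modver; e < __stop_modver; e++)
		printf("%s: %s\n", e->module, e->version);
	return 0;
}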
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 11847bf1e8cc..126a302c481c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -38,6 +38,12 @@
 
 #include <asm/irq_regs.h>
 
+enum event_type_t {
+	EVENT_FLEXIBLE = 0x1,
+	EVENT_PINNED = 0x2,
+	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
 atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -65,6 +71,12 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_event_id;
 
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			      enum event_type_t event_type);
+
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+			     enum event_type_t event_type);
+
 void __weak perf_event_print_debug(void) { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -72,6 +84,11 @@ extern __weak const char *perf_pmu_name(void)
 	return "pmu";
 }
 
+static inline u64 perf_clock(void)
+{
+	return local_clock();
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -240,11 +257,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 	put_ctx(ctx);
 }
 
-static inline u64 perf_clock(void)
-{
-	return local_clock();
-}
-
 /*
  * Update the record of the current time in a context.
  */
@@ -256,6 +268,12 @@ static void update_context_time(struct perf_event_context *ctx)
 	ctx->timestamp = now;
 }
 
+static u64 perf_event_time(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	return ctx ? ctx->time : 0;
+}
+
 /*
  * Update the total_time_enabled and total_time_running fields for a event.
 */
@@ -269,7 +287,7 @@ static void update_event_times(struct perf_event *event)
 		return;
 
 	if (ctx->is_active)
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 	else
 		run_end = event->tstamp_stopped;
 
@@ -278,7 +296,7 @@ static void update_event_times(struct perf_event *event)
 	if (event->state == PERF_EVENT_STATE_INACTIVE)
 		run_end = event->tstamp_stopped;
 	else
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 
 	event->total_time_running = run_end - event->tstamp_running;
 }
@@ -534,6 +552,7 @@ event_sched_out(struct perf_event *event,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
 	u64 delta;
 	/*
 	 * An event which could not be activated because of
@@ -545,7 +564,7 @@ event_sched_out(struct perf_event *event,
 	    && !event_filter_match(event)) {
 		delta = ctx->time - event->tstamp_stopped;
 		event->tstamp_running += delta;
-		event->tstamp_stopped = ctx->time;
+		event->tstamp_stopped = tstamp;
 	}
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -556,7 +575,7 @@ event_sched_out(struct perf_event *event,
 		event->pending_disable = 0;
 		event->state = PERF_EVENT_STATE_OFF;
 	}
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_stopped = tstamp;
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
@@ -768,6 +787,8 @@ event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
@@ -784,9 +805,9 @@ event_sched_in(struct perf_event *event,
 		return -EAGAIN;
 	}
 
-	event->tstamp_running += ctx->time - event->tstamp_stopped;
+	event->tstamp_running += tstamp - event->tstamp_stopped;
 
-	event->shadow_ctx_time = ctx->time - ctx->timestamp;
+	event->shadow_ctx_time = tstamp - ctx->timestamp;
 
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
@@ -898,11 +919,13 @@ static int group_can_go_on(struct perf_event *event,
 static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	list_add_event(event, ctx);
 	perf_group_attach(event);
-	event->tstamp_enabled = ctx->time;
-	event->tstamp_running = ctx->time;
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_enabled = tstamp;
+	event->tstamp_running = tstamp;
+	event->tstamp_stopped = tstamp;
 }
 
 /*
@@ -937,7 +960,7 @@ static void __perf_install_in_context(void *info)
 
 	add_event_to_ctx(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
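Throughout this patch the open-coded test "event->cpu != -1 && event->cpu != smp_processor_id()" collapses into event_filter_match(), a helper that already exists in this file; at this point in the series it should amount to the following (note the inverted polarity, hence !event_filter_match() at the call sites):

static inline int event_filter_match(struct perf_event *event)
{
	return event->cpu == -1 || event->cpu == smp_processor_id();
}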
@@ -1042,14 +1065,13 @@ static void __perf_event_mark_enabled(struct perf_event *event,
					struct perf_event_context *ctx)
 {
 	struct perf_event *sub;
+	u64 tstamp = perf_event_time(event);
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
-	event->tstamp_enabled = ctx->time - event->total_time_enabled;
+	event->tstamp_enabled = tstamp - event->total_time_enabled;
 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
-		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
-			sub->tstamp_enabled =
-				ctx->time - sub->total_time_enabled;
-		}
+		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
 	}
 }
 
@@ -1082,7 +1104,7 @@ static void __perf_event_enable(void *info)
 		goto unlock;
 	__perf_event_mark_enabled(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1193,12 +1215,6 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
 	return 0;
 }
 
-enum event_type_t {
-	EVENT_FLEXIBLE = 0x1,
-	EVENT_PINNED = 0x2,
-	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
@@ -1435,7 +1451,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, 1))
@@ -1467,7 +1483,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of events:
 		 */
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1694,7 +1710,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		hwc = &event->hw;
@@ -2185,13 +2201,6 @@ find_lively_task_by_vpid(pid_t vpid)
 	if (!task)
 		return ERR_PTR(-ESRCH);
 
-	/*
-	 * Can't attach events to a dying task.
-	 */
-	err = -ESRCH;
-	if (task->flags & PF_EXITING)
-		goto errout;
-
 	/* Reuse ptrace permission checks for now. */
 	err = -EACCES;
 	if (!ptrace_may_access(task, PTRACE_MODE_READ))
@@ -2212,14 +2221,11 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 	unsigned long flags;
 	int ctxn, err;
 
-	if (!task && cpu != -1) {
+	if (!task) {
 		/* Must be root to operate on a CPU event: */
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
 
-		if (cpu < 0 || cpu >= nr_cpumask_bits)
-			return ERR_PTR(-EINVAL);
-
 		/*
 		 * We could be clever and allow to attach a event to an
 		 * offline CPU and activate it when the CPU comes up, but
@@ -2255,14 +2261,27 @@ retry:
 
 		get_ctx(ctx);
 
-		if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
-			/*
-			 * We raced with some other task; use
-			 * the context they set.
-			 */
+		err = 0;
+		mutex_lock(&task->perf_event_mutex);
+		/*
+		 * If it has already passed perf_event_exit_task().
+		 * we must see PF_EXITING, it takes this mutex too.
+		 */
+		if (task->flags & PF_EXITING)
+			err = -ESRCH;
+		else if (task->perf_event_ctxp[ctxn])
+			err = -EAGAIN;
+		else
+			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+		mutex_unlock(&task->perf_event_mutex);
+
+		if (unlikely(err)) {
 			put_task_struct(task);
 			kfree(ctx);
-			goto retry;
+
+			if (err == -EAGAIN)
+				goto retry;
+			goto errout;
 		}
 	}
 
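The retry hunk closes a race with perf_event_exit_task(): publishing the new context with a bare cmpxchg() could install it on a task whose exit path had already swept its contexts, which is also why the racy PF_EXITING check could be dropped from find_lively_task_by_vpid() above. Both sides now take task->perf_event_mutex, so the installer either sees PF_EXITING and fails with -ESRCH, or makes the context visible before the exit path runs. A userspace analogue of the handshake (pthreads; names illustrative):

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct task {
	pthread_mutex_t lock;
	int exiting;		/* stands in for PF_EXITING */
	void *ctx;		/* stands in for perf_event_ctxp[] */
};

/* installer: publish only if the exit side has not started yet */
static int install_ctx(struct task *t, void *ctx)
{
	int err = 0;

	pthread_mutex_lock(&t->lock);
	if (t->exiting)
		err = -ESRCH;	/* too late to attach */
	else if (t->ctx)
		err = -EAGAIN;	/* raced with another installer */
	else
		t->ctx = ctx;	/* visible before exit can run */
	pthread_mutex_unlock(&t->lock);
	return err;
}

/* exit side: flag first, then detach, under the same lock */
static void *task_exit(struct task *t)
{
	void *old;

	pthread_mutex_lock(&t->lock);
	t->exiting = 1;
	old = t->ctx;
	t->ctx = NULL;
	pthread_mutex_unlock(&t->lock);
	return old;	/* caller tears down; nothing can slip in after */
}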
| @@ -3893,7 +3912,7 @@ static int perf_event_task_match(struct perf_event *event) | |||
| 3893 | if (event->state < PERF_EVENT_STATE_INACTIVE) | 3912 | if (event->state < PERF_EVENT_STATE_INACTIVE) |
| 3894 | return 0; | 3913 | return 0; |
| 3895 | 3914 | ||
| 3896 | if (event->cpu != -1 && event->cpu != smp_processor_id()) | 3915 | if (!event_filter_match(event)) |
| 3897 | return 0; | 3916 | return 0; |
| 3898 | 3917 | ||
| 3899 | if (event->attr.comm || event->attr.mmap || | 3918 | if (event->attr.comm || event->attr.mmap || |
| @@ -4030,7 +4049,7 @@ static int perf_event_comm_match(struct perf_event *event) | |||
| 4030 | if (event->state < PERF_EVENT_STATE_INACTIVE) | 4049 | if (event->state < PERF_EVENT_STATE_INACTIVE) |
| 4031 | return 0; | 4050 | return 0; |
| 4032 | 4051 | ||
| 4033 | if (event->cpu != -1 && event->cpu != smp_processor_id()) | 4052 | if (!event_filter_match(event)) |
| 4034 | return 0; | 4053 | return 0; |
| 4035 | 4054 | ||
| 4036 | if (event->attr.comm) | 4055 | if (event->attr.comm) |
| @@ -4178,7 +4197,7 @@ static int perf_event_mmap_match(struct perf_event *event, | |||
| 4178 | if (event->state < PERF_EVENT_STATE_INACTIVE) | 4197 | if (event->state < PERF_EVENT_STATE_INACTIVE) |
| 4179 | return 0; | 4198 | return 0; |
| 4180 | 4199 | ||
| 4181 | if (event->cpu != -1 && event->cpu != smp_processor_id()) | 4200 | if (!event_filter_match(event)) |
| 4182 | return 0; | 4201 | return 0; |
| 4183 | 4202 | ||
| 4184 | if ((!executable && event->attr.mmap_data) || | 4203 | if ((!executable && event->attr.mmap_data) || |
| @@ -4648,7 +4667,7 @@ int perf_swevent_get_recursion_context(void) | |||
| 4648 | } | 4667 | } |
| 4649 | EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); | 4668 | EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); |
| 4650 | 4669 | ||
| 4651 | void inline perf_swevent_put_recursion_context(int rctx) | 4670 | inline void perf_swevent_put_recursion_context(int rctx) |
| 4652 | { | 4671 | { |
| 4653 | struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); | 4672 | struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); |
| 4654 | 4673 | ||
| @@ -5361,6 +5380,8 @@ free_dev: | |||
| 5361 | goto out; | 5380 | goto out; |
| 5362 | } | 5381 | } |
| 5363 | 5382 | ||
| 5383 | static struct lock_class_key cpuctx_mutex; | ||
| 5384 | |||
| 5364 | int perf_pmu_register(struct pmu *pmu, char *name, int type) | 5385 | int perf_pmu_register(struct pmu *pmu, char *name, int type) |
| 5365 | { | 5386 | { |
| 5366 | int cpu, ret; | 5387 | int cpu, ret; |
| @@ -5409,6 +5430,7 @@ skip_type: | |||
| 5409 | 5430 | ||
| 5410 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); | 5431 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
| 5411 | __perf_event_init_context(&cpuctx->ctx); | 5432 | __perf_event_init_context(&cpuctx->ctx); |
| 5433 | lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); | ||
| 5412 | cpuctx->ctx.type = cpu_context; | 5434 | cpuctx->ctx.type = cpu_context; |
| 5413 | cpuctx->ctx.pmu = pmu; | 5435 | cpuctx->ctx.pmu = pmu; |
| 5414 | cpuctx->jiffies_interval = 1; | 5436 | cpuctx->jiffies_interval = 1; |
| @@ -5525,6 +5547,11 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, | |||
| 5525 | struct hw_perf_event *hwc; | 5547 | struct hw_perf_event *hwc; |
| 5526 | long err; | 5548 | long err; |
| 5527 | 5549 | ||
| 5550 | if ((unsigned)cpu >= nr_cpu_ids) { | ||
| 5551 | if (!task || cpu != -1) | ||
| 5552 | return ERR_PTR(-EINVAL); | ||
| 5553 | } | ||
| 5554 | |||
| 5528 | event = kzalloc(sizeof(*event), GFP_KERNEL); | 5555 | event = kzalloc(sizeof(*event), GFP_KERNEL); |
| 5529 | if (!event) | 5556 | if (!event) |
| 5530 | return ERR_PTR(-ENOMEM); | 5557 | return ERR_PTR(-ENOMEM); |
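This replaces the narrower check deleted from find_get_context() above: a CPU argument is now accepted either when it names a possible CPU (0 <= cpu < nr_cpu_ids) or when it is -1 for a per-task event. A hedged restatement of the accepted combinations, with cpu_arg_ok as an illustrative name:

	/* illustrative only: the (task, cpu) pairs the new check admits */
	static bool cpu_arg_ok(struct task_struct *task, int cpu)
	{
		if ((unsigned)cpu < nr_cpu_ids)
			return true;		/* a possible CPU number */
		return task && cpu == -1;	/* per-task event on any CPU */
	}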
| @@ -5573,7 +5600,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, | |||
| 5573 | 5600 | ||
| 5574 | if (!overflow_handler && parent_event) | 5601 | if (!overflow_handler && parent_event) |
| 5575 | overflow_handler = parent_event->overflow_handler; | 5602 | overflow_handler = parent_event->overflow_handler; |
| 5576 | 5603 | ||
| 5577 | event->overflow_handler = overflow_handler; | 5604 | event->overflow_handler = overflow_handler; |
| 5578 | 5605 | ||
| 5579 | if (attr->disabled) | 5606 | if (attr->disabled) |
| @@ -6109,7 +6136,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) | |||
| 6109 | * scheduled, so we are now safe from rescheduling changing | 6136 | * scheduled, so we are now safe from rescheduling changing |
| 6110 | * our context. | 6137 | * our context. |
| 6111 | */ | 6138 | */ |
| 6112 | child_ctx = child->perf_event_ctxp[ctxn]; | 6139 | child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); |
| 6113 | task_ctx_sched_out(child_ctx, EVENT_ALL); | 6140 | task_ctx_sched_out(child_ctx, EVENT_ALL); |
| 6114 | 6141 | ||
| 6115 | /* | 6142 | /* |
| @@ -6422,11 +6449,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 6422 | unsigned long flags; | 6449 | unsigned long flags; |
| 6423 | int ret = 0; | 6450 | int ret = 0; |
| 6424 | 6451 | ||
| 6425 | child->perf_event_ctxp[ctxn] = NULL; | ||
| 6426 | |||
| 6427 | mutex_init(&child->perf_event_mutex); | ||
| 6428 | INIT_LIST_HEAD(&child->perf_event_list); | ||
| 6429 | |||
| 6430 | if (likely(!parent->perf_event_ctxp[ctxn])) | 6452 | if (likely(!parent->perf_event_ctxp[ctxn])) |
| 6431 | return 0; | 6453 | return 0; |
| 6432 | 6454 | ||
| @@ -6478,7 +6500,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 6478 | 6500 | ||
| 6479 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); | 6501 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); |
| 6480 | parent_ctx->rotate_disable = 0; | 6502 | parent_ctx->rotate_disable = 0; |
| 6481 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); | ||
| 6482 | 6503 | ||
| 6483 | child_ctx = child->perf_event_ctxp[ctxn]; | 6504 | child_ctx = child->perf_event_ctxp[ctxn]; |
| 6484 | 6505 | ||
| @@ -6486,12 +6507,11 @@ int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 6486 | /* | 6507 | /* |
| 6487 | * Mark the child context as a clone of the parent | 6508 | * Mark the child context as a clone of the parent |
| 6488 | * context, or of whatever the parent is a clone of. | 6509 | * context, or of whatever the parent is a clone of. |
| 6489 | * Note that if the parent is a clone, it could get | 6510 | * |
| 6490 | * uncloned at any point, but that doesn't matter | 6511 | * Note that if the parent is a clone, the holding of |
| 6491 | * because the list of events and the generation | 6512 | * parent_ctx->lock prevents it from being uncloned. |
| 6492 | * count can't have changed since we took the mutex. | ||
| 6493 | */ | 6513 | */ |
| 6494 | cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); | 6514 | cloned_ctx = parent_ctx->parent_ctx; |
| 6495 | if (cloned_ctx) { | 6515 | if (cloned_ctx) { |
| 6496 | child_ctx->parent_ctx = cloned_ctx; | 6516 | child_ctx->parent_ctx = cloned_ctx; |
| 6497 | child_ctx->parent_gen = parent_ctx->parent_gen; | 6517 | child_ctx->parent_gen = parent_ctx->parent_gen; |
| @@ -6502,6 +6522,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 6502 | get_ctx(child_ctx->parent_ctx); | 6522 | get_ctx(child_ctx->parent_ctx); |
| 6503 | } | 6523 | } |
| 6504 | 6524 | ||
| 6525 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); | ||
| 6505 | mutex_unlock(&parent_ctx->mutex); | 6526 | mutex_unlock(&parent_ctx->mutex); |
| 6506 | 6527 | ||
| 6507 | perf_unpin_context(parent_ctx); | 6528 | perf_unpin_context(parent_ctx); |
| @@ -6516,6 +6537,10 @@ int perf_event_init_task(struct task_struct *child) | |||
| 6516 | { | 6537 | { |
| 6517 | int ctxn, ret; | 6538 | int ctxn, ret; |
| 6518 | 6539 | ||
| 6540 | memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp)); | ||
| 6541 | mutex_init(&child->perf_event_mutex); | ||
| 6542 | INIT_LIST_HEAD(&child->perf_event_list); | ||
| 6543 | |||
| 6519 | for_each_task_context_nr(ctxn) { | 6544 | for_each_task_context_nr(ctxn) { |
| 6520 | ret = perf_event_init_context(child, ctxn); | 6545 | ret = perf_event_init_context(child, ctxn); |
| 6521 | if (ret) | 6546 | if (ret) |
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index a5aff3ebad38..265729966ece 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
| @@ -100,13 +100,9 @@ config PM_SLEEP_ADVANCED_DEBUG | |||
| 100 | depends on PM_ADVANCED_DEBUG | 100 | depends on PM_ADVANCED_DEBUG |
| 101 | default n | 101 | default n |
| 102 | 102 | ||
| 103 | config SUSPEND_NVS | ||
| 104 | bool | ||
| 105 | |||
| 106 | config SUSPEND | 103 | config SUSPEND |
| 107 | bool "Suspend to RAM and standby" | 104 | bool "Suspend to RAM and standby" |
| 108 | depends on PM && ARCH_SUSPEND_POSSIBLE | 105 | depends on PM && ARCH_SUSPEND_POSSIBLE |
| 109 | select SUSPEND_NVS if HAS_IOMEM | ||
| 110 | default y | 106 | default y |
| 111 | ---help--- | 107 | ---help--- |
| 112 | Allow the system to enter sleep states in which main memory is | 108 | Allow the system to enter sleep states in which main memory is |
| @@ -140,7 +136,6 @@ config HIBERNATION | |||
| 140 | depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE | 136 | depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE |
| 141 | select LZO_COMPRESS | 137 | select LZO_COMPRESS |
| 142 | select LZO_DECOMPRESS | 138 | select LZO_DECOMPRESS |
| 143 | select SUSPEND_NVS if HAS_IOMEM | ||
| 144 | ---help--- | 139 | ---help--- |
| 145 | Enable the suspend to disk (STD) functionality, which is usually | 140 | Enable the suspend to disk (STD) functionality, which is usually |
| 146 | called "hibernation" in user interfaces. STD checkpoints the | 141 | called "hibernation" in user interfaces. STD checkpoints the |
diff --git a/kernel/power/Makefile b/kernel/power/Makefile index f9063c6b185d..c350e18b53e3 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile | |||
| @@ -1,7 +1,4 @@ | |||
| 1 | 1 | ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG | |
| 2 | ifeq ($(CONFIG_PM_DEBUG),y) | ||
| 3 | EXTRA_CFLAGS += -DDEBUG | ||
| 4 | endif | ||
| 5 | 2 | ||
| 6 | obj-$(CONFIG_PM) += main.o | 3 | obj-$(CONFIG_PM) += main.o |
| 7 | obj-$(CONFIG_PM_SLEEP) += console.o | 4 | obj-$(CONFIG_PM_SLEEP) += console.o |
| @@ -10,6 +7,5 @@ obj-$(CONFIG_SUSPEND) += suspend.o | |||
| 10 | obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o | 7 | obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o |
| 11 | obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ | 8 | obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ |
| 12 | block_io.o | 9 | block_io.o |
| 13 | obj-$(CONFIG_SUSPEND_NVS) += nvs.o | ||
| 14 | 10 | ||
| 15 | obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o | 11 | obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 048d0b514831..1832bd264219 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
| @@ -51,18 +51,18 @@ enum { | |||
| 51 | 51 | ||
| 52 | static int hibernation_mode = HIBERNATION_SHUTDOWN; | 52 | static int hibernation_mode = HIBERNATION_SHUTDOWN; |
| 53 | 53 | ||
| 54 | static struct platform_hibernation_ops *hibernation_ops; | 54 | static const struct platform_hibernation_ops *hibernation_ops; |
| 55 | 55 | ||
| 56 | /** | 56 | /** |
| 57 | * hibernation_set_ops - set the global hibernate operations | 57 | * hibernation_set_ops - set the global hibernate operations |
| 58 | * @ops: the hibernation operations to use in subsequent hibernation transitions | 58 | * @ops: the hibernation operations to use in subsequent hibernation transitions |
| 59 | */ | 59 | */ |
| 60 | 60 | ||
| 61 | void hibernation_set_ops(struct platform_hibernation_ops *ops) | 61 | void hibernation_set_ops(const struct platform_hibernation_ops *ops) |
| 62 | { | 62 | { |
| 63 | if (ops && !(ops->begin && ops->end && ops->pre_snapshot | 63 | if (ops && !(ops->begin && ops->end && ops->pre_snapshot |
| 64 | && ops->prepare && ops->finish && ops->enter && ops->pre_restore | 64 | && ops->prepare && ops->finish && ops->enter && ops->pre_restore |
| 65 | && ops->restore_cleanup)) { | 65 | && ops->restore_cleanup && ops->leave)) { |
| 66 | WARN_ON(1); | 66 | WARN_ON(1); |
| 67 | return; | 67 | return; |
| 68 | } | 68 | } |
| @@ -278,7 +278,7 @@ static int create_image(int platform_mode) | |||
| 278 | goto Enable_irqs; | 278 | goto Enable_irqs; |
| 279 | } | 279 | } |
| 280 | 280 | ||
| 281 | if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events()) | 281 | if (hibernation_test(TEST_CORE) || pm_wakeup_pending()) |
| 282 | goto Power_up; | 282 | goto Power_up; |
| 283 | 283 | ||
| 284 | in_suspend = 1; | 284 | in_suspend = 1; |
| @@ -516,7 +516,7 @@ int hibernation_platform_enter(void) | |||
| 516 | 516 | ||
| 517 | local_irq_disable(); | 517 | local_irq_disable(); |
| 518 | sysdev_suspend(PMSG_HIBERNATE); | 518 | sysdev_suspend(PMSG_HIBERNATE); |
| 519 | if (!pm_check_wakeup_events()) { | 519 | if (pm_wakeup_pending()) { |
| 520 | error = -EAGAIN; | 520 | error = -EAGAIN; |
| 521 | goto Power_up; | 521 | goto Power_up; |
| 522 | } | 522 | } |
| @@ -647,6 +647,7 @@ int hibernate(void) | |||
| 647 | swsusp_free(); | 647 | swsusp_free(); |
| 648 | if (!error) | 648 | if (!error) |
| 649 | power_down(); | 649 | power_down(); |
| 650 | in_suspend = 0; | ||
| 650 | pm_restore_gfp_mask(); | 651 | pm_restore_gfp_mask(); |
| 651 | } else { | 652 | } else { |
| 652 | pr_debug("PM: Image restored successfully.\n"); | 653 | pr_debug("PM: Image restored successfully.\n"); |
diff --git a/kernel/power/nvs.c b/kernel/power/nvs.c deleted file mode 100644 index 1836db60bbb6..000000000000 --- a/kernel/power/nvs.c +++ /dev/null | |||
| @@ -1,136 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008,2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
| 5 | * | ||
| 6 | * This file is released under the GPLv2. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/io.h> | ||
| 10 | #include <linux/kernel.h> | ||
| 11 | #include <linux/list.h> | ||
| 12 | #include <linux/mm.h> | ||
| 13 | #include <linux/slab.h> | ||
| 14 | #include <linux/suspend.h> | ||
| 15 | |||
| 16 | /* | ||
| 17 | * Platforms, like ACPI, may want us to save some memory used by them during | ||
| 18 | * suspend and to restore the contents of this memory during the subsequent | ||
| 19 | * resume. The code below implements a mechanism allowing us to do that. | ||
| 20 | */ | ||
| 21 | |||
| 22 | struct nvs_page { | ||
| 23 | unsigned long phys_start; | ||
| 24 | unsigned int size; | ||
| 25 | void *kaddr; | ||
| 26 | void *data; | ||
| 27 | struct list_head node; | ||
| 28 | }; | ||
| 29 | |||
| 30 | static LIST_HEAD(nvs_list); | ||
| 31 | |||
| 32 | /** | ||
| 33 | * suspend_nvs_register - register platform NVS memory region to save | ||
| 34 | * @start - physical address of the region | ||
| 35 | * @size - size of the region | ||
| 36 | * | ||
| 37 | * The NVS region need not be page-aligned (both ends) and we arrange | ||
| 38 | * things so that the data from page-aligned addresses in this region will | ||
| 39 | * be copied into separate RAM pages. | ||
| 40 | */ | ||
| 41 | int suspend_nvs_register(unsigned long start, unsigned long size) | ||
| 42 | { | ||
| 43 | struct nvs_page *entry, *next; | ||
| 44 | |||
| 45 | while (size > 0) { | ||
| 46 | unsigned int nr_bytes; | ||
| 47 | |||
| 48 | entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL); | ||
| 49 | if (!entry) | ||
| 50 | goto Error; | ||
| 51 | |||
| 52 | list_add_tail(&entry->node, &nvs_list); | ||
| 53 | entry->phys_start = start; | ||
| 54 | nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK); | ||
| 55 | entry->size = (size < nr_bytes) ? size : nr_bytes; | ||
| 56 | |||
| 57 | start += entry->size; | ||
| 58 | size -= entry->size; | ||
| 59 | } | ||
| 60 | return 0; | ||
| 61 | |||
| 62 | Error: | ||
| 63 | list_for_each_entry_safe(entry, next, &nvs_list, node) { | ||
| 64 | list_del(&entry->node); | ||
| 65 | kfree(entry); | ||
| 66 | } | ||
| 67 | return -ENOMEM; | ||
| 68 | } | ||
| 69 | |||
| 70 | /** | ||
| 71 | * suspend_nvs_free - free data pages allocated for saving NVS regions | ||
| 72 | */ | ||
| 73 | void suspend_nvs_free(void) | ||
| 74 | { | ||
| 75 | struct nvs_page *entry; | ||
| 76 | |||
| 77 | list_for_each_entry(entry, &nvs_list, node) | ||
| 78 | if (entry->data) { | ||
| 79 | free_page((unsigned long)entry->data); | ||
| 80 | entry->data = NULL; | ||
| 81 | if (entry->kaddr) { | ||
| 82 | iounmap(entry->kaddr); | ||
| 83 | entry->kaddr = NULL; | ||
| 84 | } | ||
| 85 | } | ||
| 86 | } | ||
| 87 | |||
| 88 | /** | ||
| 89 | * suspend_nvs_alloc - allocate memory necessary for saving NVS regions | ||
| 90 | */ | ||
| 91 | int suspend_nvs_alloc(void) | ||
| 92 | { | ||
| 93 | struct nvs_page *entry; | ||
| 94 | |||
| 95 | list_for_each_entry(entry, &nvs_list, node) { | ||
| 96 | entry->data = (void *)__get_free_page(GFP_KERNEL); | ||
| 97 | if (!entry->data) { | ||
| 98 | suspend_nvs_free(); | ||
| 99 | return -ENOMEM; | ||
| 100 | } | ||
| 101 | } | ||
| 102 | return 0; | ||
| 103 | } | ||
| 104 | |||
| 105 | /** | ||
| 106 | * suspend_nvs_save - save NVS memory regions | ||
| 107 | */ | ||
| 108 | void suspend_nvs_save(void) | ||
| 109 | { | ||
| 110 | struct nvs_page *entry; | ||
| 111 | |||
| 112 | printk(KERN_INFO "PM: Saving platform NVS memory\n"); | ||
| 113 | |||
| 114 | list_for_each_entry(entry, &nvs_list, node) | ||
| 115 | if (entry->data) { | ||
| 116 | entry->kaddr = ioremap(entry->phys_start, entry->size); | ||
| 117 | memcpy(entry->data, entry->kaddr, entry->size); | ||
| 118 | } | ||
| 119 | } | ||
| 120 | |||
| 121 | /** | ||
| 122 | * suspend_nvs_restore - restore NVS memory regions | ||
| 123 | * | ||
| 124 | * This function is going to be called with interrupts disabled, so it | ||
| 125 | * cannot iounmap the virtual addresses used to access the NVS region. | ||
| 126 | */ | ||
| 127 | void suspend_nvs_restore(void) | ||
| 128 | { | ||
| 129 | struct nvs_page *entry; | ||
| 130 | |||
| 131 | printk(KERN_INFO "PM: Restoring platform NVS memory\n"); | ||
| 132 | |||
| 133 | list_for_each_entry(entry, &nvs_list, node) | ||
| 134 | if (entry->data) | ||
| 135 | memcpy(entry->kaddr, entry->data, entry->size); | ||
| 136 | } | ||
diff --git a/kernel/power/process.c b/kernel/power/process.c index e50b4c1b2a0f..d6d2a10320e0 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
| @@ -64,6 +64,12 @@ static int try_to_freeze_tasks(bool sig_only) | |||
| 64 | * perturb a task in TASK_STOPPED or TASK_TRACED. | 64 | * perturb a task in TASK_STOPPED or TASK_TRACED. |
| 65 | * It is "frozen enough". If the task does wake | 65 | * It is "frozen enough". If the task does wake |
| 66 | * up, it will immediately call try_to_freeze. | 66 | * up, it will immediately call try_to_freeze. |
| 67 | * | ||
| 68 | * Because freeze_task() goes through p's | ||
| 69 | * scheduler lock after setting TIF_FREEZE, it's | ||
| 70 | * guaranteed that either we see TASK_RUNNING or | ||
| 71 | * try_to_stop() after schedule() in ptrace/signal | ||
| 72 | * stop sees TIF_FREEZE. | ||
| 67 | */ | 73 | */ |
| 68 | if (!task_is_stopped_or_traced(p) && | 74 | if (!task_is_stopped_or_traced(p) && |
| 69 | !freezer_should_skip(p)) | 75 | !freezer_should_skip(p)) |
| @@ -79,7 +85,7 @@ static int try_to_freeze_tasks(bool sig_only) | |||
| 79 | if (!todo || time_after(jiffies, end_time)) | 85 | if (!todo || time_after(jiffies, end_time)) |
| 80 | break; | 86 | break; |
| 81 | 87 | ||
| 82 | if (!pm_check_wakeup_events()) { | 88 | if (pm_wakeup_pending()) { |
| 83 | wakeup = true; | 89 | wakeup = true; |
| 84 | break; | 90 | break; |
| 85 | } | 91 | } |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 031d5e3a6197..de6f86bfa303 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
| @@ -31,13 +31,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = { | |||
| 31 | [PM_SUSPEND_MEM] = "mem", | 31 | [PM_SUSPEND_MEM] = "mem", |
| 32 | }; | 32 | }; |
| 33 | 33 | ||
| 34 | static struct platform_suspend_ops *suspend_ops; | 34 | static const struct platform_suspend_ops *suspend_ops; |
| 35 | 35 | ||
| 36 | /** | 36 | /** |
| 37 | * suspend_set_ops - Set the global suspend method table. | 37 | * suspend_set_ops - Set the global suspend method table. |
| 38 | * @ops: Pointer to ops structure. | 38 | * @ops: Pointer to ops structure. |
| 39 | */ | 39 | */ |
| 40 | void suspend_set_ops(struct platform_suspend_ops *ops) | 40 | void suspend_set_ops(const struct platform_suspend_ops *ops) |
| 41 | { | 41 | { |
| 42 | mutex_lock(&pm_mutex); | 42 | mutex_lock(&pm_mutex); |
| 43 | suspend_ops = ops; | 43 | suspend_ops = ops; |
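With suspend_set_ops() (and hibernation_set_ops() above) now taking const pointers, platform code can keep its ops table in read-only data. A minimal hypothetical caller, with the foo_* names assumed purely for illustration:

	/* hypothetical platform driver using the const-qualified API */
	static int foo_enter(suspend_state_t state)
	{
		return 0;			/* platform-specific entry */
	}

	static const struct platform_suspend_ops foo_suspend_ops = {
		.valid	= suspend_valid_only_mem,
		.enter	= foo_enter,
	};

	static int __init foo_pm_init(void)
	{
		suspend_set_ops(&foo_suspend_ops);
		return 0;
	}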
| @@ -164,7 +164,7 @@ static int suspend_enter(suspend_state_t state) | |||
| 164 | 164 | ||
| 165 | error = sysdev_suspend(PMSG_SUSPEND); | 165 | error = sysdev_suspend(PMSG_SUSPEND); |
| 166 | if (!error) { | 166 | if (!error) { |
| 167 | if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) { | 167 | if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) { |
| 168 | error = suspend_ops->enter(state); | 168 | error = suspend_ops->enter(state); |
| 169 | events_check_enabled = false; | 169 | events_check_enabled = false; |
| 170 | } | 170 | } |
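The rewritten guard is the same predicate with the helper's sense inverted: pm_wakeup_pending() returning true corresponds to the old pm_check_wakeup_events() returning false. Spelled out:

	/* equivalence of the old and new guards, with p == pm_wakeup_pending()
	 * and pm_check_wakeup_events() == !p:
	 *
	 *   !suspend_test(TEST_CORE) && !p  ==  !(suspend_test(TEST_CORE) || p)
	 */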
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 8c7e4832b9be..7c97c3a0eee3 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
| @@ -224,7 +224,7 @@ static int swsusp_swap_check(void) | |||
| 224 | return res; | 224 | return res; |
| 225 | 225 | ||
| 226 | root_swap = res; | 226 | root_swap = res; |
| 227 | res = blkdev_get(hib_resume_bdev, FMODE_WRITE); | 227 | res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL); |
| 228 | if (res) | 228 | if (res) |
| 229 | return res; | 229 | return res; |
| 230 | 230 | ||
| @@ -888,7 +888,7 @@ out_finish: | |||
| 888 | /** | 888 | /** |
| 889 | * swsusp_read - read the hibernation image. | 889 | * swsusp_read - read the hibernation image. |
| 890 | * @flags_p: flags passed by the "frozen" kernel in the image header should | 890 | * @flags_p: flags passed by the "frozen" kernel in the image header should |
| 891 | * be written into this memeory location | 891 | * be written into this memory location |
| 892 | */ | 892 | */ |
| 893 | 893 | ||
| 894 | int swsusp_read(unsigned int *flags_p) | 894 | int swsusp_read(unsigned int *flags_p) |
| @@ -930,7 +930,8 @@ int swsusp_check(void) | |||
| 930 | { | 930 | { |
| 931 | int error; | 931 | int error; |
| 932 | 932 | ||
| 933 | hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ); | 933 | hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, |
| 934 | FMODE_READ, NULL); | ||
| 934 | if (!IS_ERR(hib_resume_bdev)) { | 935 | if (!IS_ERR(hib_resume_bdev)) { |
| 935 | set_blocksize(hib_resume_bdev, PAGE_SIZE); | 936 | set_blocksize(hib_resume_bdev, PAGE_SIZE); |
| 936 | clear_page(swsusp_header); | 937 | clear_page(swsusp_header); |
diff --git a/kernel/printk.c b/kernel/printk.c index 4642a5c439eb..53d9a9ec88e6 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <linux/syslog.h> | 39 | #include <linux/syslog.h> |
| 40 | #include <linux/cpu.h> | 40 | #include <linux/cpu.h> |
| 41 | #include <linux/notifier.h> | 41 | #include <linux/notifier.h> |
| 42 | #include <linux/rculist.h> | ||
| 42 | 43 | ||
| 43 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
| 44 | 45 | ||
| @@ -273,12 +274,12 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) | |||
| 273 | * at open time. | 274 | * at open time. |
| 274 | */ | 275 | */ |
| 275 | if (type == SYSLOG_ACTION_OPEN || !from_file) { | 276 | if (type == SYSLOG_ACTION_OPEN || !from_file) { |
| 276 | if (dmesg_restrict && !capable(CAP_SYS_ADMIN)) | 277 | if (dmesg_restrict && !capable(CAP_SYSLOG)) |
| 277 | return -EPERM; | 278 | goto warn; /* switch to return -EPERM after 2.6.39 */ |
| 278 | if ((type != SYSLOG_ACTION_READ_ALL && | 279 | if ((type != SYSLOG_ACTION_READ_ALL && |
| 279 | type != SYSLOG_ACTION_SIZE_BUFFER) && | 280 | type != SYSLOG_ACTION_SIZE_BUFFER) && |
| 280 | !capable(CAP_SYS_ADMIN)) | 281 | !capable(CAP_SYSLOG)) |
| 281 | return -EPERM; | 282 | goto warn; /* switch to return -EPERM after 2.6.39 */ |
| 282 | } | 283 | } |
| 283 | 284 | ||
| 284 | error = security_syslog(type); | 285 | error = security_syslog(type); |
| @@ -422,6 +423,12 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) | |||
| 422 | } | 423 | } |
| 423 | out: | 424 | out: |
| 424 | return error; | 425 | return error; |
| 426 | warn: | ||
| 427 | /* remove after 2.6.39 */ | ||
| 428 | if (capable(CAP_SYS_ADMIN)) | ||
| 429 | WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN " | ||
| 430 | "but no CAP_SYSLOG (deprecated and denied).\n"); | ||
| 431 | return -EPERM; | ||
| 425 | } | 432 | } |
| 426 | 433 | ||
| 427 | SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) | 434 | SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) |
| @@ -1496,7 +1503,7 @@ int kmsg_dump_register(struct kmsg_dumper *dumper) | |||
| 1496 | /* Don't allow registering multiple times */ | 1503 | /* Don't allow registering multiple times */ |
| 1497 | if (!dumper->registered) { | 1504 | if (!dumper->registered) { |
| 1498 | dumper->registered = 1; | 1505 | dumper->registered = 1; |
| 1499 | list_add_tail(&dumper->list, &dump_list); | 1506 | list_add_tail_rcu(&dumper->list, &dump_list); |
| 1500 | err = 0; | 1507 | err = 0; |
| 1501 | } | 1508 | } |
| 1502 | spin_unlock_irqrestore(&dump_list_lock, flags); | 1509 | spin_unlock_irqrestore(&dump_list_lock, flags); |
| @@ -1520,29 +1527,16 @@ int kmsg_dump_unregister(struct kmsg_dumper *dumper) | |||
| 1520 | spin_lock_irqsave(&dump_list_lock, flags); | 1527 | spin_lock_irqsave(&dump_list_lock, flags); |
| 1521 | if (dumper->registered) { | 1528 | if (dumper->registered) { |
| 1522 | dumper->registered = 0; | 1529 | dumper->registered = 0; |
| 1523 | list_del(&dumper->list); | 1530 | list_del_rcu(&dumper->list); |
| 1524 | err = 0; | 1531 | err = 0; |
| 1525 | } | 1532 | } |
| 1526 | spin_unlock_irqrestore(&dump_list_lock, flags); | 1533 | spin_unlock_irqrestore(&dump_list_lock, flags); |
| 1534 | synchronize_rcu(); | ||
| 1527 | 1535 | ||
| 1528 | return err; | 1536 | return err; |
| 1529 | } | 1537 | } |
| 1530 | EXPORT_SYMBOL_GPL(kmsg_dump_unregister); | 1538 | EXPORT_SYMBOL_GPL(kmsg_dump_unregister); |
| 1531 | 1539 | ||
| 1532 | static const char * const kmsg_reasons[] = { | ||
| 1533 | [KMSG_DUMP_OOPS] = "oops", | ||
| 1534 | [KMSG_DUMP_PANIC] = "panic", | ||
| 1535 | [KMSG_DUMP_KEXEC] = "kexec", | ||
| 1536 | }; | ||
| 1537 | |||
| 1538 | static const char *kmsg_to_str(enum kmsg_dump_reason reason) | ||
| 1539 | { | ||
| 1540 | if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0) | ||
| 1541 | return "unknown"; | ||
| 1542 | |||
| 1543 | return kmsg_reasons[reason]; | ||
| 1544 | } | ||
| 1545 | |||
| 1546 | /** | 1540 | /** |
| 1547 | * kmsg_dump - dump kernel log to kernel message dumpers. | 1541 | * kmsg_dump - dump kernel log to kernel message dumpers. |
| 1548 | * @reason: the reason (oops, panic etc) for dumping | 1542 | * @reason: the reason (oops, panic etc) for dumping |
| @@ -1581,13 +1575,9 @@ void kmsg_dump(enum kmsg_dump_reason reason) | |||
| 1581 | l2 = chars; | 1575 | l2 = chars; |
| 1582 | } | 1576 | } |
| 1583 | 1577 | ||
| 1584 | if (!spin_trylock_irqsave(&dump_list_lock, flags)) { | 1578 | rcu_read_lock(); |
| 1585 | printk(KERN_ERR "dump_kmsg: dump list lock is held during %s, skipping dump\n", | 1579 | list_for_each_entry_rcu(dumper, &dump_list, list) |
| 1586 | kmsg_to_str(reason)); | ||
| 1587 | return; | ||
| 1588 | } | ||
| 1589 | list_for_each_entry(dumper, &dump_list, list) | ||
| 1590 | dumper->dump(dumper, reason, s1, l1, s2, l2); | 1580 | dumper->dump(dumper, reason, s1, l1, s2, l2); |
| 1591 | spin_unlock_irqrestore(&dump_list_lock, flags); | 1581 | rcu_read_unlock(); |
| 1592 | } | 1582 | } |
| 1593 | #endif | 1583 | #endif |
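kmsg_dump() previously had to spin_trylock and could silently skip a dump from oops context; RCU readers cannot block, so that failure mode (and the kmsg_to_str() message for it) disappears. Writers still serialize on dump_list_lock and call synchronize_rcu() after unlinking, so a dumper cannot be freed while a dump is still walking the list. The generic shape of the pattern, as a sketch:

	/* generic RCU-protected list pattern used above (sketch) */
	#include <linux/list.h>
	#include <linux/rculist.h>
	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct item { struct list_head list; void (*cb)(void); };

	static LIST_HEAD(items);
	static DEFINE_SPINLOCK(items_lock);

	void item_add(struct item *it)
	{
		spin_lock(&items_lock);
		list_add_tail_rcu(&it->list, &items);
		spin_unlock(&items_lock);
	}

	void item_del(struct item *it)
	{
		spin_lock(&items_lock);
		list_del_rcu(&it->list);
		spin_unlock(&items_lock);
		synchronize_rcu();	/* now no reader can still see *it */
	}

	void item_walk(void)		/* safe from atomic context */
	{
		struct item *it;

		rcu_read_lock();
		list_for_each_entry_rcu(it, &items, list)
			it->cb();
		rcu_read_unlock();
	}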
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c index 034493724749..0c343b9a46d5 100644 --- a/kernel/rcutiny.c +++ b/kernel/rcutiny.c | |||
| @@ -189,7 +189,8 @@ static int rcu_kthread(void *arg) | |||
| 189 | unsigned long flags; | 189 | unsigned long flags; |
| 190 | 190 | ||
| 191 | for (;;) { | 191 | for (;;) { |
| 192 | wait_event(rcu_kthread_wq, have_rcu_kthread_work != 0); | 192 | wait_event_interruptible(rcu_kthread_wq, |
| 193 | have_rcu_kthread_work != 0); | ||
| 193 | morework = rcu_boost(); | 194 | morework = rcu_boost(); |
| 194 | local_irq_save(flags); | 195 | local_irq_save(flags); |
| 195 | work = have_rcu_kthread_work; | 196 | work = have_rcu_kthread_work; |
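The switch to wait_event_interruptible() keeps the sleeping kthread out of the load average: wait_event() sleeps in TASK_UNINTERRUPTIBLE, which loadavg counts, while the interruptible variant does not (and kthreads ignore signals by default, so nothing else changes). The idiom, with wq and pending as stand-ins for any wait queue and condition:

	/* sketch: kthread wait loop that does not inflate loadavg */
	for (;;) {
		wait_event_interruptible(wq, pending != 0);
		/* consume work; a spurious wakeup just re-checks the condition */
	}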
diff --git a/kernel/sched.c b/kernel/sched.c index a0eb0941fa84..18d38e4ec7ba 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -553,9 +553,6 @@ struct rq { | |||
| 553 | /* try_to_wake_up() stats */ | 553 | /* try_to_wake_up() stats */ |
| 554 | unsigned int ttwu_count; | 554 | unsigned int ttwu_count; |
| 555 | unsigned int ttwu_local; | 555 | unsigned int ttwu_local; |
| 556 | |||
| 557 | /* BKL stats */ | ||
| 558 | unsigned int bkl_count; | ||
| 559 | #endif | 556 | #endif |
| 560 | }; | 557 | }; |
| 561 | 558 | ||
| @@ -609,6 +606,9 @@ static inline struct task_group *task_group(struct task_struct *p) | |||
| 609 | struct task_group *tg; | 606 | struct task_group *tg; |
| 610 | struct cgroup_subsys_state *css; | 607 | struct cgroup_subsys_state *css; |
| 611 | 608 | ||
| 609 | if (p->flags & PF_EXITING) | ||
| 610 | return &root_task_group; | ||
| 611 | |||
| 612 | css = task_subsys_state_check(p, cpu_cgroup_subsys_id, | 612 | css = task_subsys_state_check(p, cpu_cgroup_subsys_id, |
| 613 | lockdep_is_held(&task_rq(p)->lock)); | 613 | lockdep_is_held(&task_rq(p)->lock)); |
| 614 | tg = container_of(css, struct task_group, css); | 614 | tg = container_of(css, struct task_group, css); |
| @@ -2505,7 +2505,7 @@ out: | |||
| 2505 | * try_to_wake_up_local - try to wake up a local task with rq lock held | 2505 | * try_to_wake_up_local - try to wake up a local task with rq lock held |
| 2506 | * @p: the thread to be awakened | 2506 | * @p: the thread to be awakened |
| 2507 | * | 2507 | * |
| 2508 | * Put @p on the run-queue if it's not alredy there. The caller must | 2508 | * Put @p on the run-queue if it's not already there. The caller must |
| 2509 | * ensure that this_rq() is locked, @p is bound to this_rq() and not | 2509 | * ensure that this_rq() is locked, @p is bound to this_rq() and not |
| 2510 | * the current task. this_rq() stays locked over invocation. | 2510 | * the current task. this_rq() stays locked over invocation. |
| 2511 | */ | 2511 | */ |
| @@ -3887,7 +3887,7 @@ static inline void schedule_debug(struct task_struct *prev) | |||
| 3887 | schedstat_inc(this_rq(), sched_count); | 3887 | schedstat_inc(this_rq(), sched_count); |
| 3888 | #ifdef CONFIG_SCHEDSTATS | 3888 | #ifdef CONFIG_SCHEDSTATS |
| 3889 | if (unlikely(prev->lock_depth >= 0)) { | 3889 | if (unlikely(prev->lock_depth >= 0)) { |
| 3890 | schedstat_inc(this_rq(), bkl_count); | 3890 | schedstat_inc(this_rq(), rq_sched_info.bkl_count); |
| 3891 | schedstat_inc(prev, sched_info.bkl_count); | 3891 | schedstat_inc(prev, sched_info.bkl_count); |
| 3892 | } | 3892 | } |
| 3893 | #endif | 3893 | #endif |
| @@ -4871,7 +4871,8 @@ recheck: | |||
| 4871 | * assigned. | 4871 | * assigned. |
| 4872 | */ | 4872 | */ |
| 4873 | if (rt_bandwidth_enabled() && rt_policy(policy) && | 4873 | if (rt_bandwidth_enabled() && rt_policy(policy) && |
| 4874 | task_group(p)->rt_bandwidth.rt_runtime == 0) { | 4874 | task_group(p)->rt_bandwidth.rt_runtime == 0 && |
| 4875 | !task_group_is_autogroup(task_group(p))) { | ||
| 4875 | __task_rq_unlock(rq); | 4876 | __task_rq_unlock(rq); |
| 4876 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); | 4877 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 4877 | return -EPERM; | 4878 | return -EPERM; |
| @@ -8882,6 +8883,20 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, | |||
| 8882 | } | 8883 | } |
| 8883 | } | 8884 | } |
| 8884 | 8885 | ||
| 8886 | static void | ||
| 8887 | cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task) | ||
| 8888 | { | ||
| 8889 | /* | ||
| 8890 | * cgroup_exit() is called in the copy_process() failure path. | ||
| 8891 | * Ignore this case since the task hasn't run yet; this avoids | ||
| 8892 | * trying to poke a half freed task state from generic code. | ||
| 8893 | */ | ||
| 8894 | if (!(task->flags & PF_EXITING)) | ||
| 8895 | return; | ||
| 8896 | |||
| 8897 | sched_move_task(task); | ||
| 8898 | } | ||
| 8899 | |||
| 8885 | #ifdef CONFIG_FAIR_GROUP_SCHED | 8900 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8886 | static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype, | 8901 | static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype, |
| 8887 | u64 shareval) | 8902 | u64 shareval) |
| @@ -8954,6 +8969,7 @@ struct cgroup_subsys cpu_cgroup_subsys = { | |||
| 8954 | .destroy = cpu_cgroup_destroy, | 8969 | .destroy = cpu_cgroup_destroy, |
| 8955 | .can_attach = cpu_cgroup_can_attach, | 8970 | .can_attach = cpu_cgroup_can_attach, |
| 8956 | .attach = cpu_cgroup_attach, | 8971 | .attach = cpu_cgroup_attach, |
| 8972 | .exit = cpu_cgroup_exit, | ||
| 8957 | .populate = cpu_cgroup_populate, | 8973 | .populate = cpu_cgroup_populate, |
| 8958 | .subsys_id = cpu_cgroup_subsys_id, | 8974 | .subsys_id = cpu_cgroup_subsys_id, |
| 8959 | .early_init = 1, | 8975 | .early_init = 1, |
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c index 32a723b8f84c..9fb656283157 100644 --- a/kernel/sched_autogroup.c +++ b/kernel/sched_autogroup.c | |||
| @@ -27,6 +27,11 @@ static inline void autogroup_destroy(struct kref *kref) | |||
| 27 | { | 27 | { |
| 28 | struct autogroup *ag = container_of(kref, struct autogroup, kref); | 28 | struct autogroup *ag = container_of(kref, struct autogroup, kref); |
| 29 | 29 | ||
| 30 | #ifdef CONFIG_RT_GROUP_SCHED | ||
| 31 | /* We've redirected RT tasks to the root task group... */ | ||
| 32 | ag->tg->rt_se = NULL; | ||
| 33 | ag->tg->rt_rq = NULL; | ||
| 34 | #endif | ||
| 30 | sched_destroy_group(ag->tg); | 35 | sched_destroy_group(ag->tg); |
| 31 | } | 36 | } |
| 32 | 37 | ||
| @@ -55,6 +60,10 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p) | |||
| 55 | return ag; | 60 | return ag; |
| 56 | } | 61 | } |
| 57 | 62 | ||
| 63 | #ifdef CONFIG_RT_GROUP_SCHED | ||
| 64 | static void free_rt_sched_group(struct task_group *tg); | ||
| 65 | #endif | ||
| 66 | |||
| 58 | static inline struct autogroup *autogroup_create(void) | 67 | static inline struct autogroup *autogroup_create(void) |
| 59 | { | 68 | { |
| 60 | struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL); | 69 | struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL); |
| @@ -72,6 +81,19 @@ static inline struct autogroup *autogroup_create(void) | |||
| 72 | init_rwsem(&ag->lock); | 81 | init_rwsem(&ag->lock); |
| 73 | ag->id = atomic_inc_return(&autogroup_seq_nr); | 82 | ag->id = atomic_inc_return(&autogroup_seq_nr); |
| 74 | ag->tg = tg; | 83 | ag->tg = tg; |
| 84 | #ifdef CONFIG_RT_GROUP_SCHED | ||
| 85 | /* | ||
| 86 | * Autogroup RT tasks are redirected to the root task group | ||
| 87 | * so we don't have to move tasks around upon policy change, | ||
| 88 | * or flail around trying to allocate bandwidth on the fly. | ||
| 89 | * A bandwidth exception in __sched_setscheduler() allows | ||
| 90 | * the policy change to proceed. Thereafter, task_group() | ||
| 91 | * returns &root_task_group, so zero bandwidth is required. | ||
| 92 | */ | ||
| 93 | free_rt_sched_group(tg); | ||
| 94 | tg->rt_se = root_task_group.rt_se; | ||
| 95 | tg->rt_rq = root_task_group.rt_rq; | ||
| 96 | #endif | ||
| 75 | tg->autogroup = ag; | 97 | tg->autogroup = ag; |
| 76 | 98 | ||
| 77 | return ag; | 99 | return ag; |
| @@ -106,6 +128,11 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg) | |||
| 106 | return true; | 128 | return true; |
| 107 | } | 129 | } |
| 108 | 130 | ||
| 131 | static inline bool task_group_is_autogroup(struct task_group *tg) | ||
| 132 | { | ||
| 133 | return tg != &root_task_group && tg->autogroup; | ||
| 134 | } | ||
| 135 | |||
| 109 | static inline struct task_group * | 136 | static inline struct task_group * |
| 110 | autogroup_task_group(struct task_struct *p, struct task_group *tg) | 137 | autogroup_task_group(struct task_struct *p, struct task_group *tg) |
| 111 | { | 138 | { |
| @@ -231,6 +258,11 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m) | |||
| 231 | #ifdef CONFIG_SCHED_DEBUG | 258 | #ifdef CONFIG_SCHED_DEBUG |
| 232 | static inline int autogroup_path(struct task_group *tg, char *buf, int buflen) | 259 | static inline int autogroup_path(struct task_group *tg, char *buf, int buflen) |
| 233 | { | 260 | { |
| 261 | int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled); | ||
| 262 | |||
| 263 | if (!enabled || !tg->autogroup) | ||
| 264 | return 0; | ||
| 265 | |||
| 234 | return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id); | 266 | return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id); |
| 235 | } | 267 | } |
| 236 | #endif /* CONFIG_SCHED_DEBUG */ | 268 | #endif /* CONFIG_SCHED_DEBUG */ |
diff --git a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h index 5358e241cb20..7b859ffe5dad 100644 --- a/kernel/sched_autogroup.h +++ b/kernel/sched_autogroup.h | |||
| @@ -15,6 +15,10 @@ autogroup_task_group(struct task_struct *p, struct task_group *tg); | |||
| 15 | 15 | ||
| 16 | static inline void autogroup_init(struct task_struct *init_task) { } | 16 | static inline void autogroup_init(struct task_struct *init_task) { } |
| 17 | static inline void autogroup_free(struct task_group *tg) { } | 17 | static inline void autogroup_free(struct task_group *tg) { } |
| 18 | static inline bool task_group_is_autogroup(struct task_group *tg) | ||
| 19 | { | ||
| 20 | return 0; | ||
| 21 | } | ||
| 18 | 22 | ||
| 19 | static inline struct task_group * | 23 | static inline struct task_group * |
| 20 | autogroup_task_group(struct task_struct *p, struct task_group *tg) | 24 | autogroup_task_group(struct task_struct *p, struct task_group *tg) |
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 1dfae3d014b5..eb6cb8edd075 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c | |||
| @@ -16,6 +16,8 @@ | |||
| 16 | #include <linux/kallsyms.h> | 16 | #include <linux/kallsyms.h> |
| 17 | #include <linux/utsname.h> | 17 | #include <linux/utsname.h> |
| 18 | 18 | ||
| 19 | static DEFINE_SPINLOCK(sched_debug_lock); | ||
| 20 | |||
| 19 | /* | 21 | /* |
| 20 | * This allows printing both to /proc/sched_debug and | 22 | * This allows printing both to /proc/sched_debug and |
| 21 | * to the console | 23 | * to the console |
| @@ -86,6 +88,26 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group | |||
| 86 | } | 88 | } |
| 87 | #endif | 89 | #endif |
| 88 | 90 | ||
| 91 | #ifdef CONFIG_CGROUP_SCHED | ||
| 92 | static char group_path[PATH_MAX]; | ||
| 93 | |||
| 94 | static char *task_group_path(struct task_group *tg) | ||
| 95 | { | ||
| 96 | if (autogroup_path(tg, group_path, PATH_MAX)) | ||
| 97 | return group_path; | ||
| 98 | |||
| 99 | /* | ||
| 100 | * May be NULL if the underlying cgroup isn't fully-created yet | ||
| 101 | */ | ||
| 102 | if (!tg->css.cgroup) { | ||
| 103 | group_path[0] = '\0'; | ||
| 104 | return group_path; | ||
| 105 | } | ||
| 106 | cgroup_path(tg->css.cgroup, group_path, PATH_MAX); | ||
| 107 | return group_path; | ||
| 108 | } | ||
| 109 | #endif | ||
| 110 | |||
| 89 | static void | 111 | static void |
| 90 | print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) | 112 | print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) |
| 91 | { | 113 | { |
| @@ -108,6 +130,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) | |||
| 108 | SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld", | 130 | SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld", |
| 109 | 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L); | 131 | 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L); |
| 110 | #endif | 132 | #endif |
| 133 | #ifdef CONFIG_CGROUP_SCHED | ||
| 134 | SEQ_printf(m, " %s", task_group_path(task_group(p))); | ||
| 135 | #endif | ||
| 111 | 136 | ||
| 112 | SEQ_printf(m, "\n"); | 137 | SEQ_printf(m, "\n"); |
| 113 | } | 138 | } |
| @@ -144,7 +169,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
| 144 | struct sched_entity *last; | 169 | struct sched_entity *last; |
| 145 | unsigned long flags; | 170 | unsigned long flags; |
| 146 | 171 | ||
| 172 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
| 173 | SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg)); | ||
| 174 | #else | ||
| 147 | SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu); | 175 | SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu); |
| 176 | #endif | ||
| 148 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", | 177 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", |
| 149 | SPLIT_NS(cfs_rq->exec_clock)); | 178 | SPLIT_NS(cfs_rq->exec_clock)); |
| 150 | 179 | ||
| @@ -191,7 +220,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
| 191 | 220 | ||
| 192 | void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) | 221 | void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) |
| 193 | { | 222 | { |
| 223 | #ifdef CONFIG_RT_GROUP_SCHED | ||
| 224 | SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg)); | ||
| 225 | #else | ||
| 194 | SEQ_printf(m, "\nrt_rq[%d]:\n", cpu); | 226 | SEQ_printf(m, "\nrt_rq[%d]:\n", cpu); |
| 227 | #endif | ||
| 195 | 228 | ||
| 196 | #define P(x) \ | 229 | #define P(x) \ |
| 197 | SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x)) | 230 | SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x)) |
| @@ -212,6 +245,7 @@ extern __read_mostly int sched_clock_running; | |||
| 212 | static void print_cpu(struct seq_file *m, int cpu) | 245 | static void print_cpu(struct seq_file *m, int cpu) |
| 213 | { | 246 | { |
| 214 | struct rq *rq = cpu_rq(cpu); | 247 | struct rq *rq = cpu_rq(cpu); |
| 248 | unsigned long flags; | ||
| 215 | 249 | ||
| 216 | #ifdef CONFIG_X86 | 250 | #ifdef CONFIG_X86 |
| 217 | { | 251 | { |
| @@ -262,14 +296,20 @@ static void print_cpu(struct seq_file *m, int cpu) | |||
| 262 | P(ttwu_count); | 296 | P(ttwu_count); |
| 263 | P(ttwu_local); | 297 | P(ttwu_local); |
| 264 | 298 | ||
| 265 | P(bkl_count); | 299 | SEQ_printf(m, " .%-30s: %d\n", "bkl_count", |
| 300 | rq->rq_sched_info.bkl_count); | ||
| 266 | 301 | ||
| 267 | #undef P | 302 | #undef P |
| 303 | #undef P64 | ||
| 268 | #endif | 304 | #endif |
| 305 | spin_lock_irqsave(&sched_debug_lock, flags); | ||
| 269 | print_cfs_stats(m, cpu); | 306 | print_cfs_stats(m, cpu); |
| 270 | print_rt_stats(m, cpu); | 307 | print_rt_stats(m, cpu); |
| 271 | 308 | ||
| 309 | rcu_read_lock(); | ||
| 272 | print_rq(m, rq, cpu); | 310 | print_rq(m, rq, cpu); |
| 311 | rcu_read_unlock(); | ||
| 312 | spin_unlock_irqrestore(&sched_debug_lock, flags); | ||
| 273 | } | 313 | } |
| 274 | 314 | ||
| 275 | static const char *sched_tunable_scaling_names[] = { | 315 | static const char *sched_tunable_scaling_names[] = { |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index c62ebae65cf0..354769979c02 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
| @@ -699,7 +699,8 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
| 699 | cfs_rq->nr_running--; | 699 | cfs_rq->nr_running--; |
| 700 | } | 700 | } |
| 701 | 701 | ||
| 702 | #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED | 702 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 703 | # ifdef CONFIG_SMP | ||
| 703 | static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq, | 704 | static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq, |
| 704 | int global_update) | 705 | int global_update) |
| 705 | { | 706 | { |
| @@ -762,6 +763,51 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) | |||
| 762 | list_del_leaf_cfs_rq(cfs_rq); | 763 | list_del_leaf_cfs_rq(cfs_rq); |
| 763 | } | 764 | } |
| 764 | 765 | ||
| 766 | static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg, | ||
| 767 | long weight_delta) | ||
| 768 | { | ||
| 769 | long load_weight, load, shares; | ||
| 770 | |||
| 771 | load = cfs_rq->load.weight + weight_delta; | ||
| 772 | |||
| 773 | load_weight = atomic_read(&tg->load_weight); | ||
| 774 | load_weight -= cfs_rq->load_contribution; | ||
| 775 | load_weight += load; | ||
| 776 | |||
| 777 | shares = (tg->shares * load); | ||
| 778 | if (load_weight) | ||
| 779 | shares /= load_weight; | ||
| 780 | |||
| 781 | if (shares < MIN_SHARES) | ||
| 782 | shares = MIN_SHARES; | ||
| 783 | if (shares > tg->shares) | ||
| 784 | shares = tg->shares; | ||
| 785 | |||
| 786 | return shares; | ||
| 787 | } | ||
| 788 | |||
| 789 | static void update_entity_shares_tick(struct cfs_rq *cfs_rq) | ||
| 790 | { | ||
| 791 | if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) { | ||
| 792 | update_cfs_load(cfs_rq, 0); | ||
| 793 | update_cfs_shares(cfs_rq, 0); | ||
| 794 | } | ||
| 795 | } | ||
| 796 | # else /* CONFIG_SMP */ | ||
| 797 | static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) | ||
| 798 | { | ||
| 799 | } | ||
| 800 | |||
| 801 | static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg, | ||
| 802 | long weight_delta) | ||
| 803 | { | ||
| 804 | return tg->shares; | ||
| 805 | } | ||
| 806 | |||
| 807 | static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq) | ||
| 808 | { | ||
| 809 | } | ||
| 810 | # endif /* CONFIG_SMP */ | ||
| 765 | static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, | 811 | static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, |
| 766 | unsigned long weight) | 812 | unsigned long weight) |
| 767 | { | 813 | { |
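Factored out, the computation is shares = tg->shares * load / load_weight, where load_weight is the group-wide weight with this cfs_rq's published contribution replaced by its current load, clamped to [MIN_SHARES, tg->shares]. A worked pass with illustrative numbers:

	/* worked example for calc_cfs_shares() (illustrative numbers):
	 *   tg->shares = 1024, cfs_rq->load.weight = 512, weight_delta = 0
	 *   tg->load_weight = 2048, cfs_rq->load_contribution = 512
	 *
	 *   load        = 512 + 0             = 512
	 *   load_weight = 2048 - 512 + 512    = 2048
	 *   shares      = 1024 * 512 / 2048   = 256
	 *
	 * i.e. this cpu's group entity gets 1/4 of the group's weight,
	 * matching its 1/4 share of the group's total load.
	 */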
| @@ -782,7 +828,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta) | |||
| 782 | { | 828 | { |
| 783 | struct task_group *tg; | 829 | struct task_group *tg; |
| 784 | struct sched_entity *se; | 830 | struct sched_entity *se; |
| 785 | long load_weight, load, shares; | 831 | long shares; |
| 786 | 832 | ||
| 787 | if (!cfs_rq) | 833 | if (!cfs_rq) |
| 788 | return; | 834 | return; |
| @@ -791,32 +837,14 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta) | |||
| 791 | se = tg->se[cpu_of(rq_of(cfs_rq))]; | 837 | se = tg->se[cpu_of(rq_of(cfs_rq))]; |
| 792 | if (!se) | 838 | if (!se) |
| 793 | return; | 839 | return; |
| 794 | 840 | #ifndef CONFIG_SMP | |
| 795 | load = cfs_rq->load.weight + weight_delta; | 841 | if (likely(se->load.weight == tg->shares)) |
| 796 | 842 | return; | |
| 797 | load_weight = atomic_read(&tg->load_weight); | 843 | #endif |
| 798 | load_weight -= cfs_rq->load_contribution; | 844 | shares = calc_cfs_shares(cfs_rq, tg, weight_delta); |
| 799 | load_weight += load; | ||
| 800 | |||
| 801 | shares = (tg->shares * load); | ||
| 802 | if (load_weight) | ||
| 803 | shares /= load_weight; | ||
| 804 | |||
| 805 | if (shares < MIN_SHARES) | ||
| 806 | shares = MIN_SHARES; | ||
| 807 | if (shares > tg->shares) | ||
| 808 | shares = tg->shares; | ||
| 809 | 845 | ||
| 810 | reweight_entity(cfs_rq_of(se), se, shares); | 846 | reweight_entity(cfs_rq_of(se), se, shares); |
| 811 | } | 847 | } |
| 812 | |||
| 813 | static void update_entity_shares_tick(struct cfs_rq *cfs_rq) | ||
| 814 | { | ||
| 815 | if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) { | ||
| 816 | update_cfs_load(cfs_rq, 0); | ||
| 817 | update_cfs_shares(cfs_rq, 0); | ||
| 818 | } | ||
| 819 | } | ||
| 820 | #else /* CONFIG_FAIR_GROUP_SCHED */ | 848 | #else /* CONFIG_FAIR_GROUP_SCHED */ |
| 821 | static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) | 849 | static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) |
| 822 | { | 850 | { |
| @@ -1062,6 +1090,9 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | |||
| 1062 | struct sched_entity *se = __pick_next_entity(cfs_rq); | 1090 | struct sched_entity *se = __pick_next_entity(cfs_rq); |
| 1063 | s64 delta = curr->vruntime - se->vruntime; | 1091 | s64 delta = curr->vruntime - se->vruntime; |
| 1064 | 1092 | ||
| 1093 | if (delta < 0) | ||
| 1094 | return; | ||
| 1095 | |||
| 1065 | if (delta > ideal_runtime) | 1096 | if (delta > ideal_runtime) |
| 1066 | resched_task(rq_of(cfs_rq)->curr); | 1097 | resched_task(rq_of(cfs_rq)->curr); |
| 1067 | } | 1098 | } |
| @@ -1362,27 +1393,27 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg) | |||
| 1362 | return wl; | 1393 | return wl; |
| 1363 | 1394 | ||
| 1364 | for_each_sched_entity(se) { | 1395 | for_each_sched_entity(se) { |
| 1365 | long S, rw, s, a, b; | 1396 | long lw, w; |
| 1366 | 1397 | ||
| 1367 | S = se->my_q->tg->shares; | 1398 | tg = se->my_q->tg; |
| 1368 | s = se->load.weight; | 1399 | w = se->my_q->load.weight; |
| 1369 | rw = se->my_q->load.weight; | ||
| 1370 | 1400 | ||
| 1371 | a = S*(rw + wl); | 1401 | /* use this cpu's instantaneous contribution */ |
| 1372 | b = S*rw + s*wg; | 1402 | lw = atomic_read(&tg->load_weight); |
| 1403 | lw -= se->my_q->load_contribution; | ||
| 1404 | lw += w + wg; | ||
| 1373 | 1405 | ||
| 1374 | wl = s*(a-b); | 1406 | wl += w; |
| 1375 | 1407 | ||
| 1376 | if (likely(b)) | 1408 | if (lw > 0 && wl < lw) |
| 1377 | wl /= b; | 1409 | wl = (wl * tg->shares) / lw; |
| 1410 | else | ||
| 1411 | wl = tg->shares; | ||
| 1378 | 1412 | ||
| 1379 | /* | 1413 | /* zero point is MIN_SHARES */ |
| 1380 | * Assume the group is already running and will | 1414 | if (wl < MIN_SHARES) |
| 1381 | * thus already be accounted for in the weight. | 1415 | wl = MIN_SHARES; |
| 1382 | * | 1416 | wl -= se->load.weight; |
| 1383 | * That is, moving shares between CPUs, does not | ||
| 1384 | * alter the group weight. | ||
| 1385 | */ | ||
| 1386 | wg = 0; | 1417 | wg = 0; |
| 1387 | } | 1418 | } |
| 1388 | 1419 | ||
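The rewritten loop drops the closed-form S*(rw+wl) algebra in favour of an estimate from this cpu's instantaneous contribution: refresh the group-wide weight lw with this queue's current load, compute the entity's post-change weight as wl * tg->shares / lw (clamped below at MIN_SHARES), and carry the delta against the entity's current weight up to the next level. One level worked through with assumed numbers:

	/* illustrative pass through one level of the new effective_load() loop:
	 *   tg->shares = 1024, w = se->my_q->load.weight = 512, wg = 0
	 *   tg->load_weight = 2048, my_q->load_contribution = 512
	 *   incoming wl (weight being added) = 512
	 *
	 *   lw = 2048 - 512 + (512 + 0)   = 2048
	 *   wl = 512 + 512                = 1024   (queue weight after the add)
	 *   wl = 1024 * 1024 / 2048       = 512    (entity weight after the add)
	 *   wl -= se->load.weight                  (delta vs. current weight)
	 */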
diff --git a/kernel/smp.c b/kernel/smp.c index 12ed8b013e2d..9910744f0856 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
| 14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
| 15 | 15 | ||
| 16 | #ifdef CONFIG_USE_GENERIC_SMP_HELPERS | ||
| 16 | static struct { | 17 | static struct { |
| 17 | struct list_head queue; | 18 | struct list_head queue; |
| 18 | raw_spinlock_t lock; | 19 | raw_spinlock_t lock; |
| @@ -193,23 +194,52 @@ void generic_smp_call_function_interrupt(void) | |||
| 193 | */ | 194 | */ |
| 194 | list_for_each_entry_rcu(data, &call_function.queue, csd.list) { | 195 | list_for_each_entry_rcu(data, &call_function.queue, csd.list) { |
| 195 | int refs; | 196 | int refs; |
| 197 | void (*func) (void *info); | ||
| 196 | 198 | ||
| 197 | if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) | 199 | /* |
| 200 | * Since we walk the list without any locks, we might | ||
| 201 | * see an entry that was completed, removed from the | ||
| 202 | * list and is in the process of being reused. | ||
| 203 | * | ||
| 204 | * We must check that the cpu is in the cpumask before | ||
| 205 | * checking the refs, and both must be set before | ||
| 206 | * executing the callback on this cpu. | ||
| 207 | */ | ||
| 208 | |||
| 209 | if (!cpumask_test_cpu(cpu, data->cpumask)) | ||
| 210 | continue; | ||
| 211 | |||
| 212 | smp_rmb(); | ||
| 213 | |||
| 214 | if (atomic_read(&data->refs) == 0) | ||
| 198 | continue; | 215 | continue; |
| 199 | 216 | ||
| 217 | func = data->csd.func; /* for later warn */ | ||
| 200 | data->csd.func(data->csd.info); | 218 | data->csd.func(data->csd.info); |
| 201 | 219 | ||
| 220 | /* | ||
| 221 | * If the cpu mask is not still set then it enabled interrupts, | ||
| 222 | * we took another smp interrupt, and executed the function | ||
| 223 | * twice on this cpu. In theory that copy decremented refs. | ||
| 224 | */ | ||
| 225 | if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) { | ||
| 226 | WARN(1, "%pS enabled interrupts and double executed\n", | ||
| 227 | func); | ||
| 228 | continue; | ||
| 229 | } | ||
| 230 | |||
| 202 | refs = atomic_dec_return(&data->refs); | 231 | refs = atomic_dec_return(&data->refs); |
| 203 | WARN_ON(refs < 0); | 232 | WARN_ON(refs < 0); |
| 204 | if (!refs) { | ||
| 205 | raw_spin_lock(&call_function.lock); | ||
| 206 | list_del_rcu(&data->csd.list); | ||
| 207 | raw_spin_unlock(&call_function.lock); | ||
| 208 | } | ||
| 209 | 233 | ||
| 210 | if (refs) | 234 | if (refs) |
| 211 | continue; | 235 | continue; |
| 212 | 236 | ||
| 237 | WARN_ON(!cpumask_empty(data->cpumask)); | ||
| 238 | |||
| 239 | raw_spin_lock(&call_function.lock); | ||
| 240 | list_del_rcu(&data->csd.list); | ||
| 241 | raw_spin_unlock(&call_function.lock); | ||
| 242 | |||
| 213 | csd_unlock(&data->csd); | 243 | csd_unlock(&data->csd); |
| 214 | } | 244 | } |
| 215 | 245 | ||
| @@ -429,7 +459,7 @@ void smp_call_function_many(const struct cpumask *mask, | |||
| 429 | * can't happen. | 459 | * can't happen. |
| 430 | */ | 460 | */ |
| 431 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() | 461 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() |
| 432 | && !oops_in_progress); | 462 | && !oops_in_progress && !early_boot_irqs_disabled); |
| 433 | 463 | ||
| 434 | /* So, what's a CPU they want? Ignoring this one. */ | 464 | /* So, what's a CPU they want? Ignoring this one. */ |
| 435 | cpu = cpumask_first_and(mask, cpu_online_mask); | 465 | cpu = cpumask_first_and(mask, cpu_online_mask); |
| @@ -453,11 +483,21 @@ void smp_call_function_many(const struct cpumask *mask, | |||
| 453 | 483 | ||
| 454 | data = &__get_cpu_var(cfd_data); | 484 | data = &__get_cpu_var(cfd_data); |
| 455 | csd_lock(&data->csd); | 485 | csd_lock(&data->csd); |
| 486 | BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask)); | ||
| 456 | 487 | ||
| 457 | data->csd.func = func; | 488 | data->csd.func = func; |
| 458 | data->csd.info = info; | 489 | data->csd.info = info; |
| 459 | cpumask_and(data->cpumask, mask, cpu_online_mask); | 490 | cpumask_and(data->cpumask, mask, cpu_online_mask); |
| 460 | cpumask_clear_cpu(this_cpu, data->cpumask); | 491 | cpumask_clear_cpu(this_cpu, data->cpumask); |
| 492 | |||
| 493 | /* | ||
| 494 | * To ensure the interrupt handler gets a complete view | ||
| 495 | * we order the cpumask and refs writes and order the read | ||
| 496 | * of them in the interrupt handler. In addition we may | ||
| 497 | * only clear our own cpu bit from the mask. | ||
| 498 | */ | ||
| 499 | smp_wmb(); | ||
| 500 | |||
| 461 | atomic_set(&data->refs, cpumask_weight(data->cpumask)); | 501 | atomic_set(&data->refs, cpumask_weight(data->cpumask)); |
| 462 | 502 | ||
| 463 | raw_spin_lock_irqsave(&call_function.lock, flags); | 503 | raw_spin_lock_irqsave(&call_function.lock, flags); |
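This smp_wmb() pairs with the smp_rmb() added to generic_smp_call_function_interrupt() above: as the comment says, the cpumask and refs writes are ordered against the handler's reads of them, and only the owning cpu may clear its own mask bit. Reduced to the bare pairing (fragments mirroring the two sides, not standalone code):

	/* sender (smp_call_function_many): publish mask before refs */
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	smp_wmb();
	atomic_set(&data->refs, cpumask_weight(data->cpumask));

	/* handler (generic_smp_call_function_interrupt): read mask before refs */
	if (!cpumask_test_cpu(cpu, data->cpumask))
		continue;	/* not for us, or a reused entry */
	smp_rmb();
	if (atomic_read(&data->refs) == 0)
		continue;	/* entry not fully published yet */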
| @@ -529,3 +569,24 @@ void ipi_call_unlock_irq(void) | |||
| 529 | { | 569 | { |
| 530 | raw_spin_unlock_irq(&call_function.lock); | 570 | raw_spin_unlock_irq(&call_function.lock); |
| 531 | } | 571 | } |
| 572 | #endif /* USE_GENERIC_SMP_HELPERS */ | ||
| 573 | |||
| 574 | /* | ||
| 575 | * Call a function on all processors. May be used during early boot while | ||
| 576 | * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead | ||
| 577 | * of local_irq_disable/enable(). | ||
| 578 | */ | ||
| 579 | int on_each_cpu(void (*func) (void *info), void *info, int wait) | ||
| 580 | { | ||
| 581 | unsigned long flags; | ||
| 582 | int ret = 0; | ||
| 583 | |||
| 584 | preempt_disable(); | ||
| 585 | ret = smp_call_function(func, info, wait); | ||
| 586 | local_irq_save(flags); | ||
| 587 | func(info); | ||
| 588 | local_irq_restore(flags); | ||
| 589 | preempt_enable(); | ||
| 590 | return ret; | ||
| 591 | } | ||
| 592 | EXPORT_SYMBOL(on_each_cpu); | ||
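Moving on_each_cpu() out of softirq.c also swaps local_irq_disable/enable for local_irq_save/restore, which is what makes the early-boot case (interrupts still off, early_boot_irqs_disabled set) safe: the function no longer unconditionally re-enables interrupts. A typical caller, with flush_local_cache as a hypothetical callback:

	/* hypothetical example: run a cache flush on every online cpu */
	static void flush_local_cache(void *unused)
	{
		/* called on each cpu; the local call runs with irqs disabled */
	}

	static void flush_all_caches(void)
	{
		on_each_cpu(flush_local_cache, NULL, 1);	/* 1 == wait */
	}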
diff --git a/kernel/softirq.c b/kernel/softirq.c index 0823778f87fc..68eb5efec388 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -885,25 +885,6 @@ static __init int spawn_ksoftirqd(void) | |||
| 885 | } | 885 | } |
| 886 | early_initcall(spawn_ksoftirqd); | 886 | early_initcall(spawn_ksoftirqd); |
| 887 | 887 | ||
| 888 | #ifdef CONFIG_SMP | ||
| 889 | /* | ||
| 890 | * Call a function on all processors | ||
| 891 | */ | ||
| 892 | int on_each_cpu(void (*func) (void *info), void *info, int wait) | ||
| 893 | { | ||
| 894 | int ret = 0; | ||
| 895 | |||
| 896 | preempt_disable(); | ||
| 897 | ret = smp_call_function(func, info, wait); | ||
| 898 | local_irq_disable(); | ||
| 899 | func(info); | ||
| 900 | local_irq_enable(); | ||
| 901 | preempt_enable(); | ||
| 902 | return ret; | ||
| 903 | } | ||
| 904 | EXPORT_SYMBOL(on_each_cpu); | ||
| 905 | #endif | ||
| 906 | |||
| 907 | /* | 888 | /* |
| 908 | * [ These __weak aliases are kept in a separate compilation unit, so that | 889 | * [ These __weak aliases are kept in a separate compilation unit, so that |
| 909 | * GCC does not inline them incorrectly. ] | 890 | * GCC does not inline them incorrectly. ] |
diff --git a/kernel/srcu.c b/kernel/srcu.c index 98d8c1e80edb..73ce23feaea9 100644 --- a/kernel/srcu.c +++ b/kernel/srcu.c | |||
| @@ -156,6 +156,16 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx) | |||
| 156 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); | 156 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); |
| 157 | 157 | ||
| 158 | /* | 158 | /* |
| 159 | * We use an adaptive strategy for synchronize_srcu() and especially for | ||
| 160 | * synchronize_srcu_expedited(). We spin for a fixed time period | ||
| 161 | * (defined below) to allow SRCU readers to exit their read-side critical | ||
| 162 | * sections. If there are still some readers after 10 microseconds, | ||
| 163 | * we repeatedly block for 1-jiffy (1/HZ second) periods. This approach | ||
| 164 | * has done well in testing, so there is no need for a config parameter. | ||
| 165 | */ | ||
| 166 | #define SYNCHRONIZE_SRCU_READER_DELAY 10 | ||
| 167 | |||
| 168 | /* | ||
| 159 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). | 169 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). |
| 160 | */ | 170 | */ |
| 161 | static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void)) | 171 | static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void)) |
| @@ -207,11 +217,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void)) | |||
| 207 | * will have finished executing. We initially give readers | 217 | * will have finished executing. We initially give readers |
| 208 | * an arbitrarily chosen 10 microseconds to get out of their | 218 | * an arbitrarily chosen 10 microseconds to get out of their |
| 209 | * SRCU read-side critical sections, then loop waiting 1/HZ | 219 | * SRCU read-side critical sections, then loop waiting 1/HZ |
| 210 | * seconds per iteration. | 220 | * seconds per iteration. The 10-microsecond value has done |
| 221 | * very well in testing. | ||
| 211 | */ | 222 | */ |
| 212 | 223 | ||
| 213 | if (srcu_readers_active_idx(sp, idx)) | 224 | if (srcu_readers_active_idx(sp, idx)) |
| 214 | udelay(CONFIG_SRCU_SYNCHRONIZE_DELAY); | 225 | udelay(SYNCHRONIZE_SRCU_READER_DELAY); |
| 215 | while (srcu_readers_active_idx(sp, idx)) | 226 | while (srcu_readers_active_idx(sp, idx)) |
| 216 | schedule_timeout_interruptible(1); | 227 | schedule_timeout_interruptible(1); |
| 217 | 228 | ||
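SYNCHRONIZE_SRCU_READER_DELAY replaces the former CONFIG_SRCU_SYNCHRONIZE_DELAY Kconfig knob with a fixed constant, and the grace-period wait stays a two-phase spin-then-sleep poll. The pattern in isolation, as a hedged sketch with illustrative names (still_busy, wait_for_readers):

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/sched.h>

#define READER_DELAY_US	10	/* mirrors SYNCHRONIZE_SRCU_READER_DELAY */

static void wait_for_readers(bool (*still_busy)(void *), void *arg)
{
	/* fast path: give readers a short busy-wait window to finish */
	if (still_busy(arg))
		udelay(READER_DELAY_US);

	/* slow path: sleep one jiffy per iteration until they are gone */
	while (still_busy(arg))
		schedule_timeout_interruptible(1);
}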
diff --git a/kernel/sys.c b/kernel/sys.c index 2745dcdb6c6c..31b71a276b40 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -43,6 +43,8 @@ | |||
| 43 | #include <linux/kprobes.h> | 43 | #include <linux/kprobes.h> |
| 44 | #include <linux/user_namespace.h> | 44 | #include <linux/user_namespace.h> |
| 45 | 45 | ||
| 46 | #include <linux/kmsg_dump.h> | ||
| 47 | |||
| 46 | #include <asm/uaccess.h> | 48 | #include <asm/uaccess.h> |
| 47 | #include <asm/io.h> | 49 | #include <asm/io.h> |
| 48 | #include <asm/unistd.h> | 50 | #include <asm/unistd.h> |
| @@ -285,6 +287,7 @@ out_unlock: | |||
| 285 | */ | 287 | */ |
| 286 | void emergency_restart(void) | 288 | void emergency_restart(void) |
| 287 | { | 289 | { |
| 290 | kmsg_dump(KMSG_DUMP_EMERG); | ||
| 288 | machine_emergency_restart(); | 291 | machine_emergency_restart(); |
| 289 | } | 292 | } |
| 290 | EXPORT_SYMBOL_GPL(emergency_restart); | 293 | EXPORT_SYMBOL_GPL(emergency_restart); |
| @@ -312,6 +315,7 @@ void kernel_restart(char *cmd) | |||
| 312 | printk(KERN_EMERG "Restarting system.\n"); | 315 | printk(KERN_EMERG "Restarting system.\n"); |
| 313 | else | 316 | else |
| 314 | printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd); | 317 | printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd); |
| 318 | kmsg_dump(KMSG_DUMP_RESTART); | ||
| 315 | machine_restart(cmd); | 319 | machine_restart(cmd); |
| 316 | } | 320 | } |
| 317 | EXPORT_SYMBOL_GPL(kernel_restart); | 321 | EXPORT_SYMBOL_GPL(kernel_restart); |
| @@ -333,6 +337,7 @@ void kernel_halt(void) | |||
| 333 | kernel_shutdown_prepare(SYSTEM_HALT); | 337 | kernel_shutdown_prepare(SYSTEM_HALT); |
| 334 | sysdev_shutdown(); | 338 | sysdev_shutdown(); |
| 335 | printk(KERN_EMERG "System halted.\n"); | 339 | printk(KERN_EMERG "System halted.\n"); |
| 340 | kmsg_dump(KMSG_DUMP_HALT); | ||
| 336 | machine_halt(); | 341 | machine_halt(); |
| 337 | } | 342 | } |
| 338 | 343 | ||
| @@ -351,6 +356,7 @@ void kernel_power_off(void) | |||
| 351 | disable_nonboot_cpus(); | 356 | disable_nonboot_cpus(); |
| 352 | sysdev_shutdown(); | 357 | sysdev_shutdown(); |
| 353 | printk(KERN_EMERG "Power down.\n"); | 358 | printk(KERN_EMERG "Power down.\n"); |
| 359 | kmsg_dump(KMSG_DUMP_POWEROFF); | ||
| 354 | machine_power_off(); | 360 | machine_power_off(); |
| 355 | } | 361 | } |
| 356 | EXPORT_SYMBOL_GPL(kernel_power_off); | 362 | EXPORT_SYMBOL_GPL(kernel_power_off); |
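With these hooks, registered kmsg dumpers now fire for restart, halt and power-off, not only for oops and panic. A hedged sketch of a dumper built against the two-segment callback interface this kernel series uses (the signature is my reading of include/linux/kmsg_dump.h and should be checked there; all demo_* names are illustrative):

#include <linux/kmsg_dump.h>
#include <linux/module.h>

static void demo_dump(struct kmsg_dumper *dumper,
		      enum kmsg_dump_reason reason,
		      const char *s1, unsigned long l1,
		      const char *s2, unsigned long l2)
{
	switch (reason) {
	case KMSG_DUMP_EMERG:
	case KMSG_DUMP_RESTART:
	case KMSG_DUMP_HALT:
	case KMSG_DUMP_POWEROFF:
		/* persist s1/l1 and s2/l2, the two log-buffer segments */
		break;
	default:
		break;
	}
}

static struct kmsg_dumper demo_dumper = {
	.dump = demo_dump,
};

static int __init demo_init(void)
{
	return kmsg_dump_register(&demo_dumper);
}
module_init(demo_init);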
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ae5cbb1e3ced..bc86bb32e126 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
| 25 | #include <linux/sysctl.h> | 25 | #include <linux/sysctl.h> |
| 26 | #include <linux/signal.h> | 26 | #include <linux/signal.h> |
| 27 | #include <linux/printk.h> | ||
| 27 | #include <linux/proc_fs.h> | 28 | #include <linux/proc_fs.h> |
| 28 | #include <linux/security.h> | 29 | #include <linux/security.h> |
| 29 | #include <linux/ctype.h> | 30 | #include <linux/ctype.h> |
| @@ -245,10 +246,6 @@ static struct ctl_table root_table[] = { | |||
| 245 | .mode = 0555, | 246 | .mode = 0555, |
| 246 | .child = dev_table, | 247 | .child = dev_table, |
| 247 | }, | 248 | }, |
| 248 | /* | ||
| 249 | * NOTE: do not add new entries to this table unless you have read | ||
| 250 | * Documentation/sysctl/ctl_unnumbered.txt | ||
| 251 | */ | ||
| 252 | { } | 249 | { } |
| 253 | }; | 250 | }; |
| 254 | 251 | ||
| @@ -710,6 +707,15 @@ static struct ctl_table kern_table[] = { | |||
| 710 | .extra1 = &zero, | 707 | .extra1 = &zero, |
| 711 | .extra2 = &one, | 708 | .extra2 = &one, |
| 712 | }, | 709 | }, |
| 710 | { | ||
| 711 | .procname = "kptr_restrict", | ||
| 712 | .data = &kptr_restrict, | ||
| 713 | .maxlen = sizeof(int), | ||
| 714 | .mode = 0644, | ||
| 715 | .proc_handler = proc_dointvec_minmax, | ||
| 716 | .extra1 = &zero, | ||
| 717 | .extra2 = &two, | ||
| 718 | }, | ||
| 713 | #endif | 719 | #endif |
| 714 | { | 720 | { |
| 715 | .procname = "ngroups_max", | 721 | .procname = "ngroups_max", |
| @@ -962,10 +968,6 @@ static struct ctl_table kern_table[] = { | |||
| 962 | .proc_handler = proc_dointvec, | 968 | .proc_handler = proc_dointvec, |
| 963 | }, | 969 | }, |
| 964 | #endif | 970 | #endif |
| 965 | /* | ||
| 966 | * NOTE: do not add new entries to this table unless you have read | ||
| 967 | * Documentation/sysctl/ctl_unnumbered.txt | ||
| 968 | */ | ||
| 969 | { } | 971 | { } |
| 970 | }; | 972 | }; |
| 971 | 973 | ||
| @@ -1326,11 +1328,6 @@ static struct ctl_table vm_table[] = { | |||
| 1326 | .extra2 = &one, | 1328 | .extra2 = &one, |
| 1327 | }, | 1329 | }, |
| 1328 | #endif | 1330 | #endif |
| 1329 | |||
| 1330 | /* | ||
| 1331 | * NOTE: do not add new entries to this table unless you have read | ||
| 1332 | * Documentation/sysctl/ctl_unnumbered.txt | ||
| 1333 | */ | ||
| 1334 | { } | 1331 | { } |
| 1335 | }; | 1332 | }; |
| 1336 | 1333 | ||
| @@ -1486,10 +1483,6 @@ static struct ctl_table fs_table[] = { | |||
| 1486 | .proc_handler = &pipe_proc_fn, | 1483 | .proc_handler = &pipe_proc_fn, |
| 1487 | .extra1 = &pipe_min_size, | 1484 | .extra1 = &pipe_min_size, |
| 1488 | }, | 1485 | }, |
| 1489 | /* | ||
| 1490 | * NOTE: do not add new entries to this table unless you have read | ||
| 1491 | * Documentation/sysctl/ctl_unnumbered.txt | ||
| 1492 | */ | ||
| 1493 | { } | 1486 | { } |
| 1494 | }; | 1487 | }; |
| 1495 | 1488 | ||
| @@ -2899,7 +2892,7 @@ int proc_do_large_bitmap(struct ctl_table *table, int write, | |||
| 2899 | } | 2892 | } |
| 2900 | } | 2893 | } |
| 2901 | 2894 | ||
| 2902 | #else /* CONFIG_PROC_FS */ | 2895 | #else /* CONFIG_PROC_SYSCTL */ |
| 2903 | 2896 | ||
| 2904 | int proc_dostring(struct ctl_table *table, int write, | 2897 | int proc_dostring(struct ctl_table *table, int write, |
| 2905 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2898 | void __user *buffer, size_t *lenp, loff_t *ppos) |
| @@ -2951,7 +2944,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, | |||
| 2951 | } | 2944 | } |
| 2952 | 2945 | ||
| 2953 | 2946 | ||
| 2954 | #endif /* CONFIG_PROC_FS */ | 2947 | #endif /* CONFIG_PROC_SYSCTL */ |
| 2955 | 2948 | ||
| 2956 | /* | 2949 | /* |
| 2957 | * No sense putting this after each symbol definition, twice, | 2950 | * No sense putting this after each symbol definition, twice, |
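The new kptr_restrict entry (clamped to 0..2 via &zero/&two) controls the %pK printk extension: 0 prints kernel pointers verbatim, 1 prints them as zeros to unprivileged readers, 2 prints zeros unconditionally. A minimal sketch of a %pK user — demo_show() is an illustrative name:

#include <linux/kernel.h>

static void demo_show(const void *obj)
{
	/* honors kptr_restrict: may emit zeros instead of the address */
	printk(KERN_INFO "demo: object at %pK\n", obj);
}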
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index 4b2545a136ff..b875bedf7c9a 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c | |||
| @@ -1192,7 +1192,7 @@ static ssize_t bin_dn_node_address(struct file *file, | |||
| 1192 | 1192 | ||
| 1193 | buf[result] = '\0'; | 1193 | buf[result] = '\0'; |
| 1194 | 1194 | ||
| 1195 | /* Convert the decnet addresss to binary */ | 1195 | /* Convert the decnet address to binary */ |
| 1196 | result = -EIO; | 1196 | result = -EIO; |
| 1197 | nodep = strchr(buf, '.') + 1; | 1197 | nodep = strchr(buf, '.') + 1; |
| 1198 | if (!nodep) | 1198 | if (!nodep) |
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 69691eb4b715..3971c6b9d58d 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
| @@ -348,7 +348,7 @@ static int parse(struct nlattr *na, struct cpumask *mask) | |||
| 348 | return ret; | 348 | return ret; |
| 349 | } | 349 | } |
| 350 | 350 | ||
| 351 | #ifdef CONFIG_IA64 | 351 | #if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) |
| 352 | #define TASKSTATS_NEEDS_PADDING 1 | 352 | #define TASKSTATS_NEEDS_PADDING 1 |
| 353 | #endif | 353 | #endif |
| 354 | 354 | ||
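TASKSTATS_NEEDS_PADDING now triggers on any 64-bit architecture without efficient unaligned access instead of hard-coding IA64. Where it is defined, the netlink reply builder has to keep the struct taskstats payload 8-byte aligned; a hedged sketch of that consumption, assuming an skb being filled with attributes and a local err label:

#ifdef TASKSTATS_NEEDS_PADDING
	/*
	 * A zero-length TASKSTATS_TYPE_NULL attribute nudges the next
	 * attribute's payload onto an 8-byte boundary.
	 */
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif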
diff --git a/kernel/time.c b/kernel/time.c index ba9b338d1835..32174359576f 100644 --- a/kernel/time.c +++ b/kernel/time.c | |||
| @@ -238,7 +238,7 @@ EXPORT_SYMBOL(current_fs_time); | |||
| 238 | * Avoid unnecessary multiplications/divisions in the | 238 | * Avoid unnecessary multiplications/divisions in the |
| 239 | * two most common HZ cases: | 239 | * two most common HZ cases: |
| 240 | */ | 240 | */ |
| 241 | unsigned int inline jiffies_to_msecs(const unsigned long j) | 241 | inline unsigned int jiffies_to_msecs(const unsigned long j) |
| 242 | { | 242 | { |
| 243 | #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) | 243 | #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) |
| 244 | return (MSEC_PER_SEC / HZ) * j; | 244 | return (MSEC_PER_SEC / HZ) * j; |
| @@ -254,7 +254,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j) | |||
| 254 | } | 254 | } |
| 255 | EXPORT_SYMBOL(jiffies_to_msecs); | 255 | EXPORT_SYMBOL(jiffies_to_msecs); |
| 256 | 256 | ||
| 257 | unsigned int inline jiffies_to_usecs(const unsigned long j) | 257 | inline unsigned int jiffies_to_usecs(const unsigned long j) |
| 258 | { | 258 | { |
| 259 | #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) | 259 | #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) |
| 260 | return (USEC_PER_SEC / HZ) * j; | 260 | return (USEC_PER_SEC / HZ) * j; |
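Putting inline ahead of the return type is the conventional specifier order and keeps gcc's old-style-declaration warning quiet. The converters themselves are routinely used for interval arithmetic; a small usage sketch (demo_timed_section is illustrative):

#include <linux/jiffies.h>
#include <linux/kernel.h>

static void demo_timed_section(void (*fn)(void))
{
	unsigned long start = jiffies;

	fn();
	/* jiffy delta -> wall-clock milliseconds */
	pr_info("demo: section took %u ms\n",
		jiffies_to_msecs(jiffies - start));
}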
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index df140cd3ea47..6519cf62d9cd 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
| @@ -113,7 +113,7 @@ EXPORT_SYMBOL_GPL(timecounter_cyc2time); | |||
| 113 | * @shift: pointer to shift variable | 113 | * @shift: pointer to shift variable |
| 114 | * @from: frequency to convert from | 114 | * @from: frequency to convert from |
| 115 | * @to: frequency to convert to | 115 | * @to: frequency to convert to |
| 116 | * @minsec: guaranteed runtime conversion range in seconds | 116 | * @maxsec: guaranteed runtime conversion range in seconds |
| 117 | * | 117 | * |
| 118 | * The function evaluates the shift/mult pair for the scaled math | 118 | * The function evaluates the shift/mult pair for the scaled math |
| 119 | * operations of clocksources and clockevents. | 119 | * operations of clocksources and clockevents. |
| @@ -122,7 +122,7 @@ EXPORT_SYMBOL_GPL(timecounter_cyc2time); | |||
| 122 | * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock | 122 | * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock |
| 123 | * event @to is the counter frequency and @from is NSEC_PER_SEC. | 123 | * event @to is the counter frequency and @from is NSEC_PER_SEC. |
| 124 | * | 124 | * |
| 125 | * The @minsec conversion range argument controls the time frame in | 125 | * The @maxsec conversion range argument controls the time frame in |
| 126 | * seconds which must be covered by the runtime conversion with the | 126 | * seconds which must be covered by the runtime conversion with the |
| 127 | * calculated mult and shift factors. This guarantees that no 64bit | 127 | * calculated mult and shift factors. This guarantees that no 64bit |
| 128 | * overflow happens when the input value of the conversion is | 128 | * overflow happens when the input value of the conversion is |
| @@ -131,7 +131,7 @@ EXPORT_SYMBOL_GPL(timecounter_cyc2time); | |||
| 131 | * factors. | 131 | * factors. |
| 132 | */ | 132 | */ |
| 133 | void | 133 | void |
| 134 | clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec) | 134 | clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec) |
| 135 | { | 135 | { |
| 136 | u64 tmp; | 136 | u64 tmp; |
| 137 | u32 sft, sftacc= 32; | 137 | u32 sft, sftacc= 32; |
| @@ -140,7 +140,7 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec) | |||
| 140 | * Calculate the shift factor which is limiting the conversion | 140 | * Calculate the shift factor which is limiting the conversion |
| 141 | * range: | 141 | * range: |
| 142 | */ | 142 | */ |
| 143 | tmp = ((u64)minsec * from) >> 32; | 143 | tmp = ((u64)maxsec * from) >> 32; |
| 144 | while (tmp) { | 144 | while (tmp) { |
| 145 | tmp >>=1; | 145 | tmp >>=1; |
| 146 | sftacc--; | 146 | sftacc--; |
| @@ -679,7 +679,7 @@ EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); | |||
| 679 | int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) | 679 | int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) |
| 680 | { | 680 | { |
| 681 | 681 | ||
| 682 | /* Intialize mult/shift and max_idle_ns */ | 682 | /* Initialize mult/shift and max_idle_ns */ |
| 683 | __clocksource_updatefreq_scale(cs, scale, freq); | 683 | __clocksource_updatefreq_scale(cs, scale, freq); |
| 684 | 684 | ||
| 685 | /* Add clocksource to the clocksource list */ | 685 | /* Add clocksource to the clocksource list */ |
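The @minsec -> @maxsec rename matches what the parameter actually bounds: the largest interval the computed mult/shift pair must convert without 64-bit overflow. A hedged sketch of deriving and applying a pair for a hypothetical 10 MHz counter:

#include <linux/clocksource.h>
#include <linux/time.h>

static void demo_mult_shift(void)
{
	u32 mult, shift;
	u64 ns;

	/* conversion must stay overflow-free for intervals up to 600 s */
	clocks_calc_mult_shift(&mult, &shift, 10000000, NSEC_PER_SEC, 600);

	/* the runtime pattern the timekeeping core uses */
	ns = (10000000ULL * mult) >> shift;	/* one second of cycles ~= 1e9 ns */
}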
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index d2321891538f..5c00242fa921 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/timex.h> | 14 | #include <linux/timex.h> |
| 15 | #include <linux/time.h> | 15 | #include <linux/time.h> |
| 16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
| 17 | #include <linux/module.h> | ||
| 17 | 18 | ||
| 18 | /* | 19 | /* |
| 19 | * NTP timekeeping variables: | 20 | * NTP timekeeping variables: |
| @@ -74,6 +75,162 @@ static long time_adjust; | |||
| 74 | /* constant (boot-param configurable) NTP tick adjustment (upscaled) */ | 75 | /* constant (boot-param configurable) NTP tick adjustment (upscaled) */ |
| 75 | static s64 ntp_tick_adj; | 76 | static s64 ntp_tick_adj; |
| 76 | 77 | ||
| 78 | #ifdef CONFIG_NTP_PPS | ||
| 79 | |||
| 80 | /* | ||
| 81 | * The following variables are used when a pulse-per-second (PPS) signal | ||
| 82 | * is available. They establish the engineering parameters of the clock | ||
| 83 | * discipline loop when controlled by the PPS signal. | ||
| 84 | */ | ||
| 85 | #define PPS_VALID 10 /* PPS signal watchdog max (s) */ | ||
| 86 | #define PPS_POPCORN 4 /* popcorn spike threshold (shift) */ | ||
| 87 | #define PPS_INTMIN 2 /* min freq interval (s) (shift) */ | ||
| 88 | #define PPS_INTMAX 8 /* max freq interval (s) (shift) */ | ||
| 89 | #define PPS_INTCOUNT 4 /* number of consecutive good intervals to | ||
| 90 | increase pps_shift or consecutive bad | ||
| 91 | intervals to decrease it */ | ||
| 92 | #define PPS_MAXWANDER 100000 /* max PPS freq wander (ns/s) */ | ||
| 93 | |||
| 94 | static int pps_valid; /* signal watchdog counter */ | ||
| 95 | static long pps_tf[3]; /* phase median filter */ | ||
| 96 | static long pps_jitter; /* current jitter (ns) */ | ||
| 97 | static struct timespec pps_fbase; /* beginning of the last freq interval */ | ||
| 98 | static int pps_shift; /* current interval duration (s) (shift) */ | ||
| 99 | static int pps_intcnt; /* interval counter */ | ||
| 100 | static s64 pps_freq; /* frequency offset (scaled ns/s) */ | ||
| 101 | static long pps_stabil; /* current stability (scaled ns/s) */ | ||
| 102 | |||
| 103 | /* | ||
| 104 | * PPS signal quality monitors | ||
| 105 | */ | ||
| 106 | static long pps_calcnt; /* calibration intervals */ | ||
| 107 | static long pps_jitcnt; /* jitter limit exceeded */ | ||
| 108 | static long pps_stbcnt; /* stability limit exceeded */ | ||
| 109 | static long pps_errcnt; /* calibration errors */ | ||
| 110 | |||
| 111 | |||
| 112 | /* PPS kernel consumer compensates the whole phase error immediately. | ||
| 113 | * Otherwise, reduce the offset by a fixed factor times the time constant. | ||
| 114 | */ | ||
| 115 | static inline s64 ntp_offset_chunk(s64 offset) | ||
| 116 | { | ||
| 117 | if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL) | ||
| 118 | return offset; | ||
| 119 | else | ||
| 120 | return shift_right(offset, SHIFT_PLL + time_constant); | ||
| 121 | } | ||
| 122 | |||
| 123 | static inline void pps_reset_freq_interval(void) | ||
| 124 | { | ||
| 125 | /* the PPS calibration interval may end | ||
| 126 | surprisingly early */ | ||
| 127 | pps_shift = PPS_INTMIN; | ||
| 128 | pps_intcnt = 0; | ||
| 129 | } | ||
| 130 | |||
| 131 | /** | ||
| 132 | * pps_clear - Clears the PPS state variables | ||
| 133 | * | ||
| 134 | * Must be called while holding a write on the xtime_lock | ||
| 135 | */ | ||
| 136 | static inline void pps_clear(void) | ||
| 137 | { | ||
| 138 | pps_reset_freq_interval(); | ||
| 139 | pps_tf[0] = 0; | ||
| 140 | pps_tf[1] = 0; | ||
| 141 | pps_tf[2] = 0; | ||
| 142 | pps_fbase.tv_sec = pps_fbase.tv_nsec = 0; | ||
| 143 | pps_freq = 0; | ||
| 144 | } | ||
| 145 | |||
| 146 | /* Decrease pps_valid to indicate that another second has passed since | ||
| 147 | * the last PPS signal. When it reaches 0, indicate that the PPS | ||
| 148 | * signal is missing. | ||
| 149 | * | ||
| 150 | * Must be called while holding a write on the xtime_lock | ||
| 151 | */ | ||
| 152 | static inline void pps_dec_valid(void) | ||
| 153 | { | ||
| 154 | if (pps_valid > 0) | ||
| 155 | pps_valid--; | ||
| 156 | else { | ||
| 157 | time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER | | ||
| 158 | STA_PPSWANDER | STA_PPSERROR); | ||
| 159 | pps_clear(); | ||
| 160 | } | ||
| 161 | } | ||
| 162 | |||
| 163 | static inline void pps_set_freq(s64 freq) | ||
| 164 | { | ||
| 165 | pps_freq = freq; | ||
| 166 | } | ||
| 167 | |||
| 168 | static inline int is_error_status(int status) | ||
| 169 | { | ||
| 170 | return (time_status & (STA_UNSYNC|STA_CLOCKERR)) | ||
| 171 | /* PPS signal lost when either PPS time or | ||
| 172 | * PPS frequency synchronization requested | ||
| 173 | */ | ||
| 174 | || ((time_status & (STA_PPSFREQ|STA_PPSTIME)) | ||
| 175 | && !(time_status & STA_PPSSIGNAL)) | ||
| 176 | /* PPS jitter exceeded when | ||
| 177 | * PPS time synchronization requested */ | ||
| 178 | || ((time_status & (STA_PPSTIME|STA_PPSJITTER)) | ||
| 179 | == (STA_PPSTIME|STA_PPSJITTER)) | ||
| 180 | /* PPS wander exceeded or calibration error when | ||
| 181 | * PPS frequency synchronization requested | ||
| 182 | */ | ||
| 183 | || ((time_status & STA_PPSFREQ) | ||
| 184 | && (time_status & (STA_PPSWANDER|STA_PPSERROR))); | ||
| 185 | } | ||
| 186 | |||
| 187 | static inline void pps_fill_timex(struct timex *txc) | ||
| 188 | { | ||
| 189 | txc->ppsfreq = shift_right((pps_freq >> PPM_SCALE_INV_SHIFT) * | ||
| 190 | PPM_SCALE_INV, NTP_SCALE_SHIFT); | ||
| 191 | txc->jitter = pps_jitter; | ||
| 192 | if (!(time_status & STA_NANO)) | ||
| 193 | txc->jitter /= NSEC_PER_USEC; | ||
| 194 | txc->shift = pps_shift; | ||
| 195 | txc->stabil = pps_stabil; | ||
| 196 | txc->jitcnt = pps_jitcnt; | ||
| 197 | txc->calcnt = pps_calcnt; | ||
| 198 | txc->errcnt = pps_errcnt; | ||
| 199 | txc->stbcnt = pps_stbcnt; | ||
| 200 | } | ||
| 201 | |||
| 202 | #else /* !CONFIG_NTP_PPS */ | ||
| 203 | |||
| 204 | static inline s64 ntp_offset_chunk(s64 offset) | ||
| 205 | { | ||
| 206 | return shift_right(offset, SHIFT_PLL + time_constant); | ||
| 207 | } | ||
| 208 | |||
| 209 | static inline void pps_reset_freq_interval(void) {} | ||
| 210 | static inline void pps_clear(void) {} | ||
| 211 | static inline void pps_dec_valid(void) {} | ||
| 212 | static inline void pps_set_freq(s64 freq) {} | ||
| 213 | |||
| 214 | static inline int is_error_status(int status) | ||
| 215 | { | ||
| 216 | return status & (STA_UNSYNC|STA_CLOCKERR); | ||
| 217 | } | ||
| 218 | |||
| 219 | static inline void pps_fill_timex(struct timex *txc) | ||
| 220 | { | ||
| 221 | /* PPS is not implemented, so these are zero */ | ||
| 222 | txc->ppsfreq = 0; | ||
| 223 | txc->jitter = 0; | ||
| 224 | txc->shift = 0; | ||
| 225 | txc->stabil = 0; | ||
| 226 | txc->jitcnt = 0; | ||
| 227 | txc->calcnt = 0; | ||
| 228 | txc->errcnt = 0; | ||
| 229 | txc->stbcnt = 0; | ||
| 230 | } | ||
| 231 | |||
| 232 | #endif /* CONFIG_NTP_PPS */ | ||
| 233 | |||
| 77 | /* | 234 | /* |
| 78 | * NTP methods: | 235 | * NTP methods: |
| 79 | */ | 236 | */ |
| @@ -185,6 +342,9 @@ void ntp_clear(void) | |||
| 185 | 342 | ||
| 186 | tick_length = tick_length_base; | 343 | tick_length = tick_length_base; |
| 187 | time_offset = 0; | 344 | time_offset = 0; |
| 345 | |||
| 346 | /* Clear PPS state variables */ | ||
| 347 | pps_clear(); | ||
| 188 | } | 348 | } |
| 189 | 349 | ||
| 190 | /* | 350 | /* |
| @@ -250,16 +410,16 @@ void second_overflow(void) | |||
| 250 | time_status |= STA_UNSYNC; | 410 | time_status |= STA_UNSYNC; |
| 251 | } | 411 | } |
| 252 | 412 | ||
| 253 | /* | 413 | /* Compute the phase adjustment for the next second */ |
| 254 | * Compute the phase adjustment for the next second. The offset is | ||
| 255 | * reduced by a fixed factor times the time constant. | ||
| 256 | */ | ||
| 257 | tick_length = tick_length_base; | 414 | tick_length = tick_length_base; |
| 258 | 415 | ||
| 259 | delta = shift_right(time_offset, SHIFT_PLL + time_constant); | 416 | delta = ntp_offset_chunk(time_offset); |
| 260 | time_offset -= delta; | 417 | time_offset -= delta; |
| 261 | tick_length += delta; | 418 | tick_length += delta; |
| 262 | 419 | ||
| 420 | /* Check PPS signal */ | ||
| 421 | pps_dec_valid(); | ||
| 422 | |||
| 263 | if (!time_adjust) | 423 | if (!time_adjust) |
| 264 | return; | 424 | return; |
| 265 | 425 | ||
| @@ -369,6 +529,8 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts) | |||
| 369 | if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) { | 529 | if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) { |
| 370 | time_state = TIME_OK; | 530 | time_state = TIME_OK; |
| 371 | time_status = STA_UNSYNC; | 531 | time_status = STA_UNSYNC; |
| 532 | /* restart PPS frequency calibration */ | ||
| 533 | pps_reset_freq_interval(); | ||
| 372 | } | 534 | } |
| 373 | 535 | ||
| 374 | /* | 536 | /* |
| @@ -418,6 +580,8 @@ static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts | |||
| 418 | time_freq = txc->freq * PPM_SCALE; | 580 | time_freq = txc->freq * PPM_SCALE; |
| 419 | time_freq = min(time_freq, MAXFREQ_SCALED); | 581 | time_freq = min(time_freq, MAXFREQ_SCALED); |
| 420 | time_freq = max(time_freq, -MAXFREQ_SCALED); | 582 | time_freq = max(time_freq, -MAXFREQ_SCALED); |
| 583 | /* update pps_freq */ | ||
| 584 | pps_set_freq(time_freq); | ||
| 421 | } | 585 | } |
| 422 | 586 | ||
| 423 | if (txc->modes & ADJ_MAXERROR) | 587 | if (txc->modes & ADJ_MAXERROR) |
| @@ -508,7 +672,8 @@ int do_adjtimex(struct timex *txc) | |||
| 508 | } | 672 | } |
| 509 | 673 | ||
| 510 | result = time_state; /* mostly `TIME_OK' */ | 674 | result = time_state; /* mostly `TIME_OK' */ |
| 511 | if (time_status & (STA_UNSYNC|STA_CLOCKERR)) | 675 | /* check for errors */ |
| 676 | if (is_error_status(time_status)) | ||
| 512 | result = TIME_ERROR; | 677 | result = TIME_ERROR; |
| 513 | 678 | ||
| 514 | txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) * | 679 | txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) * |
| @@ -522,15 +687,8 @@ int do_adjtimex(struct timex *txc) | |||
| 522 | txc->tick = tick_usec; | 687 | txc->tick = tick_usec; |
| 523 | txc->tai = time_tai; | 688 | txc->tai = time_tai; |
| 524 | 689 | ||
| 525 | /* PPS is not implemented, so these are zero */ | 690 | /* fill PPS status fields */ |
| 526 | txc->ppsfreq = 0; | 691 | pps_fill_timex(txc); |
| 527 | txc->jitter = 0; | ||
| 528 | txc->shift = 0; | ||
| 529 | txc->stabil = 0; | ||
| 530 | txc->jitcnt = 0; | ||
| 531 | txc->calcnt = 0; | ||
| 532 | txc->errcnt = 0; | ||
| 533 | txc->stbcnt = 0; | ||
| 534 | 692 | ||
| 535 | write_sequnlock_irq(&xtime_lock); | 693 | write_sequnlock_irq(&xtime_lock); |
| 536 | 694 | ||
| @@ -544,6 +702,243 @@ int do_adjtimex(struct timex *txc) | |||
| 544 | return result; | 702 | return result; |
| 545 | } | 703 | } |
| 546 | 704 | ||
| 705 | #ifdef CONFIG_NTP_PPS | ||
| 706 | |||
| 707 | /* struct pps_normtime has the same layout as struct timespec, but it | ||
| 708 | * is semantically different (which is why it exists): | ||
| 709 | * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] | ||
| 710 | * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */ | ||
| 711 | struct pps_normtime { | ||
| 712 | __kernel_time_t sec; /* seconds */ | ||
| 713 | long nsec; /* nanoseconds */ | ||
| 714 | }; | ||
| 715 | |||
| 716 | /* normalize the timestamp so that nsec is in the | ||
| 717 | ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */ | ||
| 718 | static inline struct pps_normtime pps_normalize_ts(struct timespec ts) | ||
| 719 | { | ||
| 720 | struct pps_normtime norm = { | ||
| 721 | .sec = ts.tv_sec, | ||
| 722 | .nsec = ts.tv_nsec | ||
| 723 | }; | ||
| 724 | |||
| 725 | if (norm.nsec > (NSEC_PER_SEC >> 1)) { | ||
| 726 | norm.nsec -= NSEC_PER_SEC; | ||
| 727 | norm.sec++; | ||
| 728 | } | ||
| 729 | |||
| 730 | return norm; | ||
| 731 | } | ||
| 732 | |||
| 733 | /* get current phase correction and jitter */ | ||
| 734 | static inline long pps_phase_filter_get(long *jitter) | ||
| 735 | { | ||
| 736 | *jitter = pps_tf[0] - pps_tf[1]; | ||
| 737 | if (*jitter < 0) | ||
| 738 | *jitter = -*jitter; | ||
| 739 | |||
| 740 | /* TODO: test various filters */ | ||
| 741 | return pps_tf[0]; | ||
| 742 | } | ||
| 743 | |||
| 744 | /* add the sample to the phase filter */ | ||
| 745 | static inline void pps_phase_filter_add(long err) | ||
| 746 | { | ||
| 747 | pps_tf[2] = pps_tf[1]; | ||
| 748 | pps_tf[1] = pps_tf[0]; | ||
| 749 | pps_tf[0] = err; | ||
| 750 | } | ||
| 751 | |||
| 752 | /* decrease frequency calibration interval length. | ||
| 753 | * It is halved after four consecutive unstable intervals. | ||
| 754 | */ | ||
| 755 | static inline void pps_dec_freq_interval(void) | ||
| 756 | { | ||
| 757 | if (--pps_intcnt <= -PPS_INTCOUNT) { | ||
| 758 | pps_intcnt = -PPS_INTCOUNT; | ||
| 759 | if (pps_shift > PPS_INTMIN) { | ||
| 760 | pps_shift--; | ||
| 761 | pps_intcnt = 0; | ||
| 762 | } | ||
| 763 | } | ||
| 764 | } | ||
| 765 | |||
| 766 | /* increase frequency calibration interval length. | ||
| 767 | * It is doubled after four consecutive stable intervals. | ||
| 768 | */ | ||
| 769 | static inline void pps_inc_freq_interval(void) | ||
| 770 | { | ||
| 771 | if (++pps_intcnt >= PPS_INTCOUNT) { | ||
| 772 | pps_intcnt = PPS_INTCOUNT; | ||
| 773 | if (pps_shift < PPS_INTMAX) { | ||
| 774 | pps_shift++; | ||
| 775 | pps_intcnt = 0; | ||
| 776 | } | ||
| 777 | } | ||
| 778 | } | ||
| 779 | |||
| 780 | /* update clock frequency based on MONOTONIC_RAW clock PPS signal | ||
| 781 | * timestamps | ||
| 782 | * | ||
| 783 | * At the end of the calibration interval the difference between the | ||
| 784 | * first and last MONOTONIC_RAW clock timestamps divided by the length | ||
| 785 | * of the interval becomes the frequency update. If the interval was | ||
| 786 | * too long, the data are discarded. | ||
| 787 | * Returns the difference between old and new frequency values. | ||
| 788 | */ | ||
| 789 | static long hardpps_update_freq(struct pps_normtime freq_norm) | ||
| 790 | { | ||
| 791 | long delta, delta_mod; | ||
| 792 | s64 ftemp; | ||
| 793 | |||
| 794 | /* check if the frequency interval was too long */ | ||
| 795 | if (freq_norm.sec > (2 << pps_shift)) { | ||
| 796 | time_status |= STA_PPSERROR; | ||
| 797 | pps_errcnt++; | ||
| 798 | pps_dec_freq_interval(); | ||
| 799 | pr_err("hardpps: PPSERROR: interval too long - %ld s\n", | ||
| 800 | freq_norm.sec); | ||
| 801 | return 0; | ||
| 802 | } | ||
| 803 | |||
| 804 | /* here the raw frequency offset and wander (stability) are | ||
| 805 | * calculated. If the wander is less than the wander threshold | ||
| 806 | * the interval is increased; otherwise it is decreased. | ||
| 807 | */ | ||
| 808 | ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT, | ||
| 809 | freq_norm.sec); | ||
| 810 | delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT); | ||
| 811 | pps_freq = ftemp; | ||
| 812 | if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) { | ||
| 813 | pr_warning("hardpps: PPSWANDER: change=%ld\n", delta); | ||
| 814 | time_status |= STA_PPSWANDER; | ||
| 815 | pps_stbcnt++; | ||
| 816 | pps_dec_freq_interval(); | ||
| 817 | } else { /* good sample */ | ||
| 818 | pps_inc_freq_interval(); | ||
| 819 | } | ||
| 820 | |||
| 821 | /* the stability metric is calculated as the average of recent | ||
| 822 | * frequency changes, but is used only for performance | ||
| 823 | * monitoring | ||
| 824 | */ | ||
| 825 | delta_mod = delta; | ||
| 826 | if (delta_mod < 0) | ||
| 827 | delta_mod = -delta_mod; | ||
| 828 | pps_stabil += (div_s64(((s64)delta_mod) << | ||
| 829 | (NTP_SCALE_SHIFT - SHIFT_USEC), | ||
| 830 | NSEC_PER_USEC) - pps_stabil) >> PPS_INTMIN; | ||
| 831 | |||
| 832 | /* if enabled, the system clock frequency is updated */ | ||
| 833 | if ((time_status & STA_PPSFREQ) != 0 && | ||
| 834 | (time_status & STA_FREQHOLD) == 0) { | ||
| 835 | time_freq = pps_freq; | ||
| 836 | ntp_update_frequency(); | ||
| 837 | } | ||
| 838 | |||
| 839 | return delta; | ||
| 840 | } | ||
| 841 | |||
| 842 | /* correct REALTIME clock phase error against PPS signal */ | ||
| 843 | static void hardpps_update_phase(long error) | ||
| 844 | { | ||
| 845 | long correction = -error; | ||
| 846 | long jitter; | ||
| 847 | |||
| 848 | /* add the sample to the median filter */ | ||
| 849 | pps_phase_filter_add(correction); | ||
| 850 | correction = pps_phase_filter_get(&jitter); | ||
| 851 | |||
| 852 | /* Nominal jitter is due to PPS signal noise. If it exceeds the | ||
| 853 | * threshold, the sample is discarded; otherwise, if so enabled, | ||
| 854 | * the time offset is updated. | ||
| 855 | */ | ||
| 856 | if (jitter > (pps_jitter << PPS_POPCORN)) { | ||
| 857 | pr_warning("hardpps: PPSJITTER: jitter=%ld, limit=%ld\n", | ||
| 858 | jitter, (pps_jitter << PPS_POPCORN)); | ||
| 859 | time_status |= STA_PPSJITTER; | ||
| 860 | pps_jitcnt++; | ||
| 861 | } else if (time_status & STA_PPSTIME) { | ||
| 862 | /* correct the time using the phase offset */ | ||
| 863 | time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT, | ||
| 864 | NTP_INTERVAL_FREQ); | ||
| 865 | /* cancel running adjtime() */ | ||
| 866 | time_adjust = 0; | ||
| 867 | } | ||
| 868 | /* update jitter */ | ||
| 869 | pps_jitter += (jitter - pps_jitter) >> PPS_INTMIN; | ||
| 870 | } | ||
| 871 | |||
| 872 | /* | ||
| 873 | * hardpps() - discipline CPU clock oscillator to external PPS signal | ||
| 874 | * | ||
| 875 | * This routine is called at each PPS signal arrival in order to | ||
| 876 | * discipline the CPU clock oscillator to the PPS signal. It takes two | ||
| 877 | * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former | ||
| 878 | * is used to correct clock phase error and the latter is used to | ||
| 879 | * correct the frequency. | ||
| 880 | * | ||
| 881 | * This code is based on David Mills's reference nanokernel | ||
| 882 | * implementation. It was mostly rewritten but keeps the same idea. | ||
| 883 | */ | ||
| 884 | void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) | ||
| 885 | { | ||
| 886 | struct pps_normtime pts_norm, freq_norm; | ||
| 887 | unsigned long flags; | ||
| 888 | |||
| 889 | pts_norm = pps_normalize_ts(*phase_ts); | ||
| 890 | |||
| 891 | write_seqlock_irqsave(&xtime_lock, flags); | ||
| 892 | |||
| 893 | /* clear the error bits, they will be set again if needed */ | ||
| 894 | time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR); | ||
| 895 | |||
| 896 | /* indicate signal presence */ | ||
| 897 | time_status |= STA_PPSSIGNAL; | ||
| 898 | pps_valid = PPS_VALID; | ||
| 899 | |||
| 900 | /* when called for the first time, | ||
| 901 | * just start the frequency interval */ | ||
| 902 | if (unlikely(pps_fbase.tv_sec == 0)) { | ||
| 903 | pps_fbase = *raw_ts; | ||
| 904 | write_sequnlock_irqrestore(&xtime_lock, flags); | ||
| 905 | return; | ||
| 906 | } | ||
| 907 | |||
| 908 | /* ok, now we have a base for frequency calculation */ | ||
| 909 | freq_norm = pps_normalize_ts(timespec_sub(*raw_ts, pps_fbase)); | ||
| 910 | |||
| 911 | /* check that the signal is in the range | ||
| 912 | * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */ | ||
| 913 | if ((freq_norm.sec == 0) || | ||
| 914 | (freq_norm.nsec > MAXFREQ * freq_norm.sec) || | ||
| 915 | (freq_norm.nsec < -MAXFREQ * freq_norm.sec)) { | ||
| 916 | time_status |= STA_PPSJITTER; | ||
| 917 | /* restart the frequency calibration interval */ | ||
| 918 | pps_fbase = *raw_ts; | ||
| 919 | write_sequnlock_irqrestore(&xtime_lock, flags); | ||
| 920 | pr_err("hardpps: PPSJITTER: bad pulse\n"); | ||
| 921 | return; | ||
| 922 | } | ||
| 923 | |||
| 924 | /* signal is ok */ | ||
| 925 | |||
| 926 | /* check if the current frequency interval is finished */ | ||
| 927 | if (freq_norm.sec >= (1 << pps_shift)) { | ||
| 928 | pps_calcnt++; | ||
| 929 | /* restart the frequency calibration interval */ | ||
| 930 | pps_fbase = *raw_ts; | ||
| 931 | hardpps_update_freq(freq_norm); | ||
| 932 | } | ||
| 933 | |||
| 934 | hardpps_update_phase(pts_norm.nsec); | ||
| 935 | |||
| 936 | write_sequnlock_irqrestore(&xtime_lock, flags); | ||
| 937 | } | ||
| 938 | EXPORT_SYMBOL(hardpps); | ||
| 939 | |||
| 940 | #endif /* CONFIG_NTP_PPS */ | ||
| 941 | |||
| 547 | static int __init ntp_tick_adj_setup(char *str) | 942 | static int __init ntp_tick_adj_setup(char *str) |
| 548 | { | 943 | { |
| 549 | ntp_tick_adj = simple_strtol(str, NULL, 0); | 944 | ntp_tick_adj = simple_strtol(str, NULL, 0); |
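hardpps() is the entry point a PPS source calls once per pulse, handing over a matched pair of REALTIME and MONOTONIC_RAW timestamps — exactly the pair the new getnstime_raw_and_real() (added in kernel/time/timekeeping.c below) produces. A hedged caller sketch; demo_pps_irq is an illustrative name, and a real driver would hook in via the PPS subsystem:

#include <linux/time.h>

/* prototypes repeated here for self-containment; both functions are
 * exported under CONFIG_NTP_PPS and declared in the time headers */
extern void hardpps(const struct timespec *phase_ts,
		    const struct timespec *raw_ts);
extern void getnstime_raw_and_real(struct timespec *ts_raw,
				   struct timespec *ts_real);

/* called from the PPS pulse interrupt (sketch) */
static void demo_pps_irq(void)
{
	struct timespec ts_raw, ts_real;

	/* snapshot both clocks atomically at pulse time */
	getnstime_raw_and_real(&ts_raw, &ts_real);

	/* phase error from REALTIME, frequency from MONOTONIC_RAW */
	hardpps(&ts_real, &ts_raw);
}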
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 3e216e01bbd1..c55ea2433471 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -642,8 +642,7 @@ static void tick_nohz_switch_to_nohz(void) | |||
| 642 | } | 642 | } |
| 643 | local_irq_enable(); | 643 | local_irq_enable(); |
| 644 | 644 | ||
| 645 | printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", | 645 | printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id()); |
| 646 | smp_processor_id()); | ||
| 647 | } | 646 | } |
| 648 | 647 | ||
| 649 | /* | 648 | /* |
| @@ -795,8 +794,10 @@ void tick_setup_sched_timer(void) | |||
| 795 | } | 794 | } |
| 796 | 795 | ||
| 797 | #ifdef CONFIG_NO_HZ | 796 | #ifdef CONFIG_NO_HZ |
| 798 | if (tick_nohz_enabled) | 797 | if (tick_nohz_enabled) { |
| 799 | ts->nohz_mode = NOHZ_MODE_HIGHRES; | 798 | ts->nohz_mode = NOHZ_MODE_HIGHRES; |
| 799 | printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id()); | ||
| 800 | } | ||
| 800 | #endif | 801 | #endif |
| 801 | } | 802 | } |
| 802 | #endif /* HIGH_RES_TIMERS */ | 803 | #endif /* HIGH_RES_TIMERS */ |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 5bb86da82003..d27c7562902c 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
| @@ -49,7 +49,7 @@ struct timekeeper { | |||
| 49 | u32 mult; | 49 | u32 mult; |
| 50 | }; | 50 | }; |
| 51 | 51 | ||
| 52 | struct timekeeper timekeeper; | 52 | static struct timekeeper timekeeper; |
| 53 | 53 | ||
| 54 | /** | 54 | /** |
| 55 | * timekeeper_setup_internals - Set up internals to use clocksource clock. | 55 | * timekeeper_setup_internals - Set up internals to use clocksource clock. |
| @@ -164,7 +164,7 @@ static struct timespec total_sleep_time; | |||
| 164 | /* | 164 | /* |
| 165 | * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. | 165 | * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. |
| 166 | */ | 166 | */ |
| 167 | struct timespec raw_time; | 167 | static struct timespec raw_time; |
| 168 | 168 | ||
| 169 | /* flag for if timekeeping is suspended */ | 169 | /* flag for if timekeeping is suspended */ |
| 170 | int __read_mostly timekeeping_suspended; | 170 | int __read_mostly timekeeping_suspended; |
| @@ -288,6 +288,49 @@ void ktime_get_ts(struct timespec *ts) | |||
| 288 | } | 288 | } |
| 289 | EXPORT_SYMBOL_GPL(ktime_get_ts); | 289 | EXPORT_SYMBOL_GPL(ktime_get_ts); |
| 290 | 290 | ||
| 291 | #ifdef CONFIG_NTP_PPS | ||
| 292 | |||
| 293 | /** | ||
| 294 | * getnstime_raw_and_real - get time of day and raw monotonic time in timespec format | ||
| 295 | * @ts_raw: pointer to the timespec to be set to raw monotonic time | ||
| 296 | * @ts_real: pointer to the timespec to be set to the time of day | ||
| 297 | * | ||
| 298 | * This function reads both the time of day and raw monotonic time at the | ||
| 299 | * same time atomically and stores the resulting timestamps in timespec | ||
| 300 | * format. | ||
| 301 | */ | ||
| 302 | void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) | ||
| 303 | { | ||
| 304 | unsigned long seq; | ||
| 305 | s64 nsecs_raw, nsecs_real; | ||
| 306 | |||
| 307 | WARN_ON_ONCE(timekeeping_suspended); | ||
| 308 | |||
| 309 | do { | ||
| 310 | u32 arch_offset; | ||
| 311 | |||
| 312 | seq = read_seqbegin(&xtime_lock); | ||
| 313 | |||
| 314 | *ts_raw = raw_time; | ||
| 315 | *ts_real = xtime; | ||
| 316 | |||
| 317 | nsecs_raw = timekeeping_get_ns_raw(); | ||
| 318 | nsecs_real = timekeeping_get_ns(); | ||
| 319 | |||
| 320 | /* If arch requires, add in gettimeoffset() */ | ||
| 321 | arch_offset = arch_gettimeoffset(); | ||
| 322 | nsecs_raw += arch_offset; | ||
| 323 | nsecs_real += arch_offset; | ||
| 324 | |||
| 325 | } while (read_seqretry(&xtime_lock, seq)); | ||
| 326 | |||
| 327 | timespec_add_ns(ts_raw, nsecs_raw); | ||
| 328 | timespec_add_ns(ts_real, nsecs_real); | ||
| 329 | } | ||
| 330 | EXPORT_SYMBOL(getnstime_raw_and_real); | ||
| 331 | |||
| 332 | #endif /* CONFIG_NTP_PPS */ | ||
| 333 | |||
| 291 | /** | 334 | /** |
| 292 | * do_gettimeofday - Returns the time of day in a timeval | 335 | * do_gettimeofday - Returns the time of day in a timeval |
| 293 | * @tv: pointer to the timeval to be set | 336 | * @tv: pointer to the timeval to be set |
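getnstime_raw_and_real() is another instance of the xtime_lock seqlock read pattern: snapshot everything between read_seqbegin() and read_seqretry(), and retry the whole block if a writer intervened. The pattern in isolation, as a sketch with illustrative names:

#include <linux/types.h>
#include <linux/seqlock.h>

static DEFINE_SEQLOCK(demo_lock);
static u64 demo_a, demo_b;

static u64 demo_read_consistent(void)
{
	unsigned long seq;
	u64 a, b;

	do {
		seq = read_seqbegin(&demo_lock);
		a = demo_a;
		b = demo_b;
	} while (read_seqretry(&demo_lock, seq));

	return a + b;	/* both values come from the same write generation */
}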
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 53f338190b26..761c510a06c5 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
| @@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o | |||
| 52 | endif | 52 | endif |
| 53 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o | 53 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o |
| 54 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o | 54 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o |
| 55 | obj-$(CONFIG_EVENT_TRACING) += power-traces.o | 55 | obj-$(CONFIG_TRACEPOINTS) += power-traces.o |
| 56 | ifeq ($(CONFIG_TRACING),y) | 56 | ifeq ($(CONFIG_TRACING),y) |
| 57 | obj-$(CONFIG_KGDB_KDB) += trace_kdb.o | 57 | obj-$(CONFIG_KGDB_KDB) += trace_kdb.o |
| 58 | endif | 58 | endif |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7b8ec0281548..153562d0b93c 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
| @@ -758,53 +758,58 @@ static void blk_add_trace_rq_complete(void *ignore, | |||
| 758 | * @q: queue the io is for | 758 | * @q: queue the io is for |
| 759 | * @bio: the source bio | 759 | * @bio: the source bio |
| 760 | * @what: the action | 760 | * @what: the action |
| 761 | * @error: error, if any | ||
| 761 | * | 762 | * |
| 762 | * Description: | 763 | * Description: |
| 763 | * Records an action against a bio. Will log the bio offset + size. | 764 | * Records an action against a bio. Will log the bio offset + size. |
| 764 | * | 765 | * |
| 765 | **/ | 766 | **/ |
| 766 | static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, | 767 | static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, |
| 767 | u32 what) | 768 | u32 what, int error) |
| 768 | { | 769 | { |
| 769 | struct blk_trace *bt = q->blk_trace; | 770 | struct blk_trace *bt = q->blk_trace; |
| 770 | 771 | ||
| 771 | if (likely(!bt)) | 772 | if (likely(!bt)) |
| 772 | return; | 773 | return; |
| 773 | 774 | ||
| 775 | if (!error && !bio_flagged(bio, BIO_UPTODATE)) | ||
| 776 | error = EIO; | ||
| 777 | |||
| 774 | __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, | 778 | __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, |
| 775 | !bio_flagged(bio, BIO_UPTODATE), 0, NULL); | 779 | error, 0, NULL); |
| 776 | } | 780 | } |
| 777 | 781 | ||
| 778 | static void blk_add_trace_bio_bounce(void *ignore, | 782 | static void blk_add_trace_bio_bounce(void *ignore, |
| 779 | struct request_queue *q, struct bio *bio) | 783 | struct request_queue *q, struct bio *bio) |
| 780 | { | 784 | { |
| 781 | blk_add_trace_bio(q, bio, BLK_TA_BOUNCE); | 785 | blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); |
| 782 | } | 786 | } |
| 783 | 787 | ||
| 784 | static void blk_add_trace_bio_complete(void *ignore, | 788 | static void blk_add_trace_bio_complete(void *ignore, |
| 785 | struct request_queue *q, struct bio *bio) | 789 | struct request_queue *q, struct bio *bio, |
| 790 | int error) | ||
| 786 | { | 791 | { |
| 787 | blk_add_trace_bio(q, bio, BLK_TA_COMPLETE); | 792 | blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); |
| 788 | } | 793 | } |
| 789 | 794 | ||
| 790 | static void blk_add_trace_bio_backmerge(void *ignore, | 795 | static void blk_add_trace_bio_backmerge(void *ignore, |
| 791 | struct request_queue *q, | 796 | struct request_queue *q, |
| 792 | struct bio *bio) | 797 | struct bio *bio) |
| 793 | { | 798 | { |
| 794 | blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); | 799 | blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0); |
| 795 | } | 800 | } |
| 796 | 801 | ||
| 797 | static void blk_add_trace_bio_frontmerge(void *ignore, | 802 | static void blk_add_trace_bio_frontmerge(void *ignore, |
| 798 | struct request_queue *q, | 803 | struct request_queue *q, |
| 799 | struct bio *bio) | 804 | struct bio *bio) |
| 800 | { | 805 | { |
| 801 | blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); | 806 | blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0); |
| 802 | } | 807 | } |
| 803 | 808 | ||
| 804 | static void blk_add_trace_bio_queue(void *ignore, | 809 | static void blk_add_trace_bio_queue(void *ignore, |
| 805 | struct request_queue *q, struct bio *bio) | 810 | struct request_queue *q, struct bio *bio) |
| 806 | { | 811 | { |
| 807 | blk_add_trace_bio(q, bio, BLK_TA_QUEUE); | 812 | blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0); |
| 808 | } | 813 | } |
| 809 | 814 | ||
| 810 | static void blk_add_trace_getrq(void *ignore, | 815 | static void blk_add_trace_getrq(void *ignore, |
| @@ -812,7 +817,7 @@ static void blk_add_trace_getrq(void *ignore, | |||
| 812 | struct bio *bio, int rw) | 817 | struct bio *bio, int rw) |
| 813 | { | 818 | { |
| 814 | if (bio) | 819 | if (bio) |
| 815 | blk_add_trace_bio(q, bio, BLK_TA_GETRQ); | 820 | blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0); |
| 816 | else { | 821 | else { |
| 817 | struct blk_trace *bt = q->blk_trace; | 822 | struct blk_trace *bt = q->blk_trace; |
| 818 | 823 | ||
| @@ -827,7 +832,7 @@ static void blk_add_trace_sleeprq(void *ignore, | |||
| 827 | struct bio *bio, int rw) | 832 | struct bio *bio, int rw) |
| 828 | { | 833 | { |
| 829 | if (bio) | 834 | if (bio) |
| 830 | blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ); | 835 | blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0); |
| 831 | else { | 836 | else { |
| 832 | struct blk_trace *bt = q->blk_trace; | 837 | struct blk_trace *bt = q->blk_trace; |
| 833 | 838 | ||
| @@ -887,7 +892,7 @@ static void blk_add_trace_split(void *ignore, | |||
| 887 | } | 892 | } |
| 888 | 893 | ||
| 889 | /** | 894 | /** |
| 890 | * blk_add_trace_remap - Add a trace for a remap operation | 895 | * blk_add_trace_bio_remap - Add a trace for a bio-remap operation |
| 891 | * @ignore: trace callback data parameter (not used) | 896 | * @ignore: trace callback data parameter (not used) |
| 892 | * @q: queue the io is for | 897 | * @q: queue the io is for |
| 893 | * @bio: the source bio | 898 | * @bio: the source bio |
| @@ -899,9 +904,9 @@ static void blk_add_trace_split(void *ignore, | |||
| 899 | * it spans a stripe (or similar). Add a trace for that action. | 904 | * it spans a stripe (or similar). Add a trace for that action. |
| 900 | * | 905 | * |
| 901 | **/ | 906 | **/ |
| 902 | static void blk_add_trace_remap(void *ignore, | 907 | static void blk_add_trace_bio_remap(void *ignore, |
| 903 | struct request_queue *q, struct bio *bio, | 908 | struct request_queue *q, struct bio *bio, |
| 904 | dev_t dev, sector_t from) | 909 | dev_t dev, sector_t from) |
| 905 | { | 910 | { |
| 906 | struct blk_trace *bt = q->blk_trace; | 911 | struct blk_trace *bt = q->blk_trace; |
| 907 | struct blk_io_trace_remap r; | 912 | struct blk_io_trace_remap r; |
| @@ -1016,7 +1021,7 @@ static void blk_register_tracepoints(void) | |||
| 1016 | WARN_ON(ret); | 1021 | WARN_ON(ret); |
| 1017 | ret = register_trace_block_split(blk_add_trace_split, NULL); | 1022 | ret = register_trace_block_split(blk_add_trace_split, NULL); |
| 1018 | WARN_ON(ret); | 1023 | WARN_ON(ret); |
| 1019 | ret = register_trace_block_remap(blk_add_trace_remap, NULL); | 1024 | ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); |
| 1020 | WARN_ON(ret); | 1025 | WARN_ON(ret); |
| 1021 | ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); | 1026 | ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); |
| 1022 | WARN_ON(ret); | 1027 | WARN_ON(ret); |
| @@ -1025,7 +1030,7 @@ static void blk_register_tracepoints(void) | |||
| 1025 | static void blk_unregister_tracepoints(void) | 1030 | static void blk_unregister_tracepoints(void) |
| 1026 | { | 1031 | { |
| 1027 | unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); | 1032 | unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); |
| 1028 | unregister_trace_block_remap(blk_add_trace_remap, NULL); | 1033 | unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); |
| 1029 | unregister_trace_block_split(blk_add_trace_split, NULL); | 1034 | unregister_trace_block_split(blk_add_trace_split, NULL); |
| 1030 | unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); | 1035 | unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); |
| 1031 | unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); | 1036 | unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f8cf959bad45..dc53ecb80589 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -1313,12 +1313,10 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | |||
| 1313 | 1313 | ||
| 1314 | __this_cpu_inc(user_stack_count); | 1314 | __this_cpu_inc(user_stack_count); |
| 1315 | 1315 | ||
| 1316 | |||
| 1317 | |||
| 1318 | event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, | 1316 | event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, |
| 1319 | sizeof(*entry), flags, pc); | 1317 | sizeof(*entry), flags, pc); |
| 1320 | if (!event) | 1318 | if (!event) |
| 1321 | return; | 1319 | goto out_drop_count; |
| 1322 | entry = ring_buffer_event_data(event); | 1320 | entry = ring_buffer_event_data(event); |
| 1323 | 1321 | ||
| 1324 | entry->tgid = current->tgid; | 1322 | entry->tgid = current->tgid; |
| @@ -1333,8 +1331,8 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | |||
| 1333 | if (!filter_check_discard(call, entry, buffer, event)) | 1331 | if (!filter_check_discard(call, entry, buffer, event)) |
| 1334 | ring_buffer_unlock_commit(buffer, event); | 1332 | ring_buffer_unlock_commit(buffer, event); |
| 1335 | 1333 | ||
| 1334 | out_drop_count: | ||
| 1336 | __this_cpu_dec(user_stack_count); | 1335 | __this_cpu_dec(user_stack_count); |
| 1337 | |||
| 1338 | out: | 1336 | out: |
| 1339 | preempt_enable(); | 1337 | preempt_enable(); |
| 1340 | } | 1338 | } |
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index e3dfecaf13e6..6cf223764be8 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h | |||
| @@ -53,7 +53,7 @@ | |||
| 53 | */ | 53 | */ |
| 54 | 54 | ||
| 55 | /* | 55 | /* |
| 56 | * Function trace entry - function address and parent function addres: | 56 | * Function trace entry - function address and parent function address: |
| 57 | */ | 57 | */ |
| 58 | FTRACE_ENTRY(function, ftrace_entry, | 58 | FTRACE_ENTRY(function, ftrace_entry, |
| 59 | 59 | ||
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 5cf8c602b880..92b6e1e12d98 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -453,14 +453,6 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1) | |||
| 453 | * Stubs: | 453 | * Stubs: |
| 454 | */ | 454 | */ |
| 455 | 455 | ||
| 456 | void early_boot_irqs_off(void) | ||
| 457 | { | ||
| 458 | } | ||
| 459 | |||
| 460 | void early_boot_irqs_on(void) | ||
| 461 | { | ||
| 462 | } | ||
| 463 | |||
| 464 | void trace_softirqs_on(unsigned long ip) | 456 | void trace_softirqs_on(unsigned long ip) |
| 465 | { | 457 | { |
| 466 | } | 458 | } |
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index bac752f0cfb5..b706529b4fc7 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
| @@ -23,9 +23,6 @@ static int syscall_exit_register(struct ftrace_event_call *event, | |||
| 23 | static int syscall_enter_define_fields(struct ftrace_event_call *call); | 23 | static int syscall_enter_define_fields(struct ftrace_event_call *call); |
| 24 | static int syscall_exit_define_fields(struct ftrace_event_call *call); | 24 | static int syscall_exit_define_fields(struct ftrace_event_call *call); |
| 25 | 25 | ||
| 26 | /* All syscall exit events have the same fields */ | ||
| 27 | static LIST_HEAD(syscall_exit_fields); | ||
| 28 | |||
| 29 | static struct list_head * | 26 | static struct list_head * |
| 30 | syscall_get_enter_fields(struct ftrace_event_call *call) | 27 | syscall_get_enter_fields(struct ftrace_event_call *call) |
| 31 | { | 28 | { |
| @@ -34,34 +31,28 @@ syscall_get_enter_fields(struct ftrace_event_call *call) | |||
| 34 | return &entry->enter_fields; | 31 | return &entry->enter_fields; |
| 35 | } | 32 | } |
| 36 | 33 | ||
| 37 | static struct list_head * | ||
| 38 | syscall_get_exit_fields(struct ftrace_event_call *call) | ||
| 39 | { | ||
| 40 | return &syscall_exit_fields; | ||
| 41 | } | ||
| 42 | |||
| 43 | struct trace_event_functions enter_syscall_print_funcs = { | 34 | struct trace_event_functions enter_syscall_print_funcs = { |
| 44 | .trace = print_syscall_enter, | 35 | .trace = print_syscall_enter, |
| 45 | }; | 36 | }; |
| 46 | 37 | ||
| 47 | struct trace_event_functions exit_syscall_print_funcs = { | 38 | struct trace_event_functions exit_syscall_print_funcs = { |
| 48 | .trace = print_syscall_exit, | 39 | .trace = print_syscall_exit, |
| 49 | }; | 40 | }; |
| 50 | 41 | ||
| 51 | struct ftrace_event_class event_class_syscall_enter = { | 42 | struct ftrace_event_class event_class_syscall_enter = { |
| 52 | .system = "syscalls", | 43 | .system = "syscalls", |
| 53 | .reg = syscall_enter_register, | 44 | .reg = syscall_enter_register, |
| 54 | .define_fields = syscall_enter_define_fields, | 45 | .define_fields = syscall_enter_define_fields, |
| 55 | .get_fields = syscall_get_enter_fields, | 46 | .get_fields = syscall_get_enter_fields, |
| 56 | .raw_init = init_syscall_trace, | 47 | .raw_init = init_syscall_trace, |
| 57 | }; | 48 | }; |
| 58 | 49 | ||
| 59 | struct ftrace_event_class event_class_syscall_exit = { | 50 | struct ftrace_event_class event_class_syscall_exit = { |
| 60 | .system = "syscalls", | 51 | .system = "syscalls", |
| 61 | .reg = syscall_exit_register, | 52 | .reg = syscall_exit_register, |
| 62 | .define_fields = syscall_exit_define_fields, | 53 | .define_fields = syscall_exit_define_fields, |
| 63 | .get_fields = syscall_get_exit_fields, | 54 | .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields), |
| 64 | .raw_init = init_syscall_trace, | 55 | .raw_init = init_syscall_trace, |
| 65 | }; | 56 | }; |
| 66 | 57 | ||
| 67 | extern unsigned long __start_syscalls_metadata[]; | 58 | extern unsigned long __start_syscalls_metadata[]; |
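Dropping the get_fields() callback in favor of a statically initialized list works because LIST_HEAD_INIT can name the very object it is initializing. The self-referential pattern in isolation (demo names are illustrative):

#include <linux/list.h>

struct demo_class {
	const char	 *name;
	struct list_head fields;
};

/* fields is a valid empty (self-linked) list before any code runs */
static struct demo_class demo = {
	.name	= "demo",
	.fields	= LIST_HEAD_INIT(demo.fields),
};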
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 25915832291a..9da289c34f22 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
| @@ -12,6 +12,8 @@ | |||
| 12 | #include <linux/highuid.h> | 12 | #include <linux/highuid.h> |
| 13 | #include <linux/cred.h> | 13 | #include <linux/cred.h> |
| 14 | 14 | ||
| 15 | static struct kmem_cache *user_ns_cachep __read_mostly; | ||
| 16 | |||
| 15 | /* | 17 | /* |
| 16 | * Create a new user namespace, deriving the creator from the user in the | 18 | * Create a new user namespace, deriving the creator from the user in the |
| 17 | * passed credentials, and replacing that user with the new root user for the | 19 | * passed credentials, and replacing that user with the new root user for the |
| @@ -26,7 +28,7 @@ int create_user_ns(struct cred *new) | |||
| 26 | struct user_struct *root_user; | 28 | struct user_struct *root_user; |
| 27 | int n; | 29 | int n; |
| 28 | 30 | ||
| 29 | ns = kmalloc(sizeof(struct user_namespace), GFP_KERNEL); | 31 | ns = kmem_cache_alloc(user_ns_cachep, GFP_KERNEL); |
| 30 | if (!ns) | 32 | if (!ns) |
| 31 | return -ENOMEM; | 33 | return -ENOMEM; |
| 32 | 34 | ||
| @@ -38,7 +40,7 @@ int create_user_ns(struct cred *new) | |||
| 38 | /* Alloc new root user. */ | 40 | /* Alloc new root user. */ |
| 39 | root_user = alloc_uid(ns, 0); | 41 | root_user = alloc_uid(ns, 0); |
| 40 | if (!root_user) { | 42 | if (!root_user) { |
| 41 | kfree(ns); | 43 | kmem_cache_free(user_ns_cachep, ns); |
| 42 | return -ENOMEM; | 44 | return -ENOMEM; |
| 43 | } | 45 | } |
| 44 | 46 | ||
| @@ -71,7 +73,7 @@ static void free_user_ns_work(struct work_struct *work) | |||
| 71 | struct user_namespace *ns = | 73 | struct user_namespace *ns = |
| 72 | container_of(work, struct user_namespace, destroyer); | 74 | container_of(work, struct user_namespace, destroyer); |
| 73 | free_uid(ns->creator); | 75 | free_uid(ns->creator); |
| 74 | kfree(ns); | 76 | kmem_cache_free(user_ns_cachep, ns); |
| 75 | } | 77 | } |
| 76 | 78 | ||
| 77 | void free_user_ns(struct kref *kref) | 79 | void free_user_ns(struct kref *kref) |
| @@ -126,3 +128,10 @@ gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t | |||
| 126 | /* No useful relationship so no mapping */ | 128 | /* No useful relationship so no mapping */ |
| 127 | return overflowgid; | 129 | return overflowgid; |
| 128 | } | 130 | } |
| 131 | |||
| 132 | static __init int user_namespaces_init(void) | ||
| 133 | { | ||
| 134 | user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC); | ||
| 135 | return 0; | ||
| 136 | } | ||
| 137 | module_init(user_namespaces_init); | ||
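Moving user_namespace allocations from kmalloc() to a dedicated slab cache gives them exact-size objects and their own line in /proc/slabinfo; SLAB_PANIC makes boot fail loudly if the cache cannot be created. The KMEM_CACHE() macro derives the cache name, size and alignment from the struct; the generic pattern (demo names are illustrative):

#include <linux/slab.h>
#include <linux/init.h>

struct demo_obj {
	int id;
};

static struct kmem_cache *demo_cachep;

static int __init demo_cache_init(void)
{
	/* cache named "demo_obj", sized and aligned from the struct */
	demo_cachep = KMEM_CACHE(demo_obj, SLAB_PANIC);
	return 0;
}

/* allocate with kmem_cache_alloc(demo_cachep, GFP_KERNEL) and free
 * with kmem_cache_free(demo_cachep, obj), as the patch above does */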
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8ee6ec82f88a..11869faa6819 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -768,7 +768,11 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) | |||
| 768 | 768 | ||
| 769 | worker->flags &= ~flags; | 769 | worker->flags &= ~flags; |
| 770 | 770 | ||
| 771 | /* if transitioning out of NOT_RUNNING, increment nr_running */ | 771 | /* |
| 772 | * If transitioning out of NOT_RUNNING, increment nr_running. Note | ||
| 773 | * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is a mask | ||
| 774 | * of multiple flags, not a single flag. | ||
| 775 | */ | ||
| 772 | if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) | 776 | if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) |
| 773 | if (!(worker->flags & WORKER_NOT_RUNNING)) | 777 | if (!(worker->flags & WORKER_NOT_RUNNING)) |
| 774 | atomic_inc(get_gcwq_nr_running(gcwq->cpu)); | 778 | atomic_inc(get_gcwq_nr_running(gcwq->cpu)); |
| @@ -1840,7 +1844,7 @@ __acquires(&gcwq->lock) | |||
| 1840 | spin_unlock_irq(&gcwq->lock); | 1844 | spin_unlock_irq(&gcwq->lock); |
| 1841 | 1845 | ||
| 1842 | work_clear_pending(work); | 1846 | work_clear_pending(work); |
| 1843 | lock_map_acquire(&cwq->wq->lockdep_map); | 1847 | lock_map_acquire_read(&cwq->wq->lockdep_map); |
| 1844 | lock_map_acquire(&lockdep_map); | 1848 | lock_map_acquire(&lockdep_map); |
| 1845 | trace_workqueue_execute_start(work); | 1849 | trace_workqueue_execute_start(work); |
| 1846 | f(work); | 1850 | f(work); |
| @@ -2384,8 +2388,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, | |||
| 2384 | insert_wq_barrier(cwq, barr, work, worker); | 2388 | insert_wq_barrier(cwq, barr, work, worker); |
| 2385 | spin_unlock_irq(&gcwq->lock); | 2389 | spin_unlock_irq(&gcwq->lock); |
| 2386 | 2390 | ||
| 2387 | lock_map_acquire(&cwq->wq->lockdep_map); | 2391 | /* |
| 2392 | * If @max_active is 1 or rescuer is in use, flushing another work | ||
| 2393 | * item on the same workqueue may lead to deadlock. Make sure the | ||
| 2394 | * flusher is not running on the same workqueue by verifying write | ||
| 2395 | * access. | ||
| 2396 | */ | ||
| 2397 | if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER) | ||
| 2398 | lock_map_acquire(&cwq->wq->lockdep_map); | ||
| 2399 | else | ||
| 2400 | lock_map_acquire_read(&cwq->wq->lockdep_map); | ||
| 2388 | lock_map_release(&cwq->wq->lockdep_map); | 2401 | lock_map_release(&cwq->wq->lockdep_map); |
| 2402 | |||
| 2389 | return true; | 2403 | return true; |
| 2390 | already_gone: | 2404 | already_gone: |
| 2391 | spin_unlock_irq(&gcwq->lock); | 2405 | spin_unlock_irq(&gcwq->lock); |
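The conditional write-acquire in start_flush_work() exists because flushing from inside a work item can deadlock when the workqueue is ordered (saved_max_active == 1) or depends on its rescuer; taking the lockdep map for write in exactly those cases lets lockdep flag the pattern. A hedged sketch of the offending pattern (demo names are illustrative):

#include <linux/workqueue.h>

static void second_fn(struct work_struct *work) { }
static DECLARE_WORK(second_work, second_fn);

static void first_fn(struct work_struct *work)
{
	/*
	 * If both items are queued on the same max_active == 1 workqueue,
	 * second_work cannot start until first_fn() returns: deadlock.
	 */
	flush_work(&second_work);
}
static DECLARE_WORK(first_work, first_fn);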
