Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c                 |  32
-rw-r--r--  kernel/auditsc.c               |  24
-rw-r--r--  kernel/cgroup.c                |  10
-rw-r--r--  kernel/cpu.c                   |   2
-rw-r--r--  kernel/cpuset.c                |   2
-rw-r--r--  kernel/fork.c                  |  16
-rw-r--r--  kernel/hrtimer.c               | 331
-rw-r--r--  kernel/irq/internals.h         |   2
-rw-r--r--  kernel/irq/manage.c            |  68
-rw-r--r--  kernel/irq/migration.c         |  11
-rw-r--r--  kernel/irq/proc.c              |   2
-rw-r--r--  kernel/latencytop.c            |   2
-rw-r--r--  kernel/lockdep.c               |   4
-rw-r--r--  kernel/panic.c                 |   1
-rw-r--r--  kernel/posix-cpu-timers.c      |   2
-rw-r--r--  kernel/posix-timers.c          |  46
-rw-r--r--  kernel/power/swap.c            |   2
-rw-r--r--  kernel/profile.c               |   4
-rw-r--r--  kernel/ptrace.c                |   4
-rw-r--r--  kernel/relay.c                 |   7
-rw-r--r--  kernel/sched.c                 |   9
-rw-r--r--  kernel/sched_clock.c           |   6
-rw-r--r--  kernel/softlockup.c            |   2
-rw-r--r--  kernel/sysctl.c                |  10
-rw-r--r--  kernel/time/ntp.c              |   4
-rw-r--r--  kernel/time/tick-sched.c       |  44
-rw-r--r--  kernel/time/timekeeping.c      |  22
-rw-r--r--  kernel/trace/ring_buffer.c     |   2
-rw-r--r--  kernel/trace/trace_mmiotrace.c |  16
-rw-r--r--  kernel/trace/trace_stack.c     |  24
-rw-r--r--  kernel/trace/trace_sysprof.c   |   1
31 files changed, 304 insertions, 408 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 4414e93d8750..ce6d8ea3131e 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -61,8 +61,11 @@
 
 #include "audit.h"
 
-/* No auditing will take place until audit_initialized != 0.
+/* No auditing will take place until audit_initialized == AUDIT_INITIALIZED.
  * (Initialization happens after skb_init is called.) */
+#define AUDIT_DISABLED		-1
+#define AUDIT_UNINITIALIZED	0
+#define AUDIT_INITIALIZED	1
 static int audit_initialized;
 
 #define AUDIT_OFF	0
@@ -965,6 +968,9 @@ static int __init audit_init(void)
 {
 	int i;
 
+	if (audit_initialized == AUDIT_DISABLED)
+		return 0;
+
 	printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
 	       audit_default ? "enabled" : "disabled");
 	audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0,
@@ -976,7 +982,7 @@ static int __init audit_init(void)
 
 	skb_queue_head_init(&audit_skb_queue);
 	skb_queue_head_init(&audit_skb_hold_queue);
-	audit_initialized = 1;
+	audit_initialized = AUDIT_INITIALIZED;
 	audit_enabled = audit_default;
 	audit_ever_enabled |= !!audit_default;
 
@@ -999,13 +1005,21 @@ __initcall(audit_init);
 static int __init audit_enable(char *str)
 {
 	audit_default = !!simple_strtol(str, NULL, 0);
-	printk(KERN_INFO "audit: %s%s\n",
-	       audit_default ? "enabled" : "disabled",
-	       audit_initialized ? "" : " (after initialization)");
-	if (audit_initialized) {
+	if (!audit_default)
+		audit_initialized = AUDIT_DISABLED;
+
+	printk(KERN_INFO "audit: %s", audit_default ? "enabled" : "disabled");
+
+	if (audit_initialized == AUDIT_INITIALIZED) {
 		audit_enabled = audit_default;
 		audit_ever_enabled |= !!audit_default;
+	} else if (audit_initialized == AUDIT_UNINITIALIZED) {
+		printk(" (after initialization)");
+	} else {
+		printk(" (until reboot)");
 	}
+	printk("\n");
+
 	return 1;
 }
 
@@ -1107,9 +1121,7 @@ unsigned int audit_serial(void)
 static inline void audit_get_stamp(struct audit_context *ctx,
 				   struct timespec *t, unsigned int *serial)
 {
-	if (ctx)
-		auditsc_get_stamp(ctx, t, serial);
-	else {
+	if (!ctx || !auditsc_get_stamp(ctx, t, serial)) {
 		*t = CURRENT_TIME;
 		*serial = audit_serial();
 	}
@@ -1146,7 +1158,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	int reserve;
 	unsigned long timeout_start = jiffies;
 
-	if (!audit_initialized)
+	if (audit_initialized != AUDIT_INITIALIZED)
 		return NULL;
 
 	if (unlikely(audit_filter_type(type)))
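
The audit.c hunks turn audit_initialized into a tri-state so that booting with audit=0 keeps auditing off for good instead of merely deferring it. A condensed sketch of the resulting boot flow, using only names that appear in the hunks above (illustrative, not additional patch content):

/* Condensed from the hunks above, for orientation only. */
static int audit_initialized;			/* AUDIT_UNINITIALIZED == 0 */

static int __init audit_enable(char *str)	/* "audit=0|1" boot parameter */
{
	audit_default = !!simple_strtol(str, NULL, 0);
	if (!audit_default)
		audit_initialized = AUDIT_DISABLED;	/* off until reboot */
	return 1;
}

static int __init audit_init(void)		/* initcall, runs later */
{
	if (audit_initialized == AUDIT_DISABLED)
		return 0;		/* the netlink socket is never created */
	/* ... netlink/queue setup as in the hunk ... */
	audit_initialized = AUDIT_INITIALIZED;
	return 0;
}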
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index cf5bc2f5f9c3..2a3f0afc4d2a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1459,7 +1459,6 @@ void audit_free(struct task_struct *tsk)
 
 /**
  * audit_syscall_entry - fill in an audit record at syscall entry
- * @tsk: task being audited
  * @arch: architecture type
  * @major: major syscall type (function)
  * @a1: additional syscall register 1
@@ -1548,9 +1547,25 @@ void audit_syscall_entry(int arch, int major,
 		context->ppid = 0;
 }
 
+void audit_finish_fork(struct task_struct *child)
+{
+	struct audit_context *ctx = current->audit_context;
+	struct audit_context *p = child->audit_context;
+	if (!p || !ctx || !ctx->auditable)
+		return;
+	p->arch = ctx->arch;
+	p->major = ctx->major;
+	memcpy(p->argv, ctx->argv, sizeof(ctx->argv));
+	p->ctime = ctx->ctime;
+	p->dummy = ctx->dummy;
+	p->auditable = ctx->auditable;
+	p->in_syscall = ctx->in_syscall;
+	p->filterkey = kstrdup(ctx->filterkey, GFP_KERNEL);
+	p->ppid = current->pid;
+}
+
 /**
  * audit_syscall_exit - deallocate audit context after a system call
- * @tsk: task being audited
  * @valid: success/failure flag
  * @return_code: syscall return value
  *
@@ -1942,15 +1957,18 @@ EXPORT_SYMBOL_GPL(__audit_inode_child);
  *
  * Also sets the context as auditable.
  */
-void auditsc_get_stamp(struct audit_context *ctx,
+int auditsc_get_stamp(struct audit_context *ctx,
 		       struct timespec *t, unsigned int *serial)
 {
+	if (!ctx->in_syscall)
+		return 0;
 	if (!ctx->serial)
 		ctx->serial = audit_serial();
 	t->tv_sec  = ctx->ctime.tv_sec;
 	t->tv_nsec = ctx->ctime.tv_nsec;
 	*serial    = ctx->serial;
 	ctx->auditable = 1;
+	return 1;
 }
 
 /* global counter which is incremented every time something logs in */
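
Two things worth noting about the auditsc.c hunks: auditsc_get_stamp() now reports whether the context really carried an in-syscall timestamp, and the new audit_finish_fork() is the helper that do_fork() calls (see the kernel/fork.c hunk below). A sketch of how the new return value is consumed, mirroring the audit_get_stamp() hunk in kernel/audit.c above (illustrative only):

	if (!ctx || !auditsc_get_stamp(ctx, t, serial)) {
		/* no task context, or the task is not inside a syscall
		 * (e.g. a kernel thread): fall back to a fresh stamp */
		*t = CURRENT_TIME;
		*serial = audit_serial();
	}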
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index fe00b3b983a8..2606d0fb4e54 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -702,7 +702,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
 	 * any child cgroups exist. This is theoretically supportable
 	 * but involves complex error handling, so it's being left until
 	 * later */
-	if (!list_empty(&cgrp->children))
+	if (root->number_of_cgroups > 1)
 		return -EBUSY;
 
 	/* Process each subsystem */
@@ -1024,7 +1024,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 		if (ret == -EBUSY) {
 			mutex_unlock(&cgroup_mutex);
 			mutex_unlock(&inode->i_mutex);
-			goto drop_new_super;
+			goto free_cg_links;
 		}
 
 		/* EBUSY should be the only error here */
@@ -1073,10 +1073,11 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 
 	return simple_set_mnt(mnt, sb);
 
+ free_cg_links:
+	free_cg_links(&tmp_cg_links);
 drop_new_super:
 	up_write(&sb->s_umount);
 	deactivate_super(sb);
-	free_cg_links(&tmp_cg_links);
 	return ret;
 }
 
@@ -2934,9 +2935,6 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 again:
 	root = subsys->root;
 	if (root == &rootnode) {
-		printk(KERN_INFO
-		       "Not cloning cgroup for unused subsystem %s\n",
-		       subsys->name);
 		mutex_unlock(&cgroup_mutex);
 		return 0;
 	}
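
The cgroup_get_sb() hunks rework the error unwind into the usual stacked-label form: the -EBUSY path now jumps to a label that frees the temporary cg_links before the new superblock is dropped, rather than freeing them after deactivate_super(). A generic sketch of the idiom (do_step_that_may_fail() is a hypothetical placeholder, not from the patch):

	ret = do_step_that_may_fail();	/* hypothetical step */
	if (ret)
		goto free_cg_links;	/* links allocated, sb still held */
	/* ... */
 free_cg_links:
	free_cg_links(&tmp_cg_links);	/* release the later resource first */
 drop_new_super:
	up_write(&sb->s_umount);
	deactivate_super(sb);
	return ret;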
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5a732c5ef08b..8ea32e8d68b0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -462,7 +462,7 @@ out:
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
-void notify_cpu_starting(unsigned int cpu)
+void __cpuinit notify_cpu_starting(unsigned int cpu)
 {
 	unsigned long val = CPU_STARTING;
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index da7ff6137f37..96c0ba13b8cd 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -585,7 +585,7 @@ static int generate_sched_domains(cpumask_t **domains,
 	int i, j, k;		/* indices for partition finding loops */
 	cpumask_t *doms;	/* resulting partition; i.e. sched domains */
 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
-	int ndoms;		/* number of sched domains in result */
+	int ndoms = 0;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] cpumask_t slot */
 
 	doms = NULL;
diff --git a/kernel/fork.c b/kernel/fork.c
index 2a372a0e206f..495da2e9a8b4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -315,17 +315,20 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		file = tmp->vm_file;
 		if (file) {
 			struct inode *inode = file->f_path.dentry->d_inode;
+			struct address_space *mapping = file->f_mapping;
+
 			get_file(file);
 			if (tmp->vm_flags & VM_DENYWRITE)
 				atomic_dec(&inode->i_writecount);
-
-			/* insert tmp into the share list, just after mpnt */
-			spin_lock(&file->f_mapping->i_mmap_lock);
-			tmp->vm_truncate_count = mpnt->vm_truncate_count;
-			flush_dcache_mmap_lock(file->f_mapping);
+			spin_lock(&mapping->i_mmap_lock);
+			if (tmp->vm_flags & VM_SHARED)
+				mapping->i_mmap_writable++;
+			tmp->vm_truncate_count = mpnt->vm_truncate_count;
+			flush_dcache_mmap_lock(mapping);
+			/* insert tmp into the share list, just after mpnt */
 			vma_prio_tree_add(tmp, mpnt);
-			flush_dcache_mmap_unlock(file->f_mapping);
-			spin_unlock(&file->f_mapping->i_mmap_lock);
+			flush_dcache_mmap_unlock(mapping);
+			spin_unlock(&mapping->i_mmap_lock);
 		}
 
 		/*
@@ -1398,6 +1401,7 @@ long do_fork(unsigned long clone_flags,
 		init_completion(&vfork);
 	}
 
+	audit_finish_fork(p);
 	tracehook_report_clone(trace, regs, clone_flags, nr, p);
 
 	/*
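
The dup_mmap() hunk makes fork keep address_space accounting honest: when a shared VMA is duplicated into the child, the mapping's i_mmap_writable count is bumped under i_mmap_lock. For orientation, this is the counter that mapping_writably_mapped() tests; the definition below is quoted from include/linux/fs.h of kernels of this vintage (shown for context, not part of the patch):

static inline int mapping_writably_mapped(struct address_space *mapping)
{
	return mapping->i_mmap_writable != 0;
}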
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 47e63349d1b2..bda9cb924276 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -442,22 +442,6 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 #endif
 
-/*
- * Check, whether the timer is on the callback pending list
- */
-static inline int hrtimer_cb_pending(const struct hrtimer *timer)
-{
-	return timer->state & HRTIMER_STATE_PENDING;
-}
-
-/*
- * Remove a timer from the callback pending list
- */
-static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
-{
-	list_del_init(&timer->cb_entry);
-}
-
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -651,6 +635,8 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
 {
 }
 
+static void __run_hrtimer(struct hrtimer *timer);
+
 /*
 * When High resolution timers are active, try to reprogram. Note, that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@ -661,31 +647,14 @@
 					    struct hrtimer_clock_base *base)
 {
 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-
-		/* Timer is expired, act upon the callback mode */
-		switch(timer->cb_mode) {
-		case HRTIMER_CB_IRQSAFE_PERCPU:
-		case HRTIMER_CB_IRQSAFE_UNLOCKED:
-			/*
-			 * This is solely for the sched tick emulation with
-			 * dynamic tick support to ensure that we do not
-			 * restart the tick right on the edge and end up with
-			 * the tick timer in the softirq ! The calling site
-			 * takes care of this. Also used for hrtimer sleeper !
-			 */
-			debug_hrtimer_deactivate(timer);
-			return 1;
-		case HRTIMER_CB_SOFTIRQ:
-			/*
-			 * Move everything else into the softirq pending list !
-			 */
-			list_add_tail(&timer->cb_entry,
-				      &base->cpu_base->cb_pending);
-			timer->state = HRTIMER_STATE_PENDING;
-			return 1;
-		default:
-			BUG();
-		}
+		/*
+		 * XXX: recursion check?
+		 * hrtimer_forward() should round up with timer granularity
+		 * so that we never get into inf recursion here,
+		 * it doesn't do that though
+		 */
+		__run_hrtimer(timer);
+		return 1;
 	}
 	return 0;
 }
@@ -724,11 +693,6 @@ static int hrtimer_switch_to_hres(void)
 	return 1;
 }
 
-static inline void hrtimer_raise_softirq(void)
-{
-	raise_softirq(HRTIMER_SOFTIRQ);
-}
-
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -747,7 +711,6 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
 {
 	return 0;
 }
-static inline void hrtimer_raise_softirq(void) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -890,10 +853,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base,
 			     unsigned long newstate, int reprogram)
 {
-	/* High res. callback list. NOP for !HIGHRES */
-	if (hrtimer_cb_pending(timer))
-		hrtimer_remove_cb_pending(timer);
-	else {
+	if (timer->state & HRTIMER_STATE_ENQUEUED) {
 		/*
 		 * Remove the timer from the rbtree and replace the
 		 * first entry pointer if necessary.
@@ -953,7 +913,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
-	int ret, raise;
+	int ret;
 
 	base = lock_hrtimer_base(timer, &flags);
 
@@ -988,26 +948,8 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 	enqueue_hrtimer(timer, new_base,
 			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
 
-	/*
-	 * The timer may be expired and moved to the cb_pending
-	 * list. We can not raise the softirq with base lock held due
-	 * to a possible deadlock with runqueue lock.
-	 */
-	raise = timer->state == HRTIMER_STATE_PENDING;
-
-	/*
-	 * We use preempt_disable to prevent this task from migrating after
-	 * setting up the softirq and raising it. Otherwise, if me migrate
-	 * we will raise the softirq on the wrong CPU.
-	 */
-	preempt_disable();
-
 	unlock_hrtimer_base(timer, &flags);
 
-	if (raise)
-		hrtimer_raise_softirq();
-	preempt_enable();
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
@@ -1192,75 +1134,6 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 }
 EXPORT_SYMBOL_GPL(hrtimer_get_res);
 
-static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
-{
-	spin_lock_irq(&cpu_base->lock);
-
-	while (!list_empty(&cpu_base->cb_pending)) {
-		enum hrtimer_restart (*fn)(struct hrtimer *);
-		struct hrtimer *timer;
-		int restart;
-		int emulate_hardirq_ctx = 0;
-
-		timer = list_entry(cpu_base->cb_pending.next,
-				   struct hrtimer, cb_entry);
-
-		debug_hrtimer_deactivate(timer);
-		timer_stats_account_hrtimer(timer);
-
-		fn = timer->function;
-		/*
-		 * A timer might have been added to the cb_pending list
-		 * when it was migrated during a cpu-offline operation.
-		 * Emulate hardirq context for such timers.
-		 */
-		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-		    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
-			emulate_hardirq_ctx = 1;
-
-		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
-		spin_unlock_irq(&cpu_base->lock);
-
-		if (unlikely(emulate_hardirq_ctx)) {
-			local_irq_disable();
-			restart = fn(timer);
-			local_irq_enable();
-		} else
-			restart = fn(timer);
-
-		spin_lock_irq(&cpu_base->lock);
-
-		timer->state &= ~HRTIMER_STATE_CALLBACK;
-		if (restart == HRTIMER_RESTART) {
-			BUG_ON(hrtimer_active(timer));
-			/*
-			 * Enqueue the timer, allow reprogramming of the event
-			 * device
-			 */
-			enqueue_hrtimer(timer, timer->base, 1);
-		} else if (hrtimer_active(timer)) {
-			/*
-			 * If the timer was rearmed on another CPU, reprogram
-			 * the event device.
-			 */
-			struct hrtimer_clock_base *base = timer->base;
-
-			if (base->first == &timer->node &&
-			    hrtimer_reprogram(timer, base)) {
-				/*
-				 * Timer is expired. Thus move it from tree to
-				 * pending list again.
-				 */
-				__remove_hrtimer(timer, base,
-						 HRTIMER_STATE_PENDING, 0);
-				list_add_tail(&timer->cb_entry,
-					      &base->cpu_base->cb_pending);
-			}
-		}
-	}
-	spin_unlock_irq(&cpu_base->lock);
-}
-
 static void __run_hrtimer(struct hrtimer *timer)
 {
 	struct hrtimer_clock_base *base = timer->base;
@@ -1268,25 +1141,21 @@ static void __run_hrtimer(struct hrtimer *timer)
 	enum hrtimer_restart (*fn)(struct hrtimer *);
 	int restart;
 
+	WARN_ON(!irqs_disabled());
+
 	debug_hrtimer_deactivate(timer);
 	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
 	timer_stats_account_hrtimer(timer);
-
 	fn = timer->function;
-	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-	    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
-		/*
-		 * Used for scheduler timers, avoid lock inversion with
-		 * rq->lock and tasklist_lock.
-		 *
-		 * These timers are required to deal with enqueue expiry
-		 * themselves and are not allowed to migrate.
-		 */
-		spin_unlock(&cpu_base->lock);
-		restart = fn(timer);
-		spin_lock(&cpu_base->lock);
-	} else
-		restart = fn(timer);
+
+	/*
+	 * Because we run timers from hardirq context, there is no chance
+	 * they get migrated to another cpu, therefore its safe to unlock
+	 * the timer base.
+	 */
+	spin_unlock(&cpu_base->lock);
+	restart = fn(timer);
+	spin_lock(&cpu_base->lock);
 
 	/*
 	 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
@@ -1311,7 +1180,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now;
-	int i, raise = 0;
+	int i;
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
@@ -1360,16 +1229,6 @@
 				break;
 			}
 
-			/* Move softirq callbacks to the pending list */
-			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-				__remove_hrtimer(timer, base,
-						 HRTIMER_STATE_PENDING, 0);
-				list_add_tail(&timer->cb_entry,
-					      &base->cpu_base->cb_pending);
-				raise = 1;
-				continue;
-			}
-
 			__run_hrtimer(timer);
 		}
 		spin_unlock(&cpu_base->lock);
@@ -1383,10 +1242,6 @@
 		if (tick_program_event(expires_next, 0))
 			goto retry;
 	}
-
-	/* Raise softirq ? */
-	if (raise)
-		raise_softirq(HRTIMER_SOFTIRQ);
 }
 
 /**
@@ -1413,11 +1268,6 @@ void hrtimer_peek_ahead_timers(void)
 	local_irq_restore(flags);
 }
 
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
-	run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
-}
-
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
 /*
@@ -1429,8 +1279,6 @@ static void run_hrtimer_softirq(struct softirq_action *h)
 */
 void hrtimer_run_pending(void)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
 	if (hrtimer_hres_active())
 		return;
 
@@ -1444,8 +1292,6 @@
 	 */
 	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
 		hrtimer_switch_to_hres();
-
-	run_hrtimer_pending(cpu_base);
 }
 
 /*
@@ -1482,14 +1328,6 @@ void hrtimer_run_queues(void)
 					hrtimer_get_expires_tv64(timer))
 				break;
 
-			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-				__remove_hrtimer(timer, base,
-						 HRTIMER_STATE_PENDING, 0);
-				list_add_tail(&timer->cb_entry,
-					      &base->cpu_base->cb_pending);
-				continue;
-			}
-
 			__run_hrtimer(timer);
 		}
 		spin_unlock(&cpu_base->lock);
@@ -1516,9 +1354,6 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 {
 	sl->timer.function = hrtimer_wakeup;
 	sl->task = task;
-#ifdef CONFIG_HIGH_RES_TIMERS
-	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
-#endif
 }
 
 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@ -1655,18 +1490,16 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 		cpu_base->clock_base[i].cpu_base = cpu_base;
 
-	INIT_LIST_HEAD(&cpu_base->cb_pending);
 	hrtimer_init_hres(cpu_base);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-				struct hrtimer_clock_base *new_base, int dcpu)
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+				 struct hrtimer_clock_base *new_base)
 {
 	struct hrtimer *timer;
 	struct rb_node *node;
-	int raise = 0;
 
 	while ((node = rb_first(&old_base->active))) {
 		timer = rb_entry(node, struct hrtimer, node);
@@ -1674,18 +1507,6 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 		debug_hrtimer_deactivate(timer);
 
 		/*
-		 * Should not happen. Per CPU timers should be
-		 * canceled _before_ the migration code is called
-		 */
-		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
-			__remove_hrtimer(timer, old_base,
-					 HRTIMER_STATE_INACTIVE, 0);
-			WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
-			     timer, timer->function, dcpu);
-			continue;
-		}
-
-		/*
 		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
 		 * timer could be seen as !active and just vanish away
 		 * under us on another CPU
@@ -1693,69 +1514,34 @@
 		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
 		timer->base = new_base;
 		/*
-		 * Enqueue the timer. Allow reprogramming of the event device
+		 * Enqueue the timers on the new cpu, but do not reprogram
+		 * the timer as that would enable a deadlock between
+		 * hrtimer_enqueue_reprogramm() running the timer and us still
+		 * holding a nested base lock.
+		 *
+		 * Instead we tickle the hrtimer interrupt after the migration
+		 * is done, which will run all expired timers and re-programm
+		 * the timer device.
 		 */
-		enqueue_hrtimer(timer, new_base, 1);
+		enqueue_hrtimer(timer, new_base, 0);
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-		/*
-		 * Happens with high res enabled when the timer was
-		 * already expired and the callback mode is
-		 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
-		 * enqueue code does not move them to the soft irq
-		 * pending list for performance/latency reasons, but
-		 * in the migration state, we need to do that
-		 * otherwise we end up with a stale timer.
-		 */
-		if (timer->state == HRTIMER_STATE_MIGRATE) {
-			timer->state = HRTIMER_STATE_PENDING;
-			list_add_tail(&timer->cb_entry,
-				      &new_base->cpu_base->cb_pending);
-			raise = 1;
-		}
-#endif
 		/* Clear the migration state bit */
 		timer->state &= ~HRTIMER_STATE_MIGRATE;
 	}
-	return raise;
-}
-
-#ifdef CONFIG_HIGH_RES_TIMERS
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-				   struct hrtimer_cpu_base *new_base)
-{
-	struct hrtimer *timer;
-	int raise = 0;
-
-	while (!list_empty(&old_base->cb_pending)) {
-		timer = list_entry(old_base->cb_pending.next,
-				   struct hrtimer, cb_entry);
-
-		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
-		timer->base = &new_base->clock_base[timer->base->index];
-		list_add_tail(&timer->cb_entry, &new_base->cb_pending);
-		raise = 1;
-	}
-	return raise;
-}
-#else
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-				   struct hrtimer_cpu_base *new_base)
-{
-	return 0;
 }
-#endif
 
-static void migrate_hrtimers(int cpu)
+static int migrate_hrtimers(int scpu)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
-	int i, raise = 0;
+	int dcpu, i;
 
-	BUG_ON(cpu_online(cpu));
-	old_base = &per_cpu(hrtimer_bases, cpu);
+	BUG_ON(cpu_online(scpu));
+	old_base = &per_cpu(hrtimer_bases, scpu);
 	new_base = &get_cpu_var(hrtimer_bases);
 
-	tick_cancel_sched_timer(cpu);
+	dcpu = smp_processor_id();
+
+	tick_cancel_sched_timer(scpu);
 	/*
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
@@ -1764,41 +1550,47 @@
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		if (migrate_hrtimer_list(&old_base->clock_base[i],
-					 &new_base->clock_base[i], cpu))
-			raise = 1;
+		migrate_hrtimer_list(&old_base->clock_base[i],
+				     &new_base->clock_base[i]);
 	}
 
-	if (migrate_hrtimer_pending(old_base, new_base))
-		raise = 1;
-
 	spin_unlock(&old_base->lock);
 	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(hrtimer_bases);
 
-	if (raise)
-		hrtimer_raise_softirq();
+	return dcpu;
+}
+
+static void tickle_timers(void *arg)
+{
+	hrtimer_peek_ahead_timers();
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
 static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 					unsigned long action, void *hcpu)
 {
-	unsigned int cpu = (long)hcpu;
+	int scpu = (long)hcpu;
 
 	switch (action) {
 
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		init_hrtimers_cpu(cpu);
+		init_hrtimers_cpu(scpu);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
-		migrate_hrtimers(cpu);
+	{
+		int dcpu;
+
+		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
+		dcpu = migrate_hrtimers(scpu);
+		smp_call_function_single(dcpu, tickle_timers, NULL, 0);
 		break;
+	}
 #endif
 
 	default:
@@ -1817,9 +1609,6 @@ void __init hrtimers_init(void)
 	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
 			  (void *)(long)smp_processor_id());
 	register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
-	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
 }
 
 /**
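
The hrtimer.c changes remove the per-timer callback modes and the HRTIMER_SOFTIRQ machinery entirely: every callback now runs from hard interrupt context (note the new WARN_ON(!irqs_disabled()) in __run_hrtimer()), with the cpu_base lock dropped around the call. A before/after sketch from a timer user's point of view (my_callback and t are hypothetical names; HRTIMER_CB_SOFTIRQ is the pre-patch API):

/* Before: the user picked where the callback would run. */
hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
t.cb_mode = HRTIMER_CB_SOFTIRQ;		/* field removed by this change */
t.function = my_callback;

/* After: one mode only.  my_callback() is invoked in hardirq context,
 * so it must not sleep and should keep its work short. */
hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
t.function = my_callback;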
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index c9767e641980..64c1c7253dae 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -25,6 +25,8 @@ static inline void unregister_handler_proc(unsigned int irq,
 					   struct irqaction *action) { }
 #endif
 
+extern int irq_select_affinity_usr(unsigned int irq);
+
 /*
 * Debugging printout:
 */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c498a1b8c621..801addda3c43 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -82,24 +82,27 @@ int irq_can_set_affinity(unsigned int irq)
 int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
 
 	if (!desc->chip->set_affinity)
 		return -EINVAL;
 
+	spin_lock_irqsave(&desc->lock, flags);
+
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&desc->lock, flags);
 		desc->affinity = cpumask;
 		desc->chip->set_affinity(irq, cpumask);
-		spin_unlock_irqrestore(&desc->lock, flags);
-	} else
-		set_pending_irq(irq, cpumask);
+	} else {
+		desc->status |= IRQ_MOVE_PENDING;
+		desc->pending_mask = cpumask;
+	}
 #else
 	desc->affinity = cpumask;
 	desc->chip->set_affinity(irq, cpumask);
 #endif
+	desc->status |= IRQ_AFFINITY_SET;
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
@@ -107,24 +110,59 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 /*
 * Generic version of the affinity autoselector.
 */
-int irq_select_affinity(unsigned int irq)
+int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	cpumask_t mask;
-	struct irq_desc *desc;
 
 	if (!irq_can_set_affinity(irq))
 		return 0;
 
 	cpus_and(mask, cpu_online_map, irq_default_affinity);
 
-	desc = irq_to_desc(irq);
+	/*
+	 * Preserve an userspace affinity setup, but make sure that
+	 * one of the targets is online.
+	 */
+	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
+		if (cpus_intersects(desc->affinity, cpu_online_map))
+			mask = desc->affinity;
+		else
+			desc->status &= ~IRQ_AFFINITY_SET;
+	}
+
 	desc->affinity = mask;
 	desc->chip->set_affinity(irq, mask);
 
 	return 0;
 }
+#else
+static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+{
+	return irq_select_affinity(irq);
+}
 #endif
 
+/*
+ * Called when affinity is set via /proc/irq
+ */
+int irq_select_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	ret = do_irq_select_affinity(irq, desc);
+	spin_unlock_irqrestore(&desc->lock, flags);
+
+	return ret;
+}
+
+#else
+static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+{
+	return 0;
+}
 #endif
 
 /**
@@ -327,7 +365,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
 		 */
-		pr_warning("No set_type function for IRQ %d (%s)\n", irq,
+		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
 				chip ? (chip->name ? : "unknown") : "unknown");
 		return 0;
 	}
@@ -445,8 +483,12 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 		/* Undo nested disables: */
 		desc->depth = 1;
 
+		/* Exclude IRQ from balancing if requested */
+		if (new->flags & IRQF_NOBALANCING)
+			desc->status |= IRQ_NO_BALANCING;
+
 		/* Set default affinity mask once everything is setup */
-		irq_select_affinity(irq);
+		do_irq_select_affinity(irq, desc);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
@@ -459,10 +501,6 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
 	*p = new;
 
-	/* Exclude IRQ from balancing */
-	if (new->flags & IRQF_NOBALANCING)
-		desc->status |= IRQ_NO_BALANCING;
-
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
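
The manage.c hunks split affinity autoselection into a helper that expects desc->lock to be held and a wrapper for the /proc write path, and irq_set_affinity() now takes the lock around the whole update. The resulting locking contract, sketched with names from the hunks (illustrative only):

/*
 * do_irq_select_affinity(irq, desc) - caller already holds desc->lock
 *   (used from __setup_irq(), which runs under the lock)
 * irq_select_affinity_usr(irq)      - takes desc->lock itself; for the
 *   /proc/irq/<n>/smp_affinity write path (see kernel/irq/proc.c below)
 */
spin_lock_irqsave(&desc->lock, flags);
ret = do_irq_select_affinity(irq, desc);
spin_unlock_irqrestore(&desc->lock, flags);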
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 90b920d3f52b..9db681d95814 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,17 +1,6 @@
 
 #include <linux/irq.h>
 
-void set_pending_irq(unsigned int irq, cpumask_t mask)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
-
-	spin_lock_irqsave(&desc->lock, flags);
-	desc->status |= IRQ_MOVE_PENDING;
-	desc->pending_mask = mask;
-	spin_unlock_irqrestore(&desc->lock, flags);
-}
-
 void move_masked_irq(int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 4d161c70ba55..d257e7d6a8a4 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -62,7 +62,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
 	if (!cpus_intersects(new_value, cpu_online_map))
 		/* Special case for empty set - allow the architecture
 		   code to set default SMP affinity. */
-		return irq_select_affinity(irq) ? -EINVAL : count;
+		return irq_select_affinity_usr(irq) ? -EINVAL : count;
 
 	irq_set_affinity(irq, new_value);
 
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 5e7b45c56923..449db466bdbc 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -191,7 +191,7 @@ static int lstats_show(struct seq_file *m, void *v)
 			   latency_record[i].time,
 			   latency_record[i].max);
 		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
-			char sym[KSYM_NAME_LEN];
+			char sym[KSYM_SYMBOL_LEN];
 			char *c;
 			if (!latency_record[i].backtrace[q])
 				break;
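
The latencytop fix enlarges the backtrace symbol buffer: the record is printed with sprint_symbol(), which emits name plus offset/size and a module name, not just a bare symbol name, so a KSYM_NAME_LEN buffer can be overrun. For context, the contemporary bounds from include/linux/kallsyms.h (quoted from memory of kernels of this era; not part of the patch):

#define KSYM_NAME_LEN 128
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
			 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)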
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 06e157119d2b..46a404173db2 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3276,10 +3276,10 @@ void __init lockdep_info(void)
 {
 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
 
-	printk("... MAX_LOCKDEP_SUBCLASSES:    %lu\n", MAX_LOCKDEP_SUBCLASSES);
+	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
 	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
 	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
-	printk("... CLASSHASH_SIZE:           %lu\n", CLASSHASH_SIZE);
+	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
 	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
 	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
 	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
diff --git a/kernel/panic.c b/kernel/panic.c
index 6513aac8e992..4d5088355bfe 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -167,6 +167,7 @@ static const struct tnt tnts[] = {
 *  'M' - System experienced a machine check exception.
 *  'B' - System has hit bad_page.
 *  'U' - Userspace-defined naughtiness.
+ *  'D' - Kernel has oopsed before
 *  'A' - ACPI table overridden.
 *  'W' - Taint on warning.
 *  'C' - modules from drivers/staging are loaded.
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 895337b16a24..4e5288a831de 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -311,7 +311,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 	struct task_cputime cputime;
 
 	thread_group_cputime(p, &cputime);
-	switch (which_clock) {
+	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
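
This one-liner matters because a CPU clockid_t is not a plain enum: it also encodes a PID and a per-thread flag, so switching on the raw value never matches the CPUCLOCK_* cases. The relevant encoding, as defined in include/linux/posix-timers.h of this era (quoted for context, not part of the patch):

#define CPUCLOCK_WHICH(clock)	((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
#define CPUCLOCK_CLOCK_MASK	3
#define CPUCLOCK_PROF		0
#define CPUCLOCK_VIRT		1
#define CPUCLOCK_SCHED		2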
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 5e79c662294b..887c63787de6 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c | |||
@@ -116,7 +116,7 @@ static DEFINE_SPINLOCK(idr_lock); | |||
116 | * must supply functions here, even if the function just returns | 116 | * must supply functions here, even if the function just returns |
117 | * ENOSYS. The standard POSIX timer management code assumes the | 117 | * ENOSYS. The standard POSIX timer management code assumes the |
118 | * following: 1.) The k_itimer struct (sched.h) is used for the | 118 | * following: 1.) The k_itimer struct (sched.h) is used for the |
119 | * timer. 2.) The list, it_lock, it_clock, it_id and it_process | 119 | * timer. 2.) The list, it_lock, it_clock, it_id and it_pid |
120 | * fields are not modified by timer code. | 120 | * fields are not modified by timer code. |
121 | * | 121 | * |
122 | * At this time all functions EXCEPT clock_nanosleep can be | 122 | * At this time all functions EXCEPT clock_nanosleep can be |
@@ -197,6 +197,11 @@ static int common_timer_create(struct k_itimer *new_timer) | |||
197 | return 0; | 197 | return 0; |
198 | } | 198 | } |
199 | 199 | ||
200 | static int no_timer_create(struct k_itimer *new_timer) | ||
201 | { | ||
202 | return -EOPNOTSUPP; | ||
203 | } | ||
204 | |||
200 | /* | 205 | /* |
201 | * Return nonzero if we know a priori this clockid_t value is bogus. | 206 | * Return nonzero if we know a priori this clockid_t value is bogus. |
202 | */ | 207 | */ |
@@ -248,6 +253,7 @@ static __init int init_posix_timers(void) | |||
248 | .clock_getres = hrtimer_get_res, | 253 | .clock_getres = hrtimer_get_res, |
249 | .clock_get = posix_get_monotonic_raw, | 254 | .clock_get = posix_get_monotonic_raw, |
250 | .clock_set = do_posix_clock_nosettime, | 255 | .clock_set = do_posix_clock_nosettime, |
256 | .timer_create = no_timer_create, | ||
251 | }; | 257 | }; |
252 | 258 | ||
253 | register_posix_clock(CLOCK_REALTIME, &clock_realtime); | 259 | register_posix_clock(CLOCK_REALTIME, &clock_realtime); |
@@ -313,7 +319,8 @@ void do_schedule_next_timer(struct siginfo *info) | |||
313 | 319 | ||
314 | int posix_timer_event(struct k_itimer *timr, int si_private) | 320 | int posix_timer_event(struct k_itimer *timr, int si_private) |
315 | { | 321 | { |
316 | int shared, ret; | 322 | struct task_struct *task; |
323 | int shared, ret = -1; | ||
317 | /* | 324 | /* |
318 | * FIXME: if ->sigq is queued we can race with | 325 | * FIXME: if ->sigq is queued we can race with |
319 | * dequeue_signal()->do_schedule_next_timer(). | 326 | * dequeue_signal()->do_schedule_next_timer(). |
@@ -327,8 +334,13 @@ int posix_timer_event(struct k_itimer *timr, int si_private) | |||
327 | */ | 334 | */ |
328 | timr->sigq->info.si_sys_private = si_private; | 335 | timr->sigq->info.si_sys_private = si_private; |
329 | 336 | ||
330 | shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID); | 337 | rcu_read_lock(); |
331 | ret = send_sigqueue(timr->sigq, timr->it_process, shared); | 338 | task = pid_task(timr->it_pid, PIDTYPE_PID); |
339 | if (task) { | ||
340 | shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID); | ||
341 | ret = send_sigqueue(timr->sigq, task, shared); | ||
342 | } | ||
343 | rcu_read_unlock(); | ||
332 | /* If we failed to send the signal the timer stops. */ | 344 | /* If we failed to send the signal the timer stops. */ |
333 | return ret > 0; | 345 | return ret > 0; |
334 | } | 346 | } |
@@ -405,7 +417,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) | |||
405 | return ret; | 417 | return ret; |
406 | } | 418 | } |
407 | 419 | ||
408 | static struct task_struct * good_sigevent(sigevent_t * event) | 420 | static struct pid *good_sigevent(sigevent_t * event) |
409 | { | 421 | { |
410 | struct task_struct *rtn = current->group_leader; | 422 | struct task_struct *rtn = current->group_leader; |
411 | 423 | ||
@@ -419,7 +431,7 @@ static struct task_struct * good_sigevent(sigevent_t * event) | |||
419 | ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) | 431 | ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) |
420 | return NULL; | 432 | return NULL; |
421 | 433 | ||
422 | return rtn; | 434 | return task_pid(rtn); |
423 | } | 435 | } |
424 | 436 | ||
425 | void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock) | 437 | void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock) |
@@ -458,6 +470,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set) | |||
458 | idr_remove(&posix_timers_id, tmr->it_id); | 470 | idr_remove(&posix_timers_id, tmr->it_id); |
459 | spin_unlock_irqrestore(&idr_lock, flags); | 471 | spin_unlock_irqrestore(&idr_lock, flags); |
460 | } | 472 | } |
473 | put_pid(tmr->it_pid); | ||
461 | sigqueue_free(tmr->sigq); | 474 | sigqueue_free(tmr->sigq); |
462 | kmem_cache_free(posix_timers_cache, tmr); | 475 | kmem_cache_free(posix_timers_cache, tmr); |
463 | } | 476 | } |
@@ -471,7 +484,6 @@ sys_timer_create(const clockid_t which_clock, | |||
471 | { | 484 | { |
472 | struct k_itimer *new_timer; | 485 | struct k_itimer *new_timer; |
473 | int error, new_timer_id; | 486 | int error, new_timer_id; |
474 | struct task_struct *process; | ||
475 | sigevent_t event; | 487 | sigevent_t event; |
476 | int it_id_set = IT_ID_NOT_SET; | 488 | int it_id_set = IT_ID_NOT_SET; |
477 | 489 | ||
@@ -525,11 +537,9 @@ sys_timer_create(const clockid_t which_clock, | |||
525 | goto out; | 537 | goto out; |
526 | } | 538 | } |
527 | rcu_read_lock(); | 539 | rcu_read_lock(); |
528 | process = good_sigevent(&event); | 540 | new_timer->it_pid = get_pid(good_sigevent(&event)); |
529 | if (process) | ||
530 | get_task_struct(process); | ||
531 | rcu_read_unlock(); | 541 | rcu_read_unlock(); |
532 | if (!process) { | 542 | if (!new_timer->it_pid) { |
533 | error = -EINVAL; | 543 | error = -EINVAL; |
534 | goto out; | 544 | goto out; |
535 | } | 545 | } |
@@ -537,8 +547,7 @@ sys_timer_create(const clockid_t which_clock, | |||
537 | event.sigev_notify = SIGEV_SIGNAL; | 547 | event.sigev_notify = SIGEV_SIGNAL; |
538 | event.sigev_signo = SIGALRM; | 548 | event.sigev_signo = SIGALRM; |
539 | event.sigev_value.sival_int = new_timer->it_id; | 549 | event.sigev_value.sival_int = new_timer->it_id; |
540 | process = current->group_leader; | 550 | new_timer->it_pid = get_pid(task_tgid(current)); |
541 | get_task_struct(process); | ||
542 | } | 551 | } |
543 | 552 | ||
544 | new_timer->it_sigev_notify = event.sigev_notify; | 553 | new_timer->it_sigev_notify = event.sigev_notify; |
@@ -548,7 +557,7 @@ sys_timer_create(const clockid_t which_clock, | |||
548 | new_timer->sigq->info.si_code = SI_TIMER; | 557 | new_timer->sigq->info.si_code = SI_TIMER; |
549 | 558 | ||
550 | spin_lock_irq(¤t->sighand->siglock); | 559 | spin_lock_irq(¤t->sighand->siglock); |
551 | new_timer->it_process = process; | 560 | new_timer->it_signal = current->signal; |
552 | list_add(&new_timer->list, ¤t->signal->posix_timers); | 561 | list_add(&new_timer->list, ¤t->signal->posix_timers); |
553 | spin_unlock_irq(¤t->sighand->siglock); | 562 | spin_unlock_irq(¤t->sighand->siglock); |
554 | 563 | ||
@@ -583,8 +592,7 @@ static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags) | |||
583 | timr = idr_find(&posix_timers_id, (int)timer_id); | 592 | timr = idr_find(&posix_timers_id, (int)timer_id); |
584 | if (timr) { | 593 | if (timr) { |
585 | spin_lock(&timr->it_lock); | 594 | spin_lock(&timr->it_lock); |
586 | if (timr->it_process && | 595 | if (timr->it_signal == current->signal) { |
587 | same_thread_group(timr->it_process, current)) { | ||
588 | spin_unlock(&idr_lock); | 596 | spin_unlock(&idr_lock); |
589 | return timr; | 597 | return timr; |
590 | } | 598 | } |
@@ -831,8 +839,7 @@ retry_delete: | |||
831 | * This keeps any tasks waiting on the spin lock from thinking | 839 | * This keeps any tasks waiting on the spin lock from thinking |
832 | * they got something (see the lock code above). | 840 | * they got something (see the lock code above). |
833 | */ | 841 | */ |
834 | put_task_struct(timer->it_process); | 842 | timer->it_signal = NULL; |
835 | timer->it_process = NULL; | ||
836 | 843 | ||
837 | unlock_timer(timer, flags); | 844 | unlock_timer(timer, flags); |
838 | release_posix_timer(timer, IT_ID_SET); | 845 | release_posix_timer(timer, IT_ID_SET); |
@@ -858,8 +865,7 @@ retry_delete: | |||
858 | * This keeps any tasks waiting on the spin lock from thinking | 865 | * This keeps any tasks waiting on the spin lock from thinking |
859 | * they got something (see the lock code above). | 866 | * they got something (see the lock code above). |
860 | */ | 867 | */ |
861 | put_task_struct(timer->it_process); | 868 | timer->it_signal = NULL; |
862 | timer->it_process = NULL; | ||
863 | 869 | ||
864 | unlock_timer(timer, flags); | 870 | unlock_timer(timer, flags); |
865 | release_posix_timer(timer, IT_ID_SET); | 871 | release_posix_timer(timer, IT_ID_SET); |
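Taken together, the posix-timers.c hunks split the old it_process pointer into two fields: it_pid, a counted reference used only to find the delivery target, and it_signal, which lock_timer() compares against current->signal to answer "does this timer id belong to the caller?". A condensed lifetime sketch (kernel-style; locking elided):

    /* create (sys_timer_create) */
    new_timer->it_pid    = get_pid(task_tgid(current)); /* counted pid ref */
    new_timer->it_signal = current->signal;             /* ownership token */

    /* delete (timer_delete / exit paths) */
    timer->it_signal = NULL;          /* lock_timer() now refuses the id */

    /* release (release_posix_timer) */
    put_pid(timer->it_pid);           /* drops a pid ref, never a task ref */

Unlike a pinned task_struct, a struct pid reference cannot keep a dead task's memory alive, which is the point of the conversion.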
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index b7713b53d07a..6da14358537c 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -633,7 +633,7 @@ void swsusp_close(fmode_t mode) | |||
633 | return; | 633 | return; |
634 | } | 634 | } |
635 | 635 | ||
636 | blkdev_put(resume_bdev, mode); /* move up */ | 636 | blkdev_put(resume_bdev, mode); |
637 | } | 637 | } |
638 | 638 | ||
639 | static int swsusp_header_init(void) | 639 | static int swsusp_header_init(void) |
diff --git a/kernel/profile.c b/kernel/profile.c index 5b7d1ac7124c..dc41827fbfee 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -351,7 +351,7 @@ out: | |||
351 | put_cpu(); | 351 | put_cpu(); |
352 | } | 352 | } |
353 | 353 | ||
354 | static int __devinit profile_cpu_callback(struct notifier_block *info, | 354 | static int __cpuinit profile_cpu_callback(struct notifier_block *info, |
355 | unsigned long action, void *__cpu) | 355 | unsigned long action, void *__cpu) |
356 | { | 356 | { |
357 | int node, cpu = (unsigned long)__cpu; | 357 | int node, cpu = (unsigned long)__cpu; |
@@ -596,7 +596,7 @@ out_cleanup: | |||
596 | #define create_hash_tables() ({ 0; }) | 596 | #define create_hash_tables() ({ 0; }) |
597 | #endif | 597 | #endif |
598 | 598 | ||
599 | int create_proc_profile(void) | 599 | int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */ |
600 | { | 600 | { |
601 | struct proc_dir_entry *entry; | 601 | struct proc_dir_entry *entry; |
602 | 602 | ||
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 1e68e4c39e2c..4c8bcd7dd8e0 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -612,7 +612,7 @@ int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data) | |||
612 | return (copied == sizeof(data)) ? 0 : -EIO; | 612 | return (copied == sizeof(data)) ? 0 : -EIO; |
613 | } | 613 | } |
614 | 614 | ||
615 | #if defined CONFIG_COMPAT && defined __ARCH_WANT_COMPAT_SYS_PTRACE | 615 | #if defined CONFIG_COMPAT |
616 | #include <linux/compat.h> | 616 | #include <linux/compat.h> |
617 | 617 | ||
618 | int compat_ptrace_request(struct task_struct *child, compat_long_t request, | 618 | int compat_ptrace_request(struct task_struct *child, compat_long_t request, |
@@ -709,4 +709,4 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, | |||
709 | unlock_kernel(); | 709 | unlock_kernel(); |
710 | return ret; | 710 | return ret; |
711 | } | 711 | } |
712 | #endif /* CONFIG_COMPAT && __ARCH_WANT_COMPAT_SYS_PTRACE */ | 712 | #endif /* CONFIG_COMPAT */ |
diff --git a/kernel/relay.c b/kernel/relay.c index 32b0befdcb6a..09ac2008f77b 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -1317,12 +1317,9 @@ static ssize_t relay_file_splice_read(struct file *in, | |||
1317 | if (ret < 0) | 1317 | if (ret < 0) |
1318 | break; | 1318 | break; |
1319 | else if (!ret) { | 1319 | else if (!ret) { |
1320 | if (spliced) | 1320 | if (flags & SPLICE_F_NONBLOCK) |
1321 | break; | ||
1322 | if (flags & SPLICE_F_NONBLOCK) { | ||
1323 | ret = -EAGAIN; | 1321 | ret = -EAGAIN; |
1324 | break; | 1322 | break; |
1325 | } | ||
1326 | } | 1323 | } |
1327 | 1324 | ||
1328 | *ppos += ret; | 1325 | *ppos += ret; |
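The relay splice fix collapses the zero-return handling: the loop no longer spins waiting for more data, it just stops, and -EAGAIN is set only for non-blocking callers. A userspace sketch of that control flow; do_one_chunk() and the nonblock flag are illustrative, and the tail assumes the usual convention that partial progress is returned in preference to the error:

    #include <sys/types.h>
    #include <errno.h>

    extern ssize_t do_one_chunk(size_t len);   /* 0 means no data right now */

    ssize_t copy_loop(size_t len, int nonblock)
    {
            ssize_t ret = 0, done = 0;

            while (len) {
                    ret = do_one_chunk(len);
                    if (ret < 0)
                            break;
                    if (ret == 0) {
                            if (nonblock)
                                    ret = -EAGAIN;
                            break;             /* never spin on a 0 return */
                    }
                    done += ret;
                    len -= ret;
            }
            return done ? done : ret;          /* partial progress wins */
    }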
diff --git a/kernel/sched.c b/kernel/sched.c index 9b1e79371c20..22c532a6f82c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -203,7 +203,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) | |||
203 | hrtimer_init(&rt_b->rt_period_timer, | 203 | hrtimer_init(&rt_b->rt_period_timer, |
204 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 204 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
205 | rt_b->rt_period_timer.function = sched_rt_period_timer; | 205 | rt_b->rt_period_timer.function = sched_rt_period_timer; |
206 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; | ||
207 | } | 206 | } |
208 | 207 | ||
209 | static inline int rt_bandwidth_enabled(void) | 208 | static inline int rt_bandwidth_enabled(void) |
@@ -1139,7 +1138,6 @@ static void init_rq_hrtick(struct rq *rq) | |||
1139 | 1138 | ||
1140 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1139 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1141 | rq->hrtick_timer.function = hrtick; | 1140 | rq->hrtick_timer.function = hrtick; |
1142 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; | ||
1143 | } | 1141 | } |
1144 | #else /* CONFIG_SCHED_HRTICK */ | 1142 | #else /* CONFIG_SCHED_HRTICK */ |
1145 | static inline void hrtick_clear(struct rq *rq) | 1143 | static inline void hrtick_clear(struct rq *rq) |
@@ -1453,9 +1451,10 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | |||
1453 | static unsigned long cpu_avg_load_per_task(int cpu) | 1451 | static unsigned long cpu_avg_load_per_task(int cpu) |
1454 | { | 1452 | { |
1455 | struct rq *rq = cpu_rq(cpu); | 1453 | struct rq *rq = cpu_rq(cpu); |
1454 | unsigned long nr_running = ACCESS_ONCE(rq->nr_running); | ||
1456 | 1455 | ||
1457 | if (rq->nr_running) | 1456 | if (nr_running) |
1458 | rq->avg_load_per_task = rq->load.weight / rq->nr_running; | 1457 | rq->avg_load_per_task = rq->load.weight / nr_running; |
1459 | else | 1458 | else |
1460 | rq->avg_load_per_task = 0; | 1459 | rq->avg_load_per_task = 0; |
1461 | 1460 | ||
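The cpu_avg_load_per_task() change reads rq->nr_running exactly once, so the zero test and the division are guaranteed to see the same value; with two separate loads, a remote update could zero the field between the test and the divide. A minimal userspace sketch of the idiom (READ_ONCE_UL and the globals are illustrative, not kernel API):

    /* Force a single load so the test and the divide agree. */
    #define READ_ONCE_UL(x) (*(volatile unsigned long *)&(x))

    unsigned long nr_running;        /* written concurrently elsewhere */
    unsigned long load_weight;

    unsigned long avg_load_per_task(void)
    {
            unsigned long n = READ_ONCE_UL(nr_running); /* one load, reused */

            return n ? load_weight / n : 0;  /* cannot divide by a newer 0 */
    }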
@@ -6586,7 +6585,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6586 | req = list_entry(rq->migration_queue.next, | 6585 | req = list_entry(rq->migration_queue.next, |
6587 | struct migration_req, list); | 6586 | struct migration_req, list); |
6588 | list_del_init(&req->list); | 6587 | list_del_init(&req->list); |
6588 | spin_unlock_irq(&rq->lock); | ||
6589 | complete(&req->done); | 6589 | complete(&req->done); |
6590 | spin_lock_irq(&rq->lock); | ||
6590 | } | 6591 | } |
6591 | spin_unlock_irq(&rq->lock); | 6592 | spin_unlock_irq(&rq->lock); |
6592 | break; | 6593 | break; |
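In migration_call(), complete() can wake the waiter, and that wakeup may need the very runqueue lock the loop is holding, so the fix drops rq->lock across the call. This is safe because the request was already detached from the list under the lock. Condensed shape of the fixed loop (kernel-style sketch):

    spin_lock_irq(&rq->lock);
    while (!list_empty(&rq->migration_queue)) {
            req = list_entry(rq->migration_queue.next,
                             struct migration_req, list);
            list_del_init(&req->list);        /* detach under the lock */
            spin_unlock_irq(&rq->lock);
            complete(&req->done);             /* wakeup may take rq->lock */
            spin_lock_irq(&rq->lock);
    }
    spin_unlock_irq(&rq->lock);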
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 81787248b60f..e8ab096ddfe3 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c | |||
@@ -118,13 +118,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now) | |||
118 | 118 | ||
119 | /* | 119 | /* |
120 | * scd->clock = clamp(scd->tick_gtod + delta, | 120 | * scd->clock = clamp(scd->tick_gtod + delta, |
121 | * max(scd->tick_gtod, scd->clock), | 121 | * max(scd->tick_gtod, scd->clock), |
122 | * max(scd->clock, scd->tick_gtod + TICK_NSEC)); | 122 | * scd->tick_gtod + TICK_NSEC); |
123 | */ | 123 | */ |
124 | 124 | ||
125 | clock = scd->tick_gtod + delta; | 125 | clock = scd->tick_gtod + delta; |
126 | min_clock = wrap_max(scd->tick_gtod, scd->clock); | 126 | min_clock = wrap_max(scd->tick_gtod, scd->clock); |
127 | max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC); | 127 | max_clock = scd->tick_gtod + TICK_NSEC; |
128 | 128 | ||
129 | clock = wrap_max(clock, min_clock); | 129 | clock = wrap_max(clock, min_clock); |
130 | clock = wrap_min(clock, max_clock); | 130 | clock = wrap_min(clock, max_clock); |
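The sched_clock change tightens the upper bound to scd->tick_gtod + TICK_NSEC, exactly as the updated comment states, so the per-cpu clock can never run more than one tick ahead of GTOD. The wrap_* helpers compare through a signed subtraction so the clamp stays correct across u64 wraparound; a self-contained sketch (the kernel helpers have this shape):

    #include <stdint.h>

    static uint64_t wrap_max(uint64_t x, uint64_t y)
    {
            return (int64_t)(x - y) > 0 ? x : y;
    }

    static uint64_t wrap_min(uint64_t x, uint64_t y)
    {
            return (int64_t)(x - y) < 0 ? x : y;
    }

    /* clock = clamp(tick_gtod + delta,
     *               max(tick_gtod, clock), tick_gtod + TICK_NSEC) */
    static uint64_t clamp_clock(uint64_t tick_gtod, uint64_t clock,
                                uint64_t delta, uint64_t tick_nsec)
    {
            uint64_t c = tick_gtod + delta;

            c = wrap_max(c, wrap_max(tick_gtod, clock)); /* never backward */
            c = wrap_min(c, tick_gtod + tick_nsec);      /* one tick ahead max */
            return c;
    }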
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 3953e4aed733..dc0b3be6b7d5 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
@@ -188,7 +188,7 @@ static void check_hung_task(struct task_struct *t, unsigned long now) | |||
188 | if ((long)(now - t->last_switch_timestamp) < | 188 | if ((long)(now - t->last_switch_timestamp) < |
189 | sysctl_hung_task_timeout_secs) | 189 | sysctl_hung_task_timeout_secs) |
190 | return; | 190 | return; |
191 | if (sysctl_hung_task_warnings < 0) | 191 | if (!sysctl_hung_task_warnings) |
192 | return; | 192 | return; |
193 | sysctl_hung_task_warnings--; | 193 | sysctl_hung_task_warnings--; |
194 | 194 | ||
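With the new test, sysctl_hung_task_warnings == 0 silences the check, a positive value is a budget that counts down to zero, and a negative value such as -1 never reaches zero and so warns indefinitely. A tiny sketch of those semantics (variable name illustrative):

    int warn_budget = -1;    /* 0: silent, >0: budget, <0: unlimited */

    int may_warn(void)
    {
            if (!warn_budget)
                    return 0;
            warn_budget--;
            return 1;
    }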
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 9d048fa2d902..3d56fe7570da 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -176,6 +176,9 @@ extern struct ctl_table random_table[]; | |||
176 | #ifdef CONFIG_INOTIFY_USER | 176 | #ifdef CONFIG_INOTIFY_USER |
177 | extern struct ctl_table inotify_table[]; | 177 | extern struct ctl_table inotify_table[]; |
178 | #endif | 178 | #endif |
179 | #ifdef CONFIG_EPOLL | ||
180 | extern struct ctl_table epoll_table[]; | ||
181 | #endif | ||
179 | 182 | ||
180 | #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT | 183 | #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT |
181 | int sysctl_legacy_va_layout; | 184 | int sysctl_legacy_va_layout; |
@@ -1325,6 +1328,13 @@ static struct ctl_table fs_table[] = { | |||
1325 | .child = inotify_table, | 1328 | .child = inotify_table, |
1326 | }, | 1329 | }, |
1327 | #endif | 1330 | #endif |
1331 | #ifdef CONFIG_EPOLL | ||
1332 | { | ||
1333 | .procname = "epoll", | ||
1334 | .mode = 0555, | ||
1335 | .child = epoll_table, | ||
1336 | }, | ||
1337 | #endif | ||
1328 | #endif | 1338 | #endif |
1329 | { | 1339 | { |
1330 | .ctl_name = KERN_SETUID_DUMPABLE, | 1340 | .ctl_name = KERN_SETUID_DUMPABLE, |
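The sysctl hunk hangs a new /proc/sys/fs/epoll/ directory off fs_table via the .child pointer; the entries themselves live in epoll_table, defined elsewhere (fs/eventpoll.c in this era). Purely for illustration, a child table exposing one integer limit might look like this; the entry name and fields are hypothetical, only the registration shape matches the diff:

    static int example_limit;

    static struct ctl_table epoll_table[] = {
            {
                    .procname     = "example_limit", /* /proc/sys/fs/epoll/... */
                    .data         = &example_limit,
                    .maxlen       = sizeof(int),
                    .mode         = 0644,
                    .proc_handler = &proc_dointvec,
            },
            { }                                      /* terminator */
    };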
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 8ff15e5d486b..f5f793d92415 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
@@ -131,7 +131,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) | |||
131 | { | 131 | { |
132 | enum hrtimer_restart res = HRTIMER_NORESTART; | 132 | enum hrtimer_restart res = HRTIMER_NORESTART; |
133 | 133 | ||
134 | write_seqlock_irq(&xtime_lock); | 134 | write_seqlock(&xtime_lock); |
135 | 135 | ||
136 | switch (time_state) { | 136 | switch (time_state) { |
137 | case TIME_OK: | 137 | case TIME_OK: |
@@ -164,7 +164,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) | |||
164 | } | 164 | } |
165 | update_vsyscall(&xtime, clock); | 165 | update_vsyscall(&xtime, clock); |
166 | 166 | ||
167 | write_sequnlock_irq(&xtime_lock); | 167 | write_sequnlock(&xtime_lock); |
168 | 168 | ||
169 | return res; | 169 | return res; |
170 | } | 170 | } |
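The _irq suffix is dropped because ntp_leap_second() is an hrtimer callback, and hrtimer callbacks run in hard interrupt context with interrupts already disabled; the _irq variants would only redundantly toggle the interrupt flag. Kernel-style sketch of the resulting form:

    static enum hrtimer_restart leap_cb(struct hrtimer *timer)
    {
            write_seqlock(&xtime_lock);     /* irqs already off here */
            /* ... adjust time_state, update vsyscall data ... */
            write_sequnlock(&xtime_lock);
            return HRTIMER_NORESTART;
    }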
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 342fc9ccab46..8f3fc2582d38 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -247,7 +247,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
247 | if (need_resched()) | 247 | if (need_resched()) |
248 | goto end; | 248 | goto end; |
249 | 249 | ||
250 | if (unlikely(local_softirq_pending())) { | 250 | if (unlikely(local_softirq_pending() && cpu_online(cpu))) { |
251 | static int ratelimit; | 251 | static int ratelimit; |
252 | 252 | ||
253 | if (ratelimit < 10) { | 253 | if (ratelimit < 10) { |
@@ -282,8 +282,31 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
282 | /* Schedule the tick, if we are at least one jiffie off */ | 282 | /* Schedule the tick, if we are at least one jiffie off */ |
283 | if ((long)delta_jiffies >= 1) { | 283 | if ((long)delta_jiffies >= 1) { |
284 | 284 | ||
285 | /* | ||
286 | * calculate the expiry time for the next timer wheel | ||
287 | * timer | ||
288 | */ | ||
289 | expires = ktime_add_ns(last_update, tick_period.tv64 * | ||
290 | delta_jiffies); | ||
291 | |||
292 | /* | ||
293 | * If this cpu is the one which updates jiffies, then | ||
294 | * give up the assignment and let it be taken by the | ||
295 | * cpu which runs the tick timer next, which might be | ||
296 | * this cpu as well. If we don't drop this here the | ||
297 | * jiffies might be stale and do_timer() never | ||
298 | * invoked. | ||
299 | */ | ||
300 | if (cpu == tick_do_timer_cpu) | ||
301 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; | ||
302 | |||
285 | if (delta_jiffies > 1) | 303 | if (delta_jiffies > 1) |
286 | cpu_set(cpu, nohz_cpu_mask); | 304 | cpu_set(cpu, nohz_cpu_mask); |
305 | |||
306 | /* Skip reprogram of event if its not changed */ | ||
307 | if (ts->tick_stopped && ktime_equal(expires, dev->next_event)) | ||
308 | goto out; | ||
309 | |||
287 | /* | 310 | /* |
288 | * nohz_stop_sched_tick can be called several times before | 311 | * nohz_stop_sched_tick can be called several times before |
289 | * the nohz_restart_sched_tick is called. This happens when | 312 | * the nohz_restart_sched_tick is called. This happens when |
@@ -306,17 +329,6 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
306 | rcu_enter_nohz(); | 329 | rcu_enter_nohz(); |
307 | } | 330 | } |
308 | 331 | ||
309 | /* | ||
310 | * If this cpu is the one which updates jiffies, then | ||
311 | * give up the assignment and let it be taken by the | ||
312 | * cpu which runs the tick timer next, which might be | ||
313 | * this cpu as well. If we don't drop this here the | ||
314 | * jiffies might be stale and do_timer() never | ||
315 | * invoked. | ||
316 | */ | ||
317 | if (cpu == tick_do_timer_cpu) | ||
318 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; | ||
319 | |||
320 | ts->idle_sleeps++; | 332 | ts->idle_sleeps++; |
321 | 333 | ||
322 | /* | 334 | /* |
@@ -332,12 +344,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
332 | goto out; | 344 | goto out; |
333 | } | 345 | } |
334 | 346 | ||
335 | /* | 347 | /* Mark expiries */ |
336 | * calculate the expiry time for the next timer wheel | ||
337 | * timer | ||
338 | */ | ||
339 | expires = ktime_add_ns(last_update, tick_period.tv64 * | ||
340 | delta_jiffies); | ||
341 | ts->idle_expires = expires; | 348 | ts->idle_expires = expires; |
342 | 349 | ||
343 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { | 350 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { |
@@ -681,7 +688,6 @@ void tick_setup_sched_timer(void) | |||
681 | */ | 688 | */ |
682 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 689 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
683 | ts->sched_timer.function = tick_sched_timer; | 690 | ts->sched_timer.function = tick_sched_timer; |
684 | ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; | ||
685 | 691 | ||
686 | /* Get the next period (per cpu) */ | 692 | /* Get the next period (per cpu) */ |
687 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); | 693 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); |
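The tick-sched reordering computes the next expiry (and hands off the do_timer() duty) before the tick is actually stopped, so that when the clock-event device is already programmed for exactly that expiry the whole reprogramming path can be skipped. Condensed from the hunks above:

    expires = ktime_add_ns(last_update, tick_period.tv64 * delta_jiffies);

    if (cpu == tick_do_timer_cpu)
            tick_do_timer_cpu = TICK_DO_TIMER_NONE; /* hand off do_timer() */

    if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
            goto out;       /* device already programmed for this expiry */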
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index e7acfb482a68..fa05e88aa76f 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -518,6 +518,28 @@ void update_wall_time(void) | |||
518 | /* correct the clock when NTP error is too big */ | 518 | /* correct the clock when NTP error is too big */ |
519 | clocksource_adjust(offset); | 519 | clocksource_adjust(offset); |
520 | 520 | ||
521 | /* | ||
522 | * Since in the loop above, we accumulate any amount of time | ||
523 | * in xtime_nsec over a second into xtime.tv_sec, it's possible for | ||
524 | * xtime_nsec to be fairly small after the loop. Further, if we're | ||
525 | * slightly speeding the clocksource up in clocksource_adjust(), | ||
526 | * it's possible the required corrective factor to xtime_nsec could | ||
527 | * cause it to underflow. | ||
528 | * | ||
529 | * Now, we cannot simply roll the accumulated second back, since | ||
530 | * the NTP subsystem has been notified via second_overflow. So | ||
531 | * instead we push xtime_nsec forward by the amount we underflowed, | ||
532 | * and add that amount into the error. | ||
533 | * | ||
534 | * We'll correct this error next time through this function, when | ||
535 | * xtime_nsec is not as small. | ||
536 | */ | ||
537 | if (unlikely((s64)clock->xtime_nsec < 0)) { | ||
538 | s64 neg = -(s64)clock->xtime_nsec; | ||
539 | clock->xtime_nsec = 0; | ||
540 | clock->error += neg << (NTP_SCALE_SHIFT - clock->shift); | ||
541 | } | ||
542 | |||
521 | /* store full nanoseconds into xtime after rounding it up and | 543 | /* store full nanoseconds into xtime after rounding it up and |
522 | * add the remainder to the error difference. | 544 | * add the remainder to the error difference. |
523 | */ | 545 | */ |
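The timekeeping fix works in the clocksource's fixed-point domain: if the correction applied by clocksource_adjust() drove xtime_nsec negative, the value is pushed forward to zero and the deficit is converted into NTP error units by shifting up by (NTP_SCALE_SHIFT - clock->shift), to be repaid on a later call. A self-contained arithmetic sketch (shift values illustrative):

    #include <stdint.h>

    #define NTP_SCALE_SHIFT 32

    static void fixup_underflow(int64_t *xtime_nsec, int64_t *error, int shift)
    {
            if (*xtime_nsec < 0) {
                    int64_t neg = -*xtime_nsec;  /* amount we underflowed */

                    *xtime_nsec = 0;             /* push forward to zero */
                    *error += neg << (NTP_SCALE_SHIFT - shift); /* repay later */
            }
    }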
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index f780e9552f91..668bbb5ef2bd 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -1215,7 +1215,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, | |||
1215 | 1215 | ||
1216 | out: | 1216 | out: |
1217 | if (resched) | 1217 | if (resched) |
1218 | preempt_enable_notrace(); | 1218 | preempt_enable_no_resched_notrace(); |
1219 | else | 1219 | else |
1220 | preempt_enable_notrace(); | 1220 | preempt_enable_notrace(); |
1221 | return NULL; | 1221 | return NULL; |
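Before this one-liner, both branches called preempt_enable_notrace(), making the resched flag dead code. The flag exists so that when the caller captured a pending reschedule before disabling preemption, re-enabling it on the tracer's error path does not itself call into schedule(). Kernel-style sketch of the intended pairing:

    resched = need_resched();            /* captured before disabling */
    preempt_disable_notrace();
    /* ... try to reserve an event ... */
    if (resched)
            preempt_enable_no_resched_notrace(); /* caller reschedules */
    else
            preempt_enable_notrace();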
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index f28484618ff0..e62cbf78eab6 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -18,12 +18,14 @@ struct header_iter { | |||
18 | 18 | ||
19 | static struct trace_array *mmio_trace_array; | 19 | static struct trace_array *mmio_trace_array; |
20 | static bool overrun_detected; | 20 | static bool overrun_detected; |
21 | static unsigned long prev_overruns; | ||
21 | 22 | ||
22 | static void mmio_reset_data(struct trace_array *tr) | 23 | static void mmio_reset_data(struct trace_array *tr) |
23 | { | 24 | { |
24 | int cpu; | 25 | int cpu; |
25 | 26 | ||
26 | overrun_detected = false; | 27 | overrun_detected = false; |
28 | prev_overruns = 0; | ||
27 | tr->time_start = ftrace_now(tr->cpu); | 29 | tr->time_start = ftrace_now(tr->cpu); |
28 | 30 | ||
29 | for_each_online_cpu(cpu) | 31 | for_each_online_cpu(cpu) |
@@ -128,16 +130,12 @@ static void mmio_close(struct trace_iterator *iter) | |||
128 | 130 | ||
129 | static unsigned long count_overruns(struct trace_iterator *iter) | 131 | static unsigned long count_overruns(struct trace_iterator *iter) |
130 | { | 132 | { |
131 | int cpu; | ||
132 | unsigned long cnt = 0; | 133 | unsigned long cnt = 0; |
133 | /* FIXME: */ | 134 | unsigned long over = ring_buffer_overruns(iter->tr->buffer); |
134 | #if 0 | 135 | |
135 | for_each_online_cpu(cpu) { | 136 | if (over > prev_overruns) |
136 | cnt += iter->overrun[cpu]; | 137 | cnt = over - prev_overruns; |
137 | iter->overrun[cpu] = 0; | 138 | prev_overruns = over; |
138 | } | ||
139 | #endif | ||
140 | (void)cpu; | ||
141 | return cnt; | 139 | return cnt; |
142 | } | 140 | } |
143 | 141 | ||
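count_overruns() now derives per-read deltas from the monotonically growing total that ring_buffer_overruns() returns, keeping the last seen value in prev_overruns (which mmio_reset_data() zeroes to restart the baseline). A userspace sketch of the delta-since-last-read pattern; read_total() stands in for ring_buffer_overruns():

    extern unsigned long read_total(void);   /* monotonic running total */

    static unsigned long prev;

    unsigned long overruns_since_last(void)
    {
            unsigned long over = read_total();
            unsigned long cnt = 0;

            if (over > prev)
                    cnt = over - prev;       /* new events since last call */
            prev = over;                     /* advance the baseline */
            return cnt;
    }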
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index be682b62fe58..3bdb44bde4b7 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -184,11 +184,16 @@ static struct file_operations stack_max_size_fops = { | |||
184 | static void * | 184 | static void * |
185 | t_next(struct seq_file *m, void *v, loff_t *pos) | 185 | t_next(struct seq_file *m, void *v, loff_t *pos) |
186 | { | 186 | { |
187 | long i = (long)m->private; | 187 | long i; |
188 | 188 | ||
189 | (*pos)++; | 189 | (*pos)++; |
190 | 190 | ||
191 | i++; | 191 | if (v == SEQ_START_TOKEN) |
192 | i = 0; | ||
193 | else { | ||
194 | i = *(long *)v; | ||
195 | i++; | ||
196 | } | ||
192 | 197 | ||
193 | if (i >= max_stack_trace.nr_entries || | 198 | if (i >= max_stack_trace.nr_entries || |
194 | stack_dump_trace[i] == ULONG_MAX) | 199 | stack_dump_trace[i] == ULONG_MAX) |
@@ -201,12 +206,15 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
201 | 206 | ||
202 | static void *t_start(struct seq_file *m, loff_t *pos) | 207 | static void *t_start(struct seq_file *m, loff_t *pos) |
203 | { | 208 | { |
204 | void *t = &m->private; | 209 | void *t = SEQ_START_TOKEN; |
205 | loff_t l = 0; | 210 | loff_t l = 0; |
206 | 211 | ||
207 | local_irq_disable(); | 212 | local_irq_disable(); |
208 | __raw_spin_lock(&max_stack_lock); | 213 | __raw_spin_lock(&max_stack_lock); |
209 | 214 | ||
215 | if (*pos == 0) | ||
216 | return SEQ_START_TOKEN; | ||
217 | |||
210 | for (; t && l < *pos; t = t_next(m, t, &l)) | 218 | for (; t && l < *pos; t = t_next(m, t, &l)) |
211 | ; | 219 | ; |
212 | 220 | ||
@@ -235,10 +243,10 @@ static int trace_lookup_stack(struct seq_file *m, long i) | |||
235 | 243 | ||
236 | static int t_show(struct seq_file *m, void *v) | 244 | static int t_show(struct seq_file *m, void *v) |
237 | { | 245 | { |
238 | long i = *(long *)v; | 246 | long i; |
239 | int size; | 247 | int size; |
240 | 248 | ||
241 | if (i < 0) { | 249 | if (v == SEQ_START_TOKEN) { |
242 | seq_printf(m, " Depth Size Location" | 250 | seq_printf(m, " Depth Size Location" |
243 | " (%d entries)\n" | 251 | " (%d entries)\n" |
244 | " ----- ---- --------\n", | 252 | " ----- ---- --------\n", |
@@ -246,6 +254,8 @@ static int t_show(struct seq_file *m, void *v) | |||
246 | return 0; | 254 | return 0; |
247 | } | 255 | } |
248 | 256 | ||
257 | i = *(long *)v; | ||
258 | |||
249 | if (i >= max_stack_trace.nr_entries || | 259 | if (i >= max_stack_trace.nr_entries || |
250 | stack_dump_trace[i] == ULONG_MAX) | 260 | stack_dump_trace[i] == ULONG_MAX) |
251 | return 0; | 261 | return 0; |
@@ -275,10 +285,6 @@ static int stack_trace_open(struct inode *inode, struct file *file) | |||
275 | int ret; | 285 | int ret; |
276 | 286 | ||
277 | ret = seq_open(file, &stack_trace_seq_ops); | 287 | ret = seq_open(file, &stack_trace_seq_ops); |
278 | if (!ret) { | ||
279 | struct seq_file *m = file->private_data; | ||
280 | m->private = (void *)-1; | ||
281 | } | ||
282 | 288 | ||
283 | return ret; | 289 | return ret; |
284 | } | 290 | } |
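The trace_stack rewrite adopts the standard seq_file header idiom: t_start() returns the SEQ_START_TOKEN sentinel at position 0 so t_show() can print the column header, and t_next() maps the sentinel to index 0. That replaces the old trick of smuggling -1 through m->private, which is why stack_trace_open() loses its special case. Kernel-style sketch of the trio; nr_entries, entry[] and show_entry() are placeholders:

    static void *t_next(struct seq_file *m, void *v, loff_t *pos)
    {
            long i = (v == SEQ_START_TOKEN) ? 0 : *(long *)v + 1;

            (*pos)++;
            return i < nr_entries ? &entry[i] : NULL;
    }

    static void *t_start(struct seq_file *m, loff_t *pos)
    {
            void *t = SEQ_START_TOKEN;
            loff_t l = 0;

            if (*pos == 0)
                    return SEQ_START_TOKEN;  /* t_show() prints the header */
            for (; t && l < *pos; t = t_next(m, t, &l))
                    ;
            return t;
    }

    static int t_show(struct seq_file *m, void *v)
    {
            if (v == SEQ_START_TOKEN)
                    return seq_puts(m, " Depth  Size  Location\n");
            return show_entry(m, *(long *)v);
    }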
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index 9587d3bcba55..ae542e2e38d5 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c | |||
@@ -202,7 +202,6 @@ static void start_stack_timer(int cpu) | |||
202 | 202 | ||
203 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 203 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
204 | hrtimer->function = stack_trace_timer_fn; | 204 | hrtimer->function = stack_trace_timer_fn; |
205 | hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; | ||
206 | 205 | ||
207 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); | 206 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); |
208 | } | 207 | } |
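The cb_mode assignments dropped here, in sched.c and in tick-sched.c all follow from the hrtimer rework earlier in this series: the HRTIMER_CB_* callback modes are gone and every hrtimer callback now runs in hard interrupt context, so initialization reduces to two lines (sketch):

    hrtimer_init(&hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    hrtimer.function = my_callback;   /* runs in hard-irq context; no cb_mode */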