author     Arnd Bergmann <arnd@arndb.de>   2012-01-03 15:27:45 -0500
committer  Arnd Bergmann <arnd@arndb.de>   2012-01-03 15:27:45 -0500
commit     5d3cb0ffdd0c8987dc17a2ef4529b246198ceb72
tree       c60bdca0529cbd44d32b3918b78d14e182ef57cd /kernel
parent     3b0d597139efddfd8960b44249b4a4c977d172f1
parent     5f0a6e2d503896062f641639dacfe5055c2f593b
Merge branch 'v3.2-rc7' into next/pm
Conflicts:
arch/arm/kernel/setup.c
arch/arm/mach-shmobile/board-kota2.c
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                      |   5
-rw-r--r--  kernel/cpuset.c                      |  29
-rw-r--r--  kernel/events/core.c                 |  95
-rw-r--r--  kernel/events/internal.h             |   3
-rw-r--r--  kernel/events/ring_buffer.c          |   3
-rw-r--r--  kernel/irq/manage.c                  |   5
-rw-r--r--  kernel/jump_label.c                  |   3
-rw-r--r--  kernel/lockdep.c                     |   8
-rw-r--r--  kernel/printk.c                      |   3
-rw-r--r--  kernel/sched.c                       |  17
-rw-r--r--  kernel/sched_fair.c                  | 159
-rw-r--r--  kernel/sched_features.h              |   1
-rw-r--r--  kernel/sched_rt.c                    |   3
-rw-r--r--  kernel/sysctl_binary.c               |   2
-rw-r--r--  kernel/time/alarmtimer.c             |   2
-rw-r--r--  kernel/time/clockevents.c            |   1
-rw-r--r--  kernel/time/clocksource.c            |  16
-rw-r--r--  kernel/time/tick-broadcast.c         |   2
-rw-r--r--  kernel/timer.c                       |   2
-rw-r--r--  kernel/trace/ftrace.c                |   5
-rw-r--r--  kernel/trace/trace_events.c          |   1
-rw-r--r--  kernel/trace/trace_events_filter.c   |  13
22 files changed, 311 insertions, 67 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d9d5648f3cdc..a184470cf9b5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2098,11 +2098,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                        continue;
                /* get old css_set pointer */
                task_lock(tsk);
-               if (tsk->flags & PF_EXITING) {
-                       /* ignore this task if it's going away */
-                       task_unlock(tsk);
-                       continue;
-               }
                oldcg = tsk->cgroups;
                get_css_set(oldcg);
                task_unlock(tsk);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fe58c46a426..0b1712dba587 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
                            struct cpuset, css);
 }
 
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+       return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+       return false;
+}
+#endif
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
        CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
                                        nodemask_t *newmems)
 {
-       bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+       bool need_loop;
 
 repeat:
        /*
@@ -962,6 +975,14 @@ repeat:
                return;
 
        task_lock(tsk);
+       /*
+        * Determine if a loop is necessary if another thread is doing
+        * get_mems_allowed(). If at least one node remains unchanged and
+        * tsk does not have a mempolicy, then an empty nodemask will not be
+        * possible when mems_allowed is larger than a word.
+        */
+       need_loop = task_has_mempolicy(tsk) ||
+                       !nodes_intersects(*newmems, tsk->mems_allowed);
        nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
@@ -981,11 +1002,9 @@ repeat:
 
        /*
         * Allocation of memory is very fast, we needn't sleep when waiting
-        * for the read-side. No wait is necessary, however, if at least one
-        * node remains unchanged.
+        * for the read-side.
         */
-       while (masks_disjoint &&
-                       ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+       while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
                task_unlock(tsk);
                if (!task_curr(tsk))
                        yield();
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e8457da6f95..58690af323e4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb);
+
 void __weak perf_event_print_debug(void) { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2171,9 +2174,10 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         */
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-       perf_event_sched_in(cpuctx, ctx, task);
+       if (ctx->nr_events)
+               cpuctx->task_ctx = ctx;
 
-       cpuctx->task_ctx = ctx;
+       perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
 
        perf_pmu_enable(ctx->pmu);
        perf_ctx_unlock(cpuctx, ctx);
@@ -3190,12 +3194,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
        struct ring_buffer *rb;
        unsigned int events = POLL_HUP;
 
+       /*
+        * Race between perf_event_set_output() and perf_poll(): perf_poll()
+        * grabs the rb reference but perf_event_set_output() overrides it.
+        * Here is the timeline for two threads T1, T2:
+        * t0: T1, rb = rcu_dereference(event->rb)
+        * t1: T2, old_rb = event->rb
+        * t2: T2, event->rb = new rb
+        * t3: T2, ring_buffer_detach(old_rb)
+        * t4: T1, ring_buffer_attach(rb1)
+        * t5: T1, poll_wait(event->waitq)
+        *
+        * To avoid this problem, we grab mmap_mutex in perf_poll()
+        * thereby ensuring that the assignment of the new ring buffer
+        * and the detachment of the old buffer appear atomic to perf_poll()
+        */
+       mutex_lock(&event->mmap_mutex);
+
        rcu_read_lock();
        rb = rcu_dereference(event->rb);
-       if (rb)
+       if (rb) {
+               ring_buffer_attach(event, rb);
                events = atomic_xchg(&rb->poll, 0);
+       }
        rcu_read_unlock();
 
+       mutex_unlock(&event->mmap_mutex);
+
        poll_wait(file, &event->waitq, wait);
 
        return events;
@@ -3496,6 +3521,53 @@ unlock:
        return ret;
 }
 
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb)
+{
+       unsigned long flags;
+
+       if (!list_empty(&event->rb_entry))
+               return;
+
+       spin_lock_irqsave(&rb->event_lock, flags);
+       if (!list_empty(&event->rb_entry))
+               goto unlock;
+
+       list_add(&event->rb_entry, &rb->event_list);
+unlock:
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_detach(struct perf_event *event,
+                              struct ring_buffer *rb)
+{
+       unsigned long flags;
+
+       if (list_empty(&event->rb_entry))
+               return;
+
+       spin_lock_irqsave(&rb->event_lock, flags);
+       list_del_init(&event->rb_entry);
+       wake_up_all(&event->waitq);
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_wakeup(struct perf_event *event)
+{
+       struct ring_buffer *rb;
+
+       rcu_read_lock();
+       rb = rcu_dereference(event->rb);
+       if (!rb)
+               goto unlock;
+
+       list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+               wake_up_all(&event->waitq);
+
+unlock:
+       rcu_read_unlock();
+}
+
 static void rb_free_rcu(struct rcu_head *rcu_head)
 {
        struct ring_buffer *rb;
@@ -3521,9 +3593,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
+       struct perf_event *event, *n;
+       unsigned long flags;
+
        if (!atomic_dec_and_test(&rb->refcount))
                return;
 
+       spin_lock_irqsave(&rb->event_lock, flags);
+       list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+               list_del_init(&event->rb_entry);
+               wake_up_all(&event->waitq);
+       }
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+
        call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
@@ -3546,6 +3628,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
                vma->vm_mm->pinned_vm -= event->mmap_locked;
                rcu_assign_pointer(event->rb, NULL);
+               ring_buffer_detach(event, rb);
                mutex_unlock(&event->mmap_mutex);
 
                ring_buffer_put(rb);
@@ -3700,7 +3783,7 @@ static const struct file_operations perf_fops = {
 
 void perf_event_wakeup(struct perf_event *event)
 {
-       wake_up_all(&event->waitq);
+       ring_buffer_wakeup(event);
 
        if (event->pending_kill) {
                kill_fasync(&event->fasync, SIGIO, event->pending_kill);
@@ -5822,6 +5905,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        INIT_LIST_HEAD(&event->group_entry);
        INIT_LIST_HEAD(&event->event_entry);
        INIT_LIST_HEAD(&event->sibling_list);
+       INIT_LIST_HEAD(&event->rb_entry);
+
        init_waitqueue_head(&event->waitq);
        init_irq_work(&event->pending, perf_pending_event);
 
@@ -6028,6 +6113,8 @@ set:
 
        old_rb = event->rb;
        rcu_assign_pointer(event->rb, rb);
+       if (old_rb)
+               ring_buffer_detach(event, old_rb);
        ret = 0;
 unlock:
        mutex_unlock(&event->mmap_mutex);
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 09097dd8116c..64568a699375 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -22,6 +22,9 @@ struct ring_buffer {
        local_t                         lost;           /* nr records lost   */
 
        long                            watermark;      /* wakeup watermark  */
+       /* poll crap */
+       spinlock_t                      event_lock;
+       struct list_head                event_list;
 
        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index a2a29205cc0f..7f3011c6b57f 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
                rb->writable = 1;
 
        atomic_set(&rb->refcount, 1);
+
+       INIT_LIST_HEAD(&rb->event_list);
+       spin_lock_init(&rb->event_lock);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0e2b179bc7b3..1da999f5e746 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -623,8 +623,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
+       set_current_state(TASK_INTERRUPTIBLE);
+
        while (!kthread_should_stop()) {
-               set_current_state(TASK_INTERRUPTIBLE);
 
                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
@@ -632,7 +633,9 @@ static int irq_wait_for_interrupt(struct irqaction *action)
                        return 0;
                }
                schedule();
+               set_current_state(TASK_INTERRUPTIBLE);
        }
+       __set_current_state(TASK_RUNNING);
        return -1;
 }
 
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index bbdfe2a462a0..66ff7109f697 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key)
                return;
 
        jump_label_lock();
-       if (atomic_add_return(1, &key->enabled) == 1)
+       if (atomic_read(&key->enabled) == 0)
                jump_label_update(key, JUMP_LABEL_ENABLE);
+       atomic_inc(&key->enabled);
        jump_label_unlock();
 }
 
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e69434b070da..b2e08c932d91 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -44,6 +44,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/sections.h>
 
@@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
-       memset(lock, 0, sizeof(*lock));
+       int i;
+
+       kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+       for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+               lock->class_cache[i] = NULL;
 
 #ifdef CONFIG_LOCK_STAT
        lock->cpu = raw_smp_processor_id();
diff --git a/kernel/printk.c b/kernel/printk.c
index 1455a0d4eedd..7982a0a841ea 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1293,10 +1293,11 @@ again:
        raw_spin_lock(&logbuf_lock);
        if (con_start != log_end)
                retry = 1;
+       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
        if (retry && console_trylock())
                goto again;
 
-       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
        if (wake_klogd)
                wake_up_klogd();
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 0e9344a71be3..d6b149ccf925 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
 #include <linux/slab.h>
+#include <linux/init_task.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -4810,6 +4811,9 @@ EXPORT_SYMBOL(wait_for_completion);
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
 */
 unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -4824,6 +4828,8 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
 *
 * This waits for completion of a specific task to be signaled. It is
 * interruptible.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
 */
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
@@ -4841,6 +4847,9 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
 */
 long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
@@ -4856,6 +4865,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 *
 * This waits to be signaled for completion of a specific task. It can be
 * interrupted by a kill signal.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
 */
 int __sched wait_for_completion_killable(struct completion *x)
 {
@@ -4874,6 +4885,9 @@ EXPORT_SYMBOL(wait_for_completion_killable);
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
 */
 long __sched
 wait_for_completion_killable_timeout(struct completion *x,
@@ -6099,6 +6113,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         */
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
+#if defined(CONFIG_SMP)
+       sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
 }
 
 /*
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5c9e67923b7c..8a39fa3e3c6c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -772,19 +772,32 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
                list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+{
+       long tg_weight;
+
+       /*
+        * Use this CPU's actual weight instead of the last load_contribution
+        * to gain a more accurate current total weight. See
+        * update_cfs_rq_load_contribution().
+        */
+       tg_weight = atomic_read(&tg->load_weight);
+       tg_weight -= cfs_rq->load_contribution;
+       tg_weight += cfs_rq->load.weight;
+
+       return tg_weight;
+}
+
 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-       long load_weight, load, shares;
+       long tg_weight, load, shares;
 
+       tg_weight = calc_tg_weight(tg, cfs_rq);
        load = cfs_rq->load.weight;
 
-       load_weight = atomic_read(&tg->load_weight);
-       load_weight += load;
-       load_weight -= cfs_rq->load_contribution;
-
        shares = (tg->shares * load);
-       if (load_weight)
-               shares /= load_weight;
+       if (tg_weight)
+               shares /= tg_weight;
 
        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
@@ -1743,7 +1756,7 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
-       if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
+       if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
                return;
 
        __return_cfs_rq_runtime(cfs_rq);
@@ -2036,36 +2049,100 @@ static void task_waking_fair(struct task_struct *p)
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
+ *
+ * Calculate the effective load difference if @wl is added (subtracted) to @tg
+ * on this @cpu and results in a total addition (subtraction) of @wg to the
+ * total group weight.
+ *
+ * Given a runqueue weight distribution (rw_i) we can compute a shares
+ * distribution (s_i) using:
+ *
+ *   s_i = rw_i / \Sum rw_j                                            (1)
+ *
+ * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
+ * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
+ * shares distribution (s_i):
+ *
+ *   rw_i = {   2,   4,   1,   0 }
+ *   s_i  = { 2/7, 4/7, 1/7,   0 }
+ *
+ * As per wake_affine() we're interested in the load of two CPUs (the CPU the
+ * task used to run on and the CPU the waker is running on), we need to
+ * compute the effect of waking a task on either CPU and, in case of a sync
+ * wakeup, compute the effect of the current task going to sleep.
+ *
+ * So for a change of @wl to the local @cpu with an overall group weight change
+ * of @wl we can compute the new shares distribution (s'_i) using:
+ *
+ *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)                           (2)
+ *
+ * Suppose we're interested in CPUs 0 and 1, and want to compute the load
+ * differences in waking a task to CPU 0. The additional task changes the
+ * weight and shares distributions like:
+ *
+ *   rw'_i = {   3,   4,   1,   0 }
+ *   s'_i  = { 3/8, 4/8, 1/8,   0 }
+ *
+ * We can then compute the difference in effective weight by using:
+ *
+ *   dw_i = S * (s'_i - s_i)                                           (3)
+ *
+ * Where 'S' is the group weight as seen by its parent.
+ *
+ * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
+ * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
+ * 4/7) times the weight of the group.
 */
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 {
        struct sched_entity *se = tg->se[cpu];
 
-       if (!tg->parent)
+       if (!tg->parent)        /* the trivial, non-cgroup case */
                return wl;
 
        for_each_sched_entity(se) {
-               long lw, w;
+               long w, W;
 
                tg = se->my_q->tg;
-               w = se->my_q->load.weight;
 
-               /* use this cpu's instantaneous contribution */
-               lw = atomic_read(&tg->load_weight);
-               lw -= se->my_q->load_contribution;
-               lw += w + wg;
+               /*
+                * W = @wg + \Sum rw_j
+                */
+               W = wg + calc_tg_weight(tg, se->my_q);
 
-               wl += w;
+               /*
+                * w = rw_i + @wl
+                */
+               w = se->my_q->load.weight + wl;
 
-               if (lw > 0 && wl < lw)
-                       wl = (wl * tg->shares) / lw;
+               /*
+                * wl = S * s'_i; see (2)
+                */
+               if (W > 0 && w < W)
+                       wl = (w * tg->shares) / W;
                else
                        wl = tg->shares;
 
-               /* zero point is MIN_SHARES */
+               /*
+                * Per the above, wl is the new se->load.weight value; since
+                * those are clipped to [MIN_SHARES, ...) do so now. See
+                * calc_cfs_shares().
+                */
                if (wl < MIN_SHARES)
                        wl = MIN_SHARES;
+
+               /*
+                * wl = dw_i = S * (s'_i - s_i); see (3)
+                */
                wl -= se->load.weight;
+
+               /*
+                * Recursively apply this logic to all parent groups to compute
+                * the final effective load change on the root group. Since
+                * only the @tg group gets extra weight, all parent groups can
+                * only redistribute existing shares. @wl is the shift in shares
+                * resulting from this level per the above.
+                */
                wg = 0;
        }
 
@@ -2249,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
        struct sched_domain *sd;
-       int i;
+       struct sched_group *sg;
+       int i, smt = 0;
 
        /*
         * If the task is going to be woken-up on this cpu and if it is
@@ -2269,25 +2347,40 @@ static int select_idle_sibling(struct task_struct *p, int target)
         * Otherwise, iterate the domains and find an elegible idle cpu.
         */
        rcu_read_lock();
+again:
        for_each_domain(target, sd) {
+               if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+                       continue;
+
+               if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
+                       break;
+
                if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
                        break;
 
-               for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-                       if (idle_cpu(i)) {
-                               target = i;
-                               break;
+               sg = sd->groups;
+               do {
+                       if (!cpumask_intersects(sched_group_cpus(sg),
+                                               tsk_cpus_allowed(p)))
+                               goto next;
+
+                       for_each_cpu(i, sched_group_cpus(sg)) {
+                               if (!idle_cpu(i))
+                                       goto next;
                        }
-               }
 
-               /*
-                * Lets stop looking for an idle sibling when we reached
-                * the domain that spans the current cpu and prev_cpu.
-                */
-               if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-                   cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-                       break;
+                       target = cpumask_first_and(sched_group_cpus(sg),
+                                       tsk_cpus_allowed(p));
+                       goto done;
+next:
+                       sg = sg->next;
+               } while (sg != sd->groups);
+       }
+       if (!smt) {
+               smt = 1;
+               goto again;
        }
+done:
        rcu_read_unlock();
 
        return target;
@@ -3511,7 +3604,7 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
 }
 
 /**
- * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
 * @sd: sched_domain whose statistics are to be updated.
 * @this_cpu: Cpu for which load balance is currently performed.
 * @idle: Idle status of this_cpu
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index efa0a7b75dde..84802245abd2 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -67,3 +67,4 @@ SCHED_FEAT(NONTASK_POWER, 1)
 SCHED_FEAT(TTWU_QUEUE, 1)
 
 SCHED_FEAT(FORCE_SD_OVERLAP, 0)
+SCHED_FEAT(RT_RUNTIME_SHARE, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 056cbd2e2a27..583a1368afe6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -560,6 +560,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
 {
        int more = 0;
 
+       if (!sched_feat(RT_RUNTIME_SHARE))
+               return more;
+
        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 6318b511afa1..a650694883a1 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
 
        fput(file);
 out_putname:
-       putname(pathname);
+       __putname(pathname);
 out:
        return result;
 }
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index c436e790b21b..8a46f5d64504 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
                struct alarm *alarm;
                ktime_t expired = next->expires;
 
-               if (expired.tv64 >= now.tv64)
+               if (expired.tv64 > now.tv64)
                        break;
 
                alarm = container_of(next, struct alarm, node);
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1ecd6ba36d6c..c4eb71c8b2ea 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -387,6 +387,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
         * released list and do a notify add later.
         */
        if (old) {
+               old->event_handler = clockevents_handle_noop;
                clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index cfc65e1eb9fb..d3ad022136e5 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -548,7 +548,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
         * note a margin of 12.5% is used because this can be computed with
         * a shift, versus say 10% which would require division.
         */
-       return max_nsecs - (max_nsecs >> 5);
+       return max_nsecs - (max_nsecs >> 3);
 }
 
 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -647,7 +647,7 @@ static void clocksource_enqueue(struct clocksource *cs)
 
 /**
 * __clocksource_updatefreq_scale - Used update clocksource with new freq
- * @t:          clocksource to be registered
+ * @cs:         clocksource to be registered
 * @scale:      Scale factor multiplied against freq to get clocksource hz
 * @freq:       clocksource frequency (cycles per second) divided by scale
 *
@@ -669,7 +669,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
         * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
         * margin as we do in clocksource_max_deferment()
         */
-       sec = (cs->mask - (cs->mask >> 5));
+       sec = (cs->mask - (cs->mask >> 3));
        do_div(sec, freq);
        do_div(sec, scale);
        if (!sec)
@@ -699,7 +699,7 @@ EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
 
 /**
 * __clocksource_register_scale - Used to install new clocksources
- * @t:          clocksource to be registered
+ * @cs:         clocksource to be registered
 * @scale:      Scale factor multiplied against freq to get clocksource hz
 * @freq:       clocksource frequency (cycles per second) divided by scale
 *
@@ -727,7 +727,7 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
 
 /**
 * clocksource_register - Used to install new clocksources
- * @t:          clocksource to be registered
+ * @cs:         clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
@@ -761,6 +761,8 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
 
 /**
 * clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs:         clocksource to be changed
+ * @rating:     new rating
 */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
@@ -772,6 +774,7 @@ EXPORT_SYMBOL(clocksource_change_rating);
 
 /**
 * clocksource_unregister - remove a registered clocksource
+ * @cs:         clocksource to be unregistered
 */
 void clocksource_unregister(struct clocksource *cs)
 {
@@ -787,6 +790,7 @@ EXPORT_SYMBOL(clocksource_unregister);
 /**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:        unused
+ * @attr:       unused
 * @buf:        char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
@@ -807,6 +811,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
 /**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:        unused
+ * @attr:       unused
 * @buf:        name of override clocksource
 * @count:      length of buffer
 *
@@ -842,6 +847,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 /**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:        unused
+ * @attr:       unused
 * @buf:        char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f954282d9a82..fd4a7b1625a2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
            (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;
 
-       clockevents_exchange_device(NULL, dev);
+       clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_get_broadcast_mask()))
                tick_broadcast_start_periodic(dev);
diff --git a/kernel/timer.c b/kernel/timer.c
index dbaa62422b13..9c3c62b0c4bc 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1368,7 +1368,7 @@ SYSCALL_DEFINE0(getppid)
        int pid;
 
        rcu_read_lock();
-       pid = task_tgid_vnr(current->real_parent);
+       pid = task_tgid_vnr(rcu_dereference(current->real_parent));
        rcu_read_unlock();
 
        return pid;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 900b409543db..b1e8943fed1d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -152,7 +152,6 @@ void clear_ftrace_function(void)
        ftrace_pid_function = ftrace_stub;
 }
 
-#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 /*
 * For those archs that do not test ftrace_trace_stop in their
@@ -1212,7 +1211,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        if (!src->count) {
                free_ftrace_hash_rcu(*dst);
                rcu_assign_pointer(*dst, EMPTY_HASH);
-               return 0;
+               /* still need to update the function records */
+               ret = 0;
+               goto out;
        }
 
        /*
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 581876f9f387..c212a7f934ec 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
-                       __get_system(system);
                        system->nr_events++;
                        return system->entry;
                }
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 816d3d074979..95dc31efd6dd 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1649,7 +1649,9 @@ static int replace_system_preds(struct event_subsystem *system,
                 */
                err = replace_preds(call, NULL, ps, filter_string, true);
                if (err)
-                       goto fail;
+                       call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+               else
+                       call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
        }
 
        list_for_each_entry(call, &ftrace_events, list) {
@@ -1658,6 +1660,9 @@ static int replace_system_preds(struct event_subsystem *system,
                if (strcmp(call->class->system, system->name) != 0)
                        continue;
 
+               if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+                       continue;
+
                filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
                if (!filter_item)
                        goto fail_mem;
@@ -1686,7 +1691,7 @@ static int replace_system_preds(struct event_subsystem *system,
                 * replace the filter for the call.
                 */
                filter = call->filter;
-               call->filter = filter_item->filter;
+               rcu_assign_pointer(call->filter, filter_item->filter);
                filter_item->filter = filter;
 
                fail = false;
@@ -1741,7 +1746,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
        filter = call->filter;
        if (!filter)
                goto out_unlock;
-       call->filter = NULL;
+       RCU_INIT_POINTER(call->filter, NULL);
        /* Make sure the filter is not being used */
        synchronize_sched();
        __free_filter(filter);
@@ -1782,7 +1787,7 @@ out:
         * string
         */
        tmp = call->filter;
-       call->filter = filter;
+       rcu_assign_pointer(call->filter, filter);
        if (tmp) {
                /* Make sure the call is done with the filter */
                synchronize_sched();