author		Marcelo Tosatti <mtosatti@redhat.com>	2012-08-26 12:58:41 -0400
committer	Marcelo Tosatti <mtosatti@redhat.com>	2012-08-26 12:58:41 -0400
commit		c78aa4c4b94b5b148be576a9f1570e31fe282b46 (patch)
tree		6d5801eb3e60bcec6a91c7b13a334c6d2ea11b05 /kernel
parent		90993cdd1800dc6ef9587431a0c625b978584e81 (diff)
parent		9acb172543aecb783e2e1e53e3f447d4c0f5c150 (diff)
Merge remote-tracking branch 'upstream/master' into queue
Merging critical fixes from upstream required for development.
* upstream/master: (809 commits)
libata: Add a space to " 2GB ATA Flash Disk" DMA blacklist entry
Revert "powerpc: Update g5_defconfig"
powerpc/perf: Use pmc_overflow() to detect rolled back events
powerpc: Fix VMX in interrupt check in POWER7 copy loops
powerpc: POWER7 copy_to_user/copy_from_user patch applied twice
powerpc: Fix personality handling in ppc64_personality()
powerpc/dma-iommu: Fix IOMMU window check
powerpc: Remove unnecessary ifdefs
powerpc/kgdb: Restore current_thread_info properly
powerpc/kgdb: Bail out of KGDB when we've been triggered
powerpc/kgdb: Do not set kgdb_single_step on ppc
powerpc/mpic_msgr: Add missing includes
powerpc: Fix null pointer deref in perf hardware breakpoints
powerpc: Fixup whitespace in xmon
powerpc: Fix xmon dl command for new printk implementation
xfs: check for possible overflow in xfs_ioc_trim
xfs: unlock the AGI buffer when looping in xfs_dialloc
xfs: fix uninitialised variable in xfs_rtbuf_get()
powerpc/fsl: fix "Failed to mount /dev: No such device" errors
powerpc/fsl: update defconfigs
...
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/audit_tree.c		19
-rw-r--r--	kernel/fork.c			 4
-rw-r--r--	kernel/power/suspend.c		 3
-rw-r--r--	kernel/printk.c			 2
-rw-r--r--	kernel/sched/core.c		35
-rw-r--r--	kernel/sched/fair.c		11
-rw-r--r--	kernel/sched/rt.c		13
-rw-r--r--	kernel/sched/sched.h		 8
-rw-r--r--	kernel/sched/stop_task.c	22
-rw-r--r--	kernel/task_work.c		 1
-rw-r--r--	kernel/time/timekeeping.c	68
-rw-r--r--	kernel/timer.c			 9
-rw-r--r--	kernel/trace/trace_syscalls.c	 4
-rw-r--r--	kernel/watchdog.c		21
14 files changed, 142 insertions(+), 78 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 3a5ca582ba1e..ed206fd88cca 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -250,7 +250,6 @@ static void untag_chunk(struct node *p)
 		spin_unlock(&hash_lock);
 		spin_unlock(&entry->lock);
 		fsnotify_destroy_mark(entry);
-		fsnotify_put_mark(entry);
 		goto out;
 	}
 
@@ -259,7 +258,7 @@ static void untag_chunk(struct node *p)
 
 	fsnotify_duplicate_mark(&new->mark, entry);
 	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
-		free_chunk(new);
+		fsnotify_put_mark(&new->mark);
 		goto Fallback;
 	}
 
@@ -293,7 +292,7 @@ static void untag_chunk(struct node *p)
 	spin_unlock(&hash_lock);
 	spin_unlock(&entry->lock);
 	fsnotify_destroy_mark(entry);
-	fsnotify_put_mark(entry);
+	fsnotify_put_mark(&new->mark);	/* drop initial reference */
 	goto out;
 
 Fallback:
@@ -322,7 +321,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
 
 	entry = &chunk->mark;
 	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
-		free_chunk(chunk);
+		fsnotify_put_mark(entry);
 		return -ENOSPC;
 	}
 
@@ -347,6 +346,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
 	insert_hash(chunk);
 	spin_unlock(&hash_lock);
 	spin_unlock(&entry->lock);
+	fsnotify_put_mark(entry);	/* drop initial reference */
 	return 0;
 }
 
@@ -396,7 +396,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	fsnotify_duplicate_mark(chunk_entry, old_entry);
 	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
 		spin_unlock(&old_entry->lock);
-		free_chunk(chunk);
+		fsnotify_put_mark(chunk_entry);
 		fsnotify_put_mark(old_entry);
 		return -ENOSPC;
 	}
@@ -444,8 +444,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	spin_unlock(&chunk_entry->lock);
 	spin_unlock(&old_entry->lock);
 	fsnotify_destroy_mark(old_entry);
+	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
 	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
-	fsnotify_put_mark(old_entry); /* and kill it */
 	return 0;
 }
 
@@ -916,7 +916,12 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
 	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
 
 	evict_chunk(chunk);
-	fsnotify_put_mark(entry);
+
+	/*
+	 * We are guaranteed to have at least one reference to the mark from
+	 * either the inode or the caller of fsnotify_destroy_mark().
+	 */
+	BUG_ON(atomic_read(&entry->refcnt) < 1);
 }
 
 static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
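All of the audit_tree hunks above converge on one ownership rule: once fsnotify_add_mark() has been attempted, the mark's lifetime is governed by its reference count, so failure paths drop the reference with fsnotify_put_mark() instead of freeing the chunk directly, and the creator drops its initial reference once the mark is published. A minimal userspace sketch of that "initial reference" pattern (illustrative names, not the kernel API):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct mark { atomic_int refcnt; };

	static struct mark *mark_alloc(void)
	{
		struct mark *m = malloc(sizeof(*m));
		atomic_init(&m->refcnt, 1);	/* creator's initial reference */
		return m;
	}

	static void mark_get(struct mark *m) { atomic_fetch_add(&m->refcnt, 1); }

	static void mark_put(struct mark *m)
	{
		if (atomic_fetch_sub(&m->refcnt, 1) == 1) {
			puts("last reference dropped, freeing");
			free(m);
		}
	}

	/* Hypothetical attach: on success the "inode" takes its own reference. */
	static int attach(struct mark *m, int fail)
	{
		if (fail)
			return -1;	/* caller still owns its initial reference */
		mark_get(m);
		return 0;
	}

	int main(void)
	{
		struct mark *m = mark_alloc();

		if (attach(m, 0) != 0) {
			mark_put(m);	/* error path: put, never free directly */
			return 1;
		}
		mark_put(m);	/* published: drop the initial reference */
		mark_put(m);	/* the attachment's reference, dropped last, frees */
		return 0;
	}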
diff --git a/kernel/fork.c b/kernel/fork.c
index 3bd2280d79f6..2c8857e12855 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -455,8 +455,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		if (retval)
 			goto out;
 
-		if (file && uprobe_mmap(tmp))
-			goto out;
+		if (file)
+			uprobe_mmap(tmp);
 	}
 	/* a new mm has just been created */
 	arch_dup_mmap(oldmm, mm);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 1da39ea248fd..c8b7446b27df 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -178,9 +178,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 	arch_suspend_enable_irqs();
 	BUG_ON(irqs_disabled());
 
-	/* Kick the lockup detector */
-	lockup_detector_bootcpu_resume();
-
  Enable_cpus:
 	enable_nonboot_cpus();
 
diff --git a/kernel/printk.c b/kernel/printk.c
index 6a76ab9d4476..66a2ea37b576 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1034,6 +1034,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 			struct log *msg = log_from_idx(idx);
 
 			len += msg_print_text(msg, prev, true, NULL, 0);
+			prev = msg->flags;
 			idx = log_next(idx);
 			seq++;
 		}
@@ -1046,6 +1047,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 			struct log *msg = log_from_idx(idx);
 
 			len -= msg_print_text(msg, prev, true, NULL, 0);
+			prev = msg->flags;
 			idx = log_next(idx);
 			seq++;
 		}
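Both printk hunks are the same one-line fix in the two sizing loops of syslog_print_all(): msg_print_text() needs the previous record's flags to decide whether the next record is rendered with a prefix, so prev must be refreshed every iteration or the computed lengths drift. A standalone sketch of that dependency (simplified flags and lengths, not the kernel's actual record format):

	#include <stdio.h>

	enum { LOG_CONT = 1 };	/* this record is continued by the next one */

	struct rec { int flags; int body_len; };

	/* A record is rendered with a prefix only when the previous record
	 * was not flagged as a continuation (simplified model). */
	static int render_len(int prev_flags, const struct rec *r)
	{
		int prefix = (prev_flags & LOG_CONT) ? 0 : 16;
		return prefix + r->body_len;
	}

	int main(void)
	{
		struct rec log[] = { { LOG_CONT, 10 }, { 0, 10 }, { 0, 10 } };
		int prev = 0, len = 0;

		for (unsigned i = 0; i < sizeof(log) / sizeof(log[0]); i++) {
			len += render_len(prev, &log[i]);
			prev = log[i].flags;	/* the line the fix restores */
		}
		printf("total %d\n", len);	/* 62; without updating prev: 78 */
		return 0;
	}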
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 82ad284f823b..fbf1fd098dc6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3142,6 +3142,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 # define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)
 #endif
 
+static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
+{
+	u64 temp = (__force u64) rtime;
+
+	temp *= (__force u64) utime;
+
+	if (sizeof(cputime_t) == 4)
+		temp = div_u64(temp, (__force u32) total);
+	else
+		temp = div64_u64(temp, (__force u64) total);
+
+	return (__force cputime_t) temp;
+}
+
 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
 	cputime_t rtime, utime = p->utime, total = utime + p->stime;
@@ -3151,13 +3165,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 	 */
 	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
-	if (total) {
-		u64 temp = (__force u64) rtime;
-
-		temp *= (__force u64) utime;
-		do_div(temp, (__force u32) total);
-		utime = (__force cputime_t) temp;
-	} else
+	if (total)
+		utime = scale_utime(utime, rtime, total);
+	else
 		utime = rtime;
 
 	/*
@@ -3184,13 +3194,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 	total = cputime.utime + cputime.stime;
 	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
-	if (total) {
-		u64 temp = (__force u64) rtime;
-
-		temp *= (__force u64) cputime.utime;
-		do_div(temp, (__force u32) total);
-		utime = (__force cputime_t) temp;
-	} else
+	if (total)
+		utime = scale_utime(cputime.utime, rtime, total);
+	else
 		utime = rtime;
 
 	sig->prev_utime = max(sig->prev_utime, utime);
@@ -7246,6 +7252,7 @@ int in_sched_functions(unsigned long addr)
 
 #ifdef CONFIG_CGROUP_SCHED
 struct task_group root_task_group;
+LIST_HEAD(task_groups);
 #endif
 
 DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
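The new scale_utime() helper addresses a real overflow: do_div() takes a 32-bit divisor, but when cputime_t is 64 bits wide (e.g. CONFIG_VIRT_CPU_ACCOUNTING) total can exceed 32 bits, so the old inline code divided by a truncated value. A compilable userspace sketch of the failure and the fix, with plain division standing in for the kernel's div_u64()/div64_u64():

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t cputime_t;	/* 64-bit cputime, as with CONFIG_VIRT_CPU_ACCOUNTING */

	static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
	{
		uint64_t temp = (uint64_t)rtime * (uint64_t)utime;

		if (sizeof(cputime_t) == 4)
			temp /= (uint32_t)total;	/* stand-in for div_u64() */
		else
			temp /= total;			/* stand-in for div64_u64() */

		return (cputime_t)temp;
	}

	int main(void)
	{
		cputime_t utime = 1000, total = 0x100000001ULL;	/* total > 32 bits */
		/* old do_div() behaviour: divisor truncated to (u32)total == 1 */
		uint64_t broken = (uint64_t)total * utime / (uint32_t)total;

		printf("correct:           %llu\n", (unsigned long long)scale_utime(utime, total, total));
		printf("truncated divisor: %llu\n", (unsigned long long)broken);
		return 0;
	}

Run as written, the correct path prints 1000 (utime scaled by rtime/total == 1) while the truncated-divisor path inflates it to 4294967297000.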
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d0cc03b3e70b..c219bf8d704c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3387,6 +3387,14 @@ static int tg_load_down(struct task_group *tg, void *data)
 
 static void update_h_load(long cpu)
 {
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long now = jiffies;
+
+	if (rq->h_load_throttle == now)
+		return;
+
+	rq->h_load_throttle = now;
+
 	rcu_read_lock();
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 	rcu_read_unlock();
@@ -4293,11 +4301,10 @@ redo:
 	env.src_rq      = busiest;
 	env.loop_max    = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
+	update_h_load(env.src_cpu);
 more_balance:
 	local_irq_save(flags);
 	double_rq_lock(this_rq, busiest);
-	if (!env.loop)
-		update_h_load(env.src_cpu);
 
 	/*
 	 * cur_ld_moved - load moved in current iteration
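The update_h_load() change is a rate limit: walk_tg_tree() over every task group is expensive, and load balancing can trigger it repeatedly within one tick, so a per-runqueue jiffies stamp caps the recomputation at once per jiffy (and the call is hoisted out of the more_balance retry loop to match). A userspace sketch of the pattern with a faked jiffies counter:

	#include <stdio.h>

	static unsigned long jiffies;		/* stand-in for the kernel tick counter */
	static unsigned long h_load_throttle;	/* tick of the last recomputation */

	static void expensive_recompute(void) { puts("recomputing h_load"); }

	static void update_h_load(void)
	{
		unsigned long now = jiffies;

		if (h_load_throttle == now)	/* already done this tick */
			return;
		h_load_throttle = now;
		expensive_recompute();
	}

	int main(void)
	{
		jiffies = 1;
		update_h_load();	/* recomputes */
		update_h_load();	/* skipped: same tick */
		jiffies = 2;
		update_h_load();	/* recomputes again */
		return 0;
	}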
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 573e1ca01102..944cb68420e9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -788,6 +788,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 	const struct cpumask *span;
 
 	span = sched_rt_period_mask();
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * FIXME: isolated CPUs should really leave the root task group,
+	 * whether they are isolcpus or were isolated via cpusets, lest
+	 * the timer run on a CPU which does not service all runqueues,
+	 * potentially leaving other CPUs indefinitely throttled.  If
+	 * isolation is really required, the user will turn the throttle
+	 * off to kill the perturbations it causes anyway.  Meanwhile,
+	 * this maintains functionality for boot and/or troubleshooting.
+	 */
+	if (rt_b == &root_task_group.rt_bandwidth)
+		span = cpu_online_mask;
+#endif
 	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c35a1a7dd4d6..f6714d009e77 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -80,7 +80,7 @@ extern struct mutex sched_domains_mutex;
 struct cfs_rq;
 struct rt_rq;
 
-static LIST_HEAD(task_groups);
+extern struct list_head task_groups;
 
 struct cfs_bandwidth {
 #ifdef CONFIG_CFS_BANDWIDTH
@@ -374,7 +374,11 @@ struct rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
 	struct list_head leaf_cfs_rq_list;
-#endif
+#ifdef CONFIG_SMP
+	unsigned long h_load_throttle;
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct list_head leaf_rt_rq_list;
 #endif
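The one-line sched.h change fixes a classic C pitfall: a static list head defined in a header gives every translation unit that includes it its own private copy, so a task group registered through one .c file is invisible to the others. The pairing hunk in core.c above supplies the single real definition. The general shape of the fix, as a compilable sketch outside the kernel:

	/* shared.h -- declare, don't define: every includer sees one object */
	struct list_head { struct list_head *next, *prev; };
	extern struct list_head task_groups;

	/* core.c -- exactly one definition, here an empty circular list */
	struct list_head task_groups = { &task_groups, &task_groups };

	/* Any other .c file including shared.h now links against this same
	 * task_groups.  With 'static' in the header, each file would have
	 * silently walked its own, always-empty copy instead. */
	int main(void) { return task_groups.next == &task_groups ? 0 : 1; }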
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 7b386e86fd23..da5eb5bed84a 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -27,8 +27,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
 {
 	struct task_struct *stop = rq->stop;
 
-	if (stop && stop->on_rq)
+	if (stop && stop->on_rq) {
+		stop->se.exec_start = rq->clock_task;
 		return stop;
+	}
 
 	return NULL;
 }
@@ -52,6 +54,21 @@ static void yield_task_stop(struct rq *rq)
 
 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 {
+	struct task_struct *curr = rq->curr;
+	u64 delta_exec;
+
+	delta_exec = rq->clock_task - curr->se.exec_start;
+	if (unlikely((s64)delta_exec < 0))
+		delta_exec = 0;
+
+	schedstat_set(curr->se.statistics.exec_max,
+			max(curr->se.statistics.exec_max, delta_exec));
+
+	curr->se.sum_exec_runtime += delta_exec;
+	account_group_exec_runtime(curr, delta_exec);
+
+	curr->se.exec_start = rq->clock_task;
+	cpuacct_charge(curr, delta_exec);
 }
 
 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
@@ -60,6 +77,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
 
 static void set_curr_task_stop(struct rq *rq)
 {
+	struct task_struct *stop = rq->stop;
+
+	stop->se.exec_start = rq->clock_task;
 }
 
 static void switched_to_stop(struct rq *rq, struct task_struct *p)
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 91d4e1742a0c..d320d44903bd 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -75,6 +75,7 @@ void task_work_run(void)
 			p = q->next;
 			q->func(q);
 			q = p;
+			cond_resched();
 		}
 	}
 }
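The task_work change is about latency rather than correctness: a task may queue an unbounded number of callbacks, and running them back to back can hold the CPU for too long, so the loop now offers a reschedule point per item. A userspace analogue of the loop, with sched_yield() standing in for cond_resched():

	#include <sched.h>
	#include <stdio.h>

	struct callback {
		struct callback *next;
		void (*func)(struct callback *);
	};

	static void say(struct callback *cb) { (void)cb; puts("work item"); }

	static void run_all(struct callback *q)
	{
		while (q) {
			struct callback *next = q->next;	/* q->func may free q */
			q->func(q);
			q = next;
			sched_yield();	/* give others a turn between items */
		}
	}

	int main(void)
	{
		struct callback b = { NULL, say }, a = { &b, say };
		run_all(&a);
		return 0;
	}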
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2988bc819187..0c1485e42be6 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -115,6 +115,7 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
 {
 	tk->xtime_sec += ts->tv_sec;
 	tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
+	tk_normalize_xtime(tk);
 }
 
 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
@@ -276,7 +277,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 	tk->xtime_nsec += cycle_delta * tk->mult;
 
 	/* If arch requires, add in gettimeoffset() */
-	tk->xtime_nsec += arch_gettimeoffset() << tk->shift;
+	tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
 
 	tk_normalize_xtime(tk);
 
@@ -427,7 +428,7 @@ int do_settimeofday(const struct timespec *tv)
 	struct timespec ts_delta, xt;
 	unsigned long flags;
 
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+	if (!timespec_valid(tv))
 		return -EINVAL;
 
 	write_seqlock_irqsave(&tk->lock, flags);
@@ -463,6 +464,8 @@ int timekeeping_inject_offset(struct timespec *ts)
 {
 	struct timekeeper *tk = &timekeeper;
 	unsigned long flags;
+	struct timespec tmp;
+	int ret = 0;
 
 	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
@@ -471,10 +474,17 @@ int timekeeping_inject_offset(struct timespec *ts)
 
 	timekeeping_forward_now(tk);
 
+	/* Make sure the proposed value is valid */
+	tmp = timespec_add(tk_xtime(tk), *ts);
+	if (!timespec_valid(&tmp)) {
+		ret = -EINVAL;
+		goto error;
+	}
 
 	tk_xtime_add(tk, ts);
 	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
 
+error: /* even if we error out, we forwarded the time, so call update */
 	timekeeping_update(tk, true);
 
 	write_sequnlock_irqrestore(&tk->lock, flags);
@@ -482,7 +492,7 @@ int timekeeping_inject_offset(struct timespec *ts)
 	/* signal hrtimers about time change */
 	clock_was_set();
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(timekeeping_inject_offset);
 
@@ -649,7 +659,20 @@ void __init timekeeping_init(void)
 	struct timespec now, boot, tmp;
 
 	read_persistent_clock(&now);
+	if (!timespec_valid(&now)) {
+		pr_warn("WARNING: Persistent clock returned invalid value!\n"
+			"         Check your CMOS/BIOS settings.\n");
+		now.tv_sec = 0;
+		now.tv_nsec = 0;
+	}
+
 	read_boot_clock(&boot);
+	if (!timespec_valid(&boot)) {
+		pr_warn("WARNING: Boot clock returned invalid value!\n"
+			"         Check your CMOS/BIOS settings.\n");
+		boot.tv_sec = 0;
+		boot.tv_nsec = 0;
+	}
 
 	seqlock_init(&tk->lock);
 
@@ -923,20 +946,22 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 		if (likely(error <= interval))
 			adj = 1;
 		else
-			adj = timekeeping_bigadjust(tk, error, &interval,
-						&offset);
-	} else if (error < -interval) {
+			adj = timekeeping_bigadjust(tk, error, &interval, &offset);
+	} else {
+		if (error < -interval) {
 			/* See comment above, this is just switched for the negative */
 			error >>= 2;
 			if (likely(error >= -interval)) {
 				adj = -1;
 				interval = -interval;
 				offset = -offset;
-			} else
-				adj = timekeeping_bigadjust(tk, error, &interval,
-						&offset);
-	} else
-		return;
+			} else {
+				adj = timekeeping_bigadjust(tk, error, &interval, &offset);
+			}
+		} else {
+			goto out_adjust;
+		}
+	}
 
 	if (unlikely(tk->clock->maxadj &&
 		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
@@ -999,6 +1024,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 	tk->xtime_nsec -= offset;
 	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
 
+out_adjust:
 	/*
 	 * It may be possible that when we entered this function, xtime_nsec
 	 * was very small. Further, if we're slightly speeding the clocksource
@@ -1126,6 +1152,10 @@ static void update_wall_time(void)
 	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
 
+	/* Check if there's really nothing to do */
+	if (offset < tk->cycle_interval)
+		goto out;
+
 	/*
 	 * With NO_HZ we may have to accumulate many cycle_intervals
 	 * (think "ticks") worth of time at once. To do this efficiently,
@@ -1158,9 +1188,9 @@ static void update_wall_time(void)
 	 * the vsyscall implementations are converted to use xtime_nsec
 	 * (shifted nanoseconds), this can be killed.
 	 */
-	remainder = tk->xtime_nsec & ((1 << tk->shift) - 1);
+	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
 	tk->xtime_nsec -= remainder;
-	tk->xtime_nsec += 1 << tk->shift;
+	tk->xtime_nsec += 1ULL << tk->shift;
 	tk->ntp_error += remainder << tk->ntp_error_shift;
 
 	/*
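Several of the timekeeping hunks fix the same class of bug: a 32-bit value is shifted left by tk->shift and overflows before it is widened to the 64-bit xtime_nsec it feeds, as in arch_gettimeoffset() << tk->shift and 1 << tk->shift. A short compilable demonstration of the truncation (shift value chosen for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t offs = 2000000;	/* e.g. ~2ms of nanoseconds from a 32-bit source */
		unsigned shift = 24;		/* clocksource shifts in this range are common */

		uint64_t wrong = offs << shift;			/* computed in 32 bits, high bits lost */
		uint64_t right = (uint64_t)offs << shift;	/* widened first, as the fixes do */

		printf("wrong: %llu\n", (unsigned long long)wrong);	/* 2147483648 */
		printf("right: %llu\n", (unsigned long long)right);	/* 33554432000000 */
		return 0;
	}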
diff --git a/kernel/timer.c b/kernel/timer.c
index a61c09374eba..8c5e7b908c68 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1407,13 +1407,6 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds)
 
 #endif
 
-#ifndef __alpha__
-
-/*
- * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
- * should be moved into arch/i386 instead?
- */
-
 /**
  * sys_getpid - return the thread group id of the current process
  *
@@ -1469,8 +1462,6 @@ SYSCALL_DEFINE0(getegid)
 	return from_kgid_munged(current_user_ns(), current_egid());
 }
 
-#endif
-
 static void process_timeout(unsigned long __data)
 {
 	wake_up_process((struct task_struct *)__data);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 60e4d7875672..6b245f64c8dd 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -506,6 +506,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
+	if (syscall_nr < 0)
+		return;
 	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
 		return;
 
@@ -580,6 +582,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
+	if (syscall_nr < 0)
+		return;
 	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
 		return;
 
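Both tracing hunks add the same guard: syscall_get_nr() can return -1 when the task is not inside a system call, and feeding that straight into test_bit() indexes before the start of the enabled bitmap. A minimal sketch of why the check must come first (simplified bitmap, illustrative names):

	#include <limits.h>
	#include <stdio.h>

	#define NR_SYSCALLS 512
	#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

	static unsigned long enabled[NR_SYSCALLS / BITS_PER_LONG];

	static int test_bit(int nr, const unsigned long *addr)
	{
		return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
	}

	static void trace_enter(int syscall_nr)
	{
		if (syscall_nr < 0)	/* the added guard: no syscall in progress */
			return;
		if (!test_bit(syscall_nr, enabled))
			return;
		printf("tracing syscall %d\n", syscall_nr);
	}

	int main(void)
	{
		trace_enter(-1);	/* safely ignored instead of reading enabled[-1] */
		return 0;
	}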
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 69add8a9da68..4b1dfba70f7c 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -575,7 +575,7 @@ out:
 /*
  * Create/destroy watchdog threads as CPUs come and go:
  */
-static int
+static int __cpuinit
 cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	int hotcpu = (unsigned long)hcpu;
@@ -610,27 +610,10 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-static struct notifier_block cpu_nfb = {
+static struct notifier_block __cpuinitdata cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
-#ifdef CONFIG_SUSPEND
-/*
- * On exit from suspend we force an offline->online transition on the boot CPU
- * so that the PMU state that was lost while in suspended state gets set up
- * properly for the boot CPU.  This information is required for restarting the
- * NMI watchdog.
- */
-void lockup_detector_bootcpu_resume(void)
-{
-	void *cpu = (void *)(long)smp_processor_id();
-
-	cpu_callback(&cpu_nfb, CPU_DEAD_FROZEN, cpu);
-	cpu_callback(&cpu_nfb, CPU_UP_PREPARE_FROZEN, cpu);
-	cpu_callback(&cpu_nfb, CPU_ONLINE_FROZEN, cpu);
-}
-#endif
-
 void __init lockup_detector_init(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();