Diffstat (limited to 'kernel')
 kernel/cred.c              |  6
 kernel/early_res.c         |  6
 kernel/irq/chip.c          | 35
 kernel/irq/manage.c        | 22
 kernel/posix-cpu-timers.c  | 10
 kernel/rcupdate.c          | 23
 kernel/resource.c          | 44
 kernel/sched.c             | 12
 kernel/slow-work.c         |  2
 kernel/slow-work.h         |  8
 kernel/softlockup.c        |  4
 kernel/time/tick-oneshot.c | 52
 kernel/time/timekeeping.c  |  3
 kernel/time/timer_list.c   |  3
 kernel/timer.c             |  1
 kernel/trace/ring_buffer.c | 14
 16 files changed, 195 insertions(+), 50 deletions(-)
diff --git a/kernel/cred.c b/kernel/cred.c
index 1ed8ca18790c..1b1129d0cce8 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -364,7 +364,7 @@ struct cred *prepare_usermodehelper_creds(void)
 
 	new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
 	if (!new)
-		return NULL;
+		goto free_tgcred;
 
 	kdebug("prepare_usermodehelper_creds() alloc %p", new);
 
@@ -397,6 +397,10 @@ struct cred *prepare_usermodehelper_creds(void)
 
 error:
 	put_cred(new);
+free_tgcred:
+#ifdef CONFIG_KEYS
+	kfree(tgcred);
+#endif
 	return NULL;
 }
 
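The hunks above fix a leak: when kmem_cache_alloc() failed, prepare_usermodehelper_creds() returned without freeing the tgcred buffer allocated earlier, so the failure path now unwinds through a label. A minimal userspace sketch of the same goto-unwind idiom (ctx_new() and its fields are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    /* One allocation per step, one unwind label per step, so every
     * failure path frees exactly what exists so far. */
    struct ctx { char *a; char *b; };

    static struct ctx *ctx_new(void)
    {
        struct ctx *c = malloc(sizeof(*c));
        if (!c)
            return NULL;
        c->a = malloc(64);
        if (!c->a)
            goto free_ctx;
        c->b = malloc(64);
        if (!c->b)
            goto free_a;
        return c;
    free_a:
        free(c->a);
    free_ctx:
        free(c);
        return NULL;
    }

    int main(void)
    {
        struct ctx *c = ctx_new();
        printf("ctx_new: %s\n", c ? "ok" : "failed without leaking");
        if (c) { free(c->b); free(c->a); free(c); }
        return 0;
    }
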
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 3cb2c661bb78..31aa9332ef3f 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -333,6 +333,12 @@ void __init free_early_partial(u64 start, u64 end)
 	struct early_res *r;
 	int i;
 
+	if (start == end)
+		return;
+
+	if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end))
+		return;
+
 try_next:
 	i = find_overlapped_early(start, end);
 	if (i >= max_early_res)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 42ec11b2af8a..b7091d5ca2f8 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -359,6 +359,23 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
 		if (desc->chip->ack)
 			desc->chip->ack(irq);
 	}
+	desc->status |= IRQ_MASKED;
+}
+
+static inline void mask_irq(struct irq_desc *desc, int irq)
+{
+	if (desc->chip->mask) {
+		desc->chip->mask(irq);
+		desc->status |= IRQ_MASKED;
+	}
+}
+
+static inline void unmask_irq(struct irq_desc *desc, int irq)
+{
+	if (desc->chip->unmask) {
+		desc->chip->unmask(irq);
+		desc->status &= ~IRQ_MASKED;
+	}
 }
 
 /*
@@ -484,10 +501,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 
-	if (unlikely(desc->status & IRQ_ONESHOT))
-		desc->status |= IRQ_MASKED;
-	else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
-		desc->chip->unmask(irq);
+	if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
+		unmask_irq(desc, irq);
 out_unlock:
 	raw_spin_unlock(&desc->lock);
 }
@@ -524,8 +539,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	action = desc->action;
 	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
 		desc->status |= IRQ_PENDING;
-		if (desc->chip->mask)
-			desc->chip->mask(irq);
+		mask_irq(desc, irq);
 		goto out;
 	}
 
@@ -593,7 +607,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	irqreturn_t action_ret;
 
 	if (unlikely(!action)) {
-		desc->chip->mask(irq);
+		mask_irq(desc, irq);
 		goto out_unlock;
 	}
 
@@ -605,8 +619,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		if (unlikely((desc->status &
 			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
 			      (IRQ_PENDING | IRQ_MASKED))) {
-			desc->chip->unmask(irq);
-			desc->status &= ~IRQ_MASKED;
+			unmask_irq(desc, irq);
 		}
 
 		desc->status &= ~IRQ_PENDING;
@@ -716,7 +729,7 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 	__set_irq_handler(irq, handle, 0, name);
 }
 
-void __init set_irq_noprobe(unsigned int irq)
+void set_irq_noprobe(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
@@ -731,7 +744,7 @@ void __init set_irq_noprobe(unsigned int irq)
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
-void __init set_irq_probe(unsigned int irq)
+void set_irq_probe(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
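The new mask_irq()/unmask_irq() helpers keep the IRQ_MASKED flag and the chip callback in step, and they also tolerate a missing callback, which the open-coded handle_edge_irq() path did not. A minimal standalone sketch of the pattern (the irq_like names are hypothetical):

    #include <stdio.h>

    /* Miniature of the pattern: the status bit and the hardware
     * callback are only ever updated together, and a missing
     * callback is tolerated. */
    #define IRQ_MASKED 0x1u

    struct irq_like {
        unsigned int status;
        void (*mask)(int irq);
    };

    static void hw_mask(int irq) { printf("hw: mask %d\n", irq); }

    static void mask_irq_like(struct irq_like *d, int irq)
    {
        if (d->mask) {
            d->mask(irq);
            d->status |= IRQ_MASKED;  /* flag mirrors the hardware */
        }
    }

    int main(void)
    {
        struct irq_like chipless = { 0, 0 }, chipped = { 0, hw_mask };

        mask_irq_like(&chipless, 1);  /* no callback: no flag drift */
        mask_irq_like(&chipped, 2);
        printf("chipless=%u chipped=%u\n", chipless.status, chipped.status);
        return 0;
    }
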
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index eb6078ca60c7..398fda155f6e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -382,6 +382,7 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
+	unsigned long flags;
 
 	if (!desc)
 		return 0;
@@ -389,11 +390,14 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
 	if (desc->status & IRQ_NOREQUEST)
 		return 0;
 
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	action = desc->action;
 	if (action)
 		if (irqflags & action->flags & IRQF_SHARED)
 			action = NULL;
 
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
 	return !action;
 }
 
@@ -483,8 +487,26 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  */
 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 {
+again:
 	chip_bus_lock(irq, desc);
 	raw_spin_lock_irq(&desc->lock);
+
+	/*
+	 * Implausible though it may be we need to protect us against
+	 * the following scenario:
+	 *
+	 * The thread is faster done than the hard interrupt handler
+	 * on the other CPU. If we unmask the irq line then the
+	 * interrupt can come in again and masks the line, leaves due
+	 * to IRQ_INPROGRESS and the irq line is masked forever.
+	 */
+	if (unlikely(desc->status & IRQ_INPROGRESS)) {
+		raw_spin_unlock_irq(&desc->lock);
+		chip_bus_sync_unlock(irq, desc);
+		cpu_relax();
+		goto again;
+	}
+
 	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
 		desc->status &= ~IRQ_MASKED;
 		desc->chip->unmask(irq);
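The comment in the hunk above explains the retry: if the hard handler is still running on another CPU, the thread drops every lock, relaxes, and starts over rather than unmasking underneath it. A minimal userspace sketch of that drop-and-retry shape (finalize() and the flag are hypothetical stand-ins):

    #include <pthread.h>
    #include <sched.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_int in_progress;  /* would be set by the "hard irq" side */

    static void finalize(void)
    {
    again:
        pthread_mutex_lock(&lock);
        if (atomic_load(&in_progress)) {
            /* Never spin under the lock: drop it, let the other
             * side finish, retry from the top. */
            pthread_mutex_unlock(&lock);
            sched_yield();           /* stand-in for cpu_relax() */
            goto again;
        }
        puts("safe to unmask: no handler in progress");
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        finalize();
        return 0;
    }
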
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 1a22dfd42df9..bc7704b3a443 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1061,9 +1061,9 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 }
 
-static void stop_process_timers(struct task_struct *tsk)
+static void stop_process_timers(struct signal_struct *sig)
 {
-	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+	struct thread_group_cputimer *cputimer = &sig->cputimer;
 	unsigned long flags;
 
 	if (!cputimer->running)
@@ -1072,6 +1072,10 @@ static void stop_process_timers(struct task_struct *tsk)
 	spin_lock_irqsave(&cputimer->lock, flags);
 	cputimer->running = 0;
 	spin_unlock_irqrestore(&cputimer->lock, flags);
+
+	sig->cputime_expires.prof_exp = cputime_zero;
+	sig->cputime_expires.virt_exp = cputime_zero;
+	sig->cputime_expires.sched_exp = 0;
 }
 
 static u32 onecputick;
@@ -1133,7 +1137,7 @@ static void check_process_timers(struct task_struct *tsk,
 	    list_empty(&timers[CPUCLOCK_VIRT]) &&
 	    cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
 	    list_empty(&timers[CPUCLOCK_SCHED])) {
-		stop_process_timers(tsk);
+		stop_process_timers(sig);
 		return;
 	}
 
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index f1125c1a6321..63fe25433980 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -45,6 +45,7 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/kernel_stat.h>
+#include <linux/hardirq.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
@@ -66,6 +67,28 @@ EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+/**
+ * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ *
+ * Check for bottom half being disabled, which covers both the
+ * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
+ * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
+ * will show the situation.
+ *
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
+ */
+int rcu_read_lock_bh_held(void)
+{
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return in_softirq();
+}
+EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 /*
  * This function is invoked towards the end of the scheduler's initialization
  * process. Before this is called, the idle task might contain
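rcu_read_lock_bh_held() is a debug helper: it returns 1 whenever lockdep cannot prove anything, so it belongs in assertions, not in control flow. A hedged sketch of typical use, assuming the rcu_dereference_check() helper from the same PROVE_RCU series is available (global_foo and lookup_foo are hypothetical):

    /* Hypothetical RCU-bh protected pointer and reader. */
    static struct foo __rcu *global_foo;

    static struct foo *lookup_foo(void)
    {
        /* Under CONFIG_PROVE_RCU this complains unless bottom halves
         * are disabled (i.e. the caller took rcu_read_lock_bh());
         * with lockdep off, rcu_read_lock_bh_held() returns 1. */
        return rcu_dereference_check(global_foo,
                                     rcu_read_lock_bh_held());
    }
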
diff --git a/kernel/resource.c b/kernel/resource.c
index 2d5be5d9bf5f..9c358e263534 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -219,19 +219,34 @@ void release_child_resources(struct resource *r)
 }
 
 /**
- * request_resource - request and reserve an I/O or memory resource
+ * request_resource_conflict - request and reserve an I/O or memory resource
  * @root: root resource descriptor
  * @new: resource descriptor desired by caller
  *
- * Returns 0 for success, negative error code on error.
+ * Returns 0 for success, conflict resource on error.
  */
-int request_resource(struct resource *root, struct resource *new)
+struct resource *request_resource_conflict(struct resource *root, struct resource *new)
 {
 	struct resource *conflict;
 
 	write_lock(&resource_lock);
 	conflict = __request_resource(root, new);
 	write_unlock(&resource_lock);
+	return conflict;
+}
+
+/**
+ * request_resource - request and reserve an I/O or memory resource
+ * @root: root resource descriptor
+ * @new: resource descriptor desired by caller
+ *
+ * Returns 0 for success, negative error code on error.
+ */
+int request_resource(struct resource *root, struct resource *new)
+{
+	struct resource *conflict;
+
+	conflict = request_resource_conflict(root, new);
 	return conflict ? -EBUSY : 0;
 }
 
@@ -474,25 +489,40 @@ static struct resource * __insert_resource(struct resource *parent, struct resou
 }
 
 /**
- * insert_resource - Inserts a resource in the resource tree
+ * insert_resource_conflict - Inserts resource in the resource tree
  * @parent: parent of the new resource
  * @new: new resource to insert
  *
- * Returns 0 on success, -EBUSY if the resource can't be inserted.
+ * Returns 0 on success, conflict resource if the resource can't be inserted.
  *
- * This function is equivalent to request_resource when no conflict
+ * This function is equivalent to request_resource_conflict when no conflict
  * happens. If a conflict happens, and the conflicting resources
  * entirely fit within the range of the new resource, then the new
  * resource is inserted and the conflicting resources become children of
  * the new resource.
  */
-int insert_resource(struct resource *parent, struct resource *new)
+struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
 {
 	struct resource *conflict;
 
 	write_lock(&resource_lock);
 	conflict = __insert_resource(parent, new);
 	write_unlock(&resource_lock);
+	return conflict;
+}
+
+/**
+ * insert_resource - Inserts a resource in the resource tree
+ * @parent: parent of the new resource
+ * @new: new resource to insert
+ *
+ * Returns 0 on success, -EBUSY if the resource can't be inserted.
+ */
+int insert_resource(struct resource *parent, struct resource *new)
+{
+	struct resource *conflict;
+
+	conflict = insert_resource_conflict(parent, new);
 	return conflict ? -EBUSY : 0;
 }
 
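The split gives callers that care which resource they collided with the new *_conflict variants, while request_resource() and insert_resource() keep their 0/-EBUSY contract. A hypothetical caller sketch (claim_region and the message are illustrative, not from the patch):

    /* Name the conflicting region instead of returning a bare -EBUSY. */
    static int claim_region(struct resource *root, struct resource *new)
    {
        struct resource *conflict = request_resource_conflict(root, new);

        if (conflict) {
            printk(KERN_WARNING "%s: conflicts with %s\n",
                   new->name, conflict->name);
            return -EBUSY;
        }
        return 0;
    }
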
diff --git a/kernel/sched.c b/kernel/sched.c
index 9ab3cd7858d3..49d2fa7b687a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2650,7 +2650,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
-	int cpu = get_cpu();
+	int cpu __maybe_unused = get_cpu();
 
 #ifdef CONFIG_SMP
 	/*
@@ -4902,7 +4902,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 	int ret;
 	cpumask_var_t mask;
 
-	if (len < cpumask_size())
+	if (len < nr_cpu_ids)
+		return -EINVAL;
+	if (len & (sizeof(unsigned long)-1))
 		return -EINVAL;
 
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
@@ -4910,10 +4912,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 
 	ret = sched_getaffinity(pid, mask);
 	if (ret == 0) {
-		if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
+		size_t retlen = min_t(size_t, len, cpumask_size());
+
+		if (copy_to_user(user_mask_ptr, mask, retlen))
 			ret = -EFAULT;
 		else
-			ret = cpumask_size();
+			ret = retlen;
 	}
 	free_cpumask_var(mask);
 
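With the checks above, the syscall accepts user buffers smaller than the kernel's cpumask_size() (as long as the length is a multiple of sizeof(unsigned long) and not too small) and returns the number of bytes it actually copied. A small userspace sketch using the raw syscall, whose return value the glibc wrapper normally hides:

    #include <sched.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        cpu_set_t mask;

        /* Raw syscall: on success it returns the number of bytes
         * copied into mask (min(len, kernel mask size) after this
         * patch), not 0 like the glibc sched_getaffinity() wrapper. */
        long n = syscall(SYS_sched_getaffinity, 0, sizeof(mask), &mask);
        if (n < 0) {
            perror("sched_getaffinity");
            return 1;
        }
        printf("kernel copied %ld bytes; CPU0 %s\n", n,
               CPU_ISSET(0, &mask) ? "usable" : "not usable");
        return 0;
    }
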
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 7494bbf5a270..7d3f4fa9ef4f 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -637,7 +637,7 @@ int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
 		goto cancelled;
 
 	/* the timer holds a reference whilst it is pending */
-	ret = work->ops->get_ref(work);
+	ret = slow_work_get_ref(work);
 	if (ret < 0)
 		goto cant_get_ref;
 
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
index 321f3c59d732..a29ebd1ef41d 100644
--- a/kernel/slow-work.h
+++ b/kernel/slow-work.h
@@ -43,28 +43,28 @@ extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
  */
 static inline void slow_work_set_thread_pid(int id, pid_t pid)
 {
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	slow_work_pids[id] = pid;
 #endif
 }
 
 static inline void slow_work_mark_time(struct slow_work *work)
 {
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	work->mark = CURRENT_TIME;
 #endif
 }
 
 static inline void slow_work_begin_exec(int id, struct slow_work *work)
 {
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	slow_work_execs[id] = work;
 #endif
 }
 
 static inline void slow_work_end_exec(int id, struct slow_work *work)
 {
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	write_lock(&slow_work_execs_lock);
 	slow_work_execs[id] = NULL;
 	write_unlock(&slow_work_execs_lock);
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 0d4c7898ab80..4b493f67dcb5 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -155,11 +155,11 @@ void softlockup_tick(void)
 	 * Wake up the high-prio watchdog task twice per
 	 * threshold timespan.
 	 */
-	if (now > touch_ts + softlockup_thresh/2)
+	if (time_after(now - softlockup_thresh/2, touch_ts))
 		wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
 
 	/* Warn about unreasonable delays: */
-	if (now <= (touch_ts + softlockup_thresh))
+	if (time_before_eq(now - softlockup_thresh, touch_ts))
 		return;
 
 	per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
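The conversions above rewrite the comparisons so they survive counter wraparound: time_after()/time_before_eq() subtract first and test the sign of the result. A minimal userspace sketch of the trick on an explicit 32-bit counter (time_after32 here is a local stand-in, not the kernel macro):

    #include <stdint.h>
    #include <stdio.h>

    /* True when a is later than b, tolerant of wraparound, assuming
     * the two values are within half the counter range of each other. */
    #define time_after32(a, b)  ((int32_t)((b) - (a)) < 0)

    int main(void)
    {
        uint32_t touch = UINT32_MAX - 15;  /* just before the counter wraps */
        uint32_t now   = 16;               /* 32 ticks later, after the wrap */

        printf("naive now > touch       : %d\n", now > touch);              /* 0: wrong */
        printf("time_after32(now, touch): %d\n", time_after32(now, touch)); /* 1: right */
        return 0;
    }
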
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 0a8a213016f0..aada0e52680a 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -22,6 +22,29 @@
 
 #include "tick-internal.h"
 
+/* Limit min_delta to a jiffie */
+#define MIN_DELTA_LIMIT	(NSEC_PER_SEC / HZ)
+
+static int tick_increase_min_delta(struct clock_event_device *dev)
+{
+	/* Nothing to do if we already reached the limit */
+	if (dev->min_delta_ns >= MIN_DELTA_LIMIT)
+		return -ETIME;
+
+	if (dev->min_delta_ns < 5000)
+		dev->min_delta_ns = 5000;
+	else
+		dev->min_delta_ns += dev->min_delta_ns >> 1;
+
+	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
+		dev->min_delta_ns = MIN_DELTA_LIMIT;
+
+	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
+	       dev->name ? dev->name : "?",
+	       (unsigned long long) dev->min_delta_ns);
+	return 0;
+}
+
 /**
  * tick_program_event internal worker function
  */
@@ -37,23 +60,28 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
 	if (!ret || !force)
 		return ret;
 
+	dev->retries++;
 	/*
-	 * We tried 2 times to program the device with the given
-	 * min_delta_ns. If that's not working then we double it
+	 * We tried 3 times to program the device with the given
+	 * min_delta_ns. If that's not working then we increase it
 	 * and emit a warning.
 	 */
 	if (++i > 2) {
 		/* Increase the min. delta and try again */
-		if (!dev->min_delta_ns)
-			dev->min_delta_ns = 5000;
-		else
-			dev->min_delta_ns += dev->min_delta_ns >> 1;
-
-		printk(KERN_WARNING
-		       "CE: %s increasing min_delta_ns to %llu nsec\n",
-		       dev->name ? dev->name : "?",
-		       (unsigned long long) dev->min_delta_ns << 1);
-
+		if (tick_increase_min_delta(dev)) {
+			/*
+			 * Get out of the loop if min_delta_ns
+			 * hit the limit already. That's
+			 * better than staying here forever.
+			 *
+			 * We clear next_event so we have a
+			 * chance that the box survives.
+			 */
+			printk(KERN_WARNING
+			       "CE: Reprogramming failure. Giving up\n");
+			dev->next_event.tv64 = KTIME_MAX;
+			return -ETIME;
+		}
 		i = 0;
 	}
 
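For reference, the new backoff grows min_delta_ns by 50% per reprogramming failure from a 5 us floor and stops at the one-jiffy cap, after which tick_dev_program_event() now gives up with -ETIME instead of looping forever. A small standalone sketch of the resulting sequence, assuming HZ=1000 for the cap:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long long limit = 1000000000ULL / 1000; /* one jiffy at HZ=1000 */
        unsigned long long min_delta = 0;
        int step = 0;

        /* Mirror tick_increase_min_delta(): 5 us floor, then +50%
         * per failure, clamped to one jiffy. */
        while (min_delta < limit) {
            if (min_delta < 5000)
                min_delta = 5000;
            else
                min_delta += min_delta >> 1;
            if (min_delta > limit)
                min_delta = limit;
            printf("retry %2d: min_delta_ns = %llu\n", ++step, min_delta);
        }
        return 0;  /* the next failure would give up with -ETIME */
    }
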
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 16736379a9ca..39f6177fafac 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -818,7 +818,8 @@ void update_wall_time(void)
 	shift = min(shift, maxshift);
 	while (offset >= timekeeper.cycle_interval) {
 		offset = logarithmic_accumulation(offset, shift);
-		shift--;
+		if(offset < timekeeper.cycle_interval<<shift)
+			shift--;
 	}
 
 	/* correct the clock when NTP error is too big */
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index bdfb8dd1050c..1a4a7dd78777 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -228,6 +228,7 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
 	SEQ_printf(m, " event_handler:  ");
 	print_name_offset(m, dev->event_handler);
 	SEQ_printf(m, "\n");
+	SEQ_printf(m, " retries:        %lu\n", dev->retries);
 }
 
 static void timer_list_show_tickdevices(struct seq_file *m)
@@ -257,7 +258,7 @@ static int timer_list_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Timer List Version: v0.5\n");
+	SEQ_printf(m, "Timer List Version: v0.6\n");
 	SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
 	SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
 
diff --git a/kernel/timer.c b/kernel/timer.c
index c61a7949387f..fc965eae0e87 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -880,6 +880,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
 	if (base->running_timer == timer)
 		goto out;
 
+	timer_stats_timer_clear_start_info(timer);
 	ret = 0;
 	if (timer_pending(timer)) {
 		detach_timer(timer, 1);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 05a9f83b8819..d1187ef20caf 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -207,6 +207,14 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 #define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+# define RB_FORCE_8BYTE_ALIGNMENT	0
+# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT	1
+# define RB_ARCH_ALIGNMENT		8U
+#endif
+
 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 
@@ -1547,7 +1555,7 @@ rb_update_event(struct ring_buffer_event *event,
 
 	case 0:
 		length -= RB_EVNT_HDR_SIZE;
-		if (length > RB_MAX_SMALL_DATA)
+		if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 			event->array[0] = length;
 		else
 			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
@@ -1722,11 +1730,11 @@ static unsigned rb_calculate_event_length(unsigned length)
 	if (!length)
 		length = 1;
 
-	if (length > RB_MAX_SMALL_DATA)
+	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 		length += sizeof(event.array[0]);
 
 	length += RB_EVNT_HDR_SIZE;
-	length = ALIGN(length, RB_ALIGNMENT);
+	length = ALIGN(length, RB_ARCH_ALIGNMENT);
 
 	return length;
 }
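On 64-bit targets without efficient unaligned access, event sizes are now rounded to 8 bytes and the length always goes to event->array[0], so 8-byte fields inside the payload stay naturally aligned. A standalone sketch of the size arithmetic, assuming a 4-byte event header and 4-byte length field as in the struct layout these macros serve:

    #include <stdio.h>

    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        /* Assumed sizes mirroring the macros above. */
        const unsigned hdr = 4, len_field = 4;
        unsigned payload;

        for (payload = 1; payload <= 22; payload += 7) {
            unsigned packed = ALIGN(payload + hdr, 4);             /* RB_ALIGNMENT      */
            unsigned forced = ALIGN(payload + hdr + len_field, 8); /* RB_ARCH_ALIGNMENT */
            printf("payload %2u: 4-byte aligned %2u, forced 8-byte %2u\n",
                   payload, packed, forced);
        }
        return 0;
    }
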