author    Ingo Molnar <mingo@elte.hu>  2008-08-13 07:56:44 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-08-13 07:56:44 -0400
commit    73909f7a665991013dcff42a815fda76d3a7300a
tree      84bb9899e2204bf6af6fd4f249bb909c2a89faf8 /kernel
parent    d6672c501852d577097f6757c311d937aca0b04b
parent    30a2f3c60a84092c8084dfe788b710f8d0768cd4
Merge commit 'v2.6.27-rc3' into core/urgent
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.hz        |   2
-rw-r--r--  kernel/cpu.c             |   7
-rw-r--r--  kernel/dma-coherent.c    |  10
-rw-r--r--  kernel/irq/proc.c        |  96
-rw-r--r--  kernel/module.c          |   2
-rw-r--r--  kernel/posix-timers.c    |  19
-rw-r--r--  kernel/sched.c           |   2
-rw-r--r--  kernel/sched_clock.c     | 178
-rw-r--r--  kernel/sched_fair.c      |  21
-rw-r--r--  kernel/signal.c          |   1
-rw-r--r--  kernel/stop_machine.c    |   1
-rw-r--r--  kernel/time/tick-sched.c |   2
12 files changed, 146 insertions, 195 deletions
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 382dd5a8b2d7..94fabd534b03 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -55,4 +55,4 @@ config HZ
 	default 1000 if HZ_1000
 
 config SCHED_HRTICK
-	def_bool HIGH_RES_TIMERS && USE_GENERIC_SMP_HELPERS
+	def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index e202a68d1cc1..f17e9854c246 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -349,6 +349,8 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
 
+	cpu_set(cpu, cpu_active_map);
+
 	/* Now call notifier in preparation. */
 	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
 
@@ -367,7 +369,7 @@ int __cpuinit cpu_up(unsigned int cpu)
 	if (!cpu_isset(cpu, cpu_possible_map)) {
 		printk(KERN_ERR "can't online cpu %d because it is not "
 			"configured as may-hotadd at boot time\n", cpu);
-#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390)
+#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
 		printk(KERN_ERR "please check additional_cpus= boot "
 				"parameter\n");
 #endif
@@ -383,9 +385,6 @@ int __cpuinit cpu_up(unsigned int cpu)
 
 	err = _cpu_up(cpu, 0);
 
-	if (cpu_online(cpu))
-		cpu_set(cpu, cpu_active_map);
-
 out:
 	cpu_maps_update_done();
 	return err;
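
Note on the cpu.c hunks: together they move the cpu_set(cpu, cpu_active_map) from cpu_up() (where it ran only after _cpu_up() had returned) into _cpu_up() itself, before the CPU_ONLINE notifier chain fires, so CPU_ONLINE callbacks already see the new cpu as active. A hedged sketch of why the ordering matters (hypothetical callback, not from this patch):

    /* Hypothetical CPU_ONLINE notifier callback: it may only hand work
     * to cpus that are both online and active.  With the old ordering
     * (active bit set after the notifiers ran) this test would still
     * fail for the cpu that just came up. */
    static int example_online_callback(unsigned int cpu)
    {
            if (cpu_online(cpu) && cpu_isset(cpu, cpu_active_map))
                    return 0;       /* safe to place work on this cpu */
            return -EAGAIN;         /* retry once the cpu is usable */
    }
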
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index 91e96950cd52..c1d4d5b4c61c 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -92,7 +92,7 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
 /**
- * Try to allocate memory from the per-device coherent area.
+ * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
  *
  * @dev:	device from which we allocate memory
  * @size:	size of requested memory area
@@ -100,11 +100,11 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
  * @ret:	This pointer will be filled with the virtual address
  *		to allocated area.
  *
- * This function should be only called from per-arch %dma_alloc_coherent()
+ * This function should be only called from per-arch dma_alloc_coherent()
  * to support allocation from per-device coherent memory pools.
  *
  * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return %ret.
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
  */
 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 		dma_addr_t *dma_handle, void **ret)
@@ -126,7 +126,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 }
 
 /**
- * Try to free the memory allocated from per-device coherent memory pool.
+ * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
  * @dev:	device from which the memory was allocated
  * @order:	the order of pages allocated
  * @vaddr:	virtual address of allocated pages
@@ -135,7 +135,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
  * coherent memory pool and if so, releases that memory.
  *
  * Returns 1 if we correctly released the memory, or 0 if
- * %dma_release_coherent() should proceed with releasing memory from
+ * dma_release_coherent() should proceed with releasing memory from
  * generic pools.
  */
 int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
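
The comment edits above are kernel-doc fixes: a kernel-doc block must open with the function name, parameters are referenced with @name, and the % prefix is reserved for constants, so %dma_alloc_coherent() and %ret were wrong markup. The expected shape, sketched with a hypothetical function:

    /**
     * example_alloc() - one-line summary of the function
     * @dev:  device the memory is allocated for
     * @size: size of the requested area in bytes
     *
     * Use @dev and @size for parameters, %NULL or %GFP_KERNEL for
     * constants, and plain example_helper() for function references.
     *
     * Returns 0 on success or a negative errno on failure.
     */
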
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 6c6d35d68ee9..a09dd29c2fd7 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -8,6 +8,7 @@
 
 #include <linux/irq.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/interrupt.h>
 
 #include "internals.h"
@@ -16,23 +17,18 @@ static struct proc_dir_entry *root_irq_dir;
 
 #ifdef CONFIG_SMP
 
-static int irq_affinity_read_proc(char *page, char **start, off_t off,
-				  int count, int *eof, void *data)
+static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
-	struct irq_desc *desc = irq_desc + (long)data;
+	struct irq_desc *desc = irq_desc + (long)m->private;
 	cpumask_t *mask = &desc->affinity;
-	int len;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
 		mask = &desc->pending_mask;
 #endif
-	len = cpumask_scnprintf(page, count, *mask);
-
-	if (count - len < 2)
-		return -EINVAL;
-	len += sprintf(page + len, "\n");
-	return len;
+	seq_cpumask(m, mask);
+	seq_putc(m, '\n');
+	return 0;
 }
 
 #ifndef is_affinity_mask_valid
@@ -40,11 +36,12 @@ static int irq_affinity_read_proc(char *page, char **start, off_t off,
 #endif
 
 int no_irq_affinity;
-static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
-				   unsigned long count, void *data)
+static ssize_t irq_affinity_proc_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
 {
-	unsigned int irq = (int)(long)data, full_count = count, err;
+	unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
 	cpumask_t new_value;
+	int err;
 
 	if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
 	    irq_balancing_disabled(irq))
@@ -65,28 +62,38 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 	if (!cpus_intersects(new_value, cpu_online_map))
 		/* Special case for empty set - allow the architecture
 		   code to set default SMP affinity. */
-		return irq_select_affinity(irq) ? -EINVAL : full_count;
+		return irq_select_affinity(irq) ? -EINVAL : count;
 
 	irq_set_affinity(irq, new_value);
 
-	return full_count;
+	return count;
 }
 
-static int default_affinity_read(char *page, char **start, off_t off,
-				 int count, int *eof, void *data)
+static int irq_affinity_proc_open(struct inode *inode, struct file *file)
 {
-	int len = cpumask_scnprintf(page, count, irq_default_affinity);
-	if (count - len < 2)
-		return -EINVAL;
-	len += sprintf(page + len, "\n");
-	return len;
+	return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
 }
 
-static int default_affinity_write(struct file *file, const char __user *buffer,
-				  unsigned long count, void *data)
+static const struct file_operations irq_affinity_proc_fops = {
+	.open		= irq_affinity_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= irq_affinity_proc_write,
+};
+
+static int default_affinity_show(struct seq_file *m, void *v)
+{
+	seq_cpumask(m, &irq_default_affinity);
+	seq_putc(m, '\n');
+	return 0;
+}
+
+static ssize_t default_affinity_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *ppos)
 {
-	unsigned int full_count = count, err;
 	cpumask_t new_value;
+	int err;
 
 	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
@@ -105,8 +112,21 @@ static int default_affinity_write(struct file *file, const char __user *buffer,
 
 	irq_default_affinity = new_value;
 
-	return full_count;
+	return count;
 }
+
+static int default_affinity_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, default_affinity_show, NULL);
+}
+
+static const struct file_operations default_affinity_proc_fops = {
+	.open		= default_affinity_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= default_affinity_write,
+};
 #endif
 
 static int irq_spurious_read(char *page, char **start, off_t off,
@@ -178,16 +198,9 @@ void register_irq_proc(unsigned int irq)
 	irq_desc[irq].dir = proc_mkdir(name, root_irq_dir);
 
 #ifdef CONFIG_SMP
-	{
-		/* create /proc/irq/<irq>/smp_affinity */
-		entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir);
-
-		if (entry) {
-			entry->data = (void *)(long)irq;
-			entry->read_proc = irq_affinity_read_proc;
-			entry->write_proc = irq_affinity_write_proc;
-		}
-	}
+	/* create /proc/irq/<irq>/smp_affinity */
+	proc_create_data("smp_affinity", 0600, irq_desc[irq].dir,
+			 &irq_affinity_proc_fops, (void *)(long)irq);
 #endif
 
 	entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir);
@@ -208,15 +221,8 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)
 void register_default_affinity_proc(void)
 {
 #ifdef CONFIG_SMP
-	struct proc_dir_entry *entry;
-
-	/* create /proc/irq/default_smp_affinity */
-	entry = create_proc_entry("default_smp_affinity", 0600, root_irq_dir);
-	if (entry) {
-		entry->data = NULL;
-		entry->read_proc = default_affinity_read;
-		entry->write_proc = default_affinity_write;
-	}
+	proc_create("irq/default_smp_affinity", 0600, NULL,
+		    &default_affinity_proc_fops);
 #endif
 }
 
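
This file is a textbook conversion from the legacy read_proc/write_proc hooks to the seq_file interface: a show() callback renders the whole record, single_open() binds it to the generic seq_read/seq_lseek/single_release helpers, and proc_create_data() registers the file_operations together with a private pointer in one call. A minimal self-contained sketch of the same pattern for a hypothetical /proc file (illustrative names, 2.6.27-era API):

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static unsigned long example_counter;

    static int example_show(struct seq_file *m, void *v)
    {
            /* single_open() stashed the private pointer in m->private */
            unsigned long *counter = m->private;

            seq_printf(m, "%lu\n", *counter);
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, PDE(inode)->data);
    }

    static const struct file_operations example_proc_fops = {
            .open    = example_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    /* registration, e.g. from an init function:
     * proc_create_data("example", 0444, NULL, &example_proc_fops,
     *                  &example_counter);
     */
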
diff --git a/kernel/module.c b/kernel/module.c
index 61d212120df4..08864d257eb0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2288,7 +2288,7 @@ sys_init_module(void __user *umod,
 
 	/* Start the module */
 	if (mod->init != NULL)
-		ret = mod->init();
+		ret = do_one_initcall(mod->init);
 	if (ret < 0) {
 		/* Init routine failed: abort.  Try to protect us from
 		   buggy refcounters. */
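
Calling module init through do_one_initcall() gives modules the same treatment as built-in initcalls: with initcall_debug set the call is logged and timed, and misbehaving initcalls (for instance, returning with interrupts disabled) are reported. A hedged sketch of that idea, simplified and not the verbatim init/main.c implementation:

    /* Rough shape of what the wrapper adds over a bare fn() call. */
    static int run_initcall(initcall_t fn)
    {
            unsigned long long t0 = 0;
            int ret;

            if (initcall_debug)
                    t0 = sched_clock();     /* time the call */

            ret = fn();

            if (initcall_debug)
                    printk(KERN_DEBUG "initcall returned %d after %llu ns\n",
                           ret, sched_clock() - t0);

            if (irqs_disabled()) {          /* catch buggy initcalls */
                    printk(KERN_WARNING "initcall returned with disabled interrupts\n");
                    local_irq_enable();
            }
            return ret;
    }
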
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 9a21681aa80f..e36d5798cbff 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -289,21 +289,29 @@ void do_schedule_next_timer(struct siginfo *info)
 		else
 			schedule_next_timer(timr);
 
-		info->si_overrun = timr->it_overrun_last;
+		info->si_overrun += timr->it_overrun_last;
 	}
 
 	if (timr)
 		unlock_timer(timr, flags);
 }
 
-int posix_timer_event(struct k_itimer *timr,int si_private)
+int posix_timer_event(struct k_itimer *timr, int si_private)
 {
-	memset(&timr->sigq->info, 0, sizeof(siginfo_t));
+	/*
+	 * FIXME: if ->sigq is queued we can race with
+	 * dequeue_signal()->do_schedule_next_timer().
+	 *
+	 * If dequeue_signal() sees the "right" value of
+	 * si_sys_private it calls do_schedule_next_timer().
+	 * We re-queue ->sigq and drop ->it_lock().
+	 * do_schedule_next_timer() locks the timer
+	 * and re-schedules it while ->sigq is pending.
+	 * Not really bad, but not that we want.
+	 */
 	timr->sigq->info.si_sys_private = si_private;
-	/* Send signal to the process that owns this timer.*/
 
 	timr->sigq->info.si_signo = timr->it_sigev_signo;
-	timr->sigq->info.si_errno = 0;
 	timr->sigq->info.si_code = SI_TIMER;
 	timr->sigq->info.si_tid = timr->it_id;
 	timr->sigq->info.si_value = timr->it_sigev_value;
@@ -435,6 +443,7 @@ static struct k_itimer * alloc_posix_timer(void)
 		kmem_cache_free(posix_timers_cache, tmr);
 		tmr = NULL;
 	}
+	memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
 	return tmr;
 }
 
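
Two accounting changes are folded in here: si_overrun now accumulates (+=) across requeues instead of being overwritten, and the siginfo memset moves from every posix_timer_event() call to alloc_posix_timer(); the matching reset of si_overrun on (re)queueing lives in the kernel/signal.c hunk further down. A simplified model of the resulting accumulate-then-reset flow (illustrative, not kernel source):

    /* Simplified model of timer-overrun accounting after this merge. */
    struct timer_state {
            int si_overrun;       /* total reported to userspace    */
            int it_overrun_last;  /* overruns of the last interval  */
    };

    /* send path: runs each time the timer signal is (re)queued */
    static void on_queue(struct timer_state *t, int already_queued)
    {
            if (!already_queued)
                    t->si_overrun = 0;   /* fresh queueing: restart */
            /* else: still pending, keep the running total */
    }

    /* dequeue path: runs when the task finally takes the signal */
    static void on_dequeue(struct timer_state *t)
    {
            t->si_overrun += t->it_overrun_last;  /* was '=', dropping ticks */
    }
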
diff --git a/kernel/sched.c b/kernel/sched.c
index ace566bdfc68..d601fb0406ca 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -833,7 +833,7 @@ static inline u64 global_rt_period(void)
 
 static inline u64 global_rt_runtime(void)
 {
-	if (sysctl_sched_rt_period < 0)
+	if (sysctl_sched_rt_runtime < 0)
 		return RUNTIME_INF;
 
 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
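
The sched.c change fixes a copy-paste bug: global_rt_runtime() tested sysctl_sched_rt_period instead of sysctl_sched_rt_runtime, so writing -1 to sched_rt_runtime_us (the documented way to lift RT throttling) was not honored. The intended semantics, modeled standalone:

    #include <stdint.h>

    #define NSEC_PER_USEC 1000ULL
    #define RUNTIME_INF   ((uint64_t)~0ULL)

    /* -1 in the runtime sysctl means "unlimited RT runtime";
     * the period sysctl plays no part in that check. */
    static uint64_t rt_runtime_ns(long runtime_us)
    {
            if (runtime_us < 0)
                    return RUNTIME_INF;
            return (uint64_t)runtime_us * NSEC_PER_USEC;
    }
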
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 22ed55d1167f..204991a0bfa7 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -32,13 +32,19 @@
 #include <linux/ktime.h>
 #include <linux/module.h>
 
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ * This is default implementation.
+ * Architectures and sub-architectures can override this.
+ */
+unsigned long long __attribute__((weak)) sched_clock(void)
+{
+	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+}
 
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static __read_mostly int sched_clock_running;
 
-#define MULTI_SHIFT 15
-/* Max is double, Min is 1/2 */
-#define MAX_MULTI (2LL << MULTI_SHIFT)
-#define MIN_MULTI (1LL << (MULTI_SHIFT-1))
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 
 struct sched_clock_data {
 	/*
@@ -49,14 +55,9 @@ struct sched_clock_data {
 	raw_spinlock_t		lock;
 
 	unsigned long		tick_jiffies;
-	u64			prev_raw;
 	u64			tick_raw;
 	u64			tick_gtod;
 	u64			clock;
-	s64			multi;
-#ifdef CONFIG_NO_HZ
-	int			check_max;
-#endif
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
@@ -71,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
 	return &per_cpu(sched_clock_data, cpu);
 }
 
-static __read_mostly int sched_clock_running;
-
 void sched_clock_init(void)
 {
 	u64 ktime_now = ktime_to_ns(ktime_get());
@@ -84,90 +83,39 @@ void sched_clock_init(void)
 
 		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		scd->tick_jiffies = now_jiffies;
-		scd->prev_raw = 0;
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
-		scd->multi = 1 << MULTI_SHIFT;
-#ifdef CONFIG_NO_HZ
-		scd->check_max = 1;
-#endif
 	}
 
 	sched_clock_running = 1;
 }
 
-#ifdef CONFIG_NO_HZ
-/*
- * The dynamic ticks makes the delta jiffies inaccurate. This
- * prevents us from checking the maximum time update.
- * Disable the maximum check during stopped ticks.
- */
-void sched_clock_tick_stop(int cpu)
-{
-	struct sched_clock_data *scd = cpu_sdc(cpu);
-
-	scd->check_max = 0;
-}
-
-void sched_clock_tick_start(int cpu)
-{
-	struct sched_clock_data *scd = cpu_sdc(cpu);
-
-	scd->check_max = 1;
-}
-
-static int check_max(struct sched_clock_data *scd)
-{
-	return scd->check_max;
-}
-#else
-static int check_max(struct sched_clock_data *scd)
-{
-	return 1;
-}
-#endif /* CONFIG_NO_HZ */
-
 /*
  * update the percpu scd from the raw @now value
  *
  * - filter out backward motion
  * - use jiffies to generate a min,max window to clip the raw values
  */
-static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
+static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 {
 	unsigned long now_jiffies = jiffies;
 	long delta_jiffies = now_jiffies - scd->tick_jiffies;
 	u64 clock = scd->clock;
 	u64 min_clock, max_clock;
-	s64 delta = now - scd->prev_raw;
+	s64 delta = now - scd->tick_raw;
 
 	WARN_ON_ONCE(!irqs_disabled());
-
-	/*
-	 * At schedule tick the clock can be just under the gtod. We don't
-	 * want to push it too prematurely.
-	 */
-	min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC);
-	if (min_clock > TICK_NSEC)
-		min_clock -= TICK_NSEC / 2;
+	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
 
 	if (unlikely(delta < 0)) {
 		clock++;
 		goto out;
 	}
 
-	/*
-	 * The clock must stay within a jiffie of the gtod.
-	 * But since we may be at the start of a jiffy or the end of one
-	 * we add another jiffy buffer.
-	 */
-	max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;
-
-	delta *= scd->multi;
-	delta >>= MULTI_SHIFT;
+	max_clock = min_clock + TICK_NSEC;
 
-	if (unlikely(clock + delta > max_clock) && check_max(scd)) {
+	if (unlikely(clock + delta > max_clock)) {
 		if (clock < max_clock)
 			clock = max_clock;
 		else
@@ -180,12 +128,10 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *tim
 	if (unlikely(clock < min_clock))
 		clock = min_clock;
 
-	if (time)
-		*time = clock;
-	else {
-		scd->prev_raw = now;
-		scd->clock = clock;
-	}
+	scd->tick_jiffies = now_jiffies;
+	scd->clock = clock;
+
+	return clock;
 }
 
 static void lock_double_clock(struct sched_clock_data *data1,
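
With the multiplier estimation gone, __update_sched_clock() reduces to clamping the raw clock delta into a GTOD-derived window: no less than tick_gtod plus the elapsed jiffies, and no more than one tick beyond that. A simplified standalone model of the clamp (the kernel version additionally nudges a clock already past the window forward by 1 ns rather than rewinding it):

    #include <stdint.h>

    #define TICK_NSEC 1000000ULL   /* illustrative: HZ=1000 */

    static uint64_t clamp_clock(uint64_t clock, uint64_t delta_raw,
                                uint64_t tick_gtod, uint64_t delta_jiffies)
    {
            uint64_t min_clock = tick_gtod + delta_jiffies * TICK_NSEC;
            uint64_t max_clock = min_clock + TICK_NSEC;

            clock += delta_raw;
            if (clock > max_clock)
                    clock = max_clock;  /* raw clock ran ahead: cap it    */
            if (clock < min_clock)
                    clock = min_clock;  /* raw clock fell behind: lift it */
            return clock;
    }
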
@@ -203,7 +149,7 @@ static void lock_double_clock(struct sched_clock_data *data1,
 u64 sched_clock_cpu(int cpu)
 {
 	struct sched_clock_data *scd = cpu_sdc(cpu);
-	u64 now, clock;
+	u64 now, clock, this_clock, remote_clock;
 
 	if (unlikely(!sched_clock_running))
 		return 0ull;
@@ -212,43 +158,44 @@ u64 sched_clock_cpu(int cpu)
 	now = sched_clock();
 
 	if (cpu != raw_smp_processor_id()) {
-		/*
-		 * in order to update a remote cpu's clock based on our
-		 * unstable raw time rebase it against:
-		 *	tick_raw	(offset between raw counters)
-		 *	tick_gotd	(tick offset between cpus)
-		 */
 		struct sched_clock_data *my_scd = this_scd();
 
 		lock_double_clock(scd, my_scd);
 
-		now -= my_scd->tick_raw;
-		now += scd->tick_raw;
+		this_clock = __update_sched_clock(my_scd, now);
+		remote_clock = scd->clock;
 
-		now += my_scd->tick_gtod;
-		now -= scd->tick_gtod;
+		/*
+		 * Use the opportunity that we have both locks
+		 * taken to couple the two clocks: we take the
+		 * larger time as the latest time for both
+		 * runqueues. (this creates monotonic movement)
+		 */
+		if (likely(remote_clock < this_clock)) {
+			clock = this_clock;
+			scd->clock = clock;
+		} else {
+			/*
+			 * Should be rare, but possible:
+			 */
+			clock = remote_clock;
+			my_scd->clock = remote_clock;
+		}
 
 		__raw_spin_unlock(&my_scd->lock);
-
-		__update_sched_clock(scd, now, &clock);
-
-		__raw_spin_unlock(&scd->lock);
-
 	} else {
 		__raw_spin_lock(&scd->lock);
-		__update_sched_clock(scd, now, NULL);
-		clock = scd->clock;
-		__raw_spin_unlock(&scd->lock);
+		clock = __update_sched_clock(scd, now);
 	}
 
+	__raw_spin_unlock(&scd->lock);
+
 	return clock;
 }
 
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
-	unsigned long now_jiffies = jiffies;
-	s64 mult, delta_gtod, delta_raw;
 	u64 now, now_gtod;
 
 	if (unlikely(!sched_clock_running))
@@ -260,29 +207,14 @@ void sched_clock_tick(void)
 	now = sched_clock();
 
 	__raw_spin_lock(&scd->lock);
-	__update_sched_clock(scd, now, NULL);
+	__update_sched_clock(scd, now);
 	/*
 	 * update tick_gtod after __update_sched_clock() because that will
 	 * already observe 1 new jiffy; adding a new tick_gtod to that would
 	 * increase the clock 2 jiffies.
 	 */
-	delta_gtod = now_gtod - scd->tick_gtod;
-	delta_raw = now - scd->tick_raw;
-
-	if ((long)delta_raw > 0) {
-		mult = delta_gtod << MULTI_SHIFT;
-		do_div(mult, delta_raw);
-		scd->multi = mult;
-		if (scd->multi > MAX_MULTI)
-			scd->multi = MAX_MULTI;
-		else if (scd->multi < MIN_MULTI)
-			scd->multi = MIN_MULTI;
-	} else
-		scd->multi = 1 << MULTI_SHIFT;
-
 	scd->tick_raw = now;
 	scd->tick_gtod = now_gtod;
-	scd->tick_jiffies = now_jiffies;
 	__raw_spin_unlock(&scd->lock);
 }
 
@@ -301,7 +233,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
 void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 	struct sched_clock_data *scd = this_scd();
-	u64 now = sched_clock();
 
 	/*
 	 * Override the previous timestamp and ignore all
@@ -310,27 +241,30 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 	 * rq clock:
 	 */
 	__raw_spin_lock(&scd->lock);
-	scd->prev_raw = now;
 	scd->clock += delta_ns;
-	scd->multi = 1 << MULTI_SHIFT;
 	__raw_spin_unlock(&scd->lock);
 
 	touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#endif
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
-/*
- * Scheduler clock - returns current time in nanosec units.
- * This is default implementation.
- * Architectures and sub-architectures can override this.
- */
-unsigned long long __attribute__((weak)) sched_clock(void)
+void sched_clock_init(void)
 {
-	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+	sched_clock_running = 1;
 }
 
+u64 sched_clock_cpu(int cpu)
+{
+	if (unlikely(!sched_clock_running))
+		return 0;
+
+	return sched_clock();
+}
+
+#endif
+
 unsigned long long cpu_clock(int cpu)
 {
 	unsigned long long clock;
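
The other half of the sched_clock.c rework is the cross-cpu path of sched_clock_cpu(): instead of rebasing the raw time against the remote cpu's tick offsets, it updates the local clock and then, with both per-cpu locks held, couples the two clocks to the larger value, which keeps remote observations monotonic. The invariant in isolation (illustrative types, not kernel source):

    #include <stdint.h>

    struct cpu_clock {
            uint64_t clock;   /* per-cpu nanosecond clock */
    };

    /* Called with both clocks locked: pull the lagging clock up to
     * the leading one, so a reader hopping between cpus never sees
     * time move backwards. */
    static uint64_t couple_clocks(struct cpu_clock *local,
                                  struct cpu_clock *remote)
    {
            uint64_t t;

            if (remote->clock < local->clock) {
                    t = local->clock;       /* common case: remote lags */
                    remote->clock = t;
            } else {
                    t = remote->clock;      /* rare: remote is ahead    */
                    local->clock = t;
            }
            return t;
    }
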
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index cf2cd6ce4cb2..fb8994c6d4bb 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -899,7 +899,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 		 * doesn't make sense. Rely on vruntime for fairness.
 		 */
 		if (rq->curr != p)
-			delta = max(10000LL, delta);
+			delta = max_t(s64, 10000LL, delta);
 
 		hrtick_start(rq, delta);
 	}
@@ -1442,18 +1442,23 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
 	struct task_struct *p = NULL;
 	struct sched_entity *se;
 
-	while (next != &cfs_rq->tasks) {
+	if (next == &cfs_rq->tasks)
+		return NULL;
+
+	/* Skip over entities that are not tasks */
+	do {
 		se = list_entry(next, struct sched_entity, group_node);
 		next = next->next;
+	} while (next != &cfs_rq->tasks && !entity_is_task(se));
 
-		/* Skip over entities that are not tasks */
-		if (entity_is_task(se)) {
-			p = task_of(se);
-			break;
-		}
-	}
+	if (next == &cfs_rq->tasks)
+		return NULL;
 
 	cfs_rq->balance_iterator = next;
+
+	if (entity_is_task(se))
+		p = task_of(se);
+
 	return p;
 }
 
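
The iterator rewrite walks the cfs_rq task list with a do/while that skips group scheduling entities (which are not tasks) and only commits balance_iterator once a usable position is found. The same skip-scan shape, self-contained (hypothetical types, not kernel source):

    #include <stddef.h>

    struct node {
            struct node *next;
            int is_task;    /* 0 for group entities, 1 for tasks */
    };

    /* Return the first task at or after 'pos' in a circular list whose
     * sentinel is 'head' (as with the kernel's list_head lists). */
    static struct node *first_task(struct node *pos, struct node *head)
    {
            struct node *n;

            if (pos == head)
                    return NULL;

            do {
                    n = pos;
                    pos = pos->next;
            } while (pos != head && !n->is_task);

            return n->is_task ? n : NULL;
    }
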
diff --git a/kernel/signal.c b/kernel/signal.c
index 954f77d7e3bc..c539f60c6f41 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1304,6 +1304,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
 		q->info.si_overrun++;
 		goto out;
 	}
+	q->info.si_overrun = 0;
 
 	signalfd_notify(t, sig);
 	pending = group ? &t->signal->shared_pending : &t->pending;
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index e446c7c7d6a9..af3c7cea258b 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -65,7 +65,6 @@ static void ack_state(void)
 static int stop_cpu(struct stop_machine_data *smdata)
 {
 	enum stopmachine_state curstate = STOPMACHINE_NONE;
-	int uninitialized_var(ret);
 
 	/* Simple state machine */
 	do {
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 825b4c00fe44..f5da526424a9 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -289,7 +289,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 		ts->tick_stopped = 1;
 		ts->idle_jiffies = last_jiffies;
 		rcu_enter_nohz();
-		sched_clock_tick_stop(cpu);
 	}
 
 	/*
@@ -392,7 +391,6 @@ void tick_nohz_restart_sched_tick(void)
 	select_nohz_load_balancer(0);
 	now = ktime_get();
 	tick_do_update_jiffies64(now);
-	sched_clock_tick_start(cpu);
 	cpu_clear(cpu, nohz_cpu_mask);
 
 	/*
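
These two hunks remove the last callers of sched_clock_tick_stop() and sched_clock_tick_start(): both helpers were deleted from kernel/sched_clock.c above together with the check_max/multiplier machinery they supported, so the nohz code no longer needs to notify the scheduler clock when the tick stops and restarts.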