 arch/um/kernel/process.c |  2 --
 fs/proc/base.c           | 27 +++++++++++----------------
 include/linux/sched.h    |  1 +
 kernel/sched.c           | 16 +++++++++++-----
 kernel/sched_fair.c      | 13 ++++---------
 5 files changed, 27 insertions(+), 32 deletions(-)
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index fc50d2f959d1..e8cb9ff183e9 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -128,8 +128,6 @@ void *get_current(void)
 	return current;
 }
 
-extern void schedule_tail(struct task_struct *prev);
-
 /*
  * This is called magically, by its address being stuffed in a jmp_buf
  * and being longjmp-d to.
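
Note: the extern removed here is not dead code; the declaration moves into <linux/sched.h> (see the include/linux/sched.h hunk below), so arch code and the scheduler now share one prototype. A minimal sketch of why the private redeclaration was worth removing (illustrative, not part of the patch):

/* Before: each caller carried its own copy of the prototype, so a change
 * to the real signature in kernel/sched.c could go unnoticed here until
 * link or run time. */
extern void schedule_tail(struct task_struct *prev);

/* After: the single authoritative declaration is picked up by inclusion,
 * and the compiler checks every call site against it. */
#include <linux/sched.h>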
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 96ee899d6502..91a1bd67ac1d 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -314,9 +314,12 @@ static int proc_pid_schedstat(struct task_struct *task, char *buffer)
 static int lstats_show_proc(struct seq_file *m, void *v)
 {
 	int i;
-	struct task_struct *task = m->private;
-	seq_puts(m, "Latency Top version : v0.1\n");
+	struct inode *inode = m->private;
+	struct task_struct *task = get_proc_task(inode);
 
+	if (!task)
+		return -ESRCH;
+	seq_puts(m, "Latency Top version : v0.1\n");
 	for (i = 0; i < 32; i++) {
 		if (task->latency_record[i].backtrace[0]) {
 			int q;
@@ -341,32 +344,24 @@ static int lstats_show_proc(struct seq_file *m, void *v)
 		}
 
 	}
+	put_task_struct(task);
 	return 0;
 }
 
 static int lstats_open(struct inode *inode, struct file *file)
 {
-	int ret;
-	struct seq_file *m;
-	struct task_struct *task = get_proc_task(inode);
-
-	ret = single_open(file, lstats_show_proc, NULL);
-	if (!ret) {
-		m = file->private_data;
-		m->private = task;
-	}
-	return ret;
+	return single_open(file, lstats_show_proc, inode);
 }
 
 static ssize_t lstats_write(struct file *file, const char __user *buf,
 			    size_t count, loff_t *offs)
 {
-	struct seq_file *m;
-	struct task_struct *task;
+	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
 
-	m = file->private_data;
-	task = m->private;
+	if (!task)
+		return -ESRCH;
 	clear_all_latency_tracing(task);
+	put_task_struct(task);
 
 	return count;
 }
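
Note: the old code took a task reference once at open time and parked the bare pointer in m->private, so the reference from get_proc_task() was never dropped (nothing called put_task_struct()) and readers could outlive the task. The fix stores only the inode, which the VFS pins for as long as the file is open, and takes/drops a proper reference around each operation. A sketch of the corrected lifetime rule, assembled from the hunks above:

/* Hold a task_struct reference only for the duration of one operation. */
static int lstats_show_proc(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;		 /* stable: pinned by the open file */
	struct task_struct *task = get_proc_task(inode); /* takes a reference */

	if (!task)
		return -ESRCH;				 /* task already exited */
	/* ... emit the latency records ... */
	put_task_struct(task);				 /* drop it before returning */
	return 0;
}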
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e217d188a102..9c17e828d6d4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -242,6 +242,7 @@ struct task_struct;
 
 extern void sched_init(void);
 extern void sched_init_smp(void);
+extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
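
Note: the prototype carries asmlinkage, matching the definition in kernel/sched.c. On architectures where asmlinkage changes the calling convention (e.g. forcing stack-passed arguments on 32-bit x86), the plain extern removed from arch/um above would have declared a subtly different function. A hypothetical stand-alone illustration of the pairing, with asmlinkage reduced to a stub for the sketch:

#define asmlinkage	/* arch-specific; a no-op here for illustration */

struct task_struct;

extern asmlinkage void schedule_tail(struct task_struct *prev);	/* header */

asmlinkage void schedule_tail(struct task_struct *prev)	/* kernel/sched.c */
{
	/* ... finish the context switch begun by schedule() ... */
}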
diff --git a/kernel/sched.c b/kernel/sched.c
index b387a8de26a5..f06950c8a6ce 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -668,6 +668,8 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  */
 unsigned int sysctl_sched_rt_period = 1000000;
 
+static __read_mostly int scheduler_running;
+
 /*
  * part of the period that we allow rt tasks to run in us.
  * default: 0.95s
@@ -689,14 +691,16 @@ unsigned long long cpu_clock(int cpu)
 	unsigned long flags;
 	struct rq *rq;
 
-	local_irq_save(flags);
-	rq = cpu_rq(cpu);
 	/*
 	 * Only call sched_clock() if the scheduler has already been
 	 * initialized (some code might call cpu_clock() very early):
 	 */
-	if (rq->idle)
-		update_rq_clock(rq);
+	if (unlikely(!scheduler_running))
+		return 0;
+
+	local_irq_save(flags);
+	rq = cpu_rq(cpu);
+	update_rq_clock(rq);
 	now = rq->clock;
 	local_irq_restore(flags);
 
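
Note: this replaces a heuristic (rq->idle being non-NULL as a proxy for "scheduler initialized") with an explicit scheduler_running flag, set as the final step of sched_init() in a hunk further down. Very early callers now get a well-defined 0 instead of poking at a half-initialized runqueue. The general pattern, as a runnable userspace sketch (all names here are illustrative, not from the patch):

#include <stdio.h>

/* Analogue of scheduler_running: an explicit "subsystem is up" flag
 * beats probing internal state that merely correlates with readiness. */
static int subsystem_running;	/* set once init completes */

static unsigned long long subsystem_clock(void)
{
	if (!subsystem_running)
		return 0;	/* well-defined fallback for early callers */
	return 42;		/* stand-in for the real clock read */
}

static void subsystem_init(void)
{
	/* ... set up all internal state first ... */
	subsystem_running = 1;	/* publish readiness last */
}

int main(void)
{
	printf("early: %llu\n", subsystem_clock());	/* prints 0 */
	subsystem_init();
	printf("late:  %llu\n", subsystem_clock());	/* prints 42 */
	return 0;
}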
@@ -3885,7 +3889,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 asmlinkage void __sched schedule(void)
 {
 	struct task_struct *prev, *next;
-	long *switch_count;
+	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
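
Note: switch_count points at task_struct's context-switch counters (nvcsw/nivcsw), which are unsigned long, so the pointer type is corrected to match; long * and unsigned long * are incompatible pointer types that compilers warn about. A tiny illustration (the struct is a stand-in, field names as in task_struct):

/* Pointer and pointee types must match exactly; signedness counts. */
struct counters { unsigned long nvcsw, nivcsw; };

void bump(struct counters *c)
{
	unsigned long *switch_count = &c->nvcsw;	/* OK */
	/* long *bad = &c->nvcsw;   rejected: incompatible pointer types */
	++*switch_count;
}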
@@ -7284,6 +7288,8 @@ void __init sched_init(void)
 	 * During early bootup we pretend to be a normal task:
 	 */
 	current->sched_class = &fair_sched_class;
+
+	scheduler_running = 1;
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6c091d6e159d..c8e6492c5925 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -202,17 +202,12 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
 
 static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
-	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
-	struct sched_entity *se = NULL;
-	struct rb_node *parent;
+	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
 
-	while (*link) {
-		parent = *link;
-		se = rb_entry(parent, struct sched_entity, run_node);
-		link = &parent->rb_right;
-	}
+	if (!last)
+		return NULL;
 
-	return se;
+	return rb_entry(last, struct sched_entity, run_node);
 }
 
 /**************************************************************
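
Note: the open-coded loop walked rb_right pointers from the root to reach the rightmost entity in the CFS timeline; lib/rbtree.c already exports rb_last() for exactly that, so the rewrite is shorter and cannot drift from the library's behaviour. For reference, rb_last() amounts to the following walk (a compilable sketch of the helper's behaviour with a pared-down node type, not copied from lib/rbtree.c):

#include <stddef.h>

struct rb_node { struct rb_node *rb_left, *rb_right; };
struct rb_root { struct rb_node *rb_node; };

/* The rightmost node of a binary search tree is reached by following
 * right children from the root; rb_last() performs this walk. */
static struct rb_node *rb_last_sketch(const struct rb_root *root)
{
	struct rb_node *node = root->rb_node;

	if (!node)
		return NULL;	/* empty tree: mirrors the NULL check above */
	while (node->rb_right)
		node = node->rb_right;
	return node;
}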