diff options
 kernel/sched.c      | 10 ++++++----
 kernel/sched_fair.c |  6 ++++--
 2 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ed3caf26990d..d941ddc9ec1d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -757,14 +757,14 @@ const_debug unsigned int sysctl_sched_features =
 #define SCHED_FEAT(name, enabled)	\
 	#name ,
 
-__read_mostly char *sched_feat_names[] = {
+static __read_mostly char *sched_feat_names[] = {
 #include "sched_features.h"
 	NULL
 };
 
 #undef SCHED_FEAT
 
-int sched_feat_open(struct inode *inode, struct file *filp)
+static int sched_feat_open(struct inode *inode, struct file *filp)
 {
 	filp->private_data = inode->i_private;
 	return 0;
@@ -4341,8 +4341,10 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 	struct rq *rq = this_rq();
 	cputime64_t tmp;
 
-	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
-		return account_guest_time(p, cputime);
+	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
+		account_guest_time(p, cputime);
+		return;
+	}
 
 	p->stime = cputime_add(p->stime, cputime);
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 1295ddc5656b..e8e5ad2614b0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -841,8 +841,10 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	 * queued ticks are scheduled to match the slice, so don't bother
 	 * validating it and just reschedule.
 	 */
-	if (queued)
-		return resched_task(rq_of(cfs_rq)->curr);
+	if (queued) {
+		resched_task(rq_of(cfs_rq)->curr);
+		return;
+	}
 	/*
 	 * don't let the period tick interfere with the hrtick preemption
 	 */