Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h  69
1 file changed, 63 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0848fa36c383..7a7db09cfabc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -737,11 +737,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 	 */
 	next->on_cpu = 1;
 #endif
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	raw_spin_unlock_irq(&rq->lock);
-#else
 	raw_spin_unlock(&rq->lock);
-#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
@@ -755,9 +751,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	smp_wmb();
 	prev->on_cpu = 0;
 #endif
-#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
-#endif
 }
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
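The two hunks above drop the __ARCH_WANT_INTERRUPTS_ON_CTXSW special case from these helpers: prepare_lock_switch() now always releases rq->lock with a plain raw_spin_unlock(), so interrupts stay disabled across the context switch until finish_lock_switch() unconditionally re-enables them with local_irq_enable(). Previously, an architecture defining that symbol had the lock dropped with raw_spin_unlock_irq() (interrupts back on before the switch) and skipped the re-enable on the finish side.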
@@ -891,6 +885,9 @@ struct cpuacct {
 	struct kernel_cpustat __percpu *cpustat;
 };
 
+extern struct cgroup_subsys cpuacct_subsys;
+extern struct cpuacct root_cpuacct;
+
 /* return cpu accounting group corresponding to this container */
 static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
 {
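Making cpuacct_subsys and root_cpuacct extern widens their visibility beyond the file that defines them; presumably this is for the benefit of the cputime accounting code the rest of this diff supports, so it can charge the root cpuacct group directly rather than going through a lookup.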
@@ -917,6 +914,16 @@ extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 #endif
 
+#ifdef CONFIG_PARAVIRT
+static inline u64 steal_ticks(u64 steal)
+{
+	if (unlikely(steal > NSEC_PER_SEC))
+		return div_u64(steal, TICK_NSEC);
+
+	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
+}
+#endif
+
 static inline void inc_nr_running(struct rq *rq)
 {
 	rq->nr_running++;
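steal_ticks() converts an accumulated nanosecond steal-time delta (time the hypervisor spent running something else) into whole scheduler ticks. The branch is a performance split: __iter_div_u64_rem() divides by repeated subtraction, which is cheap while the quotient is small (under a second of steal means fewer than HZ iterations), whereas a backlog over a second falls back to div_u64(), a real 64-bit division that many 32-bit targets must implement in software. Below is a standalone userspace sketch of the same split; the HZ=1000 tick length and the simplified helper are illustrative stand-ins for the kernel's definitions, not copies of them.

/* Sketch only, not kernel code; models the steal_ticks() fast path. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define TICK_NSEC    (NSEC_PER_SEC / 1000)	/* assumes HZ=1000 */

/* division by repeated subtraction: cheap when the quotient is small */
static uint64_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
				 uint64_t *remainder)
{
	uint64_t quot = 0;

	while (dividend >= divisor) {
		dividend -= divisor;
		quot++;
	}
	*remainder = dividend;
	return quot;
}

static uint64_t steal_ticks(uint64_t steal)
{
	/* rare big backlog (>1s, kernel marks it unlikely()): real divide */
	if (steal > NSEC_PER_SEC)
		return steal / TICK_NSEC;

	/* common case: quotient < HZ, the subtraction loop is cheaper */
	return iter_div_u64_rem(steal, TICK_NSEC, &steal);
}

int main(void)
{
	/* 3,500,000 ns of steal at a 1 ms tick -> 3 whole ticks */
	printf("%llu\n", (unsigned long long)steal_ticks(3500000ULL));
	return 0;
}

Note that the helper returns whole ticks only; the sub-tick remainder written back through the third argument is a side effect of the kernel API's signature, and it is up to the caller to carry any leftover nanoseconds forward.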
@@ -1156,3 +1163,53 @@ enum rq_nohz_flag_bits {
 
 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
 #endif
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+
+DECLARE_PER_CPU(u64, cpu_hardirq_time);
+DECLARE_PER_CPU(u64, cpu_softirq_time);
+
+#ifndef CONFIG_64BIT
+DECLARE_PER_CPU(seqcount_t, irq_time_seq);
+
+static inline void irq_time_write_begin(void)
+{
+	__this_cpu_inc(irq_time_seq.sequence);
+	smp_wmb();
+}
+
+static inline void irq_time_write_end(void)
+{
+	smp_wmb();
+	__this_cpu_inc(irq_time_seq.sequence);
+}
+
+static inline u64 irq_time_read(int cpu)
+{
+	u64 irq_time;
+	unsigned seq;
+
+	do {
+		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
+		irq_time = per_cpu(cpu_softirq_time, cpu) +
+			   per_cpu(cpu_hardirq_time, cpu);
+	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
+
+	return irq_time;
+}
+#else /* CONFIG_64BIT */
+static inline void irq_time_write_begin(void)
+{
+}
+
+static inline void irq_time_write_end(void)
+{
+}
+
+static inline u64 irq_time_read(int cpu)
+{
+	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
+}
+#endif /* CONFIG_64BIT */
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
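On 32-bit builds a u64 cannot be loaded in a single instruction, so a reader could observe half an update. irq_time_write_begin()/irq_time_write_end() therefore bump irq_time_seq around every update, with the smp_wmb() ordering the increment against the payload writes, leaving the sequence odd exactly while a write is in flight; irq_time_read() retries until it sees the same even value before and after the two loads. Here is a minimal userspace model of that protocol, using C11 atomics where the kernel uses seqcount_t and smp_wmb() (a sketch, not the kernel implementation; strictly conforming code would also make the payload loads relaxed atomics):

/* Sketch only: single writer, as with the kernel's per-CPU counters. */
#include <stdatomic.h>
#include <stdint.h>

static atomic_uint seq;			/* odd while a write is in progress */
static uint64_t hardirq_time;		/* payload prone to torn reads */
static uint64_t softirq_time;

static void write_begin(void)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* kernel: smp_wmb() */
}

static void write_end(void)
{
	atomic_thread_fence(memory_order_release);	/* kernel: smp_wmb() */
	atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed);
}

/* retry until a stable, even sequence brackets the two loads */
static uint64_t irq_time_read(void)
{
	unsigned int s;
	uint64_t sum;

	do {
		do {
			s = atomic_load_explicit(&seq, memory_order_acquire);
		} while (s & 1);		/* writer active: wait */

		sum = softirq_time + hardirq_time;

		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&seq, memory_order_relaxed) != s);

	return sum;
}

int main(void)
{
	write_begin();
	hardirq_time += 1000;
	softirq_time += 2000;
	write_end();

	return irq_time_read() == 3000 ? 0 : 1;
}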

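The 64-bit branch compiles the write hooks away entirely: a naturally aligned u64 load is atomic there, so irq_time_read() just sums the two per-CPU counters. The two loads can still straddle an update and mix an old softirq value with a new hardirq one, a transient inaccuracy the accounting evidently tolerates in exchange for a lock-free read.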