author     Linus Torvalds <torvalds@linux-foundation.org>  2017-02-20 15:52:55 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-02-20 15:52:55 -0500
commit     828cad8ea05d194d8a9452e0793261c2024c23a2
tree       0ad7c7e044cdcfe75d78da0b52eb2358d4686e02  /include/linux/sched.h
parent     60c906bab124a0627fba04c9ca5e61bba4747c0c
parent     bb3bac2ca9a3a5b7fa601781adf70167a0449d75
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"The main changes in this (fairly busy) cycle were:
- There was a class of scheduler bugs related to forgetting to update
the rq-clock timestamp, which can cause weird and hard-to-debug
problems, so there's a new debug facility for this. It uncovered
a whole lot of bugs, which convinced us that we want to keep the
debug facility.
(Peter Zijlstra, Matt Fleming)
- Various cputime related updates: eliminate cputime and use u64
nanoseconds directly, simplify and improve the arch interfaces,
implement delayed accounting more widely, etc. - (Frederic
Weisbecker)
- Move code around for better structure plus cleanups (Ingo Molnar)
- Move IO schedule accounting deeper into the scheduler plus related
changes to improve the situation (Tejun Heo)
- ... plus a round of sched/rt and sched/deadline fixes, plus other
fixes, updates and cleanups"
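To make the cputime bullet above concrete: cputime_t was an arch-defined unit (jiffies-based on most architectures), so readers had to convert it, whereas after this series the fields are plain u64 nanoseconds. A minimal sketch, assuming the old cputime_to_nsecs() helper and a task_struct pointer p; this snippet is illustrative and not taken from the diff below:

	u64 ns;

	/* before: cputime_t in an arch-defined unit, conversion required */
	ns = cputime_to_nsecs(p->utime);

	/* after this series: ->utime already holds u64 nanoseconds */
	ns = p->utime;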
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (85 commits)
sched/core: Remove unlikely() annotation from sched_move_task()
sched/autogroup: Rename auto_group.[ch] to autogroup.[ch]
sched/topology: Split out scheduler topology code from core.c into topology.c
sched/core: Remove unnecessary #include headers
sched/rq_clock: Consolidate the ordering of the rq_clock methods
delayacct: Include <uapi/linux/taskstats.h>
sched/core: Clean up comments
sched/rt: Show the 'sched_rr_timeslice' SCHED_RR timeslice tuning knob in milliseconds
sched/clock: Add dummy clear_sched_clock_stable() stub function
sched/cputime: Remove generic asm headers
sched/cputime: Remove unused nsec_to_cputime()
s390, sched/cputime: Remove unused cputime definitions
powerpc, sched/cputime: Remove unused cputime definitions
s390, sched/cputime: Make arch_cpu_idle_time() to return nsecs
ia64, sched/cputime: Remove unused cputime definitions
ia64: Convert vtime to use nsec units directly
ia64, sched/cputime: Move the nsecs based cputime headers to the last arch using it
sched/cputime: Remove jiffies based cputime
sched/cputime, vtime: Return nsecs instead of cputime_t to account
sched/cputime: Complete nsec conversion of tick based accounting
...
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 80
1 file changed, 38 insertions(+), 42 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e4782eae076..c89b7fdec41e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -29,7 +29,6 @@ struct sched_param {
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
-#include <linux/cputime.h>
 
 #include <linux/smp.h>
 #include <linux/sem.h>
@@ -461,12 +460,10 @@ extern signed long schedule_timeout_idle(signed long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 
+extern int __must_check io_schedule_prepare(void);
+extern void io_schedule_finish(int token);
 extern long io_schedule_timeout(long timeout);
-
-static inline void io_schedule(void)
-{
-	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
-}
+extern void io_schedule(void);
 
 void __noreturn do_task_dead(void);
 
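The hunk above replaces the old inline io_schedule() wrapper with an out-of-line function built from the new io_schedule_prepare()/io_schedule_finish() pair, so code that sleeps through its own primitives can still be accounted as I/O wait. A rough sketch of the intended caller pattern; only the three io_schedule_* declarations come from this diff, and the lock is hypothetical:

	int token;

	token = io_schedule_prepare();	/* mark the task as in_iowait */
	mutex_lock(&my_io_lock);	/* hypothetical sleep while waiting on I/O */
	io_schedule_finish(token);	/* restore the previous in_iowait state */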
@@ -565,15 +562,13 @@ struct pacct_struct {
 	int			ac_flag;
 	long			ac_exitcode;
 	unsigned long		ac_mem;
-	cputime_t		ac_utime, ac_stime;
+	u64			ac_utime, ac_stime;
 	unsigned long		ac_minflt, ac_majflt;
 };
 
 struct cpu_itimer {
-	cputime_t expires;
-	cputime_t incr;
-	u32 error;
-	u32 incr_error;
+	u64 expires;
+	u64 incr;
 };
 
 /**
@@ -587,8 +582,8 @@ struct cpu_itimer {
  */
 struct prev_cputime {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	cputime_t utime;
-	cputime_t stime;
+	u64 utime;
+	u64 stime;
 	raw_spinlock_t lock;
 #endif
 };
@@ -603,8 +598,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
 
 /**
  * struct task_cputime - collected CPU time counts
- * @utime:		time spent in user mode, in &cputime_t units
- * @stime:		time spent in kernel mode, in &cputime_t units
+ * @utime:		time spent in user mode, in nanoseconds
+ * @stime:		time spent in kernel mode, in nanoseconds
  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
  *
  * This structure groups together three kinds of CPU time that are tracked for
@@ -612,8 +607,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
 * these counts together and treat all three of them in parallel.
 */
 struct task_cputime {
-	cputime_t utime;
-	cputime_t stime;
+	u64 utime;
+	u64 stime;
 	unsigned long long sum_exec_runtime;
 };
 
@@ -622,13 +617,6 @@ struct task_cputime {
 #define prof_exp	stime
 #define sched_exp	sum_exec_runtime
 
-#define INIT_CPUTIME	\
-	(struct task_cputime) {					\
-		.utime = 0,					\
-		.stime = 0,					\
-		.sum_exec_runtime = 0,				\
-	}
-
 /*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
@@ -787,9 +775,9 @@ struct signal_struct {
 	 * in __exit_signal, except for the group leader.
 	 */
 	seqlock_t stats_lock;
-	cputime_t utime, stime, cutime, cstime;
-	cputime_t gtime;
-	cputime_t cgtime;
+	u64 utime, stime, cutime, cstime;
+	u64 gtime;
+	u64 cgtime;
 	struct prev_cputime prev_cputime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
@@ -1668,11 +1656,11 @@ struct task_struct {
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
-	cputime_t utime, stime;
+	u64 utime, stime;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
-	cputime_t utimescaled, stimescaled;
+	u64 utimescaled, stimescaled;
 #endif
-	cputime_t gtime;
+	u64 gtime;
 	struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqcount_t vtime_seqcount;
@@ -1824,7 +1812,7 @@ struct task_struct {
 #if defined(CONFIG_TASK_XACCT)
 	u64 acct_rss_mem1;	/* accumulated rss usage */
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
-	cputime_t acct_timexpd;	/* stime + utime since last update */
+	u64 acct_timexpd;	/* stime + utime since last update */
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
@@ -2269,17 +2257,17 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void task_cputime(struct task_struct *t,
-			 cputime_t *utime, cputime_t *stime);
-extern cputime_t task_gtime(struct task_struct *t);
+			 u64 *utime, u64 *stime);
+extern u64 task_gtime(struct task_struct *t);
 #else
 static inline void task_cputime(struct task_struct *t,
-				cputime_t *utime, cputime_t *stime)
+				u64 *utime, u64 *stime)
 {
 	*utime = t->utime;
 	*stime = t->stime;
 }
 
-static inline cputime_t task_gtime(struct task_struct *t)
+static inline u64 task_gtime(struct task_struct *t)
 {
 	return t->gtime;
 }
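With both variants of task_cputime() above now taking u64 pointers, callers read task times identically under either accounting config; a minimal usage sketch:

	u64 utime, stime;

	task_cputime(current, &utime, &stime);
	/* utime/stime are plain nanosecond counts; no cputime_t conversion */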
@@ -2287,23 +2275,23 @@ static inline cputime_t task_gtime(struct task_struct *t)
 
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 static inline void task_cputime_scaled(struct task_struct *t,
-				       cputime_t *utimescaled,
-				       cputime_t *stimescaled)
+				       u64 *utimescaled,
+				       u64 *stimescaled)
 {
 	*utimescaled = t->utimescaled;
 	*stimescaled = t->stimescaled;
 }
 #else
 static inline void task_cputime_scaled(struct task_struct *t,
-				       cputime_t *utimescaled,
-				       cputime_t *stimescaled)
+				       u64 *utimescaled,
+				       u64 *stimescaled)
 {
 	task_cputime(t, utimescaled, stimescaled);
 }
 #endif
 
-extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
-extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
+extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
 
 /*
 * Per process flags
@@ -2522,10 +2510,18 @@ extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_init(void);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init_late(void)
+{
+}
+
 static inline void sched_clock_tick(void)
 {
 }
 
+static inline void clear_sched_clock_stable(void)
+{
+}
+
 static inline void sched_clock_idle_sleep_event(void)
 {
 }
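The clear_sched_clock_stable() stub added above (paired with the "dummy stub" commit in the shortlog) means callers no longer need #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK guards: without that option the call compiles to nothing. A hypothetical caller sketch, where clock_is_unreliable is a made-up flag:

	if (clock_is_unreliable)
		clear_sched_clock_stable();	/* no-op without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */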
@@ -2544,6 +2540,7 @@ static inline u64 local_clock(void)
 	return sched_clock();
 }
 #else
+extern void sched_clock_init_late(void);
 /*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
@@ -2551,7 +2548,6 @@ static inline u64 local_clock(void)
 * is reliable after all:
 */
 extern int sched_clock_stable(void);
-extern void set_sched_clock_stable(void);
 extern void clear_sched_clock_stable(void);
 
 extern void sched_clock_tick(void);