aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/sched.h
diff options
context:
space:
mode:
author: Ken Chen <kenchen@google.com> 2007-10-18 15:32:56 -0400
committer: Ingo Molnar <mingo@elte.hu> 2007-10-18 15:32:56 -0400
commit480b9434c542ddf2833aaed3dabba71bc0b787b5 (patch)
tree78c2638ac583cc57165ee1393ebbbbbe367f46fb /include/linux/sched.h
parentcc4ea79588e688ea9b1161650979a194dd709169 (diff)
sched: reduce schedstat variable overhead a bit
schedstat is useful in investigating CPU scheduler behavior. Ideally, I think it is beneficial to have it on all the time. However, the cost of turning it on in a production system is quite high, largely due to the number of events it collects and also due to its large memory footprint. Most of the fields probably don't need to be full 64-bit on a 64-bit arch. Rolling over 4 billion events will most likely take a long time, and the user space tool can be made to accommodate that. I'm proposing the kernel cut back most of the variable widths on 64-bit systems. (Note: the following patch doesn't affect 32-bit systems.) Signed-off-by: Ken Chen <kenchen@google.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- include/linux/sched.h | 42
1 file changed, 21 insertions, 21 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c204ab0d4df1..2f9c1261f202 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -569,7 +569,7 @@ struct sched_info {
569 last_queued; /* when we were last queued to run */ 569 last_queued; /* when we were last queued to run */
570#ifdef CONFIG_SCHEDSTATS 570#ifdef CONFIG_SCHEDSTATS
571 /* BKL stats */ 571 /* BKL stats */
572 unsigned long bkl_count; 572 unsigned int bkl_count;
573#endif 573#endif
574}; 574};
575#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ 575#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
@@ -705,34 +705,34 @@ struct sched_domain {
705 705
706#ifdef CONFIG_SCHEDSTATS 706#ifdef CONFIG_SCHEDSTATS
707 /* load_balance() stats */ 707 /* load_balance() stats */
708 unsigned long lb_count[CPU_MAX_IDLE_TYPES]; 708 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
709 unsigned long lb_failed[CPU_MAX_IDLE_TYPES]; 709 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
710 unsigned long lb_balanced[CPU_MAX_IDLE_TYPES]; 710 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
711 unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES]; 711 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
712 unsigned long lb_gained[CPU_MAX_IDLE_TYPES]; 712 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
713 unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES]; 713 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
714 unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES]; 714 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
715 unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES]; 715 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
716 716
717 /* Active load balancing */ 717 /* Active load balancing */
718 unsigned long alb_count; 718 unsigned int alb_count;
719 unsigned long alb_failed; 719 unsigned int alb_failed;
720 unsigned long alb_pushed; 720 unsigned int alb_pushed;
721 721
722 /* SD_BALANCE_EXEC stats */ 722 /* SD_BALANCE_EXEC stats */
723 unsigned long sbe_count; 723 unsigned int sbe_count;
724 unsigned long sbe_balanced; 724 unsigned int sbe_balanced;
725 unsigned long sbe_pushed; 725 unsigned int sbe_pushed;
726 726
727 /* SD_BALANCE_FORK stats */ 727 /* SD_BALANCE_FORK stats */
728 unsigned long sbf_count; 728 unsigned int sbf_count;
729 unsigned long sbf_balanced; 729 unsigned int sbf_balanced;
730 unsigned long sbf_pushed; 730 unsigned int sbf_pushed;
731 731
732 /* try_to_wake_up() stats */ 732 /* try_to_wake_up() stats */
733 unsigned long ttwu_wake_remote; 733 unsigned int ttwu_wake_remote;
734 unsigned long ttwu_move_affine; 734 unsigned int ttwu_move_affine;
735 unsigned long ttwu_move_balance; 735 unsigned int ttwu_move_balance;
736#endif 736#endif
737}; 737};
738 738