author		Linus Torvalds <torvalds@linux-foundation.org>	2009-01-02 14:44:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-02 14:44:09 -0500
commit		b840d79631c882786925303c2b0f4fefc31845ed (patch)
tree		cda60a95d4507fe1321fc285af38982d7eb9693b /include
parent		597b0d21626da4e6f09f132442caf0cc2b0eb47c (diff)
parent		c3d80000e3a812fe5a200d6bde755fbd7fa65481 (diff)
Merge branch 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (66 commits)
x86: export vector_used_by_percpu_irq
x86: use logical apicid in x2apic_cluster's x2apic_cpu_mask_to_apicid_and()
sched: nominate preferred wakeup cpu, fix
x86: fix lguest used_vectors breakage, -v2
x86: fix warning in arch/x86/kernel/io_apic.c
sched: fix warning in kernel/sched.c
sched: move test_sd_parent() to an SMP section of sched.h
sched: add SD_BALANCE_NEWIDLE at MC and CPU level for sched_mc>0
sched: activate active load balancing in new idle cpus
sched: bias task wakeups to preferred semi-idle packages
sched: nominate preferred wakeup cpu
sched: favour lower logical cpu number for sched_mc balance
sched: framework for sched_mc/smt_power_savings=N
sched: convert BALANCE_FOR_xx_POWER to inline functions
x86: use possible_cpus=NUM to extend the possible cpus allowed
x86: fix cpu_mask_to_apicid_and to include cpu_online_mask
x86: update io_apic.c to the new cpumask code
x86: Introduce topology_core_cpumask()/topology_thread_cpumask()
x86: xen: use smp_call_function_many()
x86: use work_on_cpu in x86/kernel/cpu/mcheck/mce_amd_64.c
...
Fixed up trivial conflict in kernel/time/tick-sched.c manually
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/topology.h   14
-rw-r--r--  include/asm-m32r/smp.h            2
-rw-r--r--  include/linux/clockchips.h        4
-rw-r--r--  include/linux/cpumask.h          98
-rw-r--r--  include/linux/interrupt.h         4
-rw-r--r--  include/linux/irq.h               3
-rw-r--r--  include/linux/sched.h            92
-rw-r--r--  include/linux/topology.h          6
8 files changed, 159 insertions, 64 deletions
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 54bbf6e04ee8..0e9e2bc0ee96 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -40,6 +40,9 @@
 #ifndef node_to_cpumask
 #define node_to_cpumask(node)	((void)node, cpu_online_map)
 #endif
+#ifndef cpumask_of_node
+#define cpumask_of_node(node)	((void)node, cpu_online_mask)
+#endif
 #ifndef node_to_first_cpu
 #define node_to_first_cpu(node)	((void)(node),0)
 #endif
@@ -54,9 +57,18 @@
 				)
 #endif
 
+#ifndef cpumask_of_pcibus
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
+				 cpu_all_mask :				\
+				 cpumask_of_node(pcibus_to_node(bus)))
+#endif
+
 #endif	/* CONFIG_NUMA */
 
-/* returns pointer to cpumask for specified node */
+/*
+ * returns pointer to cpumask for specified node
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #ifndef node_to_cpumask_ptr
 
 #define node_to_cpumask_ptr(v, node)					\
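
[Editor's note: a minimal sketch of how generic code might use the new cpumask_of_node() fallback. The helper below is hypothetical and not part of this patch; it only assumes cpumask_weight() from linux/cpumask.h.]

	/* Hypothetical example: count the CPUs a node maps to.  On !CONFIG_NUMA
	 * builds the fallback above simply yields cpu_online_mask. */
	static int example_cpus_on_node(int node)
	{
		const struct cpumask *mask = cpumask_of_node(node);

		return cpumask_weight(mask);
	}
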
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index c5dd66916692..b96a6d2ffbc3 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -63,8 +63,6 @@ extern volatile int cpu_2_physid[NR_CPUS];
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_possible_map;
-extern cpumask_t cpu_present_map;
 
 static __inline__ int hard_smp_processor_id(void)
 {
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index ed3a5d473e52..cea153697ec7 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -82,13 +82,13 @@ struct clock_event_device {
 	int			shift;
 	int			rating;
 	int			irq;
-	cpumask_t		cpumask;
+	const struct cpumask	*cpumask;
 	int			(*set_next_event)(unsigned long evt,
 						  struct clock_event_device *);
 	void			(*set_mode)(enum clock_event_mode mode,
 					    struct clock_event_device *);
 	void			(*event_handler)(struct clock_event_device *);
-	void			(*broadcast)(cpumask_t mask);
+	void			(*broadcast)(const struct cpumask *mask);
 	struct list_head	list;
 	enum clock_event_mode	mode;
 	ktime_t			next_event;
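
[Editor's note: since clock_event_device.cpumask is now a const struct cpumask * rather than a by-value cpumask_t, registration code assigns a pointer. A hedged sketch of a hypothetical per-CPU timer setup; the function name is illustrative, only cpumask_of() and clockevents_register_device() are existing kernel APIs.]

	/* Hypothetical driver fragment: point the event device at this CPU's
	 * mask instead of copying a cpumask_t by value. */
	static void example_setup_percpu_timer(struct clock_event_device *evt, int cpu)
	{
		evt->cpumask = cpumask_of(cpu);	/* previously a cpumask_t copy */
		clockevents_register_device(evt);
	}
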
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 21e1dd43e52a..d4bf52603e6b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -339,36 +339,6 @@ extern cpumask_t cpu_mask_all;
 #endif
 #define CPUMASK_PTR(v, m)	cpumask_t *v = &(m->v)
 
-#define cpumask_scnprintf(buf, len, src) \
-			__cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
-static inline int __cpumask_scnprintf(char *buf, int len,
-					const cpumask_t *srcp, int nbits)
-{
-	return bitmap_scnprintf(buf, len, srcp->bits, nbits);
-}
-
-#define cpumask_parse_user(ubuf, ulen, dst) \
-			__cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS)
-static inline int __cpumask_parse_user(const char __user *buf, int len,
-					cpumask_t *dstp, int nbits)
-{
-	return bitmap_parse_user(buf, len, dstp->bits, nbits);
-}
-
-#define cpulist_scnprintf(buf, len, src) \
-			__cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
-static inline int __cpulist_scnprintf(char *buf, int len,
-					const cpumask_t *srcp, int nbits)
-{
-	return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
-}
-
-#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
-static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
-{
-	return bitmap_parselist(buf, dstp->bits, nbits);
-}
-
 #define cpu_remap(oldbit, old, new) \
 		__cpu_remap((oldbit), &(old), &(new), NR_CPUS)
 static inline int __cpu_remap(int oldbit,
@@ -540,9 +510,6 @@ extern cpumask_t cpu_active_map;
 	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD			\
 }
 
-/* This produces more efficient code. */
-#define nr_cpumask_bits	NR_CPUS
-
 #else /* NR_CPUS > BITS_PER_LONG */
 
 #define CPU_BITS_ALL						\
@@ -550,9 +517,15 @@ extern cpumask_t cpu_active_map;
 	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,		\
 	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD		\
 }
+#endif /* NR_CPUS > BITS_PER_LONG */
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
+ * not all bits may be allocated. */
 #define nr_cpumask_bits	nr_cpu_ids
-#endif /* NR_CPUS > BITS_PER_LONG */
+#else
+#define nr_cpumask_bits	NR_CPUS
+#endif
 
 /* verify cpu argument to cpumask_* operators */
 static inline unsigned int cpumask_check(unsigned int cpu)
@@ -946,6 +919,63 @@ static inline void cpumask_copy(struct cpumask *dstp,
 #define cpumask_of(cpu) (get_cpu_mask(cpu))
 
 /**
+ * cpumask_scnprintf - print a cpumask into a string as comma-separated hex
+ * @buf: the buffer to sprintf into
+ * @len: the length of the buffer
+ * @srcp: the cpumask to print
+ *
+ * If len is zero, returns zero.  Otherwise returns the length of the
+ * (nul-terminated) @buf string.
+ */
+static inline int cpumask_scnprintf(char *buf, int len,
+				    const struct cpumask *srcp)
+{
+	return bitmap_scnprintf(buf, len, srcp->bits, nr_cpumask_bits);
+}
+
+/**
+ * cpumask_parse_user - extract a cpumask from a user string
+ * @buf: the buffer to extract from
+ * @len: the length of the buffer
+ * @dstp: the cpumask to set.
+ *
+ * Returns -errno, or 0 for success.
+ */
+static inline int cpumask_parse_user(const char __user *buf, int len,
+				     struct cpumask *dstp)
+{
+	return bitmap_parse_user(buf, len, dstp->bits, nr_cpumask_bits);
+}
+
+/**
+ * cpulist_scnprintf - print a cpumask into a string as comma-separated list
+ * @buf: the buffer to sprintf into
+ * @len: the length of the buffer
+ * @srcp: the cpumask to print
+ *
+ * If len is zero, returns zero.  Otherwise returns the length of the
+ * (nul-terminated) @buf string.
+ */
+static inline int cpulist_scnprintf(char *buf, int len,
+				    const struct cpumask *srcp)
+{
+	return bitmap_scnlistprintf(buf, len, srcp->bits, nr_cpumask_bits);
+}
+
+/**
+ * cpulist_parse_user - extract a cpumask from a user string of ranges
+ * @buf: the buffer to extract from
+ * @len: the length of the buffer
+ * @dstp: the cpumask to set.
+ *
+ * Returns -errno, or 0 for success.
+ */
+static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+{
+	return bitmap_parselist(buf, dstp->bits, nr_cpumask_bits);
+}
+
+/**
  * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
  * @bitmap: the bitmap
  *
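
[Editor's note: the print/parse helpers now take struct cpumask pointers and honour nr_cpumask_bits. A hedged usage sketch; the function name, buffer size, and the "0-3,8" string are illustrative.]

	/* Hypothetical example: format the online mask as hex, then parse a
	 * cpulist string such as "0-3,8" into a destination mask. */
	static void example_mask_io(struct cpumask *dst)
	{
		char buf[128];

		cpumask_scnprintf(buf, sizeof(buf), cpu_online_mask);	/* comma-separated hex */
		cpulist_parse("0-3,8", dst);				/* range list -> mask */
	}
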
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 8cc8ef47f5b6..990355fbc54e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -111,13 +111,13 @@ extern void enable_irq(unsigned int irq);
 
 extern cpumask_t irq_default_affinity;
 
-extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
 #else /* CONFIG_SMP */
 
-static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
 {
 	return -EINVAL;
 }
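
[Editor's note: callers of irq_set_affinity() now pass a mask pointer rather than a cpumask_t by value. A hedged sketch of a hypothetical caller; the wrapper name is illustrative.]

	/* Hypothetical example: pin an interrupt to one CPU with the new
	 * pointer-based prototype; on !SMP this still returns -EINVAL. */
	static int example_pin_irq(unsigned int irq, int cpu)
	{
		return irq_set_affinity(irq, cpumask_of(cpu));
	}
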
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d64a6d49bdef..f899b502f186 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -113,7 +113,8 @@ struct irq_chip {
 	void		(*eoi)(unsigned int irq);
 
 	void		(*end)(unsigned int irq);
-	void		(*set_affinity)(unsigned int irq, cpumask_t dest);
+	void		(*set_affinity)(unsigned int irq,
+					const struct cpumask *dest);
 	int		(*retrigger)(unsigned int irq);
 	int		(*set_type)(unsigned int irq, unsigned int flow_type);
 	int		(*set_wake)(unsigned int irq, unsigned int on);
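
[Editor's note: an irq_chip implementation correspondingly receives the destination mask by pointer. A hedged skeleton; the chip callback name and the omitted register write are hypothetical, only cpumask_first() is an existing helper.]

	/* Hypothetical irq_chip callback: route the interrupt to the first CPU
	 * in the requested destination mask. */
	static void example_chip_set_affinity(unsigned int irq,
					      const struct cpumask *dest)
	{
		unsigned int cpu = cpumask_first(dest);

		/* hardware-specific routing write omitted */
		(void)cpu;
	}
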
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8395e715809d..158d53d07765 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -250,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 extern int runqueue_is_locked(void);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 #else
@@ -758,20 +758,51 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
 #define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
-#define BALANCE_FOR_MC_POWER	\
-	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+enum powersavings_balance_level {
+	POWERSAVINGS_BALANCE_NONE = 0,	/* No power saving load balance */
+	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
+					 * first for long running threads
+					 */
+	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
+					 * cpu package for power savings
+					 */
+	MAX_POWERSAVINGS_BALANCE_LEVELS
+};
 
-#define BALANCE_FOR_PKG_POWER	\
-	((sched_mc_power_savings || sched_smt_power_savings) ? \
-	 SD_POWERSAVINGS_BALANCE : 0)
+extern int sched_mc_power_savings, sched_smt_power_savings;
 
-#define test_sd_parent(sd, flag)	((sd->parent &&		\
-					 (sd->parent->flags & flag)) ? 1 : 0)
+static inline int sd_balance_for_mc_power(void)
+{
+	if (sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
 
+	return 0;
+}
+
+static inline int sd_balance_for_package_power(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
+
+	return 0;
+}
+
+/*
+ * Optimise SD flags for power savings:
+ * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
+ * Keep default SD flags if sched_{smt,mc}_power_saving=0
+ */
+
+static inline int sd_power_saving_flags(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_BALANCE_NEWIDLE;
+
+	return 0;
+}
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
-	cpumask_t cpumask;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -784,8 +815,15 @@ struct sched_group {
 	 * (see include/linux/reciprocal_div.h)
 	 */
 	u32 reciprocal_cpu_power;
+
+	unsigned long cpumask[];
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return to_cpumask(sg->cpumask);
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
@@ -809,7 +847,6 @@ struct sched_domain {
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
-	cpumask_t span;			/* span of all CPUs in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -864,18 +901,35 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+
+	/* span of all CPUs in this domain */
+	unsigned long span[];
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
+/* Test a flag in parent sched domain */
+static inline int test_sd_parent(struct sched_domain *sd, int flag)
+{
+	if (sd->parent && (sd->parent->flags & flag))
+		return 1;
+
+	return 0;
+}
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 			struct sched_domain_attr *dattr_new)
 {
 }
@@ -926,7 +980,7 @@ struct sched_class {
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const cpumask_t *newmask);
+				 const struct cpumask *newmask);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -1579,12 +1633,12 @@ extern cputime_t task_gtime(struct task_struct *p);
 
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
-				const cpumask_t *new_mask);
+				const struct cpumask *new_mask);
 #else
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
-				       const cpumask_t *new_mask)
+				       const struct cpumask *new_mask)
 {
-	if (!cpu_isset(0, *new_mask))
+	if (!cpumask_test_cpu(0, new_mask))
 		return -EINVAL;
 	return 0;
 }
@@ -2195,10 +2249,8 @@ __trace_special(void *__tr, void *__data,
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 
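
[Editor's note: with span[] and cpumask[] now flexible arrays, code reads them through the new sched_domain_span()/sched_group_cpus() accessors. A hedged sketch of a hypothetical walker; the function name and the on-stack mask are illustrative, and real callers may prefer cpumask_var_t.]

	/* Hypothetical example: count how many CPUs of a domain's span fall
	 * into its first balancing group, using the new accessors. */
	static int example_group_overlap(struct sched_domain *sd)
	{
		struct cpumask tmp;

		cpumask_and(&tmp, sched_domain_span(sd), sched_group_cpus(sd->groups));
		return cpumask_weight(&tmp);
	}
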
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 0c5b5ac36d8e..e632d29f0544 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -125,7 +125,8 @@ int arch_update_cpu_topology(void);
 				| SD_WAKE_AFFINE	\
 				| SD_WAKE_BALANCE	\
 				| SD_SHARE_PKG_RESOURCES\
-				| BALANCE_FOR_MC_POWER,	\
+				| sd_balance_for_mc_power()\
+				| sd_power_saving_flags(),\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
 }
@@ -150,7 +151,8 @@ int arch_update_cpu_topology(void);
 				| SD_BALANCE_FORK	\
 				| SD_WAKE_AFFINE	\
 				| SD_WAKE_BALANCE	\
-				| BALANCE_FOR_PKG_POWER,\
+				| sd_balance_for_package_power()\
+				| sd_power_saving_flags(),\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
 }