Diffstat (limited to 'include/linux')
 include/linux/bitmap.h    |  1
 include/linux/cpumask.h   | 25
 include/linux/cpuset.h    | 13
 include/linux/init_task.h |  3
 include/linux/ktime.h     |  6
 include/linux/sched.h     | 56
 include/linux/sysdev.h    | 17
 include/linux/topology.h  | 46
 8 files changed, 106 insertions(+), 61 deletions(-)
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index acad1105d942..1dbe074f1c64 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -108,6 +108,7 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits);
 
 extern int bitmap_scnprintf(char *buf, unsigned int len,
 			const unsigned long *src, int nbits);
+extern int bitmap_scnprintf_len(unsigned int len);
 extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
 			unsigned long *dst, int nbits);
 extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
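The new declaration pairs naturally with bitmap_scnprintf() for sizing output buffers. A minimal sketch of a caller, assuming bitmap_scnprintf_len() returns the worst-case formatted length for the given number of bits (only the declaration is visible in this hunk; dump_mask() is an illustrative name, not part of the patch):

/* Illustrative caller, not from this patch: print a bitmap through a
 * heap buffer sized with the new helper. */
static int dump_mask(const unsigned long *bits, int nbits)
{
	int buflen = bitmap_scnprintf_len(nbits) + 1;	/* +1 for '\0' */
	char *buf = kmalloc(buflen, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	bitmap_scnprintf(buf, buflen, bits, nbits);
	printk(KERN_DEBUG "mask: %s\n", buf);
	kfree(buf);
	return 0;
}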
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 7047f58306a7..259c8051155d 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -222,8 +222,13 @@ int __next_cpu(int n, const cpumask_t *srcp);
 #define next_cpu(n, src)	({ (void)(src); 1; })
 #endif
 
+#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
+extern cpumask_t *cpumask_of_cpu_map;
+#define cpumask_of_cpu(cpu)	(cpumask_of_cpu_map[cpu])
+
+#else
 #define cpumask_of_cpu(cpu)				\
-({							\
+(*({							\
 	typeof(_unused_cpumask_arg_) m;			\
 	if (sizeof(m) == sizeof(unsigned long)) {	\
 		m.bits[0] = 1UL<<(cpu);			\
@@ -231,8 +236,9 @@ int __next_cpu(int n, const cpumask_t *srcp);
 		cpus_clear(m);				\
 		cpu_set((cpu), m);			\
 	}						\
-	m;						\
-})
+	&m;						\
+}))
+#endif
 
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
@@ -243,6 +249,8 @@ int __next_cpu(int n, const cpumask_t *srcp);
 	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD	\
 } }
 
+#define CPU_MASK_ALL_PTR	(&CPU_MASK_ALL)
+
 #else
 
 #define CPU_MASK_ALL					\
@@ -251,6 +259,10 @@ int __next_cpu(int n, const cpumask_t *srcp);
 	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD	\
 } }
 
+/* cpu_mask_all is in init/main.c */
+extern cpumask_t cpu_mask_all;
+#define CPU_MASK_ALL_PTR	(&cpu_mask_all)
+
 #endif
 
 #define CPU_MASK_NONE					\
@@ -273,6 +285,13 @@ static inline int __cpumask_scnprintf(char *buf, int len,
 	return bitmap_scnprintf(buf, len, srcp->bits, nbits);
 }
 
+#define cpumask_scnprintf_len(len) \
+			__cpumask_scnprintf_len((len))
+static inline int __cpumask_scnprintf_len(int len)
+{
+	return bitmap_scnprintf_len(len);
+}
+
 #define cpumask_parse_user(ubuf, ulen, dst) \
 			__cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS)
 static inline int __cpumask_parse_user(const char __user *buf, int len,
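Both additions serve the same goal as the (*({ ... })) rewrite of cpumask_of_cpu(): letting callers hand around cpumask pointers instead of copying a whole cpumask_t by value, which matters when NR_CPUS is large. A minimal sketch of the intended calling style, assuming the set_cpus_allowed_ptr() interface added later in this same patch (the helper names are illustrative):

/* Illustrative: bind a task to a single CPU. cpumask_of_cpu() now
 * yields an lvalue, so it can be passed by reference instead of
 * being copied through the stack. */
static int pin_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, &cpumask_of_cpu(cpu));
}

/* CPU_MASK_ALL_PTR likewise avoids materializing CPU_MASK_ALL as a
 * stack temporary when "no restriction" is meant: */
static int unpin(struct task_struct *p)
{
	return set_cpus_allowed_ptr(p, CPU_MASK_ALL_PTR);
}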
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 0a26be353cb3..726761e24003 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -20,8 +20,8 @@ extern int number_of_cpusets;	/* How many cpusets are defined in system? */
 extern int cpuset_init_early(void);
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
-extern cpumask_t cpuset_cpus_allowed(struct task_struct *p);
-extern cpumask_t cpuset_cpus_allowed_locked(struct task_struct *p);
+extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -84,13 +84,14 @@ static inline int cpuset_init_early(void) { return 0; }
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
-static inline cpumask_t cpuset_cpus_allowed(struct task_struct *p)
+static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask)
 {
-	return cpu_possible_map;
+	*mask = cpu_possible_map;
 }
-static inline cpumask_t cpuset_cpus_allowed_locked(struct task_struct *p)
+static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
+					      cpumask_t *mask)
 {
-	return cpu_possible_map;
+	*mask = cpu_possible_map;
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
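The return-by-value interface copied a cpumask_t out of every call; the new form writes into caller-provided storage instead. A hedged before/after sketch of a caller adapting to the out-parameter style (apply_cpuset_affinity() is an illustrative name, not from this patch):

static void apply_cpuset_affinity(struct task_struct *p)
{
	cpumask_t mask;

	/* old style: mask = cpuset_cpus_allowed(p);  (struct copy) */
	cpuset_cpus_allowed(p, &mask);		/* new out-parameter form */
	set_cpus_allowed_ptr(p, &mask);
}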
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1f74e1d7415f..37a6f5bc4a92 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -151,6 +151,9 @@ extern struct group_info init_groups;
 	.cpus_allowed	= CPU_MASK_ALL,				\
 	.mm		= NULL,					\
 	.active_mm	= &init_mm,				\
+	.se		= {					\
+		.group_node	= LIST_HEAD_INIT(tsk.se.group_node), \
+	},							\
 	.rt		= {					\
 		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list), \
 		.time_slice	= HZ,				\
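The static initializer must leave the new group_node (added to struct sched_entity below, in the sched.h hunks) as a valid empty list before the scheduler ever touches init_task. For reference, a minimal sketch of the self-referential pattern LIST_HEAD_INIT relies on (struct foo is illustrative):

struct foo {
	struct list_head node;
};

/* LIST_HEAD_INIT points the head at itself, i.e. an empty list,
 * which is what list_empty() and the enqueue paths expect. */
static struct foo f = {
	.node = LIST_HEAD_INIT(f.node),
};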
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 2cd7fa73d1af..ce5983225be4 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -327,4 +327,10 @@ extern void ktime_get_ts(struct timespec *ts);
/* Get the real (wall-) time in timespec format: */
 #define ktime_get_real_ts(ts)	getnstimeofday(ts)
 
+static inline ktime_t ns_to_ktime(u64 ns)
+{
+	static const ktime_t ktime_zero = { .tv64 = 0 };
+	return ktime_add_ns(ktime_zero, ns);
+}
+
 #endif
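Going through ktime_add_ns() rather than filling the struct directly keeps the helper correct for both ktime_t representations (the 64-bit scalar and the sec/nsec pair), since ktime_add_ns() already normalizes into whichever one is configured. A short usage sketch; arm_timer_500us() is illustrative, while the hrtimer calls are existing kernel APIs:

/* Illustrative: arm an already-initialized hrtimer to fire 500 us
 * from now, building the relative expiry with ns_to_ktime(). */
static void arm_timer_500us(struct hrtimer *t)
{
	hrtimer_start(t, ns_to_ktime(500 * NSEC_PER_USEC), HRTIMER_MODE_REL);
}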
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6a1e7afb099b..be6914014c70 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -704,6 +704,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
 #define BALANCE_FOR_MC_POWER	\
 	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
@@ -733,12 +734,31 @@ struct sched_group {
 	u32 reciprocal_cpu_power;
 };
 
+enum sched_domain_level {
+	SD_LV_NONE = 0,
+	SD_LV_SIBLING,
+	SD_LV_MC,
+	SD_LV_CPU,
+	SD_LV_NODE,
+	SD_LV_ALLNODES,
+	SD_LV_MAX
+};
+
+struct sched_domain_attr {
+	int relax_domain_level;
+};
+
+#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
+	.relax_domain_level = -1,			\
+}
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
 	cpumask_t span;			/* span of all CPUs in this domain */
+	int first_cpu;			/* cache of the first cpu in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -750,6 +770,7 @@ struct sched_domain {
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
 	int flags;			/* See SD_* */
+	enum sched_domain_level level;
 
 	/* Runtime fields. */
 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
@@ -789,7 +810,8 @@ struct sched_domain {
 #endif
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
 #endif	/* CONFIG_SMP */
@@ -889,7 +911,8 @@ struct sched_class {
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
-	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
+	void (*set_cpus_allowed)(struct task_struct *p,
+				 const cpumask_t *newmask);
 
 	void (*join_domain)(struct rq *rq);
 	void (*leave_domain)(struct rq *rq);
@@ -923,6 +946,7 @@ struct load_weight {
 struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
+	struct list_head	group_node;
 	unsigned int		on_rq;
 
 	u64			exec_start;
@@ -982,6 +1006,7 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	int nr_cpus_allowed;
 
+	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity	*parent;
 	/* rq on which this entity is (to be) queued: */
@@ -1502,15 +1527,21 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+extern int set_cpus_allowed_ptr(struct task_struct *p,
+				const cpumask_t *new_mask);
 #else
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+static inline int set_cpus_allowed_ptr(struct task_struct *p,
+				       const cpumask_t *new_mask)
 {
-	if (!cpu_isset(0, new_mask))
+	if (!cpu_isset(0, *new_mask))
 		return -EINVAL;
 	return 0;
 }
 #endif
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+	return set_cpus_allowed_ptr(p, &new_mask);
+}
 
 extern unsigned long long sched_clock(void);
 
@@ -1551,7 +1582,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
@@ -1564,6 +1594,10 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
 
+int sched_rt_handler(struct ctl_table *table, int write,
+		struct file *filp, void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
 extern unsigned int sysctl_sched_compat_yield;
 
 #ifdef CONFIG_RT_MUTEXES
@@ -2031,7 +2065,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
+extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
 extern int sched_mc_power_savings, sched_smt_power_savings;
@@ -2041,8 +2075,11 @@ extern void normalize_rt_tasks(void);
 #ifdef CONFIG_GROUP_SCHED
 
 extern struct task_group init_task_group;
+#ifdef CONFIG_USER_SCHED
+extern struct task_group root_task_group;
+#endif
 
-extern struct task_group *sched_create_group(void);
+extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -2053,6 +2090,9 @@ extern unsigned long sched_group_shares(struct task_group *tg);
 extern int sched_group_set_rt_runtime(struct task_group *tg,
 				      long rt_runtime_us);
 extern long sched_group_rt_runtime(struct task_group *tg);
+extern int sched_group_set_rt_period(struct task_group *tg,
+				     long rt_period_us);
+extern long sched_group_rt_period(struct task_group *tg);
 #endif
 #endif
 
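partition_sched_domains() now takes a per-domain attribute array in parallel with the mask array. A hedged sketch of a caller, assuming relax_domain_level is compared against the new enum sched_domain_level values and that SD_ATTR_INIT's -1 means "default behaviour" (rebuild_doms() and the array shape are illustrative):

/* Illustrative: rebuild two root domains, asking the second one to
 * relax idle balancing up to the NODE level. */
static void rebuild_doms(cpumask_t *doms /* array of 2 masks */)
{
	static struct sched_domain_attr attrs[2];

	attrs[0] = SD_ATTR_INIT;		/* -1: default behaviour */
	attrs[1] = SD_ATTR_INIT;
	attrs[1].relax_domain_level = SD_LV_NODE;

	partition_sched_domains(2, doms, attrs);
}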
diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h
index f752e73bf977..f2767bc6b735 100644
--- a/include/linux/sysdev.h
+++ b/include/linux/sysdev.h
@@ -45,12 +45,16 @@ struct sysdev_class_attribute {
 	ssize_t (*store)(struct sysdev_class *, const char *, size_t);
 };
 
-#define SYSDEV_CLASS_ATTR(_name,_mode,_show,_store)		\
-struct sysdev_class_attribute attr_##_name = {			\
+#define _SYSDEV_CLASS_ATTR(_name,_mode,_show,_store)		\
+{								\
 	.attr = {.name = __stringify(_name), .mode = _mode },	\
 	.show	= _show,					\
 	.store	= _store,					\
-};
+}
+
+#define SYSDEV_CLASS_ATTR(_name,_mode,_show,_store)		\
+	struct sysdev_class_attribute attr_##_name =		\
+		_SYSDEV_CLASS_ATTR(_name,_mode,_show,_store)
 
 
 extern int sysdev_class_register(struct sysdev_class *);
@@ -100,15 +104,16 @@ struct sysdev_attribute {
 };
 
 
-#define _SYSDEV_ATTR(_name,_mode,_show,_store)			\
+#define _SYSDEV_ATTR(_name, _mode, _show, _store)		\
 {								\
 	.attr = { .name = __stringify(_name), .mode = _mode },	\
 	.show	= _show,					\
 	.store	= _store,					\
 }
 
-#define SYSDEV_ATTR(_name,_mode,_show,_store)			\
-struct sysdev_attribute attr_##_name = _SYSDEV_ATTR(_name,_mode,_show,_store);
+#define SYSDEV_ATTR(_name, _mode, _show, _store)		\
+	struct sysdev_attribute attr_##_name =			\
+		_SYSDEV_ATTR(_name, _mode, _show, _store);
 
 extern int sysdev_create_file(struct sys_device *, struct sysdev_attribute *);
 extern void sysdev_remove_file(struct sys_device *, struct sysdev_attribute *);
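Factoring the bare initializer out of SYSDEV_CLASS_ATTR mirrors the existing _SYSDEV_ATTR/SYSDEV_ATTR split and lets code build attribute tables without defining one named variable per attribute. A minimal sketch, assuming a show callback of the form declared in struct sysdev_class_attribute (all names below are illustrative):

static ssize_t show_ident(struct sysdev_class *cls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "example\n");
}

/* The bare-initializer form can populate an array; the old macro
 * could only define a single variable named attr_<name>. */
static struct sysdev_class_attribute my_class_attrs[] = {
	_SYSDEV_CLASS_ATTR(ident, 0444, show_ident, NULL),
	_SYSDEV_CLASS_ATTR(version, 0444, show_ident, NULL),
};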
diff --git a/include/linux/topology.h b/include/linux/topology.h
index bd14f8b30f09..4bb7074a2c3a 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -38,16 +38,15 @@
 #endif
 
 #ifndef nr_cpus_node
-#define nr_cpus_node(node)					\
-	({							\
-		cpumask_t __tmp__;				\
-		__tmp__ = node_to_cpumask(node);		\
-		cpus_weight(__tmp__);				\
+#define nr_cpus_node(node)				\
+	({						\
+		node_to_cpumask_ptr(__tmp__, node);	\
+		cpus_weight(*__tmp__);			\
 	})
 #endif
 
 #define for_each_node_with_cpus(node)			\
 	for_each_online_node(node)			\
 		if (nr_cpus_node(node))
 
 void arch_update_cpu_topology(void);
@@ -80,7 +79,9 @@ void arch_update_cpu_topology(void);
  * by defining their own arch-specific initializer in include/asm/topology.h.
  * A definition there will automagically override these default initializers
  * and allow arch-specific performance tuning of sched_domains.
+ * (Only non-zero and non-null fields need be specified.)
  */
+
 #ifdef CONFIG_SCHED_SMT
 /* MCD - Do we really need this?  It is always on if CONFIG_SCHED_SMT is,
  * so can't we drop this in favor of CONFIG_SCHED_SMT?
@@ -89,20 +90,10 @@ void arch_update_cpu_topology(void);
 /* Common values for SMT siblings */
 #ifndef SD_SIBLING_INIT
 #define SD_SIBLING_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
-	.parent			= NULL,			\
-	.child			= NULL,			\
-	.groups			= NULL,			\
 	.min_interval		= 1,			\
 	.max_interval		= 2,			\
 	.busy_factor		= 64,			\
 	.imbalance_pct		= 110,			\
-	.cache_nice_tries	= 0,			\
-	.busy_idx		= 0,			\
-	.idle_idx		= 0,			\
-	.newidle_idx		= 0,			\
-	.wake_idx		= 0,			\
-	.forkexec_idx		= 0,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_NEWIDLE	\
 				| SD_BALANCE_FORK	\
@@ -112,7 +103,6 @@ void arch_update_cpu_topology(void);
 				| SD_SHARE_CPUPOWER,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
-	.nr_balance_failed	= 0,			\
 }
 #endif
 #endif /* CONFIG_SCHED_SMT */
@@ -121,18 +111,12 @@ void arch_update_cpu_topology(void);
 /* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */
 #ifndef SD_MC_INIT
 #define SD_MC_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
-	.parent			= NULL,			\
-	.child			= NULL,			\
-	.groups			= NULL,			\
 	.min_interval		= 1,			\
 	.max_interval		= 4,			\
 	.busy_factor		= 64,			\
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 1,			\
 	.busy_idx		= 2,			\
-	.idle_idx		= 0,			\
-	.newidle_idx		= 0,			\
 	.wake_idx		= 1,			\
 	.forkexec_idx		= 1,			\
 	.flags			= SD_LOAD_BALANCE	\
@@ -144,7 +128,6 @@ void arch_update_cpu_topology(void);
 				| BALANCE_FOR_MC_POWER,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
-	.nr_balance_failed	= 0,			\
 }
 #endif
 #endif /* CONFIG_SCHED_MC */
@@ -152,10 +135,6 @@ void arch_update_cpu_topology(void);
 /* Common values for CPUs */
 #ifndef SD_CPU_INIT
 #define SD_CPU_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
-	.parent			= NULL,			\
-	.child			= NULL,			\
-	.groups			= NULL,			\
 	.min_interval		= 1,			\
 	.max_interval		= 4,			\
 	.busy_factor		= 64,			\
@@ -174,16 +153,11 @@ void arch_update_cpu_topology(void);
 				| BALANCE_FOR_PKG_POWER,\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
-	.nr_balance_failed	= 0,			\
 }
 #endif
 
 /* sched_domains SD_ALLNODES_INIT for NUMA machines */
 #define SD_ALLNODES_INIT (struct sched_domain) {	\
-	.span			= CPU_MASK_NONE,	\
-	.parent			= NULL,			\
-	.child			= NULL,			\
-	.groups			= NULL,			\
 	.min_interval		= 64,			\
 	.max_interval		= 64*num_online_cpus(),	\
 	.busy_factor		= 128,			\
@@ -191,14 +165,10 @@ void arch_update_cpu_topology(void);
 	.cache_nice_tries	= 1,			\
 	.busy_idx		= 3,			\
 	.idle_idx		= 3,			\
-	.newidle_idx		= 0, /* unused */	\
-	.wake_idx		= 0, /* unused */	\
-	.forkexec_idx		= 0, /* unused */	\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_SERIALIZE,		\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 64,			\
-	.nr_balance_failed	= 0,			\
 }
 
 #ifdef CONFIG_NUMA
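Two things are going on above. The dropped .span/.parent/.child/.groups and *_idx = 0 lines rely on the newly documented rule that unspecified fields of a (struct sched_domain){ ... } compound literal are zero-initialized, so spelling them out was pure noise. And nr_cpus_node() now goes through node_to_cpumask_ptr() so the node's mask is referenced rather than copied onto the stack. A short sketch of the pointer idiom, assuming node_to_cpumask_ptr(v, node) declares v as a pointer to the node's cpumask (first_cpu_on_node() is illustrative):

/* Illustrative: pick the first CPU of a NUMA node without placing
 * a cpumask_t (NR_CPUS bits wide) on the kernel stack. */
static int first_cpu_on_node(int node)
{
	node_to_cpumask_ptr(mask, node);

	return first_cpu(*mask);
}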