Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/cpumask.h    18
-rw-r--r--  arch/x86/include/asm/pci.h         5
-rw-r--r--  arch/x86/include/asm/processor.h   3
-rw-r--r--  arch/x86/include/asm/smp.h        13
-rw-r--r--  arch/x86/include/asm/topology.h   87
5 files changed, 26 insertions, 100 deletions
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
index a7f3c75f8ad7..61c852fa346b 100644
--- a/arch/x86/include/asm/cpumask.h
+++ b/arch/x86/include/asm/cpumask.h
@@ -3,8 +3,6 @@
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
 
-#ifdef CONFIG_X86_64
-
 extern cpumask_var_t cpu_callin_mask;
 extern cpumask_var_t cpu_callout_mask;
 extern cpumask_var_t cpu_initialized_mask;
@@ -12,21 +10,5 @@ extern cpumask_var_t cpu_sibling_setup_mask;
 
 extern void setup_cpu_local_masks(void);
 
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
-
-static inline void setup_cpu_local_masks(void) { }
-
-#endif /* CONFIG_X86_32 */
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_CPUMASK_H */
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index e545ea01abcf..b51a1e8b0baf 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -140,11 +140,6 @@ static inline int __pcibus_to_node(const struct pci_bus *bus)
 	return sd->node;
 }
 
-static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
-{
-	return node_to_cpumask(__pcibus_to_node(bus));
-}
-
 static inline const struct cpumask *
 cpumask_of_pcibus(const struct pci_bus *bus)
 {
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ae85a8d66a30..34c52370f2fe 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -94,7 +94,7 @@ struct cpuinfo_x86 {
 	unsigned long		loops_per_jiffy;
 #ifdef CONFIG_SMP
 	/* cpus sharing the last level cache: */
-	cpumask_t		llc_shared_map;
+	cpumask_var_t		llc_shared_map;
 #endif
 	/* cpuid returned max cores value: */
 	u16			x86_max_cores;
@@ -736,6 +736,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
+extern void init_c1e_mask(void);
 
 extern unsigned long		boot_option_idle_override;
 extern unsigned long		idle_halt;
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 47d0e21f2b9e..19e0d88b966d 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -21,19 +21,19 @@
 extern int smp_num_siblings;
 extern unsigned int num_processors;
 
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
 DECLARE_PER_CPU(int, cpu_number);
 
 static inline struct cpumask *cpu_sibling_mask(int cpu)
 {
-	return &per_cpu(cpu_sibling_map, cpu);
+	return per_cpu(cpu_sibling_map, cpu);
 }
 
 static inline struct cpumask *cpu_core_mask(int cpu)
 {
-	return &per_cpu(cpu_core_map, cpu);
+	return per_cpu(cpu_core_map, cpu);
 }
 
 DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
@@ -121,9 +121,10 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 	smp_ops.send_call_func_single_ipi(cpu);
 }
 
-static inline void arch_send_call_function_ipi(cpumask_t mask)
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	smp_ops.send_call_func_ipi(&mask);
+	smp_ops.send_call_func_ipi(mask);
 }
 
 void cpu_disable_common(void);
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 744299c0b774..892b119dba6f 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -44,9 +44,6 @@
 
 #ifdef CONFIG_X86_32
 
-/* Mappings between node number and cpus on that node. */
-extern cpumask_t node_to_cpumask_map[];
-
 /* Mappings between logical cpu number and node number */
 extern int cpu_to_node_map[];
 
@@ -57,30 +54,8 @@ static inline int cpu_to_node(int cpu)
 }
 #define early_cpu_to_node(cpu) cpu_to_node(cpu)
 
-/* Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used. The
- * cpumask_of_node function should be used whenever possible.
- */
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return node_to_cpumask_map[node];
-}
-
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline const struct cpumask *cpumask_of_node(int node)
-{
-	return &node_to_cpumask_map[node];
-}
-
-static inline void setup_node_to_cpumask_map(void) { }
-
 #else /* CONFIG_X86_64 */
 
-/* Mappings between node number and cpus on that node. */
-extern cpumask_t *node_to_cpumask_map;
-
 /* Mappings between logical cpu number and node number */
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
@@ -91,8 +66,6 @@ DECLARE_PER_CPU(int, node_number);
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
 extern int early_cpu_to_node(int cpu);
-extern const cpumask_t *cpumask_of_node(int node);
-extern cpumask_t node_to_cpumask(int node);
 
 #else /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
@@ -108,42 +81,32 @@ static inline int early_cpu_to_node(int cpu)
 	return early_per_cpu(x86_cpu_to_node_map, cpu);
 }
 
-/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const cpumask_t *cpumask_of_node(int node)
-{
-	return &node_to_cpumask_map[node];
-}
+#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
+
+#endif /* CONFIG_X86_64 */
 
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline cpumask_t node_to_cpumask(int node)
+/* Mappings between node number and cpus on that node. */
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+extern const struct cpumask *cpumask_of_node(int node);
+#else
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
 {
 	return node_to_cpumask_map[node];
 }
-
-#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
+#endif
 
 extern void setup_node_to_cpumask_map(void);
 
 /*
- * Replace default node_to_cpumask_ptr with optimized version
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = cpumask_of_node(node)
-
-#define node_to_cpumask_ptr_next(v, node)	\
-		 v = cpumask_of_node(node)
-
-#endif /* CONFIG_X86_64 */
-
-/*
  * Returns the number of the node containing Node 'node'. This
  * architecture is flat, so it is a pretty simple function!
  */
 #define parent_node(node) (node)
 
 #define pcibus_to_node(bus) __pcibus_to_node(bus)
-#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)
 
 #ifdef CONFIG_X86_32
 extern unsigned long node_start_pfn[];
@@ -209,40 +172,24 @@ static inline int early_cpu_to_node(int cpu)
 	return 0;
 }
 
-static inline const cpumask_t *cpumask_of_node(int node)
-{
-	return &cpu_online_map;
-}
-static inline cpumask_t node_to_cpumask(int node)
+static inline const struct cpumask *cpumask_of_node(int node)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 
 static inline void setup_node_to_cpumask_map(void) { }
 
-/*
- * Replace default node_to_cpumask_ptr with optimized version
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = cpumask_of_node(node)
-
-#define node_to_cpumask_ptr_next(v, node)	\
-		 v = cpumask_of_node(node)
 #endif
 
 #include <asm-generic/topology.h>
 
-extern cpumask_t cpu_coregroup_map(int cpu);
 extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
 #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
-#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu))
-#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
 
 /* indicates that pointers to the topology cpumask_t maps are valid */
 #define arch_provides_topology_pointers yes
@@ -256,7 +203,7 @@ struct pci_bus;
 void set_pci_bus_resources_arch_default(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
-#define mc_capable() (cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
+#define mc_capable() (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
 #define smt_capable() (smp_num_siblings > 1)
 #endif
 
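
Side note (not part of the diff above): once these cpumask_t fields become cpumask_var_t, the masks are no longer fixed-size arrays embedded in their structures; with CONFIG_CPUMASK_OFFSTACK=y they must be allocated before first use and freed afterwards. A minimal sketch of the usual allocation pattern, using a hypothetical 'demo_mask' rather than any symbol from this patch:

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/init.h>

	static cpumask_var_t demo_mask;	/* hypothetical example, not from this patch */

	static int __init demo_mask_init(void)
	{
		/* Allocates and zeroes the mask when CONFIG_CPUMASK_OFFSTACK=y;
		 * otherwise cpumask_var_t is a fixed array and this only zeroes it. */
		if (!zalloc_cpumask_var(&demo_mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(demo_mask, cpu_online_mask);
		return 0;
	}

	static void demo_mask_exit(void)
	{
		free_cpumask_var(demo_mask);
	}

This is also why accessors such as cpu_sibling_mask() above now return per_cpu(cpu_sibling_map, cpu) directly instead of taking its address: the per-cpu cpumask_var_t already yields the struct cpumask pointer (or an array that decays to one) expected by the cpumask API.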