author     Anton Altaparmakov <aia21@cantab.net>  2005-06-26 17:19:40 -0400
committer  Anton Altaparmakov <aia21@cantab.net>  2005-06-26 17:19:40 -0400
commit     2a322e4c08be4e7cb0c04b427ddaaa679fd88863 (patch)
tree       ad8cc17bfd3b5e57e36f07a249028667d72f0b96 /arch/ia64
parent     ba6d2377c85c9b8a793f455d8c9b6cf31985d70f (diff)
parent     8678887e7fb43cd6c9be6c9807b05e77848e0920 (diff)
Automatic merge with /usr/src/ntfs-2.6.git.
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/kernel/domain.c           | 78
-rw-r--r--  arch/ia64/kernel/smpboot.c          |  4
-rw-r--r--  arch/ia64/sn/kernel/xpc.h           | 12
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c   | 12
-rw-r--r--  arch/ia64/sn/kernel/xpc_partition.c |  2
5 files changed, 63 insertions(+), 45 deletions(-)
diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c
index fe532c970438..d65e87b6394f 100644
--- a/arch/ia64/kernel/domain.c
+++ b/arch/ia64/kernel/domain.c
@@ -14,7 +14,7 @@
 #include <linux/topology.h>
 #include <linux/nodemask.h>
 
-#define SD_NODES_PER_DOMAIN 6
+#define SD_NODES_PER_DOMAIN 16
 
 #ifdef CONFIG_NUMA
 /**
@@ -27,7 +27,7 @@
  *
  * Should use nodemask_t.
  */
-static int __devinit find_next_best_node(int node, unsigned long *used_nodes)
+static int find_next_best_node(int node, unsigned long *used_nodes)
 {
 	int i, n, val, min_val, best_node = 0;
 
@@ -66,7 +66,7 @@ static int __devinit find_next_best_node(int node, unsigned long *used_nodes)
  * should be one that prevents unnecessary balancing, but also spreads tasks
  * out optimally.
  */
-static cpumask_t __devinit sched_domain_node_span(int node)
+static cpumask_t sched_domain_node_span(int node)
 {
 	int i;
 	cpumask_t span, nodemask;
@@ -96,7 +96,7 @@ static cpumask_t __devinit sched_domain_node_span(int node)
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static struct sched_group sched_group_cpus[NR_CPUS];
-static int __devinit cpu_to_cpu_group(int cpu)
+static int cpu_to_cpu_group(int cpu)
 {
 	return cpu;
 }
@@ -104,7 +104,7 @@ static int __devinit cpu_to_cpu_group(int cpu)
 
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static struct sched_group sched_group_phys[NR_CPUS];
-static int __devinit cpu_to_phys_group(int cpu)
+static int cpu_to_phys_group(int cpu)
 {
 #ifdef CONFIG_SCHED_SMT
 	return first_cpu(cpu_sibling_map[cpu]);
@@ -125,44 +125,36 @@ static struct sched_group *sched_group_nodes[MAX_NUMNODES];
 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
 static struct sched_group sched_group_allnodes[MAX_NUMNODES];
 
-static int __devinit cpu_to_allnodes_group(int cpu)
+static int cpu_to_allnodes_group(int cpu)
 {
 	return cpu_to_node(cpu);
 }
 #endif
 
 /*
- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
+ * Build sched domains for a given set of cpus and attach the sched domains
+ * to the individual cpus
  */
-void __devinit arch_init_sched_domains(void)
+void build_sched_domains(const cpumask_t *cpu_map)
 {
 	int i;
-	cpumask_t cpu_default_map;
 
 	/*
-	 * Setup mask for cpus without special case scheduling requirements.
-	 * For now this just excludes isolated cpus, but could be used to
-	 * exclude other special cases in the future.
+	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	cpus_complement(cpu_default_map, cpu_isolated_map);
-	cpus_and(cpu_default_map, cpu_default_map, cpu_online_map);
-
-	/*
-	 * Set up domains. Isolated domains just stay on the dummy domain.
-	 */
-	for_each_cpu_mask(i, cpu_default_map) {
+	for_each_cpu_mask(i, *cpu_map) {
 		int group;
 		struct sched_domain *sd = NULL, *p;
 		cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
 
-		cpus_and(nodemask, nodemask, cpu_default_map);
+		cpus_and(nodemask, nodemask, *cpu_map);
 
 #ifdef CONFIG_NUMA
 		if (num_online_cpus()
 				> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
 			sd = &per_cpu(allnodes_domains, i);
 			*sd = SD_ALLNODES_INIT;
-			sd->span = cpu_default_map;
+			sd->span = *cpu_map;
 			group = cpu_to_allnodes_group(i);
 			sd->groups = &sched_group_allnodes[group];
 			p = sd;
@@ -173,7 +165,7 @@ void __devinit arch_init_sched_domains(void)
 		*sd = SD_NODE_INIT;
 		sd->span = sched_domain_node_span(cpu_to_node(i));
 		sd->parent = p;
-		cpus_and(sd->span, sd->span, cpu_default_map);
+		cpus_and(sd->span, sd->span, *cpu_map);
 #endif
 
 		p = sd;
@@ -190,7 +182,7 @@ void __devinit arch_init_sched_domains(void)
 		group = cpu_to_cpu_group(i);
 		*sd = SD_SIBLING_INIT;
 		sd->span = cpu_sibling_map[i];
-		cpus_and(sd->span, sd->span, cpu_default_map);
+		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		sd->groups = &sched_group_cpus[group];
 #endif
@@ -198,9 +190,9 @@ void __devinit arch_init_sched_domains(void)
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, cpu_default_map) {
+	for_each_cpu_mask(i, *cpu_map) {
 		cpumask_t this_sibling_map = cpu_sibling_map[i];
-		cpus_and(this_sibling_map, this_sibling_map, cpu_default_map);
+		cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
 		if (i != first_cpu(this_sibling_map))
 			continue;
 
@@ -213,7 +205,7 @@ void __devinit arch_init_sched_domains(void)
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		cpumask_t nodemask = node_to_cpumask(i);
 
-		cpus_and(nodemask, nodemask, cpu_default_map);
+		cpus_and(nodemask, nodemask, *cpu_map);
 		if (cpus_empty(nodemask))
 			continue;
 
@@ -222,7 +214,7 @@ void __devinit arch_init_sched_domains(void)
 	}
 
 #ifdef CONFIG_NUMA
-	init_sched_build_groups(sched_group_allnodes, cpu_default_map,
+	init_sched_build_groups(sched_group_allnodes, *cpu_map,
 				&cpu_to_allnodes_group);
 
 	for (i = 0; i < MAX_NUMNODES; i++) {
@@ -233,12 +225,12 @@ void __devinit arch_init_sched_domains(void)
 		cpumask_t covered = CPU_MASK_NONE;
 		int j;
 
-		cpus_and(nodemask, nodemask, cpu_default_map);
+		cpus_and(nodemask, nodemask, *cpu_map);
 		if (cpus_empty(nodemask))
 			continue;
 
 		domainspan = sched_domain_node_span(i);
-		cpus_and(domainspan, domainspan, cpu_default_map);
+		cpus_and(domainspan, domainspan, *cpu_map);
 
 		sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
 		sched_group_nodes[i] = sg;
@@ -266,7 +258,7 @@ void __devinit arch_init_sched_domains(void)
 			int n = (i + j) % MAX_NUMNODES;
 
 			cpus_complement(notcovered, covered);
-			cpus_and(tmp, notcovered, cpu_default_map);
+			cpus_and(tmp, notcovered, *cpu_map);
 			cpus_and(tmp, tmp, domainspan);
 			if (cpus_empty(tmp))
 				break;
@@ -293,7 +285,7 @@ void __devinit arch_init_sched_domains(void)
 #endif
 
 	/* Calculate CPU power for physical packages and nodes */
-	for_each_cpu_mask(i, cpu_default_map) {
+	for_each_cpu_mask(i, *cpu_map) {
 		int power;
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
@@ -359,13 +351,35 @@ next_sg:
 		cpu_attach_domain(sd, i);
 	}
 }
+/*
+ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
+ */
+void arch_init_sched_domains(const cpumask_t *cpu_map)
+{
+	cpumask_t cpu_default_map;
+
+	/*
+	 * Setup mask for cpus without special case scheduling requirements.
+	 * For now this just excludes isolated cpus, but could be used to
+	 * exclude other special cases in the future.
+	 */
+	cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
+
+	build_sched_domains(&cpu_default_map);
+}
 
-void __devinit arch_destroy_sched_domains(void)
+void arch_destroy_sched_domains(const cpumask_t *cpu_map)
 {
 #ifdef CONFIG_NUMA
 	int i;
 	for (i = 0; i < MAX_NUMNODES; i++) {
+		cpumask_t nodemask = node_to_cpumask(i);
 		struct sched_group *oldsg, *sg = sched_group_nodes[i];
+
+		cpus_and(nodemask, nodemask, *cpu_map);
+		if (cpus_empty(nodemask))
+			continue;
+
 		if (sg == NULL)
 			continue;
 		sg = sg->next;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 3865f088ffa2..623b0a546709 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -346,6 +346,7 @@ smp_callin (void)
 	lock_ipi_calllock();
 	cpu_set(cpuid, cpu_online_map);
 	unlock_ipi_calllock();
+	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 
 	smp_setup_percpu_timer();
 
@@ -611,6 +612,7 @@ void __devinit smp_prepare_boot_cpu(void)
 {
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), cpu_callin_map);
+	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 }
 
 /*
@@ -688,6 +690,7 @@ int __cpu_disable(void)
 		return -EBUSY;
 
 	remove_siblinginfo(cpu);
+	cpu_clear(cpu, cpu_online_map);
 	fixup_irqs();
 	local_flush_tlb_all();
 	cpu_clear(cpu, cpu_callin_map);
@@ -774,6 +777,7 @@ __cpu_up (unsigned int cpu)
 	if (cpu_isset(cpu, cpu_callin_map))
 		return -EINVAL;
 
+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 	/* Processor goes to start_secondary(), sets online flag */
 	ret = do_boot_cpu(sapicid, cpu);
 	if (ret < 0)
diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h
index 1a0aed8490d1..d0ee635daf2e 100644
--- a/arch/ia64/sn/kernel/xpc.h
+++ b/arch/ia64/sn/kernel/xpc.h
@@ -87,7 +87,7 @@ struct xpc_rsvd_page {
 	u8 partid;		/* partition ID from SAL */
 	u8 version;
 	u8 pad[6];		/* pad to u64 align */
-	u64 vars_pa;
+	volatile u64 vars_pa;
 	u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
 	u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
 };
@@ -138,7 +138,7 @@ struct xpc_vars {
  * occupies half a cacheline.
  */
 struct xpc_vars_part {
-	u64 magic;
+	volatile u64 magic;
 
 	u64 openclose_args_pa;	/* physical address of open and close args */
 	u64 GPs_pa;		/* physical address of Get/Put values */
@@ -185,8 +185,8 @@ struct xpc_vars_part {
  * Define a Get/Put value pair (pointers) used with a message queue.
  */
 struct xpc_gp {
-	s64 get;	/* Get value */
-	s64 put;	/* Put value */
+	volatile s64 get;	/* Get value */
+	volatile s64 put;	/* Put value */
 };
 
 #define XPC_GP_SIZE \
@@ -231,7 +231,7 @@ struct xpc_openclose_args {
  */
 struct xpc_notify {
 	struct semaphore sema;		/* notify semaphore */
-	u8 type;			/* type of notification */
+	volatile u8 type;		/* type of notification */
 
 	/* the following two fields are only used if type == XPC_N_CALL */
 	xpc_notify_func func;		/* user's notify function */
@@ -439,7 +439,7 @@ struct xpc_partition {
 
 	/* XPC infrastructure referencing and teardown control */
 
-	u8 setup_state;			/* infrastructure setup state */
+	volatile u8 setup_state;	/* infrastructure setup state */
 	wait_queue_head_t teardown_wq;	/* kthread waiting to teardown infra */
 	atomic_t references;		/* #of references to infrastructure */
 
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 0bf6fbcc46d2..6d02dac8056f 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -209,7 +209,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	 * With the setting of the partition setup_state to XPC_P_SETUP, we're
 	 * declaring that this partition is ready to go.
 	 */
-	(volatile u8) part->setup_state = XPC_P_SETUP;
+	part->setup_state = XPC_P_SETUP;
 
 
 	/*
@@ -227,7 +227,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	xpc_vars_part[partid].IPI_phys_cpuid =
 					cpu_physical_id(smp_processor_id());
 	xpc_vars_part[partid].nchannels = part->nchannels;
-	(volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
+	xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
 
 	return xpcSuccess;
 }
@@ -355,7 +355,7 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 
 		/* let the other side know that we've pulled their variables */
 
-		(volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
+		xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
 	}
 
 	if (pulled_entry->magic == XPC_VP_MAGIC1) {
@@ -1183,7 +1183,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 		 */
 		xpc_clear_local_msgqueue_flags(ch);
 
-		(volatile s64) ch->w_remote_GP.get = ch->remote_GP.get;
+		ch->w_remote_GP.get = ch->remote_GP.get;
 
 		dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
 			"channel=%d\n", ch->w_remote_GP.get, ch->partid,
@@ -1211,7 +1211,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 		 */
 		xpc_clear_remote_msgqueue_flags(ch);
 
-		(volatile s64) ch->w_remote_GP.put = ch->remote_GP.put;
+		ch->w_remote_GP.put = ch->remote_GP.put;
 
 		dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
 			"channel=%d\n", ch->w_remote_GP.put, ch->partid,
@@ -1875,7 +1875,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
 	notify = &ch->notify_queue[msg_number % ch->local_nentries];
 	notify->func = func;
 	notify->key = key;
-	(volatile u8) notify->type = notify_type;
+	notify->type = notify_type;
 
 	// >>> is a mb() needed here?
 
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index cd7ed73f0e7a..578265ea9e67 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -253,7 +253,7 @@ xpc_rsvd_page_init(void)
 	 * This signifies to the remote partition that our reserved
 	 * page is initialized.
 	 */
-	(volatile u64) rp->vars_pa = __pa(xpc_vars);
+	rp->vars_pa = __pa(xpc_vars);
 
 	return rp;
 }