author		Dean Nelson <dcn@sgi.com>	2008-04-22 15:48:55 -0400
committer	Tony Luck <tony.luck@intel.com>	2008-04-22 18:08:44 -0400
commit		35190506b1a18eda7df24b285fdcd94dec7800ef (patch)
tree		ce0101cd6867738ff8bbe4edf343cbe2517540e9 /drivers/misc/sgi-xp
parent		4a3ad2ddc0b920cd3ead84b0c67599be02d689ca (diff)
[IA64] run rest drivers/misc/sgi-xp through scripts/Lindent
Ran patches through scripts/Lindent (part 2).

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'drivers/misc/sgi-xp')
-rw-r--r--	drivers/misc/sgi-xp/xpc_channel.c	390
-rw-r--r--	drivers/misc/sgi-xp/xpc_main.c		329
-rw-r--r--	drivers/misc/sgi-xp/xpnet.c		109
3 files changed, 322 insertions, 506 deletions
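For reference, scripts/Lindent is a small wrapper in the kernel tree around
indent(1) with the kernel's preferred flags. A minimal sketch of how a cleanup
pass like this one is produced and reviewed (an assumed workflow, not a record
of the author's exact commands; the file list is taken from the diffstat
above):

    # From the top of a kernel source tree that still ships scripts/Lindent.
    ./scripts/Lindent drivers/misc/sgi-xp/xpc_channel.c \
                      drivers/misc/sgi-xp/xpc_main.c \
                      drivers/misc/sgi-xp/xpnet.c
    # Review the churn before committing; the patch should be formatting-only.
    git diff --stat drivers/misc/sgi-xp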
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index d7a215eeaaf6..15cb91a82102 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -6,7 +6,6 @@
  * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
  */

-
 /*
  * Cross Partition Communication (XPC) channel support.
  *
@@ -15,7 +14,6 @@
  *
  */

-
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -27,7 +25,6 @@
 #include <asm/sn/sn_sal.h>
 #include "xpc.h"

-
 /*
  * Guarantee that the kzalloc'd memory is cacheline aligned.
  */
@@ -39,7 +36,7 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
 	if (*base == NULL) {
 		return NULL;
 	}
-	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+	if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) {
 		return *base;
 	}
 	kfree(*base);
@@ -49,10 +46,9 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
 	if (*base == NULL) {
 		return NULL;
 	}
-	return (void *) L1_CACHE_ALIGN((u64) *base);
+	return (void *)L1_CACHE_ALIGN((u64)*base);
 }

-
 /*
  * Set up the initial values for the XPartition Communication channels.
  */
@@ -62,7 +58,6 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
 	int ch_number;
 	struct xpc_channel *ch;

-
 	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
 		ch = &part->channels[ch_number];

@@ -72,7 +67,7 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)

 		ch->local_GP = &part->local_GPs[ch_number];
 		ch->local_openclose_args =
-					&part->local_openclose_args[ch_number];
+		    &part->local_openclose_args[ch_number];

 		atomic_set(&ch->kthreads_assigned, 0);
 		atomic_set(&ch->kthreads_idle, 0);
@@ -91,7 +86,6 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
 	}
 }

-
 /*
  * Setup the infrastructure necessary to support XPartition Communication
  * between the specified remote partition and the local one.
@@ -103,7 +97,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	struct timer_list *timer;
 	partid_t partid = XPC_PARTID(part);

-
 	/*
 	 * Zero out MOST of the entry for this partition. Only the fields
 	 * starting with `nchannels' will be zeroed. The preceding fields must
@@ -111,14 +104,14 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	 * referenced during this memset() operation.
 	 */
 	memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
-					offsetof(struct xpc_partition, nchannels));
+	       offsetof(struct xpc_partition, nchannels));

 	/*
 	 * Allocate all of the channel structures as a contiguous chunk of
 	 * memory.
 	 */
 	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
-								GFP_KERNEL);
+				 GFP_KERNEL);
 	if (part->channels == NULL) {
 		dev_err(xpc_chan, "can't get memory for channels\n");
 		return xpcNoMemory;
@@ -126,11 +119,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)

 	part->nchannels = XPC_NCHANNELS;

-
 	/* allocate all the required GET/PUT values */

 	part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
-					GFP_KERNEL, &part->local_GPs_base);
+							GFP_KERNEL,
+							&part->local_GPs_base);
 	if (part->local_GPs == NULL) {
 		kfree(part->channels);
 		part->channels = NULL;
@@ -140,7 +133,9 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	}

 	part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
-					GFP_KERNEL, &part->remote_GPs_base);
+							 GFP_KERNEL,
+							 &part->
+							 remote_GPs_base);
 	if (part->remote_GPs == NULL) {
 		dev_err(xpc_chan, "can't get memory for remote get/put "
 			"values\n");
@@ -151,12 +146,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 		return xpcNoMemory;
 	}

-
 	/* allocate all the required open and close args */

-	part->local_openclose_args = xpc_kzalloc_cacheline_aligned(
-					XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
-					&part->local_openclose_args_base);
+	part->local_openclose_args =
+	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
+					  &part->local_openclose_args_base);
 	if (part->local_openclose_args == NULL) {
 		dev_err(xpc_chan, "can't get memory for local connect args\n");
 		kfree(part->remote_GPs_base);
@@ -168,9 +162,9 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 		return xpcNoMemory;
 	}

-	part->remote_openclose_args = xpc_kzalloc_cacheline_aligned(
-					XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
-					&part->remote_openclose_args_base);
+	part->remote_openclose_args =
+	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
+					  &part->remote_openclose_args_base);
 	if (part->remote_openclose_args == NULL) {
 		dev_err(xpc_chan, "can't get memory for remote connect args\n");
 		kfree(part->local_openclose_args_base);
@@ -184,13 +178,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 		return xpcNoMemory;
 	}

-
 	xpc_initialize_channels(part, partid);

 	atomic_set(&part->nchannels_active, 0);
 	atomic_set(&part->nchannels_engaged, 0);

-
 	/* local_IPI_amo were set to 0 by an earlier memset() */

 	/* Initialize this partitions AMO_t structure */
@@ -203,7 +195,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)

 	sprintf(part->IPI_owner, "xpc%02d", partid);
 	ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED,
-			  part->IPI_owner, (void *) (u64) partid);
+			  part->IPI_owner, (void *)(u64)partid);
 	if (ret != 0) {
 		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
 			"errno=%d\n", -ret);
@@ -223,8 +215,8 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	/* Setup a timer to check for dropped IPIs */
 	timer = &part->dropped_IPI_timer;
 	init_timer(timer);
-	timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check;
-	timer->data = (unsigned long) part;
+	timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check;
+	timer->data = (unsigned long)part;
 	timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
 	add_timer(timer);

@@ -234,7 +226,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	 */
 	part->setup_state = XPC_P_SETUP;

-
 	/*
 	 * Setup the per partition specific variables required by the
 	 * remote partition to establish channel connections with us.
@@ -244,7 +235,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	 */
 	xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
 	xpc_vars_part[partid].openclose_args_pa =
-			__pa(part->local_openclose_args);
+	    __pa(part->local_openclose_args);
 	xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
 	cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
 	xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
@@ -255,7 +246,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	return xpcSuccess;
 }

-
 /*
  * Create a wrapper that hides the underlying mechanism for pulling a cacheline
  * (or multiple cachelines) from a remote partition.
@@ -266,21 +256,20 @@ xpc_setup_infrastructure(struct xpc_partition *part)
  */
 static enum xpc_retval
 xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
-				const void *src, size_t cnt)
+			   const void *src, size_t cnt)
 {
 	bte_result_t bte_ret;

-
-	DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
-	DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
+	DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src));
+	DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
 	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));

 	if (part->act_state == XPC_P_DEACTIVATING) {
 		return part->reason;
 	}

-	bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt,
-				(BTE_NORMAL | BTE_WACQUIRE), NULL);
+	bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt,
+			      (BTE_NORMAL | BTE_WACQUIRE), NULL);
 	if (bte_ret == BTE_SUCCESS) {
 		return xpcSuccess;
 	}
@@ -291,7 +280,6 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
 	return xpc_map_bte_errors(bte_ret);
 }

-
 /*
  * Pull the remote per partition specific variables from the specified
  * partition.
@@ -301,41 +289,40 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 {
 	u8 buffer[L1_CACHE_BYTES * 2];
 	struct xpc_vars_part *pulled_entry_cacheline =
-			(struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
+	    (struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer);
 	struct xpc_vars_part *pulled_entry;
 	u64 remote_entry_cacheline_pa, remote_entry_pa;
 	partid_t partid = XPC_PARTID(part);
 	enum xpc_retval ret;

-
 	/* pull the cacheline that contains the variables we're interested in */

 	DBUG_ON(part->remote_vars_part_pa !=
-			L1_CACHE_ALIGN(part->remote_vars_part_pa));
+		L1_CACHE_ALIGN(part->remote_vars_part_pa));
 	DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);

 	remote_entry_pa = part->remote_vars_part_pa +
-			sn_partition_id * sizeof(struct xpc_vars_part);
+	    sn_partition_id * sizeof(struct xpc_vars_part);

 	remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));

-	pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline +
-					(remote_entry_pa & (L1_CACHE_BYTES - 1)));
+	pulled_entry = (struct xpc_vars_part *)((u64)pulled_entry_cacheline +
+						(remote_entry_pa &
+						 (L1_CACHE_BYTES - 1)));

 	ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
-				(void *) remote_entry_cacheline_pa,
-				L1_CACHE_BYTES);
+					 (void *)remote_entry_cacheline_pa,
+					 L1_CACHE_BYTES);
 	if (ret != xpcSuccess) {
 		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
 			"partition %d, ret=%d\n", partid, ret);
 		return ret;
 	}

-
 	/* see if they've been set up yet */

 	if (pulled_entry->magic != XPC_VP_MAGIC1 &&
-			pulled_entry->magic != XPC_VP_MAGIC2) {
+	    pulled_entry->magic != XPC_VP_MAGIC2) {

 		if (pulled_entry->magic != 0) {
 			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
@@ -353,8 +340,8 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 	/* validate the variables */

 	if (pulled_entry->GPs_pa == 0 ||
-			pulled_entry->openclose_args_pa == 0 ||
-			pulled_entry->IPI_amo_pa == 0) {
+	    pulled_entry->openclose_args_pa == 0 ||
+	    pulled_entry->IPI_amo_pa == 0) {

 		dev_err(xpc_chan, "partition %d's XPC vars_part for "
 			"partition %d are not valid\n", partid,
@@ -366,9 +353,9 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)

 	part->remote_GPs_pa = pulled_entry->GPs_pa;
 	part->remote_openclose_args_pa =
-					pulled_entry->openclose_args_pa;
+	    pulled_entry->openclose_args_pa;
 	part->remote_IPI_amo_va =
-				(AMO_t *) __va(pulled_entry->IPI_amo_pa);
+	    (AMO_t *)__va(pulled_entry->IPI_amo_pa);
 	part->remote_IPI_nasid = pulled_entry->IPI_nasid;
 	part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;

@@ -388,7 +375,6 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 	return xpcSuccess;
 }

-
 /*
  * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
  */
@@ -399,7 +385,6 @@ xpc_get_IPI_flags(struct xpc_partition *part)
 	u64 IPI_amo;
 	enum xpc_retval ret;

-
 	/*
 	 * See if there are any IPI flags to be handled.
 	 */
@@ -410,12 +395,12 @@
 	}
 	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);

-
 	if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
 		ret = xpc_pull_remote_cachelines(part,
-					part->remote_openclose_args,
-					(void *) part->remote_openclose_args_pa,
-					XPC_OPENCLOSE_ARGS_SIZE);
+						 part->remote_openclose_args,
+						 (void *)part->
+						 remote_openclose_args_pa,
+						 XPC_OPENCLOSE_ARGS_SIZE);
 		if (ret != xpcSuccess) {
 			XPC_DEACTIVATE_PARTITION(part, ret);

@@ -430,8 +415,8 @@

 	if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
 		ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
-					(void *) part->remote_GPs_pa,
-					XPC_GP_SIZE);
+						 (void *)part->remote_GPs_pa,
+						 XPC_GP_SIZE);
 		if (ret != xpcSuccess) {
 			XPC_DEACTIVATE_PARTITION(part, ret);

@@ -446,7 +431,6 @@
 	return IPI_amo;
 }

-
 /*
  * Allocate the local message queue and the notify queue.
  */
@@ -457,7 +441,6 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
 	int nentries;
 	size_t nbytes;

-
 	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
 	// >>> iterations of the for-loop, bail if set?

@@ -466,8 +449,9 @@

 		nbytes = nentries * ch->msg_size;
 		ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
-							GFP_KERNEL,
-						&ch->local_msgqueue_base);
+								   GFP_KERNEL,
+								   &ch->
+								   local_msgqueue_base);
 		if (ch->local_msgqueue == NULL) {
 			continue;
 		}
@@ -497,7 +481,6 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
 	return xpcNoMemory;
 }

-
 /*
  * Allocate the cached remote message queue.
  */
@@ -508,7 +491,6 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
 	int nentries;
 	size_t nbytes;

-
 	DBUG_ON(ch->remote_nentries <= 0);

 	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
@@ -519,8 +501,9 @@

 		nbytes = nentries * ch->msg_size;
 		ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
-							GFP_KERNEL,
-						&ch->remote_msgqueue_base);
+								    GFP_KERNEL,
+								    &ch->
+								    remote_msgqueue_base);
 		if (ch->remote_msgqueue == NULL) {
 			continue;
 		}
@@ -542,7 +525,6 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
 	return xpcNoMemory;
 }

-
 /*
  * Allocate message queues and other stuff associated with a channel.
  *
@@ -554,7 +536,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
 	unsigned long irq_flags;
 	enum xpc_retval ret;

-
 	DBUG_ON(ch->flags & XPC_C_SETUP);

 	if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
@@ -576,7 +557,6 @@
 	return xpcSuccess;
 }

-
 /*
  * Process a connect message from a remote partition.
  *
@@ -588,11 +568,10 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
 {
 	enum xpc_retval ret;

-
 	DBUG_ON(!spin_is_locked(&ch->lock));

 	if (!(ch->flags & XPC_C_OPENREQUEST) ||
-			!(ch->flags & XPC_C_ROPENREQUEST)) {
+	    !(ch->flags & XPC_C_ROPENREQUEST)) {
 		/* nothing more to do for now */
 		return;
 	}
@@ -629,14 +608,13 @@
 	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

 	dev_info(xpc_chan, "channel %d to partition %d connected\n",
-		ch->number, ch->partid);
+		 ch->number, ch->partid);

 	spin_unlock_irqrestore(&ch->lock, *irq_flags);
 	xpc_create_kthreads(ch, 1, 0);
 	spin_lock_irqsave(&ch->lock, *irq_flags);
 }

-
 /*
  * Notify those who wanted to be notified upon delivery of their message.
  */
@@ -647,7 +625,6 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
 	u8 notify_type;
 	s64 get = ch->w_remote_GP.get - 1;

-
 	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {

 		notify = &ch->notify_queue[get % ch->local_nentries];
@@ -660,8 +637,7 @@
 		 */
 		notify_type = notify->type;
 		if (notify_type == 0 ||
-				cmpxchg(&notify->type, notify_type, 0) !=
-								notify_type) {
+		    cmpxchg(&notify->type, notify_type, 0) != notify_type) {
 			continue;
 		}

@@ -672,20 +648,19 @@
 		if (notify->func != NULL) {
 			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
 				"msg_number=%ld, partid=%d, channel=%d\n",
-				(void *) notify, get, ch->partid, ch->number);
+				(void *)notify, get, ch->partid, ch->number);

 			notify->func(reason, ch->partid, ch->number,
-								notify->key);
+				     notify->key);

 			dev_dbg(xpc_chan, "notify->func() returned, "
 				"notify=0x%p, msg_number=%ld, partid=%d, "
-				"channel=%d\n", (void *) notify, get,
+				"channel=%d\n", (void *)notify, get,
 				ch->partid, ch->number);
 		}
 	}
 }

-
 /*
  * Free up message queues and other stuff that were allocated for the specified
  * channel.
@@ -733,7 +708,6 @@ xpc_free_msgqueues(struct xpc_channel *ch)
 	}
 }

-
 /*
  * spin_lock_irqsave() is expected to be held on entry.
  */
@@ -743,7 +717,6 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
 	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

-
 	DBUG_ON(!spin_is_locked(&ch->lock));

 	if (!(ch->flags & XPC_C_DISCONNECTING)) {
@@ -755,11 +728,11 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 	/* make sure all activity has settled down first */

 	if (atomic_read(&ch->kthreads_assigned) > 0 ||
-			atomic_read(&ch->references) > 0) {
+	    atomic_read(&ch->references) > 0) {
 		return;
 	}
 	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
+		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

 	if (part->act_state == XPC_P_DEACTIVATING) {
 		/* can't proceed until the other side disengages from us */
@@ -809,7 +782,7 @@

 	if (channel_was_connected) {
 		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
-			"reason=%d\n", ch->number, ch->partid, ch->reason);
+			 "reason=%d\n", ch->number, ch->partid, ch->reason);
 	}

 	if (ch->flags & XPC_C_WDISCONNECT) {
@@ -820,35 +793,31 @@
 			/* time to take action on any delayed IPI flags */
 			spin_lock(&part->IPI_lock);
 			XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
-							ch->delayed_IPI_flags);
+					  ch->delayed_IPI_flags);
 			spin_unlock(&part->IPI_lock);
 		}
 		ch->delayed_IPI_flags = 0;
 	}
 }

-
 /*
  * Process a change in the channel's remote connection state.
  */
 static void
 xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
-				u8 IPI_flags)
+			  u8 IPI_flags)
 {
 	unsigned long irq_flags;
 	struct xpc_openclose_args *args =
-				&part->remote_openclose_args[ch_number];
+	    &part->remote_openclose_args[ch_number];
 	struct xpc_channel *ch = &part->channels[ch_number];
 	enum xpc_retval reason;

-
-
 	spin_lock_irqsave(&ch->lock, irq_flags);

-again:
+      again:

-	if ((ch->flags & XPC_C_DISCONNECTED) &&
-			(ch->flags & XPC_C_WDISCONNECT)) {
+	if ((ch->flags & XPC_C_DISCONNECTED) && (ch->flags & XPC_C_WDISCONNECT)) {
 		/*
 		 * Delay processing IPI flags until thread waiting disconnect
 		 * has had a chance to see that the channel is disconnected.
@@ -858,7 +827,6 @@ again:
 		return;
 	}

-
 	if (IPI_flags & XPC_IPI_CLOSEREQUEST) {

 		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
@@ -890,13 +858,14 @@
 		if (ch->flags & XPC_C_DISCONNECTED) {
 			if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
 				if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo,
-					ch_number) & XPC_IPI_OPENREQUEST)) {
+						       ch_number) &
+				     XPC_IPI_OPENREQUEST)) {

 					DBUG_ON(ch->delayed_IPI_flags != 0);
 					spin_lock(&part->IPI_lock);
 					XPC_SET_IPI_FLAGS(part->local_IPI_amo,
-							ch_number,
-							XPC_IPI_CLOSEREQUEST);
+							  ch_number,
+							  XPC_IPI_CLOSEREQUEST);
 					spin_unlock(&part->IPI_lock);
 				}
 				spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -937,7 +906,6 @@ again:
 		xpc_process_disconnect(ch, &irq_flags);
 	}

-
 	if (IPI_flags & XPC_IPI_CLOSEREPLY) {

 		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
@@ -953,12 +921,13 @@

 		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
 			if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number)
-					& XPC_IPI_CLOSEREQUEST)) {
+			     & XPC_IPI_CLOSEREQUEST)) {

 				DBUG_ON(ch->delayed_IPI_flags != 0);
 				spin_lock(&part->IPI_lock);
 				XPC_SET_IPI_FLAGS(part->local_IPI_amo,
-						ch_number, XPC_IPI_CLOSEREPLY);
+						  ch_number,
+						  XPC_IPI_CLOSEREPLY);
 				spin_unlock(&part->IPI_lock);
 			}
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -973,7 +942,6 @@ again:
 		}
 	}

-
 	if (IPI_flags & XPC_IPI_OPENREQUEST) {

 		dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
@@ -982,7 +950,7 @@ again:
 			ch->partid, ch->number);

 		if (part->act_state == XPC_P_DEACTIVATING ||
-					(ch->flags & XPC_C_ROPENREQUEST)) {
+		    (ch->flags & XPC_C_ROPENREQUEST)) {
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			return;
 		}
@@ -993,9 +961,9 @@ again:
 			return;
 		}
 		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
-					XPC_C_OPENREQUEST)));
+				       XPC_C_OPENREQUEST)));
 		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
-					XPC_C_OPENREPLY | XPC_C_CONNECTED));
+				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

 		/*
 		 * The meaningful OPENREQUEST connection state fields are:
@@ -1011,11 +979,10 @@ again:
 		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
 		ch->remote_nentries = args->local_nentries;

-
 		if (ch->flags & XPC_C_OPENREQUEST) {
 			if (args->msg_size != ch->msg_size) {
 				XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
-								&irq_flags);
+						       &irq_flags);
 				spin_unlock_irqrestore(&ch->lock, irq_flags);
 				return;
 			}
@@ -1031,7 +998,6 @@ again:
 		xpc_process_connect(ch, &irq_flags);
 	}

-
 	if (IPI_flags & XPC_IPI_OPENREPLY) {

 		dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
@@ -1046,7 +1012,7 @@ again:
 		}
 		if (!(ch->flags & XPC_C_OPENREQUEST)) {
 			XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
-								&irq_flags);
+					       &irq_flags);
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			return;
 		}
@@ -1057,7 +1023,7 @@ again:
 		/*
 		 * The meaningful OPENREPLY connection state fields are:
 		 *      local_msgqueue_pa = physical address of remote
-		 *			    partition's local_msgqueue
+		 *                          partition's local_msgqueue
 		 *      local_nentries = remote partition's local_nentries
 		 *      remote_nentries = remote partition's remote_nentries
 		 */
@@ -1093,7 +1059,6 @@ again:
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
 }

-
 /*
  * Attempt to establish a channel connection to a remote partition.
  */
@@ -1103,7 +1068,6 @@ xpc_connect_channel(struct xpc_channel *ch)
 	unsigned long irq_flags;
 	struct xpc_registration *registration = &xpc_registrations[ch->number];

-
 	if (mutex_trylock(&registration->mutex) == 0) {
 		return xpcRetry;
 	}
@@ -1124,7 +1088,6 @@ xpc_connect_channel(struct xpc_channel *ch)
 		return ch->reason;
 	}

-
 	/* add info from the channel connect registration to the channel */

 	ch->kthreads_assigned_limit = registration->assigned_limit;
@@ -1154,7 +1117,7 @@
 		 */
 		mutex_unlock(&registration->mutex);
 		XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
-							&irq_flags);
+				       &irq_flags);
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
 		return xpcUnequalMsgSizes;
 	}
@@ -1169,7 +1132,6 @@

 	mutex_unlock(&registration->mutex);

-
 	/* initiate the connection */

 	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
@@ -1182,7 +1144,6 @@
 	return xpcSuccess;
 }

-
 /*
  * Clear some of the msg flags in the local message queue.
  */
@@ -1192,16 +1153,15 @@ xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
 	struct xpc_msg *msg;
 	s64 get;

-
 	get = ch->w_remote_GP.get;
 	do {
-		msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
-				(get % ch->local_nentries) * ch->msg_size);
+		msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+					 (get % ch->local_nentries) *
+					 ch->msg_size);
 		msg->flags = 0;
-	} while (++get < (volatile s64) ch->remote_GP.get);
+	} while (++get < (volatile s64)ch->remote_GP.get);
 }

-
 /*
  * Clear some of the msg flags in the remote message queue.
  */
@@ -1211,43 +1171,39 @@ xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
 	struct xpc_msg *msg;
 	s64 put;

-
 	put = ch->w_remote_GP.put;
 	do {
-		msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
-				(put % ch->remote_nentries) * ch->msg_size);
+		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
+					 (put % ch->remote_nentries) *
+					 ch->msg_size);
 		msg->flags = 0;
-	} while (++put < (volatile s64) ch->remote_GP.put);
+	} while (++put < (volatile s64)ch->remote_GP.put);
 }

-
 static void
 xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 {
 	struct xpc_channel *ch = &part->channels[ch_number];
 	int nmsgs_sent;

-
 	ch->remote_GP = part->remote_GPs[ch_number];

-
 	/* See what, if anything, has changed for each connected channel */

 	xpc_msgqueue_ref(ch);

 	if (ch->w_remote_GP.get == ch->remote_GP.get &&
-				ch->w_remote_GP.put == ch->remote_GP.put) {
+	    ch->w_remote_GP.put == ch->remote_GP.put) {
 		/* nothing changed since GPs were last pulled */
 		xpc_msgqueue_deref(ch);
 		return;
 	}

-	if (!(ch->flags & XPC_C_CONNECTED)){
+	if (!(ch->flags & XPC_C_CONNECTED)) {
 		xpc_msgqueue_deref(ch);
 		return;
 	}

-
 	/*
 	 * First check to see if messages recently sent by us have been
 	 * received by the other side. (The remote GET value will have
@@ -1269,7 +1225,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 		 * received and delivered by the other side.
 		 */
 		xpc_notify_senders(ch, xpcMsgDelivered,
-							ch->remote_GP.get);
+				   ch->remote_GP.get);
 	}

 	/*
@@ -1293,7 +1249,6 @@
 		}
 	}

-
 	/*
 	 * Now check for newly sent messages by the other side. (The remote
 	 * PUT value will have changed since we last looked at it.)
@@ -1327,7 +1282,6 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 	xpc_msgqueue_deref(ch);
 }

-
 void
 xpc_process_channel_activity(struct xpc_partition *part)
 {
@@ -1337,7 +1291,6 @@ xpc_process_channel_activity(struct xpc_partition *part)
 	int ch_number;
 	u32 ch_flags;

-
 	IPI_amo = xpc_get_IPI_flags(part);

 	/*
@@ -1350,7 +1303,6 @@ xpc_process_channel_activity(struct xpc_partition *part)
 	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
 		ch = &part->channels[ch_number];

-
 		/*
 		 * Process any open or close related IPI flags, and then deal
 		 * with connecting or disconnecting the channel as required.
@@ -1378,7 +1330,7 @@ xpc_process_channel_activity(struct xpc_partition *part)
 		if (!(ch_flags & XPC_C_CONNECTED)) {
 			if (!(ch_flags & XPC_C_OPENREQUEST)) {
 				DBUG_ON(ch_flags & XPC_C_SETUP);
-				(void) xpc_connect_channel(ch);
+				(void)xpc_connect_channel(ch);
 			} else {
 				spin_lock_irqsave(&ch->lock, irq_flags);
 				xpc_process_connect(ch, &irq_flags);
@@ -1387,7 +1339,6 @@ xpc_process_channel_activity(struct xpc_partition *part)
 			continue;
 		}

-
 		/*
 		 * Process any message related IPI flags, this may involve the
 		 * activation of kthreads to deliver any pending messages sent
@@ -1400,7 +1351,6 @@ xpc_process_channel_activity(struct xpc_partition *part)
 	}
 }

-
 /*
  * XPC's heartbeat code calls this function to inform XPC that a partition is
  * going down. XPC responds by tearing down the XPartition Communication
@@ -1417,7 +1367,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
 	int ch_number;
 	struct xpc_channel *ch;

-
 	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
 		XPC_PARTID(part), reason);

@@ -1426,7 +1375,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
 		return;
 	}

-
 	/* disconnect channels associated with the partition going down */

 	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
@@ -1446,7 +1394,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
 	xpc_part_deref(part);
 }

-
 /*
  * Teardown the infrastructure necessary to support XPartition Communication
  * between the specified remote partition and the local one.
@@ -1456,7 +1403,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
 {
 	partid_t partid = XPC_PARTID(part);

-
 	/*
 	 * We start off by making this partition inaccessible to local
 	 * processes by marking it as no longer setup. Then we make it
@@ -1473,9 +1419,7 @@ xpc_teardown_infrastructure(struct xpc_partition *part)

 	xpc_vars_part[partid].magic = 0;

-
-	free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);
-
+	free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);

 	/*
 	 * Before proceeding with the teardown we have to wait until all
@@ -1483,7 +1427,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
 	 */
 	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

-
 	/* now we can begin tearing down the infrastructure */

 	part->setup_state = XPC_P_TORNDOWN;
@@ -1504,7 +1447,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
 	part->local_IPI_amo_va = NULL;
 }

-
 /*
  * Called by XP at the time of channel connection registration to cause
  * XPC to establish connections to all currently active partitions.
@@ -1516,7 +1458,6 @@ xpc_initiate_connect(int ch_number)
 	struct xpc_partition *part;
 	struct xpc_channel *ch;

-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);

 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
@@ -1535,7 +1476,6 @@ xpc_initiate_connect(int ch_number)
 	}
 }

-
 void
 xpc_connected_callout(struct xpc_channel *ch)
 {
@@ -1546,14 +1486,13 @@ xpc_connected_callout(struct xpc_channel *ch)
 			"partid=%d, channel=%d\n", ch->partid, ch->number);

 		ch->func(xpcConnected, ch->partid, ch->number,
-				(void *) (u64) ch->local_nentries, ch->key);
+			 (void *)(u64)ch->local_nentries, ch->key);

 		dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
 			"partid=%d, channel=%d\n", ch->partid, ch->number);
 	}
 }

-
 /*
  * Called by XP at the time of channel connection unregistration to cause
  * XPC to teardown all current connections for the specified channel.
@@ -1575,7 +1514,6 @@ xpc_initiate_disconnect(int ch_number)
 	struct xpc_partition *part;
 	struct xpc_channel *ch;

-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);

 	/* initiate the channel disconnect for every active partition */
@@ -1592,7 +1530,7 @@ xpc_initiate_disconnect(int ch_number)
 				ch->flags |= XPC_C_WDISCONNECT;

 				XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
-								&irq_flags);
+						       &irq_flags);
 			}

 			spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1605,7 +1543,6 @@ xpc_initiate_disconnect(int ch_number)
 	xpc_disconnect_wait(ch_number);
 }

-
 /*
  * To disconnect a channel, and reflect it back to all who may be waiting.
  *
@@ -1617,11 +1554,10 @@ xpc_initiate_disconnect(int ch_number)
  */
 void
 xpc_disconnect_channel(const int line, struct xpc_channel *ch,
-			enum xpc_retval reason, unsigned long *irq_flags)
+		       enum xpc_retval reason, unsigned long *irq_flags)
 {
 	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

-
 	DBUG_ON(!spin_is_locked(&ch->lock));

 	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
@@ -1637,8 +1573,8 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
 	/* some of these may not have been set */
 	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
-			XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
-			XPC_C_CONNECTING | XPC_C_CONNECTED);
+		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
+		       XPC_C_CONNECTING | XPC_C_CONNECTED);

 	xpc_IPI_send_closerequest(ch, irq_flags);

@@ -1653,7 +1589,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 		wake_up_all(&ch->idle_wq);

 	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-				!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
 		/* start a kthread that will do the xpcDisconnecting callout */
 		xpc_create_kthreads(ch, 1, 1);
 	}
@@ -1666,7 +1602,6 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 	spin_lock_irqsave(&ch->lock, *irq_flags);
 }

-
 void
 xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
 {
@@ -1687,7 +1622,6 @@ xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
 	}
 }

-
 /*
  * Wait for a message entry to become available for the specified channel,
  * but don't wait any longer than 1 jiffy.
@@ -1697,9 +1631,8 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
 {
 	enum xpc_retval ret;

-
 	if (ch->flags & XPC_C_DISCONNECTING) {
-		DBUG_ON(ch->reason == xpcInterrupted);  // >>> Is this true?
+		DBUG_ON(ch->reason == xpcInterrupted);	// >>> Is this true?
 		return ch->reason;
 	}

@@ -1709,7 +1642,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)

 	if (ch->flags & XPC_C_DISCONNECTING) {
 		ret = ch->reason;
-		DBUG_ON(ch->reason == xpcInterrupted);  // >>> Is this true?
+		DBUG_ON(ch->reason == xpcInterrupted);	// >>> Is this true?
 	} else if (ret == 0) {
 		ret = xpcTimeout;
 	} else {
@@ -1719,20 +1652,18 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
 	return ret;
 }

-
 /*
  * Allocate an entry for a message from the message queue associated with the
  * specified channel.
  */
 static enum xpc_retval
 xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
-			struct xpc_msg **address_of_msg)
+		 struct xpc_msg **address_of_msg)
 {
 	struct xpc_msg *msg;
 	enum xpc_retval ret;
 	s64 put;

-
 	/* this reference will be dropped in xpc_send_msg() */
 	xpc_msgqueue_ref(ch);

@@ -1745,7 +1676,6 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
 		return xpcNotConnected;
 	}

-
 	/*
 	 * Get the next available message entry from the local message queue.
 	 * If none are available, we'll make sure that we grab the latest
@@ -1755,25 +1685,23 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1755 1685
1756 while (1) { 1686 while (1) {
1757 1687
1758 put = (volatile s64) ch->w_local_GP.put; 1688 put = (volatile s64)ch->w_local_GP.put;
1759 if (put - (volatile s64) ch->w_remote_GP.get < 1689 if (put - (volatile s64)ch->w_remote_GP.get <
1760 ch->local_nentries) { 1690 ch->local_nentries) {
1761 1691
1762 /* There are available message entries. We need to try 1692 /* There are available message entries. We need to try
1763 * to secure one for ourselves. We'll do this by trying 1693 * to secure one for ourselves. We'll do this by trying
1764 * to increment w_local_GP.put as long as someone else 1694 * to increment w_local_GP.put as long as someone else
1765 * doesn't beat us to it. If they do, we'll have to 1695 * doesn't beat us to it. If they do, we'll have to
1766 * try again. 1696 * try again.
1767 */ 1697 */
1768 if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == 1698 if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == put) {
1769 put) {
1770 /* we got the entry referenced by put */ 1699 /* we got the entry referenced by put */
1771 break; 1700 break;
1772 } 1701 }
1773 continue; /* try again */ 1702 continue; /* try again */
1774 } 1703 }
1775 1704
1776
1777 /* 1705 /*
1778 * There aren't any available msg entries at this time. 1706 * There aren't any available msg entries at this time.
1779 * 1707 *
@@ -1799,25 +1727,22 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1799 } 1727 }
1800 } 1728 }
1801 1729
1802
1803 /* get the message's address and initialize it */ 1730 /* get the message's address and initialize it */
1804 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + 1731 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
1805 (put % ch->local_nentries) * ch->msg_size); 1732 (put % ch->local_nentries) * ch->msg_size);
1806
1807 1733
1808 DBUG_ON(msg->flags != 0); 1734 DBUG_ON(msg->flags != 0);
1809 msg->number = put; 1735 msg->number = put;
1810 1736
1811 dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, " 1737 dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
1812 "msg_number=%ld, partid=%d, channel=%d\n", put + 1, 1738 "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
1813 (void *) msg, msg->number, ch->partid, ch->number); 1739 (void *)msg, msg->number, ch->partid, ch->number);
1814 1740
1815 *address_of_msg = msg; 1741 *address_of_msg = msg;
1816 1742
1817 return xpcSuccess; 1743 return xpcSuccess;
1818} 1744}
1819 1745
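The cmpxchg loop above is a standard lock-free slot reservation. Condensed to its core (a sketch; the return-code convention is ours, field names as in the diff):

/* Claim the entry at 'put' by atomically bumping the cached Put value;
 * whichever CPU's cmpxchg succeeds owns that slot. */
static int sketch_claim_slot(struct xpc_channel *ch, s64 *slot)
{
	s64 put;

	do {
		put = ch->w_local_GP.put;
		if (put - ch->w_remote_GP.get >= ch->local_nentries)
			return -EAGAIN;	/* ring full; caller waits, retries */
	} while (cmpxchg(&ch->w_local_GP.put, put, put + 1) != put);

	*slot = put % ch->local_nentries; /* index into local_msgqueue */
	return 0;
}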
1820
1821/* 1746/*
1822 * Allocate an entry for a message from the message queue associated with the 1747 * Allocate an entry for a message from the message queue associated with the
1823 * specified channel. NOTE that this routine can sleep waiting for a message 1748 * specified channel. NOTE that this routine can sleep waiting for a message
@@ -1838,7 +1763,6 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
1838 enum xpc_retval ret = xpcUnknownReason; 1763 enum xpc_retval ret = xpcUnknownReason;
1839 struct xpc_msg *msg = NULL; 1764 struct xpc_msg *msg = NULL;
1840 1765
1841
1842 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 1766 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
1843 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 1767 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
1844 1768
@@ -1856,7 +1780,6 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
1856 return ret; 1780 return ret;
1857} 1781}
1858 1782
1859
1860/* 1783/*
1861 * Now we actually send the messages that are ready to be sent by advancing 1784 * Now we actually send the messages that are ready to be sent by advancing
1862 * the local message queue's Put value and then send an IPI to the recipient 1785 * the local message queue's Put value and then send an IPI to the recipient
@@ -1869,16 +1792,16 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1869 s64 put = initial_put + 1; 1792 s64 put = initial_put + 1;
1870 int send_IPI = 0; 1793 int send_IPI = 0;
1871 1794
1872
1873 while (1) { 1795 while (1) {
1874 1796
1875 while (1) { 1797 while (1) {
1876 if (put == (volatile s64) ch->w_local_GP.put) { 1798 if (put == (volatile s64)ch->w_local_GP.put) {
1877 break; 1799 break;
1878 } 1800 }
1879 1801
1880 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + 1802 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
1881 (put % ch->local_nentries) * ch->msg_size); 1803 (put % ch->local_nentries) *
1804 ch->msg_size);
1882 1805
1883 if (!(msg->flags & XPC_M_READY)) { 1806 if (!(msg->flags & XPC_M_READY)) {
1884 break; 1807 break;
@@ -1893,9 +1816,9 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1893 } 1816 }
1894 1817
1895 if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) != 1818 if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
1896 initial_put) { 1819 initial_put) {
1897 /* someone else beat us to it */ 1820 /* someone else beat us to it */
1898 DBUG_ON((volatile s64) ch->local_GP->put < initial_put); 1821 DBUG_ON((volatile s64)ch->local_GP->put < initial_put);
1899 break; 1822 break;
1900 } 1823 }
1901 1824
@@ -1919,7 +1842,6 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1919 } 1842 }
1920} 1843}
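The pointer arithmetic in this function, base address plus (sequence number mod entries) times message size, recurs in xpc_allocate_msg(), xpc_pull_remote_msg() and xpc_acknowledge_msgs(). As a hypothetical helper it would read:

/* Hypothetical helper (not in the driver): entry N of a circular
 * message queue lives at base + (N % nentries) * msg_size. */
static inline struct xpc_msg *
sketch_msg_slot(void *base, s64 n, int nentries, size_t msg_size)
{
	return (struct xpc_msg *)((u64)base + (n % nentries) * msg_size);
}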
1921 1844
1922
1923/* 1845/*
1924 * Common code that does the actual sending of the message by advancing the 1846 * Common code that does the actual sending of the message by advancing the
1925 * local message queue's Put value and sends an IPI to the partition the 1847 * local message queue's Put value and sends an IPI to the partition the
@@ -1927,16 +1849,15 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1927 */ 1849 */
1928static enum xpc_retval 1850static enum xpc_retval
1929xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, 1851xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1930 xpc_notify_func func, void *key) 1852 xpc_notify_func func, void *key)
1931{ 1853{
1932 enum xpc_retval ret = xpcSuccess; 1854 enum xpc_retval ret = xpcSuccess;
1933 struct xpc_notify *notify = notify; 1855 struct xpc_notify *notify = notify;
1934 s64 put, msg_number = msg->number; 1856 s64 put, msg_number = msg->number;
1935 1857
1936
1937 DBUG_ON(notify_type == XPC_N_CALL && func == NULL); 1858 DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
1938 DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) != 1859 DBUG_ON((((u64)msg - (u64)ch->local_msgqueue) / ch->msg_size) !=
1939 msg_number % ch->local_nentries); 1860 msg_number % ch->local_nentries);
1940 DBUG_ON(msg->flags & XPC_M_READY); 1861 DBUG_ON(msg->flags & XPC_M_READY);
1941 1862
1942 if (ch->flags & XPC_C_DISCONNECTING) { 1863 if (ch->flags & XPC_C_DISCONNECTING) {
@@ -1970,7 +1891,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1970 * the notify entry. 1891 * the notify entry.
1971 */ 1892 */
1972 if (cmpxchg(&notify->type, notify_type, 0) == 1893 if (cmpxchg(&notify->type, notify_type, 0) ==
1973 notify_type) { 1894 notify_type) {
1974 atomic_dec(&ch->n_to_notify); 1895 atomic_dec(&ch->n_to_notify);
1975 ret = ch->reason; 1896 ret = ch->reason;
1976 } 1897 }
@@ -2001,7 +1922,6 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
2001 return ret; 1922 return ret;
2002} 1923}
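The cmpxchg on notify->type a few lines up resolves a race between the sender, which wants to cancel its notify entry when the channel disconnects, and the notification path, which wants to fire the callback: only the side whose cmpxchg observes the old type value may touch the entry afterwards. In isolation:

/* Sketch of the cancellation race above: exactly one side sees
 * notify_type and wins ownership of the entry. */
if (cmpxchg(&notify->type, notify_type, 0) == notify_type) {
	/* we cancelled it ourselves; undo our accounting */
	atomic_dec(&ch->n_to_notify);
	ret = ch->reason;
}
/* else: the notification path claimed it and will run func() */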
2003 1924
2004
2005/* 1925/*
2006 * Send a message previously allocated using xpc_initiate_allocate() on the 1926 * Send a message previously allocated using xpc_initiate_allocate() on the
2007 * specified channel connected to the specified partition. 1927 * specified channel connected to the specified partition.
@@ -2029,8 +1949,7 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
2029 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 1949 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2030 enum xpc_retval ret; 1950 enum xpc_retval ret;
2031 1951
2032 1952 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
2033 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
2034 partid, ch_number); 1953 partid, ch_number);
2035 1954
2036 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 1955 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -2042,7 +1961,6 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
2042 return ret; 1961 return ret;
2043} 1962}
2044 1963
2045
2046/* 1964/*
2047 * Send a message previously allocated using xpc_initiate_allocate on the 1965 * Send a message previously allocated using xpc_initiate_allocate on the
2048 * specified channel connected to the specified partition. 1966 * specified channel connected to the specified partition.
@@ -2075,14 +1993,13 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
2075 */ 1993 */
2076enum xpc_retval 1994enum xpc_retval
2077xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, 1995xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
2078 xpc_notify_func func, void *key) 1996 xpc_notify_func func, void *key)
2079{ 1997{
2080 struct xpc_partition *part = &xpc_partitions[partid]; 1998 struct xpc_partition *part = &xpc_partitions[partid];
2081 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 1999 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2082 enum xpc_retval ret; 2000 enum xpc_retval ret;
2083 2001
2084 2002 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
2085 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
2086 partid, ch_number); 2003 partid, ch_number);
2087 2004
2088 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 2005 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -2091,11 +2008,10 @@ xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
2091 DBUG_ON(func == NULL); 2008 DBUG_ON(func == NULL);
2092 2009
2093 ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL, 2010 ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
2094 func, key); 2011 func, key);
2095 return ret; 2012 return ret;
2096} 2013}
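A hypothetical caller-side flow for the two send paths above; XPC_WAIT and the four-argument xpc_notify_func signature are assumed from xp.h, and the callback body is ours:

/* Called once the peer has received the message (or the send failed). */
static void my_send_done(enum xpc_retval reason, partid_t partid,
			 int ch_number, void *key)
{
	/* safe to reuse the payload's message entry now */
}

	void *payload;

	if (xpc_initiate_allocate(partid, ch_number, XPC_WAIT,
				  &payload) == xpcSuccess) {
		/* ... fill in payload ... */
		(void)xpc_initiate_send_notify(partid, ch_number, payload,
					       my_send_done, NULL);
	}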
2097 2014
2098
2099static struct xpc_msg * 2015static struct xpc_msg *
2100xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) 2016xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2101{ 2017{
@@ -2105,7 +2021,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2105 u64 msg_offset; 2021 u64 msg_offset;
2106 enum xpc_retval ret; 2022 enum xpc_retval ret;
2107 2023
2108
2109 if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { 2024 if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
2110 /* we were interrupted by a signal */ 2025 /* we were interrupted by a signal */
2111 return NULL; 2026 return NULL;
@@ -2118,22 +2033,22 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2118 msg_index = ch->next_msg_to_pull % ch->remote_nentries; 2033 msg_index = ch->next_msg_to_pull % ch->remote_nentries;
2119 2034
2120 DBUG_ON(ch->next_msg_to_pull >= 2035 DBUG_ON(ch->next_msg_to_pull >=
2121 (volatile s64) ch->w_remote_GP.put); 2036 (volatile s64)ch->w_remote_GP.put);
2122 nmsgs = (volatile s64) ch->w_remote_GP.put - 2037 nmsgs = (volatile s64)ch->w_remote_GP.put -
2123 ch->next_msg_to_pull; 2038 ch->next_msg_to_pull;
2124 if (msg_index + nmsgs > ch->remote_nentries) { 2039 if (msg_index + nmsgs > ch->remote_nentries) {
2125 /* ignore the ones that wrap the msg queue for now */ 2040 /* ignore the ones that wrap the msg queue for now */
2126 nmsgs = ch->remote_nentries - msg_index; 2041 nmsgs = ch->remote_nentries - msg_index;
2127 } 2042 }
2128 2043
2129 msg_offset = msg_index * ch->msg_size; 2044 msg_offset = msg_index * ch->msg_size;
2130 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + 2045 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
2131 msg_offset); 2046 remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
2132 remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa + 2047 msg_offset);
2133 msg_offset);
2134 2048
2135 if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg, 2049 if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
2136 nmsgs * ch->msg_size)) != xpcSuccess) { 2050 nmsgs * ch->msg_size)) !=
2051 xpcSuccess) {
2137 2052
2138 dev_dbg(xpc_chan, "failed to pull %d msgs starting with" 2053 dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
2139 " msg %ld from partition %d, channel=%d, " 2054 " msg %ld from partition %d, channel=%d, "
@@ -2146,7 +2061,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2146 return NULL; 2061 return NULL;
2147 } 2062 }
2148 2063
2149 mb(); /* >>> this may not be needed, we're not sure */ 2064 mb(); /* >>> this may not be needed, we're not sure */
2150 2065
2151 ch->next_msg_to_pull += nmsgs; 2066 ch->next_msg_to_pull += nmsgs;
2152 } 2067 }
@@ -2155,12 +2070,11 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2155 2070
2156 /* return the message we were looking for */ 2071 /* return the message we were looking for */
2157 msg_offset = (get % ch->remote_nentries) * ch->msg_size; 2072 msg_offset = (get % ch->remote_nentries) * ch->msg_size;
2158 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset); 2073 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
2159 2074
2160 return msg; 2075 return msg;
2161} 2076}
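The clamp in the middle of this function keeps a single cacheline pull from crossing the end of the circular queue; the wrapped tail is simply fetched on a later call. Reduced to the index math:

/* nmsgs is capped so msg_index + nmsgs never passes the queue end. */
s64 msg_index = ch->next_msg_to_pull % ch->remote_nentries;
s64 nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull;

if (msg_index + nmsgs > ch->remote_nentries)
	nmsgs = ch->remote_nentries - msg_index; /* stop at the end */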
2162 2077
2163
2164/* 2078/*
2165 * Get a message to be delivered. 2079 * Get a message to be delivered.
2166 */ 2080 */
@@ -2170,14 +2084,13 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
2170 struct xpc_msg *msg = NULL; 2084 struct xpc_msg *msg = NULL;
2171 s64 get; 2085 s64 get;
2172 2086
2173
2174 do { 2087 do {
2175 if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) { 2088 if ((volatile u32)ch->flags & XPC_C_DISCONNECTING) {
2176 break; 2089 break;
2177 } 2090 }
2178 2091
2179 get = (volatile s64) ch->w_local_GP.get; 2092 get = (volatile s64)ch->w_local_GP.get;
2180 if (get == (volatile s64) ch->w_remote_GP.put) { 2093 if (get == (volatile s64)ch->w_remote_GP.put) {
2181 break; 2094 break;
2182 } 2095 }
2183 2096
@@ -2186,7 +2099,7 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
2186 * by trying to increment w_local_GP.get and hope that no one 2099 * by trying to increment w_local_GP.get and hope that no one
2187 * else beats us to it. If they do, we'll simply have 2100

2188 * to try again for the next one. 2101 * to try again for the next one.
2189 */ 2102 */
2190 2103
2191 if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { 2104 if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
2192 /* we got the entry referenced by get */ 2105 /* we got the entry referenced by get */
@@ -2211,7 +2124,6 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
2211 return msg; 2124 return msg;
2212} 2125}
2213 2126
2214
2215/* 2127/*
2216 * Deliver a message to its intended recipient. 2128 * Deliver a message to its intended recipient.
2217 */ 2129 */
@@ -2220,7 +2132,6 @@ xpc_deliver_msg(struct xpc_channel *ch)
2220{ 2132{
2221 struct xpc_msg *msg; 2133 struct xpc_msg *msg;
2222 2134
2223
2224 if ((msg = xpc_get_deliverable_msg(ch)) != NULL) { 2135 if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
2225 2136
2226 /* 2137 /*
@@ -2235,16 +2146,16 @@ xpc_deliver_msg(struct xpc_channel *ch)
2235 if (ch->func != NULL) { 2146 if (ch->func != NULL) {
2236 dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, " 2147 dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
2237 "msg_number=%ld, partid=%d, channel=%d\n", 2148 "msg_number=%ld, partid=%d, channel=%d\n",
2238 (void *) msg, msg->number, ch->partid, 2149 (void *)msg, msg->number, ch->partid,
2239 ch->number); 2150 ch->number);
2240 2151
2241 /* deliver the message to its intended recipient */ 2152 /* deliver the message to its intended recipient */
2242 ch->func(xpcMsgReceived, ch->partid, ch->number, 2153 ch->func(xpcMsgReceived, ch->partid, ch->number,
2243 &msg->payload, ch->key); 2154 &msg->payload, ch->key);
2244 2155
2245 dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " 2156 dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
2246 "msg_number=%ld, partid=%d, channel=%d\n", 2157 "msg_number=%ld, partid=%d, channel=%d\n",
2247 (void *) msg, msg->number, ch->partid, 2158 (void *)msg, msg->number, ch->partid,
2248 ch->number); 2159 ch->number);
2249 } 2160 }
2250 2161
@@ -2252,7 +2163,6 @@ xpc_deliver_msg(struct xpc_channel *ch)
2252 } 2163 }
2253} 2164}
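For context, a hypothetical recipient of the ch->func() callout above; the signature mirrors the call site, and the acknowledgement pairs with the xpc_initiate_received() shown further down:

/* Hypothetical channel function: consume the payload, then hand the
 * message queue entry back via xpc_initiate_received(). */
static void
my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
		void *payload, void *key)
{
	if (reason != xpcMsgReceived)
		return;		/* connect/disconnect callouts, etc. */

	/* ... process *payload ... */
	xpc_initiate_received(partid, ch_number, payload);
}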
2254 2165
2255
2256/* 2166/*
2257 * Now we actually acknowledge the messages that have been delivered and ack'd 2167 * Now we actually acknowledge the messages that have been delivered and ack'd
2258 * by advancing the cached remote message queue's Get value and if requested 2168 * by advancing the cached remote message queue's Get value and if requested
@@ -2265,16 +2175,16 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2265 s64 get = initial_get + 1; 2175 s64 get = initial_get + 1;
2266 int send_IPI = 0; 2176 int send_IPI = 0;
2267 2177
2268
2269 while (1) { 2178 while (1) {
2270 2179
2271 while (1) { 2180 while (1) {
2272 if (get == (volatile s64) ch->w_local_GP.get) { 2181 if (get == (volatile s64)ch->w_local_GP.get) {
2273 break; 2182 break;
2274 } 2183 }
2275 2184
2276 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + 2185 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
2277 (get % ch->remote_nentries) * ch->msg_size); 2186 (get % ch->remote_nentries) *
2187 ch->msg_size);
2278 2188
2279 if (!(msg->flags & XPC_M_DONE)) { 2189 if (!(msg->flags & XPC_M_DONE)) {
2280 break; 2190 break;
@@ -2290,10 +2200,9 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2290 } 2200 }
2291 2201
2292 if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) != 2202 if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
2293 initial_get) { 2203 initial_get) {
2294 /* someone else beat us to it */ 2204 /* someone else beat us to it */
2295 DBUG_ON((volatile s64) ch->local_GP->get <= 2205 DBUG_ON((volatile s64)ch->local_GP->get <= initial_get);
2296 initial_get);
2297 break; 2206 break;
2298 } 2207 }
2299 2208
@@ -2317,7 +2226,6 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2317 } 2226 }
2318} 2227}
2319 2228
2320
2321/* 2229/*
2322 * Acknowledge receipt of a delivered message. 2230 * Acknowledge receipt of a delivered message.
2323 * 2231 *
@@ -2343,17 +2251,16 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload)
2343 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 2251 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2344 s64 get, msg_number = msg->number; 2252 s64 get, msg_number = msg->number;
2345 2253
2346
2347 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 2254 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
2348 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 2255 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
2349 2256
2350 ch = &part->channels[ch_number]; 2257 ch = &part->channels[ch_number];
2351 2258
2352 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", 2259 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
2353 (void *) msg, msg_number, ch->partid, ch->number); 2260 (void *)msg, msg_number, ch->partid, ch->number);
2354 2261
2355 DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) != 2262 DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) !=
2356 msg_number % ch->remote_nentries); 2263 msg_number % ch->remote_nentries);
2357 DBUG_ON(msg->flags & XPC_M_DONE); 2264 DBUG_ON(msg->flags & XPC_M_DONE);
2358 2265
2359 msg->flags |= XPC_M_DONE; 2266 msg->flags |= XPC_M_DONE;
@@ -2376,4 +2283,3 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload)
2376 /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */ 2283 /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
2377 xpc_msgqueue_deref(ch); 2284 xpc_msgqueue_deref(ch);
2378} 2285}
2379
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index bdb2cf1fcbcc..d81a2dd787ac 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -6,7 +6,6 @@
6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * Cross Partition Communication (XPC) support - standard version. 10 * Cross Partition Communication (XPC) support - standard version.
12 * 11 *
@@ -44,7 +43,6 @@
44 * 43 *
45 */ 44 */
46 45
47
48#include <linux/kernel.h> 46#include <linux/kernel.h>
49#include <linux/module.h> 47#include <linux/module.h>
50#include <linux/init.h> 48#include <linux/init.h>
@@ -61,7 +59,6 @@
61#include <asm/uaccess.h> 59#include <asm/uaccess.h>
62#include "xpc.h" 60#include "xpc.h"
63 61
64
65/* define two XPC debug device structures to be used with dev_dbg() et al */ 62/* define two XPC debug device structures to be used with dev_dbg() et al */
66 63
67struct device_driver xpc_dbg_name = { 64struct device_driver xpc_dbg_name = {
@@ -81,10 +78,8 @@ struct device xpc_chan_dbg_subname = {
81struct device *xpc_part = &xpc_part_dbg_subname; 78struct device *xpc_part = &xpc_part_dbg_subname;
82struct device *xpc_chan = &xpc_chan_dbg_subname; 79struct device *xpc_chan = &xpc_chan_dbg_subname;
83 80
84
85static int xpc_kdebug_ignore; 81static int xpc_kdebug_ignore;
86 82
87
88/* systune related variables for /proc/sys directories */ 83/* systune related variables for /proc/sys directories */
89 84
90static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; 85static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
@@ -101,56 +96,51 @@ static int xpc_disengage_request_max_timelimit = 120;
101 96
102static ctl_table xpc_sys_xpc_hb_dir[] = { 97static ctl_table xpc_sys_xpc_hb_dir[] = {
103 { 98 {
104 .ctl_name = CTL_UNNUMBERED, 99 .ctl_name = CTL_UNNUMBERED,
105 .procname = "hb_interval", 100 .procname = "hb_interval",
106 .data = &xpc_hb_interval, 101 .data = &xpc_hb_interval,
107 .maxlen = sizeof(int), 102 .maxlen = sizeof(int),
108 .mode = 0644, 103 .mode = 0644,
109 .proc_handler = &proc_dointvec_minmax, 104 .proc_handler = &proc_dointvec_minmax,
110 .strategy = &sysctl_intvec, 105 .strategy = &sysctl_intvec,
111 .extra1 = &xpc_hb_min_interval, 106 .extra1 = &xpc_hb_min_interval,
112 .extra2 = &xpc_hb_max_interval 107 .extra2 = &xpc_hb_max_interval},
113 },
114 { 108 {
115 .ctl_name = CTL_UNNUMBERED, 109 .ctl_name = CTL_UNNUMBERED,
116 .procname = "hb_check_interval", 110 .procname = "hb_check_interval",
117 .data = &xpc_hb_check_interval, 111 .data = &xpc_hb_check_interval,
118 .maxlen = sizeof(int), 112 .maxlen = sizeof(int),
119 .mode = 0644, 113 .mode = 0644,
120 .proc_handler = &proc_dointvec_minmax, 114 .proc_handler = &proc_dointvec_minmax,
121 .strategy = &sysctl_intvec, 115 .strategy = &sysctl_intvec,
122 .extra1 = &xpc_hb_check_min_interval, 116 .extra1 = &xpc_hb_check_min_interval,
123 .extra2 = &xpc_hb_check_max_interval 117 .extra2 = &xpc_hb_check_max_interval},
124 },
125 {} 118 {}
126}; 119};
127static ctl_table xpc_sys_xpc_dir[] = { 120static ctl_table xpc_sys_xpc_dir[] = {
128 { 121 {
129 .ctl_name = CTL_UNNUMBERED, 122 .ctl_name = CTL_UNNUMBERED,
130 .procname = "hb", 123 .procname = "hb",
131 .mode = 0555, 124 .mode = 0555,
132 .child = xpc_sys_xpc_hb_dir 125 .child = xpc_sys_xpc_hb_dir},
133 },
134 { 126 {
135 .ctl_name = CTL_UNNUMBERED, 127 .ctl_name = CTL_UNNUMBERED,
136 .procname = "disengage_request_timelimit", 128 .procname = "disengage_request_timelimit",
137 .data = &xpc_disengage_request_timelimit, 129 .data = &xpc_disengage_request_timelimit,
138 .maxlen = sizeof(int), 130 .maxlen = sizeof(int),
139 .mode = 0644, 131 .mode = 0644,
140 .proc_handler = &proc_dointvec_minmax, 132 .proc_handler = &proc_dointvec_minmax,
141 .strategy = &sysctl_intvec, 133 .strategy = &sysctl_intvec,
142 .extra1 = &xpc_disengage_request_min_timelimit, 134 .extra1 = &xpc_disengage_request_min_timelimit,
143 .extra2 = &xpc_disengage_request_max_timelimit 135 .extra2 = &xpc_disengage_request_max_timelimit},
144 },
145 {} 136 {}
146}; 137};
147static ctl_table xpc_sys_dir[] = { 138static ctl_table xpc_sys_dir[] = {
148 { 139 {
149 .ctl_name = CTL_UNNUMBERED, 140 .ctl_name = CTL_UNNUMBERED,
150 .procname = "xpc", 141 .procname = "xpc",
151 .mode = 0555, 142 .mode = 0555,
152 .child = xpc_sys_xpc_dir 143 .child = xpc_sys_xpc_dir},
153 },
154 {} 144 {}
155}; 145};
156static struct ctl_table_header *xpc_sysctl; 146static struct ctl_table_header *xpc_sysctl;
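Given the procname fields above, these tables surface as /proc/sys/xpc/hb/hb_interval, /proc/sys/xpc/hb/hb_check_interval and /proc/sys/xpc/disengage_request_timelimit once registered. The registration call sites are not in this hunk, but the usual pairing in this era is:

/* typically done at module init and exit, respectively */
xpc_sysctl = register_sysctl_table(xpc_sys_dir);
if (xpc_sysctl)
	unregister_sysctl_table(xpc_sysctl);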
@@ -172,13 +162,10 @@ static DECLARE_COMPLETION(xpc_hb_checker_exited);
172/* notification that the xpc_discovery thread has exited */ 162/* notification that the xpc_discovery thread has exited */
173static DECLARE_COMPLETION(xpc_discovery_exited); 163static DECLARE_COMPLETION(xpc_discovery_exited);
174 164
175
176static struct timer_list xpc_hb_timer; 165static struct timer_list xpc_hb_timer;
177 166
178
179static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); 167static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
180 168
181
182static int xpc_system_reboot(struct notifier_block *, unsigned long, void *); 169static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
183static struct notifier_block xpc_reboot_notifier = { 170static struct notifier_block xpc_reboot_notifier = {
184 .notifier_call = xpc_system_reboot, 171 .notifier_call = xpc_system_reboot,
@@ -189,25 +176,22 @@ static struct notifier_block xpc_die_notifier = {
189 .notifier_call = xpc_system_die, 176 .notifier_call = xpc_system_die,
190}; 177};
191 178
192
193/* 179/*
194 * Timer function to enforce the timelimit on the partition disengage request. 180 * Timer function to enforce the timelimit on the partition disengage request.
195 */ 181 */
196static void 182static void
197xpc_timeout_partition_disengage_request(unsigned long data) 183xpc_timeout_partition_disengage_request(unsigned long data)
198{ 184{
199 struct xpc_partition *part = (struct xpc_partition *) data; 185 struct xpc_partition *part = (struct xpc_partition *)data;
200
201 186
202 DBUG_ON(time_before(jiffies, part->disengage_request_timeout)); 187 DBUG_ON(time_before(jiffies, part->disengage_request_timeout));
203 188
204 (void) xpc_partition_disengaged(part); 189 (void)xpc_partition_disengaged(part);
205 190
206 DBUG_ON(part->disengage_request_timeout != 0); 191 DBUG_ON(part->disengage_request_timeout != 0);
207 DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0); 192 DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
208} 193}
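A sketch of how such a one-shot timer is armed with the partition as its payload; the timer field name is an assumption, using the 2008-era init_timer()/add_timer() API:

/* Hypothetical arming of the disengage-request timeout above. */
init_timer(&part->disengage_request_timer);
part->disengage_request_timer.function =
		xpc_timeout_partition_disengage_request;
part->disengage_request_timer.data = (unsigned long)part;
part->disengage_request_timer.expires = jiffies +
		xpc_disengage_request_timelimit * HZ;
add_timer(&part->disengage_request_timer);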
209 194
210
211/* 195/*
212 * Notify the heartbeat check thread that an IRQ has been received. 196 * Notify the heartbeat check thread that an IRQ has been received.
213 */ 197 */
@@ -219,7 +203,6 @@ xpc_act_IRQ_handler(int irq, void *dev_id)
219 return IRQ_HANDLED; 203 return IRQ_HANDLED;
220} 204}
221 205
222
223/* 206/*
224 * Timer to produce the heartbeat. The timer structure's function is 207
225 * already set when this is initially called. A tunable is used to 208 * already set when this is initially called. A tunable is used to
@@ -238,7 +221,6 @@ xpc_hb_beater(unsigned long dummy)
238 add_timer(&xpc_hb_timer); 221 add_timer(&xpc_hb_timer);
239} 222}
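The beater is self-rearming: each expiry bumps the heartbeat and requeues the same timer_list one tunable interval out. In outline (only the tail of the function appears in the hunk; the heartbeat field name is an assumption):

/* Sketch of xpc_hb_beater()'s overall shape. */
static void sketch_hb_beater(unsigned long dummy)
{
	xpc_vars->heartbeat++;		/* let peers see we're alive */
	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}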
240 223
241
242/* 224/*
243 * This thread is responsible for nearly all of the partition 225 * This thread is responsible for nearly all of the partition
244 * activation/deactivation. 226 * activation/deactivation.
@@ -248,8 +230,7 @@ xpc_hb_checker(void *ignore)
248{ 230{
249 int last_IRQ_count = 0; 231 int last_IRQ_count = 0;
250 int new_IRQ_count; 232 int new_IRQ_count;
251 int force_IRQ=0; 233 int force_IRQ = 0;
252
253 234
254 /* this thread was marked active by xpc_hb_init() */ 235 /* this thread was marked active by xpc_hb_init() */
255 236
@@ -261,14 +242,13 @@ xpc_hb_checker(void *ignore)
261 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); 242 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
262 xpc_hb_beater(0); 243 xpc_hb_beater(0);
263 244
264 while (!(volatile int) xpc_exiting) { 245 while (!(volatile int)xpc_exiting) {
265 246
266 dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " 247 dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
267 "been received\n", 248 "been received\n",
268 (int) (xpc_hb_check_timeout - jiffies), 249 (int)(xpc_hb_check_timeout - jiffies),
269 atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); 250 atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
270 251
271
272 /* checking of remote heartbeats is skewed by IRQ handling */ 252 /* checking of remote heartbeats is skewed by IRQ handling */
273 if (time_after_eq(jiffies, xpc_hb_check_timeout)) { 253 if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
274 dev_dbg(xpc_part, "checking remote heartbeats\n"); 254 dev_dbg(xpc_part, "checking remote heartbeats\n");
@@ -282,7 +262,6 @@ xpc_hb_checker(void *ignore)
282 force_IRQ = 1; 262 force_IRQ = 1;
283 } 263 }
284 264
285
286 /* check for outstanding IRQs */ 265 /* check for outstanding IRQs */
287 new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); 266 new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
288 if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { 267 if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
@@ -294,30 +273,30 @@ xpc_hb_checker(void *ignore)
294 last_IRQ_count += xpc_identify_act_IRQ_sender(); 273 last_IRQ_count += xpc_identify_act_IRQ_sender();
295 if (last_IRQ_count < new_IRQ_count) { 274 if (last_IRQ_count < new_IRQ_count) {
296 /* retry once to help avoid missing AMO */ 275 /* retry once to help avoid missing AMO */
297 (void) xpc_identify_act_IRQ_sender(); 276 (void)xpc_identify_act_IRQ_sender();
298 } 277 }
299 last_IRQ_count = new_IRQ_count; 278 last_IRQ_count = new_IRQ_count;
300 279
301 xpc_hb_check_timeout = jiffies + 280 xpc_hb_check_timeout = jiffies +
302 (xpc_hb_check_interval * HZ); 281 (xpc_hb_check_interval * HZ);
303 } 282 }
304 283
305 /* wait for IRQ or timeout */ 284 /* wait for IRQ or timeout */
306 (void) wait_event_interruptible(xpc_act_IRQ_wq, 285 (void)wait_event_interruptible(xpc_act_IRQ_wq,
307 (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) || 286 (last_IRQ_count <
308 time_after_eq(jiffies, xpc_hb_check_timeout) || 287 atomic_read(&xpc_act_IRQ_rcvd)
309 (volatile int) xpc_exiting)); 288 || time_after_eq(jiffies,
289 xpc_hb_check_timeout) ||
290 (volatile int)xpc_exiting));
310 } 291 }
311 292
312 dev_dbg(xpc_part, "heartbeat checker is exiting\n"); 293 dev_dbg(xpc_part, "heartbeat checker is exiting\n");
313 294
314
315 /* mark this thread as having exited */ 295 /* mark this thread as having exited */
316 complete(&xpc_hb_checker_exited); 296 complete(&xpc_hb_checker_exited);
317 return 0; 297 return 0;
318} 298}
319 299
320
321/* 300/*
322 * This thread will attempt to discover other partitions to activate 301 * This thread will attempt to discover other partitions to activate
323 * based on info provided by SAL. This new thread is short lived and 302 * based on info provided by SAL. This new thread is short lived and
@@ -337,7 +316,6 @@ xpc_initiate_discovery(void *ignore)
337 return 0; 316 return 0;
338} 317}
339 318
340
341/* 319/*
342 * Establish first contact with the remote partition. This involves pulling 320
343 * the XPC per partition variables from the remote partition and waiting for 321 * the XPC per partition variables from the remote partition and waiting for
@@ -348,7 +326,6 @@ xpc_make_first_contact(struct xpc_partition *part)
348{ 326{
349 enum xpc_retval ret; 327 enum xpc_retval ret;
350 328
351
352 while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { 329 while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
353 if (ret != xpcRetry) { 330 if (ret != xpcRetry) {
354 XPC_DEACTIVATE_PARTITION(part, ret); 331 XPC_DEACTIVATE_PARTITION(part, ret);
@@ -359,7 +336,7 @@ xpc_make_first_contact(struct xpc_partition *part)
359 "partition %d\n", XPC_PARTID(part)); 336 "partition %d\n", XPC_PARTID(part));
360 337
361 /* wait a 1/4 of a second or so */ 338 /* wait a 1/4 of a second or so */
362 (void) msleep_interruptible(250); 339 (void)msleep_interruptible(250);
363 340
364 if (part->act_state == XPC_P_DEACTIVATING) { 341 if (part->act_state == XPC_P_DEACTIVATING) {
365 return part->reason; 342 return part->reason;
@@ -369,7 +346,6 @@ xpc_make_first_contact(struct xpc_partition *part)
369 return xpc_mark_partition_active(part); 346 return xpc_mark_partition_active(part);
370} 347}
371 348
372
373/* 349/*
374 * The first kthread assigned to a newly activated partition is the one 350 * The first kthread assigned to a newly activated partition is the one
375 * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to 351 * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to
@@ -386,12 +362,11 @@ static void
386xpc_channel_mgr(struct xpc_partition *part) 362xpc_channel_mgr(struct xpc_partition *part)
387{ 363{
388 while (part->act_state != XPC_P_DEACTIVATING || 364 while (part->act_state != XPC_P_DEACTIVATING ||
389 atomic_read(&part->nchannels_active) > 0 || 365 atomic_read(&part->nchannels_active) > 0 ||
390 !xpc_partition_disengaged(part)) { 366 !xpc_partition_disengaged(part)) {
391 367
392 xpc_process_channel_activity(part); 368 xpc_process_channel_activity(part);
393 369
394
395 /* 370 /*
396 * Wait until we've been requested to activate kthreads or 371 * Wait until we've been requested to activate kthreads or
397 * all of the channel's message queues have been torn down or 372 * all of the channel's message queues have been torn down or
@@ -406,13 +381,19 @@ xpc_channel_mgr(struct xpc_partition *part)
406 * wake him up. 381 * wake him up.
407 */ 382 */
408 atomic_dec(&part->channel_mgr_requests); 383 atomic_dec(&part->channel_mgr_requests);
409 (void) wait_event_interruptible(part->channel_mgr_wq, 384 (void)wait_event_interruptible(part->channel_mgr_wq,
410 (atomic_read(&part->channel_mgr_requests) > 0 || 385 (atomic_read
411 (volatile u64) part->local_IPI_amo != 0 || 386 (&part->channel_mgr_requests) >
412 ((volatile u8) part->act_state == 387 0 ||
413 XPC_P_DEACTIVATING && 388 (volatile u64)part->
414 atomic_read(&part->nchannels_active) == 0 && 389 local_IPI_amo != 0 ||
415 xpc_partition_disengaged(part)))); 390 ((volatile u8)part->act_state ==
391 XPC_P_DEACTIVATING &&
392 atomic_read(&part->
393 nchannels_active)
394 == 0 &&
395 xpc_partition_disengaged
396 (part))));
416 atomic_set(&part->channel_mgr_requests, 1); 397 atomic_set(&part->channel_mgr_requests, 1);
417 398
418 // >>> Does it need to wakeup periodically as well? In case we 399 // >>> Does it need to wakeup periodically as well? In case we
@@ -420,7 +401,6 @@ xpc_channel_mgr(struct xpc_partition *part)
420 } 401 }
421} 402}
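Lindent wrapped the wait_event_interruptible() condition above rather awkwardly; for the reader, it is equivalent to:

/* Same condition as above, reflowed for readability. */
(void)wait_event_interruptible(part->channel_mgr_wq,
	(atomic_read(&part->channel_mgr_requests) > 0 ||
	 (volatile u64)part->local_IPI_amo != 0 ||
	 ((volatile u8)part->act_state == XPC_P_DEACTIVATING &&
	  atomic_read(&part->nchannels_active) == 0 &&
	  xpc_partition_disengaged(part))));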
422 403
423
424/* 404/*
425 * When XPC HB determines that a partition has come up, it will create a new 405 * When XPC HB determines that a partition has come up, it will create a new
426 * kthread and that kthread will call this function to attempt to set up the 406 * kthread and that kthread will call this function to attempt to set up the
@@ -454,7 +434,7 @@ xpc_partition_up(struct xpc_partition *part)
454 * has been dismantled. 434 * has been dismantled.
455 */ 435 */
456 436
457 (void) xpc_part_ref(part); /* this will always succeed */ 437 (void)xpc_part_ref(part); /* this will always succeed */
458 438
459 if (xpc_make_first_contact(part) == xpcSuccess) { 439 if (xpc_make_first_contact(part) == xpcSuccess) {
460 xpc_channel_mgr(part); 440 xpc_channel_mgr(part);
@@ -465,17 +445,15 @@ xpc_partition_up(struct xpc_partition *part)
465 xpc_teardown_infrastructure(part); 445 xpc_teardown_infrastructure(part);
466} 446}
467 447
468
469static int 448static int
470xpc_activating(void *__partid) 449xpc_activating(void *__partid)
471{ 450{
472 partid_t partid = (u64) __partid; 451 partid_t partid = (u64)__partid;
473 struct xpc_partition *part = &xpc_partitions[partid]; 452 struct xpc_partition *part = &xpc_partitions[partid];
474 unsigned long irq_flags; 453 unsigned long irq_flags;
475 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 454 struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
476 int ret; 455 int ret;
477 456
478
479 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 457 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
480 458
481 spin_lock_irqsave(&part->act_lock, irq_flags); 459 spin_lock_irqsave(&part->act_lock, irq_flags);
@@ -505,7 +483,7 @@ xpc_activating(void *__partid)
505 ret = sched_setscheduler(current, SCHED_FIFO, &param); 483 ret = sched_setscheduler(current, SCHED_FIFO, &param);
506 if (ret != 0) { 484 if (ret != 0) {
507 dev_warn(xpc_part, "unable to set pid %d to a realtime " 485 dev_warn(xpc_part, "unable to set pid %d to a realtime "
508 "priority, ret=%d\n", current->pid, ret); 486 "priority, ret=%d\n", current->pid, ret);
509 } 487 }
510 488
511 /* allow this thread and its children to run on any CPU */ 489 /* allow this thread and its children to run on any CPU */
@@ -522,9 +500,9 @@ xpc_activating(void *__partid)
522 * reloads and system reboots. 500 * reloads and system reboots.
523 */ 501 */
524 if (sn_register_xp_addr_region(part->remote_amos_page_pa, 502 if (sn_register_xp_addr_region(part->remote_amos_page_pa,
525 PAGE_SIZE, 1) < 0) { 503 PAGE_SIZE, 1) < 0) {
526 dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " 504 dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
527 "xp_addr region\n", partid); 505 "xp_addr region\n", partid);
528 506
529 spin_lock_irqsave(&part->act_lock, irq_flags); 507 spin_lock_irqsave(&part->act_lock, irq_flags);
530 part->act_state = XPC_P_INACTIVE; 508 part->act_state = XPC_P_INACTIVE;
@@ -537,12 +515,11 @@ xpc_activating(void *__partid)
537 xpc_allow_hb(partid, xpc_vars); 515 xpc_allow_hb(partid, xpc_vars);
538 xpc_IPI_send_activated(part); 516 xpc_IPI_send_activated(part);
539 517
540
541 /* 518 /*
542 * xpc_partition_up() holds this thread and marks this partition as 519 * xpc_partition_up() holds this thread and marks this partition as
543 * XPC_P_ACTIVE by calling xpc_hb_mark_active(). 520 * XPC_P_ACTIVE by calling xpc_hb_mark_active().
544 */ 521 */
545 (void) xpc_partition_up(part); 522 (void)xpc_partition_up(part);
546 523
547 xpc_disallow_hb(partid, xpc_vars); 524 xpc_disallow_hb(partid, xpc_vars);
548 xpc_mark_partition_inactive(part); 525 xpc_mark_partition_inactive(part);
@@ -555,7 +532,6 @@ xpc_activating(void *__partid)
555 return 0; 532 return 0;
556} 533}
557 534
558
559void 535void
560xpc_activate_partition(struct xpc_partition *part) 536xpc_activate_partition(struct xpc_partition *part)
561{ 537{
@@ -563,7 +539,6 @@ xpc_activate_partition(struct xpc_partition *part)
563 unsigned long irq_flags; 539 unsigned long irq_flags;
564 pid_t pid; 540 pid_t pid;
565 541
566
567 spin_lock_irqsave(&part->act_lock, irq_flags); 542 spin_lock_irqsave(&part->act_lock, irq_flags);
568 543
569 DBUG_ON(part->act_state != XPC_P_INACTIVE); 544 DBUG_ON(part->act_state != XPC_P_INACTIVE);
@@ -573,7 +548,7 @@ xpc_activate_partition(struct xpc_partition *part)
573 548
574 spin_unlock_irqrestore(&part->act_lock, irq_flags); 549 spin_unlock_irqrestore(&part->act_lock, irq_flags);
575 550
576 pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0); 551 pid = kernel_thread(xpc_activating, (void *)((u64)partid), 0);
577 552
578 if (unlikely(pid <= 0)) { 553 if (unlikely(pid <= 0)) {
579 spin_lock_irqsave(&part->act_lock, irq_flags); 554 spin_lock_irqsave(&part->act_lock, irq_flags);
@@ -583,7 +558,6 @@ xpc_activate_partition(struct xpc_partition *part)
583 } 558 }
584} 559}
585 560
586
587/* 561/*
588 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified 562 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
589 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more 563 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
@@ -603,10 +577,9 @@ xpc_activate_partition(struct xpc_partition *part)
603irqreturn_t 577irqreturn_t
604xpc_notify_IRQ_handler(int irq, void *dev_id) 578xpc_notify_IRQ_handler(int irq, void *dev_id)
605{ 579{
606 partid_t partid = (partid_t) (u64) dev_id; 580 partid_t partid = (partid_t) (u64)dev_id;
607 struct xpc_partition *part = &xpc_partitions[partid]; 581 struct xpc_partition *part = &xpc_partitions[partid];
608 582
609
610 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 583 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
611 584
612 if (xpc_part_ref(part)) { 585 if (xpc_part_ref(part)) {
@@ -617,7 +590,6 @@ xpc_notify_IRQ_handler(int irq, void *dev_id)
617 return IRQ_HANDLED; 590 return IRQ_HANDLED;
618} 591}
619 592
620
621/* 593/*
622 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor 594 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
623 * because the write to their associated IPI amo completed after the IRQ/IPI 595 * because the write to their associated IPI amo completed after the IRQ/IPI
@@ -630,13 +602,12 @@ xpc_dropped_IPI_check(struct xpc_partition *part)
630 xpc_check_for_channel_activity(part); 602 xpc_check_for_channel_activity(part);
631 603
632 part->dropped_IPI_timer.expires = jiffies + 604 part->dropped_IPI_timer.expires = jiffies +
633 XPC_P_DROPPED_IPI_WAIT; 605 XPC_P_DROPPED_IPI_WAIT;
634 add_timer(&part->dropped_IPI_timer); 606 add_timer(&part->dropped_IPI_timer);
635 xpc_part_deref(part); 607 xpc_part_deref(part);
636 } 608 }
637} 609}
638 610
639
640void 611void
641xpc_activate_kthreads(struct xpc_channel *ch, int needed) 612xpc_activate_kthreads(struct xpc_channel *ch, int needed)
642{ 613{
@@ -644,7 +615,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
644 int assigned = atomic_read(&ch->kthreads_assigned); 615 int assigned = atomic_read(&ch->kthreads_assigned);
645 int wakeup; 616 int wakeup;
646 617
647
648 DBUG_ON(needed <= 0); 618 DBUG_ON(needed <= 0);
649 619
650 if (idle > 0) { 620 if (idle > 0) {
@@ -676,7 +646,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
676 xpc_create_kthreads(ch, needed, 0); 646 xpc_create_kthreads(ch, needed, 0);
677} 647}
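The elided middle of xpc_activate_kthreads() implements wake-idle-first accounting: satisfy as much of 'needed' as possible from sleeping kthreads, then fork only the remainder. In outline (a sketch, not the exact elided code):

/* Wake idle kthreads before creating new ones. */
int idle = atomic_read(&ch->kthreads_idle);
int wakeup = (needed > idle) ? idle : needed;

if (wakeup > 0) {
	needed -= wakeup;
	wake_up_nr(&ch->idle_wq, wakeup); /* rouse that many sleepers */
}
if (needed > 0)
	xpc_create_kthreads(ch, needed, 0);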
678 648
679
680/* 649/*
681 * This function is where XPC's kthreads wait for messages to deliver. 650 * This function is where XPC's kthreads wait for messages to deliver.
682 */ 651 */
@@ -686,15 +655,14 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
686 do { 655 do {
687 /* deliver messages to their intended recipients */ 656 /* deliver messages to their intended recipients */
688 657
689 while ((volatile s64) ch->w_local_GP.get < 658 while ((volatile s64)ch->w_local_GP.get <
690 (volatile s64) ch->w_remote_GP.put && 659 (volatile s64)ch->w_remote_GP.put &&
691 !((volatile u32) ch->flags & 660 !((volatile u32)ch->flags & XPC_C_DISCONNECTING)) {
692 XPC_C_DISCONNECTING)) {
693 xpc_deliver_msg(ch); 661 xpc_deliver_msg(ch);
694 } 662 }
695 663
696 if (atomic_inc_return(&ch->kthreads_idle) > 664 if (atomic_inc_return(&ch->kthreads_idle) >
697 ch->kthreads_idle_limit) { 665 ch->kthreads_idle_limit) {
698 /* too many idle kthreads on this channel */ 666 /* too many idle kthreads on this channel */
699 atomic_dec(&ch->kthreads_idle); 667 atomic_dec(&ch->kthreads_idle);
700 break; 668 break;
@@ -703,18 +671,20 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
703 dev_dbg(xpc_chan, "idle kthread calling " 671 dev_dbg(xpc_chan, "idle kthread calling "
704 "wait_event_interruptible_exclusive()\n"); 672 "wait_event_interruptible_exclusive()\n");
705 673
706 (void) wait_event_interruptible_exclusive(ch->idle_wq, 674 (void)wait_event_interruptible_exclusive(ch->idle_wq,
707 ((volatile s64) ch->w_local_GP.get < 675 ((volatile s64)ch->
708 (volatile s64) ch->w_remote_GP.put || 676 w_local_GP.get <
709 ((volatile u32) ch->flags & 677 (volatile s64)ch->
710 XPC_C_DISCONNECTING))); 678 w_remote_GP.put ||
679 ((volatile u32)ch->
680 flags &
681 XPC_C_DISCONNECTING)));
711 682
712 atomic_dec(&ch->kthreads_idle); 683 atomic_dec(&ch->kthreads_idle);
713 684
714 } while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING)); 685 } while (!((volatile u32)ch->flags & XPC_C_DISCONNECTING));
715} 686}
716 687
717
718static int 688static int
719xpc_daemonize_kthread(void *args) 689xpc_daemonize_kthread(void *args)
720{ 690{
@@ -725,7 +695,6 @@ xpc_daemonize_kthread(void *args)
725 int n_needed; 695 int n_needed;
726 unsigned long irq_flags; 696 unsigned long irq_flags;
727 697
728
729 daemonize("xpc%02dc%d", partid, ch_number); 698 daemonize("xpc%02dc%d", partid, ch_number);
730 699
731 dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", 700 dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
@@ -756,8 +725,7 @@ xpc_daemonize_kthread(void *args)
756 * need one less than total #of messages to deliver. 725 * need one less than total #of messages to deliver.
757 */ 726 */
758 n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; 727 n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
759 if (n_needed > 0 && 728 if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) {
760 !(ch->flags & XPC_C_DISCONNECTING)) {
761 xpc_activate_kthreads(ch, n_needed); 729 xpc_activate_kthreads(ch, n_needed);
762 } 730 }
763 } else { 731 } else {
@@ -771,7 +739,7 @@ xpc_daemonize_kthread(void *args)
771 739
772 spin_lock_irqsave(&ch->lock, irq_flags); 740 spin_lock_irqsave(&ch->lock, irq_flags);
773 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && 741 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
774 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { 742 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
775 ch->flags |= XPC_C_DISCONNECTINGCALLOUT; 743 ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
776 spin_unlock_irqrestore(&ch->lock, irq_flags); 744 spin_unlock_irqrestore(&ch->lock, irq_flags);
777 745
@@ -798,7 +766,6 @@ xpc_daemonize_kthread(void *args)
798 return 0; 766 return 0;
799} 767}
800 768
801
802/* 769/*
803 * For each partition that XPC has established communications with, there is 770 * For each partition that XPC has established communications with, there is
804 * a minimum of one kernel thread assigned to perform any operation that 771 * a minimum of one kernel thread assigned to perform any operation that
@@ -813,14 +780,13 @@ xpc_daemonize_kthread(void *args)
813 */ 780 */
814void 781void
815xpc_create_kthreads(struct xpc_channel *ch, int needed, 782xpc_create_kthreads(struct xpc_channel *ch, int needed,
816 int ignore_disconnecting) 783 int ignore_disconnecting)
817{ 784{
818 unsigned long irq_flags; 785 unsigned long irq_flags;
819 pid_t pid; 786 pid_t pid;
820 u64 args = XPC_PACK_ARGS(ch->partid, ch->number); 787 u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
821 struct xpc_partition *part = &xpc_partitions[ch->partid]; 788 struct xpc_partition *part = &xpc_partitions[ch->partid];
822 789
823
824 while (needed-- > 0) { 790 while (needed-- > 0) {
825 791
826 /* 792 /*
@@ -832,7 +798,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
832 if (!atomic_inc_not_zero(&ch->kthreads_assigned)) { 798 if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
833 /* kthreads assigned had gone to zero */ 799 /* kthreads assigned had gone to zero */
834 BUG_ON(!(ch->flags & 800 BUG_ON(!(ch->flags &
835 XPC_C_DISCONNECTINGCALLOUT_MADE)); 801 XPC_C_DISCONNECTINGCALLOUT_MADE));
836 break; 802 break;
837 } 803 }
838 804
@@ -843,10 +809,10 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
843 if (atomic_inc_return(&part->nchannels_engaged) == 1) 809 if (atomic_inc_return(&part->nchannels_engaged) == 1)
844 xpc_mark_partition_engaged(part); 810 xpc_mark_partition_engaged(part);
845 } 811 }
846 (void) xpc_part_ref(part); 812 (void)xpc_part_ref(part);
847 xpc_msgqueue_ref(ch); 813 xpc_msgqueue_ref(ch);
848 814
849 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0); 815 pid = kernel_thread(xpc_daemonize_kthread, (void *)args, 0);
850 if (pid < 0) { 816 if (pid < 0) {
851 /* the fork failed */ 817 /* the fork failed */
852 818
@@ -869,7 +835,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
869 xpc_part_deref(part); 835 xpc_part_deref(part);
870 836
871 if (atomic_read(&ch->kthreads_assigned) < 837 if (atomic_read(&ch->kthreads_assigned) <
872 ch->kthreads_idle_limit) { 838 ch->kthreads_idle_limit) {
873 /* 839 /*
874 * Flag this as an error only if we have an 840 * Flag this as an error only if we have an
875 * insufficient #of kthreads for the channel 841 * insufficient #of kthreads for the channel
@@ -877,7 +843,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
877 */ 843 */
878 spin_lock_irqsave(&ch->lock, irq_flags); 844 spin_lock_irqsave(&ch->lock, irq_flags);
879 XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, 845 XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
880 &irq_flags); 846 &irq_flags);
881 spin_unlock_irqrestore(&ch->lock, irq_flags); 847 spin_unlock_irqrestore(&ch->lock, irq_flags);
882 } 848 }
883 break; 849 break;
@@ -887,7 +853,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
887 } 853 }
888} 854}
889 855
890
891void 856void
892xpc_disconnect_wait(int ch_number) 857xpc_disconnect_wait(int ch_number)
893{ 858{
@@ -897,7 +862,6 @@ xpc_disconnect_wait(int ch_number)
897 struct xpc_channel *ch; 862 struct xpc_channel *ch;
898 int wakeup_channel_mgr; 863 int wakeup_channel_mgr;
899 864
900
901 /* now wait for all callouts to the caller's function to cease */ 865 /* now wait for all callouts to the caller's function to cease */
902 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 866 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
903 part = &xpc_partitions[partid]; 867 part = &xpc_partitions[partid];
@@ -923,7 +887,8 @@ xpc_disconnect_wait(int ch_number)
923 if (part->act_state != XPC_P_DEACTIVATING) { 887 if (part->act_state != XPC_P_DEACTIVATING) {
924 spin_lock(&part->IPI_lock); 888 spin_lock(&part->IPI_lock);
925 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 889 XPC_SET_IPI_FLAGS(part->local_IPI_amo,
926 ch->number, ch->delayed_IPI_flags); 890 ch->number,
891 ch->delayed_IPI_flags);
927 spin_unlock(&part->IPI_lock); 892 spin_unlock(&part->IPI_lock);
928 wakeup_channel_mgr = 1; 893 wakeup_channel_mgr = 1;
929 } 894 }
@@ -941,7 +906,6 @@ xpc_disconnect_wait(int ch_number)
941 } 906 }
942} 907}
943 908
944
945static void 909static void
946xpc_do_exit(enum xpc_retval reason) 910xpc_do_exit(enum xpc_retval reason)
947{ 911{
@@ -950,7 +914,6 @@ xpc_do_exit(enum xpc_retval reason)
950 struct xpc_partition *part; 914 struct xpc_partition *part;
951 unsigned long printmsg_time, disengage_request_timeout = 0; 915 unsigned long printmsg_time, disengage_request_timeout = 0;
952 916
953
954 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ 917 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
955 DBUG_ON(xpc_exiting == 1); 918 DBUG_ON(xpc_exiting == 1);
956 919
@@ -971,10 +934,8 @@ xpc_do_exit(enum xpc_retval reason)
971 /* wait for the heartbeat checker thread to exit */ 934 /* wait for the heartbeat checker thread to exit */
972 wait_for_completion(&xpc_hb_checker_exited); 935 wait_for_completion(&xpc_hb_checker_exited);
973 936
974
975 /* sleep for a 1/3 of a second or so */ 937 /* sleep for a 1/3 of a second or so */
976 (void) msleep_interruptible(300); 938 (void)msleep_interruptible(300);
977
978 939
979 /* wait for all partitions to become inactive */ 940 /* wait for all partitions to become inactive */
980 941
@@ -988,7 +949,7 @@ xpc_do_exit(enum xpc_retval reason)
988 part = &xpc_partitions[partid]; 949 part = &xpc_partitions[partid];
989 950
990 if (xpc_partition_disengaged(part) && 951 if (xpc_partition_disengaged(part) &&
991 part->act_state == XPC_P_INACTIVE) { 952 part->act_state == XPC_P_INACTIVE) {
992 continue; 953 continue;
993 } 954 }
994 955
@@ -997,47 +958,46 @@ xpc_do_exit(enum xpc_retval reason)
997 XPC_DEACTIVATE_PARTITION(part, reason); 958 XPC_DEACTIVATE_PARTITION(part, reason);
998 959
999 if (part->disengage_request_timeout > 960 if (part->disengage_request_timeout >
1000 disengage_request_timeout) { 961 disengage_request_timeout) {
1001 disengage_request_timeout = 962 disengage_request_timeout =
1002 part->disengage_request_timeout; 963 part->disengage_request_timeout;
1003 } 964 }
1004 } 965 }
1005 966
1006 if (xpc_partition_engaged(-1UL)) { 967 if (xpc_partition_engaged(-1UL)) {
1007 if (time_after(jiffies, printmsg_time)) { 968 if (time_after(jiffies, printmsg_time)) {
1008 dev_info(xpc_part, "waiting for remote " 969 dev_info(xpc_part, "waiting for remote "
1009 "partitions to disengage, timeout in " 970 "partitions to disengage, timeout in "
1010 "%ld seconds\n", 971 "%ld seconds\n",
1011 (disengage_request_timeout - jiffies) 972 (disengage_request_timeout - jiffies)
1012 / HZ); 973 / HZ);
1013 printmsg_time = jiffies + 974 printmsg_time = jiffies +
1014 (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); 975 (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
1015 printed_waiting_msg = 1; 976 printed_waiting_msg = 1;
1016 } 977 }
1017 978
1018 } else if (active_part_count > 0) { 979 } else if (active_part_count > 0) {
1019 if (printed_waiting_msg) { 980 if (printed_waiting_msg) {
1020 dev_info(xpc_part, "waiting for local partition" 981 dev_info(xpc_part, "waiting for local partition"
1021 " to disengage\n"); 982 " to disengage\n");
1022 printed_waiting_msg = 0; 983 printed_waiting_msg = 0;
1023 } 984 }
1024 985
1025 } else { 986 } else {
1026 if (!xpc_disengage_request_timedout) { 987 if (!xpc_disengage_request_timedout) {
1027 dev_info(xpc_part, "all partitions have " 988 dev_info(xpc_part, "all partitions have "
1028 "disengaged\n"); 989 "disengaged\n");
1029 } 990 }
1030 break; 991 break;
1031 } 992 }
1032 993
1033 /* sleep for a 1/3 of a second or so */ 994 /* sleep for a 1/3 of a second or so */
1034 (void) msleep_interruptible(300); 995 (void)msleep_interruptible(300);
1035 996
1036 } while (1); 997 } while (1);
1037 998
1038 DBUG_ON(xpc_partition_engaged(-1UL)); 999 DBUG_ON(xpc_partition_engaged(-1UL));
1039 1000
1040
1041 /* indicate to others that our reserved page is uninitialized */ 1001 /* indicate to others that our reserved page is uninitialized */
1042 xpc_rsvd_page->vars_pa = 0; 1002 xpc_rsvd_page->vars_pa = 0;
1043 1003
@@ -1047,16 +1007,15 @@ xpc_do_exit(enum xpc_retval reason)
1047 1007
1048 if (reason == xpcUnloading) { 1008 if (reason == xpcUnloading) {
1049 /* take ourselves off of the reboot_notifier_list */ 1009 /* take ourselves off of the reboot_notifier_list */
1050 (void) unregister_reboot_notifier(&xpc_reboot_notifier); 1010 (void)unregister_reboot_notifier(&xpc_reboot_notifier);
1051 1011
1052 /* take ourselves off of the die_notifier list */ 1012 /* take ourselves off of the die_notifier list */
1053 (void) unregister_die_notifier(&xpc_die_notifier); 1013 (void)unregister_die_notifier(&xpc_die_notifier);
1054 } 1014 }
1055 1015
1056 /* close down protections for IPI operations */ 1016 /* close down protections for IPI operations */
1057 xpc_restrict_IPI_ops(); 1017 xpc_restrict_IPI_ops();
1058 1018
1059
1060 /* clear the interface to XPC's functions */ 1019 /* clear the interface to XPC's functions */
1061 xpc_clear_interface(); 1020 xpc_clear_interface();
1062 1021
@@ -1067,7 +1026,6 @@ xpc_do_exit(enum xpc_retval reason)
1067 kfree(xpc_remote_copy_buffer_base); 1026 kfree(xpc_remote_copy_buffer_base);
1068} 1027}
1069 1028
1070
1071/* 1029/*
1072 * This function is called when the system is being rebooted. 1030 * This function is called when the system is being rebooted.
1073 */ 1031 */
@@ -1076,7 +1034,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
1076{ 1034{
1077 enum xpc_retval reason; 1035 enum xpc_retval reason;
1078 1036
1079
1080 switch (event) { 1037 switch (event) {
1081 case SYS_RESTART: 1038 case SYS_RESTART:
1082 reason = xpcSystemReboot; 1039 reason = xpcSystemReboot;
@@ -1095,7 +1052,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
1095 return NOTIFY_DONE; 1052 return NOTIFY_DONE;
1096} 1053}
1097 1054
1098
1099/* 1055/*
1100 * Notify other partitions to disengage from all references to our memory. 1056 * Notify other partitions to disengage from all references to our memory.
1101 */ 1057 */
@@ -1107,17 +1063,15 @@ xpc_die_disengage(void)
1107 unsigned long engaged; 1063 unsigned long engaged;
1108 long time, printmsg_time, disengage_request_timeout; 1064 long time, printmsg_time, disengage_request_timeout;
1109 1065
1110
1111 /* keep xpc_hb_checker thread from doing anything (just in case) */ 1066 /* keep xpc_hb_checker thread from doing anything (just in case) */
1112 xpc_exiting = 1; 1067 xpc_exiting = 1;
1113 1068
1114 xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */ 1069 xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */
1115 1070
1116 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1071 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1117 part = &xpc_partitions[partid]; 1072 part = &xpc_partitions[partid];
1118 1073
1119 if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part-> 1074 if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
1120 remote_vars_version)) {
1121 1075
1122 /* just in case it was left set by an earlier XPC */ 1076 /* just in case it was left set by an earlier XPC */
1123 xpc_clear_partition_engaged(1UL << partid); 1077 xpc_clear_partition_engaged(1UL << partid);
@@ -1125,7 +1079,7 @@ xpc_die_disengage(void)
1125 } 1079 }
1126 1080
1127 if (xpc_partition_engaged(1UL << partid) || 1081 if (xpc_partition_engaged(1UL << partid) ||
1128 part->act_state != XPC_P_INACTIVE) { 1082 part->act_state != XPC_P_INACTIVE) {
1129 xpc_request_partition_disengage(part); 1083 xpc_request_partition_disengage(part);
1130 xpc_mark_partition_disengaged(part); 1084 xpc_mark_partition_disengaged(part);
1131 xpc_IPI_send_disengage(part); 1085 xpc_IPI_send_disengage(part);
@@ -1134,9 +1088,9 @@ xpc_die_disengage(void)
1134 1088
1135 time = rtc_time(); 1089 time = rtc_time();
1136 printmsg_time = time + 1090 printmsg_time = time +
1137 (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second); 1091 (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
1138 disengage_request_timeout = time + 1092 disengage_request_timeout = time +
1139 (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second); 1093 (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
1140 1094
1141 /* wait for all other partitions to disengage from us */ 1095 /* wait for all other partitions to disengage from us */
1142 1096
@@ -1152,8 +1106,8 @@ xpc_die_disengage(void)
1152 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1106 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1153 if (engaged & (1UL << partid)) { 1107 if (engaged & (1UL << partid)) {
1154 dev_info(xpc_part, "disengage from " 1108 dev_info(xpc_part, "disengage from "
1155 "remote partition %d timed " 1109 "remote partition %d timed "
1156 "out\n", partid); 1110 "out\n", partid);
1157 } 1111 }
1158 } 1112 }
1159 break; 1113 break;
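
The wait loop above is driven entirely by the sn2 real-time counter: deadlines are precomputed as rtc_time() plus seconds scaled by sn_rtc_cycles_per_second, and the progress message is throttled by pushing printmsg_time forward after each print. Below is a minimal userspace sketch of the same arithmetic; the simulated clock and engaged-partition mask stand in for rtc_time() and xpc_partition_engaged(), and the constants are placeholders for the driver's tunables:

    #include <stdint.h>
    #include <stdio.h>

    #define CYCLES_PER_SEC 1000ULL  /* stands in for sn_rtc_cycles_per_second */
    #define PRINT_INTERVAL 10       /* seconds, like XPC_DISENGAGE_PRINTMSG_INTERVAL */
    #define TIME_LIMIT     90       /* seconds, like xpc_disengage_request_timelimit */

    static uint64_t now;            /* simulated clock; stands in for rtc_time() */
    static uint64_t engaged = 0x16; /* partitions 1, 2 and 4 still engaged */

    int main(void)
    {
        uint64_t time = now;
        uint64_t printmsg_time = time + PRINT_INTERVAL * CYCLES_PER_SEC;
        uint64_t deadline = time + TIME_LIMIT * CYCLES_PER_SEC;

        while (engaged != 0) {
            time = now;
            if (time >= deadline) {
                /* report every partition that never disengaged */
                for (int partid = 1; partid < 64; partid++) {
                    if (engaged & (1ULL << partid))
                        printf("disengage from partition %d timed out\n",
                               partid);
                }
                break;
            }
            if (time >= printmsg_time) {
                printf("waiting, timeout in %llu seconds\n",
                       (unsigned long long)((deadline - time) / CYCLES_PER_SEC));
                printmsg_time = time + PRINT_INTERVAL * CYCLES_PER_SEC;
            }
            now += CYCLES_PER_SEC;      /* one simulated second per pass */
            if (now >= 30 * CYCLES_PER_SEC)
                engaged &= engaged - 1; /* a partition disengages */
        }
        return 0;
    }
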
@@ -1161,17 +1115,16 @@ xpc_die_disengage(void)
1161 1115
1162 if (time >= printmsg_time) { 1116 if (time >= printmsg_time) {
1163 dev_info(xpc_part, "waiting for remote partitions to " 1117 dev_info(xpc_part, "waiting for remote partitions to "
1164 "disengage, timeout in %ld seconds\n", 1118 "disengage, timeout in %ld seconds\n",
1165 (disengage_request_timeout - time) / 1119 (disengage_request_timeout - time) /
1166 sn_rtc_cycles_per_second); 1120 sn_rtc_cycles_per_second);
1167 printmsg_time = time + 1121 printmsg_time = time +
1168 (XPC_DISENGAGE_PRINTMSG_INTERVAL * 1122 (XPC_DISENGAGE_PRINTMSG_INTERVAL *
1169 sn_rtc_cycles_per_second); 1123 sn_rtc_cycles_per_second);
1170 } 1124 }
1171 } 1125 }
1172} 1126}
1173 1127
1174
1175/* 1128/*
1176 * This function is called when the system is being restarted or halted due 1129 * This function is called when the system is being restarted or halted due
1177 * to some sort of system failure. If this is the case we need to notify the 1130 * to some sort of system failure. If this is the case we need to notify the
@@ -1217,7 +1170,6 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1217 return NOTIFY_DONE; 1170 return NOTIFY_DONE;
1218} 1171}
1219 1172
1220
1221int __init 1173int __init
1222xpc_init(void) 1174xpc_init(void)
1223{ 1175{
@@ -1227,16 +1179,15 @@ xpc_init(void)
1227 pid_t pid; 1179 pid_t pid;
1228 size_t buf_size; 1180 size_t buf_size;
1229 1181
1230
1231 if (!ia64_platform_is("sn2")) { 1182 if (!ia64_platform_is("sn2")) {
1232 return -ENODEV; 1183 return -ENODEV;
1233 } 1184 }
1234 1185
1235
1236 buf_size = max(XPC_RP_VARS_SIZE, 1186 buf_size = max(XPC_RP_VARS_SIZE,
1237 XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES); 1187 XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
1238 xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size, 1188 xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
1239 GFP_KERNEL, &xpc_remote_copy_buffer_base); 1189 GFP_KERNEL,
1190 &xpc_remote_copy_buffer_base);
1240 if (xpc_remote_copy_buffer == NULL) 1191 if (xpc_remote_copy_buffer == NULL)
1241 return -ENOMEM; 1192 return -ENOMEM;
1242 1193
@@ -1256,7 +1207,7 @@ xpc_init(void)
1256 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1207 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1257 part = &xpc_partitions[partid]; 1208 part = &xpc_partitions[partid];
1258 1209
1259 DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part)); 1210 DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
1260 1211
1261 part->act_IRQ_rcvd = 0; 1212 part->act_IRQ_rcvd = 0;
1262 spin_lock_init(&part->act_lock); 1213 spin_lock_init(&part->act_lock);
@@ -1265,8 +1216,8 @@ xpc_init(void)
1265 1216
1266 init_timer(&part->disengage_request_timer); 1217 init_timer(&part->disengage_request_timer);
1267 part->disengage_request_timer.function = 1218 part->disengage_request_timer.function =
1268 xpc_timeout_partition_disengage_request; 1219 xpc_timeout_partition_disengage_request;
1269 part->disengage_request_timer.data = (unsigned long) part; 1220 part->disengage_request_timer.data = (unsigned long)part;
1270 1221
1271 part->setup_state = XPC_P_UNSET; 1222 part->setup_state = XPC_P_UNSET;
1272 init_waitqueue_head(&part->teardown_wq); 1223 init_waitqueue_head(&part->teardown_wq);
@@ -1292,7 +1243,7 @@ xpc_init(void)
1292 * but rather immediately process the interrupt. 1243 * but rather immediately process the interrupt.
1293 */ 1244 */
1294 ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0, 1245 ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
1295 "xpc hb", NULL); 1246 "xpc hb", NULL);
1296 if (ret != 0) { 1247 if (ret != 0) {
1297 dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " 1248 dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
1298 "errno=%d\n", -ret); 1249 "errno=%d\n", -ret);
@@ -1327,7 +1278,6 @@ xpc_init(void)
1327 return -EBUSY; 1278 return -EBUSY;
1328 } 1279 }
1329 1280
1330
1331 /* add ourselves to the reboot_notifier_list */ 1281 /* add ourselves to the reboot_notifier_list */
1332 ret = register_reboot_notifier(&xpc_reboot_notifier); 1282 ret = register_reboot_notifier(&xpc_reboot_notifier);
1333 if (ret != 0) { 1283 if (ret != 0) {
@@ -1355,10 +1305,10 @@ xpc_init(void)
1355 xpc_rsvd_page->vars_pa = 0; 1305 xpc_rsvd_page->vars_pa = 0;
1356 1306
1357 /* take ourselves off of the reboot_notifier_list */ 1307 /* take ourselves off of the reboot_notifier_list */
1358 (void) unregister_reboot_notifier(&xpc_reboot_notifier); 1308 (void)unregister_reboot_notifier(&xpc_reboot_notifier);
1359 1309
1360 /* take ourselves off of the die_notifier list */ 1310 /* take ourselves off of the die_notifier list */
1361 (void) unregister_die_notifier(&xpc_die_notifier); 1311 (void)unregister_die_notifier(&xpc_die_notifier);
1362 1312
1363 del_timer_sync(&xpc_hb_timer); 1313 del_timer_sync(&xpc_hb_timer);
1364 free_irq(SGI_XPC_ACTIVATE, NULL); 1314 free_irq(SGI_XPC_ACTIVATE, NULL);
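
The failure path above tears resources down in the reverse of the order xpc_init() acquired them: reserved page first, then the reboot and die notifiers, then the heartbeat timer and the ACTIVATE IRQ. xpc_init() unwinds inline; the sketch below shows the same acquire-in-order, release-in-reverse idiom in the more common goto style. The resource names and the fail_at knob are illustrative, not the driver's:

    #include <stdio.h>

    static int acquire(const char *name) { printf("acquire %s\n", name); return 0; }
    static void release(const char *name) { printf("release %s\n", name); }

    /* fail_at simulates the nth acquisition failing, to exercise each unwind */
    static int init_path(int fail_at)
    {
        if (fail_at == 1 || acquire("activate_irq") != 0)
            goto out;
        if (fail_at == 2 || acquire("reboot_notifier") != 0)
            goto out_irq;
        if (fail_at == 3 || acquire("die_notifier") != 0)
            goto out_reboot;
        return 0;                 /* fully initialized */

    out_reboot:
        release("reboot_notifier");
    out_irq:
        release("activate_irq");
    out:
        return -1;
    }

    int main(void)
    {
        /* fail at step 3: releases reboot_notifier, then activate_irq */
        return init_path(3) == 0 ? 0 : 1;
    }
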
@@ -1372,7 +1322,6 @@ xpc_init(void)
1372 return -EBUSY; 1322 return -EBUSY;
1373 } 1323 }
1374 1324
1375
1376 /* 1325 /*
1377 * Startup a thread that will attempt to discover other partitions to 1326 * Startup a thread that will attempt to discover other partitions to
1378 * activate based on info provided by SAL. This new thread is short 1327 * activate based on info provided by SAL. This new thread is short
@@ -1389,7 +1338,6 @@ xpc_init(void)
1389 return -EBUSY; 1338 return -EBUSY;
1390 } 1339 }
1391 1340
1392
1393 /* set the interface to point at XPC's functions */ 1341 /* set the interface to point at XPC's functions */
1394 xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, 1342 xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
1395 xpc_initiate_allocate, xpc_initiate_send, 1343 xpc_initiate_allocate, xpc_initiate_send,
@@ -1398,16 +1346,16 @@ xpc_init(void)
1398 1346
1399 return 0; 1347 return 0;
1400} 1348}
1401module_init(xpc_init);
1402 1349
1350module_init(xpc_init);
1403 1351
1404void __exit 1352void __exit
1405xpc_exit(void) 1353xpc_exit(void)
1406{ 1354{
1407 xpc_do_exit(xpcUnloading); 1355 xpc_do_exit(xpcUnloading);
1408} 1356}
1409module_exit(xpc_exit);
1410 1357
1358module_exit(xpc_exit);
1411 1359
1412MODULE_AUTHOR("Silicon Graphics, Inc."); 1360MODULE_AUTHOR("Silicon Graphics, Inc.");
1413MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); 1361MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
@@ -1415,17 +1363,16 @@ MODULE_LICENSE("GPL");
1415 1363
1416module_param(xpc_hb_interval, int, 0); 1364module_param(xpc_hb_interval, int, 0);
1417MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " 1365MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
1418 "heartbeat increments."); 1366 "heartbeat increments.");
1419 1367
1420module_param(xpc_hb_check_interval, int, 0); 1368module_param(xpc_hb_check_interval, int, 0);
1421MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " 1369MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
1422 "heartbeat checks."); 1370 "heartbeat checks.");
1423 1371
1424module_param(xpc_disengage_request_timelimit, int, 0); 1372module_param(xpc_disengage_request_timelimit, int, 0);
1425MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait " 1373MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
1426 "for disengage request to complete."); 1374 "for disengage request to complete.");
1427 1375
1428module_param(xpc_kdebug_ignore, int, 0); 1376module_param(xpc_kdebug_ignore, int, 0);
1429MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " 1377MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
1430 "other partitions when dropping into kdebug."); 1378 "other partitions when dropping into kdebug.");
1431
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 38552f37e53d..e41cb93b8c89 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -6,7 +6,6 @@
6 * Copyright (C) 1999-2008 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1999-2008 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * Cross Partition Network Interface (XPNET) support 10 * Cross Partition Network Interface (XPNET) support
12 * 11 *
@@ -21,7 +20,6 @@
21 * 20 *
22 */ 21 */
23 22
24
25#include <linux/module.h> 23#include <linux/module.h>
26#include <linux/kernel.h> 24#include <linux/kernel.h>
27#include <linux/init.h> 25#include <linux/init.h>
@@ -40,7 +38,6 @@
40#include <asm/atomic.h> 38#include <asm/atomic.h>
41#include "xp.h" 39#include "xp.h"
42 40
43
44/* 41/*
45 * The message payload transferred by XPC. 42 * The message payload transferred by XPC.
46 * 43 *
@@ -79,7 +76,6 @@ struct xpnet_message {
79#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE)) 76#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE))
80#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE) 77#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
81 78
82
83#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1) 79#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
84#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1) 80#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
85 81
@@ -91,9 +87,9 @@ struct xpnet_message {
91#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4) 87#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
92#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf) 88#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
93 89
94#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */ 90#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */
95#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */ 91#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */
96#define XPNET_MAGIC 0x88786984 /* "XNET" */ 92#define XPNET_MAGIC 0x88786984 /* "XNET" */
97 93
98#define XPNET_VALID_MSG(_m) \ 94#define XPNET_VALID_MSG(_m) \
99 ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \ 95 ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
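
XPNET_VERSION_MAJOR() and XPNET_VERSION_MINOR() above imply that a version byte carries the major number in its high nibble and the minor in its low nibble, and XPNET_VALID_MSG accepts a message when the major numbers match. _XPNET_VERSION itself is outside this hunk, so the packing macro in this sketch is an assumption, chosen to be consistent with the two accessors:

    #include <assert.h>
    #include <stdio.h>

    /* Assumed packing, consistent with the accessors shown in the diff:
     * major in the high nibble, minor in the low nibble of one byte. */
    #define _XPNET_VERSION(_maj, _min) (((_maj) << 4) | (_min))
    #define XPNET_VERSION_MAJOR(_v)    ((_v) >> 4)
    #define XPNET_VERSION_MINOR(_v)    ((_v) & 0xf)

    int main(void)
    {
        int v = _XPNET_VERSION(1, 1);          /* version 1.1 */
        assert(XPNET_VERSION_MAJOR(v) == 1);
        assert(XPNET_VERSION_MINOR(v) == 1);
        /* messages are compatible when the major versions match */
        printf("v=0x%02x major=%d minor=%d\n", v,
               XPNET_VERSION_MAJOR(v), XPNET_VERSION_MINOR(v));
        return 0;
    }
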
@@ -101,7 +97,6 @@ struct xpnet_message {
101 97
102#define XPNET_DEVICE_NAME "xp0" 98#define XPNET_DEVICE_NAME "xp0"
103 99
104
105/* 100/*
106 * When messages are queued with xpc_send_notify, a kmalloc'd buffer 101 * When messages are queued with xpc_send_notify, a kmalloc'd buffer
107 * of the following type is passed as a notification cookie. When the 102 * of the following type is passed as a notification cookie. When the
@@ -145,7 +140,6 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
145/* 32KB has been determined to be the ideal MTU */ 140/* 32KB has been determined to be the ideal MTU */
146#define XPNET_DEF_MTU (0x8000UL) 141#define XPNET_DEF_MTU (0x8000UL)
147 142
148
149/* 143/*
150 * The partition id is encapsulated in the MAC address. The following 144 * The partition id is encapsulated in the MAC address. The following
151 * define locates the octet the partid is in. 145 * define locates the octet the partid is in.
@@ -153,7 +147,6 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
153#define XPNET_PARTID_OCTET 1 147#define XPNET_PARTID_OCTET 1
154#define XPNET_LICENSE_OCTET 2 148#define XPNET_LICENSE_OCTET 2
155 149
156
157/* 150/*
158 * Define the XPNET debug device structure that is to be used with dev_dbg(), 151 * Define the XPNET debug device structure that is to be used with dev_dbg(),
159 * dev_err(), dev_warn(), and dev_info(). 152 * dev_err(), dev_warn(), and dev_info().
@@ -163,7 +156,7 @@ struct device_driver xpnet_dbg_name = {
163}; 156};
164 157
165struct device xpnet_dbg_subname = { 158struct device xpnet_dbg_subname = {
166 .bus_id = {0}, /* set to "" */ 159 .bus_id = {0}, /* set to "" */
167 .driver = &xpnet_dbg_name 160 .driver = &xpnet_dbg_name
168}; 161};
169 162
@@ -178,14 +171,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
178 struct sk_buff *skb; 171 struct sk_buff *skb;
179 bte_result_t bret; 172 bte_result_t bret;
180 struct xpnet_dev_private *priv = 173 struct xpnet_dev_private *priv =
181 (struct xpnet_dev_private *) xpnet_device->priv; 174 (struct xpnet_dev_private *)xpnet_device->priv;
182
183 175
184 if (!XPNET_VALID_MSG(msg)) { 176 if (!XPNET_VALID_MSG(msg)) {
185 /* 177 /*
186 * Packet with a different XPC version. Ignore. 178 * Packet with a different XPC version. Ignore.
187 */ 179 */
188 xpc_received(partid, channel, (void *) msg); 180 xpc_received(partid, channel, (void *)msg);
189 181
190 priv->stats.rx_errors++; 182 priv->stats.rx_errors++;
191 183
@@ -194,14 +186,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
194 dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size, 186 dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
195 msg->leadin_ignore, msg->tailout_ignore); 187 msg->leadin_ignore, msg->tailout_ignore);
196 188
197
198 /* reserve an extra cache line */ 189 /* reserve an extra cache line */
199 skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); 190 skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
200 if (!skb) { 191 if (!skb) {
201 dev_err(xpnet, "failed on dev_alloc_skb(%d)\n", 192 dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
202 msg->size + L1_CACHE_BYTES); 193 msg->size + L1_CACHE_BYTES);
203 194
204 xpc_received(partid, channel, (void *) msg); 195 xpc_received(partid, channel, (void *)msg);
205 196
206 priv->stats.rx_errors++; 197 priv->stats.rx_errors++;
207 198
@@ -227,12 +218,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
227 * Move the data over from the other side. 218 * Move the data over from the other side.
228 */ 219 */
229 if ((XPNET_VERSION_MINOR(msg->version) == 1) && 220 if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
230 (msg->embedded_bytes != 0)) { 221 (msg->embedded_bytes != 0)) {
231 dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, " 222 dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
232 "%lu)\n", skb->data, &msg->data, 223 "%lu)\n", skb->data, &msg->data,
233 (size_t) msg->embedded_bytes); 224 (size_t)msg->embedded_bytes);
234 225
235 skb_copy_to_linear_data(skb, &msg->data, (size_t)msg->embedded_bytes); 226 skb_copy_to_linear_data(skb, &msg->data,
227 (size_t)msg->embedded_bytes);
236 } else { 228 } else {
237 dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" 229 dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
238 "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa, 230 "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
@@ -250,10 +242,10 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
250 dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned " 242 dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
251 "error=0x%x\n", (void *)msg->buf_pa, 243 "error=0x%x\n", (void *)msg->buf_pa,
252 (void *)__pa((u64)skb->data & 244 (void *)__pa((u64)skb->data &
253 ~(L1_CACHE_BYTES - 1)), 245 ~(L1_CACHE_BYTES - 1)),
254 msg->size, bret); 246 msg->size, bret);
255 247
256 xpc_received(partid, channel, (void *) msg); 248 xpc_received(partid, channel, (void *)msg);
257 249
258 priv->stats.rx_errors++; 250 priv->stats.rx_errors++;
259 251
@@ -262,7 +254,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
262 } 254 }
263 255
264 dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p " 256 dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
265 "skb->end=0x%p skb->len=%d\n", (void *) skb->head, 257 "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
266 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), 258 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
267 skb->len); 259 skb->len);
268 260
@@ -275,16 +267,14 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
275 (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), 267 (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
276 skb_end_pointer(skb), skb->len); 268 skb_end_pointer(skb), skb->len);
277 269
278
279 xpnet_device->last_rx = jiffies; 270 xpnet_device->last_rx = jiffies;
280 priv->stats.rx_packets++; 271 priv->stats.rx_packets++;
281 priv->stats.rx_bytes += skb->len + ETH_HLEN; 272 priv->stats.rx_bytes += skb->len + ETH_HLEN;
282 273
283 netif_rx_ni(skb); 274 netif_rx_ni(skb);
284 xpc_received(partid, channel, (void *) msg); 275 xpc_received(partid, channel, (void *)msg);
285} 276}
286 277
287
288/* 278/*
289 * This is the handler which XPC calls during any sort of change in 279 * This is the handler which XPC calls during any sort of change in
290 * state or message reception on a connection. 280 * state or message reception on a connection.
@@ -295,20 +285,19 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
295{ 285{
296 long bp; 286 long bp;
297 287
298
299 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 288 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
300 DBUG_ON(channel != XPC_NET_CHANNEL); 289 DBUG_ON(channel != XPC_NET_CHANNEL);
301 290
302 switch(reason) { 291 switch (reason) {
303 case xpcMsgReceived: /* message received */ 292 case xpcMsgReceived: /* message received */
304 DBUG_ON(data == NULL); 293 DBUG_ON(data == NULL);
305 294
306 xpnet_receive(partid, channel, (struct xpnet_message *) data); 295 xpnet_receive(partid, channel, (struct xpnet_message *)data);
307 break; 296 break;
308 297
309 case xpcConnected: /* connection completed to a partition */ 298 case xpcConnected: /* connection completed to a partition */
310 spin_lock_bh(&xpnet_broadcast_lock); 299 spin_lock_bh(&xpnet_broadcast_lock);
311 xpnet_broadcast_partitions |= 1UL << (partid -1 ); 300 xpnet_broadcast_partitions |= 1UL << (partid - 1);
312 bp = xpnet_broadcast_partitions; 301 bp = xpnet_broadcast_partitions;
313 spin_unlock_bh(&xpnet_broadcast_lock); 302 spin_unlock_bh(&xpnet_broadcast_lock);
314 303
@@ -321,7 +310,7 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
321 310
322 default: 311 default:
323 spin_lock_bh(&xpnet_broadcast_lock); 312 spin_lock_bh(&xpnet_broadcast_lock);
324 xpnet_broadcast_partitions &= ~(1UL << (partid -1 )); 313 xpnet_broadcast_partitions &= ~(1UL << (partid - 1));
325 bp = xpnet_broadcast_partitions; 314 bp = xpnet_broadcast_partitions;
326 spin_unlock_bh(&xpnet_broadcast_lock); 315 spin_unlock_bh(&xpnet_broadcast_lock);
327 316
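
xpnet_connection_activity() above maintains a single word, xpnet_broadcast_partitions, in which bit (partid - 1) records whether that partition's channel is up: xpcConnected sets the bit and every other state change clears it, always under xpnet_broadcast_lock. A userspace model of that bookkeeping, with a pthread mutex standing in for spin_lock_bh():

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t broadcast_partitions;
    static pthread_mutex_t broadcast_lock = PTHREAD_MUTEX_INITIALIZER;

    static void partition_connected(int partid)
    {
        pthread_mutex_lock(&broadcast_lock);
        broadcast_partitions |= 1ULL << (partid - 1);   /* xpcConnected */
        pthread_mutex_unlock(&broadcast_lock);
    }

    static void partition_gone(int partid)
    {
        pthread_mutex_lock(&broadcast_lock);
        broadcast_partitions &= ~(1ULL << (partid - 1)); /* any other reason */
        pthread_mutex_unlock(&broadcast_lock);
    }

    int main(void)
    {
        partition_connected(3);
        partition_connected(7);
        partition_gone(3);
        printf("mask=0x%llx\n", (unsigned long long)broadcast_partitions); /* 0x40 */
        return 0;
    }
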
@@ -337,13 +326,11 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
337 } 326 }
338} 327}
339 328
340
341static int 329static int
342xpnet_dev_open(struct net_device *dev) 330xpnet_dev_open(struct net_device *dev)
343{ 331{
344 enum xpc_retval ret; 332 enum xpc_retval ret;
345 333
346
347 dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, " 334 dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
348 "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity, 335 "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
349 XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS, 336 XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
@@ -364,7 +351,6 @@ xpnet_dev_open(struct net_device *dev)
364 return 0; 351 return 0;
365} 352}
366 353
367
368static int 354static int
369xpnet_dev_stop(struct net_device *dev) 355xpnet_dev_stop(struct net_device *dev)
370{ 356{
@@ -375,7 +361,6 @@ xpnet_dev_stop(struct net_device *dev)
375 return 0; 361 return 0;
376} 362}
377 363
378
379static int 364static int
380xpnet_dev_change_mtu(struct net_device *dev, int new_mtu) 365xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
381{ 366{
@@ -392,7 +377,6 @@ xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
392 return 0; 377 return 0;
393} 378}
394 379
395
396/* 380/*
397 * Required for the net_device structure. 381 * Required for the net_device structure.
398 */ 382 */
@@ -402,7 +386,6 @@ xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map)
402 return 0; 386 return 0;
403} 387}
404 388
405
406/* 389/*
407 * Return statistics to the caller. 390 * Return statistics to the caller.
408 */ 391 */
@@ -411,13 +394,11 @@ xpnet_dev_get_stats(struct net_device *dev)
411{ 394{
412 struct xpnet_dev_private *priv; 395 struct xpnet_dev_private *priv;
413 396
414 397 priv = (struct xpnet_dev_private *)dev->priv;
415 priv = (struct xpnet_dev_private *) dev->priv;
416 398
417 return &priv->stats; 399 return &priv->stats;
418} 400}
419 401
420
421/* 402/*
422 * Notification that the other end has received the message and 403 * Notification that the other end has received the message and
423 * DMA'd the skb information. At this point, they are done with 404 * DMA'd the skb information. At this point, they are done with
@@ -426,11 +407,9 @@ xpnet_dev_get_stats(struct net_device *dev)
426 */ 407 */
427static void 408static void
428xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel, 409xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
429 void *__qm) 410 void *__qm)
430{ 411{
431 struct xpnet_pending_msg *queued_msg = 412 struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;
432 (struct xpnet_pending_msg *) __qm;
433
434 413
435 DBUG_ON(queued_msg == NULL); 414 DBUG_ON(queued_msg == NULL);
436 415
@@ -439,14 +418,13 @@ xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
439 418
440 if (atomic_dec_return(&queued_msg->use_count) == 0) { 419 if (atomic_dec_return(&queued_msg->use_count) == 0) {
441 		dev_dbg(xpnet, "all acks for skb->head=0x%p\n", 420 		dev_dbg(xpnet, "all acks for skb->head=0x%p\n",
442 (void *) queued_msg->skb->head); 421 (void *)queued_msg->skb->head);
443 422
444 dev_kfree_skb_any(queued_msg->skb); 423 dev_kfree_skb_any(queued_msg->skb);
445 kfree(queued_msg); 424 kfree(queued_msg);
446 } 425 }
447} 426}
448 427
449
450/* 428/*
451 * Network layer has formatted a packet (skb) and is ready to place it 429 * Network layer has formatted a packet (skb) and is ready to place it
452 * "on the wire". Prepare and send an xpnet_message to all partitions 430 * "on the wire". Prepare and send an xpnet_message to all partitions
@@ -469,16 +447,13 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
469 struct xpnet_dev_private *priv; 447 struct xpnet_dev_private *priv;
470 u16 embedded_bytes; 448 u16 embedded_bytes;
471 449
472 450 priv = (struct xpnet_dev_private *)dev->priv;
473 priv = (struct xpnet_dev_private *) dev->priv;
474
475 451
476 dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " 452 dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
477 "skb->end=0x%p skb->len=%d\n", (void *) skb->head, 453 "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
478 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), 454 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
479 skb->len); 455 skb->len);
480 456
481
482 /* 457 /*
483 * The xpnet_pending_msg tracks how many outstanding 458 * The xpnet_pending_msg tracks how many outstanding
484 * xpc_send_notifies are relying on this skb. When none 459 * xpc_send_notifies are relying on this skb. When none
@@ -487,16 +462,15 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
487 queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC); 462 queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
488 if (queued_msg == NULL) { 463 if (queued_msg == NULL) {
489 dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping " 464 dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
490 "packet\n", sizeof(struct xpnet_pending_msg)); 465 "packet\n", sizeof(struct xpnet_pending_msg));
491 466
492 priv->stats.tx_errors++; 467 priv->stats.tx_errors++;
493 468
494 return -ENOMEM; 469 return -ENOMEM;
495 } 470 }
496 471
497
498 /* get the beginning of the first cacheline and end of last */ 472 /* get the beginning of the first cacheline and end of last */
499 start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1)); 473 start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1));
500 end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb)); 474 end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
501 475
502 /* calculate how many bytes to embed in the XPC message */ 476 /* calculate how many bytes to embed in the XPC message */
@@ -506,7 +480,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
506 embedded_bytes = skb->len; 480 embedded_bytes = skb->len;
507 } 481 }
508 482
509
510 /* 483 /*
511 * Since the send occurs asynchronously, we set the count to one 484 * Since the send occurs asynchronously, we set the count to one
512 * and begin sending. Any sends that happen to complete before 485 * and begin sending. Any sends that happen to complete before
@@ -517,14 +490,13 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
517 atomic_set(&queued_msg->use_count, 1); 490 atomic_set(&queued_msg->use_count, 1);
518 queued_msg->skb = skb; 491 queued_msg->skb = skb;
519 492
520
521 second_mac_octet = skb->data[XPNET_PARTID_OCTET]; 493 second_mac_octet = skb->data[XPNET_PARTID_OCTET];
522 if (second_mac_octet == 0xff) { 494 if (second_mac_octet == 0xff) {
523 /* we are being asked to broadcast to all partitions */ 495 /* we are being asked to broadcast to all partitions */
524 dp = xpnet_broadcast_partitions; 496 dp = xpnet_broadcast_partitions;
525 } else if (second_mac_octet != 0) { 497 } else if (second_mac_octet != 0) {
526 dp = xpnet_broadcast_partitions & 498 dp = xpnet_broadcast_partitions &
527 (1UL << (second_mac_octet - 1)); 499 (1UL << (second_mac_octet - 1));
528 } else { 500 } else {
529 /* 0 is an invalid partid. Ignore */ 501 /* 0 is an invalid partid. Ignore */
530 dp = 0; 502 dp = 0;
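
The transmit path above reads the partid octet of the destination MAC address (XPNET_PARTID_OCTET) and turns it into a mask of destination partitions: 0xff broadcasts to every connected partition, a nonzero value selects that one partition if it is connected, and 0 is an invalid partid that drops the packet. A small sketch of that mapping:

    #include <stdint.h>
    #include <stdio.h>

    /* Models how xpnet_dev_hard_start_xmit() computes dp from the MAC octet. */
    static uint64_t dest_mask(uint8_t octet, uint64_t broadcast_partitions)
    {
        if (octet == 0xff)          /* broadcast to every connected partition */
            return broadcast_partitions;
        if (octet != 0)             /* unicast, if that partition is connected */
            return broadcast_partitions & (1ULL << (octet - 1));
        return 0;                   /* partid 0 is invalid: drop the packet */
    }

    int main(void)
    {
        uint64_t up = 0x46;         /* partitions 2, 3 and 7 connected */
        printf("bcast=0x%llx uni=0x%llx bad=0x%llx\n",
               (unsigned long long)dest_mask(0xff, up),
               (unsigned long long)dest_mask(3, up),
               (unsigned long long)dest_mask(0, up));
        return 0;
    }
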
@@ -543,7 +515,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
543 for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS; 515 for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
544 dest_partid++) { 516 dest_partid++) {
545 517
546
547 if (!(dp & (1UL << (dest_partid - 1)))) { 518 if (!(dp & (1UL << (dest_partid - 1)))) {
548 /* not destined for this partition */ 519 /* not destined for this partition */
549 continue; 520 continue;
@@ -552,7 +523,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
552 /* remove this partition from the destinations mask */ 523 /* remove this partition from the destinations mask */
553 dp &= ~(1UL << (dest_partid - 1)); 524 dp &= ~(1UL << (dest_partid - 1));
554 525
555
556 /* found a partition to send to */ 526 /* found a partition to send to */
557 527
558 ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, 528 ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
@@ -565,7 +535,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
565 if (unlikely(embedded_bytes != 0)) { 535 if (unlikely(embedded_bytes != 0)) {
566 msg->version = XPNET_VERSION_EMBED; 536 msg->version = XPNET_VERSION_EMBED;
567 dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n", 537 dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
568 &msg->data, skb->data, (size_t) embedded_bytes); 538 &msg->data, skb->data, (size_t)embedded_bytes);
569 skb_copy_from_linear_data(skb, &msg->data, 539 skb_copy_from_linear_data(skb, &msg->data,
570 (size_t)embedded_bytes); 540 (size_t)embedded_bytes);
571 } else { 541 } else {
@@ -573,7 +543,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
573 } 543 }
574 msg->magic = XPNET_MAGIC; 544 msg->magic = XPNET_MAGIC;
575 msg->size = end_addr - start_addr; 545 msg->size = end_addr - start_addr;
576 msg->leadin_ignore = (u64) skb->data - start_addr; 546 msg->leadin_ignore = (u64)skb->data - start_addr;
577 msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); 547 msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
578 msg->buf_pa = __pa(start_addr); 548 msg->buf_pa = __pa(start_addr);
579 549
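
msg->size, msg->leadin_ignore and msg->tailout_ignore above exist because the BTE moves whole cachelines: the sender rounds skb->data down and skb_tail_pointer() up to L1_CACHE_BYTES boundaries, ships the rounded region, and tells the receiver how many leading and trailing slack bytes to discard. The arithmetic, modelled in userspace (L1_CACHE_BYTES is 128 on ia64; CACHE_ALIGN models L1_CACHE_ALIGN, and the addresses are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define L1_CACHE_BYTES 128ULL
    #define CACHE_ALIGN(a) (((a) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

    int main(void)
    {
        uint64_t data = 0x10013;    /* pretend skb->data */
        uint64_t tail = 0x104f0;    /* pretend skb_tail_pointer() */

        uint64_t start_addr = data & ~(L1_CACHE_BYTES - 1); /* round down */
        uint64_t end_addr   = CACHE_ALIGN(tail);            /* round up */

        uint64_t size           = end_addr - start_addr;    /* msg->size */
        uint64_t leadin_ignore  = data - start_addr;        /* msg->leadin_ignore */
        uint64_t tailout_ignore = end_addr - tail;          /* msg->tailout_ignore */

        printf("size=%llu leadin=%llu tailout=%llu\n",
               (unsigned long long)size,
               (unsigned long long)leadin_ignore,
               (unsigned long long)tailout_ignore);          /* 1280 19 16 */
        return 0;
    }
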
@@ -583,7 +553,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
583 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, 553 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
584 msg->leadin_ignore, msg->tailout_ignore); 554 msg->leadin_ignore, msg->tailout_ignore);
585 555
586
587 atomic_inc(&queued_msg->use_count); 556 atomic_inc(&queued_msg->use_count);
588 557
589 ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, 558 ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
@@ -599,7 +568,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
599 dev_dbg(xpnet, "no partitions to receive packet destined for " 568 dev_dbg(xpnet, "no partitions to receive packet destined for "
600 "%d\n", dest_partid); 569 "%d\n", dest_partid);
601 570
602
603 dev_kfree_skb(skb); 571 dev_kfree_skb(skb);
604 kfree(queued_msg); 572 kfree(queued_msg);
605 } 573 }
@@ -610,23 +578,20 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
610 return 0; 578 return 0;
611} 579}
612 580
613
614/* 581/*
615 * Deal with transmit timeouts coming from the network layer. 582 * Deal with transmit timeouts coming from the network layer.
616 */ 583 */
617static void 584static void
618xpnet_dev_tx_timeout (struct net_device *dev) 585xpnet_dev_tx_timeout(struct net_device *dev)
619{ 586{
620 struct xpnet_dev_private *priv; 587 struct xpnet_dev_private *priv;
621 588
622 589 priv = (struct xpnet_dev_private *)dev->priv;
623 priv = (struct xpnet_dev_private *) dev->priv;
624 590
625 priv->stats.tx_errors++; 591 priv->stats.tx_errors++;
626 return; 592 return;
627} 593}
628 594
629
630static int __init 595static int __init
631xpnet_init(void) 596xpnet_init(void)
632{ 597{
@@ -634,7 +599,6 @@ xpnet_init(void)
634 u32 license_num; 599 u32 license_num;
635 int result = -ENOMEM; 600 int result = -ENOMEM;
636 601
637
638 if (!ia64_platform_is("sn2")) { 602 if (!ia64_platform_is("sn2")) {
639 return -ENODEV; 603 return -ENODEV;
640 } 604 }
@@ -672,7 +636,7 @@ xpnet_init(void)
672 license_num = sn_partition_serial_number_val(); 636 license_num = sn_partition_serial_number_val();
673 for (i = 3; i >= 0; i--) { 637 for (i = 3; i >= 0; i--) {
674 xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] = 638 xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
675 license_num & 0xff; 639 license_num & 0xff;
676 license_num = license_num >> 8; 640 license_num = license_num >> 8;
677 } 641 }
678 642
@@ -696,23 +660,22 @@ xpnet_init(void)
696 660
697 return result; 661 return result;
698} 662}
699module_init(xpnet_init);
700 663
664module_init(xpnet_init);
701 665
702static void __exit 666static void __exit
703xpnet_exit(void) 667xpnet_exit(void)
704{ 668{
705 dev_info(xpnet, "unregistering network device %s\n", 669 dev_info(xpnet, "unregistering network device %s\n",
706 xpnet_device[0].name); 670 xpnet_device[0].name);
707 671
708 unregister_netdev(xpnet_device); 672 unregister_netdev(xpnet_device);
709 673
710 free_netdev(xpnet_device); 674 free_netdev(xpnet_device);
711} 675}
712module_exit(xpnet_exit);
713 676
677module_exit(xpnet_exit);
714 678
715MODULE_AUTHOR("Silicon Graphics, Inc."); 679MODULE_AUTHOR("Silicon Graphics, Inc.");
716MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)"); 680MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
717MODULE_LICENSE("GPL"); 681MODULE_LICENSE("GPL");
718
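
A closing note on the send-side lifetime rule visible in xpnet_send_completed() and xpnet_dev_hard_start_xmit(): queued_msg->use_count starts at 1 (the sender's own reference), each xpc_send_notify() adds one, and whichever decrement reaches zero, completion callback or sender, frees the skb and the cookie. The initial reference is what keeps an immediately completing send from freeing the message while later sends are still being queued. A userspace model with C11 atomics, with names mirroring but not reproducing the driver's:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pending {
        atomic_int use_count;   /* the skb pointer lives here in the driver */
    };

    static void put(struct pending *p)
    {
        /* atomic_fetch_sub returns the old value; 1 means we hit zero */
        if (atomic_fetch_sub(&p->use_count, 1) == 1) {
            printf("last reference: free skb and cookie\n");
            free(p);
        }
    }

    static void send_completed(struct pending *p) { put(p); } /* XPC callback */

    int main(void)
    {
        struct pending *p = malloc(sizeof(*p));
        atomic_init(&p->use_count, 1);          /* sender's own reference */

        for (int dest = 0; dest < 3; dest++) {
            atomic_fetch_add(&p->use_count, 1); /* one per xpc_send_notify() */
            send_completed(p);                  /* completion may arrive at once */
        }
        put(p);                                 /* drop the initial reference */
        return 0;
    }
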