author    Dean Nelson <dcn@sgi.com>              2008-04-22 15:50:17 -0400
committer Tony Luck <tony.luck@intel.com>        2008-04-22 18:08:55 -0400
commit    2c2b94f93f4732c3b9703ce62627e6187e7d6128
tree      47fbdee38bc7cf0eec8c7c254a6c1c045ebbdb7e /drivers/misc/sgi-xp
parent    35190506b1a18eda7df24b285fdcd94dec7800ef
[IA64] run drivers/misc/sgi-xp through scripts/checkpatch.pl
Addressed issues raised by scripts/checkpatch.pl. Removed unnecessary curly
braces. Eliminated uses of volatiles and use of kernel_thread() and
daemonize().

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
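Most of the churn in the diff below follows two checkpatch rules: no braces
around a single-statement branch, and no assignment buried inside an if ()
condition. A minimal before/after sketch of the transformation (illustrative
only; do_work() is a stand-in, not a function from this driver):

	static int do_work(void)
	{
		return 0;
	}

	/* before: assignment inside the if(), braces on a one-line body */
	static int old_style(void)
	{
		int ret;

		if ((ret = do_work()) != 0) {
			return ret;
		}
		return 0;
	}

	/* after: the form this patch converts such sites to */
	static int new_style(void)
	{
		int ret;

		ret = do_work();
		if (ret != 0)
			return ret;
		return 0;
	}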
Diffstat (limited to 'drivers/misc/sgi-xp')
-rw-r--r--   drivers/misc/sgi-xp/xp.h            |   6
-rw-r--r--   drivers/misc/sgi-xp/xp_main.c       |  41
-rw-r--r--   drivers/misc/sgi-xp/xpc.h           | 124
-rw-r--r--   drivers/misc/sgi-xp/xpc_channel.c   | 192
-rw-r--r--   drivers/misc/sgi-xp/xpc_main.c      | 167
-rw-r--r--   drivers/misc/sgi-xp/xpc_partition.c |  99
-rw-r--r--   drivers/misc/sgi-xp/xpnet.c         |  30
7 files changed, 268 insertions, 391 deletions
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 87171682664..5515234be86 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -79,9 +79,9 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 
 	ret = bte_copy(src, pdst, len, mode, notification);
 	if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) {
-		if (!in_interrupt()) {
+		if (!in_interrupt())
 			cond_resched();
-		}
+
 		ret = bte_copy(src, pdst, len, mode, notification);
 	}
 
@@ -255,7 +255,7 @@ enum xpc_retval {
 	/* 115: BTE end */
 	xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL,
 
-	xpcUnknownReason	/* 116: unknown reason -- must be last in list */
+	xpcUnknownReason	/* 116: unknown reason - must be last in enum */
 };
 
 /*
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index bb9257642fc..1fbf99bae96 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -23,15 +23,21 @@
 #include "xp.h"
 
 /*
- * Target of nofault PIO read.
+ * The export of xp_nofault_PIOR needs to happen here since it is defined
+ * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
+ * defined here.
  */
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
+
 u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
 
 /*
  * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
  * users of XPC.
  */
 struct xpc_registration xpc_registrations[XPC_NCHANNELS];
+EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
  * Initialize the XPC interface to indicate that XPC isn't loaded.
@@ -52,6 +58,7 @@ struct xpc_interface xpc_interface = {
 	(void (*)(partid_t, int, void *))xpc_notloaded,
 	(enum xpc_retval(*)(partid_t, void *))xpc_notloaded
 };
+EXPORT_SYMBOL_GPL(xpc_interface);
 
 /*
  * XPC calls this when it (the XPC module) has been loaded.
@@ -74,6 +81,7 @@ xpc_set_interface(void (*connect) (int),
 	xpc_interface.received = received;
 	xpc_interface.partid_to_nasids = partid_to_nasids;
 }
+EXPORT_SYMBOL_GPL(xpc_set_interface);
 
 /*
  * XPC calls this when it (the XPC module) is being unloaded.
@@ -95,6 +103,7 @@ xpc_clear_interface(void)
 	xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *))
 	    xpc_notloaded;
 }
+EXPORT_SYMBOL_GPL(xpc_clear_interface);
 
 /*
  * Register for automatic establishment of a channel connection whenever
@@ -133,9 +142,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
 	registration = &xpc_registrations[ch_number];
 
-	if (mutex_lock_interruptible(&registration->mutex) != 0) {
+	if (mutex_lock_interruptible(&registration->mutex) != 0)
 		return xpcInterrupted;
-	}
 
 	/* if XPC_CHANNEL_REGISTERED(ch_number) */
 	if (registration->func != NULL) {
@@ -157,6 +165,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
 	return xpcSuccess;
 }
+EXPORT_SYMBOL_GPL(xpc_connect);
 
 /*
  * Remove the registration for automatic connection of the specified channel
@@ -207,6 +216,7 @@ xpc_disconnect(int ch_number)
 
 	return;
 }
+EXPORT_SYMBOL_GPL(xpc_disconnect);
 
 int __init
 xp_init(void)
@@ -215,9 +225,8 @@ xp_init(void)
 	u64 func_addr = *(u64 *)xp_nofault_PIOR;
 	u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
-	if (!ia64_platform_is("sn2")) {
+	if (!ia64_platform_is("sn2"))
 		return -ENODEV;
-	}
 
 	/*
 	 * Register a nofault code region which performs a cross-partition
@@ -228,8 +237,9 @@ xp_init(void)
 	 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
 	 * work around).
 	 */
-	if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
-					    err_func_addr, 1, 1)) != 0) {
+	ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
+				       1, 1);
+	if (ret != 0) {
 		printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
 		       ret);
 	}
@@ -237,16 +247,14 @@ xp_init(void)
 	 * Setup the nofault PIO read target. (There is no special reason why
 	 * SH_IPI_ACCESS was selected.)
 	 */
-	if (is_shub2()) {
+	if (is_shub2())
 		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
-	} else {
+	else
 		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
-	}
 
 	/* initialize the connection registration mutex */
-	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
+	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++)
 		mutex_init(&xpc_registrations[ch_number].mutex);
-	}
 
 	return 0;
 }
@@ -269,12 +277,3 @@ module_exit(xp_exit);
 MODULE_AUTHOR("Silicon Graphics, Inc.");
 MODULE_DESCRIPTION("Cross Partition (XP) base");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(xp_nofault_PIOR);
-EXPORT_SYMBOL(xp_nofault_PIOR_target);
-EXPORT_SYMBOL(xpc_registrations);
-EXPORT_SYMBOL(xpc_interface);
-EXPORT_SYMBOL(xpc_clear_interface);
-EXPORT_SYMBOL(xpc_set_interface);
-EXPORT_SYMBOL(xpc_connect);
-EXPORT_SYMBOL(xpc_disconnect);
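Note that the xp_main.c hunks above also retire the EXPORT_SYMBOL() block at
the bottom of the file: each symbol is now exported with EXPORT_SYMBOL_GPL()
on the line following its definition. A hedged sketch of the convention
(example_counter is invented for illustration, not part of xp):

	#include <linux/module.h>

	int example_counter;			/* the definition... */
	EXPORT_SYMBOL_GPL(example_counter);	/* ...and its export, side by side */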
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 64368bb8889..9eb6d4a3269 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -110,16 +110,16 @@ struct xpc_rsvd_page {
 	u8 partid;		/* SAL: partition ID */
 	u8 version;
 	u8 pad1[6];		/* align to next u64 in cacheline */
-	volatile u64 vars_pa;
+	u64 vars_pa;		/* physical address of struct xpc_vars */
 	struct timespec stamp;	/* time when reserved page was setup by XPC */
 	u64 pad2[9];		/* align to last u64 in cacheline */
 	u64 nasids_size;	/* SAL: size of each nasid mask in bytes */
 };
 
-#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
+#define XPC_RP_VERSION _XPC_VERSION(1, 1) /* version 1.1 of the reserved page */
 
 #define XPC_SUPPORTS_RP_STAMP(_version) \
-			(_version >= _XPC_VERSION(1,1))
+			(_version >= _XPC_VERSION(1, 1))
 
 /*
  * compare stamps - the return value is:
@@ -133,9 +133,10 @@ xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
 {
 	int ret;
 
-	if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
+	ret = stamp1->tv_sec - stamp2->tv_sec;
+	if (ret == 0)
 		ret = stamp1->tv_nsec - stamp2->tv_nsec;
-	}
+
 	return ret;
 }
 
@@ -165,10 +166,10 @@ struct xpc_vars {
 	AMO_t *amos_page;	/* vaddr of page of AMOs from MSPEC driver */
 };
 
-#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
+#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */
 
 #define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
-			(_version >= _XPC_VERSION(3,1))
+			(_version >= _XPC_VERSION(3, 1))
 
 static inline int
 xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
@@ -224,7 +225,7 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
  * occupies half a cacheline.
  */
 struct xpc_vars_part {
-	volatile u64 magic;
+	u64 magic;
 
 	u64 openclose_args_pa;	/* physical address of open and close args */
 	u64 GPs_pa;		/* physical address of Get/Put values */
@@ -247,18 +248,20 @@ struct xpc_vars_part {
  * MAGIC2 indicates that this partition has pulled the remote partititions
  * per partition variables that pertain to this partition.
  */
 #define XPC_VP_MAGIC1	0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
 #define XPC_VP_MAGIC2	0x0073726176435058L /* 'XPCvars\0'L (little endian) */
 
 /* the reserved page sizes and offsets */
 
 #define XPC_RP_HEADER_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
 #define XPC_RP_VARS_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_vars))
 
-#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE)
+#define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE))
 #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS(_rp) ((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS_PART(_rp) (struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE)
+#define XPC_RP_VARS(_rp) ((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \
+				    xp_nasid_mask_words))
+#define XPC_RP_VARS_PART(_rp) ((struct xpc_vars_part *) \
+				((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE))
 
 /*
  * Functions registered by add_timer() or called by kernel_thread() only
@@ -277,8 +280,8 @@ struct xpc_vars_part {
  * Define a Get/Put value pair (pointers) used with a message queue.
  */
 struct xpc_gp {
-	volatile s64 get;	/* Get value */
-	volatile s64 put;	/* Put value */
+	s64 get;	/* Get value */
+	s64 put;	/* Put value */
 };
 
 #define XPC_GP_SIZE \
@@ -315,7 +318,7 @@ struct xpc_openclose_args {
  * and consumed by the intended recipient.
  */
 struct xpc_notify {
-	volatile u8 type;	/* type of notification */
+	u8 type;		/* type of notification */
 
 	/* the following two fields are only used if type == XPC_N_CALL */
 	xpc_notify_func func;	/* user's notify function */
@@ -421,8 +424,8 @@ struct xpc_channel {
 	void *local_msgqueue_base;	/* base address of kmalloc'd space */
 	struct xpc_msg *local_msgqueue;	/* local message queue */
 	void *remote_msgqueue_base;	/* base address of kmalloc'd space */
 	struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
 					 /* local message queue */
 	u64 remote_msgqueue_pa;	/* phys addr of remote partition's */
 				/* local message queue */
 
@@ -437,16 +440,16 @@ struct xpc_channel {
 	/* queue of msg senders who want to be notified when msg received */
 
 	atomic_t n_to_notify;	/* #of msg senders to notify */
 	struct xpc_notify *notify_queue;    /* notify queue for messages sent */
 
 	xpc_channel_func func;	/* user's channel function */
 	void *key;		/* pointer to user's key */
 
 	struct mutex msg_to_pull_mutex;	/* next msg to pull serialization */
 	struct completion wdisconnect_wait;    /* wait for channel disconnect */
 
 	struct xpc_openclose_args *local_openclose_args; /* args passed on */
 					     /* opening or closing of channel */
 
 	/* various flavors of local and remote Get/Put values */
 
@@ -458,16 +461,11 @@ struct xpc_channel {
 
 	/* kthread management related fields */
 
-// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
-// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
-// >>> dependent on activity over the last interval of time
 	atomic_t kthreads_assigned;	/* #of kthreads assigned to channel */
 	u32 kthreads_assigned_limit;	/* limit on #of kthreads assigned */
 	atomic_t kthreads_idle;	/* #of kthreads idle waiting for work */
 	u32 kthreads_idle_limit;	/* limit on #of kthreads idle */
 	atomic_t kthreads_active;	/* #of kthreads actively working */
-	// >>> following field is temporary
-	u32 kthreads_created;	/* total #of kthreads created */
 
 	wait_queue_head_t idle_wq;	/* idle kthread wait queue */
 
@@ -479,28 +477,28 @@ struct xpc_channel {
 
 #define XPC_C_ROPENREPLY	0x00000002 /* remote open channel reply */
 #define XPC_C_OPENREPLY		0x00000004 /* local open channel reply */
 #define XPC_C_ROPENREQUEST	0x00000008 /* remote open channel request */
 #define XPC_C_OPENREQUEST	0x00000010 /* local open channel request */
 
 #define XPC_C_SETUP		0x00000020 /* channel's msgqueues are alloc'd */
 #define XPC_C_CONNECTEDCALLOUT	0x00000040 /* connected callout initiated */
 #define XPC_C_CONNECTEDCALLOUT_MADE \
 				0x00000080 /* connected callout completed */
 #define XPC_C_CONNECTED		0x00000100 /* local channel is connected */
 #define XPC_C_CONNECTING	0x00000200 /* channel is being connected */
 
 #define XPC_C_RCLOSEREPLY	0x00000400 /* remote close channel reply */
 #define XPC_C_CLOSEREPLY	0x00000800 /* local close channel reply */
 #define XPC_C_RCLOSEREQUEST	0x00001000 /* remote close channel request */
 #define XPC_C_CLOSEREQUEST	0x00002000 /* local close channel request */
 
 #define XPC_C_DISCONNECTED	0x00004000 /* channel is disconnected */
 #define XPC_C_DISCONNECTING	0x00008000 /* channel is being disconnected */
 #define XPC_C_DISCONNECTINGCALLOUT \
 				0x00010000 /* disconnecting callout initiated */
 #define XPC_C_DISCONNECTINGCALLOUT_MADE \
 				0x00020000 /* disconnecting callout completed */
 #define XPC_C_WDISCONNECT	0x00040000 /* waiting for channel disconnect */
 
 /*
  * Manages channels on a partition basis. There is one of these structures
@@ -512,7 +510,7 @@ struct xpc_partition {
 	/* XPC HB infrastructure */
 
 	u8 remote_rp_version;	/* version# of partition's rsvd pg */
 	struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */
 	u64 remote_rp_pa;	/* phys addr of partition's rsvd pg */
 	u64 remote_vars_pa;	/* phys addr of partition's vars */
 	u64 remote_vars_part_pa; /* phys addr of partition's vars part */
@@ -533,7 +531,7 @@ struct xpc_partition {
 
 	/* XPC infrastructure referencing and teardown control */
 
-	volatile u8 setup_state;	/* infrastructure setup state */
+	u8 setup_state;			/* infrastructure setup state */
 	wait_queue_head_t teardown_wq;	/* kthread waiting to teardown infra */
 	atomic_t references;	/* #of references to infrastructure */
 
@@ -545,32 +543,32 @@ struct xpc_partition {
 	 */
 
 	u8 nchannels;		/* #of defined channels supported */
 	atomic_t nchannels_active;  /* #of channels that are not DISCONNECTED */
 	atomic_t nchannels_engaged;  /* #of channels engaged with remote part */
 	struct xpc_channel *channels;	/* array of channel structures */
 
 	void *local_GPs_base;	/* base address of kmalloc'd space */
 	struct xpc_gp *local_GPs;	/* local Get/Put values */
 	void *remote_GPs_base;	/* base address of kmalloc'd space */
-	struct xpc_gp *remote_GPs;  /* copy of remote partition's local Get/Put */
-				    /* values */
+	struct xpc_gp *remote_GPs;	/* copy of remote partition's local */
+					/* Get/Put values */
 	u64 remote_GPs_pa;	/* phys address of remote partition's local */
 				/* Get/Put values */
 
 	/* fields used to pass args when opening or closing a channel */
 
 	void *local_openclose_args_base;   /* base address of kmalloc'd space */
 	struct xpc_openclose_args *local_openclose_args;      /* local's args */
 	void *remote_openclose_args_base;  /* base address of kmalloc'd space */
 	struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
 							  /* args */
 	u64 remote_openclose_args_pa;	/* phys addr of remote's args */
 
 	/* IPI sending, receiving and handling related fields */
 
 	int remote_IPI_nasid;	/* nasid of where to send IPIs */
 	int remote_IPI_phys_cpuid;	/* phys CPU ID of where to send IPIs */
 	AMO_t *remote_IPI_amo_va;    /* address of remote IPI AMO_t structure */
 
 	AMO_t *local_IPI_amo_va;	/* address of IPI AMO_t structure */
 	u64 local_IPI_amo;	/* IPI amo flags yet to be handled */
@@ -678,9 +676,8 @@ extern void xpc_teardown_infrastructure(struct xpc_partition *);
 static inline void
 xpc_wakeup_channel_mgr(struct xpc_partition *part)
 {
-	if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
+	if (atomic_inc_return(&part->channel_mgr_requests) == 1)
 		wake_up(&part->channel_mgr_wq);
-	}
 }
 
 /*
@@ -699,9 +696,8 @@ xpc_msgqueue_deref(struct xpc_channel *ch)
 	s32 refs = atomic_dec_return(&ch->references);
 
 	DBUG_ON(refs < 0);
-	if (refs == 0) {
+	if (refs == 0)
 		xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
-	}
 }
 
 #define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
@@ -717,9 +713,8 @@ xpc_part_deref(struct xpc_partition *part)
 	s32 refs = atomic_dec_return(&part->references);
 
 	DBUG_ON(refs < 0);
-	if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
+	if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN)
 		wake_up(&part->teardown_wq);
-	}
 }
 
 static inline int
@@ -729,9 +724,9 @@ xpc_part_ref(struct xpc_partition *part)
 
 	atomic_inc(&part->references);
 	setup = (part->setup_state == XPC_P_SETUP);
-	if (!setup) {
+	if (!setup)
 		xpc_part_deref(part);
-	}
+
 	return setup;
 }
 
@@ -1007,13 +1002,11 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
 		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
 			ipi_flag_string, ch->partid, ch->number, ret);
 		if (unlikely(ret != xpcSuccess)) {
-			if (irq_flags != NULL) {
+			if (irq_flags != NULL)
 				spin_unlock_irqrestore(&ch->lock, *irq_flags);
-			}
 			XPC_DEACTIVATE_PARTITION(part, ret);
-			if (irq_flags != NULL) {
+			if (irq_flags != NULL)
 				spin_lock_irqsave(&ch->lock, *irq_flags);
-			}
 		}
 	}
 }
@@ -1056,8 +1049,8 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
 #define XPC_GET_IPI_FLAGS(_amo, _c)	((u8) (((_amo) >> ((_c) * 8)) & 0xff))
 #define XPC_SET_IPI_FLAGS(_amo, _c, _f)	(_amo) |= ((u64) (_f) << ((_c) * 8))
 
-#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f))
-#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & __IA64_UL_CONST(0x1010101010101010))
+#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
+#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)	((_amo) & 0x1010101010101010UL)
 
 static inline void
 xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
@@ -1178,9 +1171,8 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
 	unsigned long irq_flags;
 
 	IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
-	if (IPI_amo == 0) {
+	if (IPI_amo == 0)
 		return;
-	}
 
 	spin_lock_irqsave(&part->IPI_lock, irq_flags);
 	part->local_IPI_amo |= IPI_amo;
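Besides the volatile and spacing fixes, the xpc.h hunks above fully
parenthesize the reserved-page macros and make XPC_RP_VARS_PART() reference
its _rp parameter rather than a bare rp. A small sketch of why checkpatch
insists on the parentheses (names invented; plain C, independent of the
kernel):

	#include <stdint.h>

	#define HDR_SIZE 64

	/* unparenthesized body: a subscript binds before the cast */
	#define BAD(_p)	 (uint64_t *) ((uint8_t *) _p + HDR_SIZE)
	/* fully parenthesized, as in the patched macros */
	#define GOOD(_p) ((uint64_t *)((uint8_t *)(_p) + HDR_SIZE))

	/*
	 * BAD(buf)[1] indexes the byte pointer first and then casts the
	 * resulting byte to a pointer; GOOD(buf)[1] reads the second u64
	 * past the header, which is what the macro appears to do.
	 */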
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 15cb91a8210..bfcb9ea968e 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -33,19 +33,19 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
 {
 	/* see if kzalloc will give us cachline aligned memory by default */
 	*base = kzalloc(size, flags);
-	if (*base == NULL) {
+	if (*base == NULL)
 		return NULL;
-	}
-	if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) {
+
+	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
 		return *base;
-	}
+
 	kfree(*base);
 
 	/* nope, we'll have to do it ourselves */
 	*base = kzalloc(size + L1_CACHE_BYTES, flags);
-	if (*base == NULL) {
+	if (*base == NULL)
 		return NULL;
-	}
+
 	return (void *)L1_CACHE_ALIGN((u64)*base);
 }
 
@@ -264,15 +264,13 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
 	DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
 	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
 
-	if (part->act_state == XPC_P_DEACTIVATING) {
+	if (part->act_state == XPC_P_DEACTIVATING)
 		return part->reason;
-	}
 
 	bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt,
 			      (BTE_NORMAL | BTE_WACQUIRE), NULL);
-	if (bte_ret == BTE_SUCCESS) {
+	if (bte_ret == BTE_SUCCESS)
 		return xpcSuccess;
-	}
 
 	dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
 		XPC_PARTID(part), bte_ret);
@@ -359,18 +357,16 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 		part->remote_IPI_nasid = pulled_entry->IPI_nasid;
 		part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
 
-		if (part->nchannels > pulled_entry->nchannels) {
+		if (part->nchannels > pulled_entry->nchannels)
 			part->nchannels = pulled_entry->nchannels;
-		}
 
 		/* let the other side know that we've pulled their variables */
 
 		xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
 	}
 
-	if (pulled_entry->magic == XPC_VP_MAGIC1) {
+	if (pulled_entry->magic == XPC_VP_MAGIC1)
 		return xpcRetry;
-	}
 
 	return xpcSuccess;
 }
@@ -390,9 +386,10 @@ xpc_get_IPI_flags(struct xpc_partition *part)
 	 */
 
 	spin_lock_irqsave(&part->IPI_lock, irq_flags);
-	if ((IPI_amo = part->local_IPI_amo) != 0) {
+	IPI_amo = part->local_IPI_amo;
+	if (IPI_amo != 0)
 		part->local_IPI_amo = 0;
-	}
+
 	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
 
 	if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
@@ -441,20 +438,14 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
 	int nentries;
 	size_t nbytes;
 
-	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
-	// >>> iterations of the for-loop, bail if set?
-
-	// >>> should we impose a minimum #of entries? like 4 or 8?
 	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
 		ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
 								   GFP_KERNEL,
-								   &ch->
-								   local_msgqueue_base);
-		if (ch->local_msgqueue == NULL) {
+						      &ch->local_msgqueue_base);
+		if (ch->local_msgqueue == NULL)
 			continue;
-		}
 
 		nbytes = nentries * sizeof(struct xpc_notify);
 		ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
@@ -493,20 +484,14 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
 
 	DBUG_ON(ch->remote_nentries <= 0);
 
-	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
-	// >>> iterations of the for-loop, bail if set?
-
-	// >>> should we impose a minimum #of entries? like 4 or 8?
 	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
 		ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
 								    GFP_KERNEL,
-								    &ch->
-								    remote_msgqueue_base);
-		if (ch->remote_msgqueue == NULL) {
+						     &ch->remote_msgqueue_base);
+		if (ch->remote_msgqueue == NULL)
 			continue;
-		}
 
 		spin_lock_irqsave(&ch->lock, irq_flags);
 		if (nentries < ch->remote_nentries) {
@@ -538,11 +523,12 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
 
 	DBUG_ON(ch->flags & XPC_C_SETUP);
 
-	if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
+	ret = xpc_allocate_local_msgqueue(ch);
+	if (ret != xpcSuccess)
 		return ret;
-	}
 
-	if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
+	ret = xpc_allocate_remote_msgqueue(ch);
+	if (ret != xpcSuccess) {
 		kfree(ch->local_msgqueue_base);
 		ch->local_msgqueue = NULL;
 		kfree(ch->notify_queue);
@@ -582,12 +568,11 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
 		ret = xpc_allocate_msgqueues(ch);
 		spin_lock_irqsave(&ch->lock, *irq_flags);
 
-		if (ret != xpcSuccess) {
+		if (ret != xpcSuccess)
 			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
-		}
-		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
+
+		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
 			return;
-		}
 
 		DBUG_ON(!(ch->flags & XPC_C_SETUP));
 		DBUG_ON(ch->local_msgqueue == NULL);
@@ -599,9 +584,8 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
 		xpc_IPI_send_openreply(ch, irq_flags);
 	}
 
-	if (!(ch->flags & XPC_C_ROPENREPLY)) {
+	if (!(ch->flags & XPC_C_ROPENREPLY))
 		return;
-	}
 
 	DBUG_ON(ch->remote_msgqueue_pa == 0);
 
@@ -719,9 +703,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 
 	DBUG_ON(!spin_is_locked(&ch->lock));
 
-	if (!(ch->flags & XPC_C_DISCONNECTING)) {
+	if (!(ch->flags & XPC_C_DISCONNECTING))
 		return;
-	}
 
 	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
 
@@ -736,26 +719,23 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 
 	if (part->act_state == XPC_P_DEACTIVATING) {
 		/* can't proceed until the other side disengages from us */
-		if (xpc_partition_engaged(1UL << ch->partid)) {
+		if (xpc_partition_engaged(1UL << ch->partid))
 			return;
-		}
 
 	} else {
 
 		/* as long as the other side is up do the full protocol */
 
-		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
+		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
 			return;
-		}
 
 		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
 			ch->flags |= XPC_C_CLOSEREPLY;
 			xpc_IPI_send_closereply(ch, irq_flags);
 		}
 
-		if (!(ch->flags & XPC_C_RCLOSEREPLY)) {
+		if (!(ch->flags & XPC_C_RCLOSEREPLY))
 			return;
-		}
 	}
 
 	/* wake those waiting for notify completion */
@@ -815,9 +795,10 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
 
- again:
+again:
 
-	if ((ch->flags & XPC_C_DISCONNECTED) && (ch->flags & XPC_C_WDISCONNECT)) {
+	if ((ch->flags & XPC_C_DISCONNECTED) &&
+	    (ch->flags & XPC_C_WDISCONNECT)) {
 		/*
 		 * Delay processing IPI flags until thread waiting disconnect
 		 * has had a chance to see that the channel is disconnected.
@@ -890,11 +871,10 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
 
 		if (!(ch->flags & XPC_C_DISCONNECTING)) {
 			reason = args->reason;
-			if (reason <= xpcSuccess || reason > xpcUnknownReason) {
+			if (reason <= xpcSuccess || reason > xpcUnknownReason)
 				reason = xpcUnknownReason;
-			} else if (reason == xpcUnregistering) {
+			else if (reason == xpcUnregistering)
 				reason = xpcOtherUnregistering;
-			}
 
 			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
 
@@ -1068,9 +1048,8 @@ xpc_connect_channel(struct xpc_channel *ch)
 	unsigned long irq_flags;
 	struct xpc_registration *registration = &xpc_registrations[ch->number];
 
-	if (mutex_trylock(&registration->mutex) == 0) {
+	if (mutex_trylock(&registration->mutex) == 0)
 		return xpcRetry;
-	}
 
 	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
 		mutex_unlock(&registration->mutex);
@@ -1159,7 +1138,7 @@ xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
 					 (get % ch->local_nentries) *
 					 ch->msg_size);
 		msg->flags = 0;
-	} while (++get < (volatile s64)ch->remote_GP.get);
+	} while (++get < ch->remote_GP.get);
 }
 
 /*
@@ -1177,7 +1156,7 @@ xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
 					 (put % ch->remote_nentries) *
 					 ch->msg_size);
 		msg->flags = 0;
-	} while (++put < (volatile s64)ch->remote_GP.put);
+	} while (++put < ch->remote_GP.put);
 }
 
 static void
@@ -1244,9 +1223,8 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 		 * If anyone was waiting for message queue entries to become
 		 * available, wake them up.
 		 */
-		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
+		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
 			wake_up(&ch->msg_allocate_wq);
-		}
 	}
 
 	/*
@@ -1273,9 +1251,8 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1273 "delivered=%d, partid=%d, channel=%d\n", 1251 "delivered=%d, partid=%d, channel=%d\n",
1274 nmsgs_sent, ch->partid, ch->number); 1252 nmsgs_sent, ch->partid, ch->number);
1275 1253
1276 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { 1254 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
1277 xpc_activate_kthreads(ch, nmsgs_sent); 1255 xpc_activate_kthreads(ch, nmsgs_sent);
1278 }
1279 } 1256 }
1280 } 1257 }
1281 1258
@@ -1310,9 +1287,8 @@ xpc_process_channel_activity(struct xpc_partition *part)
 
 		IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
 
-		if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) {
+		if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags))
 			xpc_process_openclose_IPI(part, ch_number, IPI_flags);
-		}
 
 		ch_flags = ch->flags;	/* need an atomic snapshot of flags */
 
@@ -1323,9 +1299,8 @@ xpc_process_channel_activity(struct xpc_partition *part)
 			continue;
 		}
 
-		if (part->act_state == XPC_P_DEACTIVATING) {
+		if (part->act_state == XPC_P_DEACTIVATING)
 			continue;
-		}
 
 		if (!(ch_flags & XPC_C_CONNECTED)) {
 			if (!(ch_flags & XPC_C_OPENREQUEST)) {
@@ -1345,9 +1320,8 @@ xpc_process_channel_activity(struct xpc_partition *part)
 		 * from the other partition.
 		 */
 
-		if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) {
+		if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags))
 			xpc_process_msg_IPI(part, ch_number);
-		}
 	}
 }
 
@@ -1560,9 +1534,9 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 
 	DBUG_ON(!spin_is_locked(&ch->lock));
 
-	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
+	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
 		return;
-	}
+
 	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
 
 	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
@@ -1578,9 +1552,8 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 
 	xpc_IPI_send_closerequest(ch, irq_flags);
 
-	if (channel_was_connected) {
+	if (channel_was_connected)
 		ch->flags |= XPC_C_WASCONNECTED;
-	}
 
 	spin_unlock_irqrestore(&ch->lock, *irq_flags);
 
@@ -1595,9 +1568,8 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 	}
 
 	/* wake those waiting to allocate an entry from the local msg queue */
-	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
+	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
 		wake_up(&ch->msg_allocate_wq);
-	}
 
 	spin_lock_irqsave(&ch->lock, *irq_flags);
 }
@@ -1632,7 +1604,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
 	enum xpc_retval ret;
 
 	if (ch->flags & XPC_C_DISCONNECTING) {
-		DBUG_ON(ch->reason == xpcInterrupted);	// >>> Is this true?
+		DBUG_ON(ch->reason == xpcInterrupted);
 		return ch->reason;
 	}
 
@@ -1642,7 +1614,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
 
 	if (ch->flags & XPC_C_DISCONNECTING) {
 		ret = ch->reason;
-		DBUG_ON(ch->reason == xpcInterrupted);	// >>> Is this true?
+		DBUG_ON(ch->reason == xpcInterrupted);
 	} else if (ret == 0) {
 		ret = xpcTimeout;
 	} else {
@@ -1685,9 +1657,9 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
 
 	while (1) {
 
-		put = (volatile s64)ch->w_local_GP.put;
-		if (put - (volatile s64)ch->w_remote_GP.get <
-		    ch->local_nentries) {
+		put = ch->w_local_GP.put;
+		rmb();	/* guarantee that .put loads before .get */
+		if (put - ch->w_remote_GP.get < ch->local_nentries) {
 
 			/* There are available message entries. We need to try
 			 * to secure one for ourselves. We'll do this by trying
@@ -1711,9 +1683,8 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
 			 * that will cause the IPI handler to fetch the latest
 			 * GP values as if an IPI was sent by the other side.
 			 */
-			if (ret == xpcTimeout) {
+			if (ret == xpcTimeout)
 				xpc_IPI_send_local_msgrequest(ch);
-			}
 
 			if (flags & XPC_NOWAIT) {
 				xpc_msgqueue_deref(ch);
@@ -1772,9 +1743,8 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
 		ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
 		xpc_part_deref(part);
 
-		if (msg != NULL) {
+		if (msg != NULL)
 			*payload = &msg->payload;
-		}
 	}
 
 	return ret;
@@ -1795,17 +1765,15 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
 	while (1) {
 
 		while (1) {
-			if (put == (volatile s64)ch->w_local_GP.put) {
+			if (put == ch->w_local_GP.put)
 				break;
-			}
 
 			msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
 						 (put % ch->local_nentries) *
 						 ch->msg_size);
 
-			if (!(msg->flags & XPC_M_READY)) {
+			if (!(msg->flags & XPC_M_READY))
 				break;
-			}
 
 			put++;
 		}
@@ -1818,7 +1786,7 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
 		if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
 		    initial_put) {
 			/* someone else beat us to it */
-			DBUG_ON((volatile s64)ch->local_GP->put < initial_put);
+			DBUG_ON(ch->local_GP->put < initial_put);
 			break;
 		}
 
@@ -1837,9 +1805,8 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
 		initial_put = put;
 	}
 
-	if (send_IPI) {
+	if (send_IPI)
 		xpc_IPI_send_msgrequest(ch);
-	}
 }
 
 /*
@@ -1880,7 +1847,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
 		notify->key = key;
 		notify->type = notify_type;
 
-		// >>> is a mb() needed here?
+		/* >>> is a mb() needed here? */
 
 		if (ch->flags & XPC_C_DISCONNECTING) {
 			/*
@@ -1913,9 +1880,8 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
 	/* see if the message is next in line to be sent, if so send it */
 
 	put = ch->local_GP->put;
-	if (put == msg_number) {
+	if (put == msg_number)
 		xpc_send_msgs(ch, put);
-	}
 
 	/* drop the reference grabbed in xpc_allocate_msg() */
 	xpc_msgqueue_deref(ch);
@@ -2032,10 +1998,8 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 
 		msg_index = ch->next_msg_to_pull % ch->remote_nentries;
 
-		DBUG_ON(ch->next_msg_to_pull >=
-			(volatile s64)ch->w_remote_GP.put);
-		nmsgs = (volatile s64)ch->w_remote_GP.put -
-		    ch->next_msg_to_pull;
+		DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put);
+		nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull;
 		if (msg_index + nmsgs > ch->remote_nentries) {
 			/* ignore the ones that wrap the msg queue for now */
 			nmsgs = ch->remote_nentries - msg_index;
@@ -2046,9 +2010,9 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 		remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
 						msg_offset);
 
-		if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
-						      nmsgs * ch->msg_size)) !=
-		    xpcSuccess) {
+		ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
+						 nmsgs * ch->msg_size);
+		if (ret != xpcSuccess) {
 
 			dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
 				" msg %ld from partition %d, channel=%d, "
@@ -2061,8 +2025,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 			return NULL;
 		}
 
-		mb();	/* >>> this may not be needed, we're not sure */
-
 		ch->next_msg_to_pull += nmsgs;
 	}
 
@@ -2085,14 +2047,13 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
 	s64 get;
 
 	do {
-		if ((volatile u32)ch->flags & XPC_C_DISCONNECTING) {
+		if (ch->flags & XPC_C_DISCONNECTING)
 			break;
-		}
 
-		get = (volatile s64)ch->w_local_GP.get;
-		if (get == (volatile s64)ch->w_remote_GP.put) {
+		get = ch->w_local_GP.get;
+		rmb();	/* guarantee that .get loads before .put */
+		if (get == ch->w_remote_GP.put)
 			break;
-		}
 
 		/* There are messages waiting to be pulled and delivered.
 		 * We need to try to secure one for ourselves. We'll do this
@@ -2132,7 +2093,8 @@ xpc_deliver_msg(struct xpc_channel *ch)
 {
 	struct xpc_msg *msg;
 
-	if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
+	msg = xpc_get_deliverable_msg(ch);
+	if (msg != NULL) {
 
 		/*
 		 * This ref is taken to protect the payload itself from being
@@ -2178,17 +2140,15 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
 	while (1) {
 
 		while (1) {
-			if (get == (volatile s64)ch->w_local_GP.get) {
+			if (get == ch->w_local_GP.get)
 				break;
-			}
 
 			msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
 						 (get % ch->remote_nentries) *
 						 ch->msg_size);
 
-			if (!(msg->flags & XPC_M_DONE)) {
+			if (!(msg->flags & XPC_M_DONE))
 				break;
-			}
 
 			msg_flags |= msg->flags;
 			get++;
@@ -2202,7 +2162,7 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
 		if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
 		    initial_get) {
 			/* someone else beat us to it */
-			DBUG_ON((volatile s64)ch->local_GP->get <= initial_get);
+			DBUG_ON(ch->local_GP->get <= initial_get);
 			break;
 		}
 
@@ -2221,9 +2181,8 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
 		initial_get = get;
 	}
 
-	if (send_IPI) {
+	if (send_IPI)
 		xpc_IPI_send_msgrequest(ch);
-	}
 }
 
 /*
@@ -2276,9 +2235,8 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload)
 	 * been delivered.
 	 */
 	get = ch->local_GP->get;
-	if (get == msg_number) {
+	if (get == msg_number)
 		xpc_acknowledge_msgs(ch, get, msg->flags);
-	}
 
 	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
 	xpc_msgqueue_deref(ch);
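The xpc_channel.c conversion away from volatile above is not purely cosmetic:
the (volatile s64) casts on the cached Get/Put values are replaced with
explicit rmb() calls so the two loads in each availability check stay ordered.
A compressed sketch of the idiom (the struct is a stand-in for xpc_gp; in
kernel code, s64 and rmb() come from headers xpc.h already pulls in):

	#include <linux/types.h>

	struct gp_pair {	/* stand-in for the xpc_gp get/put pair */
		s64 get;
		s64 put;
	};

	static int entries_available(struct gp_pair *local,
				     struct gp_pair *remote, int nentries)
	{
		s64 put = local->put;

		rmb();	/* guarantee that .put loads before .get */

		return put - remote->get < nentries;
	}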
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index d81a2dd787a..f673ba90eb0 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -46,17 +46,16 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/syscalls.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/completion.h>
 #include <linux/kdebug.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/uaccess.h>
 #include "xpc.h"
 
 /* define two XPC debug device structures to be used with dev_dbg() et al */
@@ -91,7 +90,7 @@ static int xpc_hb_check_min_interval = 10;
 static int xpc_hb_check_max_interval = 120;
 
 int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
-static int xpc_disengage_request_min_timelimit = 0;
+static int xpc_disengage_request_min_timelimit;	/* = 0 */
 static int xpc_disengage_request_max_timelimit = 120;
 
 static ctl_table xpc_sys_xpc_hb_dir[] = {
@@ -213,9 +212,8 @@ xpc_hb_beater(unsigned long dummy)
 {
 	xpc_vars->heartbeat++;
 
-	if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
+	if (time_after_eq(jiffies, xpc_hb_check_timeout))
 		wake_up_interruptible(&xpc_act_IRQ_wq);
-	}
 
 	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
 	add_timer(&xpc_hb_timer);
@@ -234,15 +232,13 @@ xpc_hb_checker(void *ignore)
 
 	/* this thread was marked active by xpc_hb_init() */
 
-	daemonize(XPC_HB_CHECK_THREAD_NAME);
-
 	set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
 
 	/* set our heartbeating to other partitions into motion */
 	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
 	xpc_hb_beater(0);
 
-	while (!(volatile int)xpc_exiting) {
+	while (!xpc_exiting) {
 
 		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
 			"been received\n",
@@ -287,7 +283,7 @@ xpc_hb_checker(void *ignore)
 				atomic_read(&xpc_act_IRQ_rcvd)
 				|| time_after_eq(jiffies,
 						 xpc_hb_check_timeout) ||
-				(volatile int)xpc_exiting));
+				xpc_exiting));
 	}
 
 	dev_dbg(xpc_part, "heartbeat checker is exiting\n");
@@ -305,8 +301,6 @@ xpc_hb_checker(void *ignore)
 static int
 xpc_initiate_discovery(void *ignore)
 {
-	daemonize(XPC_DISCOVERY_THREAD_NAME);
-
 	xpc_discovery();
 
 	dev_dbg(xpc_part, "discovery thread is exiting\n");
@@ -338,9 +332,8 @@ xpc_make_first_contact(struct xpc_partition *part)
 		/* wait a 1/4 of a second or so */
 		(void)msleep_interruptible(250);
 
-		if (part->act_state == XPC_P_DEACTIVATING) {
+		if (part->act_state == XPC_P_DEACTIVATING)
 			return part->reason;
-		}
 	}
 
 	return xpc_mark_partition_active(part);
@@ -382,22 +375,12 @@ xpc_channel_mgr(struct xpc_partition *part)
 		 */
 		atomic_dec(&part->channel_mgr_requests);
 		(void)wait_event_interruptible(part->channel_mgr_wq,
-				(atomic_read
-				 (&part->channel_mgr_requests) >
-				 0 ||
-				 (volatile u64)part->
-				 local_IPI_amo != 0 ||
-				 ((volatile u8)part->act_state ==
-				  XPC_P_DEACTIVATING &&
-				  atomic_read(&part->
-					      nchannels_active)
-				  == 0 &&
-				  xpc_partition_disengaged
-				  (part))));
+				(atomic_read(&part->channel_mgr_requests) > 0 ||
+				 part->local_IPI_amo != 0 ||
+				 (part->act_state == XPC_P_DEACTIVATING &&
+				  atomic_read(&part->nchannels_active) == 0 &&
+				  xpc_partition_disengaged(part))));
 		atomic_set(&part->channel_mgr_requests, 1);
-
-		// >>> Does it need to wakeup periodically as well? In case we
-		// >>> miscalculated the #of kthreads to wakeup or create?
 	}
 }
 
@@ -423,9 +406,8 @@ xpc_partition_up(struct xpc_partition *part)
 
 	dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
 
-	if (xpc_setup_infrastructure(part) != xpcSuccess) {
+	if (xpc_setup_infrastructure(part) != xpcSuccess)
 		return;
-	}
 
 	/*
 	 * The kthread that XPC HB called us with will become the
@@ -436,9 +418,8 @@ xpc_partition_up(struct xpc_partition *part)
 
 	(void)xpc_part_ref(part);	/* this will always succeed */
 
-	if (xpc_make_first_contact(part) == xpcSuccess) {
+	if (xpc_make_first_contact(part) == xpcSuccess)
 		xpc_channel_mgr(part);
-	}
 
 	xpc_part_deref(part);
 
@@ -451,8 +432,6 @@ xpc_activating(void *__partid)
 	partid_t partid = (u64)__partid;
 	struct xpc_partition *part = &xpc_partitions[partid];
 	unsigned long irq_flags;
-	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
-	int ret;
 
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
 
@@ -474,21 +453,6 @@ xpc_activating(void *__partid)
 
 	dev_dbg(xpc_part, "bringing partition %d up\n", partid);
 
-	daemonize("xpc%02d", partid);
-
-	/*
-	 * This thread needs to run at a realtime priority to prevent a
-	 * significant performance degradation.
-	 */
-	ret = sched_setscheduler(current, SCHED_FIFO, &param);
-	if (ret != 0) {
-		dev_warn(xpc_part, "unable to set pid %d to a realtime "
-			 "priority, ret=%d\n", current->pid, ret);
-	}
-
-	/* allow this thread and its children to run on any CPU */
-	set_cpus_allowed(current, CPU_MASK_ALL);
-
 	/*
 	 * Register the remote partition's AMOs with SAL so it can handle
 	 * and cleanup errors within that address range should the remote
@@ -537,7 +501,7 @@ xpc_activate_partition(struct xpc_partition *part)
 {
 	partid_t partid = XPC_PARTID(part);
 	unsigned long irq_flags;
-	pid_t pid;
+	struct task_struct *kthread;
 
 	spin_lock_irqsave(&part->act_lock, irq_flags);
 
@@ -548,9 +512,9 @@ xpc_activate_partition(struct xpc_partition *part)
 
 	spin_unlock_irqrestore(&part->act_lock, irq_flags);
 
-	pid = kernel_thread(xpc_activating, (void *)((u64)partid), 0);
-
-	if (unlikely(pid <= 0)) {
+	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
+			      partid);
+	if (IS_ERR(kthread)) {
 		spin_lock_irqsave(&part->act_lock, irq_flags);
 		part->act_state = XPC_P_INACTIVE;
 		XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
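This is the conversion pattern repeated throughout the patch: a kernel_thread() call plus an in-thread daemonize() collapses into a single kthread_run(), which both names the thread and returns a task_struct to test with IS_ERR() instead of a pid. A minimal standalone sketch (my_worker and my_start are invented names, not part of this driver):

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/module.h>

static int my_worker(void *arg)
{
	/* already a proper kernel thread; no daemonize() needed */
	pr_info("worker running, arg=%p\n", arg);
	return 0;		/* returning ends the thread */
}

static int my_start(int id)
{
	struct task_struct *kthread;

	kthread = kthread_run(my_worker, NULL, "my%02d", id);
	if (IS_ERR(kthread))
		return PTR_ERR(kthread);	/* no thread was created */

	return 0;
}

kthread_run() is simply kthread_create() followed by wake_up_process(), so the thread starts immediately, just as the kernel_thread() call it replaces did.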
@@ -562,7 +526,7 @@ xpc_activate_partition(struct xpc_partition *part)
  * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
  * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
  * than one partition, we use an AMO_t structure per partition to indicate
- * whether a partition has sent an IPI or not. >>> If it has, then wake up the
+ * whether a partition has sent an IPI or not. If it has, then wake up the
  * associated kthread to handle it.
  *
  * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
@@ -628,16 +592,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
 		wake_up_nr(&ch->idle_wq, wakeup);
 	}
 
-	if (needed <= 0) {
+	if (needed <= 0)
 		return;
-	}
 
 	if (needed + assigned > ch->kthreads_assigned_limit) {
 		needed = ch->kthreads_assigned_limit - assigned;
-		// >>>should never be less than 0
-		if (needed <= 0) {
+		if (needed <= 0)
 			return;
-		}
 	}
 
 	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
@@ -655,9 +616,8 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
 	do {
 		/* deliver messages to their intended recipients */
 
-		while ((volatile s64)ch->w_local_GP.get <
-		       (volatile s64)ch->w_remote_GP.put &&
-		       !((volatile u32)ch->flags & XPC_C_DISCONNECTING)) {
+		while (ch->w_local_GP.get < ch->w_remote_GP.put &&
+		       !(ch->flags & XPC_C_DISCONNECTING)) {
 			xpc_deliver_msg(ch);
 		}
 
@@ -672,21 +632,16 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
 			"wait_event_interruptible_exclusive()\n");
 
 		(void)wait_event_interruptible_exclusive(ch->idle_wq,
-				((volatile s64)ch->
-				 w_local_GP.get <
-				 (volatile s64)ch->
-				 w_remote_GP.put ||
-				 ((volatile u32)ch->
-				  flags &
-				  XPC_C_DISCONNECTING)));
+				(ch->w_local_GP.get < ch->w_remote_GP.put ||
+				 (ch->flags & XPC_C_DISCONNECTING)));
 
 		atomic_dec(&ch->kthreads_idle);
 
-	} while (!((volatile u32)ch->flags & XPC_C_DISCONNECTING));
+	} while (!(ch->flags & XPC_C_DISCONNECTING));
 }
 
 static int
-xpc_daemonize_kthread(void *args)
+xpc_kthread_start(void *args)
 {
 	partid_t partid = XPC_UNPACK_ARG1(args);
 	u16 ch_number = XPC_UNPACK_ARG2(args);
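The volatile casts deleted above were never needed: wait_event_interruptible() re-evaluates its condition expression from memory after every wakeup, and the wake_up side supplies the ordering. A minimal sketch of the same producer/consumer idiom with no volatile qualifier anywhere (my_wq and pending are illustrative names only):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int pending;		/* plain int; the wait/wake APIs do the work */

static void consumer(void)
{
	/* the condition is re-read on every wakeup, so no stale reads */
	(void)wait_event_interruptible(my_wq, pending != 0);
	pending = 0;
}

static void producer(void)
{
	pending = 1;
	wake_up_interruptible(&my_wq);
}

The same reasoning covers the while-loop conditions: each iteration re-reads ch->w_local_GP.get and ch->flags through the normal load the compiler must emit across the function calls in the loop body.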
@@ -695,8 +650,6 @@ xpc_daemonize_kthread(void *args)
 	int n_needed;
 	unsigned long irq_flags;
 
-	daemonize("xpc%02dc%d", partid, ch_number);
-
 	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
 		partid, ch_number);
 
@@ -725,9 +678,9 @@ xpc_daemonize_kthread(void *args)
 			 * need one less than total #of messages to deliver.
 			 */
 			n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
-			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) {
+			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
 				xpc_activate_kthreads(ch, n_needed);
-			}
+
 		} else {
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 		}
@@ -783,9 +736,9 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 		    int ignore_disconnecting)
 {
 	unsigned long irq_flags;
-	pid_t pid;
 	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
+	struct task_struct *kthread;
 
 	while (needed-- > 0) {
 
@@ -812,8 +765,9 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 		(void)xpc_part_ref(part);
 		xpc_msgqueue_ref(ch);
 
-		pid = kernel_thread(xpc_daemonize_kthread, (void *)args, 0);
-		if (pid < 0) {
+		kthread = kthread_run(xpc_kthread_start, (void *)args,
+				      "xpc%02dc%d", ch->partid, ch->number);
+		if (IS_ERR(kthread)) {
 			/* the fork failed */
 
 			/*
@@ -823,7 +777,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 			 * to this channel are blocked in the channel's
 			 * registerer, because the only thing that will unblock
 			 * them is the xpcDisconnecting callout that this
-			 * failed kernel_thread would have made.
+			 * failed kthread_run() would have made.
 			 */
 
 			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
@@ -848,8 +802,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 			}
 			break;
 		}
-
-		ch->kthreads_created++;	// >>> temporary debug only!!!
 	}
 }
 
@@ -866,9 +818,8 @@ xpc_disconnect_wait(int ch_number)
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
 
-		if (!xpc_part_ref(part)) {
+		if (!xpc_part_ref(part))
 			continue;
-		}
 
 		ch = &part->channels[ch_number];
 
@@ -898,9 +849,8 @@ xpc_disconnect_wait(int ch_number)
 		ch->flags &= ~XPC_C_WDISCONNECT;
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
 
-		if (wakeup_channel_mgr) {
+		if (wakeup_channel_mgr)
 			xpc_wakeup_channel_mgr(part);
-		}
 
 		xpc_part_deref(part);
 	}
@@ -1019,9 +969,8 @@ xpc_do_exit(enum xpc_retval reason)
 	/* clear the interface to XPC's functions */
 	xpc_clear_interface();
 
-	if (xpc_sysctl) {
+	if (xpc_sysctl)
 		unregister_sysctl_table(xpc_sysctl);
-	}
 
 	kfree(xpc_remote_copy_buffer_base);
 }
@@ -1071,7 +1020,8 @@ xpc_die_disengage(void)
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
 
-		if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
+		if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
+		    remote_vars_version)) {
 
 			/* just in case it was left set by an earlier XPC */
 			xpc_clear_partition_engaged(1UL << partid);
@@ -1144,9 +1094,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 
 	case DIE_KDEBUG_ENTER:
 		/* Should lack of heartbeat be ignored by other partitions? */
-		if (!xpc_kdebug_ignore) {
+		if (!xpc_kdebug_ignore)
 			break;
-		}
+
 		/* fall through */
 	case DIE_MCA_MONARCH_ENTER:
 	case DIE_INIT_MONARCH_ENTER:
@@ -1156,9 +1106,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 
 	case DIE_KDEBUG_LEAVE:
 		/* Is lack of heartbeat being ignored by other partitions? */
-		if (!xpc_kdebug_ignore) {
+		if (!xpc_kdebug_ignore)
 			break;
-		}
+
 		/* fall through */
 	case DIE_MCA_MONARCH_LEAVE:
 	case DIE_INIT_MONARCH_LEAVE:
@@ -1176,18 +1126,17 @@ xpc_init(void)
 	int ret;
 	partid_t partid;
 	struct xpc_partition *part;
-	pid_t pid;
+	struct task_struct *kthread;
 	size_t buf_size;
 
-	if (!ia64_platform_is("sn2")) {
+	if (!ia64_platform_is("sn2"))
 		return -ENODEV;
-	}
 
 	buf_size = max(XPC_RP_VARS_SIZE,
 		       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
 	xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
 						GFP_KERNEL,
 						&xpc_remote_copy_buffer_base);
 	if (xpc_remote_copy_buffer == NULL)
 		return -ENOMEM;
 
@@ -1250,9 +1199,8 @@ xpc_init(void)
 
 	xpc_restrict_IPI_ops();
 
-	if (xpc_sysctl) {
+	if (xpc_sysctl)
 		unregister_sysctl_table(xpc_sysctl);
-	}
 
 	kfree(xpc_remote_copy_buffer_base);
 	return -EBUSY;
@@ -1270,9 +1218,8 @@ xpc_init(void)
 	free_irq(SGI_XPC_ACTIVATE, NULL);
 	xpc_restrict_IPI_ops();
 
-	if (xpc_sysctl) {
+	if (xpc_sysctl)
 		unregister_sysctl_table(xpc_sysctl);
-	}
 
 	kfree(xpc_remote_copy_buffer_base);
 	return -EBUSY;
@@ -1280,15 +1227,13 @@ xpc_init(void)
 
 	/* add ourselves to the reboot_notifier_list */
 	ret = register_reboot_notifier(&xpc_reboot_notifier);
-	if (ret != 0) {
+	if (ret != 0)
 		dev_warn(xpc_part, "can't register reboot notifier\n");
-	}
 
 	/* add ourselves to the die_notifier list */
 	ret = register_die_notifier(&xpc_die_notifier);
-	if (ret != 0) {
+	if (ret != 0)
 		dev_warn(xpc_part, "can't register die notifier\n");
-	}
 
 	init_timer(&xpc_hb_timer);
 	xpc_hb_timer.function = xpc_hb_beater;
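Both registrations above share one shape: a static notifier_block whose callback runs at reboot or die time, where a failed registration is only worth a warning since XPC can still operate without the notification. A minimal sketch of the reboot side (my_reboot_event and my_reboot_nb are invented names, not from this driver):

#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kernel.h>

static int my_reboot_event(struct notifier_block *nb, unsigned long event,
			   void *unused)
{
	/* event is SYS_RESTART, SYS_HALT or SYS_POWER_OFF */
	return NOTIFY_DONE;
}

static struct notifier_block my_reboot_nb = {
	.notifier_call = my_reboot_event,
};

static int __init my_init(void)
{
	if (register_reboot_notifier(&my_reboot_nb) != 0)
		printk(KERN_WARNING "can't register reboot notifier\n");

	return 0;
}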
@@ -1297,8 +1242,8 @@ xpc_init(void)
 	 * The real work-horse behind xpc.  This processes incoming
 	 * interrupts and monitors remote heartbeats.
 	 */
-	pid = kernel_thread(xpc_hb_checker, NULL, 0);
-	if (pid < 0) {
+	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
+	if (IS_ERR(kthread)) {
 		dev_err(xpc_part, "failed while forking hb check thread\n");
 
 		/* indicate to others that our reserved page is uninitialized */
@@ -1314,9 +1259,8 @@ xpc_init(void)
 	free_irq(SGI_XPC_ACTIVATE, NULL);
 	xpc_restrict_IPI_ops();
 
-	if (xpc_sysctl) {
+	if (xpc_sysctl)
 		unregister_sysctl_table(xpc_sysctl);
-	}
 
 	kfree(xpc_remote_copy_buffer_base);
 	return -EBUSY;
@@ -1327,8 +1271,9 @@ xpc_init(void)
 	 * activate based on info provided by SAL.  This new thread is short
 	 * lived and will exit once discovery is complete.
 	 */
-	pid = kernel_thread(xpc_initiate_discovery, NULL, 0);
-	if (pid < 0) {
+	kthread = kthread_run(xpc_initiate_discovery, NULL,
+			      XPC_DISCOVERY_THREAD_NAME);
+	if (IS_ERR(kthread)) {
 		dev_err(xpc_part, "failed while forking discovery thread\n");
 
 		/* mark this new thread as a non-starter */
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 57f1d0b3ac2..27e200ec582 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -75,19 +75,19 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
 {
 	/* see if kmalloc will give us cachline aligned memory by default */
 	*base = kmalloc(size, flags);
-	if (*base == NULL) {
+	if (*base == NULL)
 		return NULL;
-	}
+
-	if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) {
+	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
 		return *base;
-	}
+
 	kfree(*base);
 
 	/* nope, we'll have to do it ourselves */
 	*base = kmalloc(size + L1_CACHE_BYTES, flags);
-	if (*base == NULL) {
+	if (*base == NULL)
 		return NULL;
-	}
+
 	return (void *)L1_CACHE_ALIGN((u64)*base);
 }
 
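A point worth keeping in mind with xpc_kmalloc_cacheline_aligned() above: the aligned pointer is handed back to the caller while the raw kmalloc() pointer is reported through *base, and only the raw pointer may ever go to kfree(). A usage sketch (use_aligned_buffer is an invented caller, not from this driver):

#include <linux/slab.h>
#include <linux/string.h>

static int use_aligned_buffer(size_t buf_size)
{
	void *base;	/* raw allocation, for kfree() */
	void *buf;	/* L1-cacheline-aligned view of it */

	buf = xpc_kmalloc_cacheline_aligned(buf_size, GFP_KERNEL, &base);
	if (buf == NULL)
		return -ENOMEM;

	memset(buf, 0, buf_size);	/* always work through buf */

	kfree(base);			/* never kfree(buf) */
	return 0;
}

This is exactly how xpc_init() pairs xpc_remote_copy_buffer with xpc_remote_copy_buffer_base in the hunks above.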
@@ -116,9 +116,8 @@ xpc_get_rsvd_page_pa(int nasid)
 			"0x%016lx, address=0x%016lx, len=0x%016lx\n",
 			status, cookie, rp_pa, len);
 
-		if (status != SALRET_MORE_PASSES) {
+		if (status != SALRET_MORE_PASSES)
 			break;
-		}
 
 		if (L1_CACHE_ALIGN(len) > buf_len) {
 			kfree(buf_base);
@@ -145,9 +144,9 @@ xpc_get_rsvd_page_pa(int nasid)
 
 	kfree(buf_base);
 
-	if (status != SALRET_OK) {
+	if (status != SALRET_OK)
 		rp_pa = 0;
-	}
+
 	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
 	return rp_pa;
 }
@@ -210,7 +209,8 @@ xpc_rsvd_page_init(void)
 	 * on subsequent loads of XPC. This AMO page is never freed, and its
 	 * memory protections are never restricted.
 	 */
-	if ((amos_page = xpc_vars->amos_page) == NULL) {
+	amos_page = xpc_vars->amos_page;
+	if (amos_page == NULL) {
 		amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0));
 		if (amos_page == NULL) {
 			dev_err(xpc_part, "can't allocate page of AMOs\n");
@@ -264,9 +264,8 @@ xpc_rsvd_page_init(void)
 					    XP_MAX_PARTITIONS);
 
 	/* initialize the activate IRQ related AMO variables */
-	for (i = 0; i < xp_nasid_mask_words; i++) {
+	for (i = 0; i < xp_nasid_mask_words; i++)
 		(void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
-	}
 
 	/* initialize the engaged remote partitions related AMO variables */
 	(void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
@@ -294,7 +293,7 @@ xpc_allow_IPI_ops(void)
 	int node;
 	int nasid;
 
-	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
+	/* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
 
 	if (is_shub2()) {
 		xpc_sh2_IPI_access0 =
@@ -336,14 +335,14 @@ xpc_allow_IPI_ops(void)
 				xpc_prot_vec[node] = (u64)HUB_L((u64 *)
-								GLOBAL_MMR_ADDR
-								(nasid,
-								 SH1_MD_DQLP_MMR_DIR_PRIVEC0));
-				HUB_S((u64 *)
-				      GLOBAL_MMR_ADDR(nasid,
-						      SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-				      -1UL);
-				HUB_S((u64 *)
-				      GLOBAL_MMR_ADDR(nasid,
-						      SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-				      -1UL);
+						GLOBAL_MMR_ADDR
+						(nasid,
+						 SH1_MD_DQLP_MMR_DIR_PRIVEC0));
+				HUB_S((u64 *)
+				      GLOBAL_MMR_ADDR(nasid,
+					      SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+				      -1UL);
+				HUB_S((u64 *)
+				      GLOBAL_MMR_ADDR(nasid,
+					      SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+				      -1UL);
 			}
 		}
@@ -360,7 +359,7 @@ xpc_restrict_IPI_ops(void)
 	int node;
 	int nasid;
 
-	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
+	/* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
 
 	if (is_shub2()) {
 
@@ -385,10 +384,10 @@ xpc_restrict_IPI_ops(void)
 
 		if (enable_shub_wars_1_1()) {
-			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
-						     SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-			      xpc_prot_vec[node]);
-			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
-						     SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-			      xpc_prot_vec[node]);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+					SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+			      xpc_prot_vec[node]);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+					SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+			      xpc_prot_vec[node]);
 		}
 	}
@@ -411,13 +410,11 @@ xpc_check_remote_hb(void)
 
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 
-		if (xpc_exiting) {
+		if (xpc_exiting)
 			break;
-		}
 
-		if (partid == sn_partition_id) {
+		if (partid == sn_partition_id)
 			continue;
-		}
 
 		part = &xpc_partitions[partid];
 
@@ -471,24 +468,21 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
 	/* get the reserved page's physical address */
 
 	*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
-	if (*remote_rp_pa == 0) {
+	if (*remote_rp_pa == 0)
 		return xpcNoRsvdPageAddr;
-	}
 
 	/* pull over the reserved page header and part_nasids mask */
 	bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp,
 			   XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
 			   (BTE_NOTIFY | BTE_WACQUIRE), NULL);
-	if (bres != BTE_SUCCESS) {
+	if (bres != BTE_SUCCESS)
 		return xpc_map_bte_errors(bres);
-	}
 
 	if (discovered_nasids != NULL) {
 		u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
 
-		for (i = 0; i < xp_nasid_mask_words; i++) {
+		for (i = 0; i < xp_nasid_mask_words; i++)
 			discovered_nasids[i] |= remote_part_nasids[i];
-		}
 	}
 
 	/* check that the partid is for another partition */
@@ -498,9 +492,8 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
 		return xpcInvalidPartid;
 	}
 
-	if (remote_rp->partid == sn_partition_id) {
+	if (remote_rp->partid == sn_partition_id)
 		return xpcLocalPartid;
-	}
 
 	if (XPC_VERSION_MAJOR(remote_rp->version) !=
 	    XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
@@ -521,16 +514,14 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
 {
 	int bres;
 
-	if (remote_vars_pa == 0) {
+	if (remote_vars_pa == 0)
 		return xpcVarsNotSet;
-	}
 
 	/* pull over the cross partition variables */
 	bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE,
 			   (BTE_NOTIFY | BTE_WACQUIRE), NULL);
-	if (bres != BTE_SUCCESS) {
+	if (bres != BTE_SUCCESS)
 		return xpc_map_bte_errors(bres);
-	}
 
 	if (XPC_VERSION_MAJOR(remote_vars->version) !=
 	    XPC_VERSION_MAJOR(XPC_V_VERSION)) {
@@ -630,9 +621,9 @@ xpc_identify_act_IRQ_req(int nasid)
 
 	remote_vars_pa = remote_rp->vars_pa;
 	remote_rp_version = remote_rp->version;
-	if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
+	if (XPC_SUPPORTS_RP_STAMP(remote_rp_version))
 		remote_rp_stamp = remote_rp->stamp;
-	}
+
 	partid = remote_rp->partid;
 	part = &xpc_partitions[partid];
 
@@ -656,7 +647,8 @@ xpc_identify_act_IRQ_req(int nasid)
 		"%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd,
 		remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
 
-	if (xpc_partition_disengaged(part) && part->act_state == XPC_P_INACTIVE) {
+	if (xpc_partition_disengaged(part) &&
+	    part->act_state == XPC_P_INACTIVE) {
 
 		xpc_update_partition_info(part, remote_rp_version,
 					  &remote_rp_stamp, remote_rp_pa,
@@ -791,9 +783,8 @@ xpc_identify_act_IRQ_sender(void)
 	/* scan through act AMO variable looking for non-zero entries */
 	for (word = 0; word < xp_nasid_mask_words; word++) {
 
-		if (xpc_exiting) {
+		if (xpc_exiting)
 			break;
-		}
 
 		nasid_mask = xpc_IPI_receive(&act_amos[word]);
 		if (nasid_mask == 0) {
@@ -840,7 +831,8 @@ xpc_partition_disengaged(struct xpc_partition *part)
 	disengaged = (xpc_partition_engaged(1UL << partid) == 0);
 	if (part->disengage_request_timeout) {
 		if (!disengaged) {
-			if (time_before(jiffies, part->disengage_request_timeout)) {
+			if (time_before(jiffies,
+			    part->disengage_request_timeout)) {
 				/* timelimit hasn't been reached yet */
 				return 0;
 			}
@@ -866,13 +858,11 @@ xpc_partition_disengaged(struct xpc_partition *part)
 
 		DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
 			part->act_state != XPC_P_INACTIVE);
-		if (part->act_state != XPC_P_INACTIVE) {
+		if (part->act_state != XPC_P_INACTIVE)
 			xpc_wakeup_channel_mgr(part);
-		}
 
-		if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
+		if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version))
 			xpc_cancel_partition_disengage_request(part);
-		}
 	}
 	return disengaged;
 }
@@ -1000,9 +990,9 @@ xpc_discovery(void)
 	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
 						  xp_nasid_mask_bytes,
 						  GFP_KERNEL, &remote_rp_base);
-	if (remote_rp == NULL) {
+	if (remote_rp == NULL)
 		return;
-	}
+
 	remote_vars = (struct xpc_vars *)remote_rp;
 
 	discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
@@ -1035,18 +1025,16 @@ xpc_discovery(void)
 
 	for (region = 0; region < max_regions; region++) {
 
-		if ((volatile int)xpc_exiting) {
+		if (xpc_exiting)
 			break;
-		}
 
 		dev_dbg(xpc_part, "searching region %d\n", region);
 
 		for (nasid = (region * region_size * 2);
 		     nasid < ((region + 1) * region_size * 2); nasid += 2) {
 
-			if ((volatile int)xpc_exiting) {
+			if (xpc_exiting)
 				break;
-			}
 
 			dev_dbg(xpc_part, "checking nasid %d\n", nasid);
 
@@ -1080,9 +1068,9 @@ xpc_discovery(void)
 					"from nasid %d, reason=%d\n", nasid,
 					ret);
 
-				if (ret == xpcLocalPartid) {
+				if (ret == xpcLocalPartid)
 					break;
-				}
+
 				continue;
 			}
 
@@ -1171,9 +1159,8 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
 	int bte_res;
 
 	part = &xpc_partitions[partid];
-	if (part->remote_rp_pa == 0) {
+	if (part->remote_rp_pa == 0)
 		return xpcPartitionDown;
-	}
 
 	memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
 
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index e41cb93b8c8..a9543c65814 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -21,6 +21,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
@@ -34,7 +35,6 @@
 #include <asm/sn/bte.h>
 #include <asm/sn/io.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/types.h>
 #include <asm/atomic.h>
 #include "xp.h"
 
@@ -87,8 +87,8 @@ struct xpnet_message {
 #define XPNET_VERSION_MAJOR(_v)		((_v) >> 4)
 #define XPNET_VERSION_MINOR(_v)		((_v) & 0xf)
 
-#define	XPNET_VERSION			_XPNET_VERSION(1,0)	/* version 1.0 */
-#define	XPNET_VERSION_EMBED		_XPNET_VERSION(1,1)	/* version 1.1 */
+#define	XPNET_VERSION			_XPNET_VERSION(1, 0)	/* version 1.0 */
+#define	XPNET_VERSION_EMBED		_XPNET_VERSION(1, 1)	/* version 1.1 */
 #define XPNET_MAGIC			0x88786984	/* "XNET" */
 
 #define XPNET_VALID_MSG(_m) \
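checkpatch only wanted the space after the comma here; the macros themselves pack a major.minor pair into a single byte, which the MAJOR/MINOR accessors above unpack. The packing side is not shown in this hunk, but to be consistent with the >> 4 and & 0xf accessors it presumably looks like:

/* assumed definition, inferred from the accessors; not part of this hunk */
#define _XPNET_VERSION(_major, _minor)	(((_major) << 4) | (_minor))

/* e.g. XPNET_VERSION_EMBED == 0x11, so MAJOR == 1 and MINOR == 1 */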
@@ -236,9 +236,11 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
 			  msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 
 	if (bret != BTE_SUCCESS) {
-		// >>> Need better way of cleaning skb.  Currently skb
-		// >>> appears in_use and we can't just call
-		// >>> dev_kfree_skb.
+		/*
+		 * >>> Need better way of cleaning skb.  Currently skb
+		 * >>> appears in_use and we can't just call
+		 * >>> dev_kfree_skb.
+		 */
 		dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
 			"error=0x%x\n", (void *)msg->buf_pa,
 			(void *)__pa((u64)skb->data &
@@ -314,9 +316,8 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
 		bp = xpnet_broadcast_partitions;
 		spin_unlock_bh(&xpnet_broadcast_lock);
 
-		if (bp == 0) {
+		if (bp == 0)
 			netif_carrier_off(xpnet_device);
-		}
 
 		dev_dbg(xpnet, "%s disconnected from partition %d; "
 			"xpnet_broadcast_partitions=0x%lx\n",
@@ -527,9 +528,8 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
 				   XPC_NOWAIT, (void **)&msg);
-		if (unlikely(ret != xpcSuccess)) {
+		if (unlikely(ret != xpcSuccess))
 			continue;
-		}
 
 		msg->embedded_bytes = embedded_bytes;
 		if (unlikely(embedded_bytes != 0)) {
@@ -561,7 +561,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			atomic_dec(&queued_msg->use_count);
 			continue;
 		}
-
 	}
 
 	if (atomic_dec_return(&queued_msg->use_count) == 0) {
@@ -599,9 +598,8 @@ xpnet_init(void)
 	u32 license_num;
 	int result = -ENOMEM;
 
-	if (!ia64_platform_is("sn2")) {
+	if (!ia64_platform_is("sn2"))
 		return -ENODEV;
-	}
 
 	dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
 
@@ -611,9 +609,8 @@ xpnet_init(void)
 	 */
 	xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
 				    XPNET_DEVICE_NAME, ether_setup);
-	if (xpnet_device == NULL) {
+	if (xpnet_device == NULL)
 		return -ENOMEM;
-	}
 
 	netif_carrier_off(xpnet_device);
 
@@ -654,9 +651,8 @@ xpnet_init(void)
 	xpnet_device->features = NETIF_F_NO_CSUM;
 
 	result = register_netdev(xpnet_device);
-	if (result != 0) {
+	if (result != 0)
 		free_netdev(xpnet_device);
-	}
 
 	return result;
 }
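register_netdev() above is the last step in xpnet_init() that can fail, and on failure the device must be released with free_netdev(), never kfree(). A minimal sketch of that alloc/register/unwind pattern (my_dev, my_net_init and the "myeth%d" template are generic names, not from xpnet):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>

static struct net_device *my_dev;

static int __init my_net_init(void)
{
	int result;

	my_dev = alloc_netdev(0, "myeth%d", ether_setup);
	if (my_dev == NULL)
		return -ENOMEM;

	result = register_netdev(my_dev);
	if (result != 0)
		free_netdev(my_dev);	/* undo alloc_netdev() on failure */

	return result;
}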