-rw-r--r--  arch/ia64/sn/kernel/mca.c          |  7
-rw-r--r--  arch/ia64/sn/kernel/xp_main.c      | 17
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c  | 34
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c     | 17
-rw-r--r--  include/asm-ia64/sn/xp.h           |  3
-rw-r--r--  include/asm-ia64/sn/xpc.h          |  9
6 files changed, 43 insertions(+), 44 deletions(-)
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
index 6546db6abdba..9ab684d1bb55 100644
--- a/arch/ia64/sn/kernel/mca.c
+++ b/arch/ia64/sn/kernel/mca.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/timer.h>
 #include <linux/vmalloc.h>
+#include <linux/mutex.h>
 #include <asm/mca.h>
 #include <asm/sal.h>
 #include <asm/sn/sn_sal.h>
@@ -27,7 +28,7 @@ void sn_init_cpei_timer(void);
 /* Printing oemdata from mca uses data that is not passed through SAL, it is
  * global. Only one user at a time.
  */
-static DECLARE_MUTEX(sn_oemdata_mutex);
+static DEFINE_MUTEX(sn_oemdata_mutex);
 static u8 **sn_oemdata;
 static u64 *sn_oemdata_size, sn_oemdata_bufsize;
 
@@ -89,7 +90,7 @@ static int
 sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
 		u64 * oemdata_size)
 {
-	down(&sn_oemdata_mutex);
+	mutex_lock(&sn_oemdata_mutex);
 	sn_oemdata = oemdata;
 	sn_oemdata_size = oemdata_size;
 	sn_oemdata_bufsize = 0;
@@ -107,7 +108,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
 		*sn_oemdata_size = 0;
 		ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
 	}
-	up(&sn_oemdata_mutex);
+	mutex_unlock(&sn_oemdata_mutex);
 	return 0;
 }
 
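(Illustrative note, not part of the patch.) The mca.c hunks above swap the semaphore-as-mutex pattern (DECLARE_MUTEX, down, up) for the real mutex API (DEFINE_MUTEX, mutex_lock, mutex_unlock). A minimal sketch of the resulting idiom, using hypothetical names:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_mutex);	/* statically initialized mutex */

	static void example_critical_section(void)
	{
		mutex_lock(&example_mutex);	/* sleeps until the mutex is acquired */
		/* ... touch the shared, single-user globals here ... */
		mutex_unlock(&example_mutex);
	}
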
diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c
index 3be52a34c80f..b7ea46645e12 100644
--- a/arch/ia64/sn/kernel/xp_main.c
+++ b/arch/ia64/sn/kernel/xp_main.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/xp.h>
@@ -136,13 +137,13 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
 	registration = &xpc_registrations[ch_number];
 
-	if (down_interruptible(&registration->sema) != 0) {
+	if (mutex_lock_interruptible(&registration->mutex) != 0) {
 		return xpcInterrupted;
 	}
 
 	/* if XPC_CHANNEL_REGISTERED(ch_number) */
 	if (registration->func != NULL) {
-		up(&registration->sema);
+		mutex_unlock(&registration->mutex);
 		return xpcAlreadyRegistered;
 	}
 
@@ -154,7 +155,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 	registration->key = key;
 	registration->func = func;
 
-	up(&registration->sema);
+	mutex_unlock(&registration->mutex);
 
 	xpc_interface.connect(ch_number);
 
@@ -190,11 +191,11 @@ xpc_disconnect(int ch_number)
 	 * figured XPC's users will just turn around and call xpc_disconnect()
 	 * again anyways, so we might as well wait, if need be.
 	 */
-	down(&registration->sema);
+	mutex_lock(&registration->mutex);
 
 	/* if !XPC_CHANNEL_REGISTERED(ch_number) */
 	if (registration->func == NULL) {
-		up(&registration->sema);
+		mutex_unlock(&registration->mutex);
 		return;
 	}
 
@@ -208,7 +209,7 @@ xpc_disconnect(int ch_number)
 
 	xpc_interface.disconnect(ch_number);
 
-	up(&registration->sema);
+	mutex_unlock(&registration->mutex);
 
 	return;
 }
@@ -250,9 +251,9 @@ xp_init(void)
 		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
 	}
 
-	/* initialize the connection registration semaphores */
+	/* initialize the connection registration mutex */
 	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
-		sema_init(&xpc_registrations[ch_number].sema, 1);  /* mutex */
+		mutex_init(&xpc_registrations[ch_number].mutex);
 	}
 
 	return 0;
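(Illustrative note, not part of the patch.) In xp_main.c the down_interruptible()-to-mutex_lock_interruptible() conversion keeps the same "!= 0" error check, because mutex_lock_interruptible() also returns 0 on success and a negative value (-EINTR) if a signal interrupted the sleep. A minimal sketch with hypothetical names:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(reg_mutex);		/* hypothetical stand-in for registration->mutex */

	static int example_register(void)
	{
		/* 0 on success, -EINTR if a signal arrived while sleeping,
		 * so the "!= 0" test used with down_interruptible() carries over */
		if (mutex_lock_interruptible(&reg_mutex) != 0)
			return -EINTR;
		/* ... update the registration under the mutex ... */
		mutex_unlock(&reg_mutex);
		return 0;
	}
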
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 0c0a68902409..8d950c778bb6 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -22,6 +22,8 @@
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/xpc.h>
@@ -56,8 +58,8 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
 		atomic_set(&ch->n_to_notify, 0);
 
 		spin_lock_init(&ch->lock);
-		sema_init(&ch->msg_to_pull_sema, 1);	/* mutex */
-		sema_init(&ch->wdisconnect_sema, 0);	/* event wait */
+		mutex_init(&ch->msg_to_pull_mutex);
+		init_completion(&ch->wdisconnect_wait);
 
 		atomic_set(&ch->n_on_msg_allocate_wq, 0);
 		init_waitqueue_head(&ch->msg_allocate_wq);
@@ -534,7 +536,6 @@ static enum xpc_retval
 xpc_allocate_msgqueues(struct xpc_channel *ch)
 {
 	unsigned long irq_flags;
-	int i;
 	enum xpc_retval ret;
 
 
@@ -552,11 +553,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
 		return ret;
 	}
 
-	for (i = 0; i < ch->local_nentries; i++) {
-		/* use a semaphore as an event wait queue */
-		sema_init(&ch->notify_queue[i].sema, 0);
-	}
-
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	ch->flags |= XPC_C_SETUP;
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -799,10 +795,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 	}
 
 	if (ch->flags & XPC_C_WDISCONNECT) {
-		spin_unlock_irqrestore(&ch->lock, *irq_flags);
-		up(&ch->wdisconnect_sema);
-		spin_lock_irqsave(&ch->lock, *irq_flags);
-
+		/* we won't lose the CPU since we're holding ch->lock */
+		complete(&ch->wdisconnect_wait);
 	} else if (ch->delayed_IPI_flags) {
 		if (part->act_state != XPC_P_DEACTIVATING) {
 			/* time to take action on any delayed IPI flags */
@@ -1092,12 +1086,12 @@ xpc_connect_channel(struct xpc_channel *ch)
 	struct xpc_registration *registration = &xpc_registrations[ch->number];
 
 
-	if (down_trylock(&registration->sema) != 0) {
+	if (mutex_trylock(&registration->mutex) == 0) {
 		return xpcRetry;
 	}
 
 	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
-		up(&registration->sema);
+		mutex_unlock(&registration->mutex);
 		return xpcUnregistered;
 	}
 
@@ -1108,7 +1102,7 @@ xpc_connect_channel(struct xpc_channel *ch)
 
 	if (ch->flags & XPC_C_DISCONNECTING) {
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		up(&registration->sema);
+		mutex_unlock(&registration->mutex);
 		return ch->reason;
 	}
 
@@ -1140,7 +1134,7 @@ xpc_connect_channel(struct xpc_channel *ch)
 			 * channel lock be locked and will unlock and relock
 			 * the channel lock as needed.
 			 */
-			up(&registration->sema);
+			mutex_unlock(&registration->mutex);
 			XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
 							&irq_flags);
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1155,7 +1149,7 @@ xpc_connect_channel(struct xpc_channel *ch)
 		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
 	}
 
-	up(&registration->sema);
+	mutex_unlock(&registration->mutex);
 
 
 	/* initiate the connection */
@@ -2089,7 +2083,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 	enum xpc_retval ret;
 
 
-	if (down_interruptible(&ch->msg_to_pull_sema) != 0) {
+	if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
 		/* we were interrupted by a signal */
 		return NULL;
 	}
@@ -2125,7 +2119,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 
 			XPC_DEACTIVATE_PARTITION(part, ret);
 
-			up(&ch->msg_to_pull_sema);
+			mutex_unlock(&ch->msg_to_pull_mutex);
 			return NULL;
 		}
 
@@ -2134,7 +2128,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 		ch->next_msg_to_pull += nmsgs;
 	}
 
-	up(&ch->msg_to_pull_sema);
+	mutex_unlock(&ch->msg_to_pull_mutex);
 
 	/* return the message we were looking for */
 	msg_offset = (get % ch->remote_nentries) * ch->msg_size;
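(Illustrative note, not part of the patch.) One subtlety in xpc_connect_channel() above: the sense of the trylock test flips. down_trylock() returns 0 when it acquired the semaphore and nonzero on contention, while mutex_trylock() returns 1 on success and 0 on contention, hence "!= 0" becomes "== 0". A minimal sketch with hypothetical names:

	#include <linux/errno.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(chan_mutex);	/* hypothetical stand-in for registration->mutex */

	static int example_try_connect(void)
	{
		/* mutex_trylock() returns 1 on success, 0 on contention,
		 * the opposite sense of down_trylock(), so the test is inverted */
		if (mutex_trylock(&chan_mutex) == 0)
			return -EAGAIN;		/* caller retries later, as with xpcRetry */
		/* ... connect the channel ... */
		mutex_unlock(&chan_mutex);
		return 0;
	}
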
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 8930586e0eb4..c75f8aeefc2b 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -55,6 +55,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
+#include <linux/completion.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/kdebug.h>
@@ -177,10 +178,10 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
 static unsigned long xpc_hb_check_timeout;
 
 /* notification that the xpc_hb_checker thread has exited */
-static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
+static DECLARE_COMPLETION(xpc_hb_checker_exited);
 
 /* notification that the xpc_discovery thread has exited */
-static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
+static DECLARE_COMPLETION(xpc_discovery_exited);
 
 
 static struct timer_list xpc_hb_timer;
@@ -321,7 +322,7 @@ xpc_hb_checker(void *ignore)
 
 
 	/* mark this thread as having exited */
-	up(&xpc_hb_checker_exited);
+	complete(&xpc_hb_checker_exited);
 	return 0;
 }
 
@@ -341,7 +342,7 @@ xpc_initiate_discovery(void *ignore)
 	dev_dbg(xpc_part, "discovery thread is exiting\n");
 
 	/* mark this thread as having exited */
-	up(&xpc_discovery_exited);
+	complete(&xpc_discovery_exited);
 	return 0;
 }
 
@@ -893,7 +894,7 @@ xpc_disconnect_wait(int ch_number)
 			continue;
 		}
 
-		(void) down(&ch->wdisconnect_sema);
+		wait_for_completion(&ch->wdisconnect_wait);
 
 		spin_lock_irqsave(&ch->lock, irq_flags);
 		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
@@ -946,10 +947,10 @@ xpc_do_exit(enum xpc_retval reason)
 	free_irq(SGI_XPC_ACTIVATE, NULL);
 
 	/* wait for the discovery thread to exit */
-	down(&xpc_discovery_exited);
+	wait_for_completion(&xpc_discovery_exited);
 
 	/* wait for the heartbeat checker thread to exit */
-	down(&xpc_hb_checker_exited);
+	wait_for_completion(&xpc_hb_checker_exited);
 
 
 	/* sleep for a 1/3 of a second or so */
@@ -1367,7 +1368,7 @@ xpc_init(void)
 		dev_err(xpc_part, "failed while forking discovery thread\n");
 
 		/* mark this new thread as a non-starter */
-		up(&xpc_discovery_exited);
+		complete(&xpc_discovery_exited);
 
 		xpc_do_exit(xpcUnloading);
 		return -EBUSY;
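(Illustrative note, not part of the patch.) In xpc_main.c the semaphores declared with DECLARE_MUTEX_LOCKED() were never mutexes at all; they were one-shot "thread has exited" events (initialized locked, up()'d by the exiting thread, down()'d by the waiter). struct completion expresses that directly. A minimal sketch with hypothetical names:

	#include <linux/completion.h>

	static DECLARE_COMPLETION(worker_exited);	/* hypothetical thread-exit event */

	static int example_worker(void *ignore)
	{
		/* ... do work ... */
		complete(&worker_exited);		/* signal the waiter exactly once */
		return 0;
	}

	static void example_shutdown(void)
	{
		wait_for_completion(&worker_exited);	/* blocks until complete() runs */
	}
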
diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h
index 203945ae034e..9bd2f9bf329b 100644
--- a/include/asm-ia64/sn/xp.h
+++ b/include/asm-ia64/sn/xp.h
@@ -18,6 +18,7 @@
 
 #include <linux/cache.h>
 #include <linux/hardirq.h>
+#include <linux/mutex.h>
 #include <asm/sn/types.h>
 #include <asm/sn/bte.h>
 
@@ -359,7 +360,7 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
 * the channel.
 */
 struct xpc_registration {
-	struct semaphore sema;
+	struct mutex mutex;
 	xpc_channel_func func;		/* function to call */
 	void *key;			/* pointer to user's key */
 	u16 nentries;			/* #of msg entries in local msg queue */
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index 87e9cd588510..0c36928ffd8b 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -19,6 +19,8 @@
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/sn/bte.h>
@@ -335,8 +337,7 @@ struct xpc_openclose_args {
 * and consumed by the intended recipient.
 */
 struct xpc_notify {
-	struct semaphore sema;		/* notify semaphore */
 	volatile u8 type;		/* type of notification */
 
 	/* the following two fields are only used if type == XPC_N_CALL */
 	xpc_notify_func func;		/* user's notify function */
@@ -465,8 +466,8 @@ struct xpc_channel {
 	xpc_channel_func func;		/* user's channel function */
 	void *key;			/* pointer to user's key */
 
-	struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
-	struct semaphore wdisconnect_sema; /* wait for channel disconnect */
+	struct mutex msg_to_pull_mutex;	/* next msg to pull serialization */
+	struct completion wdisconnect_wait; /* wait for channel disconnect */
 
 	struct xpc_openclose_args *local_openclose_args; /* args passed on */
 					/*  opening or closing of channel */
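(Illustrative note, not part of the patch.) The header changes replace the embedded struct semaphore fields with struct mutex and struct completion; unlike the static DEFINE_MUTEX()/DECLARE_COMPLETION() declarations used elsewhere in this patch, embedded objects still need runtime initialization, as done in xpc_initialize_channels(). A minimal sketch with hypothetical names:

	#include <linux/mutex.h>
	#include <linux/completion.h>

	struct example_channel {			/* hypothetical, modeled on struct xpc_channel */
		struct mutex msg_to_pull_mutex;		/* serializes pulling of remote messages */
		struct completion wdisconnect_wait;	/* one-shot disconnect event */
	};

	static void example_channel_init(struct example_channel *ch)
	{
		/* embedded locks/events must be initialized before first use */
		mutex_init(&ch->msg_to_pull_mutex);
		init_completion(&ch->wdisconnect_wait);
	}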