Diffstat (limited to 'drivers/misc/sgi-xp/xpc_channel.c')
-rw-r--r--	drivers/misc/sgi-xp/xpc_channel.c	197
1 file changed, 7 insertions(+), 190 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 0d3c153d1d0b..1c73423665bd 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -25,145 +25,6 @@
 #include "xpc.h"
 
 /*
- * Guarantee that the kzalloc'd memory is cacheline aligned.
- */
-void *
-xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
-{
-	/* see if kzalloc will give us cacheline aligned memory by default */
-	*base = kzalloc(size, flags);
-	if (*base == NULL)
-		return NULL;
-
-	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
-		return *base;
-
-	kfree(*base);
-
-	/* nope, we'll have to do it ourselves */
-	*base = kzalloc(size + L1_CACHE_BYTES, flags);
-	if (*base == NULL)
-		return NULL;
-
-	return (void *)L1_CACHE_ALIGN((u64)*base);
-}
-
-/*
- * Allocate the local message queue and the notify queue.
- */
-static enum xp_retval
-xpc_allocate_local_msgqueue(struct xpc_channel *ch)
-{
-	unsigned long irq_flags;
-	int nentries;
-	size_t nbytes;
-
-	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
-
-		nbytes = nentries * ch->msg_size;
-		ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
-								   GFP_KERNEL,
-							&ch->local_msgqueue_base);
-		if (ch->local_msgqueue == NULL)
-			continue;
-
-		nbytes = nentries * sizeof(struct xpc_notify);
-		ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
-		if (ch->notify_queue == NULL) {
-			kfree(ch->local_msgqueue_base);
-			ch->local_msgqueue = NULL;
-			continue;
-		}
-
-		spin_lock_irqsave(&ch->lock, irq_flags);
-		if (nentries < ch->local_nentries) {
-			dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
-				"partid=%d, channel=%d\n", nentries,
-				ch->local_nentries, ch->partid, ch->number);
-
-			ch->local_nentries = nentries;
-		}
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		return xpSuccess;
-	}
-
-	dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
-		"queue, partid=%d, channel=%d\n", ch->partid, ch->number);
-	return xpNoMemory;
-}
-
-/*
- * Allocate the cached remote message queue.
- */
-static enum xp_retval
-xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
-{
-	unsigned long irq_flags;
-	int nentries;
-	size_t nbytes;
-
-	DBUG_ON(ch->remote_nentries <= 0);
-
-	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
-
-		nbytes = nentries * ch->msg_size;
-		ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
-								    GFP_KERNEL,
-							&ch->remote_msgqueue_base);
-		if (ch->remote_msgqueue == NULL)
-			continue;
-
-		spin_lock_irqsave(&ch->lock, irq_flags);
-		if (nentries < ch->remote_nentries) {
-			dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
-				"partid=%d, channel=%d\n", nentries,
-				ch->remote_nentries, ch->partid, ch->number);
-
-			ch->remote_nentries = nentries;
-		}
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		return xpSuccess;
-	}
-
-	dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
-		"partid=%d, channel=%d\n", ch->partid, ch->number);
-	return xpNoMemory;
-}
-
-/*
- * Allocate message queues and other stuff associated with a channel.
- *
- * Note: Assumes all of the channel sizes are filled in.
- */
-static enum xp_retval
-xpc_allocate_msgqueues(struct xpc_channel *ch)
-{
-	unsigned long irq_flags;
-	enum xp_retval ret;
-
-	DBUG_ON(ch->flags & XPC_C_SETUP);
-
-	ret = xpc_allocate_local_msgqueue(ch);
-	if (ret != xpSuccess)
-		return ret;
-
-	ret = xpc_allocate_remote_msgqueue(ch);
-	if (ret != xpSuccess) {
-		kfree(ch->local_msgqueue_base);
-		ch->local_msgqueue = NULL;
-		kfree(ch->notify_queue);
-		ch->notify_queue = NULL;
-		return ret;
-	}
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	ch->flags |= XPC_C_SETUP;
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-
-	return xpSuccess;
-}
-
-/*
  * Process a connect message from a remote partition.
  *
  * Note: xpc_process_connect() is expecting to be called with the
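
The deleted xpc_kzalloc_cacheline_aligned() relies on a standard over-allocate-and-round-up trick: grab one extra cacheline, then round the returned address up to the next boundary. Below is a minimal standalone sketch of the same idea, not driver code; calloc() and CACHE_BYTES are stand-ins for kzalloc() and L1_CACHE_BYTES, and the kernel version's fast path (checking whether kzalloc() already returned aligned memory) is omitted.

#include <stdlib.h>
#include <stdint.h>

#define CACHE_BYTES 128	/* assumed cacheline size for illustration */

/* Returns a CACHE_BYTES-aligned pointer; *base is what must be freed. */
static void *alloc_cacheline_aligned(size_t size, void **base)
{
	uintptr_t addr;

	/* over-allocate by one cacheline so the result can be rounded up */
	*base = calloc(1, size + CACHE_BYTES);
	if (*base == NULL)
		return NULL;

	/* round up to the next cacheline boundary */
	addr = ((uintptr_t)*base + CACHE_BYTES - 1) &
	       ~(uintptr_t)(CACHE_BYTES - 1);
	return (void *)addr;
}

Note that the caller must free *base, never the aligned pointer, which is why the driver carries ch->local_msgqueue_base and ch->remote_msgqueue_base alongside the aligned queue pointers and kfree()s the base pointers on teardown.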
@@ -191,10 +52,11 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
 		if (ret != xpSuccess)
 			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
 
+		ch->flags |= XPC_C_SETUP;
+
 		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
 			return;
 
-		DBUG_ON(!(ch->flags & XPC_C_SETUP));
 		DBUG_ON(ch->local_msgqueue == NULL);
 		DBUG_ON(ch->remote_msgqueue == NULL);
 	}
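
With XPC_C_SETUP now raised unconditionally a few lines above, the old DBUG_ON(!(ch->flags & XPC_C_SETUP)) could never fire and is dropped. The relocated flag update depends on the locking convention the file documents: xpc_process_connect() is entered with ch->lock held, so the read-modify-write on ch->flags is serialized against teardown, which tests and clears the same bit under the same lock. A rough userspace sketch of that handshake, with a pthread mutex standing in for the kernel spinlock and an assumed flag value:

#include <pthread.h>
#include <stdlib.h>

#define C_SETUP 0x1	/* stand-in for XPC_C_SETUP; real value may differ */

struct chan {
	pthread_mutex_t lock;
	unsigned int flags;
	void *queues;
};

/* mirrors xpc_process_connect(): called with ch->lock already held */
static void mark_setup_locked(struct chan *ch)
{
	ch->flags |= C_SETUP;
}

/* mirrors teardown: take the same lock before testing/clearing the flag */
static void teardown(struct chan *ch)
{
	pthread_mutex_lock(&ch->lock);
	if (ch->flags & C_SETUP) {	/* free only what setup allocated */
		ch->flags &= ~C_SETUP;
		free(ch->queues);
		ch->queues = NULL;
	}
	pthread_mutex_unlock(&ch->lock);
}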
@@ -220,55 +82,6 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
 }
 
 /*
- * Free up message queues and other stuff that were allocated for the specified
- * channel.
- *
- * Note: ch->reason and ch->reason_line are left set for debugging purposes,
- * they're cleared when XPC_C_DISCONNECTED is cleared.
- */
-static void
-xpc_free_msgqueues(struct xpc_channel *ch)
-{
-	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
-
-	DBUG_ON(!spin_is_locked(&ch->lock));
-	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
-
-	ch->remote_msgqueue_pa = 0;
-	ch->func = NULL;
-	ch->key = NULL;
-	ch->msg_size = 0;
-	ch->local_nentries = 0;
-	ch->remote_nentries = 0;
-	ch->kthreads_assigned_limit = 0;
-	ch->kthreads_idle_limit = 0;
-
-	ch_sn2->local_GP->get = 0;
-	ch_sn2->local_GP->put = 0;
-	ch_sn2->remote_GP.get = 0;
-	ch_sn2->remote_GP.put = 0;
-	ch_sn2->w_local_GP.get = 0;
-	ch_sn2->w_local_GP.put = 0;
-	ch_sn2->w_remote_GP.get = 0;
-	ch_sn2->w_remote_GP.put = 0;
-	ch_sn2->next_msg_to_pull = 0;
-
-	if (ch->flags & XPC_C_SETUP) {
-		ch->flags &= ~XPC_C_SETUP;
-
-		dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
-			ch->flags, ch->partid, ch->number);
-
-		kfree(ch->local_msgqueue_base);
-		ch->local_msgqueue = NULL;
-		kfree(ch->remote_msgqueue_base);
-		ch->remote_msgqueue = NULL;
-		kfree(ch->notify_queue);
-		ch->notify_queue = NULL;
-	}
-}
-
-/*
  * spin_lock_irqsave() is expected to be held on entry.
  */
 static void
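
The ch_sn2 GP ("get/put") fields zeroed in the deleted xpc_free_msgqueues() are the head and tail positions of XPC's circular message queues. A small sketch of the pattern, with stand-in types rather than driver structures: the counters increase monotonically and a sequence number is mapped onto a fixed-size ring by taking it modulo the entry count.

#include <stdint.h>

/* get/put advance monotonically; only their difference is bounded */
struct gp {
	uint64_t get;	/* next message to consume */
	uint64_t put;	/* next free slot to produce into */
};

/* number of messages currently queued */
static uint64_t queued(const struct gp *gp)
{
	return gp->put - gp->get;
}

/* which ring slot a given sequence number lands in */
static int slot(uint64_t seq, int nentries)
{
	return (int)(seq % nentries);
}

Zeroing every counter on disconnect, as the removed function did, returns both sides to an empty queue before the channel can be reused.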
@@ -331,7 +144,11 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 	/* it's now safe to free the channel's message queues */
 	xpc_free_msgqueues(ch);
 
-	/* mark disconnected, clear all other flags except XPC_C_WDISCONNECT */
+	/*
+	 * Mark the channel disconnected and clear all other flags, including
+	 * XPC_C_SETUP (because of call to xpc_free_msgqueues()) but not
+	 * including XPC_C_WDISCONNECT (if it was set).
+	 */
 	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));
 
 	atomic_dec(&part->nchannels_active);
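
The surviving assignment ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT)); masks and sets in one step: everything is cleared except XPC_C_WDISCONNECT, and XPC_C_DISCONNECTED is set. A standalone demo of the arithmetic, using assumed flag values (the driver's actual constants differ):

#include <stdio.h>

#define XPC_C_SETUP        0x1	/* assumed values for illustration only */
#define XPC_C_CONNECTED    0x2
#define XPC_C_DISCONNECTED 0x4
#define XPC_C_WDISCONNECT  0x8

int main(void)
{
	unsigned int flags = XPC_C_SETUP | XPC_C_CONNECTED | XPC_C_WDISCONNECT;

	/* same expression as in xpc_process_disconnect() above */
	flags = (XPC_C_DISCONNECTED | (flags & XPC_C_WDISCONNECT));

	/* prints 0xc: only DISCONNECTED plus the preserved WDISCONNECT remain */
	printf("flags = 0x%x\n", flags);
	return 0;
}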