diff options
author | Dean Nelson <dcn@sgi.com> | 2008-07-30 01:34:06 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-30 12:41:49 -0400 |
commit | e17d416b1bc947df68499863f13b401fb42b48f6 (patch) | |
tree | d0c766c93dce9acb27948022b1613347981fd9b3 /drivers/misc/sgi-xp/xpc_channel.c | |
parent | 94bd2708d4a95d7da5a1c7c28a063eccd127fb69 (diff) |
sgi-xp: isolate xpc_vars_part structure to sn2 only
Isolate the xpc_vars_part structure of XPC's reserved page to sn2 only.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_channel.c')
-rw-r--r-- | drivers/misc/sgi-xp/xpc_channel.c | 538 |
1 file changed, 1 insertion, 537 deletions
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 9e79ad7eafe..8081e8155df 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c | |||
@@ -27,7 +27,7 @@ | |||
27 | /* | 27 | /* |
28 | * Guarantee that the kzalloc'd memory is cacheline aligned. | 28 | * Guarantee that the kzalloc'd memory is cacheline aligned. |
29 | */ | 29 | */ |
30 | static void * | 30 | void * |
31 | xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) | 31 | xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) |
32 | { | 32 | { |
33 | /* see if kzalloc will give us cachline aligned memory by default */ | 33 | /* see if kzalloc will give us cachline aligned memory by default */ |
@@ -49,382 +49,6 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) | |||
49 | } | 49 | } |
50 | 50 | ||
51 | /* | 51 | /* |
52 | * Set up the initial values for the XPartition Communication channels. | ||
53 | */ | ||
54 | static void | ||
55 | xpc_initialize_channels(struct xpc_partition *part, short partid) | ||
56 | { | ||
57 | int ch_number; | ||
58 | struct xpc_channel *ch; | ||
59 | |||
60 | for (ch_number = 0; ch_number < part->nchannels; ch_number++) { | ||
61 | ch = &part->channels[ch_number]; | ||
62 | |||
63 | ch->partid = partid; | ||
64 | ch->number = ch_number; | ||
65 | ch->flags = XPC_C_DISCONNECTED; | ||
66 | |||
67 | ch->local_GP = &part->local_GPs[ch_number]; | ||
68 | ch->local_openclose_args = | ||
69 | &part->local_openclose_args[ch_number]; | ||
70 | |||
71 | atomic_set(&ch->kthreads_assigned, 0); | ||
72 | atomic_set(&ch->kthreads_idle, 0); | ||
73 | atomic_set(&ch->kthreads_active, 0); | ||
74 | |||
75 | atomic_set(&ch->references, 0); | ||
76 | atomic_set(&ch->n_to_notify, 0); | ||
77 | |||
78 | spin_lock_init(&ch->lock); | ||
79 | mutex_init(&ch->msg_to_pull_mutex); | ||
80 | init_completion(&ch->wdisconnect_wait); | ||
81 | |||
82 | atomic_set(&ch->n_on_msg_allocate_wq, 0); | ||
83 | init_waitqueue_head(&ch->msg_allocate_wq); | ||
84 | init_waitqueue_head(&ch->idle_wq); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Setup the infrastructure necessary to support XPartition Communication | ||
90 | * between the specified remote partition and the local one. | ||
91 | */ | ||
92 | enum xp_retval | ||
93 | xpc_setup_infrastructure(struct xpc_partition *part) | ||
94 | { | ||
95 | int ret, cpuid; | ||
96 | struct timer_list *timer; | ||
97 | short partid = XPC_PARTID(part); | ||
98 | |||
99 | /* | ||
100 | * Zero out MOST of the entry for this partition. Only the fields | ||
101 | * starting with `nchannels' will be zeroed. The preceding fields must | ||
102 | * remain `viable' across partition ups and downs, since they may be | ||
103 | * referenced during this memset() operation. | ||
104 | */ | ||
105 | memset(&part->nchannels, 0, sizeof(struct xpc_partition) - | ||
106 | offsetof(struct xpc_partition, nchannels)); | ||
107 | |||
108 | /* | ||
109 | * Allocate all of the channel structures as a contiguous chunk of | ||
110 | * memory. | ||
111 | */ | ||
112 | part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS, | ||
113 | GFP_KERNEL); | ||
114 | if (part->channels == NULL) { | ||
115 | dev_err(xpc_chan, "can't get memory for channels\n"); | ||
116 | return xpNoMemory; | ||
117 | } | ||
118 | |||
119 | part->nchannels = XPC_MAX_NCHANNELS; | ||
120 | |||
121 | /* allocate all the required GET/PUT values */ | ||
122 | |||
123 | part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, | ||
124 | GFP_KERNEL, | ||
125 | &part->local_GPs_base); | ||
126 | if (part->local_GPs == NULL) { | ||
127 | kfree(part->channels); | ||
128 | part->channels = NULL; | ||
129 | dev_err(xpc_chan, "can't get memory for local get/put " | ||
130 | "values\n"); | ||
131 | return xpNoMemory; | ||
132 | } | ||
133 | |||
134 | part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, | ||
135 | GFP_KERNEL, | ||
136 | &part-> | ||
137 | remote_GPs_base); | ||
138 | if (part->remote_GPs == NULL) { | ||
139 | dev_err(xpc_chan, "can't get memory for remote get/put " | ||
140 | "values\n"); | ||
141 | kfree(part->local_GPs_base); | ||
142 | part->local_GPs = NULL; | ||
143 | kfree(part->channels); | ||
144 | part->channels = NULL; | ||
145 | return xpNoMemory; | ||
146 | } | ||
147 | |||
148 | /* allocate all the required open and close args */ | ||
149 | |||
150 | part->local_openclose_args = | ||
151 | xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, | ||
152 | &part->local_openclose_args_base); | ||
153 | if (part->local_openclose_args == NULL) { | ||
154 | dev_err(xpc_chan, "can't get memory for local connect args\n"); | ||
155 | kfree(part->remote_GPs_base); | ||
156 | part->remote_GPs = NULL; | ||
157 | kfree(part->local_GPs_base); | ||
158 | part->local_GPs = NULL; | ||
159 | kfree(part->channels); | ||
160 | part->channels = NULL; | ||
161 | return xpNoMemory; | ||
162 | } | ||
163 | |||
164 | part->remote_openclose_args = | ||
165 | xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, | ||
166 | &part->remote_openclose_args_base); | ||
167 | if (part->remote_openclose_args == NULL) { | ||
168 | dev_err(xpc_chan, "can't get memory for remote connect args\n"); | ||
169 | kfree(part->local_openclose_args_base); | ||
170 | part->local_openclose_args = NULL; | ||
171 | kfree(part->remote_GPs_base); | ||
172 | part->remote_GPs = NULL; | ||
173 | kfree(part->local_GPs_base); | ||
174 | part->local_GPs = NULL; | ||
175 | kfree(part->channels); | ||
176 | part->channels = NULL; | ||
177 | return xpNoMemory; | ||
178 | } | ||
179 | |||
180 | xpc_initialize_channels(part, partid); | ||
181 | |||
182 | atomic_set(&part->nchannels_active, 0); | ||
183 | atomic_set(&part->nchannels_engaged, 0); | ||
184 | |||
185 | /* local_IPI_amo were set to 0 by an earlier memset() */ | ||
186 | |||
187 | /* Initialize this partitions AMO_t structure */ | ||
188 | part->local_IPI_amo_va = xpc_IPI_init(partid); | ||
189 | |||
190 | spin_lock_init(&part->IPI_lock); | ||
191 | |||
192 | atomic_set(&part->channel_mgr_requests, 1); | ||
193 | init_waitqueue_head(&part->channel_mgr_wq); | ||
194 | |||
195 | sprintf(part->IPI_owner, "xpc%02d", partid); | ||
196 | ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED, | ||
197 | part->IPI_owner, (void *)(u64)partid); | ||
198 | if (ret != 0) { | ||
199 | dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " | ||
200 | "errno=%d\n", -ret); | ||
201 | kfree(part->remote_openclose_args_base); | ||
202 | part->remote_openclose_args = NULL; | ||
203 | kfree(part->local_openclose_args_base); | ||
204 | part->local_openclose_args = NULL; | ||
205 | kfree(part->remote_GPs_base); | ||
206 | part->remote_GPs = NULL; | ||
207 | kfree(part->local_GPs_base); | ||
208 | part->local_GPs = NULL; | ||
209 | kfree(part->channels); | ||
210 | part->channels = NULL; | ||
211 | return xpLackOfResources; | ||
212 | } | ||
213 | |||
214 | /* Setup a timer to check for dropped IPIs */ | ||
215 | timer = &part->dropped_IPI_timer; | ||
216 | init_timer(timer); | ||
217 | timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check; | ||
218 | timer->data = (unsigned long)part; | ||
219 | timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT; | ||
220 | add_timer(timer); | ||
221 | |||
222 | /* | ||
223 | * With the setting of the partition setup_state to XPC_P_SETUP, we're | ||
224 | * declaring that this partition is ready to go. | ||
225 | */ | ||
226 | part->setup_state = XPC_P_SETUP; | ||
227 | |||
228 | /* | ||
229 | * Setup the per partition specific variables required by the | ||
230 | * remote partition to establish channel connections with us. | ||
231 | * | ||
232 | * The setting of the magic # indicates that these per partition | ||
233 | * specific variables are ready to be used. | ||
234 | */ | ||
235 | xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); | ||
236 | xpc_vars_part[partid].openclose_args_pa = | ||
237 | __pa(part->local_openclose_args); | ||
238 | xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); | ||
239 | cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ | ||
240 | xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid); | ||
241 | xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid); | ||
242 | xpc_vars_part[partid].nchannels = part->nchannels; | ||
243 | xpc_vars_part[partid].magic = XPC_VP_MAGIC1; | ||
244 | |||
245 | return xpSuccess; | ||
246 | } | ||
247 | |||
248 | /* | ||
249 | * Create a wrapper that hides the underlying mechanism for pulling a cacheline | ||
250 | * (or multiple cachelines) from a remote partition. | ||
251 | * | ||
252 | * src must be a cacheline aligned physical address on the remote partition. | ||
253 | * dst must be a cacheline aligned virtual address on this partition. | ||
254 | * cnt must be cacheline sized | ||
255 | */ | ||
256 | static enum xp_retval | ||
257 | xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, | ||
258 | const void *src, size_t cnt) | ||
259 | { | ||
260 | enum xp_retval ret; | ||
261 | |||
262 | DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src)); | ||
263 | DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst)); | ||
264 | DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); | ||
265 | |||
266 | if (part->act_state == XPC_P_DEACTIVATING) | ||
267 | return part->reason; | ||
268 | |||
269 | ret = xp_remote_memcpy(dst, src, cnt); | ||
270 | if (ret != xpSuccess) { | ||
271 | dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed," | ||
272 | " ret=%d\n", XPC_PARTID(part), ret); | ||
273 | } | ||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | /* | ||
278 | * Pull the remote per partition specific variables from the specified | ||
279 | * partition. | ||
280 | */ | ||
281 | enum xp_retval | ||
282 | xpc_pull_remote_vars_part(struct xpc_partition *part) | ||
283 | { | ||
284 | u8 buffer[L1_CACHE_BYTES * 2]; | ||
285 | struct xpc_vars_part *pulled_entry_cacheline = | ||
286 | (struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer); | ||
287 | struct xpc_vars_part *pulled_entry; | ||
288 | u64 remote_entry_cacheline_pa, remote_entry_pa; | ||
289 | short partid = XPC_PARTID(part); | ||
290 | enum xp_retval ret; | ||
291 | |||
292 | /* pull the cacheline that contains the variables we're interested in */ | ||
293 | |||
294 | DBUG_ON(part->remote_vars_part_pa != | ||
295 | L1_CACHE_ALIGN(part->remote_vars_part_pa)); | ||
296 | DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2); | ||
297 | |||
298 | remote_entry_pa = part->remote_vars_part_pa + | ||
299 | sn_partition_id * sizeof(struct xpc_vars_part); | ||
300 | |||
301 | remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1)); | ||
302 | |||
303 | pulled_entry = (struct xpc_vars_part *)((u64)pulled_entry_cacheline + | ||
304 | (remote_entry_pa & | ||
305 | (L1_CACHE_BYTES - 1))); | ||
306 | |||
307 | ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, | ||
308 | (void *)remote_entry_cacheline_pa, | ||
309 | L1_CACHE_BYTES); | ||
310 | if (ret != xpSuccess) { | ||
311 | dev_dbg(xpc_chan, "failed to pull XPC vars_part from " | ||
312 | "partition %d, ret=%d\n", partid, ret); | ||
313 | return ret; | ||
314 | } | ||
315 | |||
316 | /* see if they've been set up yet */ | ||
317 | |||
318 | if (pulled_entry->magic != XPC_VP_MAGIC1 && | ||
319 | pulled_entry->magic != XPC_VP_MAGIC2) { | ||
320 | |||
321 | if (pulled_entry->magic != 0) { | ||
322 | dev_dbg(xpc_chan, "partition %d's XPC vars_part for " | ||
323 | "partition %d has bad magic value (=0x%lx)\n", | ||
324 | partid, sn_partition_id, pulled_entry->magic); | ||
325 | return xpBadMagic; | ||
326 | } | ||
327 | |||
328 | /* they've not been initialized yet */ | ||
329 | return xpRetry; | ||
330 | } | ||
331 | |||
332 | if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) { | ||
333 | |||
334 | /* validate the variables */ | ||
335 | |||
336 | if (pulled_entry->GPs_pa == 0 || | ||
337 | pulled_entry->openclose_args_pa == 0 || | ||
338 | pulled_entry->IPI_amo_pa == 0) { | ||
339 | |||
340 | dev_err(xpc_chan, "partition %d's XPC vars_part for " | ||
341 | "partition %d are not valid\n", partid, | ||
342 | sn_partition_id); | ||
343 | return xpInvalidAddress; | ||
344 | } | ||
345 | |||
346 | /* the variables we imported look to be valid */ | ||
347 | |||
348 | part->remote_GPs_pa = pulled_entry->GPs_pa; | ||
349 | part->remote_openclose_args_pa = | ||
350 | pulled_entry->openclose_args_pa; | ||
351 | part->remote_IPI_amo_va = | ||
352 | (AMO_t *)__va(pulled_entry->IPI_amo_pa); | ||
353 | part->remote_IPI_nasid = pulled_entry->IPI_nasid; | ||
354 | part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; | ||
355 | |||
356 | if (part->nchannels > pulled_entry->nchannels) | ||
357 | part->nchannels = pulled_entry->nchannels; | ||
358 | |||
359 | /* let the other side know that we've pulled their variables */ | ||
360 | |||
361 | xpc_vars_part[partid].magic = XPC_VP_MAGIC2; | ||
362 | } | ||
363 | |||
364 | if (pulled_entry->magic == XPC_VP_MAGIC1) | ||
365 | return xpRetry; | ||
366 | |||
367 | return xpSuccess; | ||
368 | } | ||
369 | |||
370 | /* | ||
371 | * Get the IPI flags and pull the openclose args and/or remote GPs as needed. | ||
372 | */ | ||
373 | static u64 | ||
374 | xpc_get_IPI_flags(struct xpc_partition *part) | ||
375 | { | ||
376 | unsigned long irq_flags; | ||
377 | u64 IPI_amo; | ||
378 | enum xp_retval ret; | ||
379 | |||
380 | /* | ||
381 | * See if there are any IPI flags to be handled. | ||
382 | */ | ||
383 | |||
384 | spin_lock_irqsave(&part->IPI_lock, irq_flags); | ||
385 | IPI_amo = part->local_IPI_amo; | ||
386 | if (IPI_amo != 0) | ||
387 | part->local_IPI_amo = 0; | ||
388 | |||
389 | spin_unlock_irqrestore(&part->IPI_lock, irq_flags); | ||
390 | |||
391 | if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { | ||
392 | ret = xpc_pull_remote_cachelines(part, | ||
393 | part->remote_openclose_args, | ||
394 | (void *)part-> | ||
395 | remote_openclose_args_pa, | ||
396 | XPC_OPENCLOSE_ARGS_SIZE); | ||
397 | if (ret != xpSuccess) { | ||
398 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
399 | |||
400 | dev_dbg(xpc_chan, "failed to pull openclose args from " | ||
401 | "partition %d, ret=%d\n", XPC_PARTID(part), | ||
402 | ret); | ||
403 | |||
404 | /* don't bother processing IPIs anymore */ | ||
405 | IPI_amo = 0; | ||
406 | } | ||
407 | } | ||
408 | |||
409 | if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { | ||
410 | ret = xpc_pull_remote_cachelines(part, part->remote_GPs, | ||
411 | (void *)part->remote_GPs_pa, | ||
412 | XPC_GP_SIZE); | ||
413 | if (ret != xpSuccess) { | ||
414 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
415 | |||
416 | dev_dbg(xpc_chan, "failed to pull GPs from partition " | ||
417 | "%d, ret=%d\n", XPC_PARTID(part), ret); | ||
418 | |||
419 | /* don't bother processing IPIs anymore */ | ||
420 | IPI_amo = 0; | ||
421 | } | ||
422 | } | ||
423 | |||
424 | return IPI_amo; | ||
425 | } | ||
426 | |||
427 | /* | ||
428 | * Allocate the local message queue and the notify queue. | 52 | * Allocate the local message queue and the notify queue. |
429 | */ | 53 | */ |
430 | static enum xp_retval | 54 | static enum xp_retval |
@@ -1365,59 +989,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason) | |||
1365 | } | 989 | } |
1366 | 990 | ||
1367 | /* | 991 | /* |
1368 | * Teardown the infrastructure necessary to support XPartition Communication | ||
1369 | * between the specified remote partition and the local one. | ||
1370 | */ | ||
1371 | void | ||
1372 | xpc_teardown_infrastructure(struct xpc_partition *part) | ||
1373 | { | ||
1374 | short partid = XPC_PARTID(part); | ||
1375 | |||
1376 | /* | ||
1377 | * We start off by making this partition inaccessible to local | ||
1378 | * processes by marking it as no longer setup. Then we make it | ||
1379 | * inaccessible to remote processes by clearing the XPC per partition | ||
1380 | * specific variable's magic # (which indicates that these variables | ||
1381 | * are no longer valid) and by ignoring all XPC notify IPIs sent to | ||
1382 | * this partition. | ||
1383 | */ | ||
1384 | |||
1385 | DBUG_ON(atomic_read(&part->nchannels_engaged) != 0); | ||
1386 | DBUG_ON(atomic_read(&part->nchannels_active) != 0); | ||
1387 | DBUG_ON(part->setup_state != XPC_P_SETUP); | ||
1388 | part->setup_state = XPC_P_WTEARDOWN; | ||
1389 | |||
1390 | xpc_vars_part[partid].magic = 0; | ||
1391 | |||
1392 | free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid); | ||
1393 | |||
1394 | /* | ||
1395 | * Before proceeding with the teardown we have to wait until all | ||
1396 | * existing references cease. | ||
1397 | */ | ||
1398 | wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); | ||
1399 | |||
1400 | /* now we can begin tearing down the infrastructure */ | ||
1401 | |||
1402 | part->setup_state = XPC_P_TORNDOWN; | ||
1403 | |||
1404 | /* in case we've still got outstanding timers registered... */ | ||
1405 | del_timer_sync(&part->dropped_IPI_timer); | ||
1406 | |||
1407 | kfree(part->remote_openclose_args_base); | ||
1408 | part->remote_openclose_args = NULL; | ||
1409 | kfree(part->local_openclose_args_base); | ||
1410 | part->local_openclose_args = NULL; | ||
1411 | kfree(part->remote_GPs_base); | ||
1412 | part->remote_GPs = NULL; | ||
1413 | kfree(part->local_GPs_base); | ||
1414 | part->local_GPs = NULL; | ||
1415 | kfree(part->channels); | ||
1416 | part->channels = NULL; | ||
1417 | part->local_IPI_amo_va = NULL; | ||
1418 | } | ||
1419 | |||
1420 | /* | ||
1421 | * Called by XP at the time of channel connection registration to cause | 992 | * Called by XP at the time of channel connection registration to cause |
1422 | * XPC to establish connections to all currently active partitions. | 993 | * XPC to establish connections to all currently active partitions. |
1423 | */ | 994 | */ |
@@ -1974,113 +1545,6 @@ xpc_initiate_send_notify(short partid, int ch_number, void *payload, | |||
1974 | return ret; | 1545 | return ret; |
1975 | } | 1546 | } |
1976 | 1547 | ||
1977 | static struct xpc_msg * | ||
1978 | xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) | ||
1979 | { | ||
1980 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | ||
1981 | struct xpc_msg *remote_msg, *msg; | ||
1982 | u32 msg_index, nmsgs; | ||
1983 | u64 msg_offset; | ||
1984 | enum xp_retval ret; | ||
1985 | |||
1986 | if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { | ||
1987 | /* we were interrupted by a signal */ | ||
1988 | return NULL; | ||
1989 | } | ||
1990 | |||
1991 | while (get >= ch->next_msg_to_pull) { | ||
1992 | |||
1993 | /* pull as many messages as are ready and able to be pulled */ | ||
1994 | |||
1995 | msg_index = ch->next_msg_to_pull % ch->remote_nentries; | ||
1996 | |||
1997 | DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put); | ||
1998 | nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull; | ||
1999 | if (msg_index + nmsgs > ch->remote_nentries) { | ||
2000 | /* ignore the ones that wrap the msg queue for now */ | ||
2001 | nmsgs = ch->remote_nentries - msg_index; | ||
2002 | } | ||
2003 | |||
2004 | msg_offset = msg_index * ch->msg_size; | ||
2005 | msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset); | ||
2006 | remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa + | ||
2007 | msg_offset); | ||
2008 | |||
2009 | ret = xpc_pull_remote_cachelines(part, msg, remote_msg, | ||
2010 | nmsgs * ch->msg_size); | ||
2011 | if (ret != xpSuccess) { | ||
2012 | |||
2013 | dev_dbg(xpc_chan, "failed to pull %d msgs starting with" | ||
2014 | " msg %ld from partition %d, channel=%d, " | ||
2015 | "ret=%d\n", nmsgs, ch->next_msg_to_pull, | ||
2016 | ch->partid, ch->number, ret); | ||
2017 | |||
2018 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
2019 | |||
2020 | mutex_unlock(&ch->msg_to_pull_mutex); | ||
2021 | return NULL; | ||
2022 | } | ||
2023 | |||
2024 | ch->next_msg_to_pull += nmsgs; | ||
2025 | } | ||
2026 | |||
2027 | mutex_unlock(&ch->msg_to_pull_mutex); | ||
2028 | |||
2029 | /* return the message we were looking for */ | ||
2030 | msg_offset = (get % ch->remote_nentries) * ch->msg_size; | ||
2031 | msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset); | ||
2032 | |||
2033 | return msg; | ||
2034 | } | ||
2035 | |||
2036 | /* | ||
2037 | * Get a message to be delivered. | ||
2038 | */ | ||
2039 | static struct xpc_msg * | ||
2040 | xpc_get_deliverable_msg(struct xpc_channel *ch) | ||
2041 | { | ||
2042 | struct xpc_msg *msg = NULL; | ||
2043 | s64 get; | ||
2044 | |||
2045 | do { | ||
2046 | if (ch->flags & XPC_C_DISCONNECTING) | ||
2047 | break; | ||
2048 | |||
2049 | get = ch->w_local_GP.get; | ||
2050 | rmb(); /* guarantee that .get loads before .put */ | ||
2051 | if (get == ch->w_remote_GP.put) | ||
2052 | break; | ||
2053 | |||
2054 | /* There are messages waiting to be pulled and delivered. | ||
2055 | * We need to try to secure one for ourselves. We'll do this | ||
2056 | * by trying to increment w_local_GP.get and hope that no one | ||
2057 | * else beats us to it. If they do, we'll we'll simply have | ||
2058 | * to try again for the next one. | ||
2059 | */ | ||
2060 | |||
2061 | if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { | ||
2062 | /* we got the entry referenced by get */ | ||
2063 | |||
2064 | dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, " | ||
2065 | "partid=%d, channel=%d\n", get + 1, | ||
2066 | ch->partid, ch->number); | ||
2067 | |||
2068 | /* pull the message from the remote partition */ | ||
2069 | |||
2070 | msg = xpc_pull_remote_msg(ch, get); | ||
2071 | |||
2072 | DBUG_ON(msg != NULL && msg->number != get); | ||
2073 | DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE)); | ||
2074 | DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY)); | ||
2075 | |||
2076 | break; | ||
2077 | } | ||
2078 | |||
2079 | } while (1); | ||
2080 | |||
2081 | return msg; | ||
2082 | } | ||
2083 | |||
2084 | /* | 1548 | /* |
2085 | * Deliver a message to its intended recipient. | 1549 | * Deliver a message to its intended recipient. |
2086 | */ | 1550 | */ |