diff options
author | Dean Nelson <dcn@sgi.com> | 2008-07-30 01:34:06 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-30 12:41:49 -0400 |
commit | e17d416b1bc947df68499863f13b401fb42b48f6 (patch) | |
tree | d0c766c93dce9acb27948022b1613347981fd9b3 | |
parent | 94bd2708d4a95d7da5a1c7c28a063eccd127fb69 (diff) |
sgi-xp: isolate xpc_vars_part structure to sn2 only
Isolate the xpc_vars_part structure of XPC's reserved page to sn2 only.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | drivers/misc/sgi-xp/xpc.h | 26 | ||||
-rw-r--r-- | drivers/misc/sgi-xp/xpc_channel.c | 538 | ||||
-rw-r--r-- | drivers/misc/sgi-xp/xpc_main.c | 97 | ||||
-rw-r--r-- | drivers/misc/sgi-xp/xpc_partition.c | 1 | ||||
-rw-r--r-- | drivers/misc/sgi-xp/xpc_sn2.c | 563 | ||||
-rw-r--r-- | drivers/misc/sgi-xp/xpc_uv.c | 48 |
6 files changed, 644 insertions(+), 629 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 94b52bb8151e..e8c2a1629606 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h | |||
@@ -227,9 +227,9 @@ xpc_disallow_hb(short partid, struct xpc_vars *vars) | |||
227 | * itself from that partition. It is desirable that the size of this structure | 227 | * itself from that partition. It is desirable that the size of this structure |
228 | * evenly divides into a 128-byte cacheline, such that none of the entries in | 228 | * evenly divides into a 128-byte cacheline, such that none of the entries in |
229 | * this array crosses a 128-byte cacheline boundary. As it is now, each entry | 229 | * this array crosses a 128-byte cacheline boundary. As it is now, each entry |
230 | * occupies a 64-byte cacheline. | 230 | * occupies 64-bytes. |
231 | */ | 231 | */ |
232 | struct xpc_vars_part { | 232 | struct xpc_vars_part_sn2 { |
233 | u64 magic; | 233 | u64 magic; |
234 | 234 | ||
235 | u64 openclose_args_pa; /* physical address of open and close args */ | 235 | u64 openclose_args_pa; /* physical address of open and close args */ |
@@ -265,8 +265,6 @@ struct xpc_vars_part { | |||
265 | #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words) | 265 | #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words) |
266 | #define XPC_RP_VARS(_rp) ((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \ | 266 | #define XPC_RP_VARS(_rp) ((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \ |
267 | xp_nasid_mask_words)) | 267 | xp_nasid_mask_words)) |
268 | #define XPC_RP_VARS_PART(_rp) ((struct xpc_vars_part *) \ | ||
269 | ((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE)) | ||
270 | 268 | ||
271 | /* | 269 | /* |
272 | * Functions registered by add_timer() or called by kernel_thread() only | 270 | * Functions registered by add_timer() or called by kernel_thread() only |
@@ -541,13 +539,6 @@ struct xpc_partition { | |||
541 | wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ | 539 | wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ |
542 | atomic_t references; /* #of references to infrastructure */ | 540 | atomic_t references; /* #of references to infrastructure */ |
543 | 541 | ||
544 | /* | ||
545 | * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN | ||
546 | * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION | ||
547 | * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE | ||
548 | * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.) | ||
549 | */ | ||
550 | |||
551 | u8 nchannels; /* #of defined channels supported */ | 542 | u8 nchannels; /* #of defined channels supported */ |
552 | atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ | 543 | atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ |
553 | atomic_t nchannels_engaged; /* #of channels engaged with remote part */ | 544 | atomic_t nchannels_engaged; /* #of channels engaged with remote part */ |
@@ -613,7 +604,7 @@ struct xpc_partition { | |||
613 | * dropped IPIs. These occur whenever an IPI amo write doesn't complete until | 604 | * dropped IPIs. These occur whenever an IPI amo write doesn't complete until |
614 | * after the IPI was received. | 605 | * after the IPI was received. |
615 | */ | 606 | */ |
616 | #define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) | 607 | #define XPC_P_DROPPED_IPI_WAIT_INTERVAL (0.25 * HZ) |
617 | 608 | ||
618 | /* number of seconds to wait for other partitions to disengage */ | 609 | /* number of seconds to wait for other partitions to disengage */ |
619 | #define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90 | 610 | #define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90 |
@@ -637,13 +628,16 @@ extern void xpc_activate_partition(struct xpc_partition *); | |||
637 | extern void xpc_activate_kthreads(struct xpc_channel *, int); | 628 | extern void xpc_activate_kthreads(struct xpc_channel *, int); |
638 | extern void xpc_create_kthreads(struct xpc_channel *, int, int); | 629 | extern void xpc_create_kthreads(struct xpc_channel *, int, int); |
639 | extern void xpc_disconnect_wait(int); | 630 | extern void xpc_disconnect_wait(int); |
640 | |||
641 | extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *); | 631 | extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *); |
632 | extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *); | ||
633 | extern u64 (*xpc_get_IPI_flags) (struct xpc_partition *); | ||
634 | extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *); | ||
635 | extern enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *); | ||
636 | extern void (*xpc_teardown_infrastructure) (struct xpc_partition *); | ||
642 | 637 | ||
643 | /* found in xpc_sn2.c */ | 638 | /* found in xpc_sn2.c */ |
644 | extern void xpc_init_sn2(void); | 639 | extern void xpc_init_sn2(void); |
645 | extern struct xpc_vars *xpc_vars; /*>>> eliminate from here */ | 640 | extern struct xpc_vars *xpc_vars; /*>>> eliminate from here */ |
646 | extern struct xpc_vars_part *xpc_vars_part; /*>>> eliminate from here */ | ||
647 | 641 | ||
648 | /* found in xpc_uv.c */ | 642 | /* found in xpc_uv.c */ |
649 | extern void xpc_init_uv(void); | 643 | extern void xpc_init_uv(void); |
@@ -670,6 +664,7 @@ extern void xpc_deactivate_partition(const int, struct xpc_partition *, | |||
670 | extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); | 664 | extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); |
671 | 665 | ||
672 | /* found in xpc_channel.c */ | 666 | /* found in xpc_channel.c */ |
667 | extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **); | ||
673 | extern void xpc_initiate_connect(int); | 668 | extern void xpc_initiate_connect(int); |
674 | extern void xpc_initiate_disconnect(int); | 669 | extern void xpc_initiate_disconnect(int); |
675 | extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **); | 670 | extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **); |
@@ -677,8 +672,6 @@ extern enum xp_retval xpc_initiate_send(short, int, void *); | |||
677 | extern enum xp_retval xpc_initiate_send_notify(short, int, void *, | 672 | extern enum xp_retval xpc_initiate_send_notify(short, int, void *, |
678 | xpc_notify_func, void *); | 673 | xpc_notify_func, void *); |
679 | extern void xpc_initiate_received(short, int, void *); | 674 | extern void xpc_initiate_received(short, int, void *); |
680 | extern enum xp_retval xpc_setup_infrastructure(struct xpc_partition *); | ||
681 | extern enum xp_retval xpc_pull_remote_vars_part(struct xpc_partition *); | ||
682 | extern void xpc_process_channel_activity(struct xpc_partition *); | 675 | extern void xpc_process_channel_activity(struct xpc_partition *); |
683 | extern void xpc_connected_callout(struct xpc_channel *); | 676 | extern void xpc_connected_callout(struct xpc_channel *); |
684 | extern void xpc_deliver_msg(struct xpc_channel *); | 677 | extern void xpc_deliver_msg(struct xpc_channel *); |
@@ -686,7 +679,6 @@ extern void xpc_disconnect_channel(const int, struct xpc_channel *, | |||
686 | enum xp_retval, unsigned long *); | 679 | enum xp_retval, unsigned long *); |
687 | extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); | 680 | extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); |
688 | extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); | 681 | extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); |
689 | extern void xpc_teardown_infrastructure(struct xpc_partition *); | ||
690 | 682 | ||
691 | static inline void | 683 | static inline void |
692 | xpc_wakeup_channel_mgr(struct xpc_partition *part) | 684 | xpc_wakeup_channel_mgr(struct xpc_partition *part) |
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 9e79ad7eafe5..8081e8155dff 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c | |||
@@ -27,7 +27,7 @@ | |||
27 | /* | 27 | /* |
28 | * Guarantee that the kzalloc'd memory is cacheline aligned. | 28 | * Guarantee that the kzalloc'd memory is cacheline aligned. |
29 | */ | 29 | */ |
30 | static void * | 30 | void * |
31 | xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) | 31 | xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) |
32 | { | 32 | { |
33 | /* see if kzalloc will give us cachline aligned memory by default */ | 33 | /* see if kzalloc will give us cachline aligned memory by default */ |
@@ -49,382 +49,6 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) | |||
49 | } | 49 | } |
50 | 50 | ||
51 | /* | 51 | /* |
52 | * Set up the initial values for the XPartition Communication channels. | ||
53 | */ | ||
54 | static void | ||
55 | xpc_initialize_channels(struct xpc_partition *part, short partid) | ||
56 | { | ||
57 | int ch_number; | ||
58 | struct xpc_channel *ch; | ||
59 | |||
60 | for (ch_number = 0; ch_number < part->nchannels; ch_number++) { | ||
61 | ch = &part->channels[ch_number]; | ||
62 | |||
63 | ch->partid = partid; | ||
64 | ch->number = ch_number; | ||
65 | ch->flags = XPC_C_DISCONNECTED; | ||
66 | |||
67 | ch->local_GP = &part->local_GPs[ch_number]; | ||
68 | ch->local_openclose_args = | ||
69 | &part->local_openclose_args[ch_number]; | ||
70 | |||
71 | atomic_set(&ch->kthreads_assigned, 0); | ||
72 | atomic_set(&ch->kthreads_idle, 0); | ||
73 | atomic_set(&ch->kthreads_active, 0); | ||
74 | |||
75 | atomic_set(&ch->references, 0); | ||
76 | atomic_set(&ch->n_to_notify, 0); | ||
77 | |||
78 | spin_lock_init(&ch->lock); | ||
79 | mutex_init(&ch->msg_to_pull_mutex); | ||
80 | init_completion(&ch->wdisconnect_wait); | ||
81 | |||
82 | atomic_set(&ch->n_on_msg_allocate_wq, 0); | ||
83 | init_waitqueue_head(&ch->msg_allocate_wq); | ||
84 | init_waitqueue_head(&ch->idle_wq); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Setup the infrastructure necessary to support XPartition Communication | ||
90 | * between the specified remote partition and the local one. | ||
91 | */ | ||
92 | enum xp_retval | ||
93 | xpc_setup_infrastructure(struct xpc_partition *part) | ||
94 | { | ||
95 | int ret, cpuid; | ||
96 | struct timer_list *timer; | ||
97 | short partid = XPC_PARTID(part); | ||
98 | |||
99 | /* | ||
100 | * Zero out MOST of the entry for this partition. Only the fields | ||
101 | * starting with `nchannels' will be zeroed. The preceding fields must | ||
102 | * remain `viable' across partition ups and downs, since they may be | ||
103 | * referenced during this memset() operation. | ||
104 | */ | ||
105 | memset(&part->nchannels, 0, sizeof(struct xpc_partition) - | ||
106 | offsetof(struct xpc_partition, nchannels)); | ||
107 | |||
108 | /* | ||
109 | * Allocate all of the channel structures as a contiguous chunk of | ||
110 | * memory. | ||
111 | */ | ||
112 | part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS, | ||
113 | GFP_KERNEL); | ||
114 | if (part->channels == NULL) { | ||
115 | dev_err(xpc_chan, "can't get memory for channels\n"); | ||
116 | return xpNoMemory; | ||
117 | } | ||
118 | |||
119 | part->nchannels = XPC_MAX_NCHANNELS; | ||
120 | |||
121 | /* allocate all the required GET/PUT values */ | ||
122 | |||
123 | part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, | ||
124 | GFP_KERNEL, | ||
125 | &part->local_GPs_base); | ||
126 | if (part->local_GPs == NULL) { | ||
127 | kfree(part->channels); | ||
128 | part->channels = NULL; | ||
129 | dev_err(xpc_chan, "can't get memory for local get/put " | ||
130 | "values\n"); | ||
131 | return xpNoMemory; | ||
132 | } | ||
133 | |||
134 | part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, | ||
135 | GFP_KERNEL, | ||
136 | &part-> | ||
137 | remote_GPs_base); | ||
138 | if (part->remote_GPs == NULL) { | ||
139 | dev_err(xpc_chan, "can't get memory for remote get/put " | ||
140 | "values\n"); | ||
141 | kfree(part->local_GPs_base); | ||
142 | part->local_GPs = NULL; | ||
143 | kfree(part->channels); | ||
144 | part->channels = NULL; | ||
145 | return xpNoMemory; | ||
146 | } | ||
147 | |||
148 | /* allocate all the required open and close args */ | ||
149 | |||
150 | part->local_openclose_args = | ||
151 | xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, | ||
152 | &part->local_openclose_args_base); | ||
153 | if (part->local_openclose_args == NULL) { | ||
154 | dev_err(xpc_chan, "can't get memory for local connect args\n"); | ||
155 | kfree(part->remote_GPs_base); | ||
156 | part->remote_GPs = NULL; | ||
157 | kfree(part->local_GPs_base); | ||
158 | part->local_GPs = NULL; | ||
159 | kfree(part->channels); | ||
160 | part->channels = NULL; | ||
161 | return xpNoMemory; | ||
162 | } | ||
163 | |||
164 | part->remote_openclose_args = | ||
165 | xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, | ||
166 | &part->remote_openclose_args_base); | ||
167 | if (part->remote_openclose_args == NULL) { | ||
168 | dev_err(xpc_chan, "can't get memory for remote connect args\n"); | ||
169 | kfree(part->local_openclose_args_base); | ||
170 | part->local_openclose_args = NULL; | ||
171 | kfree(part->remote_GPs_base); | ||
172 | part->remote_GPs = NULL; | ||
173 | kfree(part->local_GPs_base); | ||
174 | part->local_GPs = NULL; | ||
175 | kfree(part->channels); | ||
176 | part->channels = NULL; | ||
177 | return xpNoMemory; | ||
178 | } | ||
179 | |||
180 | xpc_initialize_channels(part, partid); | ||
181 | |||
182 | atomic_set(&part->nchannels_active, 0); | ||
183 | atomic_set(&part->nchannels_engaged, 0); | ||
184 | |||
185 | /* local_IPI_amo were set to 0 by an earlier memset() */ | ||
186 | |||
187 | /* Initialize this partitions AMO_t structure */ | ||
188 | part->local_IPI_amo_va = xpc_IPI_init(partid); | ||
189 | |||
190 | spin_lock_init(&part->IPI_lock); | ||
191 | |||
192 | atomic_set(&part->channel_mgr_requests, 1); | ||
193 | init_waitqueue_head(&part->channel_mgr_wq); | ||
194 | |||
195 | sprintf(part->IPI_owner, "xpc%02d", partid); | ||
196 | ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED, | ||
197 | part->IPI_owner, (void *)(u64)partid); | ||
198 | if (ret != 0) { | ||
199 | dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " | ||
200 | "errno=%d\n", -ret); | ||
201 | kfree(part->remote_openclose_args_base); | ||
202 | part->remote_openclose_args = NULL; | ||
203 | kfree(part->local_openclose_args_base); | ||
204 | part->local_openclose_args = NULL; | ||
205 | kfree(part->remote_GPs_base); | ||
206 | part->remote_GPs = NULL; | ||
207 | kfree(part->local_GPs_base); | ||
208 | part->local_GPs = NULL; | ||
209 | kfree(part->channels); | ||
210 | part->channels = NULL; | ||
211 | return xpLackOfResources; | ||
212 | } | ||
213 | |||
214 | /* Setup a timer to check for dropped IPIs */ | ||
215 | timer = &part->dropped_IPI_timer; | ||
216 | init_timer(timer); | ||
217 | timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check; | ||
218 | timer->data = (unsigned long)part; | ||
219 | timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT; | ||
220 | add_timer(timer); | ||
221 | |||
222 | /* | ||
223 | * With the setting of the partition setup_state to XPC_P_SETUP, we're | ||
224 | * declaring that this partition is ready to go. | ||
225 | */ | ||
226 | part->setup_state = XPC_P_SETUP; | ||
227 | |||
228 | /* | ||
229 | * Setup the per partition specific variables required by the | ||
230 | * remote partition to establish channel connections with us. | ||
231 | * | ||
232 | * The setting of the magic # indicates that these per partition | ||
233 | * specific variables are ready to be used. | ||
234 | */ | ||
235 | xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); | ||
236 | xpc_vars_part[partid].openclose_args_pa = | ||
237 | __pa(part->local_openclose_args); | ||
238 | xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); | ||
239 | cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ | ||
240 | xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid); | ||
241 | xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid); | ||
242 | xpc_vars_part[partid].nchannels = part->nchannels; | ||
243 | xpc_vars_part[partid].magic = XPC_VP_MAGIC1; | ||
244 | |||
245 | return xpSuccess; | ||
246 | } | ||
247 | |||
248 | /* | ||
249 | * Create a wrapper that hides the underlying mechanism for pulling a cacheline | ||
250 | * (or multiple cachelines) from a remote partition. | ||
251 | * | ||
252 | * src must be a cacheline aligned physical address on the remote partition. | ||
253 | * dst must be a cacheline aligned virtual address on this partition. | ||
254 | * cnt must be cacheline sized | ||
255 | */ | ||
256 | static enum xp_retval | ||
257 | xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, | ||
258 | const void *src, size_t cnt) | ||
259 | { | ||
260 | enum xp_retval ret; | ||
261 | |||
262 | DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src)); | ||
263 | DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst)); | ||
264 | DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); | ||
265 | |||
266 | if (part->act_state == XPC_P_DEACTIVATING) | ||
267 | return part->reason; | ||
268 | |||
269 | ret = xp_remote_memcpy(dst, src, cnt); | ||
270 | if (ret != xpSuccess) { | ||
271 | dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed," | ||
272 | " ret=%d\n", XPC_PARTID(part), ret); | ||
273 | } | ||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | /* | ||
278 | * Pull the remote per partition specific variables from the specified | ||
279 | * partition. | ||
280 | */ | ||
281 | enum xp_retval | ||
282 | xpc_pull_remote_vars_part(struct xpc_partition *part) | ||
283 | { | ||
284 | u8 buffer[L1_CACHE_BYTES * 2]; | ||
285 | struct xpc_vars_part *pulled_entry_cacheline = | ||
286 | (struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer); | ||
287 | struct xpc_vars_part *pulled_entry; | ||
288 | u64 remote_entry_cacheline_pa, remote_entry_pa; | ||
289 | short partid = XPC_PARTID(part); | ||
290 | enum xp_retval ret; | ||
291 | |||
292 | /* pull the cacheline that contains the variables we're interested in */ | ||
293 | |||
294 | DBUG_ON(part->remote_vars_part_pa != | ||
295 | L1_CACHE_ALIGN(part->remote_vars_part_pa)); | ||
296 | DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2); | ||
297 | |||
298 | remote_entry_pa = part->remote_vars_part_pa + | ||
299 | sn_partition_id * sizeof(struct xpc_vars_part); | ||
300 | |||
301 | remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1)); | ||
302 | |||
303 | pulled_entry = (struct xpc_vars_part *)((u64)pulled_entry_cacheline + | ||
304 | (remote_entry_pa & | ||
305 | (L1_CACHE_BYTES - 1))); | ||
306 | |||
307 | ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, | ||
308 | (void *)remote_entry_cacheline_pa, | ||
309 | L1_CACHE_BYTES); | ||
310 | if (ret != xpSuccess) { | ||
311 | dev_dbg(xpc_chan, "failed to pull XPC vars_part from " | ||
312 | "partition %d, ret=%d\n", partid, ret); | ||
313 | return ret; | ||
314 | } | ||
315 | |||
316 | /* see if they've been set up yet */ | ||
317 | |||
318 | if (pulled_entry->magic != XPC_VP_MAGIC1 && | ||
319 | pulled_entry->magic != XPC_VP_MAGIC2) { | ||
320 | |||
321 | if (pulled_entry->magic != 0) { | ||
322 | dev_dbg(xpc_chan, "partition %d's XPC vars_part for " | ||
323 | "partition %d has bad magic value (=0x%lx)\n", | ||
324 | partid, sn_partition_id, pulled_entry->magic); | ||
325 | return xpBadMagic; | ||
326 | } | ||
327 | |||
328 | /* they've not been initialized yet */ | ||
329 | return xpRetry; | ||
330 | } | ||
331 | |||
332 | if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) { | ||
333 | |||
334 | /* validate the variables */ | ||
335 | |||
336 | if (pulled_entry->GPs_pa == 0 || | ||
337 | pulled_entry->openclose_args_pa == 0 || | ||
338 | pulled_entry->IPI_amo_pa == 0) { | ||
339 | |||
340 | dev_err(xpc_chan, "partition %d's XPC vars_part for " | ||
341 | "partition %d are not valid\n", partid, | ||
342 | sn_partition_id); | ||
343 | return xpInvalidAddress; | ||
344 | } | ||
345 | |||
346 | /* the variables we imported look to be valid */ | ||
347 | |||
348 | part->remote_GPs_pa = pulled_entry->GPs_pa; | ||
349 | part->remote_openclose_args_pa = | ||
350 | pulled_entry->openclose_args_pa; | ||
351 | part->remote_IPI_amo_va = | ||
352 | (AMO_t *)__va(pulled_entry->IPI_amo_pa); | ||
353 | part->remote_IPI_nasid = pulled_entry->IPI_nasid; | ||
354 | part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; | ||
355 | |||
356 | if (part->nchannels > pulled_entry->nchannels) | ||
357 | part->nchannels = pulled_entry->nchannels; | ||
358 | |||
359 | /* let the other side know that we've pulled their variables */ | ||
360 | |||
361 | xpc_vars_part[partid].magic = XPC_VP_MAGIC2; | ||
362 | } | ||
363 | |||
364 | if (pulled_entry->magic == XPC_VP_MAGIC1) | ||
365 | return xpRetry; | ||
366 | |||
367 | return xpSuccess; | ||
368 | } | ||
369 | |||
370 | /* | ||
371 | * Get the IPI flags and pull the openclose args and/or remote GPs as needed. | ||
372 | */ | ||
373 | static u64 | ||
374 | xpc_get_IPI_flags(struct xpc_partition *part) | ||
375 | { | ||
376 | unsigned long irq_flags; | ||
377 | u64 IPI_amo; | ||
378 | enum xp_retval ret; | ||
379 | |||
380 | /* | ||
381 | * See if there are any IPI flags to be handled. | ||
382 | */ | ||
383 | |||
384 | spin_lock_irqsave(&part->IPI_lock, irq_flags); | ||
385 | IPI_amo = part->local_IPI_amo; | ||
386 | if (IPI_amo != 0) | ||
387 | part->local_IPI_amo = 0; | ||
388 | |||
389 | spin_unlock_irqrestore(&part->IPI_lock, irq_flags); | ||
390 | |||
391 | if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { | ||
392 | ret = xpc_pull_remote_cachelines(part, | ||
393 | part->remote_openclose_args, | ||
394 | (void *)part-> | ||
395 | remote_openclose_args_pa, | ||
396 | XPC_OPENCLOSE_ARGS_SIZE); | ||
397 | if (ret != xpSuccess) { | ||
398 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
399 | |||
400 | dev_dbg(xpc_chan, "failed to pull openclose args from " | ||
401 | "partition %d, ret=%d\n", XPC_PARTID(part), | ||
402 | ret); | ||
403 | |||
404 | /* don't bother processing IPIs anymore */ | ||
405 | IPI_amo = 0; | ||
406 | } | ||
407 | } | ||
408 | |||
409 | if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { | ||
410 | ret = xpc_pull_remote_cachelines(part, part->remote_GPs, | ||
411 | (void *)part->remote_GPs_pa, | ||
412 | XPC_GP_SIZE); | ||
413 | if (ret != xpSuccess) { | ||
414 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
415 | |||
416 | dev_dbg(xpc_chan, "failed to pull GPs from partition " | ||
417 | "%d, ret=%d\n", XPC_PARTID(part), ret); | ||
418 | |||
419 | /* don't bother processing IPIs anymore */ | ||
420 | IPI_amo = 0; | ||
421 | } | ||
422 | } | ||
423 | |||
424 | return IPI_amo; | ||
425 | } | ||
426 | |||
427 | /* | ||
428 | * Allocate the local message queue and the notify queue. | 52 | * Allocate the local message queue and the notify queue. |
429 | */ | 53 | */ |
430 | static enum xp_retval | 54 | static enum xp_retval |
@@ -1365,59 +989,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason) | |||
1365 | } | 989 | } |
1366 | 990 | ||
1367 | /* | 991 | /* |
1368 | * Teardown the infrastructure necessary to support XPartition Communication | ||
1369 | * between the specified remote partition and the local one. | ||
1370 | */ | ||
1371 | void | ||
1372 | xpc_teardown_infrastructure(struct xpc_partition *part) | ||
1373 | { | ||
1374 | short partid = XPC_PARTID(part); | ||
1375 | |||
1376 | /* | ||
1377 | * We start off by making this partition inaccessible to local | ||
1378 | * processes by marking it as no longer setup. Then we make it | ||
1379 | * inaccessible to remote processes by clearing the XPC per partition | ||
1380 | * specific variable's magic # (which indicates that these variables | ||
1381 | * are no longer valid) and by ignoring all XPC notify IPIs sent to | ||
1382 | * this partition. | ||
1383 | */ | ||
1384 | |||
1385 | DBUG_ON(atomic_read(&part->nchannels_engaged) != 0); | ||
1386 | DBUG_ON(atomic_read(&part->nchannels_active) != 0); | ||
1387 | DBUG_ON(part->setup_state != XPC_P_SETUP); | ||
1388 | part->setup_state = XPC_P_WTEARDOWN; | ||
1389 | |||
1390 | xpc_vars_part[partid].magic = 0; | ||
1391 | |||
1392 | free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid); | ||
1393 | |||
1394 | /* | ||
1395 | * Before proceeding with the teardown we have to wait until all | ||
1396 | * existing references cease. | ||
1397 | */ | ||
1398 | wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); | ||
1399 | |||
1400 | /* now we can begin tearing down the infrastructure */ | ||
1401 | |||
1402 | part->setup_state = XPC_P_TORNDOWN; | ||
1403 | |||
1404 | /* in case we've still got outstanding timers registered... */ | ||
1405 | del_timer_sync(&part->dropped_IPI_timer); | ||
1406 | |||
1407 | kfree(part->remote_openclose_args_base); | ||
1408 | part->remote_openclose_args = NULL; | ||
1409 | kfree(part->local_openclose_args_base); | ||
1410 | part->local_openclose_args = NULL; | ||
1411 | kfree(part->remote_GPs_base); | ||
1412 | part->remote_GPs = NULL; | ||
1413 | kfree(part->local_GPs_base); | ||
1414 | part->local_GPs = NULL; | ||
1415 | kfree(part->channels); | ||
1416 | part->channels = NULL; | ||
1417 | part->local_IPI_amo_va = NULL; | ||
1418 | } | ||
1419 | |||
1420 | /* | ||
1421 | * Called by XP at the time of channel connection registration to cause | 992 | * Called by XP at the time of channel connection registration to cause |
1422 | * XPC to establish connections to all currently active partitions. | 993 | * XPC to establish connections to all currently active partitions. |
1423 | */ | 994 | */ |
@@ -1974,113 +1545,6 @@ xpc_initiate_send_notify(short partid, int ch_number, void *payload, | |||
1974 | return ret; | 1545 | return ret; |
1975 | } | 1546 | } |
1976 | 1547 | ||
1977 | static struct xpc_msg * | ||
1978 | xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) | ||
1979 | { | ||
1980 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | ||
1981 | struct xpc_msg *remote_msg, *msg; | ||
1982 | u32 msg_index, nmsgs; | ||
1983 | u64 msg_offset; | ||
1984 | enum xp_retval ret; | ||
1985 | |||
1986 | if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { | ||
1987 | /* we were interrupted by a signal */ | ||
1988 | return NULL; | ||
1989 | } | ||
1990 | |||
1991 | while (get >= ch->next_msg_to_pull) { | ||
1992 | |||
1993 | /* pull as many messages as are ready and able to be pulled */ | ||
1994 | |||
1995 | msg_index = ch->next_msg_to_pull % ch->remote_nentries; | ||
1996 | |||
1997 | DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put); | ||
1998 | nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull; | ||
1999 | if (msg_index + nmsgs > ch->remote_nentries) { | ||
2000 | /* ignore the ones that wrap the msg queue for now */ | ||
2001 | nmsgs = ch->remote_nentries - msg_index; | ||
2002 | } | ||
2003 | |||
2004 | msg_offset = msg_index * ch->msg_size; | ||
2005 | msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset); | ||
2006 | remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa + | ||
2007 | msg_offset); | ||
2008 | |||
2009 | ret = xpc_pull_remote_cachelines(part, msg, remote_msg, | ||
2010 | nmsgs * ch->msg_size); | ||
2011 | if (ret != xpSuccess) { | ||
2012 | |||
2013 | dev_dbg(xpc_chan, "failed to pull %d msgs starting with" | ||
2014 | " msg %ld from partition %d, channel=%d, " | ||
2015 | "ret=%d\n", nmsgs, ch->next_msg_to_pull, | ||
2016 | ch->partid, ch->number, ret); | ||
2017 | |||
2018 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
2019 | |||
2020 | mutex_unlock(&ch->msg_to_pull_mutex); | ||
2021 | return NULL; | ||
2022 | } | ||
2023 | |||
2024 | ch->next_msg_to_pull += nmsgs; | ||
2025 | } | ||
2026 | |||
2027 | mutex_unlock(&ch->msg_to_pull_mutex); | ||
2028 | |||
2029 | /* return the message we were looking for */ | ||
2030 | msg_offset = (get % ch->remote_nentries) * ch->msg_size; | ||
2031 | msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset); | ||
2032 | |||
2033 | return msg; | ||
2034 | } | ||
2035 | |||
2036 | /* | ||
2037 | * Get a message to be delivered. | ||
2038 | */ | ||
2039 | static struct xpc_msg * | ||
2040 | xpc_get_deliverable_msg(struct xpc_channel *ch) | ||
2041 | { | ||
2042 | struct xpc_msg *msg = NULL; | ||
2043 | s64 get; | ||
2044 | |||
2045 | do { | ||
2046 | if (ch->flags & XPC_C_DISCONNECTING) | ||
2047 | break; | ||
2048 | |||
2049 | get = ch->w_local_GP.get; | ||
2050 | rmb(); /* guarantee that .get loads before .put */ | ||
2051 | if (get == ch->w_remote_GP.put) | ||
2052 | break; | ||
2053 | |||
2054 | /* There are messages waiting to be pulled and delivered. | ||
2055 | * We need to try to secure one for ourselves. We'll do this | ||
2056 | * by trying to increment w_local_GP.get and hope that no one | ||
2057 | * else beats us to it. If they do, we'll we'll simply have | ||
2058 | * to try again for the next one. | ||
2059 | */ | ||
2060 | |||
2061 | if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { | ||
2062 | /* we got the entry referenced by get */ | ||
2063 | |||
2064 | dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, " | ||
2065 | "partid=%d, channel=%d\n", get + 1, | ||
2066 | ch->partid, ch->number); | ||
2067 | |||
2068 | /* pull the message from the remote partition */ | ||
2069 | |||
2070 | msg = xpc_pull_remote_msg(ch, get); | ||
2071 | |||
2072 | DBUG_ON(msg != NULL && msg->number != get); | ||
2073 | DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE)); | ||
2074 | DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY)); | ||
2075 | |||
2076 | break; | ||
2077 | } | ||
2078 | |||
2079 | } while (1); | ||
2080 | |||
2081 | return msg; | ||
2082 | } | ||
2083 | |||
2084 | /* | 1548 | /* |
2085 | * Deliver a message to its intended recipient. | 1549 | * Deliver a message to its intended recipient. |
2086 | */ | 1550 | */ |
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 2180f1f7e087..be3a48539307 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c | |||
@@ -176,6 +176,12 @@ static struct notifier_block xpc_die_notifier = { | |||
176 | }; | 176 | }; |
177 | 177 | ||
178 | enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp); | 178 | enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp); |
179 | enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part); | ||
180 | u64 (*xpc_get_IPI_flags) (struct xpc_partition *part); | ||
181 | struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch); | ||
182 | enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part); | ||
183 | void (*xpc_teardown_infrastructure) (struct xpc_partition *part); | ||
184 | |||
179 | 185 | ||
180 | /* | 186 | /* |
181 | * Timer function to enforce the timelimit on the partition disengage request. | 187 | * Timer function to enforce the timelimit on the partition disengage request. |
@@ -313,37 +319,8 @@ xpc_initiate_discovery(void *ignore) | |||
313 | } | 319 | } |
314 | 320 | ||
315 | /* | 321 | /* |
316 | * Establish first contact with the remote partititon. This involves pulling | ||
317 | * the XPC per partition variables from the remote partition and waiting for | ||
318 | * the remote partition to pull ours. | ||
319 | */ | ||
320 | static enum xp_retval | ||
321 | xpc_make_first_contact(struct xpc_partition *part) | ||
322 | { | ||
323 | enum xp_retval ret; | ||
324 | |||
325 | while ((ret = xpc_pull_remote_vars_part(part)) != xpSuccess) { | ||
326 | if (ret != xpRetry) { | ||
327 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
328 | return ret; | ||
329 | } | ||
330 | |||
331 | dev_dbg(xpc_chan, "waiting to make first contact with " | ||
332 | "partition %d\n", XPC_PARTID(part)); | ||
333 | |||
334 | /* wait a 1/4 of a second or so */ | ||
335 | (void)msleep_interruptible(250); | ||
336 | |||
337 | if (part->act_state == XPC_P_DEACTIVATING) | ||
338 | return part->reason; | ||
339 | } | ||
340 | |||
341 | return xpc_mark_partition_active(part); | ||
342 | } | ||
343 | |||
344 | /* | ||
345 | * The first kthread assigned to a newly activated partition is the one | 322 | * The first kthread assigned to a newly activated partition is the one |
346 | * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to | 323 | * created by XPC HB with which it calls xpc_activating(). XPC hangs on to |
347 | * that kthread until the partition is brought down, at which time that kthread | 324 | * that kthread until the partition is brought down, at which time that kthread |
348 | * returns back to XPC HB. (The return of that kthread will signify to XPC HB | 325 | * returns back to XPC HB. (The return of that kthread will signify to XPC HB |
349 | * that XPC has dismantled all communication infrastructure for the associated | 326 | * that XPC has dismantled all communication infrastructure for the associated |
@@ -393,41 +370,10 @@ xpc_channel_mgr(struct xpc_partition *part) | |||
393 | * upped partition. | 370 | * upped partition. |
394 | * | 371 | * |
395 | * The kthread that was created by XPC HB and which setup the XPC | 372 | * The kthread that was created by XPC HB and which setup the XPC |
396 | * infrastructure will remain assigned to the partition until the partition | 373 | * infrastructure will remain assigned to the partition becoming the channel |
397 | * goes down. At which time the kthread will teardown the XPC infrastructure | 374 | * manager for that partition until the partition is deactivating, at which |
398 | * and then exit. | 375 | * time the kthread will teardown the XPC infrastructure and then exit. |
399 | * | ||
400 | * XPC HB will put the remote partition's XPC per partition specific variables | ||
401 | * physical address into xpc_partitions[partid].remote_vars_part_pa prior to | ||
402 | * calling xpc_partition_up(). | ||
403 | */ | 376 | */ |
404 | static void | ||
405 | xpc_partition_up(struct xpc_partition *part) | ||
406 | { | ||
407 | DBUG_ON(part->channels != NULL); | ||
408 | |||
409 | dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); | ||
410 | |||
411 | if (xpc_setup_infrastructure(part) != xpSuccess) | ||
412 | return; | ||
413 | |||
414 | /* | ||
415 | * The kthread that XPC HB called us with will become the | ||
416 | * channel manager for this partition. It will not return | ||
417 | * back to XPC HB until the partition's XPC infrastructure | ||
418 | * has been dismantled. | ||
419 | */ | ||
420 | |||
421 | (void)xpc_part_ref(part); /* this will always succeed */ | ||
422 | |||
423 | if (xpc_make_first_contact(part) == xpSuccess) | ||
424 | xpc_channel_mgr(part); | ||
425 | |||
426 | xpc_part_deref(part); | ||
427 | |||
428 | xpc_teardown_infrastructure(part); | ||
429 | } | ||
430 | |||
431 | static int | 377 | static int |
432 | xpc_activating(void *__partid) | 378 | xpc_activating(void *__partid) |
433 | { | 379 | { |
@@ -453,7 +399,7 @@ xpc_activating(void *__partid) | |||
453 | XPC_SET_REASON(part, 0, 0); | 399 | XPC_SET_REASON(part, 0, 0); |
454 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | 400 | spin_unlock_irqrestore(&part->act_lock, irq_flags); |
455 | 401 | ||
456 | dev_dbg(xpc_part, "bringing partition %d up\n", partid); | 402 | dev_dbg(xpc_part, "activating partition %d\n", partid); |
457 | 403 | ||
458 | /* | 404 | /* |
459 | * Register the remote partition's AMOs with SAL so it can handle | 405 | * Register the remote partition's AMOs with SAL so it can handle |
@@ -467,7 +413,7 @@ xpc_activating(void *__partid) | |||
467 | */ | 413 | */ |
468 | if (sn_register_xp_addr_region(part->remote_amos_page_pa, | 414 | if (sn_register_xp_addr_region(part->remote_amos_page_pa, |
469 | PAGE_SIZE, 1) < 0) { | 415 | PAGE_SIZE, 1) < 0) { |
470 | dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " | 416 | dev_warn(xpc_part, "xpc_activating(%d) failed to register " |
471 | "xp_addr region\n", partid); | 417 | "xp_addr region\n", partid); |
472 | 418 | ||
473 | spin_lock_irqsave(&part->act_lock, irq_flags); | 419 | spin_lock_irqsave(&part->act_lock, irq_flags); |
@@ -481,11 +427,18 @@ xpc_activating(void *__partid) | |||
481 | xpc_allow_hb(partid, xpc_vars); | 427 | xpc_allow_hb(partid, xpc_vars); |
482 | xpc_IPI_send_activated(part); | 428 | xpc_IPI_send_activated(part); |
483 | 429 | ||
484 | /* | 430 | if (xpc_setup_infrastructure(part) == xpSuccess) { |
485 | * xpc_partition_up() holds this thread and marks this partition as | 431 | (void)xpc_part_ref(part); /* this will always succeed */ |
486 | * XPC_P_ACTIVE by calling xpc_hb_mark_active(). | 432 | |
487 | */ | 433 | if (xpc_make_first_contact(part) == xpSuccess) { |
488 | (void)xpc_partition_up(part); | 434 | xpc_mark_partition_active(part); |
435 | xpc_channel_mgr(part); | ||
436 | /* won't return until partition is deactivating */ | ||
437 | } | ||
438 | |||
439 | xpc_part_deref(part); | ||
440 | xpc_teardown_infrastructure(part); | ||
441 | } | ||
489 | 442 | ||
490 | xpc_disallow_hb(partid, xpc_vars); | 443 | xpc_disallow_hb(partid, xpc_vars); |
491 | xpc_mark_partition_inactive(part); | 444 | xpc_mark_partition_inactive(part); |
@@ -568,7 +521,7 @@ xpc_dropped_IPI_check(struct xpc_partition *part) | |||
568 | xpc_check_for_channel_activity(part); | 521 | xpc_check_for_channel_activity(part); |
569 | 522 | ||
570 | part->dropped_IPI_timer.expires = jiffies + | 523 | part->dropped_IPI_timer.expires = jiffies + |
571 | XPC_P_DROPPED_IPI_WAIT; | 524 | XPC_P_DROPPED_IPI_WAIT_INTERVAL; |
572 | add_timer(&part->dropped_IPI_timer); | 525 | add_timer(&part->dropped_IPI_timer); |
573 | xpc_part_deref(part); | 526 | xpc_part_deref(part); |
574 | } | 527 | } |
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 1db84cb49143..be5b7547dab4 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c | |||
@@ -486,6 +486,7 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version, | |||
486 | dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n", | 486 | dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n", |
487 | part->last_heartbeat); | 487 | part->last_heartbeat); |
488 | 488 | ||
489 | /* >>> remote_vars_part_pa and vars_part_pa are sn2 only!!! */ | ||
489 | part->remote_vars_part_pa = remote_vars->vars_part_pa; | 490 | part->remote_vars_part_pa = remote_vars->vars_part_pa; |
490 | dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n", | 491 | dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n", |
491 | part->remote_vars_part_pa); | 492 | part->remote_vars_part_pa); |
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index 5a37348715c7..ee28e231dc47 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c | |||
@@ -14,12 +14,13 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/delay.h> | ||
17 | #include <asm/uncached.h> | 18 | #include <asm/uncached.h> |
18 | #include <asm/sn/sn_sal.h> | 19 | #include <asm/sn/sn_sal.h> |
19 | #include "xpc.h" | 20 | #include "xpc.h" |
20 | 21 | ||
21 | struct xpc_vars *xpc_vars; | 22 | struct xpc_vars *xpc_vars; |
22 | struct xpc_vars_part *xpc_vars_part; | 23 | static struct xpc_vars_part_sn2 *xpc_vars_part; /* >>> Add _sn2 suffix? */ |
23 | 24 | ||
24 | static enum xp_retval | 25 | static enum xp_retval |
25 | xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) | 26 | xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) |
@@ -33,7 +34,10 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) | |||
33 | 34 | ||
34 | rp->sn.vars_pa = __pa(xpc_vars); | 35 | rp->sn.vars_pa = __pa(xpc_vars); |
35 | 36 | ||
36 | xpc_vars_part = XPC_RP_VARS_PART(rp); | 37 | /* vars_part array follows immediately after vars */ |
38 | xpc_vars_part = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) + | ||
39 | XPC_RP_VARS_SIZE); | ||
40 | |||
37 | 41 | ||
38 | /* | 42 | /* |
39 | * Before clearing xpc_vars, see if a page of AMOs had been previously | 43 | * Before clearing xpc_vars, see if a page of AMOs had been previously |
@@ -85,7 +89,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) | |||
85 | xpc_vars->amos_page = amos_page; /* save for next load of XPC */ | 89 | xpc_vars->amos_page = amos_page; /* save for next load of XPC */ |
86 | 90 | ||
87 | /* clear xpc_vars_part */ | 91 | /* clear xpc_vars_part */ |
88 | memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) * | 92 | memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part_sn2) * |
89 | xp_max_npartitions); | 93 | xp_max_npartitions); |
90 | 94 | ||
91 | /* initialize the activate IRQ related AMO variables */ | 95 | /* initialize the activate IRQ related AMO variables */ |
@@ -99,10 +103,563 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) | |||
99 | return xpSuccess; | 103 | return xpSuccess; |
100 | } | 104 | } |
101 | 105 | ||
106 | /* | ||
107 | * Setup the infrastructure necessary to support XPartition Communication | ||
108 | * between the specified remote partition and the local one. | ||
109 | */ | ||
110 | static enum xp_retval | ||
111 | xpc_setup_infrastructure_sn2(struct xpc_partition *part) | ||
112 | { | ||
113 | enum xp_retval retval; | ||
114 | int ret; | ||
115 | int cpuid; | ||
116 | int ch_number; | ||
117 | struct xpc_channel *ch; | ||
118 | struct timer_list *timer; | ||
119 | short partid = XPC_PARTID(part); | ||
120 | |||
121 | /* | ||
122 | * Allocate all of the channel structures as a contiguous chunk of | ||
123 | * memory. | ||
124 | */ | ||
125 | DBUG_ON(part->channels != NULL); | ||
126 | part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS, | ||
127 | GFP_KERNEL); | ||
128 | if (part->channels == NULL) { | ||
129 | dev_err(xpc_chan, "can't get memory for channels\n"); | ||
130 | return xpNoMemory; | ||
131 | } | ||
132 | |||
133 | /* allocate all the required GET/PUT values */ | ||
134 | |||
135 | part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, | ||
136 | GFP_KERNEL, | ||
137 | &part->local_GPs_base); | ||
138 | if (part->local_GPs == NULL) { | ||
139 | dev_err(xpc_chan, "can't get memory for local get/put " | ||
140 | "values\n"); | ||
141 | retval = xpNoMemory; | ||
142 | goto out_1; | ||
143 | } | ||
144 | |||
145 | part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, | ||
146 | GFP_KERNEL, | ||
147 | &part-> | ||
148 | remote_GPs_base); | ||
149 | if (part->remote_GPs == NULL) { | ||
150 | dev_err(xpc_chan, "can't get memory for remote get/put " | ||
151 | "values\n"); | ||
152 | retval = xpNoMemory; | ||
153 | goto out_2; | ||
154 | } | ||
155 | |||
156 | part->remote_GPs_pa = 0; | ||
157 | |||
158 | /* allocate all the required open and close args */ | ||
159 | |||
160 | part->local_openclose_args = | ||
161 | xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, | ||
162 | &part->local_openclose_args_base); | ||
163 | if (part->local_openclose_args == NULL) { | ||
164 | dev_err(xpc_chan, "can't get memory for local connect args\n"); | ||
165 | retval = xpNoMemory; | ||
166 | goto out_3; | ||
167 | } | ||
168 | |||
169 | part->remote_openclose_args = | ||
170 | xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, | ||
171 | &part->remote_openclose_args_base); | ||
172 | if (part->remote_openclose_args == NULL) { | ||
173 | dev_err(xpc_chan, "can't get memory for remote connect args\n"); | ||
174 | retval = xpNoMemory; | ||
175 | goto out_4; | ||
176 | } | ||
177 | |||
178 | part->remote_openclose_args_pa = 0; | ||
179 | |||
180 | part->local_IPI_amo_va = xpc_IPI_init(partid); | ||
181 | part->local_IPI_amo = 0; | ||
182 | spin_lock_init(&part->IPI_lock); | ||
183 | |||
184 | part->remote_IPI_nasid = 0; | ||
185 | part->remote_IPI_phys_cpuid = 0; | ||
186 | part->remote_IPI_amo_va = NULL; | ||
187 | |||
188 | atomic_set(&part->channel_mgr_requests, 1); | ||
189 | init_waitqueue_head(&part->channel_mgr_wq); | ||
190 | |||
191 | sprintf(part->IPI_owner, "xpc%02d", partid); | ||
192 | ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED, | ||
193 | part->IPI_owner, (void *)(u64)partid); | ||
194 | if (ret != 0) { | ||
195 | dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " | ||
196 | "errno=%d\n", -ret); | ||
197 | retval = xpLackOfResources; | ||
198 | goto out_5; | ||
199 | } | ||
200 | |||
201 | /* Setup a timer to check for dropped IPIs */ | ||
202 | timer = &part->dropped_IPI_timer; | ||
203 | init_timer(timer); | ||
204 | timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check; | ||
205 | timer->data = (unsigned long)part; | ||
206 | timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT_INTERVAL; | ||
207 | add_timer(timer); | ||
208 | |||
209 | part->nchannels = XPC_MAX_NCHANNELS; | ||
210 | |||
211 | atomic_set(&part->nchannels_active, 0); | ||
212 | atomic_set(&part->nchannels_engaged, 0); | ||
213 | |||
214 | for (ch_number = 0; ch_number < part->nchannels; ch_number++) { | ||
215 | ch = &part->channels[ch_number]; | ||
216 | |||
217 | ch->partid = partid; | ||
218 | ch->number = ch_number; | ||
219 | ch->flags = XPC_C_DISCONNECTED; | ||
220 | |||
221 | ch->local_GP = &part->local_GPs[ch_number]; | ||
222 | ch->local_openclose_args = | ||
223 | &part->local_openclose_args[ch_number]; | ||
224 | |||
225 | atomic_set(&ch->kthreads_assigned, 0); | ||
226 | atomic_set(&ch->kthreads_idle, 0); | ||
227 | atomic_set(&ch->kthreads_active, 0); | ||
228 | |||
229 | atomic_set(&ch->references, 0); | ||
230 | atomic_set(&ch->n_to_notify, 0); | ||
231 | |||
232 | spin_lock_init(&ch->lock); | ||
233 | mutex_init(&ch->msg_to_pull_mutex); | ||
234 | init_completion(&ch->wdisconnect_wait); | ||
235 | |||
236 | atomic_set(&ch->n_on_msg_allocate_wq, 0); | ||
237 | init_waitqueue_head(&ch->msg_allocate_wq); | ||
238 | init_waitqueue_head(&ch->idle_wq); | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * With the setting of the partition setup_state to XPC_P_SETUP, we're | ||
243 | * declaring that this partition is ready to go. | ||
244 | */ | ||
245 | part->setup_state = XPC_P_SETUP; | ||
246 | |||
247 | /* | ||
248 | * Setup the per partition specific variables required by the | ||
249 | * remote partition to establish channel connections with us. | ||
250 | * | ||
251 | * The setting of the magic # indicates that these per partition | ||
252 | * specific variables are ready to be used. | ||
253 | */ | ||
254 | xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); | ||
255 | xpc_vars_part[partid].openclose_args_pa = | ||
256 | __pa(part->local_openclose_args); | ||
257 | xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); | ||
258 | cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ | ||
259 | xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid); | ||
260 | xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid); | ||
261 | xpc_vars_part[partid].nchannels = part->nchannels; | ||
262 | xpc_vars_part[partid].magic = XPC_VP_MAGIC1; | ||
263 | |||
264 | return xpSuccess; | ||
265 | |||
266 | /* setup of infrastructure failed */ | ||
267 | out_5: | ||
268 | kfree(part->remote_openclose_args_base); | ||
269 | part->remote_openclose_args = NULL; | ||
270 | out_4: | ||
271 | kfree(part->local_openclose_args_base); | ||
272 | part->local_openclose_args = NULL; | ||
273 | out_3: | ||
274 | kfree(part->remote_GPs_base); | ||
275 | part->remote_GPs = NULL; | ||
276 | out_2: | ||
277 | kfree(part->local_GPs_base); | ||
278 | part->local_GPs = NULL; | ||
279 | out_1: | ||
280 | kfree(part->channels); | ||
281 | part->channels = NULL; | ||
282 | return retval; | ||
283 | } | ||
284 | |||
285 | /* | ||
286 | * Teardown the infrastructure necessary to support XPartition Communication | ||
287 | * between the specified remote partition and the local one. | ||
288 | */ | ||
289 | static void | ||
290 | xpc_teardown_infrastructure_sn2(struct xpc_partition *part) | ||
291 | { | ||
292 | short partid = XPC_PARTID(part); | ||
293 | |||
294 | /* | ||
295 | * We start off by making this partition inaccessible to local | ||
296 | * processes by marking it as no longer setup. Then we make it | ||
297 | * inaccessible to remote processes by clearing the XPC per partition | ||
298 | * specific variable's magic # (which indicates that these variables | ||
299 | * are no longer valid) and by ignoring all XPC notify IPIs sent to | ||
300 | * this partition. | ||
301 | */ | ||
302 | |||
303 | DBUG_ON(atomic_read(&part->nchannels_engaged) != 0); | ||
304 | DBUG_ON(atomic_read(&part->nchannels_active) != 0); | ||
305 | DBUG_ON(part->setup_state != XPC_P_SETUP); | ||
306 | part->setup_state = XPC_P_WTEARDOWN; | ||
307 | |||
308 | xpc_vars_part[partid].magic = 0; | ||
309 | |||
310 | free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid); | ||
311 | |||
312 | /* | ||
313 | * Before proceeding with the teardown we have to wait until all | ||
314 | * existing references cease. | ||
315 | */ | ||
316 | wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); | ||
317 | |||
318 | /* now we can begin tearing down the infrastructure */ | ||
319 | |||
320 | part->setup_state = XPC_P_TORNDOWN; | ||
321 | |||
322 | /* in case we've still got outstanding timers registered... */ | ||
323 | del_timer_sync(&part->dropped_IPI_timer); | ||
324 | |||
325 | kfree(part->remote_openclose_args_base); | ||
326 | part->remote_openclose_args = NULL; | ||
327 | kfree(part->local_openclose_args_base); | ||
328 | part->local_openclose_args = NULL; | ||
329 | kfree(part->remote_GPs_base); | ||
330 | part->remote_GPs = NULL; | ||
331 | kfree(part->local_GPs_base); | ||
332 | part->local_GPs = NULL; | ||
333 | kfree(part->channels); | ||
334 | part->channels = NULL; | ||
335 | part->local_IPI_amo_va = NULL; | ||
336 | } | ||
337 | |||
338 | /* | ||
339 | * Create a wrapper that hides the underlying mechanism for pulling a cacheline | ||
340 | * (or multiple cachelines) from a remote partition. | ||
341 | * | ||
342 | * src must be a cacheline aligned physical address on the remote partition. | ||
343 | * dst must be a cacheline aligned virtual address on this partition. | ||
344 | * cnt must be cacheline sized | ||
345 | */ | ||
346 | /* >>> Replace this function by call to xp_remote_memcpy() or bte_copy()? */ | ||
347 | static enum xp_retval | ||
348 | xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst, | ||
349 | const void *src, size_t cnt) | ||
350 | { | ||
351 | enum xp_retval ret; | ||
352 | |||
353 | DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src)); | ||
354 | DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst)); | ||
355 | DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); | ||
356 | |||
357 | if (part->act_state == XPC_P_DEACTIVATING) | ||
358 | return part->reason; | ||
359 | |||
360 | ret = xp_remote_memcpy(dst, src, cnt); | ||
361 | if (ret != xpSuccess) { | ||
362 | dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed," | ||
363 | " ret=%d\n", XPC_PARTID(part), ret); | ||
364 | } | ||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | /* | ||
369 | * Pull the remote per partition specific variables from the specified | ||
370 | * partition. | ||
371 | */ | ||
372 | static enum xp_retval | ||
373 | xpc_pull_remote_vars_part_sn2(struct xpc_partition *part) | ||
374 | { | ||
375 | u8 buffer[L1_CACHE_BYTES * 2]; | ||
376 | struct xpc_vars_part_sn2 *pulled_entry_cacheline = | ||
377 | (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer); | ||
378 | struct xpc_vars_part_sn2 *pulled_entry; | ||
379 | u64 remote_entry_cacheline_pa, remote_entry_pa; | ||
380 | short partid = XPC_PARTID(part); | ||
381 | enum xp_retval ret; | ||
382 | |||
383 | /* pull the cacheline that contains the variables we're interested in */ | ||
384 | |||
385 | DBUG_ON(part->remote_vars_part_pa != | ||
386 | L1_CACHE_ALIGN(part->remote_vars_part_pa)); | ||
387 | DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2); | ||
388 | |||
389 | remote_entry_pa = part->remote_vars_part_pa + | ||
390 | sn_partition_id * sizeof(struct xpc_vars_part_sn2); | ||
391 | |||
392 | remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1)); | ||
393 | |||
394 | pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline | ||
395 | + (remote_entry_pa & | ||
396 | (L1_CACHE_BYTES - 1))); | ||
397 | |||
398 | ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline, | ||
399 | (void *)remote_entry_cacheline_pa, | ||
400 | L1_CACHE_BYTES); | ||
401 | if (ret != xpSuccess) { | ||
402 | dev_dbg(xpc_chan, "failed to pull XPC vars_part from " | ||
403 | "partition %d, ret=%d\n", partid, ret); | ||
404 | return ret; | ||
405 | } | ||
406 | |||
407 | /* see if they've been set up yet */ | ||
408 | |||
409 | if (pulled_entry->magic != XPC_VP_MAGIC1 && | ||
410 | pulled_entry->magic != XPC_VP_MAGIC2) { | ||
411 | |||
412 | if (pulled_entry->magic != 0) { | ||
413 | dev_dbg(xpc_chan, "partition %d's XPC vars_part for " | ||
414 | "partition %d has bad magic value (=0x%lx)\n", | ||
415 | partid, sn_partition_id, pulled_entry->magic); | ||
416 | return xpBadMagic; | ||
417 | } | ||
418 | |||
419 | /* they've not been initialized yet */ | ||
420 | return xpRetry; | ||
421 | } | ||
422 | |||
423 | if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) { | ||
424 | |||
425 | /* validate the variables */ | ||
426 | |||
427 | if (pulled_entry->GPs_pa == 0 || | ||
428 | pulled_entry->openclose_args_pa == 0 || | ||
429 | pulled_entry->IPI_amo_pa == 0) { | ||
430 | |||
431 | dev_err(xpc_chan, "partition %d's XPC vars_part for " | ||
432 | "partition %d are not valid\n", partid, | ||
433 | sn_partition_id); | ||
434 | return xpInvalidAddress; | ||
435 | } | ||
436 | |||
437 | /* the variables we imported look to be valid */ | ||
438 | |||
439 | part->remote_GPs_pa = pulled_entry->GPs_pa; | ||
440 | part->remote_openclose_args_pa = | ||
441 | pulled_entry->openclose_args_pa; | ||
442 | part->remote_IPI_amo_va = | ||
443 | (AMO_t *)__va(pulled_entry->IPI_amo_pa); | ||
444 | part->remote_IPI_nasid = pulled_entry->IPI_nasid; | ||
445 | part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; | ||
446 | |||
447 | if (part->nchannels > pulled_entry->nchannels) | ||
448 | part->nchannels = pulled_entry->nchannels; | ||
449 | |||
450 | /* let the other side know that we've pulled their variables */ | ||
451 | |||
452 | xpc_vars_part[partid].magic = XPC_VP_MAGIC2; | ||
453 | } | ||
454 | |||
455 | if (pulled_entry->magic == XPC_VP_MAGIC1) | ||
456 | return xpRetry; | ||
457 | |||
458 | return xpSuccess; | ||
459 | } | ||
460 | |||
461 | /* | ||
462 | * Establish first contact with the remote partititon. This involves pulling | ||
463 | * the XPC per partition variables from the remote partition and waiting for | ||
464 | * the remote partition to pull ours. | ||
465 | */ | ||
466 | static enum xp_retval | ||
467 | xpc_make_first_contact_sn2(struct xpc_partition *part) | ||
468 | { | ||
469 | enum xp_retval ret; | ||
470 | |||
471 | while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) { | ||
472 | if (ret != xpRetry) { | ||
473 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
474 | return ret; | ||
475 | } | ||
476 | |||
477 | dev_dbg(xpc_part, "waiting to make first contact with " | ||
478 | "partition %d\n", XPC_PARTID(part)); | ||
479 | |||
480 | /* wait a 1/4 of a second or so */ | ||
481 | (void)msleep_interruptible(250); | ||
482 | |||
483 | if (part->act_state == XPC_P_DEACTIVATING) | ||
484 | return part->reason; | ||
485 | } | ||
486 | |||
487 | return xpSuccess; | ||
488 | } | ||
489 | |||
490 | /* | ||
491 | * Get the IPI flags and pull the openclose args and/or remote GPs as needed. | ||
492 | */ | ||
493 | static u64 | ||
494 | xpc_get_IPI_flags_sn2(struct xpc_partition *part) | ||
495 | { | ||
496 | unsigned long irq_flags; | ||
497 | u64 IPI_amo; | ||
498 | enum xp_retval ret; | ||
499 | |||
500 | /* | ||
501 | * See if there are any IPI flags to be handled. | ||
502 | */ | ||
503 | |||
504 | spin_lock_irqsave(&part->IPI_lock, irq_flags); | ||
505 | IPI_amo = part->local_IPI_amo; | ||
506 | if (IPI_amo != 0) | ||
507 | part->local_IPI_amo = 0; | ||
508 | |||
509 | spin_unlock_irqrestore(&part->IPI_lock, irq_flags); | ||
510 | |||
511 | if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { | ||
512 | ret = xpc_pull_remote_cachelines_sn2(part, | ||
513 | part->remote_openclose_args, | ||
514 | (void *)part-> | ||
515 | remote_openclose_args_pa, | ||
516 | XPC_OPENCLOSE_ARGS_SIZE); | ||
517 | if (ret != xpSuccess) { | ||
518 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
519 | |||
520 | dev_dbg(xpc_chan, "failed to pull openclose args from " | ||
521 | "partition %d, ret=%d\n", XPC_PARTID(part), | ||
522 | ret); | ||
523 | |||
524 | /* don't bother processing IPIs anymore */ | ||
525 | IPI_amo = 0; | ||
526 | } | ||
527 | } | ||
528 | |||
529 | if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { | ||
530 | ret = xpc_pull_remote_cachelines_sn2(part, part->remote_GPs, | ||
531 | (void *)part->remote_GPs_pa, | ||
532 | XPC_GP_SIZE); | ||
533 | if (ret != xpSuccess) { | ||
534 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
535 | |||
536 | dev_dbg(xpc_chan, "failed to pull GPs from partition " | ||
537 | "%d, ret=%d\n", XPC_PARTID(part), ret); | ||
538 | |||
539 | /* don't bother processing IPIs anymore */ | ||
540 | IPI_amo = 0; | ||
541 | } | ||
542 | } | ||
543 | |||
544 | return IPI_amo; | ||
545 | } | ||
546 | |||
547 | static struct xpc_msg * | ||
548 | xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) | ||
549 | { | ||
550 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | ||
551 | struct xpc_msg *remote_msg, *msg; | ||
552 | u32 msg_index, nmsgs; | ||
553 | u64 msg_offset; | ||
554 | enum xp_retval ret; | ||
555 | |||
556 | if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { | ||
557 | /* we were interrupted by a signal */ | ||
558 | return NULL; | ||
559 | } | ||
560 | |||
561 | while (get >= ch->next_msg_to_pull) { | ||
562 | |||
563 | /* pull as many messages as are ready and able to be pulled */ | ||
564 | |||
565 | msg_index = ch->next_msg_to_pull % ch->remote_nentries; | ||
566 | |||
567 | DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put); | ||
568 | nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull; | ||
569 | if (msg_index + nmsgs > ch->remote_nentries) { | ||
570 | /* ignore the ones that wrap the msg queue for now */ | ||
571 | nmsgs = ch->remote_nentries - msg_index; | ||
572 | } | ||
573 | |||
574 | msg_offset = msg_index * ch->msg_size; | ||
575 | msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset); | ||
576 | remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa + | ||
577 | msg_offset); | ||
578 | |||
579 | ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg, | ||
580 | nmsgs * ch->msg_size); | ||
581 | if (ret != xpSuccess) { | ||
582 | |||
583 | dev_dbg(xpc_chan, "failed to pull %d msgs starting with" | ||
584 | " msg %ld from partition %d, channel=%d, " | ||
585 | "ret=%d\n", nmsgs, ch->next_msg_to_pull, | ||
586 | ch->partid, ch->number, ret); | ||
587 | |||
588 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
589 | |||
590 | mutex_unlock(&ch->msg_to_pull_mutex); | ||
591 | return NULL; | ||
592 | } | ||
593 | |||
594 | ch->next_msg_to_pull += nmsgs; | ||
595 | } | ||
596 | |||
597 | mutex_unlock(&ch->msg_to_pull_mutex); | ||
598 | |||
599 | /* return the message we were looking for */ | ||
600 | msg_offset = (get % ch->remote_nentries) * ch->msg_size; | ||
601 | msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset); | ||
602 | |||
603 | return msg; | ||
604 | } | ||
605 | |||
606 | /* | ||
607 | * Get a message to be delivered. | ||
608 | */ | ||
609 | static struct xpc_msg * | ||
610 | xpc_get_deliverable_msg_sn2(struct xpc_channel *ch) | ||
611 | { | ||
612 | struct xpc_msg *msg = NULL; | ||
613 | s64 get; | ||
614 | |||
615 | do { | ||
616 | if (ch->flags & XPC_C_DISCONNECTING) | ||
617 | break; | ||
618 | |||
619 | get = ch->w_local_GP.get; | ||
620 | rmb(); /* guarantee that .get loads before .put */ | ||
621 | if (get == ch->w_remote_GP.put) | ||
622 | break; | ||
623 | |||
624 | /* There are messages waiting to be pulled and delivered. | ||
625 | * We need to try to secure one for ourselves. We'll do this | ||
626 | * by trying to increment w_local_GP.get and hope that no one | ||
627 | * else beats us to it. If they do, we'll we'll simply have | ||
628 | * to try again for the next one. | ||
629 | */ | ||
630 | |||
631 | if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { | ||
632 | /* we got the entry referenced by get */ | ||
633 | |||
634 | dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, " | ||
635 | "partid=%d, channel=%d\n", get + 1, | ||
636 | ch->partid, ch->number); | ||
637 | |||
638 | /* pull the message from the remote partition */ | ||
639 | |||
640 | msg = xpc_pull_remote_msg_sn2(ch, get); | ||
641 | |||
642 | DBUG_ON(msg != NULL && msg->number != get); | ||
643 | DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE)); | ||
644 | DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY)); | ||
645 | |||
646 | break; | ||
647 | } | ||
648 | |||
649 | } while (1); | ||
650 | |||
651 | return msg; | ||
652 | } | ||
653 | |||
102 | void | 654 | void |
103 | xpc_init_sn2(void) | 655 | xpc_init_sn2(void) |
104 | { | 656 | { |
105 | xpc_rsvd_page_init = xpc_rsvd_page_init_sn2; | 657 | xpc_rsvd_page_init = xpc_rsvd_page_init_sn2; |
658 | xpc_setup_infrastructure = xpc_setup_infrastructure_sn2; | ||
659 | xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2; | ||
660 | xpc_make_first_contact = xpc_make_first_contact_sn2; | ||
661 | xpc_get_IPI_flags = xpc_get_IPI_flags_sn2; | ||
662 | xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2; | ||
106 | } | 663 | } |
107 | 664 | ||
108 | void | 665 | void |
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 8327cd4017ec..770f0a8c669e 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c | |||
@@ -36,10 +36,58 @@ xpc_rsvd_page_init_uv(struct xpc_rsvd_page *rp) | |||
36 | return xpSuccess; | 36 | return xpSuccess; |
37 | } | 37 | } |
38 | 38 | ||
39 | /* | ||
40 | * Setup the infrastructure necessary to support XPartition Communication | ||
41 | * between the specified remote partition and the local one. | ||
42 | */ | ||
43 | static enum xp_retval | ||
44 | xpc_setup_infrastructure_uv(struct xpc_partition *part) | ||
45 | { | ||
46 | /* >>> this function needs fleshing out */ | ||
47 | return xpUnsupported; | ||
48 | } | ||
49 | |||
/*
 * Tear down the infrastructure that supported XPartition Communication
 * between the given remote partition and the local one.
 *
 * UV support is not implemented yet; nothing to do.  >>> needs fleshing
 * out.
 */
static void
xpc_teardown_infrastructure_uv(struct xpc_partition *part)
{
}
60 | |||
61 | static enum xp_retval | ||
62 | xpc_make_first_contact_uv(struct xpc_partition *part) | ||
63 | { | ||
64 | /* >>> this function needs fleshing out */ | ||
65 | return xpUnsupported; | ||
66 | } | ||
67 | |||
68 | static u64 | ||
69 | xpc_get_IPI_flags_uv(struct xpc_partition *part) | ||
70 | { | ||
71 | /* >>> this function needs fleshing out */ | ||
72 | return 0UL; | ||
73 | } | ||
74 | |||
75 | static struct xpc_msg * | ||
76 | xpc_get_deliverable_msg_uv(struct xpc_channel *ch) | ||
77 | { | ||
78 | /* >>> this function needs fleshing out */ | ||
79 | return NULL; | ||
80 | } | ||
81 | |||
39 | void | 82 | void |
40 | xpc_init_uv(void) | 83 | xpc_init_uv(void) |
41 | { | 84 | { |
42 | xpc_rsvd_page_init = xpc_rsvd_page_init_uv; | 85 | xpc_rsvd_page_init = xpc_rsvd_page_init_uv; |
86 | xpc_setup_infrastructure = xpc_setup_infrastructure_uv; | ||
87 | xpc_teardown_infrastructure = xpc_teardown_infrastructure_uv; | ||
88 | xpc_make_first_contact = xpc_make_first_contact_uv; | ||
89 | xpc_get_IPI_flags = xpc_get_IPI_flags_uv; | ||
90 | xpc_get_deliverable_msg = xpc_get_deliverable_msg_uv; | ||
43 | } | 91 | } |
44 | 92 | ||
45 | void | 93 | void |