author		Dean Nelson <dcn@sgi.com>	2008-07-30 01:34:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-30 12:41:49 -0400
commit		7fb5e59d63deda89a8eefdbd5b3c8d622076afd4 (patch)
tree		4c78f9e016dd0998e8539a1da358b4ba961db8e9 /drivers/misc/sgi-xp/xpc_sn2.c
parent		a47d5dac9d8481766382f8cf1483dd581df38b99 (diff)
sgi-xp: separate chctl_flags from XPC's notify IRQ
Tie current IPI references to either XPC's notify IRQ or channel control
flags.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_sn2.c')
-rw-r--r--	drivers/misc/sgi-xp/xpc_sn2.c	301
1 file changed, 152 insertions(+), 149 deletions(-)
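The heart of the patch is visible in the xpc_send_notify_IRQ_sn2() hunk below: rather than hand-packing a per-channel flag byte into the u64 AMO value with "(u64)ipi_flag << (ch->number * 8)", the code now routes everything through a union xpc_channel_ctl_flags and passes chctl.all_flags around. The union itself is defined in xpc.h, which this diffstat excludes, so the following is only a minimal standalone sketch of the idea; the 8-channel bound and the 0x10 flag value are assumptions, not taken from this diff.

/*
 * Sketch only: approximates union xpc_channel_ctl_flags as used by this
 * patch. The real definition lives in drivers/misc/sgi-xp/xpc.h (outside
 * this diffstat); the channel count of 8 is inferred from the old
 * "(u64)ipi_flag << (ch->number * 8)" encoding, and 0x10 is a stand-in
 * for an XPC_CHCTL_* flag value.
 */
#include <stdint.h>
#include <stdio.h>

union xpc_channel_ctl_flags {
        uint64_t all_flags;     /* whole word, as written to the AMO variable */
        uint8_t flags[8];       /* one chctl flag byte per channel */
};

int main(void)
{
        union xpc_channel_ctl_flags chctl = { 0 };
        int ch_number = 3;

        /* what the new xpc_send_notify_IRQ_sn2() does for one channel */
        chctl.flags[ch_number] = 0x10;

        /* on a little-endian machine (as on sn2/ia64) this reproduces the
         * old "(u64)ipi_flag << (ch->number * 8)" encoding exactly */
        printf("all_flags = 0x%llx\n", (unsigned long long)chctl.all_flags);
        return 0;
}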
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 69d74bd56899..0fef7d86a5a2 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -104,20 +104,20 @@ xpc_disallow_IPI_ops_sn2(void)
 }
 
 /*
- * The following set of macros and functions are used for the sending and
- * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
- * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
- * the other that is associated with channel activity (SGI_XPC_NOTIFY).
+ * The following set of functions are used for the sending and receiving of
+ * IRQs (also known as IPIs). There are two flavors of IRQs, one that is
+ * associated with partition activity (SGI_XPC_ACTIVATE) and the other that
+ * is associated with channel activity (SGI_XPC_NOTIFY).
  */
 
 static u64
-xpc_IPI_receive_sn2(AMO_t *amo)
+xpc_receive_IRQ_amo_sn2(AMO_t *amo)
 {
         return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
 }
 
 static enum xp_retval
-xpc_IPI_send_sn2(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
+xpc_send_IRQ_sn2(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
 {
         int ret = 0;
         unsigned long irq_flags;
@@ -131,7 +131,7 @@ xpc_IPI_send_sn2(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
          * We must always use the nofault function regardless of whether we
          * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
          * didn't, we'd never know that the other partition is down and would
-         * keep sending IPIs and AMOs to it until the heartbeat times out.
+         * keep sending IRQs and AMOs to it until the heartbeat times out.
          */
         ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
                                                      xp_nofault_PIOR_target));
@@ -142,16 +142,16 @@ xpc_IPI_send_sn2(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
 }
 
 static AMO_t *
-xpc_IPI_init_sn2(int index)
+xpc_init_IRQ_amo_sn2(int index)
 {
         AMO_t *amo = xpc_vars->amos_page + index;
 
-        (void)xpc_IPI_receive_sn2(amo);	/* clear AMO variable */
+        (void)xpc_receive_IRQ_amo_sn2(amo);	/* clear AMO variable */
         return amo;
 }
 
 /*
- * IPIs associated with SGI_XPC_ACTIVATE IRQ.
+ * Functions associated with SGI_XPC_ACTIVATE IRQ.
  */
 
 /*
@@ -166,23 +166,23 @@ xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
 }
 
 /*
- * Flag the appropriate AMO variable and send an IPI to the specified node.
+ * Flag the appropriate AMO variable and send an IRQ to the specified node.
  */
 static void
-xpc_activate_IRQ_send_sn2(u64 amos_page_pa, int from_nasid, int to_nasid,
+xpc_send_activate_IRQ_sn2(u64 amos_page_pa, int from_nasid, int to_nasid,
                           int to_phys_cpuid)
 {
         int w_index = XPC_NASID_W_INDEX(from_nasid);
         int b_index = XPC_NASID_B_INDEX(from_nasid);
         AMO_t *amos = (AMO_t *)__va(amos_page_pa +
                                     (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
 
-        (void)xpc_IPI_send_sn2(&amos[w_index], (1UL << b_index), to_nasid,
+        (void)xpc_send_IRQ_sn2(&amos[w_index], (1UL << b_index), to_nasid,
                                to_phys_cpuid, SGI_XPC_ACTIVATE);
 }
 
 static void
-xpc_activate_IRQ_send_local_sn2(int from_nasid)
+xpc_send_local_activate_IRQ_sn2(int from_nasid)
 {
         int w_index = XPC_NASID_W_INDEX(from_nasid);
         int b_index = XPC_NASID_B_INDEX(from_nasid);
@@ -197,29 +197,29 @@ xpc_activate_IRQ_send_local_sn2(int from_nasid)
 }
 
 /*
- * IPIs associated with SGI_XPC_NOTIFY IRQ.
+ * Functions associated with SGI_XPC_NOTIFY IRQ.
  */
 
 /*
- * Check to see if there is any channel activity to/from the specified
- * partition.
+ * Check to see if any chctl flags were sent from the specified partition.
  */
 static void
-xpc_check_for_channel_activity_sn2(struct xpc_partition *part)
+xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
 {
-        u64 IPI_amo;
+        union xpc_channel_ctl_flags chctl;
         unsigned long irq_flags;
 
-        IPI_amo = xpc_IPI_receive_sn2(part->sn.sn2.local_IPI_amo_va);
-        if (IPI_amo == 0)
+        chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2.
+                                                  local_chctl_amo_va);
+        if (chctl.all_flags == 0)
                 return;
 
-        spin_lock_irqsave(&part->IPI_lock, irq_flags);
-        part->local_IPI_amo |= IPI_amo;
-        spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
+        spin_lock_irqsave(&part->chctl_lock, irq_flags);
+        part->chctl.all_flags |= chctl.all_flags;
+        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
 
-        dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
-                XPC_PARTID(part), IPI_amo);
+        dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags="
+                "0x%lx\n", XPC_PARTID(part), chctl.all_flags);
 
         xpc_wakeup_channel_mgr(part);
 }
@@ -228,17 +228,17 @@ xpc_check_for_channel_activity_sn2(struct xpc_partition *part)
  * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
  * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
  * than one partition, we use an AMO_t structure per partition to indicate
- * whether a partition has sent an IPI or not. If it has, then wake up the
+ * whether a partition has sent an IRQ or not. If it has, then wake up the
  * associated kthread to handle it.
  *
- * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
+ * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC
  * running on other partitions.
  *
  * Noteworthy Arguments:
  *
  *	irq - Interrupt ReQuest number. NOT USED.
  *
- *	dev_id - partid of IPI's potential sender.
+ *	dev_id - partid of IRQ's potential sender.
  */
 static irqreturn_t
 xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
@@ -249,7 +249,7 @@ xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
         DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
 
         if (xpc_part_ref(part)) {
-                xpc_check_for_channel_activity_sn2(part);
+                xpc_check_for_sent_chctl_flags_sn2(part);
 
                 xpc_part_deref(part);
         }
@@ -257,45 +257,47 @@ xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
 }
 
 /*
- * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IPIs on the floor
- * because the write to their associated IPI amo completed after the IRQ/IPI
+ * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IRQs on the floor
+ * because the write to their associated amo variable completed after the IRQ
  * was received.
  */
 static void
-xpc_dropped_notify_IRQ_check_sn2(struct xpc_partition *part)
+xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part)
 {
         struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
 
         if (xpc_part_ref(part)) {
-                xpc_check_for_channel_activity_sn2(part);
+                xpc_check_for_sent_chctl_flags_sn2(part);
 
                 part_sn2->dropped_notify_IRQ_timer.expires = jiffies +
-                    XPC_P_DROPPED_IPI_WAIT_INTERVAL;
+                    XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
                 add_timer(&part_sn2->dropped_notify_IRQ_timer);
                 xpc_part_deref(part);
         }
 }
 
 /*
- * Send an IPI to the remote partition that is associated with the
+ * Send a notify IRQ to the remote partition that is associated with the
  * specified channel.
  */
 static void
-xpc_notify_IRQ_send_sn2(struct xpc_channel *ch, u8 ipi_flag,
-                        char *ipi_flag_string, unsigned long *irq_flags)
+xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
+                        char *chctl_flag_string, unsigned long *irq_flags)
 {
         struct xpc_partition *part = &xpc_partitions[ch->partid];
         struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+        union xpc_channel_ctl_flags chctl = { 0 };
         enum xp_retval ret;
 
         if (likely(part->act_state != XPC_P_DEACTIVATING)) {
-                ret = xpc_IPI_send_sn2(part_sn2->remote_IPI_amo_va,
-                                       (u64)ipi_flag << (ch->number * 8),
-                                       part_sn2->remote_IPI_nasid,
-                                       part_sn2->remote_IPI_phys_cpuid,
+                chctl.flags[ch->number] = chctl_flag;
+                ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va,
+                                       chctl.all_flags,
+                                       part_sn2->notify_IRQ_nasid,
+                                       part_sn2->notify_IRQ_phys_cpuid,
                                        SGI_XPC_NOTIFY);
                 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
-                        ipi_flag_string, ch->partid, ch->number, ret);
+                        chctl_flag_string, ch->partid, ch->number, ret);
                 if (unlikely(ret != xpSuccess)) {
                         if (irq_flags != NULL)
                                 spin_unlock_irqrestore(&ch->lock, *irq_flags);
@@ -306,78 +308,78 @@ xpc_notify_IRQ_send_sn2(struct xpc_channel *ch, u8 ipi_flag,
         }
 }
 
-#define XPC_NOTIFY_IRQ_SEND_SN2(_ch, _ipi_f, _irq_f) \
-        xpc_notify_IRQ_send_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)
+#define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \
+        xpc_send_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)
 
 /*
  * Make it look like the remote partition, which is associated with the
- * specified channel, sent us an IPI. This faked IPI will be handled
- * by xpc_dropped_notify_IRQ_check_sn2().
+ * specified channel, sent us a notify IRQ. This faked IRQ will be handled
+ * by xpc_check_for_dropped_notify_IRQ_sn2().
  */
 static void
-xpc_notify_IRQ_send_local_sn2(struct xpc_channel *ch, u8 ipi_flag,
-                              char *ipi_flag_string)
+xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
+                              char *chctl_flag_string)
 {
         struct xpc_partition *part = &xpc_partitions[ch->partid];
+        union xpc_channel_ctl_flags chctl = { 0 };
 
-        FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_IPI_amo_va->variable),
-                         FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8)));
+        chctl.flags[ch->number] = chctl_flag;
+        FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_chctl_amo_va->
+                         variable), FETCHOP_OR, chctl.all_flags);
         dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
-                ipi_flag_string, ch->partid, ch->number);
+                chctl_flag_string, ch->partid, ch->number);
 }
 
-#define XPC_NOTIFY_IRQ_SEND_LOCAL_SN2(_ch, _ipi_f) \
-        xpc_notify_IRQ_send_local_sn2(_ch, _ipi_f, #_ipi_f)
+#define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \
+        xpc_send_local_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f)
 
 static void
-xpc_send_channel_closerequest_sn2(struct xpc_channel *ch,
-                                  unsigned long *irq_flags)
+xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch,
+                                unsigned long *irq_flags)
 {
         struct xpc_openclose_args *args = ch->local_openclose_args;
 
         args->reason = ch->reason;
-        XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
+        XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags);
 }
 
 static void
-xpc_send_channel_closereply_sn2(struct xpc_channel *ch,
-                                unsigned long *irq_flags)
+xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
 {
-        XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_CLOSEREPLY, irq_flags);
+        XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags);
 }
 
 static void
-xpc_send_channel_openrequest_sn2(struct xpc_channel *ch,
-                                 unsigned long *irq_flags)
+xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
 {
         struct xpc_openclose_args *args = ch->local_openclose_args;
 
         args->msg_size = ch->msg_size;
         args->local_nentries = ch->local_nentries;
-        XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_OPENREQUEST, irq_flags);
+        XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags);
 }
 
 static void
-xpc_send_channel_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
+xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
 {
         struct xpc_openclose_args *args = ch->local_openclose_args;
 
         args->remote_nentries = ch->remote_nentries;
         args->local_nentries = ch->local_nentries;
         args->local_msgqueue_pa = __pa(ch->local_msgqueue);
-        XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_OPENREPLY, irq_flags);
+        XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
 }
 
 static void
-xpc_send_channel_msgrequest_sn2(struct xpc_channel *ch)
+xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
 {
-        XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_MSGREQUEST, NULL);
+        XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
 }
 
 static void
-xpc_send_channel_local_msgrequest_sn2(struct xpc_channel *ch)
+xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
 {
-        XPC_NOTIFY_IRQ_SEND_LOCAL_SN2(ch, XPC_IPI_MSGREQUEST);
+        XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
 }
 
 /*
@@ -402,7 +404,7 @@ xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
          * We must always use the nofault function regardless of whether we
          * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
          * didn't, we'd never know that the other partition is down and would
-         * keep sending IPIs and AMOs to it until the heartbeat times out.
+         * keep sending IRQs and AMOs to it until the heartbeat times out.
          */
         (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
                                                                variable),
@@ -429,7 +431,7 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
          * We must always use the nofault function regardless of whether we
          * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
          * didn't, we'd never know that the other partition is down and would
-         * keep sending IPIs and AMOs to it until the heartbeat times out.
+         * keep sending IRQs and AMOs to it until the heartbeat times out.
          */
         (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
                                                                variable),
@@ -441,7 +443,7 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
          * Send activate IRQ to get other side to see that we've cleared our
          * bit in their engaged partitions AMO.
          */
-        xpc_activate_IRQ_send_sn2(part_sn2->remote_amos_page_pa,
+        xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
                                   cnodeid_to_nasid(0),
                                   part_sn2->activate_IRQ_nasid,
                                   part_sn2->activate_IRQ_phys_cpuid);
@@ -595,11 +597,11 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
 
         /* initialize the activate IRQ related AMO variables */
         for (i = 0; i < xp_nasid_mask_words; i++)
-                (void)xpc_IPI_init_sn2(XPC_ACTIVATE_IRQ_AMOS + i);
+                (void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS + i);
 
         /* initialize the engaged remote partitions related AMO variables */
-        (void)xpc_IPI_init_sn2(XPC_ENGAGED_PARTITIONS_AMO);
-        (void)xpc_IPI_init_sn2(XPC_DEACTIVATE_REQUEST_AMO);
+        (void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO);
+        (void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO);
 
         return xpSuccess;
 }
@@ -729,13 +731,13 @@ static void
 xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
                                      u64 remote_rp_pa, int nasid)
 {
-        xpc_activate_IRQ_send_local_sn2(nasid);
+        xpc_send_local_activate_IRQ_sn2(nasid);
 }
 
 static void
 xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
 {
-        xpc_activate_IRQ_send_local_sn2(part->sn.sn2.activate_IRQ_nasid);
+        xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid);
 }
 
 static void
@@ -755,7 +757,7 @@ xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
          * We must always use the nofault function regardless of whether we
          * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
          * didn't, we'd never know that the other partition is down and would
-         * keep sending IPIs and AMOs to it until the heartbeat times out.
+         * keep sending IRQs and AMOs to it until the heartbeat times out.
          */
         (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
                                                                variable),
@@ -767,7 +769,7 @@ xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
          * Send activate IRQ to get other side to see that we've set our
          * bit in their deactivate request AMO.
          */
-        xpc_activate_IRQ_send_sn2(part_sn2->remote_amos_page_pa,
+        xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
                                   cnodeid_to_nasid(0),
                                   part_sn2->activate_IRQ_nasid,
                                   part_sn2->activate_IRQ_phys_cpuid);
@@ -789,7 +791,7 @@ xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
          * We must always use the nofault function regardless of whether we
          * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
          * didn't, we'd never know that the other partition is down and would
-         * keep sending IPIs and AMOs to it until the heartbeat times out.
+         * keep sending IRQs and AMOs to it until the heartbeat times out.
          */
         (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
                                                                variable),
@@ -861,11 +863,11 @@ xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
 }
 
 /*
- * Prior code has determined the nasid which generated an IPI. Inspect
- * that nasid to determine if its partition needs to be activated or
- * deactivated.
+ * Prior code has determined the nasid which generated a activate IRQ.
+ * Inspect that nasid to determine if its partition needs to be activated
+ * or deactivated.
  *
- * A partition is consider "awaiting activation" if our partition
+ * A partition is considered "awaiting activation" if our partition
  * flags indicate it is not active and it has a heartbeat. A
  * partition is considered "awaiting deactivation" if our partition
  * flags indicate it is active but it has no heartbeat or it is not
@@ -997,7 +999,7 @@ xpc_identify_activate_IRQ_sender_sn2(void)
                 if (xpc_exiting)
                         break;
 
-                nasid_mask = xpc_IPI_receive_sn2(&act_amos[word]);
+                nasid_mask = xpc_receive_IRQ_amo_sn2(&act_amos[word]);
                 if (nasid_mask == 0) {
                         /* no IRQs from nasids in this variable */
                         continue;
@@ -1117,20 +1119,20 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
 
         part_sn2->remote_openclose_args_pa = 0;
 
-        part_sn2->local_IPI_amo_va = xpc_IPI_init_sn2(partid);
-        part->local_IPI_amo = 0;
-        spin_lock_init(&part->IPI_lock);
+        part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);
+        part->chctl.all_flags = 0;
+        spin_lock_init(&part->chctl_lock);
 
-        part_sn2->remote_IPI_nasid = 0;
-        part_sn2->remote_IPI_phys_cpuid = 0;
-        part_sn2->remote_IPI_amo_va = NULL;
+        part_sn2->notify_IRQ_nasid = 0;
+        part_sn2->notify_IRQ_phys_cpuid = 0;
+        part_sn2->remote_chctl_amo_va = NULL;
 
         atomic_set(&part->channel_mgr_requests, 1);
         init_waitqueue_head(&part->channel_mgr_wq);
 
-        sprintf(part_sn2->IPI_owner, "xpc%02d", partid);
+        sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
         ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
-                          IRQF_SHARED, part_sn2->IPI_owner,
+                          IRQF_SHARED, part_sn2->notify_IRQ_owner,
                           (void *)(u64)partid);
         if (ret != 0) {
                 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
@@ -1139,13 +1141,13 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
                 goto out_5;
         }
 
-        /* Setup a timer to check for dropped IPIs */
+        /* Setup a timer to check for dropped notify IRQs */
         timer = &part_sn2->dropped_notify_IRQ_timer;
         init_timer(timer);
         timer->function =
-            (void (*)(unsigned long))xpc_dropped_notify_IRQ_check_sn2;
+            (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2;
         timer->data = (unsigned long)part;
-        timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT_INTERVAL;
+        timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
         add_timer(timer);
 
         part->nchannels = XPC_MAX_NCHANNELS;
@@ -1196,10 +1198,10 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
         xpc_vars_part[partid].GPs_pa = __pa(part_sn2->local_GPs);
         xpc_vars_part[partid].openclose_args_pa =
             __pa(part->local_openclose_args);
-        xpc_vars_part[partid].IPI_amo_pa = __pa(part_sn2->local_IPI_amo_va);
+        xpc_vars_part[partid].chctl_amo_pa = __pa(part_sn2->local_chctl_amo_va);
         cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
-        xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
-        xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid);
+        xpc_vars_part[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
+        xpc_vars_part[partid].notify_IRQ_phys_cpuid = cpu_physical_id(cpuid);
         xpc_vars_part[partid].nchannels = part->nchannels;
         xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
 
@@ -1239,7 +1241,7 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
          * processes by marking it as no longer setup. Then we make it
          * inaccessible to remote processes by clearing the XPC per partition
          * specific variable's magic # (which indicates that these variables
-         * are no longer valid) and by ignoring all XPC notify IPIs sent to
+         * are no longer valid) and by ignoring all XPC notify IRQs sent to
          * this partition.
          */
 
@@ -1275,7 +1277,7 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
         part_sn2->local_GPs = NULL;
         kfree(part->channels);
         part->channels = NULL;
-        part_sn2->local_IPI_amo_va = NULL;
+        part_sn2->local_chctl_amo_va = NULL;
 }
 
 /*
@@ -1370,7 +1372,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
 
                 if (pulled_entry->GPs_pa == 0 ||
                     pulled_entry->openclose_args_pa == 0 ||
-                    pulled_entry->IPI_amo_pa == 0) {
+                    pulled_entry->chctl_amo_pa == 0) {
 
                         dev_err(xpc_chan, "partition %d's XPC vars_part for "
                                 "partition %d are not valid\n", partid,
@@ -1383,10 +1385,11 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
                 part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
                 part_sn2->remote_openclose_args_pa =
                     pulled_entry->openclose_args_pa;
-                part_sn2->remote_IPI_amo_va =
-                    (AMO_t *)__va(pulled_entry->IPI_amo_pa);
-                part_sn2->remote_IPI_nasid = pulled_entry->IPI_nasid;
-                part_sn2->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
+                part_sn2->remote_chctl_amo_va =
+                    (AMO_t *)__va(pulled_entry->chctl_amo_pa);
+                part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
+                part_sn2->notify_IRQ_phys_cpuid =
+                    pulled_entry->notify_IRQ_phys_cpuid;
 
                 if (part->nchannels > pulled_entry->nchannels)
                         part->nchannels = pulled_entry->nchannels;
@@ -1437,7 +1440,7 @@ xpc_make_first_contact_sn2(struct xpc_partition *part)
          * Send activate IRQ to get other side to activate if they've not
          * already begun to do so.
          */
-        xpc_activate_IRQ_send_sn2(part_sn2->remote_amos_page_pa,
+        xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
                                   cnodeid_to_nasid(0),
                                   part_sn2->activate_IRQ_nasid,
                                   part_sn2->activate_IRQ_phys_cpuid);
@@ -1462,28 +1465,28 @@ xpc_make_first_contact_sn2(struct xpc_partition *part)
 }
 
 /*
- * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
+ * Get the chctl flags and pull the openclose args and/or remote GPs as needed.
  */
 static u64
-xpc_get_IPI_flags_sn2(struct xpc_partition *part)
+xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
 {
         struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
         unsigned long irq_flags;
-        u64 IPI_amo;
+        union xpc_channel_ctl_flags chctl;
         enum xp_retval ret;
 
         /*
-         * See if there are any IPI flags to be handled.
+         * See if there are any chctl flags to be handled.
          */
 
-        spin_lock_irqsave(&part->IPI_lock, irq_flags);
-        IPI_amo = part->local_IPI_amo;
-        if (IPI_amo != 0)
-                part->local_IPI_amo = 0;
+        spin_lock_irqsave(&part->chctl_lock, irq_flags);
+        chctl = part->chctl;
+        if (chctl.all_flags != 0)
+                part->chctl.all_flags = 0;
 
-        spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
+        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
 
-        if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
+        if (xpc_any_openclose_chctl_flags_set(&chctl)) {
                 ret = xpc_pull_remote_cachelines_sn2(part, part->
                                                      remote_openclose_args,
                                                      (void *)part_sn2->
@@ -1496,12 +1499,12 @@ xpc_get_IPI_flags_sn2(struct xpc_partition *part)
                                 "partition %d, ret=%d\n", XPC_PARTID(part),
                                 ret);
 
-                        /* don't bother processing IPIs anymore */
-                        IPI_amo = 0;
+                        /* don't bother processing chctl flags anymore */
+                        chctl.all_flags = 0;
                 }
         }
 
-        if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
+        if (xpc_any_msg_chctl_flags_set(&chctl)) {
                 ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
                                                      (void *)part_sn2->remote_GPs_pa,
                                                      XPC_GP_SIZE);
@@ -1511,12 +1514,12 @@ xpc_get_IPI_flags_sn2(struct xpc_partition *part)
                         dev_dbg(xpc_chan, "failed to pull GPs from partition "
                                 "%d, ret=%d\n", XPC_PARTID(part), ret);
 
-                        /* don't bother processing IPIs anymore */
-                        IPI_amo = 0;
+                        /* don't bother processing chctl flags anymore */
+                        chctl.all_flags = 0;
                 }
         }
 
-        return IPI_amo;
+        return chctl.all_flags;
 }
 
 /*
@@ -1610,7 +1613,7 @@ xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
 }
 
 static void
-xpc_process_msg_IPI_sn2(struct xpc_partition *part, int ch_number)
+xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
 {
         struct xpc_channel *ch = &part->channels[ch_number];
         struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
@@ -1827,8 +1830,8 @@ xpc_get_deliverable_msg_sn2(struct xpc_channel *ch)
 
 /*
  * Now we actually send the messages that are ready to be sent by advancing
- * the local message queue's Put value and then send an IPI to the recipient
- * partition.
+ * the local message queue's Put value and then send a chctl msgrequest to the
+ * recipient partition.
  */
 static void
 xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
@@ -1836,7 +1839,7 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
         struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
         struct xpc_msg *msg;
         s64 put = initial_put + 1;
-        int send_IPI = 0;
+        int send_msgrequest = 0;
 
         while (1) {
 
@@ -1871,7 +1874,7 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
                 dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
                         "channel=%d\n", put, ch->partid, ch->number);
 
-                send_IPI = 1;
+                send_msgrequest = 1;
 
                 /*
                  * We need to ensure that the message referenced by
@@ -1881,8 +1884,8 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
                 initial_put = put;
         }
 
-        if (send_IPI)
-                xpc_send_channel_msgrequest_sn2(ch);
+        if (send_msgrequest)
+                xpc_send_chctl_msgrequest_sn2(ch);
 }
 
 /*
@@ -1929,13 +1932,13 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
                          * There aren't any available msg entries at this time.
                          *
                          * In waiting for a message entry to become available,
-                         * we set a timeout in case the other side is not
-                         * sending completion IPIs. This lets us fake an IPI
-                         * that will cause the IPI handler to fetch the latest
-                         * GP values as if an IPI was sent by the other side.
+                         * we set a timeout in case the other side is not sending
+                         * completion interrupts. This lets us fake a notify IRQ
+                         * that will cause the notify IRQ handler to fetch the latest
+                         * GP values as if an interrupt was sent by the other side.
                          */
                         if (ret == xpTimeout)
-                                xpc_send_channel_local_msgrequest_sn2(ch);
+                                xpc_send_chctl_local_msgrequest_sn2(ch);
 
                         if (flags & XPC_NOWAIT)
                                 return xpNoWait;
@@ -1962,8 +1965,8 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
 
 /*
  * Common code that does the actual sending of the message by advancing the
- * local message queue's Put value and sends an IPI to the partition the
- * message is being sent to.
+ * local message queue's Put value and sends a chctl msgrequest to the
+ * partition the message is being sent to.
  */
 static enum xp_retval
 xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload,
@@ -2055,7 +2058,7 @@ out_1:
 /*
  * Now we actually acknowledge the messages that have been delivered and ack'd
  * by advancing the cached remote message queue's Get value and if requested
- * send an IPI to the message sender's partition.
+ * send a chctl msgrequest to the message sender's partition.
  */
 static void
 xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
@@ -2063,7 +2066,7 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
         struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
         struct xpc_msg *msg;
         s64 get = initial_get + 1;
-        int send_IPI = 0;
+        int send_msgrequest = 0;
 
         while (1) {
 
@@ -2099,7 +2102,7 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
                 dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
                         "channel=%d\n", get, ch->partid, ch->number);
 
-                send_IPI = (msg_flags & XPC_M_INTERRUPT);
+                send_msgrequest = (msg_flags & XPC_M_INTERRUPT);
 
                 /*
                  * We need to ensure that the message referenced by
@@ -2109,8 +2112,8 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
                 initial_get = get;
         }
 
-        if (send_IPI)
-                xpc_send_channel_msgrequest_sn2(ch);
+        if (send_msgrequest)
+                xpc_send_chctl_msgrequest_sn2(ch);
 }
 
 static void
2116 | static void | 2119 | static void |
@@ -2168,9 +2171,9 @@ xpc_init_sn2(void) | |||
2168 | xpc_setup_infrastructure = xpc_setup_infrastructure_sn2; | 2171 | xpc_setup_infrastructure = xpc_setup_infrastructure_sn2; |
2169 | xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2; | 2172 | xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2; |
2170 | xpc_make_first_contact = xpc_make_first_contact_sn2; | 2173 | xpc_make_first_contact = xpc_make_first_contact_sn2; |
2171 | xpc_get_IPI_flags = xpc_get_IPI_flags_sn2; | 2174 | xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2; |
2172 | xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2; | 2175 | xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2; |
2173 | xpc_process_msg_IPI = xpc_process_msg_IPI_sn2; | 2176 | xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2; |
2174 | xpc_n_of_deliverable_msgs = xpc_n_of_deliverable_msgs_sn2; | 2177 | xpc_n_of_deliverable_msgs = xpc_n_of_deliverable_msgs_sn2; |
2175 | xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2; | 2178 | xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2; |
2176 | 2179 | ||
@@ -2181,10 +2184,10 @@ xpc_init_sn2(void) | |||
2181 | xpc_indicate_partition_disengaged_sn2; | 2184 | xpc_indicate_partition_disengaged_sn2; |
2182 | xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2; | 2185 | xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2; |
2183 | 2186 | ||
2184 | xpc_send_channel_closerequest = xpc_send_channel_closerequest_sn2; | 2187 | xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2; |
2185 | xpc_send_channel_closereply = xpc_send_channel_closereply_sn2; | 2188 | xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2; |
2186 | xpc_send_channel_openrequest = xpc_send_channel_openrequest_sn2; | 2189 | xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2; |
2187 | xpc_send_channel_openreply = xpc_send_channel_openreply_sn2; | 2190 | xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2; |
2188 | 2191 | ||
2189 | xpc_send_msg = xpc_send_msg_sn2; | 2192 | xpc_send_msg = xpc_send_msg_sn2; |
2190 | xpc_received_msg = xpc_received_msg_sn2; | 2193 | xpc_received_msg = xpc_received_msg_sn2; |
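One pattern worth calling out across the receive-side hunks is accumulate-and-drain: xpc_check_for_sent_chctl_flags_sn2() fetch-and-clears the local AMO variable and ORs the result into part->chctl under chctl_lock, and xpc_get_chctl_all_flags_sn2() later takes the accumulated word, again under the lock, before pulling openclose args or GPs. A condensed sketch of that flow follows; the FETCHOP/AMO hardware read is replaced with a plain variable and the spinlock with a pthread mutex so it compiles anywhere, while the field names and locking points follow the diff.

/*
 * Condensed, userspace-only sketch of the accumulate-and-drain flow from
 * xpc_check_for_sent_chctl_flags_sn2() and xpc_get_chctl_all_flags_sn2().
 * Everything hardware-specific (FETCHOP_CLEAR on the AMO, spin_lock_irqsave)
 * is replaced with a portable stand-in.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fake_part {
        pthread_mutex_t chctl_lock;     /* stands in for part->chctl_lock */
        uint64_t chctl_all_flags;       /* stands in for part->chctl.all_flags */
        uint64_t amo_variable;          /* stands in for the local chctl AMO */
};

/* notify IRQ side: fetch-and-clear the AMO, OR the result into part->chctl */
void check_for_sent_chctl_flags(struct fake_part *part)
{
        uint64_t flags = part->amo_variable;    /* FETCHOP_CLEAR in the driver */

        part->amo_variable = 0;
        if (flags == 0)
                return;
        pthread_mutex_lock(&part->chctl_lock);
        part->chctl_all_flags |= flags;
        pthread_mutex_unlock(&part->chctl_lock);
}

/* channel manager side: atomically take whatever has accumulated */
uint64_t get_chctl_all_flags(struct fake_part *part)
{
        uint64_t flags;

        pthread_mutex_lock(&part->chctl_lock);
        flags = part->chctl_all_flags;
        part->chctl_all_flags = 0;
        pthread_mutex_unlock(&part->chctl_lock);
        return flags;
}

int main(void)
{
        /* pretend the AMO already holds a flag byte for channel 1 */
        struct fake_part part = { PTHREAD_MUTEX_INITIALIZER, 0, 0x1000 };

        check_for_sent_chctl_flags(&part);
        printf("drained chctl flags: 0x%llx\n",
               (unsigned long long)get_chctl_all_flags(&part));
        return 0;
}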