author		Dean Nelson <dcn@sgi.com>	2008-07-30 01:34:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-30 12:41:50 -0400
commit		5b8669dfd110a62a74eea525a009342f73987ea0 (patch)
tree		04572d8508f450131298b6ec072e97aa9fdba539	/drivers/misc/sgi-xp/xpc_sn2.c
parent		83469b5525b4a35be40b17cb41d64118d84d9f80 (diff)
sgi-xp: setup the activate GRU message queue
Setup the activate GRU message queue that is used for partition activation
and channel connection on UV systems.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_sn2.c')
-rw-r--r--	drivers/misc/sgi-xp/xpc_sn2.c	387
1 file changed, 154 insertions(+), 233 deletions(-)
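Editor's note: one recurring change in the sn2 portion of this patch is dropping the atomic_inc() of xpc_activate_IRQ_rcvd in favor of a plain counter guarded by xpc_activate_IRQ_rcvd_lock, which the processing path then snapshots and resets inside a single critical section. The following is a minimal userspace sketch of that pattern only, not the kernel code itself; the pthread setting and the names used here are illustrative assumptions.

```c
/*
 * Sketch of the "count under a lock, snapshot-and-reset under the same lock"
 * pattern adopted by xpc_handle_activate_IRQ_sn2() and
 * xpc_process_activate_IRQ_rcvd_sn2() in this patch (userspace analogue).
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t irq_rcvd_lock = PTHREAD_MUTEX_INITIALIZER;
static int irq_rcvd;			/* analogue of xpc_activate_IRQ_rcvd */

static void handle_irq(void)		/* analogue of the IRQ handler */
{
	pthread_mutex_lock(&irq_rcvd_lock);
	irq_rcvd++;
	pthread_mutex_unlock(&irq_rcvd_lock);
}

static int process_irqs_rcvd(void)	/* analogue of the processing path */
{
	int n_expected;

	pthread_mutex_lock(&irq_rcvd_lock);
	n_expected = irq_rcvd;		/* snapshot the count ... */
	irq_rcvd = 0;			/* ... and reset it in one critical section */
	pthread_mutex_unlock(&irq_rcvd_lock);

	return n_expected;
}

int main(void)
{
	handle_irq();
	handle_irq();
	printf("expected to find %d IRQ senders\n", process_irqs_rcvd());
	return 0;
}
```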
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index d1ccadc0857d..8b4b0653d9e9 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -53,12 +53,19 @@
  * Buffer used to store a local copy of portions of a remote partition's
  * reserved page (either its header and part_nasids mask, or its vars).
  */
-static char *xpc_remote_copy_buffer_sn2;
 static void *xpc_remote_copy_buffer_base_sn2;
+static char *xpc_remote_copy_buffer_sn2;
 
 static struct xpc_vars_sn2 *xpc_vars_sn2;
 static struct xpc_vars_part_sn2 *xpc_vars_part_sn2;
 
+static int
+xpc_setup_partitions_sn_sn2(void)
+{
+	/* nothing needs to be done */
+	return 0;
+}
+
 /* SH_IPI_ACCESS shub register value on startup */
 static u64 xpc_sh1_IPI_access_sn2;
 static u64 xpc_sh2_IPI_access0_sn2;
@@ -198,7 +205,12 @@ xpc_init_IRQ_amo_sn2(int index)
 static irqreturn_t
 xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
 {
-	atomic_inc(&xpc_activate_IRQ_rcvd);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+	xpc_activate_IRQ_rcvd++;
+	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
 	wake_up_interruptible(&xpc_activate_IRQ_wq);
 	return IRQ_HANDLED;
 }
@@ -222,6 +234,7 @@ xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid,
 static void
 xpc_send_local_activate_IRQ_sn2(int from_nasid)
 {
+	unsigned long irq_flags;
 	struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa +
 					      (XPC_ACTIVATE_IRQ_AMOS_SN2 *
 					       sizeof(struct amo)));
@@ -230,7 +243,10 @@ xpc_send_local_activate_IRQ_sn2(int from_nasid)
 	FETCHOP_STORE_OP(TO_AMO((u64)&amos[BIT_WORD(from_nasid / 2)].variable),
 			 FETCHOP_OR, BIT_MASK(from_nasid / 2));
 
-	atomic_inc(&xpc_activate_IRQ_rcvd);
+	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+	xpc_activate_IRQ_rcvd++;
+	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
 	wake_up_interruptible(&xpc_activate_IRQ_wq);
 }
 
@@ -375,7 +391,7 @@ static void
 xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch,
 				unsigned long *irq_flags)
 {
-	struct xpc_openclose_args *args = ch->local_openclose_args;
+	struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
 
 	args->reason = ch->reason;
 	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags);
@@ -390,7 +406,7 @@ xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
 static void
 xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
 {
-	struct xpc_openclose_args *args = ch->local_openclose_args;
+	struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
 
 	args->msg_size = ch->msg_size;
 	args->local_nentries = ch->local_nentries;
@@ -400,11 +416,11 @@ xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
 static void
 xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
 {
-	struct xpc_openclose_args *args = ch->local_openclose_args;
+	struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
 
 	args->remote_nentries = ch->remote_nentries;
 	args->local_nentries = ch->local_nentries;
-	args->local_msgqueue_pa = xp_pa(ch->local_msgqueue);
+	args->local_msgqueue_pa = xp_pa(ch->sn.sn2.local_msgqueue);
 	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
 }
 
@@ -420,6 +436,13 @@ xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
 	XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
 }
 
+static void
+xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch,
+				unsigned long msgqueue_pa)
+{
+	ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa;
+}
+
 /*
  * This next set of functions are used to keep track of when a partition is
  * potentially engaged in accessing memory belonging to another partition.
@@ -489,6 +512,17 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
 			      part_sn2->activate_IRQ_phys_cpuid);
 }
 
+static void
+xpc_assume_partition_disengaged_sn2(short partid)
+{
+	struct amo *amo = xpc_vars_sn2->amos_page +
+			  XPC_ENGAGED_PARTITIONS_AMO_SN2;
+
+	/* clear bit(s) based on partid mask in our partition's amo */
+	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
+			 ~BIT(partid));
+}
+
 static int
 xpc_partition_engaged_sn2(short partid)
 {
@@ -510,17 +544,6 @@ xpc_any_partition_engaged_sn2(void)
 	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
 }
 
-static void
-xpc_assume_partition_disengaged_sn2(short partid)
-{
-	struct amo *amo = xpc_vars_sn2->amos_page +
-			  XPC_ENGAGED_PARTITIONS_AMO_SN2;
-
-	/* clear bit(s) based on partid mask in our partition's amo */
-	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
-			 ~BIT(partid));
-}
-
 /* original protection values for each node */
 static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
 
@@ -595,8 +618,8 @@ xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
 }
 
 
-static enum xp_retval
-xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
+static int
+xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp)
 {
 	struct amo *amos_page;
 	int i;
@@ -627,7 +650,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
 		amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1));
 		if (amos_page == NULL) {
 			dev_err(xpc_part, "can't allocate page of amos\n");
-			return xpNoMemory;
+			return -ENOMEM;
 		}
 
 		/*
@@ -639,7 +662,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
 			dev_err(xpc_part, "can't allow amo operations\n");
 			uncached_free_page(__IA64_UNCACHED_OFFSET |
 					   TO_PHYS((u64)amos_page), 1);
-			return ret;
+			return -EPERM;
 		}
 	}
 
@@ -665,7 +688,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
 	(void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO_SN2);
 	(void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO_SN2);
 
-	return xpSuccess;
+	return 0;
 }
 
 static void
@@ -1082,10 +1105,19 @@ xpc_identify_activate_IRQ_sender_sn2(void)
 }
 
 static void
-xpc_process_activate_IRQ_rcvd_sn2(int n_IRQs_expected)
+xpc_process_activate_IRQ_rcvd_sn2(void)
 {
+	unsigned long irq_flags;
+	int n_IRQs_expected;
 	int n_IRQs_detected;
 
+	DBUG_ON(xpc_activate_IRQ_rcvd == 0);
+
+	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+	n_IRQs_expected = xpc_activate_IRQ_rcvd;
+	xpc_activate_IRQ_rcvd = 0;
+	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
 	n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
 	if (n_IRQs_detected < n_IRQs_expected) {
 		/* retry once to help avoid missing amo */
@@ -1094,116 +1126,63 @@ xpc_process_activate_IRQ_rcvd_sn2(int n_IRQs_expected)
 }
 
 /*
- * Guarantee that the kzalloc'd memory is cacheline aligned.
- */
-static void *
-xpc_kzalloc_cacheline_aligned_sn2(size_t size, gfp_t flags, void **base)
-{
-	/* see if kzalloc will give us cachline aligned memory by default */
-	*base = kzalloc(size, flags);
-	if (*base == NULL)
-		return NULL;
-
-	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
-		return *base;
-
-	kfree(*base);
-
-	/* nope, we'll have to do it ourselves */
-	*base = kzalloc(size + L1_CACHE_BYTES, flags);
-	if (*base == NULL)
-		return NULL;
-
-	return (void *)L1_CACHE_ALIGN((u64)*base);
-}
-
-/*
- * Setup the infrastructure necessary to support XPartition Communication
- * between the specified remote partition and the local one.
+ * Setup the channel structures that are sn2 specific.
  */
 static enum xp_retval
-xpc_setup_infrastructure_sn2(struct xpc_partition *part)
+xpc_setup_ch_structures_sn_sn2(struct xpc_partition *part)
 {
 	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+	struct xpc_channel_sn2 *ch_sn2;
 	enum xp_retval retval;
 	int ret;
 	int cpuid;
 	int ch_number;
-	struct xpc_channel *ch;
 	struct timer_list *timer;
 	short partid = XPC_PARTID(part);
 
-	/*
-	 * Allocate all of the channel structures as a contiguous chunk of
-	 * memory.
-	 */
-	DBUG_ON(part->channels != NULL);
-	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
-				 GFP_KERNEL);
-	if (part->channels == NULL) {
-		dev_err(xpc_chan, "can't get memory for channels\n");
-		return xpNoMemory;
-	}
-
 	/* allocate all the required GET/PUT values */
 
 	part_sn2->local_GPs =
-	    xpc_kzalloc_cacheline_aligned_sn2(XPC_GP_SIZE, GFP_KERNEL,
-					      &part_sn2->local_GPs_base);
+	    xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
+					  &part_sn2->local_GPs_base);
 	if (part_sn2->local_GPs == NULL) {
 		dev_err(xpc_chan, "can't get memory for local get/put "
 			"values\n");
-		retval = xpNoMemory;
-		goto out_1;
+		return xpNoMemory;
 	}
 
 	part_sn2->remote_GPs =
-	    xpc_kzalloc_cacheline_aligned_sn2(XPC_GP_SIZE, GFP_KERNEL,
-					      &part_sn2->remote_GPs_base);
+	    xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
					  &part_sn2->remote_GPs_base);
 	if (part_sn2->remote_GPs == NULL) {
 		dev_err(xpc_chan, "can't get memory for remote get/put "
 			"values\n");
 		retval = xpNoMemory;
-		goto out_2;
+		goto out_1;
 	}
 
 	part_sn2->remote_GPs_pa = 0;
 
 	/* allocate all the required open and close args */
 
-	part->local_openclose_args =
-	    xpc_kzalloc_cacheline_aligned_sn2(XPC_OPENCLOSE_ARGS_SIZE,
-					      GFP_KERNEL,
-					      &part->local_openclose_args_base);
-	if (part->local_openclose_args == NULL) {
+	part_sn2->local_openclose_args =
+	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
+					  GFP_KERNEL, &part_sn2->
+					  local_openclose_args_base);
+	if (part_sn2->local_openclose_args == NULL) {
 		dev_err(xpc_chan, "can't get memory for local connect args\n");
 		retval = xpNoMemory;
-		goto out_3;
-	}
-
-	part->remote_openclose_args =
-	    xpc_kzalloc_cacheline_aligned_sn2(XPC_OPENCLOSE_ARGS_SIZE,
-					      GFP_KERNEL,
-					      &part->remote_openclose_args_base);
-	if (part->remote_openclose_args == NULL) {
-		dev_err(xpc_chan, "can't get memory for remote connect args\n");
-		retval = xpNoMemory;
-		goto out_4;
+		goto out_2;
 	}
 
 	part_sn2->remote_openclose_args_pa = 0;
 
 	part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);
-	part->chctl.all_flags = 0;
-	spin_lock_init(&part->chctl_lock);
 
 	part_sn2->notify_IRQ_nasid = 0;
 	part_sn2->notify_IRQ_phys_cpuid = 0;
 	part_sn2->remote_chctl_amo_va = NULL;
 
-	atomic_set(&part->channel_mgr_requests, 1);
-	init_waitqueue_head(&part->channel_mgr_wq);
-
 	sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
 	ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
 			  IRQF_SHARED, part_sn2->notify_IRQ_owner,
@@ -1212,7 +1191,7 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
 		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
 			"errno=%d\n", -ret);
 		retval = xpLackOfResources;
-		goto out_5;
+		goto out_3;
 	}
 
 	/* Setup a timer to check for dropped notify IRQs */
@@ -1224,45 +1203,17 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
 	timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
 	add_timer(timer);
 
-	part->nchannels = XPC_MAX_NCHANNELS;
-
-	atomic_set(&part->nchannels_active, 0);
-	atomic_set(&part->nchannels_engaged, 0);
-
 	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
-		ch = &part->channels[ch_number];
-
-		ch->partid = partid;
-		ch->number = ch_number;
-		ch->flags = XPC_C_DISCONNECTED;
-
-		ch->sn.sn2.local_GP = &part_sn2->local_GPs[ch_number];
-		ch->local_openclose_args =
-		    &part->local_openclose_args[ch_number];
-
-		atomic_set(&ch->kthreads_assigned, 0);
-		atomic_set(&ch->kthreads_idle, 0);
-		atomic_set(&ch->kthreads_active, 0);
+		ch_sn2 = &part->channels[ch_number].sn.sn2;
 
-		atomic_set(&ch->references, 0);
-		atomic_set(&ch->n_to_notify, 0);
+		ch_sn2->local_GP = &part_sn2->local_GPs[ch_number];
+		ch_sn2->local_openclose_args =
+		    &part_sn2->local_openclose_args[ch_number];
 
-		spin_lock_init(&ch->lock);
-		mutex_init(&ch->sn.sn2.msg_to_pull_mutex);
-		init_completion(&ch->wdisconnect_wait);
-
-		atomic_set(&ch->n_on_msg_allocate_wq, 0);
-		init_waitqueue_head(&ch->msg_allocate_wq);
-		init_waitqueue_head(&ch->idle_wq);
+		mutex_init(&ch_sn2->msg_to_pull_mutex);
 	}
 
 	/*
-	 * With the setting of the partition setup_state to XPC_P_SS_SETUP,
-	 * we're declaring that this partition is ready to go.
-	 */
-	part->setup_state = XPC_P_SS_SETUP;
-
-	/*
 	 * Setup the per partition specific variables required by the
 	 * remote partition to establish channel connections with us.
 	 *
@@ -1271,7 +1222,7 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
 	 */
 	xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs);
 	xpc_vars_part_sn2[partid].openclose_args_pa =
-	    xp_pa(part->local_openclose_args);
+	    xp_pa(part_sn2->local_openclose_args);
 	xpc_vars_part_sn2[partid].chctl_amo_pa =
 	    xp_pa(part_sn2->local_chctl_amo_va);
 	cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
@@ -1279,80 +1230,48 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
 	xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
 	    cpu_physical_id(cpuid);
 	xpc_vars_part_sn2[partid].nchannels = part->nchannels;
-	xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1;
+	xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1_SN2;
 
 	return xpSuccess;
 
-	/* setup of infrastructure failed */
-out_5:
-	kfree(part->remote_openclose_args_base);
-	part->remote_openclose_args = NULL;
-out_4:
-	kfree(part->local_openclose_args_base);
-	part->local_openclose_args = NULL;
+	/* setup of ch structures failed */
 out_3:
+	kfree(part_sn2->local_openclose_args_base);
+	part_sn2->local_openclose_args = NULL;
+out_2:
 	kfree(part_sn2->remote_GPs_base);
 	part_sn2->remote_GPs = NULL;
-out_2:
+out_1:
 	kfree(part_sn2->local_GPs_base);
 	part_sn2->local_GPs = NULL;
-out_1:
-	kfree(part->channels);
-	part->channels = NULL;
 	return retval;
 }
 
 /*
- * Teardown the infrastructure necessary to support XPartition Communication
- * between the specified remote partition and the local one.
+ * Teardown the channel structures that are sn2 specific.
  */
 static void
-xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
+xpc_teardown_ch_structures_sn_sn2(struct xpc_partition *part)
 {
 	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
 	short partid = XPC_PARTID(part);
 
 	/*
-	 * We start off by making this partition inaccessible to local
-	 * processes by marking it as no longer setup. Then we make it
-	 * inaccessible to remote processes by clearing the XPC per partition
-	 * specific variable's magic # (which indicates that these variables
-	 * are no longer valid) and by ignoring all XPC notify IRQs sent to
-	 * this partition.
+	 * Indicate that the variables specific to the remote partition are no
+	 * longer available for its use.
 	 */
-
-	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
-	DBUG_ON(atomic_read(&part->nchannels_active) != 0);
-	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
-	part->setup_state = XPC_P_SS_WTEARDOWN;
-
 	xpc_vars_part_sn2[partid].magic = 0;
 
-	free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
-
-	/*
-	 * Before proceeding with the teardown we have to wait until all
-	 * existing references cease.
-	 */
-	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
-
-	/* now we can begin tearing down the infrastructure */
-
-	part->setup_state = XPC_P_SS_TORNDOWN;
-
 	/* in case we've still got outstanding timers registered... */
 	del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);
+	free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
 
-	kfree(part->remote_openclose_args_base);
-	part->remote_openclose_args = NULL;
-	kfree(part->local_openclose_args_base);
-	part->local_openclose_args = NULL;
+	kfree(part_sn2->local_openclose_args_base);
+	part_sn2->local_openclose_args = NULL;
 	kfree(part_sn2->remote_GPs_base);
 	part_sn2->remote_GPs = NULL;
 	kfree(part_sn2->local_GPs_base);
 	part_sn2->local_GPs = NULL;
-	kfree(part->channels);
-	part->channels = NULL;
 	part_sn2->local_chctl_amo_va = NULL;
 }
 
@@ -1429,8 +1348,8 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
 
 	/* see if they've been set up yet */
 
-	if (pulled_entry->magic != XPC_VP_MAGIC1 &&
-	    pulled_entry->magic != XPC_VP_MAGIC2) {
+	if (pulled_entry->magic != XPC_VP_MAGIC1_SN2 &&
+	    pulled_entry->magic != XPC_VP_MAGIC2_SN2) {
 
 		if (pulled_entry->magic != 0) {
 			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
@@ -1443,7 +1362,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
 			return xpRetry;
 		}
 
-	if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1) {
+	if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1_SN2) {
 
 		/* validate the variables */
 
@@ -1473,10 +1392,10 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
 
 		/* let the other side know that we've pulled their variables */
 
-		xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2;
+		xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2_SN2;
 	}
 
-	if (pulled_entry->magic == XPC_VP_MAGIC1)
+	if (pulled_entry->magic == XPC_VP_MAGIC1_SN2)
 		return xpRetry;
 
 	return xpSuccess;
@@ -1605,6 +1524,7 @@ xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
 static enum xp_retval
 xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch)
 {
+	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
 	unsigned long irq_flags;
 	int nentries;
 	size_t nbytes;
@@ -1612,17 +1532,17 @@ xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch)
 	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
-		ch->local_msgqueue =
-		    xpc_kzalloc_cacheline_aligned_sn2(nbytes, GFP_KERNEL,
-						      &ch->local_msgqueue_base);
-		if (ch->local_msgqueue == NULL)
+		ch_sn2->local_msgqueue =
+		    xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL,
+						  &ch_sn2->local_msgqueue_base);
+		if (ch_sn2->local_msgqueue == NULL)
 			continue;
 
 		nbytes = nentries * sizeof(struct xpc_notify);
-		ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
-		if (ch->notify_queue == NULL) {
-			kfree(ch->local_msgqueue_base);
-			ch->local_msgqueue = NULL;
+		ch_sn2->notify_queue = kzalloc(nbytes, GFP_KERNEL);
+		if (ch_sn2->notify_queue == NULL) {
+			kfree(ch_sn2->local_msgqueue_base);
+			ch_sn2->local_msgqueue = NULL;
 			continue;
 		}
 
@@ -1649,6 +1569,7 @@ xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch)
 static enum xp_retval
 xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch)
 {
+	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
 	unsigned long irq_flags;
 	int nentries;
 	size_t nbytes;
@@ -1658,10 +1579,10 @@ xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch)
 	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
-		ch->remote_msgqueue =
-		    xpc_kzalloc_cacheline_aligned_sn2(nbytes, GFP_KERNEL,
-						      &ch->remote_msgqueue_base);
-		if (ch->remote_msgqueue == NULL)
+		ch_sn2->remote_msgqueue =
+		    xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2->
+						  remote_msgqueue_base);
+		if (ch_sn2->remote_msgqueue == NULL)
 			continue;
 
 		spin_lock_irqsave(&ch->lock, irq_flags);
@@ -1687,8 +1608,9 @@ xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch)
  * Note: Assumes all of the channel sizes are filled in.
  */
 static enum xp_retval
-xpc_allocate_msgqueues_sn2(struct xpc_channel *ch)
+xpc_setup_msg_structures_sn2(struct xpc_channel *ch)
 {
+	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
 	enum xp_retval ret;
 
 	DBUG_ON(ch->flags & XPC_C_SETUP);
@@ -1698,10 +1620,10 @@ xpc_allocate_msgqueues_sn2(struct xpc_channel *ch)
 
 		ret = xpc_allocate_remote_msgqueue_sn2(ch);
 		if (ret != xpSuccess) {
-			kfree(ch->local_msgqueue_base);
-			ch->local_msgqueue = NULL;
-			kfree(ch->notify_queue);
-			ch->notify_queue = NULL;
+			kfree(ch_sn2->local_msgqueue_base);
+			ch_sn2->local_msgqueue = NULL;
+			kfree(ch_sn2->notify_queue);
+			ch_sn2->notify_queue = NULL;
 		}
 	}
 	return ret;
@@ -1715,21 +1637,13 @@ xpc_allocate_msgqueues_sn2(struct xpc_channel *ch)
  * they're cleared when XPC_C_DISCONNECTED is cleared.
 */
 static void
-xpc_free_msgqueues_sn2(struct xpc_channel *ch)
+xpc_teardown_msg_structures_sn2(struct xpc_channel *ch)
 {
 	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
 
 	DBUG_ON(!spin_is_locked(&ch->lock));
-	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
 
-	ch->remote_msgqueue_pa = 0;
-	ch->func = NULL;
-	ch->key = NULL;
-	ch->msg_size = 0;
-	ch->local_nentries = 0;
-	ch->remote_nentries = 0;
-	ch->kthreads_assigned_limit = 0;
-	ch->kthreads_idle_limit = 0;
+	ch_sn2->remote_msgqueue_pa = 0;
 
 	ch_sn2->local_GP->get = 0;
 	ch_sn2->local_GP->put = 0;
@@ -1745,12 +1659,12 @@ xpc_free_msgqueues_sn2(struct xpc_channel *ch)
 		dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
 			ch->flags, ch->partid, ch->number);
 
-		kfree(ch->local_msgqueue_base);
-		ch->local_msgqueue = NULL;
-		kfree(ch->remote_msgqueue_base);
-		ch->remote_msgqueue = NULL;
-		kfree(ch->notify_queue);
-		ch->notify_queue = NULL;
+		kfree(ch_sn2->local_msgqueue_base);
+		ch_sn2->local_msgqueue = NULL;
+		kfree(ch_sn2->remote_msgqueue_base);
+		ch_sn2->remote_msgqueue = NULL;
+		kfree(ch_sn2->notify_queue);
+		ch_sn2->notify_queue = NULL;
 	}
 }
 
@@ -1766,7 +1680,7 @@ xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
 
 	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
 
-		notify = &ch->notify_queue[get % ch->local_nentries];
+		notify = &ch->sn.sn2.notify_queue[get % ch->local_nentries];
 
 		/*
 		 * See if the notify entry indicates it was associated with
@@ -1818,7 +1732,7 @@ xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
 
 	get = ch_sn2->w_remote_GP.get;
 	do {
-		msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+		msg = (struct xpc_msg *)((u64)ch_sn2->local_msgqueue +
 					 (get % ch->local_nentries) *
 					 ch->msg_size);
 		msg->flags = 0;
@@ -1837,7 +1751,7 @@ xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
 
 	put = ch_sn2->w_remote_GP.put;
 	do {
-		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
+		msg = (struct xpc_msg *)((u64)ch_sn2->remote_msgqueue +
 					 (put % ch->remote_nentries) *
 					 ch->msg_size);
 		msg->flags = 0;
@@ -1976,8 +1890,9 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
 		}
 
 		msg_offset = msg_index * ch->msg_size;
-		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
-		remote_msg_pa = ch->remote_msgqueue_pa + msg_offset;
+		msg = (struct xpc_msg *)((u64)ch_sn2->remote_msgqueue +
+					 msg_offset);
+		remote_msg_pa = ch_sn2->remote_msgqueue_pa + msg_offset;
 
 		ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa,
 						     nmsgs * ch->msg_size);
@@ -2001,7 +1916,7 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
 
 	/* return the message we were looking for */
 	msg_offset = (get % ch->remote_nentries) * ch->msg_size;
-	msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
+	msg = (struct xpc_msg *)((u64)ch_sn2->remote_msgqueue + msg_offset);
 
 	return msg;
 }
@@ -2080,7 +1995,7 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
 		if (put == ch_sn2->w_local_GP.put)
 			break;
 
-		msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+		msg = (struct xpc_msg *)((u64)ch_sn2->local_msgqueue +
 					 (put % ch->local_nentries) *
 					 ch->msg_size);
 
@@ -2182,7 +2097,7 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
 	}
 
 	/* get the message's address and initialize it */
-	msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+	msg = (struct xpc_msg *)((u64)ch_sn2->local_msgqueue +
 				 (put % ch->local_nentries) * ch->msg_size);
 
 	DBUG_ON(msg->flags != 0);
@@ -2207,6 +2122,7 @@ xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload,
 	       void *key)
 {
 	enum xp_retval ret = xpSuccess;
+	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
 	struct xpc_msg *msg = msg;
 	struct xpc_notify *notify = notify;
 	s64 msg_number;
@@ -2243,7 +2159,7 @@ xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload,
 
 		atomic_inc(&ch->n_to_notify);
 
-		notify = &ch->notify_queue[msg_number % ch->local_nentries];
+		notify = &ch_sn2->notify_queue[msg_number % ch->local_nentries];
 		notify->func = func;
 		notify->key = key;
 		notify->type = notify_type;
@@ -2279,7 +2195,7 @@ xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload,
 
 	/* see if the message is next in line to be sent, if so send it */
 
-	put = ch->sn.sn2.local_GP->put;
+	put = ch_sn2->local_GP->put;
 	if (put == msg_number)
 		xpc_send_msgs_sn2(ch, put);
 
@@ -2307,7 +2223,7 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
 		if (get == ch_sn2->w_local_GP.get)
 			break;
 
-		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
+		msg = (struct xpc_msg *)((u64)ch_sn2->remote_msgqueue +
 					 (get % ch->remote_nentries) *
 					 ch->msg_size);
 
@@ -2385,8 +2301,9 @@ xpc_init_sn2(void)
 	int ret;
 	size_t buf_size;
 
+	xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2;
 	xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2;
-	xpc_rsvd_page_init = xpc_rsvd_page_init_sn2;
+	xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2;
 	xpc_increment_heartbeat = xpc_increment_heartbeat_sn2;
 	xpc_offline_heartbeat = xpc_offline_heartbeat_sn2;
 	xpc_online_heartbeat = xpc_online_heartbeat_sn2;
@@ -2403,29 +2320,33 @@ xpc_init_sn2(void)
 	    xpc_cancel_partition_deactivation_request_sn2;
 
 	xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2;
-	xpc_setup_infrastructure = xpc_setup_infrastructure_sn2;
-	xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2;
+	xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_sn2;
+	xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_sn2;
 	xpc_make_first_contact = xpc_make_first_contact_sn2;
+
 	xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2;
-	xpc_allocate_msgqueues = xpc_allocate_msgqueues_sn2;
-	xpc_free_msgqueues = xpc_free_msgqueues_sn2;
+	xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2;
+	xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2;
+	xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2;
+	xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2;
+
+	xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2;
+
+	xpc_setup_msg_structures = xpc_setup_msg_structures_sn2;
+	xpc_teardown_msg_structures = xpc_teardown_msg_structures_sn2;
+
 	xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2;
 	xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2;
 	xpc_n_of_deliverable_msgs = xpc_n_of_deliverable_msgs_sn2;
 	xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2;
 
 	xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2;
-	xpc_partition_engaged = xpc_partition_engaged_sn2;
-	xpc_any_partition_engaged = xpc_any_partition_engaged_sn2;
 	xpc_indicate_partition_disengaged =
 	    xpc_indicate_partition_disengaged_sn2;
+	xpc_partition_engaged = xpc_partition_engaged_sn2;
+	xpc_any_partition_engaged = xpc_any_partition_engaged_sn2;
 	xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2;
 
-	xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2;
-	xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2;
-	xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2;
-	xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2;
-
 	xpc_send_msg = xpc_send_msg_sn2;
 	xpc_received_msg = xpc_received_msg_sn2;
 