author	Robin Holt <holt@sgi.com>	2009-04-13 17:40:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-13 18:04:33 -0400
commit	a7665b0a380585fbd70a2275f3120c6086e0c92d (patch)
tree	01465e3d7a8bc917084e9a47789f3508c38307cf /drivers/misc/sgi-xp/xpc_main.c
parent	efdd06ed181a88a11e612238c1ac04668e665395 (diff)
sgi-xpc: clean up numerous globals
Introduce xpc_arch_ops and eliminate numerous individual global definitions.
Signed-off-by: Robin Holt <holt@sgi.com>
Cc: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
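
The ops table gathers what were previously dozens of separate global function pointers into a single struct, so every call site in xpc_main.c becomes xpc_arch_ops.<member>(). The authoritative definition of struct xpc_arch_operations lives in drivers/misc/sgi-xp/xpc.h, which is outside this diffstat; the excerpt below is only a sketch reconstructed from the members referenced in this diff, so the ordering and any members not used by xpc_main.c are assumptions.

/*
 * Sketch only: reconstructed from the xpc_arch_ops.<member> call sites
 * in this diff. The real definition is in drivers/misc/sgi-xp/xpc.h.
 */
struct xpc_arch_operations {
	int (*setup_partitions) (void);
	void (*teardown_partitions) (void);

	void (*allow_hb) (short partid);
	void (*disallow_hb) (short partid);
	void (*disallow_all_hbs) (void);
	void (*heartbeat_init) (void);
	void (*heartbeat_exit) (void);
	void (*increment_heartbeat) (void);
	void (*offline_heartbeat) (void);
	void (*online_heartbeat) (void);
	enum xp_retval (*get_remote_heartbeat) (struct xpc_partition *part);

	enum xp_retval (*make_first_contact) (struct xpc_partition *part);
	void (*process_activate_IRQ_rcvd) (void);
	enum xp_retval (*setup_ch_structures) (struct xpc_partition *part);
	void (*teardown_ch_structures) (struct xpc_partition *part);

	void (*request_partition_reactivation) (struct xpc_partition *part);
	void (*request_partition_deactivation) (struct xpc_partition *part);

	void (*indicate_partition_engaged) (struct xpc_partition *part);
	void (*indicate_partition_disengaged) (struct xpc_partition *part);
	int (*partition_engaged) (short partid);
	int (*any_partition_engaged) (void);

	int (*n_of_deliverable_payloads) (struct xpc_channel *ch);
	/* ...members used only outside xpc_main.c omitted... */
};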
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_main.c')
-rw-r--r--	drivers/misc/sgi-xp/xpc_main.c	130
1 file changed, 38 insertions(+), 92 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 2bb070e17222..fd3688a3e23f 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -169,68 +169,7 @@ static struct notifier_block xpc_die_notifier = {
 	.notifier_call = xpc_system_die,
 };
 
-int (*xpc_setup_partitions_sn) (void);
-void (*xpc_teardown_partitions_sn) (void);
-enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
-						  unsigned long *rp_pa,
-						  size_t *len);
-int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *rp);
-
-void (*xpc_allow_hb) (short partid);
-void (*xpc_disallow_hb) (short partid);
-void (*xpc_disallow_all_hbs) (void);
-void (*xpc_heartbeat_init) (void);
-void (*xpc_heartbeat_exit) (void);
-void (*xpc_increment_heartbeat) (void);
-void (*xpc_offline_heartbeat) (void);
-void (*xpc_online_heartbeat) (void);
-enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *part);
-
-enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
-void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
-u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part);
-enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch);
-void (*xpc_teardown_msg_structures) (struct xpc_channel *ch);
-void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number);
-int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *ch);
-void *(*xpc_get_deliverable_payload) (struct xpc_channel *ch);
-
-void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
-					  unsigned long remote_rp_pa,
-					  int nasid);
-void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
-void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
-void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);
-
-void (*xpc_process_activate_IRQ_rcvd) (void);
-enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *part);
-void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *part);
-
-void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);
-int (*xpc_partition_engaged) (short partid);
-int (*xpc_any_partition_engaged) (void);
-void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
-void (*xpc_assume_partition_disengaged) (short partid);
-
-void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch,
-				     unsigned long *irq_flags);
-void (*xpc_send_chctl_closereply) (struct xpc_channel *ch,
-				   unsigned long *irq_flags);
-void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
-				    unsigned long *irq_flags);
-void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
-				  unsigned long *irq_flags);
-void (*xpc_send_chctl_opencomplete) (struct xpc_channel *ch,
-				     unsigned long *irq_flags);
-
-enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch,
-					       unsigned long msgqueue_pa);
-
-enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags,
-				    void *payload, u16 payload_size,
-				    u8 notify_type, xpc_notify_func func,
-				    void *key);
-void (*xpc_received_payload) (struct xpc_channel *ch, void *payload);
+struct xpc_arch_operations xpc_arch_ops;
 
 /*
  * Timer function to enforce the timelimit on the partition disengage.
@@ -245,7 +184,7 @@ xpc_timeout_partition_disengage(unsigned long data)
 	(void)xpc_partition_disengaged(part);
 
 	DBUG_ON(part->disengage_timeout != 0);
-	DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
+	DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
 }
 
 /*
@@ -256,7 +195,7 @@ xpc_timeout_partition_disengage(unsigned long data)
 static void
 xpc_hb_beater(unsigned long dummy)
 {
-	xpc_increment_heartbeat();
+	xpc_arch_ops.increment_heartbeat();
 
 	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
 		wake_up_interruptible(&xpc_activate_IRQ_wq);
@@ -268,7 +207,7 @@ xpc_hb_beater(unsigned long dummy)
 static void
 xpc_start_hb_beater(void)
 {
-	xpc_heartbeat_init();
+	xpc_arch_ops.heartbeat_init();
 	init_timer(&xpc_hb_timer);
 	xpc_hb_timer.function = xpc_hb_beater;
 	xpc_hb_beater(0);
@@ -278,7 +217,7 @@ static void
 xpc_stop_hb_beater(void)
 {
 	del_timer_sync(&xpc_hb_timer);
-	xpc_heartbeat_exit();
+	xpc_arch_ops.heartbeat_exit();
 }
 
 /*
@@ -307,7 +246,7 @@ xpc_check_remote_hb(void)
 			continue;
 		}
 
-		ret = xpc_get_remote_heartbeat(part);
+		ret = xpc_arch_ops.get_remote_heartbeat(part);
 		if (ret != xpSuccess)
 			XPC_DEACTIVATE_PARTITION(part, ret);
 	}
@@ -358,7 +297,7 @@ xpc_hb_checker(void *ignore)
 			force_IRQ = 0;
 			dev_dbg(xpc_part, "processing activate IRQs "
 				"received\n");
-			xpc_process_activate_IRQ_rcvd();
+			xpc_arch_ops.process_activate_IRQ_rcvd();
 		}
 
 		/* wait for IRQ or timeout */
@@ -533,7 +472,7 @@ xpc_setup_ch_structures(struct xpc_partition *part)
 		init_waitqueue_head(&ch->idle_wq);
 	}
 
-	ret = xpc_setup_ch_structures_sn(part);
+	ret = xpc_arch_ops.setup_ch_structures(part);
 	if (ret != xpSuccess)
 		goto out_2;
 
@@ -577,7 +516,7 @@ xpc_teardown_ch_structures(struct xpc_partition *part)
 
 	/* now we can begin tearing down the infrastructure */
 
-	xpc_teardown_ch_structures_sn(part);
+	xpc_arch_ops.teardown_ch_structures(part);
 
 	kfree(part->remote_openclose_args_base);
 	part->remote_openclose_args = NULL;
@@ -625,12 +564,12 @@ xpc_activating(void *__partid)
 
 	dev_dbg(xpc_part, "activating partition %d\n", partid);
 
-	xpc_allow_hb(partid);
+	xpc_arch_ops.allow_hb(partid);
 
 	if (xpc_setup_ch_structures(part) == xpSuccess) {
 		(void)xpc_part_ref(part);	/* this will always succeed */
 
-		if (xpc_make_first_contact(part) == xpSuccess) {
+		if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
 			xpc_mark_partition_active(part);
 			xpc_channel_mgr(part);
 			/* won't return until partition is deactivating */
@@ -640,12 +579,12 @@ xpc_activating(void *__partid)
 		xpc_teardown_ch_structures(part);
 	}
 
-	xpc_disallow_hb(partid);
+	xpc_arch_ops.disallow_hb(partid);
 	xpc_mark_partition_inactive(part);
 
 	if (part->reason == xpReactivating) {
 		/* interrupting ourselves results in activating partition */
-		xpc_request_partition_reactivation(part);
+		xpc_arch_ops.request_partition_reactivation(part);
 	}
 
 	return 0;
@@ -718,10 +657,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
 static void
 xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
 {
+	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
+		xpc_arch_ops.n_of_deliverable_payloads;
+
 	do {
 		/* deliver messages to their intended recipients */
 
-		while (xpc_n_of_deliverable_payloads(ch) > 0 &&
+		while (n_of_deliverable_payloads(ch) > 0 &&
 		       !(ch->flags & XPC_C_DISCONNECTING)) {
 			xpc_deliver_payload(ch);
 		}
@@ -737,7 +679,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
 				"wait_event_interruptible_exclusive()\n");
 
 			(void)wait_event_interruptible_exclusive(ch->idle_wq,
-				(xpc_n_of_deliverable_payloads(ch) > 0 ||
+				(n_of_deliverable_payloads(ch) > 0 ||
 				 (ch->flags & XPC_C_DISCONNECTING)));
 
 			atomic_dec(&ch->kthreads_idle);
@@ -754,6 +696,8 @@ xpc_kthread_start(void *args)
 	struct xpc_channel *ch;
 	int n_needed;
 	unsigned long irq_flags;
+	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
+		xpc_arch_ops.n_of_deliverable_payloads;
 
 	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
 		partid, ch_number);
@@ -782,7 +726,7 @@
 		 * additional kthreads to help deliver them. We only
 		 * need one less than total #of messages to deliver.
 		 */
-		n_needed = xpc_n_of_deliverable_payloads(ch) - 1;
+		n_needed = n_of_deliverable_payloads(ch) - 1;
 		if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
 			xpc_activate_kthreads(ch, n_needed);
 
@@ -810,7 +754,7 @@
 
 	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
 	    atomic_dec_return(&part->nchannels_engaged) == 0) {
-		xpc_indicate_partition_disengaged(part);
+		xpc_arch_ops.indicate_partition_disengaged(part);
 	}
 
 	xpc_msgqueue_deref(ch);
@@ -842,6 +786,8 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
 	struct task_struct *kthread;
+	void (*indicate_partition_disengaged) (struct xpc_partition *) =
+		xpc_arch_ops.indicate_partition_disengaged;
 
 	while (needed-- > 0) {
 
@@ -863,7 +809,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 
 		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
 			   atomic_inc_return(&part->nchannels_engaged) == 1) {
-			xpc_indicate_partition_engaged(part);
+			xpc_arch_ops.indicate_partition_engaged(part);
 		}
 		(void)xpc_part_ref(part);
 		xpc_msgqueue_ref(ch);
@@ -885,7 +831,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 
 			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
 			    atomic_dec_return(&part->nchannels_engaged) == 0) {
-				xpc_indicate_partition_disengaged(part);
+				indicate_partition_disengaged(part);
 			}
 			xpc_msgqueue_deref(ch);
 			xpc_part_deref(part);
@@ -998,13 +944,13 @@ xpc_setup_partitions(void)
 		atomic_set(&part->references, 0);
 	}
 
-	return xpc_setup_partitions_sn();
+	return xpc_arch_ops.setup_partitions();
 }
 
 static void
 xpc_teardown_partitions(void)
 {
-	xpc_teardown_partitions_sn();
+	xpc_arch_ops.teardown_partitions();
 	kfree(xpc_partitions);
 }
 
@@ -1060,7 +1006,7 @@ xpc_do_exit(enum xp_retval reason)
 			disengage_timeout = part->disengage_timeout;
 		}
 
-		if (xpc_any_partition_engaged()) {
+		if (xpc_arch_ops.any_partition_engaged()) {
 			if (time_is_before_jiffies(printmsg_time)) {
 				dev_info(xpc_part, "waiting for remote "
 					 "partitions to deactivate, timeout in "
@@ -1091,7 +1037,7 @@
 
 	} while (1);
 
-	DBUG_ON(xpc_any_partition_engaged());
+	DBUG_ON(xpc_arch_ops.any_partition_engaged());
 
 	xpc_teardown_rsvd_page();
 
@@ -1156,15 +1102,15 @@ xpc_die_deactivate(void)
 	/* keep xpc_hb_checker thread from doing anything (just in case) */
 	xpc_exiting = 1;
 
-	xpc_disallow_all_hbs();	/*indicate we're deactivated */
+	xpc_arch_ops.disallow_all_hbs();	/*indicate we're deactivated */
 
 	for (partid = 0; partid < xp_max_npartitions; partid++) {
 		part = &xpc_partitions[partid];
 
-		if (xpc_partition_engaged(partid) ||
+		if (xpc_arch_ops.partition_engaged(partid) ||
 		    part->act_state != XPC_P_AS_INACTIVE) {
-			xpc_request_partition_deactivation(part);
-			xpc_indicate_partition_disengaged(part);
+			xpc_arch_ops.request_partition_deactivation(part);
+			xpc_arch_ops.indicate_partition_disengaged(part);
 		}
 	}
 
@@ -1181,7 +1127,7 @@ xpc_die_deactivate(void)
 	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
 
 	while (1) {
-		any_engaged = xpc_any_partition_engaged();
+		any_engaged = xpc_arch_ops.any_partition_engaged();
 		if (!any_engaged) {
 			dev_info(xpc_part, "all partitions have deactivated\n");
 			break;
@@ -1190,7 +1136,7 @@
 		if (!keep_waiting--) {
 			for (partid = 0; partid < xp_max_npartitions;
 			     partid++) {
-				if (xpc_partition_engaged(partid)) {
+				if (xpc_arch_ops.partition_engaged(partid)) {
 					dev_info(xpc_part, "deactivate from "
 						 "remote partition %d timed "
 						 "out\n", partid);
@@ -1237,7 +1183,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 		/* fall through */
 	case DIE_MCA_MONARCH_ENTER:
 	case DIE_INIT_MONARCH_ENTER:
-		xpc_offline_heartbeat();
+		xpc_arch_ops.offline_heartbeat();
 		break;
 
 	case DIE_KDEBUG_LEAVE:
@@ -1248,7 +1194,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 		/* fall through */
 	case DIE_MCA_MONARCH_LEAVE:
 	case DIE_INIT_MONARCH_LEAVE:
-		xpc_online_heartbeat();
+		xpc_arch_ops.online_heartbeat();
 		break;
 	}
 #else
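
With the globals gone, the table is populated once at init time by whichever arch backend is active. A minimal sketch of that registration pattern follows; the initializer name xpc_arch_ops_sn2, the xpc_init_sn2() hook, and the *_sn2 member functions mirror the driver's sn2/uv naming convention but are not shown in this diff, so treat them as assumptions.

/* Hedged sketch of backend registration, e.g. in xpc_sn2.c; the actual
 * initializer and init function are not part of this diff. */
static struct xpc_arch_operations xpc_arch_ops_sn2 = {
	.setup_partitions = xpc_setup_partitions_sn2,
	.heartbeat_init = xpc_heartbeat_init_sn2,
	.increment_heartbeat = xpc_increment_heartbeat_sn2,
	.partition_engaged = xpc_partition_engaged_sn2,
	/* ... one entry per member ... */
};

int
xpc_init_sn2(void)
{
	xpc_arch_ops = xpc_arch_ops_sn2;	/* plain struct copy */
	/* ... remaining sn2-specific setup ... */
	return 0;
}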