path: root/arch/ia64/sn/kernel/xpc.h
author	Dean Nelson <dcn@sgi.com>	2005-09-01 15:01:37 -0400
committer	Tony Luck <tony.luck@intel.com>	2005-09-06 19:15:38 -0400
commit	a607c38971fd078865fa9bef39e6c1d4435680c8 (patch)
tree	cb7853f0d74ee6a9cd92ccc721096b57367d0390 /arch/ia64/sn/kernel/xpc.h
parent	4706df3d3c42af802597d82c8b1542c3d52eab23 (diff)
[IA64-SGI] get XPC to cleanly disengage from remote memory references
When XPC is being shutdown (i.e., rmmod, reboot) it doesn't ensure that other partitions with whom it was connected have completely disengaged from any attempt at cross-partition memory references. This can lead to MCAs in any of these other partitions when the partition is reset.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/sn/kernel/xpc.h')
-rw-r--r--	arch/ia64/sn/kernel/xpc.h	288
1 file changed, 265 insertions(+), 23 deletions(-)
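
In rough outline, the change below works like this: when a partition sets up XPC infrastructure for a remote partition it sets a bit for itself in that partition's engaged-partitions AMO, and at shutdown the local XPC sets a bit in the remote partition's disengage-request AMO, sends it an activate IRQ, and then waits (bounded by XPC_DISENGAGE_REQUEST_TIMELIMIT) for the remote side to clear its bit out of the local engaged-partitions AMO. A minimal sketch of that shutdown-side wait, using only helpers added by this patch; the xpc_wait_for_remote_disengage() wrapper and its open-coded polling loop are illustrative, not the code the patch adds to xpc_main.c (the patch drives the wait with the new disengage_request_timer/disengage_request_timeout fields instead):

static void
xpc_wait_for_remote_disengage(struct xpc_partition *part)
{
	u64 partid_mask = 1UL << XPC_PARTID(part);
	unsigned long timeout = jiffies +
				(XPC_DISENGAGE_REQUEST_TIMELIMIT * HZ);

	/* ask the remote partition to quit referencing our memory */
	xpc_request_partition_disengage(part);
	xpc_IPI_send_disengage(part);

	/* wait for the remote side to clear its bit in our engaged AMO */
	while (xpc_partition_engaged(partid_mask)) {
		if (time_after(jiffies, timeout)) {
			break;	/* remote partition never disengaged */
		}
		msleep(100);
	}
}
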
diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h
index d0ee635daf2e..565822ab3d08 100644
--- a/arch/ia64/sn/kernel/xpc.h
+++ b/arch/ia64/sn/kernel/xpc.h
@@ -57,7 +57,7 @@
 #define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
 
 #define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */
-#define XPC_HB_CHECK_DEFAULT_TIMEOUT 20 /* check HB every x secs */
+#define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */
 
 /* define the process name of HB checker and the CPU it is pinned to */
 #define XPC_HB_CHECK_THREAD_NAME "xpc_hb"
@@ -67,11 +67,6 @@
 #define XPC_DISCOVERY_THREAD_NAME "xpc_discovery"
 
 
-#define XPC_HB_ALLOWED(_p, _v) ((_v)->heartbeating_to_mask & (1UL << (_p)))
-#define XPC_ALLOW_HB(_p, _v) (_v)->heartbeating_to_mask |= (1UL << (_p))
-#define XPC_DISALLOW_HB(_p, _v) (_v)->heartbeating_to_mask &= (~(1UL << (_p)))
-
-
 /*
  * Reserved Page provided by SAL.
  *
@@ -88,14 +83,38 @@ struct xpc_rsvd_page {
 	u8 version;
 	u8 pad[6]; /* pad to u64 align */
 	volatile u64 vars_pa;
+	struct timespec stamp; /* time when reserved page was initialized */
 	u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
 	u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
 };
-#define XPC_RP_VERSION _XPC_VERSION(1,0) /* version 1.0 of the reserved page */
 
 #define XPC_RSVD_PAGE_ALIGNED_SIZE \
 			(L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)))
 
+#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
+
+#define XPC_SUPPORTS_RP_STAMP(_version) \
+			(_version >= _XPC_VERSION(1,1))
+
+/*
+ * compare stamps - the return value is:
+ *
+ *	< 0, if stamp1 < stamp2
+ *	= 0, if stamp1 == stamp2
+ *	> 0, if stamp1 > stamp2
+ */
+static inline int
+xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
+{
+	int ret;
+
+
+	if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
+		ret = stamp1->tv_nsec - stamp2->tv_nsec;
+	}
+	return ret;
+}
+
 
 /*
  * Define the structures by which XPC variables can be exported to other
@@ -121,12 +140,61 @@ struct xpc_vars {
 	u64 vars_part_pa;
 	u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */
 	AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */
-	AMO_t *act_amos; /* pointer to the first activation AMO */
 };
-#define XPC_V_VERSION _XPC_VERSION(3,0) /* version 3.0 of the cross vars */
 
 #define XPC_VARS_ALIGNED_SIZE (L1_CACHE_ALIGN(sizeof(struct xpc_vars)))
 
+#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
+
+#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
+			(_version >= _XPC_VERSION(3,1))
+
+
+static inline int
+xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
+{
+	return ((vars->heartbeating_to_mask & (1UL << partid)) != 0);
+}
+
+static inline void
+xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
+{
+	u64 old_mask, new_mask;
+
+	do {
+		old_mask = vars->heartbeating_to_mask;
+		new_mask = (old_mask | (1UL << partid));
+	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
+							old_mask);
+}
+
+static inline void
+xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
+{
+	u64 old_mask, new_mask;
+
+	do {
+		old_mask = vars->heartbeating_to_mask;
+		new_mask = (old_mask & ~(1UL << partid));
+	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
+							old_mask);
+}
+
+
+/*
+ * The AMOs page consists of a number of AMO variables which are divided into
+ * four groups, The first two groups are used to identify an IRQ's sender.
+ * These two groups consist of 64 and 16 AMO variables respectively. The last
+ * two groups, consisting of just one AMO variable each, are used to identify
+ * the remote partitions that are currently engaged (from the viewpoint of
+ * the XPC running on the remote partition).
+ */
+#define XPC_NOTIFY_IRQ_AMOS 0
+#define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS)
+#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
+#define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1)
+
+
 /*
  * The following structure describes the per partition specific variables.
  *
@@ -358,7 +426,7 @@ struct xpc_channel {
 	void *key; /* pointer to user's key */
 
 	struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
-	struct semaphore teardown_sema; /* wait for teardown completion */
+	struct semaphore wdisconnect_sema; /* wait for channel disconnect */
 
 	struct xpc_openclose_args *local_openclose_args; /* args passed on */
 					/* opening or closing of channel */
@@ -410,6 +478,7 @@ struct xpc_channel {
 
 #define XPC_C_DISCONNECTED 0x00002000 /* channel is disconnected */
 #define XPC_C_DISCONNECTING 0x00004000 /* channel is being disconnected */
+#define XPC_C_WDISCONNECT 0x00008000 /* waiting for channel disconnect */
 
 
 
@@ -422,6 +491,8 @@ struct xpc_partition {
 
 	/* XPC HB infrastructure */
 
+	u8 remote_rp_version; /* version# of partition's rsvd pg */
+	struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */
 	u64 remote_rp_pa; /* phys addr of partition's rsvd pg */
 	u64 remote_vars_pa; /* phys addr of partition's vars */
 	u64 remote_vars_part_pa; /* phys addr of partition's vars part */
@@ -432,10 +503,14 @@ struct xpc_partition {
 	u32 act_IRQ_rcvd; /* IRQs since activation */
 	spinlock_t act_lock; /* protect updating of act_state */
 	u8 act_state; /* from XPC HB viewpoint */
+	u8 remote_vars_version; /* version# of partition's vars */
 	enum xpc_retval reason; /* reason partition is deactivating */
 	int reason_line; /* line# deactivation initiated from */
 	int reactivate_nasid; /* nasid in partition to reactivate */
 
+	unsigned long disengage_request_timeout; /* timeout in XPC_TICKS */
+	struct timer_list disengage_request_timer;
+
 
 	/* XPC infrastructure referencing and teardown control */
 
@@ -454,6 +529,7 @@ struct xpc_partition {
 
 	u8 nchannels; /* #of defined channels supported */
 	atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
+	atomic_t nchannels_engaged;/* #of channels engaged with remote part */
 	struct xpc_channel *channels;/* array of channel structures */
 
 	void *local_GPs_base; /* base address of kmalloc'd space */
@@ -518,6 +594,7 @@ struct xpc_partition {
 #define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */
 
 
+
 /*
  * struct xpc_partition IPI_timer #of seconds to wait before checking for
  * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
@@ -526,6 +603,13 @@ struct xpc_partition {
 #define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
 
 
+/* number of seconds to wait for other partitions to disengage */
+#define XPC_DISENGAGE_REQUEST_TIMELIMIT 90
+
+/* interval in seconds to print 'waiting disengagement' messages */
+#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10
+
+
 #define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
 
 
@@ -550,8 +634,6 @@ extern void xpc_activate_partition(struct xpc_partition *);
 
 /* found in xpc_partition.c */
 extern int xpc_exiting;
-extern int xpc_hb_interval;
-extern int xpc_hb_check_interval;
 extern struct xpc_vars *xpc_vars;
 extern struct xpc_rsvd_page *xpc_rsvd_page;
 extern struct xpc_vars_part *xpc_vars_part;
@@ -561,6 +643,7 @@ extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
 extern void xpc_allow_IPI_ops(void);
 extern void xpc_restrict_IPI_ops(void);
 extern int xpc_identify_act_IRQ_sender(void);
+extern int xpc_partition_disengaged(struct xpc_partition *);
 extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
 extern void xpc_mark_partition_inactive(struct xpc_partition *);
 extern void xpc_discovery(void);
@@ -585,8 +668,8 @@ extern void xpc_connected_callout(struct xpc_channel *);
 extern void xpc_deliver_msg(struct xpc_channel *);
 extern void xpc_disconnect_channel(const int, struct xpc_channel *,
 					enum xpc_retval, unsigned long *);
-extern void xpc_disconnected_callout(struct xpc_channel *);
-extern void xpc_partition_down(struct xpc_partition *, enum xpc_retval);
+extern void xpc_disconnecting_callout(struct xpc_channel *);
+extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
 extern void xpc_teardown_infrastructure(struct xpc_partition *);
 
 
@@ -674,6 +757,157 @@ xpc_part_ref(struct xpc_partition *part)
 
 
 /*
+ * This next set of inlines are used to keep track of when a partition is
+ * potentially engaged in accessing memory belonging to another partition.
+ */
+
+static inline void
+xpc_mark_partition_engaged(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
+			(XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
+
+
+	local_irq_save(irq_flags);
+
+	/* set bit corresponding to our partid in remote partition's AMO */
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
+						(1UL << sn_partition_id));
+	/*
+	 * We must always use the nofault function regardless of whether we
+	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+	 * didn't, we'd never know that the other partition is down and would
+	 * keep sending IPIs and AMOs to it until the heartbeat times out.
+	 */
+	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
+				variable), xp_nofault_PIOR_target));
+
+	local_irq_restore(irq_flags);
+}
+
+static inline void
+xpc_mark_partition_disengaged(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
+			(XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
+
+
+	local_irq_save(irq_flags);
+
+	/* clear bit corresponding to our partid in remote partition's AMO */
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
+						~(1UL << sn_partition_id));
+	/*
+	 * We must always use the nofault function regardless of whether we
+	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+	 * didn't, we'd never know that the other partition is down and would
+	 * keep sending IPIs and AMOs to it until the heartbeat times out.
+	 */
+	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
+				variable), xp_nofault_PIOR_target));
+
+	local_irq_restore(irq_flags);
+}
+
+static inline void
+xpc_request_partition_disengage(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
+			(XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
+
+
+	local_irq_save(irq_flags);
+
+	/* set bit corresponding to our partid in remote partition's AMO */
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
+						(1UL << sn_partition_id));
+	/*
+	 * We must always use the nofault function regardless of whether we
+	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+	 * didn't, we'd never know that the other partition is down and would
+	 * keep sending IPIs and AMOs to it until the heartbeat times out.
+	 */
+	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
+				variable), xp_nofault_PIOR_target));
+
+	local_irq_restore(irq_flags);
+}
+
+static inline void
+xpc_cancel_partition_disengage_request(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
+			(XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
+
+
+	local_irq_save(irq_flags);
+
+	/* clear bit corresponding to our partid in remote partition's AMO */
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
+						~(1UL << sn_partition_id));
+	/*
+	 * We must always use the nofault function regardless of whether we
+	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+	 * didn't, we'd never know that the other partition is down and would
+	 * keep sending IPIs and AMOs to it until the heartbeat times out.
+	 */
+	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
+				variable), xp_nofault_PIOR_target));
+
+	local_irq_restore(irq_flags);
+}
+
+static inline u64
+xpc_partition_engaged(u64 partid_mask)
+{
+	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
+
+
+	/* return our partition's AMO variable ANDed with partid_mask */
+	return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
+								partid_mask);
+}
+
+static inline u64
+xpc_partition_disengage_requested(u64 partid_mask)
+{
+	AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
+
+
+	/* return our partition's AMO variable ANDed with partid_mask */
+	return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
+								partid_mask);
+}
+
+static inline void
+xpc_clear_partition_engaged(u64 partid_mask)
+{
+	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
+
+
+	/* clear bit(s) based on partid_mask in our partition's AMO */
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
+							~partid_mask);
+}
+
+static inline void
+xpc_clear_partition_disengage_request(u64 partid_mask)
+{
+	AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
+
+
+	/* clear bit(s) based on partid_mask in our partition's AMO */
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
+							~partid_mask);
+}
+
+
+
+/*
  * The following set of macros and inlines are used for the sending and
  * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
  * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
@@ -722,13 +956,13 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
  * Flag the appropriate AMO variable and send an IPI to the specified node.
  */
 static inline void
-xpc_activate_IRQ_send(u64 amos_page, int from_nasid, int to_nasid,
+xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
 					int to_phys_cpuid)
 {
 	int w_index = XPC_NASID_W_INDEX(from_nasid);
 	int b_index = XPC_NASID_B_INDEX(from_nasid);
-	AMO_t *amos = (AMO_t *) __va(amos_page +
-			(XP_MAX_PARTITIONS * sizeof(AMO_t)));
+	AMO_t *amos = (AMO_t *) __va(amos_page_pa +
+			(XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
 
 
 	(void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
@@ -756,6 +990,13 @@ xpc_IPI_send_reactivate(struct xpc_partition *part)
 			xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
 }
 
+static inline void
+xpc_IPI_send_disengage(struct xpc_partition *part)
+{
+	xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
+			part->remote_act_nasid, part->remote_act_phys_cpuid);
+}
+
 
 /*
  * IPIs associated with SGI_XPC_NOTIFY IRQ.
@@ -903,17 +1144,18 @@ xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
  * cacheable mapping for the entire region. This will prevent speculative
  * reading of cached copies of our lines from being issued which will cause
  * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
- * (XP_MAX_PARTITIONS) AMO variables for message notification (xpc_main.c)
- * and an additional 16 AMO variables for partition activation (xpc_hb.c).
+ * (XP_MAX_PARTITIONS) AMO variables for message notification and an
+ * additional 16 (XP_NASID_MASK_WORDS) AMO variables for partition activation
+ * and 2 AMO variables for partition deactivation.
  */
 static inline AMO_t *
-xpc_IPI_init(partid_t partid)
+xpc_IPI_init(int index)
 {
-	AMO_t *part_amo = xpc_vars->amos_page + partid;
+	AMO_t *amo = xpc_vars->amos_page + index;
 
 
-	xpc_IPI_receive(part_amo);
-	return part_amo;
+	(void) xpc_IPI_receive(amo); /* clear AMO variable */
+	return amo;
 }
 
 
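
One consequence of the reworked AMO page layout, sketched here for clarity: xpc_IPI_init() now takes an index into the AMO page rather than a partid, so the two new deactivation AMOs can be cleared at startup with the index constants defined above. The xpc_clear_deactivation_amos() wrapper below is hypothetical; only xpc_IPI_init(), XPC_ENGAGED_PARTITIONS_AMO and XPC_DISENGAGE_REQUEST_AMO come from this patch.

static void
xpc_clear_deactivation_amos(void)
{
	/* bits set here say which remote partitions are engaged with us */
	(void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);

	/* bits set here say which remote partitions asked us to disengage */
	(void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
}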