diff options
author | Dean Nelson <dcn@sgi.com> | 2008-07-30 01:34:07 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-30 12:41:49 -0400 |
commit | 33ba3c7724be79f7cdbfc611335572c056d9a05a (patch) | |
tree | db0371c031b0bbab13ccb2aeaf015424633cf2d8 /drivers/misc/sgi-xp/xpc.h | |
parent | e17d416b1bc947df68499863f13b401fb42b48f6 (diff) |
sgi-xp: isolate xpc_vars structure to sn2 only
Isolate the xpc_vars structure of XPC's reserved page to sn2 only.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc/sgi-xp/xpc.h')
-rw-r--r-- | drivers/misc/sgi-xp/xpc.h | 529 |
1 files changed, 97 insertions, 432 deletions
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index e8c2a1629606..a3a67485cf8d 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -159,10 +159,10 @@ xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2) | |||
159 | * reflected by incrementing either the major or minor version numbers | 159 | * reflected by incrementing either the major or minor version numbers |
160 | * of struct xpc_vars. | 160 | * of struct xpc_vars. |
161 | */ | 161 | */ |
162 | struct xpc_vars { | 162 | struct xpc_vars_sn2 { |
163 | u8 version; | 163 | u8 version; |
164 | u64 heartbeat; | 164 | u64 heartbeat; |
165 | u64 heartbeating_to_mask; | 165 | DECLARE_BITMAP(heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2); |
166 | u64 heartbeat_offline; /* if 0, heartbeat should be changing */ | 166 | u64 heartbeat_offline; /* if 0, heartbeat should be changing */ |
167 | int act_nasid; | 167 | int act_nasid; |
168 | int act_phys_cpuid; | 168 | int act_phys_cpuid; |
@@ -176,46 +176,23 @@ struct xpc_vars { | |||
176 | #define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \ | 176 | #define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \ |
177 | (_version >= _XPC_VERSION(3, 1)) | 177 | (_version >= _XPC_VERSION(3, 1)) |
178 | 178 | ||
179 | static inline int | ||
180 | xpc_hb_allowed(short partid, struct xpc_vars *vars) | ||
181 | { | ||
182 | return ((vars->heartbeating_to_mask & (1UL << partid)) != 0); | ||
183 | } | ||
184 | |||
185 | static inline void | ||
186 | xpc_allow_hb(short partid, struct xpc_vars *vars) | ||
187 | { | ||
188 | u64 old_mask, new_mask; | ||
189 | |||
190 | do { | ||
191 | old_mask = vars->heartbeating_to_mask; | ||
192 | new_mask = (old_mask | (1UL << partid)); | ||
193 | } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) != | ||
194 | old_mask); | ||
195 | } | ||
196 | |||
197 | static inline void | ||
198 | xpc_disallow_hb(short partid, struct xpc_vars *vars) | ||
199 | { | ||
200 | u64 old_mask, new_mask; | ||
201 | |||
202 | do { | ||
203 | old_mask = vars->heartbeating_to_mask; | ||
204 | new_mask = (old_mask & ~(1UL << partid)); | ||
205 | } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) != | ||
206 | old_mask); | ||
207 | } | ||
208 | |||
209 | /* | 179 | /* |
210 | * The AMOs page consists of a number of AMO variables which are divided into | 180 | * The following pertains to ia64-sn2 only. |
211 | * four groups, The first two groups are used to identify an IRQ's sender. | 181 | * |
212 | * These two groups consist of 64 and 128 AMO variables respectively. The last | 182 | * Memory for XPC's AMO variables is allocated by the MSPEC driver. These |
213 | * two groups, consisting of just one AMO variable each, are used to identify | 183 | * pages are located in the lowest granule. The lowest granule uses 4k pages |
214 | * the remote partitions that are currently engaged (from the viewpoint of | 184 | * for cached references and an alternate TLB handler to never provide a |
215 | * the XPC running on the remote partition). | 185 | * cacheable mapping for the entire region. This will prevent speculative |
186 | * reading of cached copies of our lines from being issued which will cause | ||
187 | * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 | ||
188 | * AMO variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of | ||
189 | * NOTIFY IRQs, 128 AMO variables (based on XP_NASID_MASK_WORDS) to identify | ||
190 | * the senders of ACTIVATE IRQs, and 2 AMO variables to identify which remote | ||
191 | * partitions (i.e., XPCs) consider themselves currently engaged with the | ||
192 | * local XPC. | ||
216 | */ | 193 | */ |
217 | #define XPC_NOTIFY_IRQ_AMOS 0 | 194 | #define XPC_NOTIFY_IRQ_AMOS 0 |
218 | #define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_NPARTITIONS_SN2) | 195 | #define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_NPARTITIONS_SN2) |
219 | #define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS) | 196 | #define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS) |
220 | #define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1) | 197 | #define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1) |
221 | 198 | ||
@@ -259,11 +236,11 @@ struct xpc_vars_part_sn2 { | |||
259 | /* the reserved page sizes and offsets */ | 236 | /* the reserved page sizes and offsets */ |
260 | 237 | ||
261 | #define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)) | 238 | #define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)) |
262 | #define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars)) | 239 | #define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars_sn2)) |
263 | 240 | ||
264 | #define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE)) | 241 | #define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE)) |
265 | #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words) | 242 | #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words) |
266 | #define XPC_RP_VARS(_rp) ((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \ | 243 | #define XPC_RP_VARS(_rp) ((struct xpc_vars_sn2 *)(XPC_RP_MACH_NASIDS(_rp) + \ |
267 | xp_nasid_mask_words)) | 244 | xp_nasid_mask_words)) |
268 | 245 | ||
269 | /* | 246 | /* |
@@ -344,6 +321,7 @@ struct xpc_notify { | |||
344 | * allocated at the time a partition becomes active. The array contains one | 321 | * allocated at the time a partition becomes active. The array contains one |
345 | * of these structures for each potential channel connection to that partition. | 322 | * of these structures for each potential channel connection to that partition. |
346 | * | 323 | * |
324 | >>> sn2 only!!! | ||
347 | * Each of these structures manages two message queues (circular buffers). | 325 | * Each of these structures manages two message queues (circular buffers). |
348 | * They are allocated at the time a channel connection is made. One of | 326 | * They are allocated at the time a channel connection is made. One of |
349 | * these message queues (local_msgqueue) holds the locally created messages | 327 | * these message queues (local_msgqueue) holds the locally created messages |
@@ -622,6 +600,9 @@ extern struct device *xpc_part; | |||
622 | extern struct device *xpc_chan; | 600 | extern struct device *xpc_chan; |
623 | extern int xpc_disengage_request_timelimit; | 601 | extern int xpc_disengage_request_timelimit; |
624 | extern int xpc_disengage_request_timedout; | 602 | extern int xpc_disengage_request_timedout; |
603 | extern atomic_t xpc_act_IRQ_rcvd; | ||
604 | extern wait_queue_head_t xpc_act_IRQ_wq; | ||
605 | extern void *xpc_heartbeating_to_mask; | ||
625 | extern irqreturn_t xpc_notify_IRQ_handler(int, void *); | 606 | extern irqreturn_t xpc_notify_IRQ_handler(int, void *); |
626 | extern void xpc_dropped_IPI_check(struct xpc_partition *); | 607 | extern void xpc_dropped_IPI_check(struct xpc_partition *); |
627 | extern void xpc_activate_partition(struct xpc_partition *); | 608 | extern void xpc_activate_partition(struct xpc_partition *); |
@@ -629,15 +610,48 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int); | |||
629 | extern void xpc_create_kthreads(struct xpc_channel *, int, int); | 610 | extern void xpc_create_kthreads(struct xpc_channel *, int, int); |
630 | extern void xpc_disconnect_wait(int); | 611 | extern void xpc_disconnect_wait(int); |
631 | extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *); | 612 | extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *); |
613 | extern void (*xpc_heartbeat_init) (void); | ||
614 | extern void (*xpc_heartbeat_exit) (void); | ||
615 | extern void (*xpc_increment_heartbeat) (void); | ||
616 | extern void (*xpc_offline_heartbeat) (void); | ||
617 | extern void (*xpc_online_heartbeat) (void); | ||
618 | extern void (*xpc_check_remote_hb) (void); | ||
632 | extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *); | 619 | extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *); |
633 | extern u64 (*xpc_get_IPI_flags) (struct xpc_partition *); | 620 | extern u64 (*xpc_get_IPI_flags) (struct xpc_partition *); |
634 | extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *); | 621 | extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *); |
622 | extern void (*xpc_initiate_partition_activation) (struct xpc_rsvd_page *, u64, | ||
623 | int); | ||
624 | extern void (*xpc_process_act_IRQ_rcvd) (int); | ||
635 | extern enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *); | 625 | extern enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *); |
636 | extern void (*xpc_teardown_infrastructure) (struct xpc_partition *); | 626 | extern void (*xpc_teardown_infrastructure) (struct xpc_partition *); |
627 | extern void (*xpc_mark_partition_engaged) (struct xpc_partition *); | ||
628 | extern void (*xpc_mark_partition_disengaged) (struct xpc_partition *); | ||
629 | extern void (*xpc_request_partition_disengage) (struct xpc_partition *); | ||
630 | extern void (*xpc_cancel_partition_disengage_request) (struct xpc_partition *); | ||
631 | extern u64 (*xpc_partition_engaged) (u64); | ||
632 | extern u64 (*xpc_partition_disengage_requested) (u64);; | ||
633 | extern void (*xpc_clear_partition_engaged) (u64); | ||
634 | extern void (*xpc_clear_partition_disengage_request) (u64); | ||
635 | |||
636 | extern void (*xpc_IPI_send_local_activate) (int); | ||
637 | extern void (*xpc_IPI_send_activated) (struct xpc_partition *); | ||
638 | extern void (*xpc_IPI_send_local_reactivate) (int); | ||
639 | extern void (*xpc_IPI_send_disengage) (struct xpc_partition *); | ||
640 | |||
641 | extern void (*xpc_IPI_send_closerequest) (struct xpc_channel *, | ||
642 | unsigned long *); | ||
643 | extern void (*xpc_IPI_send_closereply) (struct xpc_channel *, unsigned long *); | ||
644 | extern void (*xpc_IPI_send_openrequest) (struct xpc_channel *, unsigned long *); | ||
645 | extern void (*xpc_IPI_send_openreply) (struct xpc_channel *, unsigned long *); | ||
646 | |||
647 | extern enum xp_retval (*xpc_allocate_msg) (struct xpc_channel *, u32, | ||
648 | struct xpc_msg **); | ||
649 | extern enum xp_retval (*xpc_send_msg) (struct xpc_channel *, struct xpc_msg *, | ||
650 | u8, xpc_notify_func, void *); | ||
651 | extern void (*xpc_received_msg) (struct xpc_channel *, struct xpc_msg *); | ||
637 | 652 | ||
638 | /* found in xpc_sn2.c */ | 653 | /* found in xpc_sn2.c */ |
639 | extern void xpc_init_sn2(void); | 654 | extern void xpc_init_sn2(void); |
640 | extern struct xpc_vars *xpc_vars; /*>>> eliminate from here */ | ||
641 | 655 | ||
642 | /* found in xpc_uv.c */ | 656 | /* found in xpc_uv.c */ |
643 | extern void xpc_init_uv(void); | 657 | extern void xpc_init_uv(void); |
@@ -646,6 +660,7 @@ extern void xpc_init_uv(void); | |||
646 | extern int xpc_exiting; | 660 | extern int xpc_exiting; |
647 | extern int xp_nasid_mask_words; | 661 | extern int xp_nasid_mask_words; |
648 | extern struct xpc_rsvd_page *xpc_rsvd_page; | 662 | extern struct xpc_rsvd_page *xpc_rsvd_page; |
663 | extern u64 *xpc_mach_nasids; | ||
649 | extern struct xpc_partition *xpc_partitions; | 664 | extern struct xpc_partition *xpc_partitions; |
650 | extern char *xpc_remote_copy_buffer; | 665 | extern char *xpc_remote_copy_buffer; |
651 | extern void *xpc_remote_copy_buffer_base; | 666 | extern void *xpc_remote_copy_buffer_base; |
@@ -658,7 +673,8 @@ extern int xpc_partition_disengaged(struct xpc_partition *); | |||
658 | extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); | 673 | extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); |
659 | extern void xpc_mark_partition_inactive(struct xpc_partition *); | 674 | extern void xpc_mark_partition_inactive(struct xpc_partition *); |
660 | extern void xpc_discovery(void); | 675 | extern void xpc_discovery(void); |
661 | extern void xpc_check_remote_hb(void); | 676 | extern enum xp_retval xpc_get_remote_rp(int, u64 *, struct xpc_rsvd_page *, |
677 | u64 *); | ||
662 | extern void xpc_deactivate_partition(const int, struct xpc_partition *, | 678 | extern void xpc_deactivate_partition(const int, struct xpc_partition *, |
663 | enum xp_retval); | 679 | enum xp_retval); |
664 | extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); | 680 | extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); |
@@ -667,6 +683,7 @@ extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); | |||
667 | extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **); | 683 | extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **); |
668 | extern void xpc_initiate_connect(int); | 684 | extern void xpc_initiate_connect(int); |
669 | extern void xpc_initiate_disconnect(int); | 685 | extern void xpc_initiate_disconnect(int); |
686 | extern enum xp_retval xpc_allocate_msg_wait(struct xpc_channel *); | ||
670 | extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **); | 687 | extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **); |
671 | extern enum xp_retval xpc_initiate_send(short, int, void *); | 688 | extern enum xp_retval xpc_initiate_send(short, int, void *); |
672 | extern enum xp_retval xpc_initiate_send_notify(short, int, void *, | 689 | extern enum xp_retval xpc_initiate_send_notify(short, int, void *, |
@@ -680,6 +697,40 @@ extern void xpc_disconnect_channel(const int, struct xpc_channel *, | |||
680 | extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); | 697 | extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); |
681 | extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); | 698 | extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); |
682 | 699 | ||
700 | static inline int | ||
701 | xpc_hb_allowed(short partid, void *heartbeating_to_mask) | ||
702 | { | ||
703 | return test_bit(partid, heartbeating_to_mask); | ||
704 | } | ||
705 | |||
706 | static inline int | ||
707 | xpc_any_hbs_allowed(void) | ||
708 | { | ||
709 | DBUG_ON(xpc_heartbeating_to_mask == NULL); | ||
710 | return !bitmap_empty(xpc_heartbeating_to_mask, xp_max_npartitions); | ||
711 | } | ||
712 | |||
713 | static inline void | ||
714 | xpc_allow_hb(short partid) | ||
715 | { | ||
716 | DBUG_ON(xpc_heartbeating_to_mask == NULL); | ||
717 | set_bit(partid, xpc_heartbeating_to_mask); | ||
718 | } | ||
719 | |||
720 | static inline void | ||
721 | xpc_disallow_hb(short partid) | ||
722 | { | ||
723 | DBUG_ON(xpc_heartbeating_to_mask == NULL); | ||
724 | clear_bit(partid, xpc_heartbeating_to_mask); | ||
725 | } | ||
726 | |||
727 | static inline void | ||
728 | xpc_disallow_all_hbs(void) | ||
729 | { | ||
730 | DBUG_ON(xpc_heartbeating_to_mask == NULL); | ||
731 | bitmap_zero(xpc_heartbeating_to_mask, xp_max_npartitions); | ||
732 | } | ||
733 | |||
683 | static inline void | 734 | static inline void |
684 | xpc_wakeup_channel_mgr(struct xpc_partition *part) | 735 | xpc_wakeup_channel_mgr(struct xpc_partition *part) |
685 | { | 736 | { |
@@ -749,297 +800,7 @@ xpc_part_ref(struct xpc_partition *part) | |||
749 | } | 800 | } |
750 | 801 | ||
751 | /* | 802 | /* |
752 | * This next set of inlines are used to keep track of when a partition is | 803 | * The sending and receiving of IPIs includes the setting of an >>>AMO variable |
753 | * potentially engaged in accessing memory belonging to another partition. | ||
754 | */ | ||
755 | |||
756 | static inline void | ||
757 | xpc_mark_partition_engaged(struct xpc_partition *part) | ||
758 | { | ||
759 | unsigned long irq_flags; | ||
760 | AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + | ||
761 | (XPC_ENGAGED_PARTITIONS_AMO * | ||
762 | sizeof(AMO_t))); | ||
763 | |||
764 | local_irq_save(irq_flags); | ||
765 | |||
766 | /* set bit corresponding to our partid in remote partition's AMO */ | ||
767 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, | ||
768 | (1UL << sn_partition_id)); | ||
769 | /* | ||
770 | * We must always use the nofault function regardless of whether we | ||
771 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we | ||
772 | * didn't, we'd never know that the other partition is down and would | ||
773 | * keep sending IPIs and AMOs to it until the heartbeat times out. | ||
774 | */ | ||
775 | (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> | ||
776 | variable), | ||
777 | xp_nofault_PIOR_target)); | ||
778 | |||
779 | local_irq_restore(irq_flags); | ||
780 | } | ||
781 | |||
782 | static inline void | ||
783 | xpc_mark_partition_disengaged(struct xpc_partition *part) | ||
784 | { | ||
785 | unsigned long irq_flags; | ||
786 | AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + | ||
787 | (XPC_ENGAGED_PARTITIONS_AMO * | ||
788 | sizeof(AMO_t))); | ||
789 | |||
790 | local_irq_save(irq_flags); | ||
791 | |||
792 | /* clear bit corresponding to our partid in remote partition's AMO */ | ||
793 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, | ||
794 | ~(1UL << sn_partition_id)); | ||
795 | /* | ||
796 | * We must always use the nofault function regardless of whether we | ||
797 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we | ||
798 | * didn't, we'd never know that the other partition is down and would | ||
799 | * keep sending IPIs and AMOs to it until the heartbeat times out. | ||
800 | */ | ||
801 | (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> | ||
802 | variable), | ||
803 | xp_nofault_PIOR_target)); | ||
804 | |||
805 | local_irq_restore(irq_flags); | ||
806 | } | ||
807 | |||
808 | static inline void | ||
809 | xpc_request_partition_disengage(struct xpc_partition *part) | ||
810 | { | ||
811 | unsigned long irq_flags; | ||
812 | AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + | ||
813 | (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); | ||
814 | |||
815 | local_irq_save(irq_flags); | ||
816 | |||
817 | /* set bit corresponding to our partid in remote partition's AMO */ | ||
818 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, | ||
819 | (1UL << sn_partition_id)); | ||
820 | /* | ||
821 | * We must always use the nofault function regardless of whether we | ||
822 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we | ||
823 | * didn't, we'd never know that the other partition is down and would | ||
824 | * keep sending IPIs and AMOs to it until the heartbeat times out. | ||
825 | */ | ||
826 | (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> | ||
827 | variable), | ||
828 | xp_nofault_PIOR_target)); | ||
829 | |||
830 | local_irq_restore(irq_flags); | ||
831 | } | ||
832 | |||
833 | static inline void | ||
834 | xpc_cancel_partition_disengage_request(struct xpc_partition *part) | ||
835 | { | ||
836 | unsigned long irq_flags; | ||
837 | AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + | ||
838 | (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); | ||
839 | |||
840 | local_irq_save(irq_flags); | ||
841 | |||
842 | /* clear bit corresponding to our partid in remote partition's AMO */ | ||
843 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, | ||
844 | ~(1UL << sn_partition_id)); | ||
845 | /* | ||
846 | * We must always use the nofault function regardless of whether we | ||
847 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we | ||
848 | * didn't, we'd never know that the other partition is down and would | ||
849 | * keep sending IPIs and AMOs to it until the heartbeat times out. | ||
850 | */ | ||
851 | (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> | ||
852 | variable), | ||
853 | xp_nofault_PIOR_target)); | ||
854 | |||
855 | local_irq_restore(irq_flags); | ||
856 | } | ||
857 | |||
858 | static inline u64 | ||
859 | xpc_partition_engaged(u64 partid_mask) | ||
860 | { | ||
861 | AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; | ||
862 | |||
863 | /* return our partition's AMO variable ANDed with partid_mask */ | ||
864 | return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & | ||
865 | partid_mask); | ||
866 | } | ||
867 | |||
868 | static inline u64 | ||
869 | xpc_partition_disengage_requested(u64 partid_mask) | ||
870 | { | ||
871 | AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; | ||
872 | |||
873 | /* return our partition's AMO variable ANDed with partid_mask */ | ||
874 | return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & | ||
875 | partid_mask); | ||
876 | } | ||
877 | |||
878 | static inline void | ||
879 | xpc_clear_partition_engaged(u64 partid_mask) | ||
880 | { | ||
881 | AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; | ||
882 | |||
883 | /* clear bit(s) based on partid_mask in our partition's AMO */ | ||
884 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, | ||
885 | ~partid_mask); | ||
886 | } | ||
887 | |||
888 | static inline void | ||
889 | xpc_clear_partition_disengage_request(u64 partid_mask) | ||
890 | { | ||
891 | AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; | ||
892 | |||
893 | /* clear bit(s) based on partid_mask in our partition's AMO */ | ||
894 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, | ||
895 | ~partid_mask); | ||
896 | } | ||
897 | |||
898 | /* | ||
899 | * The following set of macros and inlines are used for the sending and | ||
900 | * receiving of IPIs (also known as IRQs). There are two flavors of IPIs, | ||
901 | * one that is associated with partition activity (SGI_XPC_ACTIVATE) and | ||
902 | * the other that is associated with channel activity (SGI_XPC_NOTIFY). | ||
903 | */ | ||
904 | |||
905 | static inline u64 | ||
906 | xpc_IPI_receive(AMO_t *amo) | ||
907 | { | ||
908 | return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR); | ||
909 | } | ||
910 | |||
911 | static inline enum xp_retval | ||
912 | xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) | ||
913 | { | ||
914 | int ret = 0; | ||
915 | unsigned long irq_flags; | ||
916 | |||
917 | local_irq_save(irq_flags); | ||
918 | |||
919 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag); | ||
920 | sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); | ||
921 | |||
922 | /* | ||
923 | * We must always use the nofault function regardless of whether we | ||
924 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we | ||
925 | * didn't, we'd never know that the other partition is down and would | ||
926 | * keep sending IPIs and AMOs to it until the heartbeat times out. | ||
927 | */ | ||
928 | ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), | ||
929 | xp_nofault_PIOR_target)); | ||
930 | |||
931 | local_irq_restore(irq_flags); | ||
932 | |||
933 | return ((ret == 0) ? xpSuccess : xpPioReadError); | ||
934 | } | ||
935 | |||
936 | /* | ||
937 | * IPIs associated with SGI_XPC_ACTIVATE IRQ. | ||
938 | */ | ||
939 | |||
940 | /* | ||
941 | * Flag the appropriate AMO variable and send an IPI to the specified node. | ||
942 | */ | ||
943 | static inline void | ||
944 | xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid, | ||
945 | int to_phys_cpuid) | ||
946 | { | ||
947 | int w_index = XPC_NASID_W_INDEX(from_nasid); | ||
948 | int b_index = XPC_NASID_B_INDEX(from_nasid); | ||
949 | AMO_t *amos = (AMO_t *)__va(amos_page_pa + | ||
950 | (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t))); | ||
951 | |||
952 | (void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, | ||
953 | to_phys_cpuid, SGI_XPC_ACTIVATE); | ||
954 | } | ||
955 | |||
956 | static inline void | ||
957 | xpc_IPI_send_activate(struct xpc_vars *vars) | ||
958 | { | ||
959 | xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0), | ||
960 | vars->act_nasid, vars->act_phys_cpuid); | ||
961 | } | ||
962 | |||
963 | static inline void | ||
964 | xpc_IPI_send_activated(struct xpc_partition *part) | ||
965 | { | ||
966 | xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), | ||
967 | part->remote_act_nasid, | ||
968 | part->remote_act_phys_cpuid); | ||
969 | } | ||
970 | |||
971 | static inline void | ||
972 | xpc_IPI_send_reactivate(struct xpc_partition *part) | ||
973 | { | ||
974 | xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid, | ||
975 | xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); | ||
976 | } | ||
977 | |||
978 | static inline void | ||
979 | xpc_IPI_send_disengage(struct xpc_partition *part) | ||
980 | { | ||
981 | xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), | ||
982 | part->remote_act_nasid, | ||
983 | part->remote_act_phys_cpuid); | ||
984 | } | ||
985 | |||
986 | /* | ||
987 | * IPIs associated with SGI_XPC_NOTIFY IRQ. | ||
988 | */ | ||
989 | |||
990 | /* | ||
991 | * Send an IPI to the remote partition that is associated with the | ||
992 | * specified channel. | ||
993 | */ | ||
994 | #define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \ | ||
995 | xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f) | ||
996 | |||
997 | static inline void | ||
998 | xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, | ||
999 | unsigned long *irq_flags) | ||
1000 | { | ||
1001 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | ||
1002 | enum xp_retval ret; | ||
1003 | |||
1004 | if (likely(part->act_state != XPC_P_DEACTIVATING)) { | ||
1005 | ret = xpc_IPI_send(part->remote_IPI_amo_va, | ||
1006 | (u64)ipi_flag << (ch->number * 8), | ||
1007 | part->remote_IPI_nasid, | ||
1008 | part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY); | ||
1009 | dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", | ||
1010 | ipi_flag_string, ch->partid, ch->number, ret); | ||
1011 | if (unlikely(ret != xpSuccess)) { | ||
1012 | if (irq_flags != NULL) | ||
1013 | spin_unlock_irqrestore(&ch->lock, *irq_flags); | ||
1014 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
1015 | if (irq_flags != NULL) | ||
1016 | spin_lock_irqsave(&ch->lock, *irq_flags); | ||
1017 | } | ||
1018 | } | ||
1019 | } | ||
1020 | |||
1021 | /* | ||
1022 | * Make it look like the remote partition, which is associated with the | ||
1023 | * specified channel, sent us an IPI. This faked IPI will be handled | ||
1024 | * by xpc_dropped_IPI_check(). | ||
1025 | */ | ||
1026 | #define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \ | ||
1027 | xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f) | ||
1028 | |||
1029 | static inline void | ||
1030 | xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, | ||
1031 | char *ipi_flag_string) | ||
1032 | { | ||
1033 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | ||
1034 | |||
1035 | FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable), | ||
1036 | FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8))); | ||
1037 | dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", | ||
1038 | ipi_flag_string, ch->partid, ch->number); | ||
1039 | } | ||
1040 | |||
1041 | /* | ||
1042 | * The sending and receiving of IPIs includes the setting of an AMO variable | ||
1043 | * to indicate the reason the IPI was sent. The 64-bit variable is divided | 804 | * to indicate the reason the IPI was sent. The 64-bit variable is divided |
1044 | * up into eight bytes, ordered from right to left. Byte zero pertains to | 805 | * up into eight bytes, ordered from right to left. Byte zero pertains to |
1045 | * channel 0, byte one to channel 1, and so on. Each byte is described by | 806 | * channel 0, byte one to channel 1, and so on. Each byte is described by |
@@ -1052,107 +813,11 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, | |||
1052 | #define XPC_IPI_OPENREPLY 0x08 | 813 | #define XPC_IPI_OPENREPLY 0x08 |
1053 | #define XPC_IPI_MSGREQUEST 0x10 | 814 | #define XPC_IPI_MSGREQUEST 0x10 |
1054 | 815 | ||
1055 | /* given an AMO variable and a channel#, get its associated IPI flags */ | 816 | /* given an >>>AMO variable and a channel#, get its associated IPI flags */ |
1056 | #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) | 817 | #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) |
1057 | #define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) | 818 | #define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) |
1058 | 819 | ||
1059 | #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL) | 820 | #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL) |
1060 | #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010UL) | 821 | #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010UL) |
1061 | 822 | ||
1062 | static inline void | ||
1063 | xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags) | ||
1064 | { | ||
1065 | struct xpc_openclose_args *args = ch->local_openclose_args; | ||
1066 | |||
1067 | args->reason = ch->reason; | ||
1068 | |||
1069 | XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags); | ||
1070 | } | ||
1071 | |||
1072 | static inline void | ||
1073 | xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags) | ||
1074 | { | ||
1075 | XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags); | ||
1076 | } | ||
1077 | |||
1078 | static inline void | ||
1079 | xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags) | ||
1080 | { | ||
1081 | struct xpc_openclose_args *args = ch->local_openclose_args; | ||
1082 | |||
1083 | args->msg_size = ch->msg_size; | ||
1084 | args->local_nentries = ch->local_nentries; | ||
1085 | |||
1086 | XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags); | ||
1087 | } | ||
1088 | |||
1089 | static inline void | ||
1090 | xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags) | ||
1091 | { | ||
1092 | struct xpc_openclose_args *args = ch->local_openclose_args; | ||
1093 | |||
1094 | args->remote_nentries = ch->remote_nentries; | ||
1095 | args->local_nentries = ch->local_nentries; | ||
1096 | args->local_msgqueue_pa = __pa(ch->local_msgqueue); | ||
1097 | |||
1098 | XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags); | ||
1099 | } | ||
1100 | |||
1101 | static inline void | ||
1102 | xpc_IPI_send_msgrequest(struct xpc_channel *ch) | ||
1103 | { | ||
1104 | XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL); | ||
1105 | } | ||
1106 | |||
1107 | static inline void | ||
1108 | xpc_IPI_send_local_msgrequest(struct xpc_channel *ch) | ||
1109 | { | ||
1110 | XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST); | ||
1111 | } | ||
1112 | |||
1113 | /* | ||
1114 | >>> this block comment needs to be moved and re-written. | ||
1115 | * Memory for XPC's AMO variables is allocated by the MSPEC driver. These | ||
1116 | * pages are located in the lowest granule. The lowest granule uses 4k pages | ||
1117 | * for cached references and an alternate TLB handler to never provide a | ||
1118 | * cacheable mapping for the entire region. This will prevent speculative | ||
1119 | * reading of cached copies of our lines from being issued which will cause | ||
1120 | * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 | ||
1121 | * AMO variables (based on xp_max_npartitions) for message notification and an | ||
1122 | * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition | ||
1123 | * activation and 2 AMO variables for partition deactivation. | ||
1124 | */ | ||
1125 | static inline AMO_t * | ||
1126 | xpc_IPI_init(int index) | ||
1127 | { | ||
1128 | AMO_t *amo = xpc_vars->amos_page + index; | ||
1129 | |||
1130 | (void)xpc_IPI_receive(amo); /* clear AMO variable */ | ||
1131 | return amo; | ||
1132 | } | ||
1133 | |||
1134 | /* | ||
1135 | * Check to see if there is any channel activity to/from the specified | ||
1136 | * partition. | ||
1137 | */ | ||
1138 | static inline void | ||
1139 | xpc_check_for_channel_activity(struct xpc_partition *part) | ||
1140 | { | ||
1141 | u64 IPI_amo; | ||
1142 | unsigned long irq_flags; | ||
1143 | |||
1144 | IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va); | ||
1145 | if (IPI_amo == 0) | ||
1146 | return; | ||
1147 | |||
1148 | spin_lock_irqsave(&part->IPI_lock, irq_flags); | ||
1149 | part->local_IPI_amo |= IPI_amo; | ||
1150 | spin_unlock_irqrestore(&part->IPI_lock, irq_flags); | ||
1151 | |||
1152 | dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n", | ||
1153 | XPC_PARTID(part), IPI_amo); | ||
1154 | |||
1155 | xpc_wakeup_channel_mgr(part); | ||
1156 | } | ||
1157 | |||
1158 | #endif /* _DRIVERS_MISC_SGIXP_XPC_H */ | 823 | #endif /* _DRIVERS_MISC_SGIXP_XPC_H */ |