author		Dean Nelson <dcn@sgi.com>		2005-10-25 15:07:43 -0400
committer	Tony Luck <tony.luck@intel.com>		2005-10-25 19:27:37 -0400
commit		e54af724c1ae3530c95135157776c9be65cdb747
tree		99623edaf5d8fb34c8b43cbd19cf3d7b47d8b8f8
parent		a607c38971fd078865fa9bef39e6c1d4435680c8
[IA64-SGI] fixes for XPC disengage and open/close protocol
This patch addresses a few issues with the open/close protocol that
were revealed by the newly added disengage functionality combined
with more extensive testing.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r--	arch/ia64/sn/kernel/xpc.h           |  21
-rw-r--r--	arch/ia64/sn/kernel/xpc_channel.c   | 117
-rw-r--r--	arch/ia64/sn/kernel/xpc_main.c      | 146
-rw-r--r--	arch/ia64/sn/kernel/xpc_partition.c |   8
-rw-r--r--	include/asm-ia64/sn/xp.h            |   6
5 files changed, 208 insertions(+), 90 deletions(-)
diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h
index 565822ab3d08..ae51d7b4c42e 100644
--- a/arch/ia64/sn/kernel/xpc.h
+++ b/arch/ia64/sn/kernel/xpc.h
@@ -417,6 +417,9 @@ struct xpc_channel {
 	atomic_t n_on_msg_allocate_wq;   /* #on msg allocation wait queue */
 	wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
 
+	u8 delayed_IPI_flags;		/* IPI flags received, but delayed */
+					/* action until channel disconnected */
+
 	/* queue of msg senders who want to be notified when msg received */
 
 	atomic_t n_to_notify;		/* #of msg senders to notify */
@@ -478,7 +481,8 @@ struct xpc_channel {
 
 #define XPC_C_DISCONNECTED	0x00002000 /* channel is disconnected */
 #define XPC_C_DISCONNECTING	0x00004000 /* channel is being disconnected */
-#define XPC_C_WDISCONNECT	0x00008000 /* waiting for channel disconnect */
+#define XPC_C_DISCONNECTCALLOUT	0x00008000 /* chan disconnected callout made */
+#define XPC_C_WDISCONNECT	0x00010000 /* waiting for channel disconnect */
 
 
 
@@ -508,13 +512,13 @@ struct xpc_partition {
 	int reason_line;		/* line# deactivation initiated from */
 	int reactivate_nasid;		/* nasid in partition to reactivate */
 
-	unsigned long disengage_request_timeout; /* timeout in XPC_TICKS */
+	unsigned long disengage_request_timeout; /* timeout in jiffies */
 	struct timer_list disengage_request_timer;
 
 
 	/* XPC infrastructure referencing and teardown control */
 
 	volatile u8 setup_state;	/* infrastructure setup state */
 	wait_queue_head_t teardown_wq;	/* kthread waiting to teardown infra */
 	atomic_t references;		/* #of references to infrastructure */
 
@@ -604,7 +608,7 @@ struct xpc_partition {
 
 
 /* number of seconds to wait for other partitions to disengage */
-#define XPC_DISENGAGE_REQUEST_TIMELIMIT		90
+#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT	90
 
 /* interval in seconds to print 'waiting disengagement' messages */
 #define XPC_DISENGAGE_PRINTMSG_INTERVAL		10
@@ -618,20 +622,18 @@ struct xpc_partition {
 extern struct xpc_registration xpc_registrations[];
 
 
-/* >>> found in xpc_main.c only */
+/* found in xpc_main.c */
 extern struct device *xpc_part;
 extern struct device *xpc_chan;
+extern int xpc_disengage_request_timelimit;
 extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *);
 extern void xpc_dropped_IPI_check(struct xpc_partition *);
+extern void xpc_activate_partition(struct xpc_partition *);
 extern void xpc_activate_kthreads(struct xpc_channel *, int);
 extern void xpc_create_kthreads(struct xpc_channel *, int);
 extern void xpc_disconnect_wait(int);
 
 
-/* found in xpc_main.c and efi-xpc.c */
-extern void xpc_activate_partition(struct xpc_partition *);
-
-
 /* found in xpc_partition.c */
 extern int xpc_exiting;
 extern struct xpc_vars *xpc_vars;
@@ -1077,6 +1079,7 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
 
 /* given an AMO variable and a channel#, get its associated IPI flags */
 #define XPC_GET_IPI_FLAGS(_amo, _c)	((u8) (((_amo) >> ((_c) * 8)) & 0xff))
+#define XPC_SET_IPI_FLAGS(_amo, _c, _f)	(_amo) |= ((u64) (_f) << ((_c) * 8))
 
 #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
 #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010)
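The two macros above treat the 64-bit AMO variable as eight per-channel flag bytes: GET extracts channel _c's byte, SET ORs new flags into it. The 0x0f0f... and 0x1010... masks then test the open/close nibble and the message-request bit of every byte at once. A standalone userspace sketch of the packing (the concrete flag values below are illustrative stand-ins, not quoted from xpc.h):

#include <stdint.h>
#include <stdio.h>

/* same packing as the kernel macros above, in portable types */
#define XPC_GET_IPI_FLAGS(_amo, _c)	((uint8_t) (((_amo) >> ((_c) * 8)) & 0xff))
#define XPC_SET_IPI_FLAGS(_amo, _c, _f)	((_amo) |= ((uint64_t) (_f) << ((_c) * 8)))

int main(void)
{
	uint64_t amo = 0;
	uint8_t closerequest = 0x01;	/* illustrative open/close flag */
	uint8_t msgrequest = 0x10;	/* illustrative message flag */

	XPC_SET_IPI_FLAGS(amo, 2, closerequest);	/* flag channel 2 */
	XPC_SET_IPI_FLAGS(amo, 5, msgrequest);		/* flag channel 5 */

	printf("ch2=0x%02x ch5=0x%02x\n",
	       XPC_GET_IPI_FLAGS(amo, 2), XPC_GET_IPI_FLAGS(amo, 5));
	printf("openclose pending: %s\n",
	       (amo & 0x0f0f0f0f0f0f0f0fULL) ? "yes" : "no");
	return 0;
}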
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 195ac1b8e262..abf4fc2a87bb 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -792,11 +792,20 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 			"reason=%d\n", ch->number, ch->partid, ch->reason);
 	}
 
-	/* wake the thread that is waiting for this channel to disconnect */
 	if (ch->flags & XPC_C_WDISCONNECT) {
 		spin_unlock_irqrestore(&ch->lock, *irq_flags);
 		up(&ch->wdisconnect_sema);
 		spin_lock_irqsave(&ch->lock, *irq_flags);
+
+	} else if (ch->delayed_IPI_flags) {
+		if (part->act_state != XPC_P_DEACTIVATING) {
+			/* time to take action on any delayed IPI flags */
+			spin_lock(&part->IPI_lock);
+			XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
+						ch->delayed_IPI_flags);
+			spin_unlock(&part->IPI_lock);
+		}
+		ch->delayed_IPI_flags = 0;
 	}
 }
 
@@ -818,6 +827,19 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
 
+again:
+
+	if ((ch->flags & XPC_C_DISCONNECTED) &&
+					(ch->flags & XPC_C_WDISCONNECT)) {
+		/*
+		 * Delay processing IPI flags until thread waiting disconnect
+		 * has had a chance to see that the channel is disconnected.
+		 */
+		ch->delayed_IPI_flags |= IPI_flags;
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		return;
+	}
+
 
 	if (IPI_flags & XPC_IPI_CLOSEREQUEST) {
 
@@ -843,14 +865,22 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
 
 			/* both sides have finished disconnecting */
 			xpc_process_disconnect(ch, &irq_flags);
+			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
+			goto again;
 		}
 
 		if (ch->flags & XPC_C_DISCONNECTED) {
-			// >>> explain this section
-
 			if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
-				DBUG_ON(part->act_state !=
-							XPC_P_DEACTIVATING);
+				if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo,
+					ch_number) & XPC_IPI_OPENREQUEST)) {
+
+					DBUG_ON(ch->delayed_IPI_flags != 0);
+					spin_lock(&part->IPI_lock);
+					XPC_SET_IPI_FLAGS(part->local_IPI_amo,
+							ch_number,
+							XPC_IPI_CLOSEREQUEST);
+					spin_unlock(&part->IPI_lock);
+				}
 				spin_unlock_irqrestore(&ch->lock, irq_flags);
 				return;
 			}
@@ -880,9 +910,13 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
 			}
 
 			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
-		} else {
-			xpc_process_disconnect(ch, &irq_flags);
+
+			DBUG_ON(IPI_flags & XPC_IPI_CLOSEREPLY);
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			return;
 		}
+
+		xpc_process_disconnect(ch, &irq_flags);
 	}
 
 
@@ -898,7 +932,20 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
 		}
 
 		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
-		DBUG_ON(!(ch->flags & XPC_C_RCLOSEREQUEST));
+
+		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
+			if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number)
+						& XPC_IPI_CLOSEREQUEST)) {
+
+				DBUG_ON(ch->delayed_IPI_flags != 0);
+				spin_lock(&part->IPI_lock);
+				XPC_SET_IPI_FLAGS(part->local_IPI_amo,
+						ch_number, XPC_IPI_CLOSEREPLY);
+				spin_unlock(&part->IPI_lock);
+			}
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			return;
+		}
 
 		ch->flags |= XPC_C_RCLOSEREPLY;
 
@@ -916,8 +963,14 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
 			"channel=%d\n", args->msg_size, args->local_nentries,
 			ch->partid, ch->number);
 
-		if ((ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) ||
-				part->act_state == XPC_P_DEACTIVATING) {
+		if (part->act_state == XPC_P_DEACTIVATING ||
+					(ch->flags & XPC_C_ROPENREQUEST)) {
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			return;
+		}
+
+		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
+			ch->delayed_IPI_flags |= XPC_IPI_OPENREQUEST;
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			return;
 		}
@@ -931,8 +984,11 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
 		 *    msg_size = size of channel's messages in bytes
 		 *    local_nentries = remote partition's local_nentries
 		 */
-		DBUG_ON(args->msg_size == 0);
-		DBUG_ON(args->local_nentries == 0);
+		if (args->msg_size == 0 || args->local_nentries == 0) {
+			/* assume OPENREQUEST was delayed by mistake */
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			return;
+		}
 
 		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
 		ch->remote_nentries = args->local_nentries;
@@ -970,7 +1026,13 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			return;
 		}
-		DBUG_ON(!(ch->flags & XPC_C_OPENREQUEST));
+		if (!(ch->flags & XPC_C_OPENREQUEST)) {
+			XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
+								&irq_flags);
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			return;
+		}
+
 		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
 		DBUG_ON(ch->flags & XPC_C_CONNECTED);
 
@@ -1024,8 +1086,8 @@ xpc_connect_channel(struct xpc_channel *ch)
 	struct xpc_registration *registration = &xpc_registrations[ch->number];
 
 
-	if (down_interruptible(&registration->sema) != 0) {
-		return xpcInterrupted;
+	if (down_trylock(&registration->sema) != 0) {
+		return xpcRetry;
 	}
 
 	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
@@ -1445,19 +1507,11 @@ xpc_initiate_connect(int ch_number)
 		if (xpc_part_ref(part)) {
 			ch = &part->channels[ch_number];
 
-			if (!(ch->flags & XPC_C_DISCONNECTING)) {
-				DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
-				DBUG_ON(ch->flags & XPC_C_CONNECTED);
-				DBUG_ON(ch->flags & XPC_C_SETUP);
-
-				/*
-				 * Initiate the establishment of a connection
-				 * on the newly registered channel to the
-				 * remote partition.
-				 */
-				xpc_wakeup_channel_mgr(part);
-			}
-
+			/*
+			 * Initiate the establishment of a connection on the
+			 * newly registered channel to the remote partition.
+			 */
+			xpc_wakeup_channel_mgr(part);
 			xpc_part_deref(part);
 		}
 	}
@@ -1467,9 +1521,6 @@ xpc_initiate_connect(int ch_number)
 void
 xpc_connected_callout(struct xpc_channel *ch)
 {
-	unsigned long irq_flags;
-
-
 	/* let the registerer know that a connection has been established */
 
 	if (ch->func != NULL) {
@@ -1482,10 +1533,6 @@ xpc_connected_callout(struct xpc_channel *ch)
 		dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
 			"partid=%d, channel=%d\n", ch->partid, ch->number);
 	}
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	ch->flags |= XPC_C_CONNECTCALLOUT;
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
 }
 
 
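Taken together, the delayed-IPI changes in this file are an accumulate-and-replay scheme: while a channel is both DISCONNECTED and WDISCONNECT (a caller is still parked in xpc_disconnect_wait()), incoming open/close IPI flags are stashed in ch->delayed_IPI_flags rather than acted upon, and once the waiter has observed the disconnect the stashed flags are merged back into the partition's local AMO image for the channel manager to process as if newly arrived. Condensed from the hunks above (a sketch, not compilable on its own; locks and fields as in the patch):

/* park: in xpc_process_openclose_IPI() */
if ((ch->flags & XPC_C_DISCONNECTED) && (ch->flags & XPC_C_WDISCONNECT)) {
	ch->delayed_IPI_flags |= IPI_flags;	/* accumulate for later */
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return;
}

/* replay: in xpc_process_disconnect() and xpc_disconnect_wait() */
if (ch->delayed_IPI_flags) {
	if (part->act_state != XPC_P_DEACTIVATING) {
		spin_lock(&part->IPI_lock);
		XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
					ch->delayed_IPI_flags);
		spin_unlock(&part->IPI_lock);
	}
	ch->delayed_IPI_flags = 0;	/* consumed */
}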
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index feece200b3c3..db349c6d4c58 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -91,6 +91,10 @@ static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
 static int xpc_hb_check_min_interval = 10;
 static int xpc_hb_check_max_interval = 120;
 
+int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
+static int xpc_disengage_request_min_timelimit = 0;
+static int xpc_disengage_request_max_timelimit = 120;
+
 static ctl_table xpc_sys_xpc_hb_dir[] = {
 	{
 		1,
@@ -129,6 +133,19 @@ static ctl_table xpc_sys_xpc_dir[] = {
 		0555,
 		xpc_sys_xpc_hb_dir
 	},
+	{
+		2,
+		"disengage_request_timelimit",
+		&xpc_disengage_request_timelimit,
+		sizeof(int),
+		0644,
+		NULL,
+		&proc_dointvec_minmax,
+		&sysctl_intvec,
+		NULL,
+		&xpc_disengage_request_min_timelimit,
+		&xpc_disengage_request_max_timelimit
+	},
 	{0}
 };
 static ctl_table xpc_sys_dir[] = {
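Assuming xpc_sys_dir registers this table at the top of the sysctl tree (only its declaration is visible in this hunk), the new entry should surface as /proc/sys/xpc/disengage_request_timelimit and be tunable at runtime, for example:

	echo 60 > /proc/sys/xpc/disengage_request_timelimit

proc_dointvec_minmax refuses values outside the [0, 120] window established by the min/max variables above.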
@@ -153,11 +170,11 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
 
 static unsigned long xpc_hb_check_timeout;
 
-/* used as an indication of when the xpc_hb_checker thread is inactive */
-static DECLARE_MUTEX_LOCKED(xpc_hb_checker_inactive);
+/* notification that the xpc_hb_checker thread has exited */
+static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
 
-/* used as an indication of when the xpc_discovery thread is inactive */
-static DECLARE_MUTEX_LOCKED(xpc_discovery_inactive);
+/* notification that the xpc_discovery thread has exited */
+static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
 
 
 static struct timer_list xpc_hb_timer;
@@ -181,7 +198,7 @@ xpc_timeout_partition_disengage_request(unsigned long data)
 	struct xpc_partition *part = (struct xpc_partition *) data;
 
 
-	DBUG_ON(XPC_TICKS < part->disengage_request_timeout);
+	DBUG_ON(jiffies < part->disengage_request_timeout);
 
 	(void) xpc_partition_disengaged(part);
 
@@ -292,8 +309,8 @@ xpc_hb_checker(void *ignore)
 	dev_dbg(xpc_part, "heartbeat checker is exiting\n");
 
 
-	/* mark this thread as inactive */
-	up(&xpc_hb_checker_inactive);
+	/* mark this thread as having exited */
+	up(&xpc_hb_checker_exited);
 	return 0;
 }
 
@@ -312,8 +329,8 @@ xpc_initiate_discovery(void *ignore)
 
 	dev_dbg(xpc_part, "discovery thread is exiting\n");
 
-	/* mark this thread as inactive */
-	up(&xpc_discovery_inactive);
+	/* mark this thread as having exited */
+	up(&xpc_discovery_exited);
 	return 0;
 }
 
@@ -703,6 +720,7 @@ xpc_daemonize_kthread(void *args)
 	struct xpc_partition *part = &xpc_partitions[partid];
 	struct xpc_channel *ch;
 	int n_needed;
+	unsigned long irq_flags;
 
 
 	daemonize("xpc%02dc%d", partid, ch_number);
@@ -713,11 +731,14 @@ xpc_daemonize_kthread(void *args)
 	ch = &part->channels[ch_number];
 
 	if (!(ch->flags & XPC_C_DISCONNECTING)) {
-		DBUG_ON(!(ch->flags & XPC_C_CONNECTED));
 
 		/* let registerer know that connection has been established */
 
-		if (atomic_read(&ch->kthreads_assigned) == 1) {
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		if (!(ch->flags & XPC_C_CONNECTCALLOUT)) {
+			ch->flags |= XPC_C_CONNECTCALLOUT;
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+
 			xpc_connected_callout(ch);
 
 			/*
@@ -732,14 +753,23 @@ xpc_daemonize_kthread(void *args)
 			    !(ch->flags & XPC_C_DISCONNECTING)) {
 				xpc_activate_kthreads(ch, n_needed);
 			}
+		} else {
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
 		}
 
 		xpc_kthread_waitmsgs(part, ch);
 	}
 
 	if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
-		if (ch->flags & XPC_C_CONNECTCALLOUT) {
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		if ((ch->flags & XPC_C_CONNECTCALLOUT) &&
+				!(ch->flags & XPC_C_DISCONNECTCALLOUT)) {
+			ch->flags |= XPC_C_DISCONNECTCALLOUT;
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+
 			xpc_disconnecting_callout(ch);
+		} else {
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
 		}
 		if (atomic_dec_return(&part->nchannels_engaged) == 0) {
 			xpc_mark_partition_disengaged(part);
@@ -780,9 +810,29 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
 
 
 	while (needed-- > 0) {
+
+		/*
+		 * The following is done on behalf of the newly created
+		 * kthread. That kthread is responsible for doing the
+		 * counterpart to the following before it exits.
+		 */
+		(void) xpc_part_ref(part);
+		xpc_msgqueue_ref(ch);
+		if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
+		    atomic_inc_return(&part->nchannels_engaged) == 1) {
+			xpc_mark_partition_engaged(part);
+		}
+
 		pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
 		if (pid < 0) {
 			/* the fork failed */
+			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
+			    atomic_dec_return(&part->nchannels_engaged) == 0) {
+				xpc_mark_partition_disengaged(part);
+				xpc_IPI_send_disengage(part);
+			}
+			xpc_msgqueue_deref(ch);
+			xpc_part_deref(part);
 
 			if (atomic_read(&ch->kthreads_assigned) <
 			    ch->kthreads_idle_limit) {
@@ -802,18 +852,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
 			break;
 		}
 
-		/*
-		 * The following is done on behalf of the newly created
-		 * kthread. That kthread is responsible for doing the
-		 * counterpart to the following before it exits.
-		 */
-		(void) xpc_part_ref(part);
-		xpc_msgqueue_ref(ch);
-		if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
-			if (atomic_inc_return(&part->nchannels_engaged) == 1) {
-				xpc_mark_partition_engaged(part);
-			}
-		}
 		ch->kthreads_created++;	// >>> temporary debug only!!!
 	}
 }
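The two hunks above move the reference and engagement accounting to before kernel_thread(), with a matching rollback when the fork fails. The ordering matters because the child itself drops these references as it exits; taken only after a successful fork, a fast-exiting child could decrement counts its parent had not yet incremented. A self-contained sketch of the acquire-before-spawn pattern using pthreads and a plain atomic count (all names here are hypothetical, not from XPC):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int refs = 1;		/* creator's own reference */

static void get_ref(void) { atomic_fetch_add(&refs, 1); }

static void put_ref(void)
{
	/* atomic_fetch_sub returns the prior value: 1 means we were last */
	if (atomic_fetch_sub(&refs, 1) == 1)
		printf("last reference dropped, tearing down\n");
}

static void *worker(void *arg)
{
	put_ref();	/* counterpart to the reference taken on our behalf */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	get_ref();	/* taken before the spawn, on the child's behalf */
	if (pthread_create(&tid, NULL, worker, NULL) != 0)
		put_ref();	/* spawn failed: roll the child's share back */
	else
		pthread_join(tid, NULL);

	put_ref();	/* creator's own reference */
	return 0;
}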
@@ -826,26 +864,49 @@ xpc_disconnect_wait(int ch_number)
 	partid_t partid;
 	struct xpc_partition *part;
 	struct xpc_channel *ch;
+	int wakeup_channel_mgr;
 
 
 	/* now wait for all callouts to the caller's function to cease */
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
 
-		if (xpc_part_ref(part)) {
-			ch = &part->channels[ch_number];
+		if (!xpc_part_ref(part)) {
+			continue;
+		}
 
-			if (ch->flags & XPC_C_WDISCONNECT) {
-				if (!(ch->flags & XPC_C_DISCONNECTED)) {
-					(void) down(&ch->wdisconnect_sema);
-				}
-				spin_lock_irqsave(&ch->lock, irq_flags);
-				ch->flags &= ~XPC_C_WDISCONNECT;
-				spin_unlock_irqrestore(&ch->lock, irq_flags);
-			}
+		ch = &part->channels[ch_number];
 
+		if (!(ch->flags & XPC_C_WDISCONNECT)) {
 			xpc_part_deref(part);
+			continue;
 		}
+
+		(void) down(&ch->wdisconnect_sema);
+
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
+		wakeup_channel_mgr = 0;
+
+		if (ch->delayed_IPI_flags) {
+			if (part->act_state != XPC_P_DEACTIVATING) {
+				spin_lock(&part->IPI_lock);
+				XPC_SET_IPI_FLAGS(part->local_IPI_amo,
+					ch->number, ch->delayed_IPI_flags);
+				spin_unlock(&part->IPI_lock);
+				wakeup_channel_mgr = 1;
+			}
+			ch->delayed_IPI_flags = 0;
+		}
+
+		ch->flags &= ~XPC_C_WDISCONNECT;
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+		if (wakeup_channel_mgr) {
+			xpc_wakeup_channel_mgr(part);
+		}
+
+		xpc_part_deref(part);
 	}
 }
 
@@ -873,11 +934,11 @@ xpc_do_exit(enum xpc_retval reason)
 	/* ignore all incoming interrupts */
 	free_irq(SGI_XPC_ACTIVATE, NULL);
 
-	/* wait for the discovery thread to mark itself inactive */
-	down(&xpc_discovery_inactive);
+	/* wait for the discovery thread to exit */
+	down(&xpc_discovery_exited);
 
-	/* wait for the heartbeat checker thread to mark itself inactive */
-	down(&xpc_hb_checker_inactive);
+	/* wait for the heartbeat checker thread to exit */
+	down(&xpc_hb_checker_exited);
 
 
 	/* sleep for a 1/3 of a second or so */
@@ -893,6 +954,7 @@ xpc_do_exit(enum xpc_retval reason)
 
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
+
 		if (xpc_partition_disengaged(part) &&
 					part->act_state == XPC_P_INACTIVE) {
 			continue;
@@ -930,7 +992,7 @@ xpc_do_exit(enum xpc_retval reason)
 
 	/* now it's time to eliminate our heartbeat */
 	del_timer_sync(&xpc_hb_timer);
-	DBUG_ON(xpc_vars->heartbeating_to_mask == 0);
+	DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
 
 	/* take ourselves off of the reboot_notifier_list */
 	(void) unregister_reboot_notifier(&xpc_reboot_notifier);
@@ -1134,7 +1196,7 @@ xpc_init(void)
 		dev_err(xpc_part, "failed while forking discovery thread\n");
 
 		/* mark this new thread as a non-starter */
-		up(&xpc_discovery_inactive);
+		up(&xpc_discovery_exited);
 
 		xpc_do_exit(xpcUnloading);
 		return -EBUSY;
@@ -1172,3 +1234,7 @@ module_param(xpc_hb_check_interval, int, 0);
 MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
 		"heartbeat checks.");
 
+module_param(xpc_disengage_request_timelimit, int, 0);
+MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
+		"for disengage request to complete.");
+
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 79a0fc4c860c..958488f55699 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -578,7 +578,7 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
 
 
 /*
- * Prior code has determine the nasid which generated an IPI. Inspect
+ * Prior code has determined the nasid which generated an IPI. Inspect
  * that nasid to determine if its partition needs to be activated or
  * deactivated.
  *
@@ -942,14 +942,14 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
 
 		/* set a timelimit on the disengage request */
 		part->disengage_request_timeout = jiffies +
-					(XPC_DISENGAGE_REQUEST_TIMELIMIT * HZ);
+					(xpc_disengage_request_timelimit * HZ);
 		part->disengage_request_timer.expires =
 					part->disengage_request_timeout;
 		add_timer(&part->disengage_request_timer);
 	}
 
-	dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", partid,
-		reason);
+	dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
+		XPC_PARTID(part), reason);
 
 	xpc_partition_going_down(part, reason);
 }
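The expiry arithmetic above is the usual jiffies idiom: a limit in seconds is scaled by HZ and added to the running jiffies counter to produce an absolute expiry for the timer. A minimal sketch of arming a one-shot timer this way, using the timer_list API of this kernel era (names hypothetical):

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list demo_timer;

static void demo_timeout(unsigned long data)
{
	/* runs in softirq context once the limit has elapsed */
}

static void demo_arm(int seconds)
{
	init_timer(&demo_timer);
	demo_timer.function = demo_timeout;
	demo_timer.data = 0;
	demo_timer.expires = jiffies + seconds * HZ;	/* absolute expiry */
	add_timer(&demo_timer);
}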
diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h
index f3052a54932b..30312be31206 100644
--- a/include/asm-ia64/sn/xp.h
+++ b/include/asm-ia64/sn/xp.h
@@ -225,7 +225,9 @@ enum xpc_retval {
 
 	xpcDisconnecting,	/* 49: channel disconnecting (closing) */
 
-	xpcUnknownReason	/* 50: unknown reason -- must be last in list */
+	xpcOpenCloseError,	/* 50: channel open/close protocol error */
+
+	xpcUnknownReason	/* 51: unknown reason -- must be last in list */
 };
 
 
@@ -350,7 +352,7 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
  *
  * The 'func' field points to the function to call when aynchronous
  * notification is required for such events as: a connection established/lost,
- * or an incomming message received, or an error condition encountered. A
+ * or an incoming message received, or an error condition encountered. A
  * non-NULL 'func' field indicates that there is an active registration for
  * the channel.
  */