Diffstat (limited to 'drivers')
-rw-r--r--  drivers/misc/sgi-xp/xpc.h           |  14
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c      |  96
-rw-r--r--  drivers/misc/sgi-xp/xpc_partition.c | 121
-rw-r--r--  drivers/misc/sgi-xp/xpc_sn2.c       | 217
4 files changed, 229 insertions(+), 219 deletions(-)
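
At a glance: this patch moves XPC's activate-IRQ plumbing and the SH_IPI_ACCESS/AMO protection toggling out of the common code (xpc_main.c, xpc_partition.c) and into the sn2-specific layer (xpc_sn2.c), renaming the act_IRQ_* identifiers to activate_IRQ_* along the way. The generic layer keeps only the counter and wait queue; the sn2 layer owns the interrupt handler. A minimal sketch of that split, using only names that appear in the hunks below:

	/* generic side (xpc_main.c): bookkeeping only */
	atomic_t xpc_activate_IRQ_rcvd = ATOMIC_INIT(0);
	DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);

	/* sn2 side (xpc_sn2.c): the handler that feeds it */
	static irqreturn_t
	xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
	{
		atomic_inc(&xpc_activate_IRQ_rcvd);
		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return IRQ_HANDLED;
	}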
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 6b622b091bde..1edf37512de6 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -480,7 +480,7 @@ struct xpc_partition {
 	u64 remote_amos_page_pa;	/* phys addr of partition's amos page */
 	int remote_act_nasid;	/* active part's act/deact nasid */
 	int remote_act_phys_cpuid;	/* active part's act/deact phys cpuid */
-	u32 act_IRQ_rcvd;	/* IRQs since activation */
+	u32 activate_IRQ_rcvd;	/* IRQs since activation */
 	spinlock_t act_lock;	/* protect updating of act_state */
 	u8 act_state;		/* from XPC HB viewpoint */
 	u8 remote_vars_version;	/* version# of partition's vars */
@@ -580,8 +580,8 @@ extern struct device *xpc_part;
 extern struct device *xpc_chan;
 extern int xpc_disengage_request_timelimit;
 extern int xpc_disengage_request_timedout;
-extern atomic_t xpc_act_IRQ_rcvd;
-extern wait_queue_head_t xpc_act_IRQ_wq;
+extern atomic_t xpc_activate_IRQ_rcvd;
+extern wait_queue_head_t xpc_activate_IRQ_wq;
 extern void *xpc_heartbeating_to_mask;
 extern irqreturn_t xpc_notify_IRQ_handler(int, void *);
 extern void xpc_dropped_IPI_check(struct xpc_partition *);
@@ -601,7 +601,7 @@ extern u64 (*xpc_get_IPI_flags) (struct xpc_partition *);
 extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *);
 extern void (*xpc_initiate_partition_activation) (struct xpc_rsvd_page *, u64,
 						  int);
-extern void (*xpc_process_act_IRQ_rcvd) (int);
+extern void (*xpc_process_activate_IRQ_rcvd) (int);
 extern enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *);
 extern void (*xpc_teardown_infrastructure) (struct xpc_partition *);
 extern void (*xpc_mark_partition_engaged) (struct xpc_partition *);
@@ -629,10 +629,12 @@ extern enum xp_retval (*xpc_send_msg) (struct xpc_channel *, u32, void *, u16,
 extern void (*xpc_received_msg) (struct xpc_channel *, struct xpc_msg *);
 
 /* found in xpc_sn2.c */
-extern void xpc_init_sn2(void);
+extern int xpc_init_sn2(void);
+extern void xpc_exit_sn2(void);
 
 /* found in xpc_uv.c */
 extern void xpc_init_uv(void);
+extern void xpc_exit_uv(void);
 
 /* found in xpc_partition.c */
 extern int xpc_exiting;
@@ -646,7 +648,7 @@ extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **);
 extern struct xpc_rsvd_page *xpc_setup_rsvd_page(void);
 extern void xpc_allow_IPI_ops(void);
 extern void xpc_restrict_IPI_ops(void);
-extern int xpc_identify_act_IRQ_sender(void);
+extern int xpc_identify_activate_IRQ_sender(void);
 extern int xpc_partition_disengaged(struct xpc_partition *);
 extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
 extern void xpc_mark_partition_inactive(struct xpc_partition *);
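
Since xpc_init_sn2() can now fail (it registers an IRQ handler), callers are expected to check its return value and pair it with xpc_exit_sn2(); xpc_init_uv()/xpc_exit_uv() keep void returns in this patch. A hedged sketch of the expected call pattern, condensed from the xpc_main.c hunks that follow:

	int ret;

	if (is_shub()) {
		ret = xpc_init_sn2();	/* may fail registering the IRQ */
		if (ret != 0)
			return ret;	/* nothing to unwind yet */
	} else if (is_uv()) {
		xpc_init_uv();		/* cannot fail in this patch */
	}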
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index aae90f5933b5..8780d5d00f62 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -147,11 +147,11 @@ static struct ctl_table_header *xpc_sysctl;
 /* non-zero if any remote partition disengage request was timed out */
 int xpc_disengage_request_timedout;
 
-/* #of IRQs received */
-atomic_t xpc_act_IRQ_rcvd;
+/* #of activate IRQs received */
+atomic_t xpc_activate_IRQ_rcvd = ATOMIC_INIT(0);
 
 /* IRQ handler notifies this wait queue on receipt of an IRQ */
-DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
+DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);
 
 static unsigned long xpc_hb_check_timeout;
 static struct timer_list xpc_hb_timer;
@@ -190,7 +190,7 @@ struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);
 void (*xpc_initiate_partition_activation) (struct xpc_rsvd_page *remote_rp,
 					   u64 remote_rp_pa, int nasid);
 
-void (*xpc_process_act_IRQ_rcvd) (int n_IRQs_expected);
+void (*xpc_process_activate_IRQ_rcvd) (int n_IRQs_expected);
 enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part);
 void (*xpc_teardown_infrastructure) (struct xpc_partition *part);
 
@@ -239,17 +239,6 @@ xpc_timeout_partition_disengage_request(unsigned long data)
 }
 
 /*
- * Notify the heartbeat check thread that an IRQ has been received.
- */
-static irqreturn_t
-xpc_act_IRQ_handler(int irq, void *dev_id)
-{
-	atomic_inc(&xpc_act_IRQ_rcvd);
-	wake_up_interruptible(&xpc_act_IRQ_wq);
-	return IRQ_HANDLED;
-}
-
-/*
  * Timer to produce the heartbeat.  The timer structures function is
  * already set when this is initially called.  A tunable is used to
  * specify when the next timeout should occur.
@@ -260,7 +249,7 @@ xpc_hb_beater(unsigned long dummy)
 	xpc_increment_heartbeat();
 
 	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
-		wake_up_interruptible(&xpc_act_IRQ_wq);
+		wake_up_interruptible(&xpc_activate_IRQ_wq);
 
 	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
 	add_timer(&xpc_hb_timer);
@@ -306,7 +295,7 @@ xpc_hb_checker(void *ignore)
 		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
 			"been received\n",
 			(int)(xpc_hb_check_timeout - jiffies),
-			atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
+			atomic_read(&xpc_activate_IRQ_rcvd) - last_IRQ_count);
 
 		/* checking of remote heartbeats is skewed by IRQ handling */
 		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
@@ -322,15 +311,15 @@ xpc_hb_checker(void *ignore)
 		}
 
 		/* check for outstanding IRQs */
-		new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
+		new_IRQ_count = atomic_read(&xpc_activate_IRQ_rcvd);
 		if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
 			force_IRQ = 0;
 
 			dev_dbg(xpc_part, "found an IRQ to process; will be "
 				"resetting xpc_hb_check_timeout\n");
 
-			xpc_process_act_IRQ_rcvd(new_IRQ_count -
-						 last_IRQ_count);
+			xpc_process_activate_IRQ_rcvd(new_IRQ_count -
+						      last_IRQ_count);
 			last_IRQ_count = new_IRQ_count;
 
 			xpc_hb_check_timeout = jiffies +
@@ -338,9 +327,9 @@ xpc_hb_checker(void *ignore)
 		}
 
 		/* wait for IRQ or timeout */
-		(void)wait_event_interruptible(xpc_act_IRQ_wq,
-					       (last_IRQ_count <
-						atomic_read(&xpc_act_IRQ_rcvd)
+		(void)wait_event_interruptible(xpc_activate_IRQ_wq,
+					       (last_IRQ_count < atomic_read(
+						&xpc_activate_IRQ_rcvd)
 						|| time_is_before_eq_jiffies(
 						xpc_hb_check_timeout) ||
 						xpc_exiting));
@@ -884,10 +873,7 @@ xpc_do_exit(enum xp_retval reason)
 	 * the heartbeat checker thread in case it's sleeping.
 	 */
 	xpc_exiting = 1;
-	wake_up_interruptible(&xpc_act_IRQ_wq);
-
-	/* ignore all incoming interrupts */
-	free_irq(SGI_XPC_ACTIVATE, NULL);
+	wake_up_interruptible(&xpc_activate_IRQ_wq);
 
 	/* wait for the discovery thread to exit */
 	wait_for_completion(&xpc_discovery_exited);
@@ -968,9 +954,6 @@ xpc_do_exit(enum xp_retval reason)
 		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
 	}
 
-	/* close down protections for IPI operations */
-	xpc_restrict_IPI_ops();
-
 	/* clear the interface to XPC's functions */
 	xpc_clear_interface();
 
@@ -979,6 +962,11 @@ xpc_do_exit(enum xp_retval reason)
 
 	kfree(xpc_partitions);
 	kfree(xpc_remote_copy_buffer_base);
+
+	if (is_shub())
+		xpc_exit_sn2();
+	else
+		xpc_exit_uv();
 }
 
 /*
@@ -1144,7 +1132,9 @@ xpc_init(void)
 		if (xp_max_npartitions != 64)
 			return -EINVAL;
 
-		xpc_init_sn2();
+		ret = xpc_init_sn2();
+		if (ret != 0)
+			return ret;
 
 	} else if (is_uv()) {
 		xpc_init_uv();
@@ -1163,7 +1153,8 @@ xpc_init(void)
 					   &xpc_remote_copy_buffer_base);
 	if (xpc_remote_copy_buffer == NULL) {
 		dev_err(xpc_part, "can't get memory for remote copy buffer\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_1;
 	}
 
 	xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
@@ -1171,7 +1162,7 @@ xpc_init(void)
 	if (xpc_partitions == NULL) {
 		dev_err(xpc_part, "can't get memory for partition structure\n");
 		ret = -ENOMEM;
-		goto out_1;
+		goto out_2;
 	}
 
 	/*
@@ -1187,7 +1178,7 @@ xpc_init(void)
 
 		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
 
-		part->act_IRQ_rcvd = 0;
+		part->activate_IRQ_rcvd = 0;
 		spin_lock_init(&part->act_lock);
 		part->act_state = XPC_P_INACTIVE;
 		XPC_SET_REASON(part, 0, 0);
@@ -1205,33 +1196,6 @@ xpc_init(void)
 	xpc_sysctl = register_sysctl_table(xpc_sys_dir);
 
 	/*
-	 * Open up protections for IPI operations (and AMO operations on
-	 * Shub 1.1 systems).
-	 */
-	xpc_allow_IPI_ops();
-
-	/*
-	 * Interrupts being processed will increment this atomic variable and
-	 * awaken the heartbeat thread which will process the interrupts.
-	 */
-	atomic_set(&xpc_act_IRQ_rcvd, 0);
-
-	/*
-	 * This is safe to do before the xpc_hb_checker thread has started
-	 * because the handler releases a wait queue.  If an interrupt is
-	 * received before the thread is waiting, it will not go to sleep,
-	 * but rather immediately process the interrupt.
-	 */
-	ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
-			  "xpc hb", NULL);
-	if (ret != 0) {
-		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
-			"errno=%d\n", -ret);
-		ret = -EBUSY;
-		goto out_2;
-	}
-
-	/*
 	 * Fill the partition reserved page with the information needed by
 	 * other partitions to discover we are alive and establish initial
 	 * communications.
@@ -1296,14 +1260,16 @@ out_4:
 	(void)unregister_die_notifier(&xpc_die_notifier);
 	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
 out_3:
-	free_irq(SGI_XPC_ACTIVATE, NULL);
-out_2:
-	xpc_restrict_IPI_ops();
 	if (xpc_sysctl)
 		unregister_sysctl_table(xpc_sysctl);
 	kfree(xpc_partitions);
-out_1:
+out_2:
 	kfree(xpc_remote_copy_buffer_base);
+out_1:
+	if (is_shub())
+		xpc_exit_sn2();
+	else
+		xpc_exit_uv();
 	return ret;
 }
 
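
For reference, the heartbeat checker's sleep condition after the rename is unchanged in substance; reformatted for readability (a sketch only, not a further change to the patch), it sleeps until one of three events: a new activate IRQ has been counted, the heartbeat-check deadline has passed, or XPC is exiting:

	(void)wait_event_interruptible(xpc_activate_IRQ_wq,
		last_IRQ_count < atomic_read(&xpc_activate_IRQ_rcvd) ||
		time_is_before_eq_jiffies(xpc_hb_check_timeout) ||
		xpc_exiting);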
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 90ec5ca8c9ab..bf9b1193bd2a 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -29,16 +29,6 @@
 /* XPC is exiting flag */
 int xpc_exiting;
 
-/* SH_IPI_ACCESS shub register value on startup */
-static u64 xpc_sh1_IPI_access;
-static u64 xpc_sh2_IPI_access0;
-static u64 xpc_sh2_IPI_access1;
-static u64 xpc_sh2_IPI_access2;
-static u64 xpc_sh2_IPI_access3;
-
-/* original protection values for each node */
-u64 xpc_prot_vec[MAX_NUMNODES];
-
 /* this partition's reserved page pointers */
 struct xpc_rsvd_page *xpc_rsvd_page;
 static u64 *xpc_part_nasids;
@@ -211,117 +201,6 @@ xpc_setup_rsvd_page(void)
 }
 
 /*
- * Change protections to allow IPI operations (and AMO operations on
- * Shub 1.1 systems).
- */
-void
-xpc_allow_IPI_ops(void)
-{
-	int node;
-	int nasid;
-
-	/* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
-
-	if (is_shub2()) {
-		xpc_sh2_IPI_access0 =
-		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
-		xpc_sh2_IPI_access1 =
-		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
-		xpc_sh2_IPI_access2 =
-		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
-		xpc_sh2_IPI_access3 =
-		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
-
-		for_each_online_node(node) {
-			nasid = cnodeid_to_nasid(node);
-			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
-			      -1UL);
-			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
-			      -1UL);
-			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
-			      -1UL);
-			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
-			      -1UL);
-		}
-
-	} else {
-		xpc_sh1_IPI_access =
-		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
-
-		for_each_online_node(node) {
-			nasid = cnodeid_to_nasid(node);
-			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
-			      -1UL);
-
-			/*
-			 * Since the BIST collides with memory operations on
-			 * SHUB 1.1 sn_change_memprotect() cannot be used.
-			 */
-			if (enable_shub_wars_1_1()) {
-				/* open up everything */
-				xpc_prot_vec[node] = (u64)HUB_L((u64 *)
-								GLOBAL_MMR_ADDR
-								(nasid,
-						  SH1_MD_DQLP_MMR_DIR_PRIVEC0));
-				HUB_S((u64 *)
-				      GLOBAL_MMR_ADDR(nasid,
-						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-				      -1UL);
-				HUB_S((u64 *)
-				      GLOBAL_MMR_ADDR(nasid,
-						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-				      -1UL);
-			}
-		}
-	}
-}
-
-/*
- * Restrict protections to disallow IPI operations (and AMO operations on
- * Shub 1.1 systems).
- */
-void
-xpc_restrict_IPI_ops(void)
-{
-	int node;
-	int nasid;
-
-	/* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
-
-	if (is_shub2()) {
-
-		for_each_online_node(node) {
-			nasid = cnodeid_to_nasid(node);
-			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
-			      xpc_sh2_IPI_access0);
-			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
-			      xpc_sh2_IPI_access1);
-			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
-			      xpc_sh2_IPI_access2);
-			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
-			      xpc_sh2_IPI_access3);
-		}
-
-	} else {
-
-		for_each_online_node(node) {
-			nasid = cnodeid_to_nasid(node);
-			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
-			      xpc_sh1_IPI_access);
-
-			if (enable_shub_wars_1_1()) {
-				HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
-						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-				      xpc_prot_vec[node]);
-				HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
-						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-				      xpc_prot_vec[node]);
-			}
-		}
-	}
-}
-
-/*
  * Get a copy of a portion of the remote partition's rsvd page.
  *
  * remote_rp points to a buffer that is cacheline aligned for BTE copies and
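
The two functions removed here reappear below in xpc_sn2.c as static xpc_allow_IPI_ops_sn2()/xpc_disallow_IPI_ops_sn2(), with the Shub 1.1 BIST workaround split out into xpc_allow_AMO_ops_sn2() and xpc_allow_AMO_ops_shub_wars_1_1_sn2(). The underlying pattern is save/open/restore on each node's access MMR; a minimal sketch (Shub 1 case only, names as in the hunks below):

	/* at init: snapshot the register, then open access on every node */
	xpc_sh1_IPI_access = (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
	for_each_online_node(node) {
		nasid = cnodeid_to_nasid(node);
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), -1UL);
	}

	/* at exit: write the snapshot back */
	for_each_online_node(node) {
		nasid = cnodeid_to_nasid(node);
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
		      xpc_sh1_IPI_access);
	}

Note that the relocated disallow path restores only the IPI access registers; the Shub 1.1 PRIVEC values saved in xpc_prot_vec_sn2[] are not written back anywhere in this patch, unlike in the old xpc_restrict_IPI_ops().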
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index db67d348b35c..4659f6cb885e 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -22,6 +22,87 @@
 static struct xpc_vars_sn2 *xpc_vars;	/* >>> Add _sn2 suffix? */
 static struct xpc_vars_part_sn2 *xpc_vars_part;	/* >>> Add _sn2 suffix? */
 
+/* SH_IPI_ACCESS shub register value on startup */
+static u64 xpc_sh1_IPI_access;
+static u64 xpc_sh2_IPI_access0;
+static u64 xpc_sh2_IPI_access1;
+static u64 xpc_sh2_IPI_access2;
+static u64 xpc_sh2_IPI_access3;
+
+/*
+ * Change protections to allow IPI operations.
+ */
+static void
+xpc_allow_IPI_ops_sn2(void)
+{
+	int node;
+	int nasid;
+
+	/* >>> The following should get moved into SAL. */
+	if (is_shub2()) {
+		xpc_sh2_IPI_access0 =
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
+		xpc_sh2_IPI_access1 =
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
+		xpc_sh2_IPI_access2 =
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
+		xpc_sh2_IPI_access3 =
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
+
+		for_each_online_node(node) {
+			nasid = cnodeid_to_nasid(node);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+			      -1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+			      -1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+			      -1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+			      -1UL);
+		}
+	} else {
+		xpc_sh1_IPI_access =
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
+
+		for_each_online_node(node) {
+			nasid = cnodeid_to_nasid(node);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+			      -1UL);
+		}
+	}
+}
+
+/*
+ * Restrict protections to disallow IPI operations.
+ */
+static void
+xpc_disallow_IPI_ops_sn2(void)
+{
+	int node;
+	int nasid;
+
+	/* >>> The following should get moved into SAL. */
+	if (is_shub2()) {
+		for_each_online_node(node) {
+			nasid = cnodeid_to_nasid(node);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+			      xpc_sh2_IPI_access0);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+			      xpc_sh2_IPI_access1);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+			      xpc_sh2_IPI_access2);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+			      xpc_sh2_IPI_access3);
+		}
+	} else {
+		for_each_online_node(node) {
+			nasid = cnodeid_to_nasid(node);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+			      xpc_sh1_IPI_access);
+		}
+	}
+}
+
 /*
  * The following set of macros and functions are used for the sending and
  * receiving of IPIs (also known as IRQs).  There are two flavors of IPIs,
@@ -74,6 +155,17 @@ xpc_IPI_init_sn2(int index)
  */
 
 /*
+ * Notify the heartbeat check thread that an activate IRQ has been received.
+ */
+static irqreturn_t
+xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
+{
+	atomic_inc(&xpc_activate_IRQ_rcvd);
+	wake_up_interruptible(&xpc_activate_IRQ_wq);
+	return IRQ_HANDLED;
+}
+
+/*
  * Flag the appropriate AMO variable and send an IPI to the specified node.
  */
 static void
@@ -100,8 +192,8 @@ xpc_activate_IRQ_send_local_sn2(int from_nasid)
 	/* fake the sending and receipt of an activate IRQ from remote nasid */
 	FETCHOP_STORE_OP(TO_AMO((u64)&amos[w_index].variable), FETCHOP_OR,
 			 (1UL << b_index));
-	atomic_inc(&xpc_act_IRQ_rcvd);
-	wake_up_interruptible(&xpc_act_IRQ_wq);
+	atomic_inc(&xpc_activate_IRQ_rcvd);
+	wake_up_interruptible(&xpc_activate_IRQ_wq);
 }
 
 static void
@@ -383,11 +475,65 @@ xpc_clear_partition_disengage_request_sn2(u64 partid_mask)
 					~partid_mask);
 }
 
+/* original protection values for each node */
+static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
+
+/*
+ * Change protections to allow AMO operations on non-Shub 1.1 systems.
+ */
+static enum xp_retval
+xpc_allow_AMO_ops_sn2(AMO_t *amos_page)
+{
+	u64 nasid_array = 0;
+	int ret;
+
+	/*
+	 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
+	 * collides with memory operations.  On those systems we call
+	 * xpc_allow_AMO_ops_shub_wars_1_1_sn2() instead.
+	 */
+	if (!enable_shub_wars_1_1()) {
+		ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE,
+					   SN_MEMPROT_ACCESS_CLASS_1,
+					   &nasid_array);
+		if (ret != 0)
+			return xpSalError;
+	}
+	return xpSuccess;
+}
+
+/*
+ * Change protections to allow AMO operations on Shub 1.1 systems.
+ */
+static void
+xpc_allow_AMO_ops_shub_wars_1_1_sn2(void)
+{
+	int node;
+	int nasid;
+
+	if (!enable_shub_wars_1_1())
+		return;
+
+	for_each_online_node(node) {
+		nasid = cnodeid_to_nasid(node);
+		/* save current protection values */
+		xpc_prot_vec_sn2[node] =
+		    (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
+						  SH1_MD_DQLP_MMR_DIR_PRIVEC0));
+		/* open up everything */
+		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+					     SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+		      -1UL);
+		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+					     SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+		      -1UL);
+	}
+}
+
 static enum xp_retval
 xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
 {
 	AMO_t *amos_page;
-	u64 nasid_array = 0;
 	int i;
 	int ret;
 
@@ -421,21 +567,15 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
 	}
 
 		/*
-		 * Open up AMO-R/W to cpu.  This is done for Shub 1.1 systems
-		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
+		 * Open up AMO-R/W to cpu.  This is done on Shub 1.1 systems
+		 * when xpc_allow_AMO_ops_shub_wars_1_1_sn2() is called.
 		 */
-		if (!enable_shub_wars_1_1()) {
-			ret = sn_change_memprotect(ia64_tpa((u64)amos_page),
-						   PAGE_SIZE,
-						   SN_MEMPROT_ACCESS_CLASS_1,
-						   &nasid_array);
-			if (ret != 0) {
-				dev_err(xpc_part, "can't change memory "
-					"protections\n");
-				uncached_free_page(__IA64_UNCACHED_OFFSET |
-						   TO_PHYS((u64)amos_page), 1);
-				return xpSalError;
-			}
+		ret = xpc_allow_AMO_ops_sn2(amos_page);
+		if (ret != xpSuccess) {
+			dev_err(xpc_part, "can't allow AMO operations\n");
+			uncached_free_page(__IA64_UNCACHED_OFFSET |
+					   TO_PHYS((u64)amos_page), 1);
+			return ret;
 		}
 	}
 
@@ -656,7 +796,7 @@ xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
  * initialized reserved page.
  */
 static void
-xpc_identify_act_IRQ_req_sn2(int nasid)
+xpc_identify_activate_IRQ_req_sn2(int nasid)
 {
 	struct xpc_rsvd_page *remote_rp;
 	struct xpc_vars_sn2 *remote_vars;
@@ -702,10 +842,10 @@ xpc_identify_act_IRQ_req_sn2(int nasid)
 		return;
 	}
 
-	part->act_IRQ_rcvd++;
+	part->activate_IRQ_rcvd++;
 
 	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
-		"%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd,
+		"%ld:0x%lx\n", (int)nasid, (int)partid, part->activate_IRQ_rcvd,
 		remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);
 
 	if (xpc_partition_disengaged(part) &&
@@ -831,7 +971,7 @@ xpc_identify_act_IRQ_req_sn2(int nasid)
  * Return #of IRQs detected.
  */
 int
-xpc_identify_act_IRQ_sender_sn2(void)
+xpc_identify_activate_IRQ_sender_sn2(void)
 {
 	int word, bit;
 	u64 nasid_mask;
@@ -872,7 +1012,7 @@ xpc_identify_act_IRQ_sender_sn2(void)
 				nasid = XPC_NASID_FROM_W_B(word, bit);
 				dev_dbg(xpc_part, "interrupt from nasid %ld\n",
 					nasid);
-				xpc_identify_act_IRQ_req_sn2(nasid);
+				xpc_identify_activate_IRQ_req_sn2(nasid);
 			}
 		}
 	}
@@ -880,14 +1020,14 @@ xpc_identify_act_IRQ_sender_sn2(void)
 }
 
 static void
-xpc_process_act_IRQ_rcvd_sn2(int n_IRQs_expected)
+xpc_process_activate_IRQ_rcvd_sn2(int n_IRQs_expected)
 {
 	int n_IRQs_detected;
 
-	n_IRQs_detected = xpc_identify_act_IRQ_sender_sn2();
+	n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
 	if (n_IRQs_detected < n_IRQs_expected) {
 		/* retry once to help avoid missing AMO */
-		(void)xpc_identify_act_IRQ_sender_sn2();
+		(void)xpc_identify_activate_IRQ_sender_sn2();
 	}
 }
 
@@ -1775,9 +1915,11 @@ xpc_received_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg)
 	xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
 }
 
-void
+int
 xpc_init_sn2(void)
 {
+	int ret;
+
 	xpc_rsvd_page_init = xpc_rsvd_page_init_sn2;
 	xpc_increment_heartbeat = xpc_increment_heartbeat_sn2;
 	xpc_offline_heartbeat = xpc_offline_heartbeat_sn2;
@@ -1788,7 +1930,7 @@ xpc_init_sn2(void)
 
 	xpc_initiate_partition_activation =
 	    xpc_initiate_partition_activation_sn2;
-	xpc_process_act_IRQ_rcvd = xpc_process_act_IRQ_rcvd_sn2;
+	xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2;
 	xpc_setup_infrastructure = xpc_setup_infrastructure_sn2;
 	xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2;
 	xpc_make_first_contact = xpc_make_first_contact_sn2;
@@ -1819,9 +1961,30 @@ xpc_init_sn2(void)
 
 	xpc_send_msg = xpc_send_msg_sn2;
 	xpc_received_msg = xpc_received_msg_sn2;
+
+	/* open up protections for IPI and [potentially] AMO operations */
+	xpc_allow_IPI_ops_sn2();
+	xpc_allow_AMO_ops_shub_wars_1_1_sn2();
+
+	/*
+	 * This is safe to do before the xpc_hb_checker thread has started
+	 * because the handler releases a wait queue.  If an interrupt is
+	 * received before the thread is waiting, it will not go to sleep,
+	 * but rather immediately process the interrupt.
+	 */
+	ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0,
+			  "xpc hb", NULL);
+	if (ret != 0) {
+		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
+			"errno=%d\n", -ret);
+		xpc_disallow_IPI_ops_sn2();
+	}
+	return ret;
 }
 
 void
 xpc_exit_sn2(void)
 {
+	free_irq(SGI_XPC_ACTIVATE, NULL);
+	xpc_disallow_IPI_ops_sn2();
 }
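
Taken together, xpc_init_sn2() now establishes everything its xpc_exit_sn2() counterpart tears down, and its error path rolls the protections back so they never stay open without a registered handler. A condensed sketch of the ordering, directly mirroring the final hunks above:

	/* bring-up (xpc_init_sn2) */
	xpc_allow_IPI_ops_sn2();
	xpc_allow_AMO_ops_shub_wars_1_1_sn2();
	ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0,
			  "xpc hb", NULL);
	if (ret != 0)
		xpc_disallow_IPI_ops_sn2();	/* roll back on failure */

	/* teardown (xpc_exit_sn2) */
	free_irq(SGI_XPC_ACTIVATE, NULL);
	xpc_disallow_IPI_ops_sn2();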