Diffstat (limited to 'drivers/misc/sgi-xp/xpc_partition.c')
 drivers/misc/sgi-xp/xpc_partition.c | 328 ++++++++++++++---------------
 1 file changed, 138 insertions(+), 190 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 7412dc7351cd..57f1d0b3ac26 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -6,7 +6,6 @@
  * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) partition support.
  *
@@ -16,7 +15,6 @@
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/sysctl.h>
 #include <linux/cache.h>
@@ -30,11 +28,9 @@
 #include <asm/sn/addrs.h>
 #include "xpc.h"
 
-
 /* XPC is exiting flag */
 int xpc_exiting;
 
-
 /* SH_IPI_ACCESS shub register value on startup */
 static u64 xpc_sh1_IPI_access;
 static u64 xpc_sh2_IPI_access0;
@@ -42,11 +38,9 @@ static u64 xpc_sh2_IPI_access1;
 static u64 xpc_sh2_IPI_access2;
 static u64 xpc_sh2_IPI_access3;
 
-
 /* original protection values for each node */
 u64 xpc_prot_vec[MAX_NUMNODES];
 
-
 /* this partition's reserved page pointers */
 struct xpc_rsvd_page *xpc_rsvd_page;
 static u64 *xpc_part_nasids;
@@ -57,7 +51,6 @@ struct xpc_vars_part *xpc_vars_part;
 static int xp_nasid_mask_bytes;	/* actual size in bytes of nasid mask */
 static int xp_nasid_mask_words;	/* actual size in words of nasid mask */
 
-
 /*
  * For performance reasons, each entry of xpc_partitions[] is cacheline
  * aligned. And xpc_partitions[] is padded with an additional entry at the
@@ -66,7 +59,6 @@ static int xp_nasid_mask_words;	/* actual size in words of nasid mask */
  */
 struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
 
-
 /*
  * Generic buffer used to store a local copy of portions of a remote
  * partition's reserved page (either its header and part_nasids mask,
@@ -75,7 +67,6 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
 char *xpc_remote_copy_buffer;
 void *xpc_remote_copy_buffer_base;
 
-
 /*
  * Guarantee that the kmalloc'd memory is cacheline aligned.
  */
@@ -87,7 +78,7 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
 	if (*base == NULL) {
 		return NULL;
 	}
-	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+	if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) {
 		return *base;
 	}
 	kfree(*base);
@@ -97,10 +88,9 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
 	if (*base == NULL) {
 		return NULL;
 	}
-	return (void *) L1_CACHE_ALIGN((u64) *base);
+	return (void *)L1_CACHE_ALIGN((u64)*base);
 }
 
-
 /*
  * Given a nasid, get the physical address of the partition's reserved page
  * for that nasid. This function returns 0 on any error.
@@ -117,11 +107,10 @@ xpc_get_rsvd_page_pa(int nasid)
 	u64 buf_len = 0;
 	void *buf_base = NULL;
 
-
 	while (1) {
 
 		status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
-							&len);
+						       &len);
 
 		dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
 			"0x%016lx, address=0x%016lx, len=0x%016lx\n",
@@ -134,8 +123,9 @@ xpc_get_rsvd_page_pa(int nasid)
 		if (L1_CACHE_ALIGN(len) > buf_len) {
 			kfree(buf_base);
 			buf_len = L1_CACHE_ALIGN(len);
-			buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len,
-						GFP_KERNEL, &buf_base);
+			buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len,
+								 GFP_KERNEL,
+								 &buf_base);
 			if (buf_base == NULL) {
 				dev_err(xpc_part, "unable to kmalloc "
 					"len=0x%016lx\n", buf_len);
@@ -145,7 +135,7 @@ xpc_get_rsvd_page_pa(int nasid)
 		}
 
 		bte_res = xp_bte_copy(rp_pa, buf, buf_len,
-				(BTE_NOTIFY | BTE_WACQUIRE), NULL);
+				      (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 		if (bte_res != BTE_SUCCESS) {
 			dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
 			status = SALRET_ERROR;
@@ -162,7 +152,6 @@ xpc_get_rsvd_page_pa(int nasid)
 	return rp_pa;
 }
 
-
 /*
  * Fill the partition reserved page with the information needed by
  * other partitions to discover we are alive and establish initial
@@ -176,7 +165,6 @@ xpc_rsvd_page_init(void)
 	u64 rp_pa, nasid_array = 0;
 	int i, ret;
 
-
 	/* get the local reserved page's address */
 
 	preempt_disable();
@@ -186,7 +174,7 @@ xpc_rsvd_page_init(void)
 		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
 		return NULL;
 	}
-	rp = (struct xpc_rsvd_page *) __va(rp_pa);
+	rp = (struct xpc_rsvd_page *)__va(rp_pa);
 
 	if (rp->partid != sn_partition_id) {
 		dev_err(xpc_part, "the reserved page's partid of %d should be "
@@ -223,7 +211,7 @@ xpc_rsvd_page_init(void)
 	 * memory protections are never restricted.
 	 */
 	if ((amos_page = xpc_vars->amos_page) == NULL) {
-		amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
+		amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0));
 		if (amos_page == NULL) {
 			dev_err(xpc_part, "can't allocate page of AMOs\n");
 			return NULL;
@@ -234,30 +222,31 @@ xpc_rsvd_page_init(void)
 		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
 		 */
 		if (!enable_shub_wars_1_1()) {
-			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
-				PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
-				&nasid_array);
+			ret = sn_change_memprotect(ia64_tpa((u64)amos_page),
+						   PAGE_SIZE,
+						   SN_MEMPROT_ACCESS_CLASS_1,
+						   &nasid_array);
 			if (ret != 0) {
 				dev_err(xpc_part, "can't change memory "
 					"protections\n");
 				uncached_free_page(__IA64_UNCACHED_OFFSET |
-						TO_PHYS((u64) amos_page));
+						   TO_PHYS((u64)amos_page));
 				return NULL;
 			}
 		}
-	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
+	} else if (!IS_AMO_ADDRESS((u64)amos_page)) {
 		/*
 		 * EFI's XPBOOT can also set amos_page in the reserved page,
 		 * but it happens to leave it as an uncached physical address
 		 * and we need it to be an uncached virtual, so we'll have to
 		 * convert it.
 		 */
-		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
+		if (!IS_AMO_PHYS_ADDRESS((u64)amos_page)) {
 			dev_err(xpc_part, "previously used amos_page address "
-				"is bad = 0x%p\n", (void *) amos_page);
+				"is bad = 0x%p\n", (void *)amos_page);
 			return NULL;
 		}
-		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
+		amos_page = (AMO_t *)TO_AMO((u64)amos_page);
 	}
 
 	/* clear xpc_vars */
@@ -267,22 +256,21 @@ xpc_rsvd_page_init(void)
 	xpc_vars->act_nasid = cpuid_to_nasid(0);
 	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
 	xpc_vars->vars_part_pa = __pa(xpc_vars_part);
-	xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page);
+	xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
 	xpc_vars->amos_page = amos_page;	/* save for next load of XPC */
-
 
 	/* clear xpc_vars_part */
-	memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
-							XP_MAX_PARTITIONS);
+	memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
+	       XP_MAX_PARTITIONS);
 
 	/* initialize the activate IRQ related AMO variables */
 	for (i = 0; i < xp_nasid_mask_words; i++) {
-		(void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
+		(void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
 	}
 
 	/* initialize the engaged remote partitions related AMO variables */
-	(void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
-	(void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
+	(void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
+	(void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
 
 	/* timestamp of when reserved page was setup by XPC */
 	rp->stamp = CURRENT_TIME;
@@ -296,7 +284,6 @@ xpc_rsvd_page_init(void)
 	return rp;
 }
 
-
 /*
  * Change protections to allow IPI operations (and AMO operations on
  * Shub 1.1 systems).
@@ -307,39 +294,38 @@ xpc_allow_IPI_ops(void)
 	int node;
 	int nasid;
 
-
 	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
 
 	if (is_shub2()) {
 		xpc_sh2_IPI_access0 =
-		    (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
 		xpc_sh2_IPI_access1 =
-		    (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
 		xpc_sh2_IPI_access2 =
-		    (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
 		xpc_sh2_IPI_access3 =
-		    (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
 
 		for_each_online_node(node) {
 			nasid = cnodeid_to_nasid(node);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
-				-1UL);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
-				-1UL);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
-				-1UL);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
-				-1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+			      -1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+			      -1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+			      -1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+			      -1UL);
 		}
 
 	} else {
 		xpc_sh1_IPI_access =
-		    (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
 
 		for_each_online_node(node) {
 			nasid = cnodeid_to_nasid(node);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
-				-1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+			      -1UL);
 
 			/*
 			 * Since the BIST collides with memory operations on
@@ -347,21 +333,23 @@ xpc_allow_IPI_ops(void)
 			 */
 			if (enable_shub_wars_1_1()) {
 				/* open up everything */
-				xpc_prot_vec[node] = (u64) HUB_L((u64 *)
-					GLOBAL_MMR_ADDR(nasid,
-					SH1_MD_DQLP_MMR_DIR_PRIVEC0));
-				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
-					SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-					-1UL);
-				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
-					SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-					-1UL);
+				xpc_prot_vec[node] = (u64)HUB_L((u64 *)
+								GLOBAL_MMR_ADDR
+								(nasid,
+								 SH1_MD_DQLP_MMR_DIR_PRIVEC0));
+				HUB_S((u64 *)
+				      GLOBAL_MMR_ADDR(nasid,
+						      SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+				      -1UL);
+				HUB_S((u64 *)
+				      GLOBAL_MMR_ADDR(nasid,
+						      SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+				      -1UL);
 			}
 		}
 	}
 }
 
-
 /*
  * Restrict protections to disallow IPI operations (and AMO operations on
  * Shub 1.1 systems).
@@ -372,43 +360,41 @@ xpc_restrict_IPI_ops(void)
 	int node;
 	int nasid;
 
-
 	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
 
 	if (is_shub2()) {
 
 		for_each_online_node(node) {
 			nasid = cnodeid_to_nasid(node);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
-				xpc_sh2_IPI_access0);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
-				xpc_sh2_IPI_access1);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
-				xpc_sh2_IPI_access2);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
-				xpc_sh2_IPI_access3);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+			      xpc_sh2_IPI_access0);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+			      xpc_sh2_IPI_access1);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+			      xpc_sh2_IPI_access2);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+			      xpc_sh2_IPI_access3);
 		}
 
 	} else {
 
 		for_each_online_node(node) {
 			nasid = cnodeid_to_nasid(node);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
-				xpc_sh1_IPI_access);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+			      xpc_sh1_IPI_access);
 
 			if (enable_shub_wars_1_1()) {
-				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
-					SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-					xpc_prot_vec[node]);
-				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
-					SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-					xpc_prot_vec[node]);
+				HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+					      SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+				      xpc_prot_vec[node]);
+				HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+					      SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+				      xpc_prot_vec[node]);
 			}
 		}
 	}
 }
 
-
 /*
  * At periodic intervals, scan through all active partitions and ensure
  * their heartbeat is still active. If not, the partition is deactivated.
@@ -421,8 +407,7 @@ xpc_check_remote_hb(void)
 	partid_t partid;
 	bte_result_t bres;
 
-
-	remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
+	remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
 
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 
@@ -437,18 +422,18 @@ xpc_check_remote_hb(void)
 		part = &xpc_partitions[partid];
 
 		if (part->act_state == XPC_P_INACTIVE ||
-			part->act_state == XPC_P_DEACTIVATING) {
+		    part->act_state == XPC_P_DEACTIVATING) {
 			continue;
 		}
 
 		/* pull the remote_hb cache line */
 		bres = xp_bte_copy(part->remote_vars_pa,
-					(u64) remote_vars,
-					XPC_RP_VARS_SIZE,
-					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
+				   (u64)remote_vars,
+				   XPC_RP_VARS_SIZE,
+				   (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 		if (bres != BTE_SUCCESS) {
 			XPC_DEACTIVATE_PARTITION(part,
-						xpc_map_bte_errors(bres));
+						 xpc_map_bte_errors(bres));
 			continue;
 		}
 
@@ -459,8 +444,8 @@ xpc_check_remote_hb(void)
 			remote_vars->heartbeating_to_mask);
 
 		if (((remote_vars->heartbeat == part->last_heartbeat) &&
-			(remote_vars->heartbeat_offline == 0)) ||
-			 !xpc_hb_allowed(sn_partition_id, remote_vars)) {
+		     (remote_vars->heartbeat_offline == 0)) ||
+		    !xpc_hb_allowed(sn_partition_id, remote_vars)) {
 
 			XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
 			continue;
@@ -470,7 +455,6 @@ xpc_check_remote_hb(void)
 	}
 }
 
-
 /*
  * Get a copy of a portion of the remote partition's rsvd page.
  *
@@ -480,11 +464,10 @@ xpc_check_remote_hb(void)
  */
 static enum xpc_retval
 xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
-		struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
+		  struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
 {
 	int bres, i;
 
-
 	/* get the reserved page's physical address */
 
 	*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
@@ -492,30 +475,26 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
 		return xpcNoRsvdPageAddr;
 	}
 
-
 	/* pull over the reserved page header and part_nasids mask */
-	bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp,
-			XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
-			(BTE_NOTIFY | BTE_WACQUIRE), NULL);
+	bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp,
+			   XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
+			   (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 	if (bres != BTE_SUCCESS) {
 		return xpc_map_bte_errors(bres);
 	}
 
-
 	if (discovered_nasids != NULL) {
 		u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
 
-
 		for (i = 0; i < xp_nasid_mask_words; i++) {
 			discovered_nasids[i] |= remote_part_nasids[i];
 		}
 	}
 
-
 	/* check that the partid is for another partition */
 
 	if (remote_rp->partid < 1 ||
-			remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
+	    remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
 		return xpcInvalidPartid;
 	}
 
@@ -523,16 +502,14 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
 		return xpcLocalPartid;
 	}
 
-
 	if (XPC_VERSION_MAJOR(remote_rp->version) !=
-			XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
+	    XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
 		return xpcBadVersion;
 	}
 
 	return xpcSuccess;
 }
 
-
 /*
  * Get a copy of the remote partition's XPC variables from the reserved page.
 *
@@ -544,34 +521,32 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
 {
 	int bres;
 
-
 	if (remote_vars_pa == 0) {
 		return xpcVarsNotSet;
 	}
 
 	/* pull over the cross partition variables */
-	bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE,
-				(BTE_NOTIFY | BTE_WACQUIRE), NULL);
+	bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE,
+			   (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 	if (bres != BTE_SUCCESS) {
 		return xpc_map_bte_errors(bres);
 	}
 
 	if (XPC_VERSION_MAJOR(remote_vars->version) !=
-			XPC_VERSION_MAJOR(XPC_V_VERSION)) {
+	    XPC_VERSION_MAJOR(XPC_V_VERSION)) {
 		return xpcBadVersion;
 	}
 
 	return xpcSuccess;
 }
 
-
 /*
  * Update the remote partition's info.
  */
 static void
 xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
-		struct timespec *remote_rp_stamp, u64 remote_rp_pa,
-		u64 remote_vars_pa, struct xpc_vars *remote_vars)
+			  struct timespec *remote_rp_stamp, u64 remote_rp_pa,
+			  u64 remote_vars_pa, struct xpc_vars *remote_vars)
 {
 	part->remote_rp_version = remote_rp_version;
 	dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
@@ -613,7 +588,6 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
 		part->remote_vars_version);
 }
 
-
 /*
  * Prior code has determined the nasid which generated an IPI. Inspect
  * that nasid to determine if its partition needs to be activated or
@@ -643,15 +617,14 @@ xpc_identify_act_IRQ_req(int nasid)
 	struct xpc_partition *part;
 	enum xpc_retval ret;
 
-
 	/* pull over the reserved page structure */
 
-	remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;
+	remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;
 
 	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
 	if (ret != xpcSuccess) {
 		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
-			"which sent interrupt, reason=%d\n", nasid, ret);
+			 "which sent interrupt, reason=%d\n", nasid, ret);
 		return;
 	}
 
@@ -663,34 +636,31 @@ xpc_identify_act_IRQ_req(int nasid)
 	partid = remote_rp->partid;
 	part = &xpc_partitions[partid];
 
-
 	/* pull over the cross partition variables */
 
-	remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
+	remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
 
 	ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
 	if (ret != xpcSuccess) {
 
 		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
-			"which sent interrupt, reason=%d\n", nasid, ret);
+			 "which sent interrupt, reason=%d\n", nasid, ret);
 
 		XPC_DEACTIVATE_PARTITION(part, ret);
 		return;
 	}
 
-
 	part->act_IRQ_rcvd++;
 
 	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
-		"%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd,
+		"%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd,
 		remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
 
-	if (xpc_partition_disengaged(part) &&
-		part->act_state == XPC_P_INACTIVE) {
+	if (xpc_partition_disengaged(part) && part->act_state == XPC_P_INACTIVE) {
 
 		xpc_update_partition_info(part, remote_rp_version,
-					&remote_rp_stamp, remote_rp_pa,
-					remote_vars_pa, remote_vars);
+					  &remote_rp_stamp, remote_rp_pa,
+					  remote_vars_pa, remote_vars);
 
 		if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
 			if (xpc_partition_disengage_requested(1UL << partid)) {
@@ -714,16 +684,15 @@ xpc_identify_act_IRQ_req(int nasid)
 
 	if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
 		DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
-							remote_vars_version));
+						       remote_vars_version));
 
 		if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
 			DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
-								version));
+							       version));
 			/* see if the other side rebooted */
 			if (part->remote_amos_page_pa ==
-				remote_vars->amos_page_pa &&
-					xpc_hb_allowed(sn_partition_id,
-						remote_vars)) {
+			    remote_vars->amos_page_pa &&
+			    xpc_hb_allowed(sn_partition_id, remote_vars)) {
 				/* doesn't look that way, so ignore the IPI */
 				return;
 			}
@@ -735,8 +704,8 @@ xpc_identify_act_IRQ_req(int nasid)
 		 */
 
 		xpc_update_partition_info(part, remote_rp_version,
-					&remote_rp_stamp, remote_rp_pa,
-					remote_vars_pa, remote_vars);
+					  &remote_rp_stamp, remote_rp_pa,
+					  remote_vars_pa, remote_vars);
 		part->reactivate_nasid = nasid;
 		XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
 		return;
@@ -756,15 +725,15 @@ xpc_identify_act_IRQ_req(int nasid)
 		xpc_clear_partition_disengage_request(1UL << partid);
 
 		xpc_update_partition_info(part, remote_rp_version,
-					&remote_rp_stamp, remote_rp_pa,
-					remote_vars_pa, remote_vars);
+					  &remote_rp_stamp, remote_rp_pa,
+					  remote_vars_pa, remote_vars);
 		reactivate = 1;
 
 	} else {
 		DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
 
 		stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
-							&remote_rp_stamp);
+						&remote_rp_stamp);
 		if (stamp_diff != 0) {
 			DBUG_ON(stamp_diff >= 0);
 
@@ -775,17 +744,18 @@ xpc_identify_act_IRQ_req(int nasid)
 
 			DBUG_ON(xpc_partition_engaged(1UL << partid));
 			DBUG_ON(xpc_partition_disengage_requested(1UL <<
-								partid));
+								  partid));
 
 			xpc_update_partition_info(part, remote_rp_version,
-						&remote_rp_stamp, remote_rp_pa,
-						remote_vars_pa, remote_vars);
+						  &remote_rp_stamp,
+						  remote_rp_pa, remote_vars_pa,
+						  remote_vars);
 			reactivate = 1;
 		}
 	}
 
 	if (part->disengage_request_timeout > 0 &&
-					!xpc_partition_disengaged(part)) {
+	    !xpc_partition_disengaged(part)) {
 		/* still waiting on other side to disengage from us */
 		return;
 	}
@@ -795,12 +765,11 @@ xpc_identify_act_IRQ_req(int nasid)
 		XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
 
 	} else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
-			xpc_partition_disengage_requested(1UL << partid)) {
+		   xpc_partition_disengage_requested(1UL << partid)) {
 		XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
 	}
 }
 
-
 /*
  * Loop through the activation AMO variables and process any bits
  * which are set. Each bit indicates a nasid sending a partition
@@ -813,14 +782,12 @@ xpc_identify_act_IRQ_sender(void)
 {
 	int word, bit;
 	u64 nasid_mask;
-	u64 nasid;			/* remote nasid */
+	u64 nasid;		/* remote nasid */
 	int n_IRQs_detected = 0;
 	AMO_t *act_amos;
 
-
 	act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;
 
-
 	/* scan through act AMO variable looking for non-zero entries */
 	for (word = 0; word < xp_nasid_mask_words; word++) {
 
@@ -837,7 +804,6 @@ xpc_identify_act_IRQ_sender(void)
 		dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
 			nasid_mask);
 
-
 		/*
 		 * If this nasid has been added to the machine since
 		 * our partition was reset, this will retain the
@@ -846,7 +812,6 @@ xpc_identify_act_IRQ_sender(void)
 		 */
 		xpc_mach_nasids[word] |= nasid_mask;
 
-
 		/* locate the nasid(s) which sent interrupts */
 
 		for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
@@ -862,7 +827,6 @@ xpc_identify_act_IRQ_sender(void)
 	return n_IRQs_detected;
 }
 
-
 /*
  * See if the other side has responded to a partition disengage request
 * from us.
@@ -873,7 +837,6 @@ xpc_partition_disengaged(struct xpc_partition *part)
 	partid_t partid = XPC_PARTID(part);
 	int disengaged;
 
-
 	disengaged = (xpc_partition_engaged(1UL << partid) == 0);
 	if (part->disengage_request_timeout) {
 		if (!disengaged) {
@@ -888,7 +851,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
 			 */
 
 			dev_info(xpc_part, "disengage from remote partition %d "
-				"timed out\n", partid);
+				 "timed out\n", partid);
 			xpc_disengage_request_timedout = 1;
 			xpc_clear_partition_engaged(1UL << partid);
 			disengaged = 1;
@@ -898,11 +861,11 @@ xpc_partition_disengaged(struct xpc_partition *part)
 		/* cancel the timer function, provided it's not us */
 		if (!in_interrupt()) {
 			del_singleshot_timer_sync(&part->
-						disengage_request_timer);
+						  disengage_request_timer);
 		}
 
 		DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
-				part->act_state != XPC_P_INACTIVE);
+			part->act_state != XPC_P_INACTIVE);
 		if (part->act_state != XPC_P_INACTIVE) {
 			xpc_wakeup_channel_mgr(part);
 		}
@@ -914,7 +877,6 @@ xpc_partition_disengaged(struct xpc_partition *part)
 	return disengaged;
 }
 
-
 /*
  * Mark specified partition as active.
  */
@@ -924,7 +886,6 @@ xpc_mark_partition_active(struct xpc_partition *part)
 	unsigned long irq_flags;
 	enum xpc_retval ret;
 
-
 	dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
 
 	spin_lock_irqsave(&part->act_lock, irq_flags);
@@ -940,17 +901,15 @@ xpc_mark_partition_active(struct xpc_partition *part)
 	return ret;
 }
 
-
 /*
  * Notify XPC that the partition is down.
  */
 void
 xpc_deactivate_partition(const int line, struct xpc_partition *part,
-				enum xpc_retval reason)
+			 enum xpc_retval reason)
 {
 	unsigned long irq_flags;
 
-
 	spin_lock_irqsave(&part->act_lock, irq_flags);
 
 	if (part->act_state == XPC_P_INACTIVE) {
@@ -964,7 +923,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
 	}
 	if (part->act_state == XPC_P_DEACTIVATING) {
 		if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
-					reason == xpcReactivating) {
+		    reason == xpcReactivating) {
 			XPC_SET_REASON(part, reason, line);
 		}
 		spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -982,9 +941,9 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
 
 		/* set a timelimit on the disengage request */
 		part->disengage_request_timeout = jiffies +
-					(xpc_disengage_request_timelimit * HZ);
+		    (xpc_disengage_request_timelimit * HZ);
 		part->disengage_request_timer.expires =
-					part->disengage_request_timeout;
+		    part->disengage_request_timeout;
 		add_timer(&part->disengage_request_timer);
 	}
 
@@ -994,7 +953,6 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
 	xpc_partition_going_down(part, reason);
 }
 
-
 /*
  * Mark specified partition as inactive.
  */
@@ -1003,7 +961,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
 {
 	unsigned long irq_flags;
 
-
 	dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
 		XPC_PARTID(part));
 
@@ -1013,7 +970,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
 	part->remote_rp_pa = 0;
 }
 
-
 /*
  * SAL has provided a partition and machine mask. The partition mask
  * contains a bit for each even nasid in our partition. The machine
@@ -1041,24 +997,22 @@ xpc_discovery(void)
 	u64 *discovered_nasids;
 	enum xpc_retval ret;
 
-
 	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
-						xp_nasid_mask_bytes,
-						GFP_KERNEL, &remote_rp_base);
+						  xp_nasid_mask_bytes,
+						  GFP_KERNEL, &remote_rp_base);
 	if (remote_rp == NULL) {
 		return;
 	}
-	remote_vars = (struct xpc_vars *) remote_rp;
-
+	remote_vars = (struct xpc_vars *)remote_rp;
 
 	discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
-					GFP_KERNEL);
+				    GFP_KERNEL);
 	if (discovered_nasids == NULL) {
 		kfree(remote_rp_base);
 		return;
 	}
 
-	rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
+	rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
 
 	/*
 	 * The term 'region' in this context refers to the minimum number of
@@ -1081,23 +1035,21 @@ xpc_discovery(void)
 
 	for (region = 0; region < max_regions; region++) {
 
-		if ((volatile int) xpc_exiting) {
+		if ((volatile int)xpc_exiting) {
 			break;
 		}
 
 		dev_dbg(xpc_part, "searching region %d\n", region);
 
 		for (nasid = (region * region_size * 2);
-		     nasid < ((region + 1) * region_size * 2);
-		     nasid += 2) {
+		     nasid < ((region + 1) * region_size * 2); nasid += 2) {
 
-			if ((volatile int) xpc_exiting) {
+			if ((volatile int)xpc_exiting) {
 				break;
 			}
 
 			dev_dbg(xpc_part, "checking nasid %d\n", nasid);
 
-
 			if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
 				dev_dbg(xpc_part, "PROM indicates Nasid %d is "
 					"part of the local partition; skipping "
@@ -1119,11 +1071,10 @@ xpc_discovery(void)
 				continue;
 			}
 
-
 			/* pull over the reserved page structure */
 
 			ret = xpc_get_remote_rp(nasid, discovered_nasids,
-					remote_rp, &remote_rp_pa);
+						remote_rp, &remote_rp_pa);
 			if (ret != xpcSuccess) {
 				dev_dbg(xpc_part, "unable to get reserved page "
 					"from nasid %d, reason=%d\n", nasid,
@@ -1140,7 +1091,6 @@ xpc_discovery(void)
 			partid = remote_rp->partid;
 			part = &xpc_partitions[partid];
 
-
 			/* pull over the cross partition variables */
 
 			ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
@@ -1171,15 +1121,15 @@ xpc_discovery(void)
 			 * get the same page for remote_act_amos_pa after
 			 * module reloads and system reboots.
 			 */
-			if (sn_register_xp_addr_region(
-						remote_vars->amos_page_pa,
-							PAGE_SIZE, 1) < 0) {
-				dev_dbg(xpc_part, "partition %d failed to "
+			if (sn_register_xp_addr_region
+			    (remote_vars->amos_page_pa, PAGE_SIZE, 1) < 0) {
+				dev_dbg(xpc_part,
+					"partition %d failed to "
 					"register xp_addr region 0x%016lx\n",
 					partid, remote_vars->amos_page_pa);
 
 				XPC_SET_REASON(part, xpcPhysAddrRegFailed,
-							__LINE__);
+					       __LINE__);
 				break;
 			}
 
@@ -1195,9 +1145,9 @@ xpc_discovery(void)
 				remote_vars->act_phys_cpuid);
 
 			if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
-								version)) {
+							   version)) {
 				part->remote_amos_page_pa =
-						remote_vars->amos_page_pa;
+				    remote_vars->amos_page_pa;
 				xpc_mark_partition_disengaged(part);
 				xpc_cancel_partition_disengage_request(part);
 			}
@@ -1209,7 +1159,6 @@ xpc_discovery(void)
 	kfree(remote_rp_base);
 }
 
-
 /*
  * Given a partid, get the nasids owned by that partition from the
  * remote partition's reserved page.
@@ -1221,7 +1170,6 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
 	u64 part_nasid_pa;
 	int bte_res;
 
-
 	part = &xpc_partitions[partid];
 	if (part->remote_rp_pa == 0) {
 		return xpcPartitionDown;
@@ -1229,11 +1177,11 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
 
 	memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
 
-	part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
+	part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa);
 
-	bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask,
-			xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+	bte_res = xp_bte_copy(part_nasid_pa, (u64)nasid_mask,
+			      xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE),
+			      NULL);
 
 	return xpc_map_bte_errors(bte_res);
 }
-