Diffstat (limited to 'drivers/misc/sgi-xp')

 -rw-r--r--  drivers/misc/sgi-xp/xp.h            |  80
 -rw-r--r--  drivers/misc/sgi-xp/xp_main.c       |  98
 -rw-r--r--  drivers/misc/sgi-xp/xp_nofault.S    |   1
 -rw-r--r--  drivers/misc/sgi-xp/xpc.h           | 442
 -rw-r--r--  drivers/misc/sgi-xp/xpc_partition.c | 328

 5 files changed, 396 insertions(+), 553 deletions(-)
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index fb65981754c3..87171682664d 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -6,30 +6,25 @@
  * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
-
 /*
  * External Cross Partition (XP) structures and defines.
  */
 
-
 #ifndef _DRIVERS_MISC_SGIXP_XP_H
 #define _DRIVERS_MISC_SGIXP_XP_H
 
-
 #include <linux/cache.h>
 #include <linux/hardirq.h>
 #include <linux/mutex.h>
 #include <asm/sn/types.h>
 #include <asm/sn/bte.h>
 
-
 #ifdef USE_DBUG_ON
 #define DBUG_ON(condition)	BUG_ON(condition)
 #else
 #define DBUG_ON(condition)
 #endif
 
-
 /*
  * Define the maximum number of logically defined partitions the system
  * can support. It is constrained by the maximum number of hardware
@@ -43,7 +38,6 @@
  */
 #define XP_MAX_PARTITIONS	64
 
-
 /*
  * Define the number of u64s required to represent all the C-brick nasids
  * as a bitmap.  The cross-partition kernel modules deal only with
@@ -54,7 +48,6 @@
 #define XP_NASID_MASK_BYTES	((XP_MAX_PHYSNODE_ID + 7) / 8)
 #define XP_NASID_MASK_WORDS	((XP_MAX_PHYSNODE_ID + 63) / 64)
 
-
 /*
  * Wrapper for bte_copy() that should it return a failure status will retry
  * the bte_copy() once in the hope that the failure was due to a temporary
@@ -74,7 +67,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 	bte_result_t ret;
 	u64 pdst = ia64_tpa(vdst);
 
-
 	/*
 	 * Ensure that the physically mapped memory is contiguous.
 	 *
@@ -96,7 +88,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 	return ret;
 }
 
-
 /*
  * XPC establishes channel connections between the local partition and any
  * other partition that is currently up. Over these channels, kernel-level
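The two hunks above only drop blank lines inside xp_bte_copy(), but the comment introducing it (end of the previous hunk) describes the actual behavior: retry a failed bte_copy() once, on the theory that the failure was transient. A minimal sketch of that retry-once pattern, assuming bte_copy()'s signature from the context lines and the BTE_SUCCESS status code from <asm/sn/bte.h>:

	/* retry a block-transfer-engine copy once before giving up */
	static inline bte_result_t
	bte_copy_retry_once(u64 src, u64 pdst, u64 len, u64 mode, void *notif)
	{
		bte_result_t ret = bte_copy(src, pdst, len, mode, notif);

		if (ret != BTE_SUCCESS)	/* assume the failure was transient */
			ret = bte_copy(src, pdst, len, mode, notif);
		return ret;
	}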
@@ -122,7 +113,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 #error	XPC_NCHANNELS exceeds MAXIMUM allowed.
 #endif
 
-
 /*
  * The format of an XPC message is as follows:
  *
@@ -160,12 +150,10 @@ struct xpc_msg {
 	u64 payload;		/* user defined portion of message */
 };
 
-
 #define XPC_MSG_PAYLOAD_OFFSET	(u64) (&((struct xpc_msg *)0)->payload)
 #define XPC_MSG_SIZE(_payload_size) \
 		L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))
 
-
 /*
  * Define the return values and values passed to user's callout functions.
  * (It is important to add new value codes at the end just preceding
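XPC_MSG_PAYLOAD_OFFSET above is the classic null-pointer form of offsetof(), and XPC_MSG_SIZE() rounds header plus payload up to an L1 cache line so messages never share a line. A worked illustration of the arithmetic, assuming ia64's 128-byte cache line:

	#include <stddef.h>

	/* equivalent to the null-pointer cast above */
	#define MSG_PAYLOAD_OFFSET	offsetof(struct xpc_msg, payload)

	/* round up to the next cache line; 128 bytes assumed for ia64 */
	#define CACHE_ALIGN(x)		(((x) + 127UL) & ~127UL)

	/* a 64-byte payload therefore occupies one full cache line:
	 * CACHE_ALIGN(MSG_PAYLOAD_OFFSET + 64) == 128 */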
@@ -270,7 +258,6 @@
 	xpcUnknownReason	/* 116: unknown reason -- must be last in list */
 };
 
-
 /*
  * Define the callout function types used by XPC to update the user on
  * connection activity and state changes (via the user function registered by
@@ -375,12 +362,11 @@
  * =====================+================================+=====================
  */
 
-typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid,
-				int ch_number, void *data, void *key);
-
-typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
-				int ch_number, void *key);
+typedef void (*xpc_channel_func) (enum xpc_retval reason, partid_t partid,
+				  int ch_number, void *data, void *key);
 
+typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid,
+				 int ch_number, void *key);
 
 /*
  * The following is a registration entry. There is a global array of these,
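For orientation, a callout matching the xpc_channel_func typedef would look roughly like this; the function name is made up, and the xpcConnected/xpcMsgReceived reason codes are assumed members of enum xpc_retval, whose body is elided from this hunk:

	static void
	my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
			void *data, void *key)
	{
		switch (reason) {
		case xpcConnected:	/* channel to 'partid' is now usable */
			break;
		case xpcMsgReceived:	/* 'data' points at the delivered payload */
			break;
		default:		/* disconnect and error callouts land here */
			break;
		}
	}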
@@ -398,50 +384,45 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
  */
 struct xpc_registration {
 	struct mutex mutex;
 	xpc_channel_func func;	/* function to call */
 	void *key;		/* pointer to user's key */
 	u16 nentries;		/* #of msg entries in local msg queue */
 	u16 msg_size;		/* message queue's message size */
 	u32 assigned_limit;	/* limit on #of assigned kthreads */
 	u32 idle_limit;		/* limit on #of idle kthreads */
 } ____cacheline_aligned;
 
-
 #define XPC_CHANNEL_REGISTERED(_c)	(xpc_registrations[_c].func != NULL)
 
-
 /* the following are valid xpc_allocate() flags */
 #define XPC_WAIT	0	/* wait flag */
 #define XPC_NOWAIT	1	/* no wait flag */
-
 
 struct xpc_interface {
-	void (*connect)(int);
-	void (*disconnect)(int);
-	enum xpc_retval (*allocate)(partid_t, int, u32, void **);
-	enum xpc_retval (*send)(partid_t, int, void *);
-	enum xpc_retval (*send_notify)(partid_t, int, void *,
-					xpc_notify_func, void *);
-	void (*received)(partid_t, int, void *);
-	enum xpc_retval (*partid_to_nasids)(partid_t, void *);
+	void (*connect) (int);
+	void (*disconnect) (int);
+	enum xpc_retval (*allocate) (partid_t, int, u32, void **);
+	enum xpc_retval (*send) (partid_t, int, void *);
+	enum xpc_retval (*send_notify) (partid_t, int, void *,
+					xpc_notify_func, void *);
+	void (*received) (partid_t, int, void *);
+	enum xpc_retval (*partid_to_nasids) (partid_t, void *);
 };
 
-
 extern struct xpc_interface xpc_interface;
 
 extern void xpc_set_interface(void (*)(int),
 			      void (*)(int),
 			      enum xpc_retval (*)(partid_t, int, u32, void **),
 			      enum xpc_retval (*)(partid_t, int, void *),
-			      enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func,
-					void *),
+			      enum xpc_retval (*)(partid_t, int, void *,
+						  xpc_notify_func, void *),
 			      void (*)(partid_t, int, void *),
 			      enum xpc_retval (*)(partid_t, void *));
 extern void xpc_clear_interface(void);
 
-
 extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
 				   u16, u32, u32);
 extern void xpc_disconnect(int);
 
 static inline enum xpc_retval
@@ -458,7 +439,7 @@ xpc_send(partid_t partid, int ch_number, void *payload)
 
 static inline enum xpc_retval
 xpc_send_notify(partid_t partid, int ch_number, void *payload,
-	xpc_notify_func func, void *key)
+		xpc_notify_func func, void *key)
 {
 	return xpc_interface.send_notify(partid, ch_number, payload, func, key);
 }
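Putting the registration entry and the inline wrappers together, the client-side flow is: register once, then allocate/fill/send per message. A hedged usage sketch; the channel number, sizes, and kthread limits are illustrative, my_channel_func is the hypothetical callout sketched earlier, and xpc_allocate() is the sibling inline wrapper declared in this header:

	#define MY_CHANNEL	3	/* illustrative channel number */
	#define MY_MSG_SIZE	64	/* payload bytes per message */
	#define MY_NENTRIES	128	/* depth of the local message queue */

	static int
	my_client_init(void)
	{
		/* XPC will connect MY_CHANNEL to each partition that comes
		 * up and report events through my_channel_func() */
		return xpc_connect(MY_CHANNEL, my_channel_func, NULL,
				   MY_MSG_SIZE, MY_NENTRIES, 8, 2) == xpcSuccess
			? 0 : -EAGAIN;
	}

	static enum xpc_retval
	my_send_one(partid_t partid)
	{
		void *payload;
		enum xpc_retval ret;

		ret = xpc_allocate(partid, MY_CHANNEL, XPC_WAIT, &payload);
		if (ret == xpcSuccess) {
			/* ... fill in the MY_MSG_SIZE-byte payload ... */
			ret = xpc_send(partid, MY_CHANNEL, payload);
		}
		return ret;
	}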
@@ -475,11 +456,8 @@ xpc_partid_to_nasids(partid_t partid, void *nasids)
 	return xpc_interface.partid_to_nasids(partid, nasids);
 }
 
-
 extern u64 xp_nofault_PIOR_target;
 extern int xp_nofault_PIOR(void *);
 extern int xp_error_PIOR(void);
 
-
 #endif /* _DRIVERS_MISC_SGIXP_XP_H */
-
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 5f9f9c2e9298..bb9257642fcf 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -6,7 +6,6 @@
  * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition (XP) base.
  *
@@ -15,7 +14,6 @@
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -24,49 +22,49 @@
 #include <asm/sn/sn_sal.h>
 #include "xp.h"
 
-
 /*
  * Target of nofault PIO read.
  */
 u64 xp_nofault_PIOR_target;
 
-
 /*
  * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
  * users of XPC.
  */
 struct xpc_registration xpc_registrations[XPC_NCHANNELS];
 
-
 /*
  * Initialize the XPC interface to indicate that XPC isn't loaded.
  */
-static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
+static enum xpc_retval
+xpc_notloaded(void)
+{
+	return xpcNotLoaded;
+}
 
 struct xpc_interface xpc_interface = {
-	(void (*)(int)) xpc_notloaded,
-	(void (*)(int)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
-	xpc_notloaded,
-	(void (*)(partid_t, int, void *)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
+	(void (*)(int))xpc_notloaded,
+	(void (*)(int))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *))
+	    xpc_notloaded,
+	(void (*)(partid_t, int, void *))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, void *))xpc_notloaded
 };
 
-
 /*
  * XPC calls this when it (the XPC module) has been loaded.
  */
 void
-xpc_set_interface(void (*connect)(int),
-		  void (*disconnect)(int),
-		  enum xpc_retval (*allocate)(partid_t, int, u32, void **),
-		  enum xpc_retval (*send)(partid_t, int, void *),
-		  enum xpc_retval (*send_notify)(partid_t, int, void *,
-						  xpc_notify_func, void *),
-		  void (*received)(partid_t, int, void *),
-		  enum xpc_retval (*partid_to_nasids)(partid_t, void *))
+xpc_set_interface(void (*connect) (int),
+		  void (*disconnect) (int),
+		  enum xpc_retval (*allocate) (partid_t, int, u32, void **),
+		  enum xpc_retval (*send) (partid_t, int, void *),
+		  enum xpc_retval (*send_notify) (partid_t, int, void *,
+						  xpc_notify_func, void *),
+		  void (*received) (partid_t, int, void *),
+		  enum xpc_retval (*partid_to_nasids) (partid_t, void *))
 {
 	xpc_interface.connect = connect;
 	xpc_interface.disconnect = disconnect;
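A side note on the structure being reindented here: xpc_interface starts life with every operation pointing at xpc_notloaded(), so XP's callers get a clean xpcNotLoaded return rather than a NULL dereference until the XPC module installs real handlers through xpc_set_interface(). The same stub-table pattern reduced to a hypothetical minimum:

	struct ops {
		int (*send)(int arg);
	};

	static int not_loaded(void)
	{
		return -1;		/* stands in for xpcNotLoaded */
	}

	/* every slot starts out as a cast of the same stub... */
	static struct ops ops = {
		.send = (int (*)(int))not_loaded,
	};

	/* ...and the provider swaps real handlers in when it loads (and
	 * back out on unload, exactly what xpc_clear_interface() does) */
	static void set_ops(int (*send)(int arg))
	{
		ops.send = send;
	}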
@@ -77,28 +75,27 @@ xpc_set_interface(void (*connect)(int),
 	xpc_interface.partid_to_nasids = partid_to_nasids;
 }
 
-
 /*
  * XPC calls this when it (the XPC module) is being unloaded.
  */
 void
 xpc_clear_interface(void)
 {
-	xpc_interface.connect = (void (*)(int)) xpc_notloaded;
-	xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
-	xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
-					void **)) xpc_notloaded;
-	xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
-					xpc_notloaded;
-	xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
-					xpc_notify_func, void *)) xpc_notloaded;
+	xpc_interface.connect = (void (*)(int))xpc_notloaded;
+	xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
+	xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32,
+						     void **))xpc_notloaded;
+	xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *))
+	    xpc_notloaded;
+	xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *,
+							xpc_notify_func,
+							void *))xpc_notloaded;
 	xpc_interface.received = (void (*)(partid_t, int, void *))
-					xpc_notloaded;
+	    xpc_notloaded;
 	xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *))
-					xpc_notloaded;
+	    xpc_notloaded;
 }
 
-
 /*
  * Register for automatic establishment of a channel connection whenever
  * a partition comes up.
@@ -125,11 +122,10 @@ xpc_clear_interface(void)
  */
 enum xpc_retval
 xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
-		u16 nentries, u32 assigned_limit, u32 idle_limit)
+	    u16 nentries, u32 assigned_limit, u32 idle_limit)
 {
 	struct xpc_registration *registration;
 
-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 	DBUG_ON(payload_size == 0 || nentries == 0);
 	DBUG_ON(func == NULL);
@@ -162,7 +158,6 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 	return xpcSuccess;
 }
 
-
 /*
  * Remove the registration for automatic connection of the specified channel
  * when a partition comes up.
@@ -181,7 +176,6 @@ xpc_disconnect(int ch_number)
 {
 	struct xpc_registration *registration;
 
-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 
 	registration = &xpc_registrations[ch_number];
@@ -214,14 +208,12 @@ xpc_disconnect(int ch_number)
 	return;
 }
 
-
 int __init
 xp_init(void)
 {
 	int ret, ch_number;
-	u64 func_addr = *(u64 *) xp_nofault_PIOR;
-	u64 err_func_addr = *(u64 *) xp_error_PIOR;
-
+	u64 func_addr = *(u64 *)xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
 	if (!ia64_platform_is("sn2")) {
 		return -ENODEV;
@@ -237,9 +229,9 @@ xp_init(void)
 	 * work around).
 	 */
 	if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
-			err_func_addr, 1, 1)) != 0) {
+					    err_func_addr, 1, 1)) != 0) {
 		printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
-			ret);
+		       ret);
 	}
 	/*
 	 * Setup the nofault PIO read target. (There is no special reason why
@@ -258,22 +250,21 @@ xp_init(void)
 
 	return 0;
 }
-module_init(xp_init);
 
+module_init(xp_init);
 
 void __exit
 xp_exit(void)
 {
-	u64 func_addr = *(u64 *) xp_nofault_PIOR;
-	u64 err_func_addr = *(u64 *) xp_error_PIOR;
-
+	u64 func_addr = *(u64 *)xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
 	/* unregister the PIO read nofault code region */
-	(void) sn_register_nofault_code(func_addr, err_func_addr,
-			err_func_addr, 1, 0);
+	(void)sn_register_nofault_code(func_addr, err_func_addr,
+				       err_func_addr, 1, 0);
 }
-module_exit(xp_exit);
 
+module_exit(xp_exit);
 
 MODULE_AUTHOR("Silicon Graphics, Inc.");
 MODULE_DESCRIPTION("Cross Partition (XP) base");
@@ -287,4 +278,3 @@ EXPORT_SYMBOL(xpc_clear_interface);
 EXPORT_SYMBOL(xpc_set_interface);
 EXPORT_SYMBOL(xpc_connect);
 EXPORT_SYMBOL(xpc_disconnect);
-
diff --git a/drivers/misc/sgi-xp/xp_nofault.S b/drivers/misc/sgi-xp/xp_nofault.S
index c13a709c4db5..e38d43319429 100644
--- a/drivers/misc/sgi-xp/xp_nofault.S
+++ b/drivers/misc/sgi-xp/xp_nofault.S
@@ -6,7 +6,6 @@
  * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * The xp_nofault_PIOR function takes a pointer to a remote PIO register
  * and attempts to load and consume a value from it.  This function
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 14e70ee53ebe..64368bb88890 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -6,7 +6,6 @@
  * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) structures and macros.
  */
@@ -14,7 +13,6 @@
 #ifndef _DRIVERS_MISC_SGIXP_XPC_H
 #define _DRIVERS_MISC_SGIXP_XPC_H
 
-
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
 #include <linux/device.h>
@@ -29,7 +27,6 @@
 #include <asm/sn/shub_mmr.h>
 #include "xp.h"
 
-
 /*
  * XPC Version numbers consist of a major and minor number. XPC can always
  * talk to versions with same major #, and never talk to versions with a
@@ -39,7 +36,6 @@
 #define XPC_VERSION_MAJOR(_v)	((_v) >> 4)
 #define XPC_VERSION_MINOR(_v)	((_v) & 0xf)
 
-
 /*
  * The next macros define word or bit representations for given
  * C-brick nasid in either the SAL provided bit array representing
@@ -67,7 +63,6 @@
 /* define the process name of the discovery thread */
 #define XPC_DISCOVERY_THREAD_NAME	"xpc_discovery"
 
-
 /*
  * the reserved page
  *
@@ -121,7 +116,7 @@ struct xpc_rsvd_page {
 	u64 nasids_size;	/* SAL: size of each nasid mask in bytes */
 };
 
-#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
+#define XPC_RP_VERSION _XPC_VERSION(1,1)	/* version 1.1 of the reserved page */
 
 #define XPC_SUPPORTS_RP_STAMP(_version) \
 		(_version >= _XPC_VERSION(1,1))
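_XPC_VERSION itself is not in this hunk, but the accessors from the earlier xpc.h hunk (MAJOR == _v >> 4, MINOR == _v & 0xf) imply it packs the major number into the high nibble and the minor into the low one, which is also why plain >= comparisons order versions correctly:

	/* inferred from the XPC_VERSION_MAJOR/MINOR accessors above */
	#define _XPC_VERSION(_maj, _min)	(((_maj) << 4) | (_min))

	/*
	 * _XPC_VERSION(1,1) == 0x11:
	 *   XPC_VERSION_MAJOR(0x11) == 1, XPC_VERSION_MINOR(0x11) == 1
	 * and (_version >= _XPC_VERSION(1,1)) compares majors first
	 * because the major occupies the more significant bits.
	 */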
@@ -138,14 +133,12 @@ xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
 {
 	int ret;
 
-
 	if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
 		ret = stamp1->tv_nsec - stamp2->tv_nsec;
 	}
 	return ret;
 }
 
-
 /*
  * Define the structures by which XPC variables can be exported to other
  * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
@@ -172,12 +165,11 @@
 	AMO_t *amos_page;	/* vaddr of page of AMOs from MSPEC driver */
 };
 
-#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
+#define XPC_V_VERSION _XPC_VERSION(3,1)	/* version 3.1 of the cross vars */
 
 #define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
 		(_version >= _XPC_VERSION(3,1))
 
-
 static inline int
 xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
 {
@@ -193,7 +185,7 @@ xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
 		old_mask = vars->heartbeating_to_mask;
 		new_mask = (old_mask | (1UL << partid));
 	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-			old_mask);
+		 old_mask);
 }
 
 static inline void
@@ -205,10 +197,9 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
 		old_mask = vars->heartbeating_to_mask;
 		new_mask = (old_mask & ~(1UL << partid));
 	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-			old_mask);
+		 old_mask);
 }
 
-
 /*
  * The AMOs page consists of a number of AMO variables which are divided into
  * four groups, The first two groups are used to identify an IRQ's sender.
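xpc_allow_hb() and xpc_disallow_hb() above are the standard lock-free read-modify-write loop: snapshot the mask, compute the new value, and retry if cmpxchg() shows another CPU got there first. The same pattern as a standalone C11 sketch:

	#include <stdatomic.h>
	#include <stdint.h>

	/* set 'partid''s bit in a shared mask without taking a lock */
	static void allow_bit(_Atomic uint64_t *mask, int partid)
	{
		uint64_t old_mask = atomic_load(mask);
		uint64_t new_mask;

		do {
			new_mask = old_mask | (UINT64_C(1) << partid);
			/* on failure old_mask is refreshed automatically */
		} while (!atomic_compare_exchange_weak(mask, &old_mask,
						       new_mask));
	}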
@@ -222,7 +213,6 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
 #define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
 #define XPC_DISENGAGE_REQUEST_AMO  (XPC_ENGAGED_PARTITIONS_AMO + 1)
 
-
 /*
  * The following structure describes the per partition specific variables.
  *
@@ -257,9 +247,8 @@
  * MAGIC2 indicates that this partition has pulled the remote partititions
  * per partition variables that pertain to this partition.
  */
-#define XPC_VP_MAGIC1	0x0053524156435058L  /* 'XPCVARS\0'L (little endian) */
-#define XPC_VP_MAGIC2	0x0073726176435058L  /* 'XPCvars\0'L (little endian) */
-
+#define XPC_VP_MAGIC1	0x0053524156435058L	/* 'XPCVARS\0'L (little endian) */
+#define XPC_VP_MAGIC2	0x0073726176435058L	/* 'XPCvars\0'L (little endian) */
 
 /* the reserved page sizes and offsets */
 
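The two magic values are eight ASCII bytes stored little-endian in a u64, as their comments say. Decoding 0x0053524156435058 low byte first gives 'X','P','C','V','A','R','S','\0'; a quick standalone check:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint64_t magic1 = 0x0053524156435058ULL;	/* XPC_VP_MAGIC1 */
		char buf[9] = { 0 };

		/* little-endian: the low-order byte lands first in memory */
		memcpy(buf, &magic1, 8);
		printf("%s\n", buf);	/* prints XPCVARS */
		return 0;
	}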
@@ -271,7 +260,6 @@
 #define XPC_RP_VARS(_rp) ((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
 #define XPC_RP_VARS_PART(_rp) (struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE)
 
-
 /*
  * Functions registered by add_timer() or called by kernel_thread() only
  * allow for a single 64-bit argument. The following macros can be used to
@@ -285,8 +273,6 @@
 #define XPC_UNPACK_ARG1(_args)	(((u64) _args) & 0xffffffff)
 #define XPC_UNPACK_ARG2(_args)	((((u64) _args) >> 32) & 0xffffffff)
 
-
-
 /*
  * Define a Get/Put value pair (pointers) used with a message queue.
  */
@@ -298,8 +284,6 @@ struct xpc_gp {
 #define XPC_GP_SIZE \
 		L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
 
-
-
 /*
  * Define a structure that contains arguments associated with opening and
  * closing a channel.
@@ -315,20 +299,15 @@ struct xpc_openclose_args {
 #define XPC_OPENCLOSE_ARGS_SIZE \
 	      L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
 
-
-
 /* struct xpc_msg flags */
 
 #define	XPC_M_DONE		0x01	/* msg has been received/consumed */
 #define	XPC_M_READY		0x02	/* msg is ready to be sent */
 #define	XPC_M_INTERRUPT		0x04	/* send interrupt when msg consumed */
 
-
 #define XPC_MSG_ADDRESS(_payload) \
 		((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
 
-
-
 /*
  * Defines notify entry.
  *
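XPC_MSG_ADDRESS() is the inverse of handing a payload pointer to the user: because the payload sits at a fixed offset inside struct xpc_msg, stepping back by XPC_MSG_PAYLOAD_OFFSET recovers the enclosing message, the same idea as the kernel's container_of(). A sketch of how XPC can get from the user's pointer back to the flags defined above (the helper name is made up):

	/* recover the message header from a user-visible payload pointer */
	static inline struct xpc_msg *
	payload_to_msg(void *payload)
	{
		return (struct xpc_msg *)((u8 *)payload -
					  XPC_MSG_PAYLOAD_OFFSET);
	}

	/* e.g. mark a filled-in message ready to be sent:
	 *	payload_to_msg(payload)->flags |= XPC_M_READY;	*/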
@@ -336,19 +315,17 @@ struct xpc_openclose_args {
  * and consumed by the intended recipient.
  */
 struct xpc_notify {
-	volatile u8 type;		/* type of notification */
+	volatile u8 type;	/* type of notification */
 
 	/* the following two fields are only used if type == XPC_N_CALL */
-	xpc_notify_func func;		/* user's notify function */
-	void *key;			/* pointer to user's key */
+	xpc_notify_func func;	/* user's notify function */
+	void *key;		/* pointer to user's key */
 };
 
 /* struct xpc_notify type of notification */
 
-#define XPC_N_CALL		0x01	/* notify function provided by user */
+#define XPC_N_CALL	0x01	/* notify function provided by user */
 
-
-
 /*
  * Define the structure that manages all the stuff required by a channel. In
  * particular, they are used to manage the messages sent across the channel.
@@ -428,48 +405,48 @@
  * messages.
  */
 struct xpc_channel {
 	partid_t partid;	/* ID of remote partition connected */
 	spinlock_t lock;	/* lock for updating this structure */
 	u32 flags;		/* general flags */
 
 	enum xpc_retval reason;	/* reason why channel is disconnect'g */
 	int reason_line;	/* line# disconnect initiated from */
 
 	u16 number;		/* channel # */
 
 	u16 msg_size;		/* sizeof each msg entry */
 	u16 local_nentries;	/* #of msg entries in local msg queue */
-	u16 remote_nentries;	/* #of msg entries in remote msg queue*/
+	u16 remote_nentries;	/* #of msg entries in remote msg queue */
 
 	void *local_msgqueue_base;	/* base address of kmalloc'd space */
 	struct xpc_msg *local_msgqueue;	/* local message queue */
 	void *remote_msgqueue_base;	/* base address of kmalloc'd space */
-	struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
-					/* local message queue */
+	struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
+					 /* local message queue */
 	u64 remote_msgqueue_pa;	/* phys addr of remote partition's */
 				/* local message queue */
 
 	atomic_t references;	/* #of external references to queues */
 
 	atomic_t n_on_msg_allocate_wq;	/* #on msg allocation wait queue */
 	wait_queue_head_t msg_allocate_wq;	/* msg allocation wait queue */
 
 	u8 delayed_IPI_flags;	/* IPI flags received, but delayed */
 				/* action until channel disconnected */
 
 	/* queue of msg senders who want to be notified when msg received */
 
 	atomic_t n_to_notify;	/* #of msg senders to notify */
-	struct xpc_notify *notify_queue;/* notify queue for messages sent */
+	struct xpc_notify *notify_queue;	/* notify queue for messages sent */
 
 	xpc_channel_func func;	/* user's channel function */
 	void *key;		/* pointer to user's key */
 
 	struct mutex msg_to_pull_mutex;	/* next msg to pull serialization */
 	struct completion wdisconnect_wait;	/* wait for channel disconnect */
 
 	struct xpc_openclose_args *local_openclose_args; /* args passed on */
 					/* opening or closing of channel */
 
 	/* various flavors of local and remote Get/Put values */
 
@@ -477,7 +454,7 @@ struct xpc_channel {
 	struct xpc_gp remote_GP;	/* remote Get/Put values */
 	struct xpc_gp w_local_GP;	/* working local Get/Put values */
 	struct xpc_gp w_remote_GP;	/* working remote Get/Put values */
-	s64 next_msg_to_pull;	/* Put value of next msg to pull */
+	s64 next_msg_to_pull;		/* Put value of next msg to pull */
 
 	/* kthread management related fields */
 
@@ -485,48 +462,45 @@
 // >>> allow the assigned limit be unbounded and let the idle limit be dynamic
 // >>> dependent on activity over the last interval of time
 	atomic_t kthreads_assigned;	/* #of kthreads assigned to channel */
 	u32 kthreads_assigned_limit;	/* limit on #of kthreads assigned */
 	atomic_t kthreads_idle;	/* #of kthreads idle waiting for work */
 	u32 kthreads_idle_limit;	/* limit on #of kthreads idle */
 	atomic_t kthreads_active;	/* #of kthreads actively working */
 // >>> following field is temporary
 	u32 kthreads_created;	/* total #of kthreads created */
 
 	wait_queue_head_t idle_wq;	/* idle kthread wait queue */
 
 } ____cacheline_aligned;
 
-
 /* struct xpc_channel flags */
 
 #define	XPC_C_WASCONNECTED	0x00000001 /* channel was connected */
 
 #define	XPC_C_ROPENREPLY	0x00000002 /* remote open channel reply */
 #define	XPC_C_OPENREPLY		0x00000004 /* local open channel reply */
 #define	XPC_C_ROPENREQUEST	0x00000008 /* remote open channel request */
 #define	XPC_C_OPENREQUEST	0x00000010 /* local open channel request */
 
 #define	XPC_C_SETUP		0x00000020 /* channel's msgqueues are alloc'd */
 #define	XPC_C_CONNECTEDCALLOUT	0x00000040 /* connected callout initiated */
 #define	XPC_C_CONNECTEDCALLOUT_MADE \
 				0x00000080 /* connected callout completed */
 #define	XPC_C_CONNECTED		0x00000100 /* local channel is connected */
 #define	XPC_C_CONNECTING	0x00000200 /* channel is being connected */
 
 #define	XPC_C_RCLOSEREPLY	0x00000400 /* remote close channel reply */
 #define	XPC_C_CLOSEREPLY	0x00000800 /* local close channel reply */
 #define	XPC_C_RCLOSEREQUEST	0x00001000 /* remote close channel request */
 #define	XPC_C_CLOSEREQUEST	0x00002000 /* local close channel request */
 
 #define	XPC_C_DISCONNECTED	0x00004000 /* channel is disconnected */
 #define	XPC_C_DISCONNECTING	0x00008000 /* channel is being disconnected */
 #define	XPC_C_DISCONNECTINGCALLOUT \
 				0x00010000 /* disconnecting callout initiated */
 #define	XPC_C_DISCONNECTINGCALLOUT_MADE \
 				0x00020000 /* disconnecting callout completed */
 #define	XPC_C_WDISCONNECT	0x00040000 /* waiting for channel disconnect */
-
-
 
 /*
  * Manages channels on a partition basis. There is one of these structures
@@ -537,33 +511,31 @@ struct xpc_partition {
 
 	/* XPC HB infrastructure */
 
 	u8 remote_rp_version;	/* version# of partition's rsvd pg */
-	struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */
+	struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */
 	u64 remote_rp_pa;	/* phys addr of partition's rsvd pg */
 	u64 remote_vars_pa;	/* phys addr of partition's vars */
 	u64 remote_vars_part_pa;	/* phys addr of partition's vars part */
 	u64 last_heartbeat;	/* HB at last read */
 	u64 remote_amos_page_pa;	/* phys addr of partition's amos page */
 	int remote_act_nasid;	/* active part's act/deact nasid */
 	int remote_act_phys_cpuid;	/* active part's act/deact phys cpuid */
 	u32 act_IRQ_rcvd;	/* IRQs since activation */
 	spinlock_t act_lock;	/* protect updating of act_state */
 	u8 act_state;		/* from XPC HB viewpoint */
 	u8 remote_vars_version;	/* version# of partition's vars */
 	enum xpc_retval reason;	/* reason partition is deactivating */
 	int reason_line;	/* line# deactivation initiated from */
 	int reactivate_nasid;	/* nasid in partition to reactivate */
 
 	unsigned long disengage_request_timeout;	/* timeout in jiffies */
 	struct timer_list disengage_request_timer;
 
-
 	/* XPC infrastructure referencing and teardown control */
 
 	volatile u8 setup_state;	/* infrastructure setup state */
 	wait_queue_head_t teardown_wq;	/* kthread waiting to teardown infra */
 	atomic_t references;	/* #of references to infrastructure */
-
 
 	/*
 	 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
@@ -572,53 +544,48 @@ struct xpc_partition {
 	 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
 	 */
 
-
-	u8 nchannels;		/* #of defined channels supported */
-	atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
-	atomic_t nchannels_engaged;/* #of channels engaged with remote part */
-	struct xpc_channel *channels;/* array of channel structures */
-
-	void *local_GPs_base;	/* base address of kmalloc'd space */
-	struct xpc_gp *local_GPs;	/* local Get/Put values */
-	void *remote_GPs_base;	/* base address of kmalloc'd space */
-	struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
-				/* values */
-	u64 remote_GPs_pa;	/* phys address of remote partition's local */
-				/* Get/Put values */
-
+	u8 nchannels;		/* #of defined channels supported */
+	atomic_t nchannels_active;	/* #of channels that are not DISCONNECTED */
+	atomic_t nchannels_engaged;	/* #of channels engaged with remote part */
+	struct xpc_channel *channels;	/* array of channel structures */
+
+	void *local_GPs_base;	/* base address of kmalloc'd space */
+	struct xpc_gp *local_GPs;	/* local Get/Put values */
+	void *remote_GPs_base;	/* base address of kmalloc'd space */
+	struct xpc_gp *remote_GPs;	/* copy of remote partition's local Get/Put */
+	/* values */
+	u64 remote_GPs_pa;	/* phys address of remote partition's local */
+	/* Get/Put values */
 
 	/* fields used to pass args when opening or closing a channel */
 
 	void *local_openclose_args_base;	/* base address of kmalloc'd space */
 	struct xpc_openclose_args *local_openclose_args;	/* local's args */
 	void *remote_openclose_args_base;	/* base address of kmalloc'd space */
 	struct xpc_openclose_args *remote_openclose_args;	/* copy of remote's */
 	/* args */
 	u64 remote_openclose_args_pa;	/* phys addr of remote's args */
-
 
 	/* IPI sending, receiving and handling related fields */
 
 	int remote_IPI_nasid;	/* nasid of where to send IPIs */
 	int remote_IPI_phys_cpuid;	/* phys CPU ID of where to send IPIs */
 	AMO_t *remote_IPI_amo_va;	/* address of remote IPI AMO_t structure */
 
 	AMO_t *local_IPI_amo_va;	/* address of IPI AMO_t structure */
 	u64 local_IPI_amo;	/* IPI amo flags yet to be handled */
 	char IPI_owner[8];	/* IPI owner's name */
 	struct timer_list dropped_IPI_timer;	/* dropped IPI timer */
-
-	spinlock_t IPI_lock;	/* IPI handler lock */
 
+	spinlock_t IPI_lock;	/* IPI handler lock */
 
 	/* channel manager related fields */
 
 	atomic_t channel_mgr_requests;	/* #of requests to activate chan mgr */
 	wait_queue_head_t channel_mgr_wq;	/* channel mgr's wait queue */
 
 } ____cacheline_aligned;
 
-
 /* struct xpc_partition act_state values (for XPC HB) */
 
 #define	XPC_P_INACTIVE		0x00	/* partition is not active */
@@ -627,11 +594,9 @@ struct xpc_partition {
 #define	XPC_P_ACTIVE		0x03	/* xpc_partition_up() was called */
 #define	XPC_P_DEACTIVATING	0x04	/* partition deactivation initiated */
 
-
 #define XPC_DEACTIVATE_PARTITION(_p, _reason) \
 			xpc_deactivate_partition(__LINE__, (_p), (_reason))
 
-
 /* struct xpc_partition setup_state values */
 
 #define	XPC_P_UNSET		0x00	/* infrastructure was never setup */
@@ -639,8 +604,6 @@ struct xpc_partition {
 #define	XPC_P_WTEARDOWN		0x02	/* waiting to teardown infrastructure */
 #define	XPC_P_TORNDOWN		0x03	/* infrastructure is torndown */
 
-
-
 /*
  * struct xpc_partition IPI_timer #of seconds to wait before checking for
  * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
@@ -648,22 +611,17 @@ struct xpc_partition {
  */
 #define XPC_P_DROPPED_IPI_WAIT	(0.25 * HZ)
 
-
 /* number of seconds to wait for other partitions to disengage */
 #define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT	90
 
 /* interval in seconds to print 'waiting disengagement' messages */
 #define XPC_DISENGAGE_PRINTMSG_INTERVAL		10
 
-
 #define XPC_PARTID(_p)	((partid_t) ((_p) - &xpc_partitions[0]))
 
-
-
 /* found in xp_main.c */
 extern struct xpc_registration xpc_registrations[];
 
-
 /* found in xpc_main.c */
 extern struct device *xpc_part;
 extern struct device *xpc_chan;
@@ -676,7 +634,6 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int);
 extern void xpc_create_kthreads(struct xpc_channel *, int, int);
 extern void xpc_disconnect_wait(int);
 
-
 /* found in xpc_partition.c */
 extern int xpc_exiting;
 extern struct xpc_vars *xpc_vars;
@@ -696,10 +653,9 @@ extern void xpc_mark_partition_inactive(struct xpc_partition *); | |||
696 | extern void xpc_discovery(void); | 653 | extern void xpc_discovery(void); |
697 | extern void xpc_check_remote_hb(void); | 654 | extern void xpc_check_remote_hb(void); |
698 | extern void xpc_deactivate_partition(const int, struct xpc_partition *, | 655 | extern void xpc_deactivate_partition(const int, struct xpc_partition *, |
699 | enum xpc_retval); | 656 | enum xpc_retval); |
700 | extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); | 657 | extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); |
701 | 658 | ||
702 | |||
703 | /* found in xpc_channel.c */ | 659 | /* found in xpc_channel.c */ |
704 | extern void xpc_initiate_connect(int); | 660 | extern void xpc_initiate_connect(int); |
705 | extern void xpc_initiate_disconnect(int); | 661 | extern void xpc_initiate_disconnect(int); |
@@ -714,13 +670,11 @@ extern void xpc_process_channel_activity(struct xpc_partition *); | |||
714 | extern void xpc_connected_callout(struct xpc_channel *); | 670 | extern void xpc_connected_callout(struct xpc_channel *); |
715 | extern void xpc_deliver_msg(struct xpc_channel *); | 671 | extern void xpc_deliver_msg(struct xpc_channel *); |
716 | extern void xpc_disconnect_channel(const int, struct xpc_channel *, | 672 | extern void xpc_disconnect_channel(const int, struct xpc_channel *, |
717 | enum xpc_retval, unsigned long *); | 673 | enum xpc_retval, unsigned long *); |
718 | extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval); | 674 | extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval); |
719 | extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval); | 675 | extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval); |
720 | extern void xpc_teardown_infrastructure(struct xpc_partition *); | 676 | extern void xpc_teardown_infrastructure(struct xpc_partition *); |
721 | 677 | ||
722 | |||
723 | |||
724 | static inline void | 678 | static inline void |
725 | xpc_wakeup_channel_mgr(struct xpc_partition *part) | 679 | xpc_wakeup_channel_mgr(struct xpc_partition *part) |
726 | { | 680 | { |
@@ -729,8 +683,6 @@ xpc_wakeup_channel_mgr(struct xpc_partition *part) | |||
729 | } | 683 | } |
730 | } | 684 | } |
731 | 685 | ||
732 | |||
733 | |||
734 | /* | 686 | /* |
735 | * These next two inlines are used to keep us from tearing down a channel's | 687 | * These next two inlines are used to keep us from tearing down a channel's |
736 | * msg queues while a thread may be referencing them. | 688 | * msg queues while a thread may be referencing them. |
@@ -752,12 +704,9 @@ xpc_msgqueue_deref(struct xpc_channel *ch) | |||
752 | } | 704 | } |
753 | } | 705 | } |
754 | 706 | ||
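
The ref/deref pair above guards a channel's message queues with a plain reference count: takers bump the count before touching the queues and back out if a disconnect has begun, and teardown only proceeds once the count drains. A minimal userspace model of that back-out pattern, using C11 atomics in place of the kernel's atomic_t (the struct and all names below are invented for the sketch, not XPC's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* toy channel: just the pieces the ref/deref dance needs */
    struct chan {
        atomic_int references;
        bool disconnecting;         /* teardown has been requested */
    };

    /* take a reference before touching the msg queues; back out if the
     * channel is already being torn down */
    static bool msgqueue_ref(struct chan *ch)
    {
        atomic_fetch_add(&ch->references, 1);
        if (ch->disconnecting) {
            atomic_fetch_sub(&ch->references, 1);
            return false;
        }
        return true;
    }

    /* drop the reference; teardown frees the queues only after this
     * count has drained to zero */
    static void msgqueue_deref(struct chan *ch)
    {
        if (atomic_fetch_sub(&ch->references, 1) == 1 && ch->disconnecting)
            printf("last user gone, msg queues can be freed\n");
    }

    int main(void)
    {
        struct chan ch = { 0 };

        if (msgqueue_ref(&ch)) {
            /* ... safe to use ch's msg queues here ... */
            msgqueue_deref(&ch);
        }
        return 0;
    }
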
755 | |||
756 | |||
757 | #define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ | 707 | #define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ |
758 | xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) | 708 | xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) |
759 | 709 | ||
760 | |||
761 | /* | 710 | /* |
762 | * These two inlines are used to keep us from tearing down a partition's | 711 | * These two inlines are used to keep us from tearing down a partition's |
763 | * setup infrastructure while a thread may be referencing it. | 712 | * setup infrastructure while a thread may be referencing it. |
@@ -767,7 +716,6 @@ xpc_part_deref(struct xpc_partition *part) | |||
767 | { | 716 | { |
768 | s32 refs = atomic_dec_return(&part->references); | 717 | s32 refs = atomic_dec_return(&part->references); |
769 | 718 | ||
770 | |||
771 | DBUG_ON(refs < 0); | 719 | DBUG_ON(refs < 0); |
772 | if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) { | 720 | if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) { |
773 | wake_up(&part->teardown_wq); | 721 | wake_up(&part->teardown_wq); |
@@ -779,7 +727,6 @@ xpc_part_ref(struct xpc_partition *part) | |||
779 | { | 727 | { |
780 | int setup; | 728 | int setup; |
781 | 729 | ||
782 | |||
783 | atomic_inc(&part->references); | 730 | atomic_inc(&part->references); |
784 | setup = (part->setup_state == XPC_P_SETUP); | 731 | setup = (part->setup_state == XPC_P_SETUP); |
785 | if (!setup) { | 732 | if (!setup) { |
@@ -788,8 +735,6 @@ xpc_part_ref(struct xpc_partition *part) | |||
788 | return setup; | 735 | return setup; |
789 | } | 736 | } |
790 | 737 | ||
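
xpc_part_ref()/xpc_part_deref() apply the same idea at partition scope with one extra wrinkle visible above: a reference is only granted while setup_state is XPC_P_SETUP, and the final deref during XPC_P_WTEARDOWN wakes whoever is blocked on teardown_wq. A rough pthreads model of that handshake (the kernel version is lock-free; the mutex and condition variable here only exist to make the sketch self-contained and runnable):

    #include <pthread.h>
    #include <stdbool.h>

    enum setup_state { P_UNSET, P_SETUP, P_WTEARDOWN, P_TORNDOWN };

    struct part {
        pthread_mutex_t lock;
        pthread_cond_t teardown_wq;     /* models part->teardown_wq */
        int references;
        enum setup_state setup_state;
    };

    static bool part_ref(struct part *p)
    {
        bool ok;

        pthread_mutex_lock(&p->lock);
        ok = (p->setup_state == P_SETUP);
        if (ok)
            p->references++;            /* caller may use the infrastructure */
        pthread_mutex_unlock(&p->lock);
        return ok;
    }

    static void part_deref(struct part *p)
    {
        pthread_mutex_lock(&p->lock);
        if (--p->references == 0 && p->setup_state == P_WTEARDOWN)
            pthread_cond_signal(&p->teardown_wq);   /* unblock teardown */
        pthread_mutex_unlock(&p->lock);
    }

    /* teardown side: flip the state, then wait for references to drain */
    static void part_wait_teardown(struct part *p)
    {
        pthread_mutex_lock(&p->lock);
        p->setup_state = P_WTEARDOWN;   /* no new refs from here on */
        while (p->references != 0)
            pthread_cond_wait(&p->teardown_wq, &p->lock);
        p->setup_state = P_TORNDOWN;    /* now safe to free everything */
        pthread_mutex_unlock(&p->lock);
    }

    int main(void)
    {
        struct part p = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .teardown_wq = PTHREAD_COND_INITIALIZER,
            .setup_state = P_SETUP,
        };

        if (part_ref(&p)) {
            /* ... safe to use the partition's infrastructure ... */
            part_deref(&p);
        }
        part_wait_teardown(&p);     /* returns at once: no refs held */
        return 0;
    }
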
791 | |||
792 | |||
793 | /* | 738 | /* |
794 | * The following macro is to be used for the setting of the reason and | 739 | * The following macro is to be used for the setting of the reason and |
795 | * reason_line fields in both the struct xpc_channel and struct xpc_partition | 740 | * reason_line fields in both the struct xpc_channel and struct xpc_partition |
@@ -801,8 +746,6 @@ xpc_part_ref(struct xpc_partition *part) | |||
801 | (_p)->reason_line = _line; \ | 746 | (_p)->reason_line = _line; \ |
802 | } | 747 | } |
803 | 748 | ||
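
XPC_SET_REASON simply records what failed and where (__LINE__), so a later dump of a channel or partition points at the exact source line that gave up. A standalone demo of the idiom with stand-in types (the real enum xpc_retval codes live in xp.h; everything here is illustrative):

    #include <stdio.h>

    /* stand-ins for enum xpc_retval and the reason fields of the real
     * xpc_channel/xpc_partition structures */
    enum retval { SUCCESS, NO_HEARTBEAT };
    struct rec { enum retval reason; int reason_line; };

    /* same shape as XPC_SET_REASON above */
    #define SET_REASON(_p, _reason, _line) \
        { (_p)->reason = _reason; (_p)->reason_line = _line; }

    int main(void)
    {
        struct rec part;

        SET_REASON(&part, NO_HEARTBEAT, __LINE__);  /* record what and where */
        printf("reason=%d set at line %d\n", part.reason, part.reason_line);
        return 0;
    }
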
804 | |||
805 | |||
806 | /* | 749 | /* |
807 | * This next set of inlines are used to keep track of when a partition is | 750 | * This next set of inlines are used to keep track of when a partition is |
808 | * potentially engaged in accessing memory belonging to another partition. | 751 | * potentially engaged in accessing memory belonging to another partition. |
@@ -812,23 +755,24 @@ static inline void | |||
812 | xpc_mark_partition_engaged(struct xpc_partition *part) | 755 | xpc_mark_partition_engaged(struct xpc_partition *part) |
813 | { | 756 | { |
814 | unsigned long irq_flags; | 757 | unsigned long irq_flags; |
815 | AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + | 758 | AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + |
816 | (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t))); | 759 | (XPC_ENGAGED_PARTITIONS_AMO * |
817 | 760 | sizeof(AMO_t))); | |
818 | 761 | ||
819 | local_irq_save(irq_flags); | 762 | local_irq_save(irq_flags); |
820 | 763 | ||
821 | /* set bit corresponding to our partid in remote partition's AMO */ | 764 | /* set bit corresponding to our partid in remote partition's AMO */ |
822 | FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, | 765 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, |
823 | (1UL << sn_partition_id)); | 766 | (1UL << sn_partition_id)); |
824 | /* | 767 | /* |
825 | * We must always use the nofault function regardless of whether we | 768 | * We must always use the nofault function regardless of whether we |
826 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we | 769 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we |
827 | * didn't, we'd never know that the other partition is down and would | 770 | * didn't, we'd never know that the other partition is down and would |
828 | * keep sending IPIs and AMOs to it until the heartbeat times out. | 771 | * keep sending IPIs and AMOs to it until the heartbeat times out. |
829 | */ | 772 | */ |
830 | (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> | 773 | (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> |
831 | variable), xp_nofault_PIOR_target)); | 774 | variable), |
775 | xp_nofault_PIOR_target)); | ||
832 | 776 | ||
833 | local_irq_restore(irq_flags); | 777 | local_irq_restore(irq_flags); |
834 | } | 778 | } |
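
This helper and the three that follow all drive one data structure: a 64-bit AMO word per partition in which each remote partition owns the bit matching its partition id -- set means "I may be referencing your memory", clear means "I am done". A userspace model with C11 atomics standing in for the FETCHOP_OR/FETCHOP_AND hardware ops (none of this is the real AMO_t, and the nofault read-back that verifies the remote side is reachable is omitted):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint_fast64_t engaged_amo;  /* models XPC_ENGAGED_PARTITIONS_AMO */

    static void mark_engaged(int my_partid)
    {
        atomic_fetch_or(&engaged_amo, UINT64_C(1) << my_partid);
    }

    static void mark_disengaged(int my_partid)
    {
        atomic_fetch_and(&engaged_amo, ~(UINT64_C(1) << my_partid));
    }

    /* owner side: are any of the partitions in partid_mask still engaged? */
    static uint64_t partition_engaged(uint64_t partid_mask)
    {
        return atomic_load(&engaged_amo) & partid_mask;
    }

    int main(void)
    {
        mark_engaged(3);
        printf("partid 3 engaged? %s\n",
               partition_engaged(UINT64_C(1) << 3) ? "yes" : "no");
        mark_disengaged(3);
        printf("partid 3 engaged? %s\n",
               partition_engaged(UINT64_C(1) << 3) ? "yes" : "no");
        return 0;
    }
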
@@ -837,23 +781,24 @@ static inline void | |||
837 | xpc_mark_partition_disengaged(struct xpc_partition *part) | 781 | xpc_mark_partition_disengaged(struct xpc_partition *part) |
838 | { | 782 | { |
839 | unsigned long irq_flags; | 783 | unsigned long irq_flags; |
840 | AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + | 784 | AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + |
841 | (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t))); | 785 | (XPC_ENGAGED_PARTITIONS_AMO * |
842 | 786 | sizeof(AMO_t))); | |
843 | 787 | ||
844 | local_irq_save(irq_flags); | 788 | local_irq_save(irq_flags); |
845 | 789 | ||
846 | /* clear bit corresponding to our partid in remote partition's AMO */ | 790 | /* clear bit corresponding to our partid in remote partition's AMO */ |
847 | FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, | 791 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, |
848 | ~(1UL << sn_partition_id)); | 792 | ~(1UL << sn_partition_id)); |
849 | /* | 793 | /* |
850 | * We must always use the nofault function regardless of whether we | 794 | * We must always use the nofault function regardless of whether we |
851 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we | 795 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we |
852 | * didn't, we'd never know that the other partition is down and would | 796 | * didn't, we'd never know that the other partition is down and would |
853 | * keep sending IPIs and AMOs to it until the heartbeat times out. | 797 | * keep sending IPIs and AMOs to it until the heartbeat times out. |
854 | */ | 798 | */ |
855 | (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> | 799 | (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> |
856 | variable), xp_nofault_PIOR_target)); | 800 | variable), |
801 | xp_nofault_PIOR_target)); | ||
857 | 802 | ||
858 | local_irq_restore(irq_flags); | 803 | local_irq_restore(irq_flags); |
859 | } | 804 | } |
@@ -862,23 +807,23 @@ static inline void | |||
862 | xpc_request_partition_disengage(struct xpc_partition *part) | 807 | xpc_request_partition_disengage(struct xpc_partition *part) |
863 | { | 808 | { |
864 | unsigned long irq_flags; | 809 | unsigned long irq_flags; |
865 | AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + | 810 | AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + |
866 | (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); | 811 | (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); |
867 | |||
868 | 812 | ||
869 | local_irq_save(irq_flags); | 813 | local_irq_save(irq_flags); |
870 | 814 | ||
871 | /* set bit corresponding to our partid in remote partition's AMO */ | 815 | /* set bit corresponding to our partid in remote partition's AMO */ |
872 | FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, | 816 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, |
873 | (1UL << sn_partition_id)); | 817 | (1UL << sn_partition_id)); |
874 | /* | 818 | /* |
875 | * We must always use the nofault function regardless of whether we | 819 | * We must always use the nofault function regardless of whether we |
876 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we | 820 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we |
877 | * didn't, we'd never know that the other partition is down and would | 821 | * didn't, we'd never know that the other partition is down and would |
878 | * keep sending IPIs and AMOs to it until the heartbeat times out. | 822 | * keep sending IPIs and AMOs to it until the heartbeat times out. |
879 | */ | 823 | */ |
880 | (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> | 824 | (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> |
881 | variable), xp_nofault_PIOR_target)); | 825 | variable), |
826 | xp_nofault_PIOR_target)); | ||
882 | 827 | ||
883 | local_irq_restore(irq_flags); | 828 | local_irq_restore(irq_flags); |
884 | } | 829 | } |
@@ -887,23 +832,23 @@ static inline void | |||
887 | xpc_cancel_partition_disengage_request(struct xpc_partition *part) | 832 | xpc_cancel_partition_disengage_request(struct xpc_partition *part) |
888 | { | 833 | { |
889 | unsigned long irq_flags; | 834 | unsigned long irq_flags; |
890 | AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + | 835 | AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + |
891 | (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); | 836 | (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); |
892 | |||
893 | 837 | ||
894 | local_irq_save(irq_flags); | 838 | local_irq_save(irq_flags); |
895 | 839 | ||
896 | /* clear bit corresponding to our partid in remote partition's AMO */ | 840 | /* clear bit corresponding to our partid in remote partition's AMO */ |
897 | FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, | 841 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, |
898 | ~(1UL << sn_partition_id)); | 842 | ~(1UL << sn_partition_id)); |
899 | /* | 843 | /* |
900 | * We must always use the nofault function regardless of whether we | 844 | * We must always use the nofault function regardless of whether we |
901 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we | 845 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we |
902 | * didn't, we'd never know that the other partition is down and would | 846 | * didn't, we'd never know that the other partition is down and would |
903 | * keep sending IPIs and AMOs to it until the heartbeat times out. | 847 | * keep sending IPIs and AMOs to it until the heartbeat times out. |
904 | */ | 848 | */ |
905 | (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> | 849 | (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> |
906 | variable), xp_nofault_PIOR_target)); | 850 | variable), |
851 | xp_nofault_PIOR_target)); | ||
907 | 852 | ||
908 | local_irq_restore(irq_flags); | 853 | local_irq_restore(irq_flags); |
909 | } | 854 | } |
@@ -913,10 +858,9 @@ xpc_partition_engaged(u64 partid_mask) | |||
913 | { | 858 | { |
914 | AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; | 859 | AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; |
915 | 860 | ||
916 | |||
917 | /* return our partition's AMO variable ANDed with partid_mask */ | 861 | /* return our partition's AMO variable ANDed with partid_mask */ |
918 | return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) & | 862 | return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & |
919 | partid_mask); | 863 | partid_mask); |
920 | } | 864 | } |
921 | 865 | ||
922 | static inline u64 | 866 | static inline u64 |
@@ -924,10 +868,9 @@ xpc_partition_disengage_requested(u64 partid_mask) | |||
924 | { | 868 | { |
925 | AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; | 869 | AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; |
926 | 870 | ||
927 | |||
928 | /* return our partition's AMO variable ANDed with partid_mask */ | 871 | /* return our partition's AMO variable ANDed with partid_mask */ |
929 | return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) & | 872 | return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & |
930 | partid_mask); | 873 | partid_mask); |
931 | } | 874 | } |
932 | 875 | ||
933 | static inline void | 876 | static inline void |
@@ -935,10 +878,9 @@ xpc_clear_partition_engaged(u64 partid_mask) | |||
935 | { | 878 | { |
936 | AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; | 879 | AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; |
937 | 880 | ||
938 | |||
939 | /* clear bit(s) based on partid_mask in our partition's AMO */ | 881 | /* clear bit(s) based on partid_mask in our partition's AMO */ |
940 | FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, | 882 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, |
941 | ~partid_mask); | 883 | ~partid_mask); |
942 | } | 884 | } |
943 | 885 | ||
944 | static inline void | 886 | static inline void |
@@ -946,14 +888,11 @@ xpc_clear_partition_disengage_request(u64 partid_mask) | |||
946 | { | 888 | { |
947 | AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; | 889 | AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; |
948 | 890 | ||
949 | |||
950 | /* clear bit(s) based on partid_mask in our partition's AMO */ | 891 | /* clear bit(s) based on partid_mask in our partition's AMO */ |
951 | FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, | 892 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, |
952 | ~partid_mask); | 893 | ~partid_mask); |
953 | } | 894 | } |
954 | 895 | ||
955 | |||
956 | |||
957 | /* | 896 | /* |
958 | * The following set of macros and inlines are used for the sending and | 897 | * The following set of macros and inlines are used for the sending and |
959 | * receiving of IPIs (also known as IRQs). There are two flavors of IPIs, | 898 | * receiving of IPIs (also known as IRQs). There are two flavors of IPIs, |
@@ -964,20 +903,18 @@ xpc_clear_partition_disengage_request(u64 partid_mask) | |||
964 | static inline u64 | 903 | static inline u64 |
965 | xpc_IPI_receive(AMO_t *amo) | 904 | xpc_IPI_receive(AMO_t *amo) |
966 | { | 905 | { |
967 | return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR); | 906 | return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR); |
968 | } | 907 | } |
969 | 908 | ||
970 | |||
971 | static inline enum xpc_retval | 909 | static inline enum xpc_retval |
972 | xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) | 910 | xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) |
973 | { | 911 | { |
974 | int ret = 0; | 912 | int ret = 0; |
975 | unsigned long irq_flags; | 913 | unsigned long irq_flags; |
976 | 914 | ||
977 | |||
978 | local_irq_save(irq_flags); | 915 | local_irq_save(irq_flags); |
979 | 916 | ||
980 | FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag); | 917 | FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag); |
981 | sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); | 918 | sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); |
982 | 919 | ||
983 | /* | 920 | /* |
@@ -986,15 +923,14 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) | |||
986 | * didn't, we'd never know that the other partition is down and would | 923 | * didn't, we'd never know that the other partition is down and would |
987 | * keep sending IPIs and AMOs to it until the heartbeat times out. | 924 | * keep sending IPIs and AMOs to it until the heartbeat times out. |
988 | */ | 925 | */ |
989 | ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), | 926 | ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), |
990 | xp_nofault_PIOR_target)); | 927 | xp_nofault_PIOR_target)); |
991 | 928 | ||
992 | local_irq_restore(irq_flags); | 929 | local_irq_restore(irq_flags); |
993 | 930 | ||
994 | return ((ret == 0) ? xpcSuccess : xpcPioReadError); | 931 | return ((ret == 0) ? xpcSuccess : xpcPioReadError); |
995 | } | 932 | } |
996 | 933 | ||
997 | |||
998 | /* | 934 | /* |
999 | * IPIs associated with SGI_XPC_ACTIVATE IRQ. | 935 | * IPIs associated with SGI_XPC_ACTIVATE IRQ. |
1000 | */ | 936 | */ |
@@ -1004,47 +940,47 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) | |||
1004 | */ | 940 | */ |
1005 | static inline void | 941 | static inline void |
1006 | xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid, | 942 | xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid, |
1007 | int to_phys_cpuid) | 943 | int to_phys_cpuid) |
1008 | { | 944 | { |
1009 | int w_index = XPC_NASID_W_INDEX(from_nasid); | 945 | int w_index = XPC_NASID_W_INDEX(from_nasid); |
1010 | int b_index = XPC_NASID_B_INDEX(from_nasid); | 946 | int b_index = XPC_NASID_B_INDEX(from_nasid); |
1011 | AMO_t *amos = (AMO_t *) __va(amos_page_pa + | 947 | AMO_t *amos = (AMO_t *)__va(amos_page_pa + |
1012 | (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t))); | 948 | (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t))); |
1013 | |||
1014 | 949 | ||
1015 | (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, | 950 | (void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, |
1016 | to_phys_cpuid, SGI_XPC_ACTIVATE); | 951 | to_phys_cpuid, SGI_XPC_ACTIVATE); |
1017 | } | 952 | } |
1018 | 953 | ||
1019 | static inline void | 954 | static inline void |
1020 | xpc_IPI_send_activate(struct xpc_vars *vars) | 955 | xpc_IPI_send_activate(struct xpc_vars *vars) |
1021 | { | 956 | { |
1022 | xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0), | 957 | xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0), |
1023 | vars->act_nasid, vars->act_phys_cpuid); | 958 | vars->act_nasid, vars->act_phys_cpuid); |
1024 | } | 959 | } |
1025 | 960 | ||
1026 | static inline void | 961 | static inline void |
1027 | xpc_IPI_send_activated(struct xpc_partition *part) | 962 | xpc_IPI_send_activated(struct xpc_partition *part) |
1028 | { | 963 | { |
1029 | xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), | 964 | xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), |
1030 | part->remote_act_nasid, part->remote_act_phys_cpuid); | 965 | part->remote_act_nasid, |
966 | part->remote_act_phys_cpuid); | ||
1031 | } | 967 | } |
1032 | 968 | ||
1033 | static inline void | 969 | static inline void |
1034 | xpc_IPI_send_reactivate(struct xpc_partition *part) | 970 | xpc_IPI_send_reactivate(struct xpc_partition *part) |
1035 | { | 971 | { |
1036 | xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid, | 972 | xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid, |
1037 | xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); | 973 | xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); |
1038 | } | 974 | } |
1039 | 975 | ||
1040 | static inline void | 976 | static inline void |
1041 | xpc_IPI_send_disengage(struct xpc_partition *part) | 977 | xpc_IPI_send_disengage(struct xpc_partition *part) |
1042 | { | 978 | { |
1043 | xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), | 979 | xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), |
1044 | part->remote_act_nasid, part->remote_act_phys_cpuid); | 980 | part->remote_act_nasid, |
981 | part->remote_act_phys_cpuid); | ||
1045 | } | 982 | } |
1046 | 983 | ||
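
xpc_activate_IRQ_send() identifies the sender by nasid rather than partition id: from_nasid selects a word (w_index) in the XPC_ACTIVATE_IRQ_AMOS array and a bit (b_index) within that word, so the receiver can tell exactly which node poked it. The real XPC_NASID_W_INDEX()/XPC_NASID_B_INDEX() macros are defined elsewhere in this header; the generic word/bit split below shows only the shape of the scheme, not their exact arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_WORD 64

    /* split a flat id into (word, bit) -- the shape of the W/B index
     * macros, not their exact math */
    static inline int w_index(int n) { return n / BITS_PER_WORD; }
    static inline int b_index(int n) { return n % BITS_PER_WORD; }

    int main(void)
    {
        uint64_t amos[4] = { 0 };   /* models the activate-IRQ AMO array */
        int from_nasid = 70;        /* example sender id */

        /* sender: set my bit in the right word, then raise the IRQ */
        amos[w_index(from_nasid)] |= UINT64_C(1) << b_index(from_nasid);

        /* receiver: scan the words and decode which node(s) signaled */
        for (int w = 0; w < 4; w++)
            for (int b = 0; amos[w] != 0 && b < BITS_PER_WORD; b++)
                if (amos[w] & (UINT64_C(1) << b))
                    printf("activate IRQ from id %d\n",
                           w * BITS_PER_WORD + b);
        return 0;
    }
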
1047 | |||
1048 | /* | 984 | /* |
1049 | * IPIs associated with SGI_XPC_NOTIFY IRQ. | 985 | * IPIs associated with SGI_XPC_NOTIFY IRQ. |
1050 | */ | 986 | */ |
@@ -1058,18 +994,16 @@ xpc_IPI_send_disengage(struct xpc_partition *part) | |||
1058 | 994 | ||
1059 | static inline void | 995 | static inline void |
1060 | xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, | 996 | xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, |
1061 | unsigned long *irq_flags) | 997 | unsigned long *irq_flags) |
1062 | { | 998 | { |
1063 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | 999 | struct xpc_partition *part = &xpc_partitions[ch->partid]; |
1064 | enum xpc_retval ret; | 1000 | enum xpc_retval ret; |
1065 | 1001 | ||
1066 | |||
1067 | if (likely(part->act_state != XPC_P_DEACTIVATING)) { | 1002 | if (likely(part->act_state != XPC_P_DEACTIVATING)) { |
1068 | ret = xpc_IPI_send(part->remote_IPI_amo_va, | 1003 | ret = xpc_IPI_send(part->remote_IPI_amo_va, |
1069 | (u64) ipi_flag << (ch->number * 8), | 1004 | (u64)ipi_flag << (ch->number * 8), |
1070 | part->remote_IPI_nasid, | 1005 | part->remote_IPI_nasid, |
1071 | part->remote_IPI_phys_cpuid, | 1006 | part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY); |
1072 | SGI_XPC_NOTIFY); | ||
1073 | dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", | 1007 | dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", |
1074 | ipi_flag_string, ch->partid, ch->number, ret); | 1008 | ipi_flag_string, ch->partid, ch->number, ret); |
1075 | if (unlikely(ret != xpcSuccess)) { | 1009 | if (unlikely(ret != xpcSuccess)) { |
@@ -1084,7 +1018,6 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, | |||
1084 | } | 1018 | } |
1085 | } | 1019 | } |
1086 | 1020 | ||
1087 | |||
1088 | /* | 1021 | /* |
1089 | * Make it look like the remote partition, which is associated with the | 1022 | * Make it look like the remote partition, which is associated with the |
1090 | * specified channel, sent us an IPI. This faked IPI will be handled | 1023 | * specified channel, sent us an IPI. This faked IPI will be handled |
@@ -1095,18 +1028,16 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, | |||
1095 | 1028 | ||
1096 | static inline void | 1029 | static inline void |
1097 | xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, | 1030 | xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, |
1098 | char *ipi_flag_string) | 1031 | char *ipi_flag_string) |
1099 | { | 1032 | { |
1100 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | 1033 | struct xpc_partition *part = &xpc_partitions[ch->partid]; |
1101 | 1034 | ||
1102 | 1035 | FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable), | |
1103 | FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable), | 1036 | FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8))); |
1104 | FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8))); | ||
1105 | dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", | 1037 | dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", |
1106 | ipi_flag_string, ch->partid, ch->number); | 1038 | ipi_flag_string, ch->partid, ch->number); |
1107 | } | 1039 | } |
1108 | 1040 | ||
1109 | |||
1110 | /* | 1041 | /* |
1111 | * The sending and receiving of IPIs includes the setting of an AMO variable | 1042 | * The sending and receiving of IPIs includes the setting of an AMO variable |
1112 | * to indicate the reason the IPI was sent. The 64-bit variable is divided | 1043 | * to indicate the reason the IPI was sent. The 64-bit variable is divided |
@@ -1121,7 +1052,6 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, | |||
1121 | #define XPC_IPI_OPENREPLY 0x08 | 1052 | #define XPC_IPI_OPENREPLY 0x08 |
1122 | #define XPC_IPI_MSGREQUEST 0x10 | 1053 | #define XPC_IPI_MSGREQUEST 0x10 |
1123 | 1054 | ||
1124 | |||
1125 | /* given an AMO variable and a channel#, get its associated IPI flags */ | 1055 | /* given an AMO variable and a channel#, get its associated IPI flags */ |
1126 | #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) | 1056 | #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) |
1127 | #define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) | 1057 | #define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) |
@@ -1129,13 +1059,11 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, | |||
1129 | #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f)) | 1059 | #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f)) |
1130 | #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010)) | 1060 | #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010)) |
1131 | 1061 | ||
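
These macros treat the 64-bit AMO value as eight 8-bit lanes, one per channel: the low nibble of each lane holds the open/close flags (0x01-0x08) and bit 4 (0x10) the message request, which is exactly why the "any set" tests use the repeating masks 0x0f0f... and 0x1010.... A standalone demo of the packing (macro bodies copied from above; the channel numbers are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    #define IPI_CLOSEREQUEST 0x01
    #define IPI_MSGREQUEST   0x10

    #define GET_IPI_FLAGS(_amo, _c)     ((uint8_t)(((_amo) >> ((_c) * 8)) & 0xff))
    #define SET_IPI_FLAGS(_amo, _c, _f) ((_amo) |= ((uint64_t)(_f) << ((_c) * 8)))

    #define ANY_OPENCLOSE_SET(_amo) ((_amo) & UINT64_C(0x0f0f0f0f0f0f0f0f))
    #define ANY_MSG_SET(_amo)       ((_amo) & UINT64_C(0x1010101010101010))

    int main(void)
    {
        uint64_t amo = 0;

        SET_IPI_FLAGS(amo, 2, IPI_MSGREQUEST);    /* channel 2: msg pending   */
        SET_IPI_FLAGS(amo, 5, IPI_CLOSEREQUEST);  /* channel 5: close request */

        printf("amo = 0x%016llx\n", (unsigned long long)amo);
        printf("ch2 flags = 0x%02x, ch5 flags = 0x%02x\n",
               GET_IPI_FLAGS(amo, 2), GET_IPI_FLAGS(amo, 5));
        printf("openclose pending? %s, msg pending? %s\n",
               ANY_OPENCLOSE_SET(amo) ? "yes" : "no",
               ANY_MSG_SET(amo) ? "yes" : "no");
        return 0;
    }
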
1132 | |||
1133 | static inline void | 1062 | static inline void |
1134 | xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags) | 1063 | xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags) |
1135 | { | 1064 | { |
1136 | struct xpc_openclose_args *args = ch->local_openclose_args; | 1065 | struct xpc_openclose_args *args = ch->local_openclose_args; |
1137 | 1066 | ||
1138 | |||
1139 | args->reason = ch->reason; | 1067 | args->reason = ch->reason; |
1140 | 1068 | ||
1141 | XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags); | 1069 | XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags); |
@@ -1152,7 +1080,6 @@ xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags) | |||
1152 | { | 1080 | { |
1153 | struct xpc_openclose_args *args = ch->local_openclose_args; | 1081 | struct xpc_openclose_args *args = ch->local_openclose_args; |
1154 | 1082 | ||
1155 | |||
1156 | args->msg_size = ch->msg_size; | 1083 | args->msg_size = ch->msg_size; |
1157 | args->local_nentries = ch->local_nentries; | 1084 | args->local_nentries = ch->local_nentries; |
1158 | 1085 | ||
@@ -1164,7 +1091,6 @@ xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags) | |||
1164 | { | 1091 | { |
1165 | struct xpc_openclose_args *args = ch->local_openclose_args; | 1092 | struct xpc_openclose_args *args = ch->local_openclose_args; |
1166 | 1093 | ||
1167 | |||
1168 | args->remote_nentries = ch->remote_nentries; | 1094 | args->remote_nentries = ch->remote_nentries; |
1169 | args->local_nentries = ch->local_nentries; | 1095 | args->local_nentries = ch->local_nentries; |
1170 | args->local_msgqueue_pa = __pa(ch->local_msgqueue); | 1096 | args->local_msgqueue_pa = __pa(ch->local_msgqueue); |
@@ -1184,7 +1110,6 @@ xpc_IPI_send_local_msgrequest(struct xpc_channel *ch) | |||
1184 | XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST); | 1110 | XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST); |
1185 | } | 1111 | } |
1186 | 1112 | ||
1187 | |||
1188 | /* | 1113 | /* |
1189 | * Memory for XPC's AMO variables is allocated by the MSPEC driver. These | 1114 | * Memory for XPC's AMO variables is allocated by the MSPEC driver. These |
1190 | * pages are located in the lowest granule. The lowest granule uses 4k pages | 1115 | * pages are located in the lowest granule. The lowest granule uses 4k pages |
@@ -1201,13 +1126,10 @@ xpc_IPI_init(int index) | |||
1201 | { | 1126 | { |
1202 | AMO_t *amo = xpc_vars->amos_page + index; | 1127 | AMO_t *amo = xpc_vars->amos_page + index; |
1203 | 1128 | ||
1204 | 1129 | (void)xpc_IPI_receive(amo); /* clear AMO variable */ | |
1205 | (void) xpc_IPI_receive(amo); /* clear AMO variable */ | ||
1206 | return amo; | 1130 | return amo; |
1207 | } | 1131 | } |
1208 | 1132 | ||
1209 | |||
1210 | |||
1211 | static inline enum xpc_retval | 1133 | static inline enum xpc_retval |
1212 | xpc_map_bte_errors(bte_result_t error) | 1134 | xpc_map_bte_errors(bte_result_t error) |
1213 | { | 1135 | { |
@@ -1220,22 +1142,31 @@ xpc_map_bte_errors(bte_result_t error) | |||
1220 | return xpcBteUnmappedError; | 1142 | return xpcBteUnmappedError; |
1221 | } | 1143 | } |
1222 | switch (error) { | 1144 | switch (error) { |
1223 | case BTE_SUCCESS: return xpcSuccess; | 1145 | case BTE_SUCCESS: |
1224 | case BTEFAIL_DIR: return xpcBteDirectoryError; | 1146 | return xpcSuccess; |
1225 | case BTEFAIL_POISON: return xpcBtePoisonError; | 1147 | case BTEFAIL_DIR: |
1226 | case BTEFAIL_WERR: return xpcBteWriteError; | 1148 | return xpcBteDirectoryError; |
1227 | case BTEFAIL_ACCESS: return xpcBteAccessError; | 1149 | case BTEFAIL_POISON: |
1228 | case BTEFAIL_PWERR: return xpcBtePWriteError; | 1150 | return xpcBtePoisonError; |
1229 | case BTEFAIL_PRERR: return xpcBtePReadError; | 1151 | case BTEFAIL_WERR: |
1230 | case BTEFAIL_TOUT: return xpcBteTimeOutError; | 1152 | return xpcBteWriteError; |
1231 | case BTEFAIL_XTERR: return xpcBteXtalkError; | 1153 | case BTEFAIL_ACCESS: |
1232 | case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable; | 1154 | return xpcBteAccessError; |
1233 | default: return xpcBteUnmappedError; | 1155 | case BTEFAIL_PWERR: |
1156 | return xpcBtePWriteError; | ||
1157 | case BTEFAIL_PRERR: | ||
1158 | return xpcBtePReadError; | ||
1159 | case BTEFAIL_TOUT: | ||
1160 | return xpcBteTimeOutError; | ||
1161 | case BTEFAIL_XTERR: | ||
1162 | return xpcBteXtalkError; | ||
1163 | case BTEFAIL_NOTAVAIL: | ||
1164 | return xpcBteNotAvailable; | ||
1165 | default: | ||
1166 | return xpcBteUnmappedError; | ||
1234 | } | 1167 | } |
1235 | } | 1168 | } |
1236 | 1169 | ||
1237 | |||
1238 | |||
1239 | /* | 1170 | /* |
1240 | * Check to see if there is any channel activity to/from the specified | 1171 | * Check to see if there is any channel activity to/from the specified |
1241 | * partition. | 1172 | * partition. |
@@ -1246,7 +1177,6 @@ xpc_check_for_channel_activity(struct xpc_partition *part) | |||
1246 | u64 IPI_amo; | 1177 | u64 IPI_amo; |
1247 | unsigned long irq_flags; | 1178 | unsigned long irq_flags; |
1248 | 1179 | ||
1249 | |||
1250 | IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va); | 1180 | IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va); |
1251 | if (IPI_amo == 0) { | 1181 | if (IPI_amo == 0) { |
1252 | return; | 1182 | return; |
@@ -1262,6 +1192,4 @@ xpc_check_for_channel_activity(struct xpc_partition *part) | |||
1262 | xpc_wakeup_channel_mgr(part); | 1192 | xpc_wakeup_channel_mgr(part); |
1263 | } | 1193 | } |
1264 | 1194 | ||
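
xpc_check_for_channel_activity() is the consumer half of the doorbell: xpc_IPI_receive()'s fetch-and-clear drains every pending flag in one shot, so no IPI is ever processed twice and an empty poll costs a single load. The same semantics in C11 atomics (atomic_exchange standing in for the FETCHOP_CLEAR hardware op; the lane layout is the one shown above):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint_fast64_t ipi_amo;   /* models part->local_IPI_amo_va */

    /* sender side: post a flag into a channel's lane */
    static void post(uint64_t flag) { atomic_fetch_or(&ipi_amo, flag); }

    /* receiver side: drain everything pending, exactly once */
    static uint64_t drain(void) { return atomic_exchange(&ipi_amo, 0); }

    int main(void)
    {
        post(UINT64_C(0x10) << 16);     /* msg request on channel 2 */

        uint64_t pending = drain();
        if (pending != 0)
            printf("pending flags: 0x%016llx\n", (unsigned long long)pending);
        if (drain() == 0)
            printf("second drain sees nothing -- flags consumed once\n");
        return 0;
    }
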
1265 | |||
1266 | #endif /* _DRIVERS_MISC_SGIXP_XPC_H */ | 1195 | #endif /* _DRIVERS_MISC_SGIXP_XPC_H */ |
1267 | |||
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 7412dc7351cd..57f1d0b3ac26 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c | |||
@@ -6,7 +6,6 @@ | |||
6 | * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | |||
10 | /* | 9 | /* |
11 | * Cross Partition Communication (XPC) partition support. | 10 | * Cross Partition Communication (XPC) partition support. |
12 | * | 11 | * |
@@ -16,7 +15,6 @@ | |||
16 | * | 15 | * |
17 | */ | 16 | */ |
18 | 17 | ||
19 | |||
20 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
21 | #include <linux/sysctl.h> | 19 | #include <linux/sysctl.h> |
22 | #include <linux/cache.h> | 20 | #include <linux/cache.h> |
@@ -30,11 +28,9 @@ | |||
30 | #include <asm/sn/addrs.h> | 28 | #include <asm/sn/addrs.h> |
31 | #include "xpc.h" | 29 | #include "xpc.h" |
32 | 30 | ||
33 | |||
34 | /* XPC is exiting flag */ | 31 | /* XPC is exiting flag */ |
35 | int xpc_exiting; | 32 | int xpc_exiting; |
36 | 33 | ||
37 | |||
38 | /* SH_IPI_ACCESS shub register value on startup */ | 34 | /* SH_IPI_ACCESS shub register value on startup */ |
39 | static u64 xpc_sh1_IPI_access; | 35 | static u64 xpc_sh1_IPI_access; |
40 | static u64 xpc_sh2_IPI_access0; | 36 | static u64 xpc_sh2_IPI_access0; |
@@ -42,11 +38,9 @@ static u64 xpc_sh2_IPI_access1; | |||
42 | static u64 xpc_sh2_IPI_access2; | 38 | static u64 xpc_sh2_IPI_access2; |
43 | static u64 xpc_sh2_IPI_access3; | 39 | static u64 xpc_sh2_IPI_access3; |
44 | 40 | ||
45 | |||
46 | /* original protection values for each node */ | 41 | /* original protection values for each node */ |
47 | u64 xpc_prot_vec[MAX_NUMNODES]; | 42 | u64 xpc_prot_vec[MAX_NUMNODES]; |
48 | 43 | ||
49 | |||
50 | /* this partition's reserved page pointers */ | 44 | /* this partition's reserved page pointers */ |
51 | struct xpc_rsvd_page *xpc_rsvd_page; | 45 | struct xpc_rsvd_page *xpc_rsvd_page; |
52 | static u64 *xpc_part_nasids; | 46 | static u64 *xpc_part_nasids; |
@@ -57,7 +51,6 @@ struct xpc_vars_part *xpc_vars_part; | |||
57 | static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */ | 51 | static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */ |
58 | static int xp_nasid_mask_words; /* actual size in words of nasid mask */ | 52 | static int xp_nasid_mask_words; /* actual size in words of nasid mask */ |
59 | 53 | ||
60 | |||
61 | /* | 54 | /* |
62 | * For performance reasons, each entry of xpc_partitions[] is cacheline | 55 | * For performance reasons, each entry of xpc_partitions[] is cacheline |
63 | * aligned. And xpc_partitions[] is padded with an additional entry at the | 56 | * aligned. And xpc_partitions[] is padded with an additional entry at the |
@@ -66,7 +59,6 @@ static int xp_nasid_mask_words; /* actual size in words of nasid mask */ | |||
66 | */ | 59 | */ |
67 | struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; | 60 | struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; |
68 | 61 | ||
69 | |||
70 | /* | 62 | /* |
71 | * Generic buffer used to store a local copy of portions of a remote | 63 | * Generic buffer used to store a local copy of portions of a remote |
72 | * partition's reserved page (either its header and part_nasids mask, | 64 | * partition's reserved page (either its header and part_nasids mask, |
@@ -75,7 +67,6 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; | |||
75 | char *xpc_remote_copy_buffer; | 67 | char *xpc_remote_copy_buffer; |
76 | void *xpc_remote_copy_buffer_base; | 68 | void *xpc_remote_copy_buffer_base; |
77 | 69 | ||
78 | |||
79 | /* | 70 | /* |
80 | * Guarantee that the kmalloc'd memory is cacheline aligned. | 71 | * Guarantee that the kmalloc'd memory is cacheline aligned. |
81 | */ | 72 | */ |
@@ -87,7 +78,7 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) | |||
87 | if (*base == NULL) { | 78 | if (*base == NULL) { |
88 | return NULL; | 79 | return NULL; |
89 | } | 80 | } |
90 | if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { | 81 | if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) { |
91 | return *base; | 82 | return *base; |
92 | } | 83 | } |
93 | kfree(*base); | 84 | kfree(*base); |
@@ -97,10 +88,9 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) | |||
97 | if (*base == NULL) { | 88 | if (*base == NULL) { |
98 | return NULL; | 89 | return NULL; |
99 | } | 90 | } |
100 | return (void *) L1_CACHE_ALIGN((u64) *base); | 91 | return (void *)L1_CACHE_ALIGN((u64)*base); |
101 | } | 92 | } |
102 | 93 | ||
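
The strategy in xpc_kmalloc_cacheline_aligned() is: allocate the exact size first and keep it if the allocator happened to return an aligned block; otherwise over-allocate by one cacheline minus one and round the pointer up, remembering the raw pointer in *base for the eventual kfree(). The same trick in portable C (malloc in place of kmalloc, and an assumed 128-byte line; the kernel's L1_CACHE_ALIGN does the same round-up):

    #include <stdint.h>
    #include <stdlib.h>

    #define CACHELINE 128   /* assumed line size for the sketch */
    #define ALIGN_UP(x) (((x) + CACHELINE - 1) & ~(uintptr_t)(CACHELINE - 1))

    /* returns an aligned pointer; *base receives the raw pointer that
     * must eventually be handed to free() */
    static void *alloc_cacheline_aligned(size_t size, void **base)
    {
        *base = malloc(size);
        if (*base == NULL)
            return NULL;
        if ((uintptr_t)*base == ALIGN_UP((uintptr_t)*base))
            return *base;                       /* got lucky: no waste */

        free(*base);                            /* retry with padding */
        *base = malloc(size + CACHELINE - 1);
        if (*base == NULL)
            return NULL;
        return (void *)ALIGN_UP((uintptr_t)*base);
    }

    int main(void)
    {
        void *base;
        void *buf = alloc_cacheline_aligned(1000, &base);

        /* ... use buf ... */
        free(base);     /* free the raw pointer, never the aligned one */
        return 0;
    }
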
103 | |||
104 | /* | 94 | /* |
105 | * Given a nasid, get the physical address of the partition's reserved page | 95 | * Given a nasid, get the physical address of the partition's reserved page |
106 | * for that nasid. This function returns 0 on any error. | 96 | * for that nasid. This function returns 0 on any error. |
@@ -117,11 +107,10 @@ xpc_get_rsvd_page_pa(int nasid) | |||
117 | u64 buf_len = 0; | 107 | u64 buf_len = 0; |
118 | void *buf_base = NULL; | 108 | void *buf_base = NULL; |
119 | 109 | ||
120 | |||
121 | while (1) { | 110 | while (1) { |
122 | 111 | ||
123 | status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa, | 112 | status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa, |
124 | &len); | 113 | &len); |
125 | 114 | ||
126 | dev_dbg(xpc_part, "SAL returned with status=%li, cookie=" | 115 | dev_dbg(xpc_part, "SAL returned with status=%li, cookie=" |
127 | "0x%016lx, address=0x%016lx, len=0x%016lx\n", | 116 | "0x%016lx, address=0x%016lx, len=0x%016lx\n", |
@@ -134,8 +123,9 @@ xpc_get_rsvd_page_pa(int nasid) | |||
134 | if (L1_CACHE_ALIGN(len) > buf_len) { | 123 | if (L1_CACHE_ALIGN(len) > buf_len) { |
135 | kfree(buf_base); | 124 | kfree(buf_base); |
136 | buf_len = L1_CACHE_ALIGN(len); | 125 | buf_len = L1_CACHE_ALIGN(len); |
137 | buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len, | 126 | buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len, |
138 | GFP_KERNEL, &buf_base); | 127 | GFP_KERNEL, |
128 | &buf_base); | ||
139 | if (buf_base == NULL) { | 129 | if (buf_base == NULL) { |
140 | dev_err(xpc_part, "unable to kmalloc " | 130 | dev_err(xpc_part, "unable to kmalloc " |
141 | "len=0x%016lx\n", buf_len); | 131 | "len=0x%016lx\n", buf_len); |
@@ -145,7 +135,7 @@ xpc_get_rsvd_page_pa(int nasid) | |||
145 | } | 135 | } |
146 | 136 | ||
147 | bte_res = xp_bte_copy(rp_pa, buf, buf_len, | 137 | bte_res = xp_bte_copy(rp_pa, buf, buf_len, |
148 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); | 138 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); |
149 | if (bte_res != BTE_SUCCESS) { | 139 | if (bte_res != BTE_SUCCESS) { |
150 | dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); | 140 | dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); |
151 | status = SALRET_ERROR; | 141 | status = SALRET_ERROR; |
@@ -162,7 +152,6 @@ xpc_get_rsvd_page_pa(int nasid) | |||
162 | return rp_pa; | 152 | return rp_pa; |
163 | } | 153 | } |
164 | 154 | ||
165 | |||
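
The loop above is a classic "ask, grow, retry" exchange with firmware: SAL reports how long the next chunk is, the buffer is regrown whenever L1_CACHE_ALIGN(len) outgrows it, and the BTE pulls the data across until SAL finally hands back the address. Skeleton of that control flow with the SGI specifics stubbed out (fetch() below is a toy stand-in for the SAL call plus BTE copy; the real loop also cacheline-aligns the buffer):

    #include <stdio.h>
    #include <stdlib.h>

    /* toy stand-in for sn_partition_reserved_page_pa(): demands a bigger
     * buffer twice, then succeeds */
    static int fetch(size_t buf_len, size_t *needed)
    {
        static const size_t want[] = { 256, 1024, 1024 };
        static int call;

        *needed = want[call < 2 ? call : 2];
        if (buf_len < *needed) {
            call++;
            return 1;        /* "needs more space, call again" */
        }
        return 0;            /* done */
    }

    int main(void)
    {
        void *buf = NULL;
        size_t buf_len = 0, needed;

        while (fetch(buf_len, &needed) == 1) {
            free(buf);                       /* regrow before retrying */
            buf_len = needed;
            buf = malloc(buf_len);
            if (buf == NULL)
                return 1;
            printf("grew buffer to %zu bytes\n", buf_len);
        }
        /* ... use buf ... */
        free(buf);
        return 0;
    }
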
166 | /* | 155 | /* |
167 | * Fill the partition reserved page with the information needed by | 156 | * Fill the partition reserved page with the information needed by |
168 | * other partitions to discover we are alive and establish initial | 157 | * other partitions to discover we are alive and establish initial |
@@ -176,7 +165,6 @@ xpc_rsvd_page_init(void) | |||
176 | u64 rp_pa, nasid_array = 0; | 165 | u64 rp_pa, nasid_array = 0; |
177 | int i, ret; | 166 | int i, ret; |
178 | 167 | ||
179 | |||
180 | /* get the local reserved page's address */ | 168 | /* get the local reserved page's address */ |
181 | 169 | ||
182 | preempt_disable(); | 170 | preempt_disable(); |
@@ -186,7 +174,7 @@ xpc_rsvd_page_init(void) | |||
186 | dev_err(xpc_part, "SAL failed to locate the reserved page\n"); | 174 | dev_err(xpc_part, "SAL failed to locate the reserved page\n"); |
187 | return NULL; | 175 | return NULL; |
188 | } | 176 | } |
189 | rp = (struct xpc_rsvd_page *) __va(rp_pa); | 177 | rp = (struct xpc_rsvd_page *)__va(rp_pa); |
190 | 178 | ||
191 | if (rp->partid != sn_partition_id) { | 179 | if (rp->partid != sn_partition_id) { |
192 | dev_err(xpc_part, "the reserved page's partid of %d should be " | 180 | dev_err(xpc_part, "the reserved page's partid of %d should be " |
@@ -223,7 +211,7 @@ xpc_rsvd_page_init(void) | |||
223 | * memory protections are never restricted. | 211 | * memory protections are never restricted. |
224 | */ | 212 | */ |
225 | if ((amos_page = xpc_vars->amos_page) == NULL) { | 213 | if ((amos_page = xpc_vars->amos_page) == NULL) { |
226 | amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0)); | 214 | amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0)); |
227 | if (amos_page == NULL) { | 215 | if (amos_page == NULL) { |
228 | dev_err(xpc_part, "can't allocate page of AMOs\n"); | 216 | dev_err(xpc_part, "can't allocate page of AMOs\n"); |
229 | return NULL; | 217 | return NULL; |
@@ -234,30 +222,31 @@ xpc_rsvd_page_init(void) | |||
234 | * when xpc_allow_IPI_ops() is called via xpc_hb_init(). | 222 | * when xpc_allow_IPI_ops() is called via xpc_hb_init(). |
235 | */ | 223 | */ |
236 | if (!enable_shub_wars_1_1()) { | 224 | if (!enable_shub_wars_1_1()) { |
237 | ret = sn_change_memprotect(ia64_tpa((u64) amos_page), | 225 | ret = sn_change_memprotect(ia64_tpa((u64)amos_page), |
238 | PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1, | 226 | PAGE_SIZE, |
239 | &nasid_array); | 227 | SN_MEMPROT_ACCESS_CLASS_1, |
228 | &nasid_array); | ||
240 | if (ret != 0) { | 229 | if (ret != 0) { |
241 | dev_err(xpc_part, "can't change memory " | 230 | dev_err(xpc_part, "can't change memory " |
242 | "protections\n"); | 231 | "protections\n"); |
243 | uncached_free_page(__IA64_UNCACHED_OFFSET | | 232 | uncached_free_page(__IA64_UNCACHED_OFFSET | |
244 | TO_PHYS((u64) amos_page)); | 233 | TO_PHYS((u64)amos_page)); |
245 | return NULL; | 234 | return NULL; |
246 | } | 235 | } |
247 | } | 236 | } |
248 | } else if (!IS_AMO_ADDRESS((u64) amos_page)) { | 237 | } else if (!IS_AMO_ADDRESS((u64)amos_page)) { |
249 | /* | 238 | /* |
250 | * EFI's XPBOOT can also set amos_page in the reserved page, | 239 | * EFI's XPBOOT can also set amos_page in the reserved page, |
251 | * but it happens to leave it as an uncached physical address | 240 | * but it happens to leave it as an uncached physical address |
252 | * and we need it to be an uncached virtual, so we'll have to | 241 | * and we need it to be an uncached virtual, so we'll have to |
253 | * convert it. | 242 | * convert it. |
254 | */ | 243 | */ |
255 | if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) { | 244 | if (!IS_AMO_PHYS_ADDRESS((u64)amos_page)) { |
256 | dev_err(xpc_part, "previously used amos_page address " | 245 | dev_err(xpc_part, "previously used amos_page address " |
257 | "is bad = 0x%p\n", (void *) amos_page); | 246 | "is bad = 0x%p\n", (void *)amos_page); |
258 | return NULL; | 247 | return NULL; |
259 | } | 248 | } |
260 | amos_page = (AMO_t *) TO_AMO((u64) amos_page); | 249 | amos_page = (AMO_t *)TO_AMO((u64)amos_page); |
261 | } | 250 | } |
262 | 251 | ||
263 | /* clear xpc_vars */ | 252 | /* clear xpc_vars */ |
@@ -267,22 +256,21 @@ xpc_rsvd_page_init(void) | |||
267 | xpc_vars->act_nasid = cpuid_to_nasid(0); | 256 | xpc_vars->act_nasid = cpuid_to_nasid(0); |
268 | xpc_vars->act_phys_cpuid = cpu_physical_id(0); | 257 | xpc_vars->act_phys_cpuid = cpu_physical_id(0); |
269 | xpc_vars->vars_part_pa = __pa(xpc_vars_part); | 258 | xpc_vars->vars_part_pa = __pa(xpc_vars_part); |
270 | xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page); | 259 | xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page); |
271 | xpc_vars->amos_page = amos_page; /* save for next load of XPC */ | 260 | xpc_vars->amos_page = amos_page; /* save for next load of XPC */ |
272 | |||
273 | 261 | ||
274 | /* clear xpc_vars_part */ | 262 | /* clear xpc_vars_part */ |
275 | memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) * | 263 | memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) * |
276 | XP_MAX_PARTITIONS); | 264 | XP_MAX_PARTITIONS); |
277 | 265 | ||
278 | /* initialize the activate IRQ related AMO variables */ | 266 | /* initialize the activate IRQ related AMO variables */ |
279 | for (i = 0; i < xp_nasid_mask_words; i++) { | 267 | for (i = 0; i < xp_nasid_mask_words; i++) { |
280 | (void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i); | 268 | (void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i); |
281 | } | 269 | } |
282 | 270 | ||
283 | /* initialize the engaged remote partitions related AMO variables */ | 271 | /* initialize the engaged remote partitions related AMO variables */ |
284 | (void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO); | 272 | (void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO); |
285 | (void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO); | 273 | (void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO); |
286 | 274 | ||
287 | /* timestamp of when reserved page was setup by XPC */ | 275 | /* timestamp of when reserved page was setup by XPC */ |
288 | rp->stamp = CURRENT_TIME; | 276 | rp->stamp = CURRENT_TIME; |
@@ -296,7 +284,6 @@ xpc_rsvd_page_init(void) | |||
296 | return rp; | 284 | return rp; |
297 | } | 285 | } |
298 | 286 | ||
299 | |||
300 | /* | 287 | /* |
301 | * Change protections to allow IPI operations (and AMO operations on | 288 | * Change protections to allow IPI operations (and AMO operations on |
302 | * Shub 1.1 systems). | 289 | * Shub 1.1 systems). |
@@ -307,39 +294,38 @@ xpc_allow_IPI_ops(void) | |||
307 | int node; | 294 | int node; |
308 | int nasid; | 295 | int nasid; |
309 | 296 | ||
310 | |||
311 | // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. | 297 | // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. |
312 | 298 | ||
313 | if (is_shub2()) { | 299 | if (is_shub2()) { |
314 | xpc_sh2_IPI_access0 = | 300 | xpc_sh2_IPI_access0 = |
315 | (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); | 301 | (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); |
316 | xpc_sh2_IPI_access1 = | 302 | xpc_sh2_IPI_access1 = |
317 | (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); | 303 | (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); |
318 | xpc_sh2_IPI_access2 = | 304 | xpc_sh2_IPI_access2 = |
319 | (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); | 305 | (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); |
320 | xpc_sh2_IPI_access3 = | 306 | xpc_sh2_IPI_access3 = |
321 | (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); | 307 | (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); |
322 | 308 | ||
323 | for_each_online_node(node) { | 309 | for_each_online_node(node) { |
324 | nasid = cnodeid_to_nasid(node); | 310 | nasid = cnodeid_to_nasid(node); |
325 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), | 311 | HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), |
326 | -1UL); | 312 | -1UL); |
327 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), | 313 | HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), |
328 | -1UL); | 314 | -1UL); |
329 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), | 315 | HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), |
330 | -1UL); | 316 | -1UL); |
331 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), | 317 | HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), |
332 | -1UL); | 318 | -1UL); |
333 | } | 319 | } |
334 | 320 | ||
335 | } else { | 321 | } else { |
336 | xpc_sh1_IPI_access = | 322 | xpc_sh1_IPI_access = |
337 | (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); | 323 | (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); |
338 | 324 | ||
339 | for_each_online_node(node) { | 325 | for_each_online_node(node) { |
340 | nasid = cnodeid_to_nasid(node); | 326 | nasid = cnodeid_to_nasid(node); |
341 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), | 327 | HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), |
342 | -1UL); | 328 | -1UL); |
343 | 329 | ||
344 | /* | 330 | /* |
345 | * Since the BIST collides with memory operations on | 331 | * Since the BIST collides with memory operations on |
@@ -347,21 +333,23 @@ xpc_allow_IPI_ops(void) | |||
347 | */ | 333 | */ |
348 | if (enable_shub_wars_1_1()) { | 334 | if (enable_shub_wars_1_1()) { |
349 | /* open up everything */ | 335 | /* open up everything */ |
350 | xpc_prot_vec[node] = (u64) HUB_L((u64 *) | 336 | xpc_prot_vec[node] = (u64)HUB_L((u64 *) |
351 | GLOBAL_MMR_ADDR(nasid, | 337 | GLOBAL_MMR_ADDR |
352 | SH1_MD_DQLP_MMR_DIR_PRIVEC0)); | 338 | (nasid, |
353 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, | 339 | SH1_MD_DQLP_MMR_DIR_PRIVEC0)); |
354 | SH1_MD_DQLP_MMR_DIR_PRIVEC0), | 340 | HUB_S((u64 *) |
355 | -1UL); | 341 | GLOBAL_MMR_ADDR(nasid, |
356 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, | 342 | SH1_MD_DQLP_MMR_DIR_PRIVEC0), |
357 | SH1_MD_DQRP_MMR_DIR_PRIVEC0), | 343 | -1UL); |
358 | -1UL); | 344 | HUB_S((u64 *) |
345 | GLOBAL_MMR_ADDR(nasid, | ||
346 | SH1_MD_DQRP_MMR_DIR_PRIVEC0), | ||
347 | -1UL); | ||
359 | } | 348 | } |
360 | } | 349 | } |
361 | } | 350 | } |
362 | } | 351 | } |
363 | 352 | ||
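
xpc_allow_IPI_ops() and xpc_restrict_IPI_ops() below are a save/open/restore pair: the current access-control register value is stashed in a global, the register is thrown wide open (-1UL = anyone may IPI), and teardown writes the stash back. The shape of that pattern, with a plain array standing in for the MMRs (the real code keeps one saved value per shub register rather than per node, and on Shub 1.1 also saves and restores the memory-protection vectors):

    #include <stdint.h>

    #define NODES 4

    static uint64_t ipi_access[NODES];        /* models the per-node MMRs   */
    static uint64_t saved_ipi_access[NODES];  /* models xpc_sh*_IPI_access* */

    static void allow_ipi_ops(void)
    {
        for (int node = 0; node < NODES; node++) {
            saved_ipi_access[node] = ipi_access[node];  /* stash old value  */
            ipi_access[node] = ~UINT64_C(0);            /* open to everyone */
        }
    }

    static void restrict_ipi_ops(void)
    {
        for (int node = 0; node < NODES; node++)
            ipi_access[node] = saved_ipi_access[node];  /* put it back */
    }

    int main(void)
    {
        allow_ipi_ops();        /* done while XPC starts up */
        restrict_ipi_ops();     /* done as XPC exits */
        return 0;
    }
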
364 | |||
365 | /* | 353 | /* |
366 | * Restrict protections to disallow IPI operations (and AMO operations on | 354 | * Restrict protections to disallow IPI operations (and AMO operations on |
367 | * Shub 1.1 systems). | 355 | * Shub 1.1 systems). |
@@ -372,43 +360,41 @@ xpc_restrict_IPI_ops(void) | |||
372 | int node; | 360 | int node; |
373 | int nasid; | 361 | int nasid; |
374 | 362 | ||
375 | |||
376 | // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. | 363 | // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. |
377 | 364 | ||
378 | if (is_shub2()) { | 365 | if (is_shub2()) { |
379 | 366 | ||
380 | for_each_online_node(node) { | 367 | for_each_online_node(node) { |
381 | nasid = cnodeid_to_nasid(node); | 368 | nasid = cnodeid_to_nasid(node); |
382 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), | 369 | HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), |
383 | xpc_sh2_IPI_access0); | 370 | xpc_sh2_IPI_access0); |
384 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), | 371 | HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), |
385 | xpc_sh2_IPI_access1); | 372 | xpc_sh2_IPI_access1); |
386 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), | 373 | HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), |
387 | xpc_sh2_IPI_access2); | 374 | xpc_sh2_IPI_access2); |
388 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), | 375 | HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), |
389 | xpc_sh2_IPI_access3); | 376 | xpc_sh2_IPI_access3); |
390 | } | 377 | } |
391 | 378 | ||
392 | } else { | 379 | } else { |
393 | 380 | ||
394 | for_each_online_node(node) { | 381 | for_each_online_node(node) { |
395 | nasid = cnodeid_to_nasid(node); | 382 | nasid = cnodeid_to_nasid(node); |
396 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), | 383 | HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), |
397 | xpc_sh1_IPI_access); | 384 | xpc_sh1_IPI_access); |
398 | 385 | ||
399 | if (enable_shub_wars_1_1()) { | 386 | if (enable_shub_wars_1_1()) { |
400 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, | 387 | HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, |
401 | SH1_MD_DQLP_MMR_DIR_PRIVEC0), | 388 | SH1_MD_DQLP_MMR_DIR_PRIVEC0), |
402 | xpc_prot_vec[node]); | 389 | xpc_prot_vec[node]); |
403 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, | 390 | HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, |
404 | SH1_MD_DQRP_MMR_DIR_PRIVEC0), | 391 | SH1_MD_DQRP_MMR_DIR_PRIVEC0), |
405 | xpc_prot_vec[node]); | 392 | xpc_prot_vec[node]); |
406 | } | 393 | } |
407 | } | 394 | } |
408 | } | 395 | } |
409 | } | 396 | } |
410 | 397 | ||
411 | |||
412 | /* | 398 | /* |
413 | * At periodic intervals, scan through all active partitions and ensure | 399 | * At periodic intervals, scan through all active partitions and ensure |
414 | * their heartbeat is still active. If not, the partition is deactivated. | 400 | * their heartbeat is still active. If not, the partition is deactivated. |
@@ -421,8 +407,7 @@ xpc_check_remote_hb(void) | |||
421 | partid_t partid; | 407 | partid_t partid; |
422 | bte_result_t bres; | 408 | bte_result_t bres; |
423 | 409 | ||
424 | 410 | remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer; | |
425 | remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; | ||
426 | 411 | ||
427 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { | 412 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { |
428 | 413 | ||
@@ -437,18 +422,18 @@ xpc_check_remote_hb(void) | |||
437 | part = &xpc_partitions[partid]; | 422 | part = &xpc_partitions[partid]; |
438 | 423 | ||
439 | if (part->act_state == XPC_P_INACTIVE || | 424 | if (part->act_state == XPC_P_INACTIVE || |
440 | part->act_state == XPC_P_DEACTIVATING) { | 425 | part->act_state == XPC_P_DEACTIVATING) { |
441 | continue; | 426 | continue; |
442 | } | 427 | } |
443 | 428 | ||
444 | /* pull the remote_hb cache line */ | 429 | /* pull the remote_hb cache line */ |
445 | bres = xp_bte_copy(part->remote_vars_pa, | 430 | bres = xp_bte_copy(part->remote_vars_pa, |
446 | (u64) remote_vars, | 431 | (u64)remote_vars, |
447 | XPC_RP_VARS_SIZE, | 432 | XPC_RP_VARS_SIZE, |
448 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); | 433 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); |
449 | if (bres != BTE_SUCCESS) { | 434 | if (bres != BTE_SUCCESS) { |
450 | XPC_DEACTIVATE_PARTITION(part, | 435 | XPC_DEACTIVATE_PARTITION(part, |
451 | xpc_map_bte_errors(bres)); | 436 | xpc_map_bte_errors(bres)); |
452 | continue; | 437 | continue; |
453 | } | 438 | } |
454 | 439 | ||
@@ -459,8 +444,8 @@ xpc_check_remote_hb(void) | |||
459 | remote_vars->heartbeating_to_mask); | 444 | remote_vars->heartbeating_to_mask); |
460 | 445 | ||
461 | if (((remote_vars->heartbeat == part->last_heartbeat) && | 446 | if (((remote_vars->heartbeat == part->last_heartbeat) && |
462 | (remote_vars->heartbeat_offline == 0)) || | 447 | (remote_vars->heartbeat_offline == 0)) || |
463 | !xpc_hb_allowed(sn_partition_id, remote_vars)) { | 448 | !xpc_hb_allowed(sn_partition_id, remote_vars)) { |
464 | 449 | ||
465 | XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); | 450 | XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); |
466 | continue; | 451 | continue; |
@@ -470,7 +455,6 @@ xpc_check_remote_hb(void) | |||
470 | } | 455 | } |
471 | } | 456 | } |
472 | 457 | ||
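
The heartbeat monitor's rule, restated: a remote partition is considered dead if its counter failed to advance since the last scan while it did not declare itself offline, or if it stopped heartbeating to us at all. A compact model of that decision (xpc_hb_allowed() is modeled here as a simple bit test of heartbeating_to_mask, which is a reading of its intent rather than its code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct hb_vars {                  /* the fields the check actually reads */
        uint64_t heartbeat;
        uint64_t heartbeat_offline;   /* nonzero: deliberately paused */
        uint64_t heartbeating_to_mask;
    };

    /* returns true if the partition should be deactivated */
    static bool heartbeat_lost(const struct hb_vars *remote,
                               uint64_t last_heartbeat, int my_partid)
    {
        bool stalled = (remote->heartbeat == last_heartbeat) &&
                       (remote->heartbeat_offline == 0);
        bool hb_to_us = remote->heartbeating_to_mask &
                        (UINT64_C(1) << my_partid);

        return stalled || !hb_to_us;
    }

    int main(void)
    {
        struct hb_vars remote = { .heartbeat = 41, .heartbeat_offline = 0,
                                  .heartbeating_to_mask = 1 << 2 };

        /* counter did not advance since the last scan -> deactivate */
        printf("lost? %d\n", heartbeat_lost(&remote, 41, 2));
        /* counter advanced -> still alive */
        printf("lost? %d\n", heartbeat_lost(&remote, 40, 2));
        return 0;
    }
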
473 | |||
474 | /* | 458 | /* |
475 | * Get a copy of a portion of the remote partition's rsvd page. | 459 | * Get a copy of a portion of the remote partition's rsvd page. |
476 | * | 460 | * |
@@ -480,11 +464,10 @@ xpc_check_remote_hb(void) | |||
480 | */ | 464 | */ |
481 | static enum xpc_retval | 465 | static enum xpc_retval |
482 | xpc_get_remote_rp(int nasid, u64 *discovered_nasids, | 466 | xpc_get_remote_rp(int nasid, u64 *discovered_nasids, |
483 | struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) | 467 | struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) |
484 | { | 468 | { |
485 | int bres, i; | 469 | int bres, i; |
486 | 470 | ||
487 | |||
488 | /* get the reserved page's physical address */ | 471 | /* get the reserved page's physical address */ |
489 | 472 | ||
490 | *remote_rp_pa = xpc_get_rsvd_page_pa(nasid); | 473 | *remote_rp_pa = xpc_get_rsvd_page_pa(nasid); |
@@ -492,30 +475,26 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, | |||
492 | return xpcNoRsvdPageAddr; | 475 | return xpcNoRsvdPageAddr; |
493 | } | 476 | } |
494 | 477 | ||
495 | |||
496 | /* pull over the reserved page header and part_nasids mask */ | 478 | /* pull over the reserved page header and part_nasids mask */ |
497 | bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp, | 479 | bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp, |
498 | XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, | 480 | XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, |
499 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); | 481 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); |
500 | if (bres != BTE_SUCCESS) { | 482 | if (bres != BTE_SUCCESS) { |
501 | return xpc_map_bte_errors(bres); | 483 | return xpc_map_bte_errors(bres); |
502 | } | 484 | } |
503 | 485 | ||
504 | |||
505 | if (discovered_nasids != NULL) { | 486 | if (discovered_nasids != NULL) { |
506 | u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp); | 487 | u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp); |
507 | 488 | ||
508 | |||
509 | for (i = 0; i < xp_nasid_mask_words; i++) { | 489 | for (i = 0; i < xp_nasid_mask_words; i++) { |
510 | discovered_nasids[i] |= remote_part_nasids[i]; | 490 | discovered_nasids[i] |= remote_part_nasids[i]; |
511 | } | 491 | } |
512 | } | 492 | } |
513 | 493 | ||
514 | |||
515 | /* check that the partid is for another partition */ | 494 | /* check that the partid is for another partition */ |
516 | 495 | ||
517 | if (remote_rp->partid < 1 || | 496 | if (remote_rp->partid < 1 || |
518 | remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { | 497 | remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { |
519 | return xpcInvalidPartid; | 498 | return xpcInvalidPartid; |
520 | } | 499 | } |
521 | 500 | ||
@@ -523,16 +502,14 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, | |||
523 | return xpcLocalPartid; | 502 | return xpcLocalPartid; |
524 | } | 503 | } |
525 | 504 | ||
526 | |||
527 | if (XPC_VERSION_MAJOR(remote_rp->version) != | 505 | if (XPC_VERSION_MAJOR(remote_rp->version) != |
528 | XPC_VERSION_MAJOR(XPC_RP_VERSION)) { | 506 | XPC_VERSION_MAJOR(XPC_RP_VERSION)) { |
529 | return xpcBadVersion; | 507 | return xpcBadVersion; |
530 | } | 508 | } |
531 | 509 | ||
532 | return xpcSuccess; | 510 | return xpcSuccess; |
533 | } | 511 | } |
534 | 512 | ||
535 | |||
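xpc_get_remote_rp() validates the copied reserved page in a fixed order: partid range, then whether it actually names another partition, then major-version compatibility. A compilable sketch of those checks with stand-in types and return codes; the high-nibble major encoding is an assumption about XPC_VERSION_MAJOR, not something this hunk confirms.

#include <stdint.h>

#define XP_MAX_PARTITIONS	64
#define RP_VERSION_MAJOR(v)	((v) >> 4)	/* assumed encoding */

enum rp_check { RP_OK, RP_INVALID_PARTID, RP_LOCAL_PARTID, RP_BAD_VERSION };

struct rsvd_page_hdr {
	uint8_t partid;
	uint8_t version;
};

static enum rp_check
check_remote_rp(const struct rsvd_page_hdr *rp, int local_partid,
		uint8_t expected_version)
{
	/* partid must name another, valid partition */
	if (rp->partid < 1 || rp->partid > XP_MAX_PARTITIONS - 1)
		return RP_INVALID_PARTID;
	if (rp->partid == local_partid)
		return RP_LOCAL_PARTID;
	/* only a major-version mismatch is fatal */
	if (RP_VERSION_MAJOR(rp->version) !=
	    RP_VERSION_MAJOR(expected_version))
		return RP_BAD_VERSION;
	return RP_OK;
}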
536 | /* | 513 | /* |
537 | * Get a copy of the remote partition's XPC variables from the reserved page. | 514 | * Get a copy of the remote partition's XPC variables from the reserved page. |
538 | * | 515 | * |
@@ -544,34 +521,32 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars) | |||
544 | { | 521 | { |
545 | int bres; | 522 | int bres; |
546 | 523 | ||
547 | |||
548 | if (remote_vars_pa == 0) { | 524 | if (remote_vars_pa == 0) { |
549 | return xpcVarsNotSet; | 525 | return xpcVarsNotSet; |
550 | } | 526 | } |
551 | 527 | ||
552 | /* pull over the cross partition variables */ | 528 | /* pull over the cross partition variables */ |
553 | bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE, | 529 | bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE, |
554 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); | 530 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); |
555 | if (bres != BTE_SUCCESS) { | 531 | if (bres != BTE_SUCCESS) { |
556 | return xpc_map_bte_errors(bres); | 532 | return xpc_map_bte_errors(bres); |
557 | } | 533 | } |
558 | 534 | ||
559 | if (XPC_VERSION_MAJOR(remote_vars->version) != | 535 | if (XPC_VERSION_MAJOR(remote_vars->version) != |
560 | XPC_VERSION_MAJOR(XPC_V_VERSION)) { | 536 | XPC_VERSION_MAJOR(XPC_V_VERSION)) { |
561 | return xpcBadVersion; | 537 | return xpcBadVersion; |
562 | } | 538 | } |
563 | 539 | ||
564 | return xpcSuccess; | 540 | return xpcSuccess; |
565 | } | 541 | } |
566 | 542 | ||
567 | |||
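Both xpc_get_remote_rp() and xpc_get_remote_vars() compare only XPC_VERSION_MAJOR(), so minor revisions interoperate. A sketch of the packing this implies, major in the upper four bits and minor in the lower four, which is a guess at the real macros in xpc.h rather than something this diff shows:

#include <assert.h>

#define XPC_VERSION(maj, min)	(((maj) << 4) | (min))	/* assumed */
#define XPC_VERSION_MAJOR(v)	((v) >> 4)
#define XPC_VERSION_MINOR(v)	((v) & 0xf)

int main(void)
{
	int v = XPC_VERSION(3, 1);

	assert(XPC_VERSION_MAJOR(v) == 3);
	assert(XPC_VERSION_MINOR(v) == 1);
	/* a newer minor on the far side is still considered compatible */
	assert(XPC_VERSION_MAJOR(XPC_VERSION(3, 2)) == XPC_VERSION_MAJOR(v));
	return 0;
}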
568 | /* | 543 | /* |
569 | * Update the remote partition's info. | 544 | * Update the remote partition's info. |
570 | */ | 545 | */ |
571 | static void | 546 | static void |
572 | xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version, | 547 | xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version, |
573 | struct timespec *remote_rp_stamp, u64 remote_rp_pa, | 548 | struct timespec *remote_rp_stamp, u64 remote_rp_pa, |
574 | u64 remote_vars_pa, struct xpc_vars *remote_vars) | 549 | u64 remote_vars_pa, struct xpc_vars *remote_vars) |
575 | { | 550 | { |
576 | part->remote_rp_version = remote_rp_version; | 551 | part->remote_rp_version = remote_rp_version; |
577 | dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n", | 552 | dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n", |
@@ -613,7 +588,6 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version, | |||
613 | part->remote_vars_version); | 588 | part->remote_vars_version); |
614 | } | 589 | } |
615 | 590 | ||
616 | |||
617 | /* | 591 | /* |
618 | * Prior code has determined the nasid which generated an IPI. Inspect | 592 | * Prior code has determined the nasid which generated an IPI. Inspect |
619 | * that nasid to determine if its partition needs to be activated or | 593 | * that nasid to determine if its partition needs to be activated or |
@@ -643,15 +617,14 @@ xpc_identify_act_IRQ_req(int nasid) | |||
643 | struct xpc_partition *part; | 617 | struct xpc_partition *part; |
644 | enum xpc_retval ret; | 618 | enum xpc_retval ret; |
645 | 619 | ||
646 | |||
647 | /* pull over the reserved page structure */ | 620 | /* pull over the reserved page structure */ |
648 | 621 | ||
649 | remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer; | 622 | remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer; |
650 | 623 | ||
651 | ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa); | 624 | ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa); |
652 | if (ret != xpcSuccess) { | 625 | if (ret != xpcSuccess) { |
653 | dev_warn(xpc_part, "unable to get reserved page from nasid %d, " | 626 | dev_warn(xpc_part, "unable to get reserved page from nasid %d, " |
654 | "which sent interrupt, reason=%d\n", nasid, ret); | 627 | "which sent interrupt, reason=%d\n", nasid, ret); |
655 | return; | 628 | return; |
656 | } | 629 | } |
657 | 630 | ||
@@ -663,34 +636,31 @@ xpc_identify_act_IRQ_req(int nasid) | |||
663 | partid = remote_rp->partid; | 636 | partid = remote_rp->partid; |
664 | part = &xpc_partitions[partid]; | 637 | part = &xpc_partitions[partid]; |
665 | 638 | ||
666 | |||
667 | /* pull over the cross partition variables */ | 639 | /* pull over the cross partition variables */ |
668 | 640 | ||
669 | remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; | 641 | remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer; |
670 | 642 | ||
671 | ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); | 643 | ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); |
672 | if (ret != xpcSuccess) { | 644 | if (ret != xpcSuccess) { |
673 | 645 | ||
674 | dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " | 646 | dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " |
675 | "which sent interrupt, reason=%d\n", nasid, ret); | 647 | "which sent interrupt, reason=%d\n", nasid, ret); |
676 | 648 | ||
677 | XPC_DEACTIVATE_PARTITION(part, ret); | 649 | XPC_DEACTIVATE_PARTITION(part, ret); |
678 | return; | 650 | return; |
679 | } | 651 | } |
680 | 652 | ||
681 | |||
682 | part->act_IRQ_rcvd++; | 653 | part->act_IRQ_rcvd++; |
683 | 654 | ||
684 | dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " | 655 | dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " |
685 | "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd, | 656 | "%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd, |
686 | remote_vars->heartbeat, remote_vars->heartbeating_to_mask); | 657 | remote_vars->heartbeat, remote_vars->heartbeating_to_mask); |
687 | 658 | ||
688 | if (xpc_partition_disengaged(part) && | 659 | if (xpc_partition_disengaged(part) && part->act_state == XPC_P_INACTIVE) { |
689 | part->act_state == XPC_P_INACTIVE) { | ||
690 | 660 | ||
691 | xpc_update_partition_info(part, remote_rp_version, | 661 | xpc_update_partition_info(part, remote_rp_version, |
692 | &remote_rp_stamp, remote_rp_pa, | 662 | &remote_rp_stamp, remote_rp_pa, |
693 | remote_vars_pa, remote_vars); | 663 | remote_vars_pa, remote_vars); |
694 | 664 | ||
695 | if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { | 665 | if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { |
696 | if (xpc_partition_disengage_requested(1UL << partid)) { | 666 | if (xpc_partition_disengage_requested(1UL << partid)) { |
@@ -714,16 +684,15 @@ xpc_identify_act_IRQ_req(int nasid) | |||
714 | 684 | ||
715 | if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) { | 685 | if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) { |
716 | DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part-> | 686 | DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part-> |
717 | remote_vars_version)); | 687 | remote_vars_version)); |
718 | 688 | ||
719 | if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) { | 689 | if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) { |
720 | DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> | 690 | DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> |
721 | version)); | 691 | version)); |
722 | /* see if the other side rebooted */ | 692 | /* see if the other side rebooted */ |
723 | if (part->remote_amos_page_pa == | 693 | if (part->remote_amos_page_pa == |
724 | remote_vars->amos_page_pa && | 694 | remote_vars->amos_page_pa && |
725 | xpc_hb_allowed(sn_partition_id, | 695 | xpc_hb_allowed(sn_partition_id, remote_vars)) { |
726 | remote_vars)) { | ||
727 | /* doesn't look that way, so ignore the IPI */ | 696 | /* doesn't look that way, so ignore the IPI */ |
728 | return; | 697 | return; |
729 | } | 698 | } |
@@ -735,8 +704,8 @@ xpc_identify_act_IRQ_req(int nasid) | |||
735 | */ | 704 | */ |
736 | 705 | ||
737 | xpc_update_partition_info(part, remote_rp_version, | 706 | xpc_update_partition_info(part, remote_rp_version, |
738 | &remote_rp_stamp, remote_rp_pa, | 707 | &remote_rp_stamp, remote_rp_pa, |
739 | remote_vars_pa, remote_vars); | 708 | remote_vars_pa, remote_vars); |
740 | part->reactivate_nasid = nasid; | 709 | part->reactivate_nasid = nasid; |
741 | XPC_DEACTIVATE_PARTITION(part, xpcReactivating); | 710 | XPC_DEACTIVATE_PARTITION(part, xpcReactivating); |
742 | return; | 711 | return; |
@@ -756,15 +725,15 @@ xpc_identify_act_IRQ_req(int nasid) | |||
756 | xpc_clear_partition_disengage_request(1UL << partid); | 725 | xpc_clear_partition_disengage_request(1UL << partid); |
757 | 726 | ||
758 | xpc_update_partition_info(part, remote_rp_version, | 727 | xpc_update_partition_info(part, remote_rp_version, |
759 | &remote_rp_stamp, remote_rp_pa, | 728 | &remote_rp_stamp, remote_rp_pa, |
760 | remote_vars_pa, remote_vars); | 729 | remote_vars_pa, remote_vars); |
761 | reactivate = 1; | 730 | reactivate = 1; |
762 | 731 | ||
763 | } else { | 732 | } else { |
764 | DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version)); | 733 | DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version)); |
765 | 734 | ||
766 | stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp, | 735 | stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp, |
767 | &remote_rp_stamp); | 736 | &remote_rp_stamp); |
768 | if (stamp_diff != 0) { | 737 | if (stamp_diff != 0) { |
769 | DBUG_ON(stamp_diff >= 0); | 738 | DBUG_ON(stamp_diff >= 0); |
770 | 739 | ||
@@ -775,17 +744,18 @@ xpc_identify_act_IRQ_req(int nasid) | |||
775 | 744 | ||
776 | DBUG_ON(xpc_partition_engaged(1UL << partid)); | 745 | DBUG_ON(xpc_partition_engaged(1UL << partid)); |
777 | DBUG_ON(xpc_partition_disengage_requested(1UL << | 746 | DBUG_ON(xpc_partition_disengage_requested(1UL << |
778 | partid)); | 747 | partid)); |
779 | 748 | ||
780 | xpc_update_partition_info(part, remote_rp_version, | 749 | xpc_update_partition_info(part, remote_rp_version, |
781 | &remote_rp_stamp, remote_rp_pa, | 750 | &remote_rp_stamp, |
782 | remote_vars_pa, remote_vars); | 751 | remote_rp_pa, remote_vars_pa, |
752 | remote_vars); | ||
783 | reactivate = 1; | 753 | reactivate = 1; |
784 | } | 754 | } |
785 | } | 755 | } |
786 | 756 | ||
787 | if (part->disengage_request_timeout > 0 && | 757 | if (part->disengage_request_timeout > 0 && |
788 | !xpc_partition_disengaged(part)) { | 758 | !xpc_partition_disengaged(part)) { |
789 | /* still waiting on other side to disengage from us */ | 759 | /* still waiting on other side to disengage from us */ |
790 | return; | 760 | return; |
791 | } | 761 | } |
@@ -795,12 +765,11 @@ xpc_identify_act_IRQ_req(int nasid) | |||
795 | XPC_DEACTIVATE_PARTITION(part, xpcReactivating); | 765 | XPC_DEACTIVATE_PARTITION(part, xpcReactivating); |
796 | 766 | ||
797 | } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) && | 767 | } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) && |
798 | xpc_partition_disengage_requested(1UL << partid)) { | 768 | xpc_partition_disengage_requested(1UL << partid)) { |
799 | XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown); | 769 | XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown); |
800 | } | 770 | } |
801 | } | 771 | } |
802 | 772 | ||
803 | |||
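Throughout xpc_identify_act_IRQ_req() a partition is named by the single-bit mask 1UL << partid when testing or clearing engagement state. A userspace sketch of that idiom; in the driver these masks live in AMO variables reachable from both sides, here they are ordinary words.

#include <stdint.h>

static uint64_t engaged_mask;		/* partitions engaged with us */
static uint64_t disengage_req_mask;	/* partitions asked to disengage */

static int partition_engaged(int partid)
{
	return (engaged_mask & ((uint64_t)1 << partid)) != 0;
}

static int disengage_requested(int partid)
{
	return (disengage_req_mask & ((uint64_t)1 << partid)) != 0;
}

static void clear_partition_engaged(int partid)
{
	engaged_mask &= ~((uint64_t)1 << partid);
}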
804 | /* | 773 | /* |
805 | * Loop through the activation AMO variables and process any bits | 774 | * Loop through the activation AMO variables and process any bits |
806 | * which are set. Each bit indicates a nasid sending a partition | 775 | * which are set. Each bit indicates a nasid sending a partition |
@@ -813,14 +782,12 @@ xpc_identify_act_IRQ_sender(void) | |||
813 | { | 782 | { |
814 | int word, bit; | 783 | int word, bit; |
815 | u64 nasid_mask; | 784 | u64 nasid_mask; |
816 | u64 nasid; /* remote nasid */ | 785 | u64 nasid; /* remote nasid */ |
817 | int n_IRQs_detected = 0; | 786 | int n_IRQs_detected = 0; |
818 | AMO_t *act_amos; | 787 | AMO_t *act_amos; |
819 | 788 | ||
820 | |||
821 | act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS; | 789 | act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS; |
822 | 790 | ||
823 | |||
824 | /* scan through act AMO variable looking for non-zero entries */ | 791 | /* scan through act AMO variable looking for non-zero entries */ |
825 | for (word = 0; word < xp_nasid_mask_words; word++) { | 792 | for (word = 0; word < xp_nasid_mask_words; word++) { |
826 | 793 | ||
@@ -837,7 +804,6 @@ xpc_identify_act_IRQ_sender(void) | |||
837 | dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word, | 804 | dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word, |
838 | nasid_mask); | 805 | nasid_mask); |
839 | 806 | ||
840 | |||
841 | /* | 807 | /* |
842 | * If this nasid has been added to the machine since | 808 | * If this nasid has been added to the machine since |
843 | * our partition was reset, this will retain the | 809 | * our partition was reset, this will retain the |
@@ -846,7 +812,6 @@ xpc_identify_act_IRQ_sender(void) | |||
846 | */ | 812 | */ |
847 | xpc_mach_nasids[word] |= nasid_mask; | 813 | xpc_mach_nasids[word] |= nasid_mask; |
848 | 814 | ||
849 | |||
850 | /* locate the nasid(s) which sent interrupts */ | 815 | /* locate the nasid(s) which sent interrupts */ |
851 | 816 | ||
852 | for (bit = 0; bit < (8 * sizeof(u64)); bit++) { | 817 | for (bit = 0; bit < (8 * sizeof(u64)); bit++) { |
@@ -862,7 +827,6 @@ xpc_identify_act_IRQ_sender(void) | |||
862 | return n_IRQs_detected; | 827 | return n_IRQs_detected; |
863 | } | 828 | } |
864 | 829 | ||
865 | |||
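The scan in xpc_identify_act_IRQ_sender() walks each AMO word and treats every set bit as one requesting nasid. A runnable sketch of that walk; the bit-to-nasid arithmetic (two nasids per bit position, C-bricks on the even ones) is inferred from the surrounding comments rather than shown in this hunk, and identify_act_IRQ_req() is a stub standing in for the real handler.

#include <stdint.h>
#include <stdio.h>

#define MASK_WORDS 4	/* stand-in for xp_nasid_mask_words */

static void identify_act_IRQ_req(int nasid)
{
	printf("activate IRQ from nasid %d\n", nasid);
}

static int scan_act_amos(const uint64_t nasid_masks[MASK_WORDS])
{
	int word, bit, n_IRQs_detected = 0;

	for (word = 0; word < MASK_WORDS; word++) {
		uint64_t mask = nasid_masks[word];

		if (mask == 0)
			continue;	/* no requests in this word */

		for (bit = 0; bit < 64; bit++) {
			if (!(mask & ((uint64_t)1 << bit)))
				continue;
			/* even nasids only: two nasids per bit position */
			identify_act_IRQ_req((word * 64 + bit) * 2);
			n_IRQs_detected++;
		}
	}
	return n_IRQs_detected;
}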
866 | /* | 830 | /* |
867 | * See if the other side has responded to a partition disengage request | 831 | * See if the other side has responded to a partition disengage request |
868 | * from us. | 832 | * from us. |
@@ -873,7 +837,6 @@ xpc_partition_disengaged(struct xpc_partition *part) | |||
873 | partid_t partid = XPC_PARTID(part); | 837 | partid_t partid = XPC_PARTID(part); |
874 | int disengaged; | 838 | int disengaged; |
875 | 839 | ||
876 | |||
877 | disengaged = (xpc_partition_engaged(1UL << partid) == 0); | 840 | disengaged = (xpc_partition_engaged(1UL << partid) == 0); |
878 | if (part->disengage_request_timeout) { | 841 | if (part->disengage_request_timeout) { |
879 | if (!disengaged) { | 842 | if (!disengaged) { |
@@ -888,7 +851,7 @@ xpc_partition_disengaged(struct xpc_partition *part) | |||
888 | */ | 851 | */ |
889 | 852 | ||
890 | dev_info(xpc_part, "disengage from remote partition %d " | 853 | dev_info(xpc_part, "disengage from remote partition %d " |
891 | "timed out\n", partid); | 854 | "timed out\n", partid); |
892 | xpc_disengage_request_timedout = 1; | 855 | xpc_disengage_request_timedout = 1; |
893 | xpc_clear_partition_engaged(1UL << partid); | 856 | xpc_clear_partition_engaged(1UL << partid); |
894 | disengaged = 1; | 857 | disengaged = 1; |
@@ -898,11 +861,11 @@ xpc_partition_disengaged(struct xpc_partition *part) | |||
898 | /* cancel the timer function, provided it's not us */ | 861 | /* cancel the timer function, provided it's not us */ |
899 | if (!in_interrupt()) { | 862 | if (!in_interrupt()) { |
900 | del_singleshot_timer_sync(&part-> | 863 | del_singleshot_timer_sync(&part-> |
901 | disengage_request_timer); | 864 | disengage_request_timer); |
902 | } | 865 | } |
903 | 866 | ||
904 | DBUG_ON(part->act_state != XPC_P_DEACTIVATING && | 867 | DBUG_ON(part->act_state != XPC_P_DEACTIVATING && |
905 | part->act_state != XPC_P_INACTIVE); | 868 | part->act_state != XPC_P_INACTIVE); |
906 | if (part->act_state != XPC_P_INACTIVE) { | 869 | if (part->act_state != XPC_P_INACTIVE) { |
907 | xpc_wakeup_channel_mgr(part); | 870 | xpc_wakeup_channel_mgr(part); |
908 | } | 871 | } |
@@ -914,7 +877,6 @@ xpc_partition_disengaged(struct xpc_partition *part) | |||
914 | return disengaged; | 877 | return disengaged; |
915 | } | 878 | } |
916 | 879 | ||
917 | |||
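xpc_partition_disengaged() gives the remote side until disengage_request_timeout (a jiffies value) to drop its engaged bit, and forces the issue after that. A loose userspace model of that decision, with jiffies replaced by a caller-supplied tick count; the real function also clears the engaged mask, cancels the timer, and wakes the channel manager, all omitted here.

#include <stdint.h>

/* signed difference handles tick wraparound, like the jiffies helpers */
static int ticks_after_eq(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) >= 0;
}

/* nonzero once the remote side is, or is deemed, disengaged */
static int partition_disengaged(int engaged, uint64_t now,
				uint64_t timeout, int *timedout)
{
	if (engaged && timeout != 0 && ticks_after_eq(now, timeout)) {
		*timedout = 1;	/* gave up waiting on the other side */
		return 1;
	}
	return !engaged;
}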
918 | /* | 880 | /* |
919 | * Mark specified partition as active. | 881 | * Mark specified partition as active. |
920 | */ | 882 | */ |
@@ -924,7 +886,6 @@ xpc_mark_partition_active(struct xpc_partition *part) | |||
924 | unsigned long irq_flags; | 886 | unsigned long irq_flags; |
925 | enum xpc_retval ret; | 887 | enum xpc_retval ret; |
926 | 888 | ||
927 | |||
928 | dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); | 889 | dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); |
929 | 890 | ||
930 | spin_lock_irqsave(&part->act_lock, irq_flags); | 891 | spin_lock_irqsave(&part->act_lock, irq_flags); |
@@ -940,17 +901,15 @@ xpc_mark_partition_active(struct xpc_partition *part) | |||
940 | return ret; | 901 | return ret; |
941 | } | 902 | } |
942 | 903 | ||
943 | |||
944 | /* | 904 | /* |
945 | * Notify XPC that the partition is down. | 905 | * Notify XPC that the partition is down. |
946 | */ | 906 | */ |
947 | void | 907 | void |
948 | xpc_deactivate_partition(const int line, struct xpc_partition *part, | 908 | xpc_deactivate_partition(const int line, struct xpc_partition *part, |
949 | enum xpc_retval reason) | 909 | enum xpc_retval reason) |
950 | { | 910 | { |
951 | unsigned long irq_flags; | 911 | unsigned long irq_flags; |
952 | 912 | ||
953 | |||
954 | spin_lock_irqsave(&part->act_lock, irq_flags); | 913 | spin_lock_irqsave(&part->act_lock, irq_flags); |
955 | 914 | ||
956 | if (part->act_state == XPC_P_INACTIVE) { | 915 | if (part->act_state == XPC_P_INACTIVE) { |
@@ -964,7 +923,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part, | |||
964 | } | 923 | } |
965 | if (part->act_state == XPC_P_DEACTIVATING) { | 924 | if (part->act_state == XPC_P_DEACTIVATING) { |
966 | if ((part->reason == xpcUnloading && reason != xpcUnloading) || | 925 | if ((part->reason == xpcUnloading && reason != xpcUnloading) || |
967 | reason == xpcReactivating) { | 926 | reason == xpcReactivating) { |
968 | XPC_SET_REASON(part, reason, line); | 927 | XPC_SET_REASON(part, reason, line); |
969 | } | 928 | } |
970 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | 929 | spin_unlock_irqrestore(&part->act_lock, irq_flags); |
@@ -982,9 +941,9 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part, | |||
982 | 941 | ||
983 | /* set a timelimit on the disengage request */ | 942 | /* set a timelimit on the disengage request */ |
984 | part->disengage_request_timeout = jiffies + | 943 | part->disengage_request_timeout = jiffies + |
985 | (xpc_disengage_request_timelimit * HZ); | 944 | (xpc_disengage_request_timelimit * HZ); |
986 | part->disengage_request_timer.expires = | 945 | part->disengage_request_timer.expires = |
987 | part->disengage_request_timeout; | 946 | part->disengage_request_timeout; |
988 | add_timer(&part->disengage_request_timer); | 947 | add_timer(&part->disengage_request_timer); |
989 | } | 948 | } |
990 | 949 | ||
@@ -994,7 +953,6 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part, | |||
994 | xpc_partition_going_down(part, reason); | 953 | xpc_partition_going_down(part, reason); |
995 | } | 954 | } |
996 | 955 | ||
997 | |||
998 | /* | 956 | /* |
999 | * Mark specified partition as inactive. | 957 | * Mark specified partition as inactive. |
1000 | */ | 958 | */ |
@@ -1003,7 +961,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part) | |||
1003 | { | 961 | { |
1004 | unsigned long irq_flags; | 962 | unsigned long irq_flags; |
1005 | 963 | ||
1006 | |||
1007 | dev_dbg(xpc_part, "setting partition %d to INACTIVE\n", | 964 | dev_dbg(xpc_part, "setting partition %d to INACTIVE\n", |
1008 | XPC_PARTID(part)); | 965 | XPC_PARTID(part)); |
1009 | 966 | ||
@@ -1013,7 +970,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part) | |||
1013 | part->remote_rp_pa = 0; | 970 | part->remote_rp_pa = 0; |
1014 | } | 971 | } |
1015 | 972 | ||
1016 | |||
1017 | /* | 973 | /* |
1018 | * SAL has provided a partition and machine mask. The partition mask | 974 | * SAL has provided a partition and machine mask. The partition mask |
1019 | * contains a bit for each even nasid in our partition. The machine | 975 | * contains a bit for each even nasid in our partition. The machine |
@@ -1041,24 +997,22 @@ xpc_discovery(void) | |||
1041 | u64 *discovered_nasids; | 997 | u64 *discovered_nasids; |
1042 | enum xpc_retval ret; | 998 | enum xpc_retval ret; |
1043 | 999 | ||
1044 | |||
1045 | remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + | 1000 | remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + |
1046 | xp_nasid_mask_bytes, | 1001 | xp_nasid_mask_bytes, |
1047 | GFP_KERNEL, &remote_rp_base); | 1002 | GFP_KERNEL, &remote_rp_base); |
1048 | if (remote_rp == NULL) { | 1003 | if (remote_rp == NULL) { |
1049 | return; | 1004 | return; |
1050 | } | 1005 | } |
1051 | remote_vars = (struct xpc_vars *) remote_rp; | 1006 | remote_vars = (struct xpc_vars *)remote_rp; |
1052 | |||
1053 | 1007 | ||
1054 | discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words, | 1008 | discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words, |
1055 | GFP_KERNEL); | 1009 | GFP_KERNEL); |
1056 | if (discovered_nasids == NULL) { | 1010 | if (discovered_nasids == NULL) { |
1057 | kfree(remote_rp_base); | 1011 | kfree(remote_rp_base); |
1058 | return; | 1012 | return; |
1059 | } | 1013 | } |
1060 | 1014 | ||
1061 | rp = (struct xpc_rsvd_page *) xpc_rsvd_page; | 1015 | rp = (struct xpc_rsvd_page *)xpc_rsvd_page; |
1062 | 1016 | ||
1063 | /* | 1017 | /* |
1064 | * The term 'region' in this context refers to the minimum number of | 1018 | * The term 'region' in this context refers to the minimum number of |
@@ -1081,23 +1035,21 @@ xpc_discovery(void) | |||
1081 | 1035 | ||
1082 | for (region = 0; region < max_regions; region++) { | 1036 | for (region = 0; region < max_regions; region++) { |
1083 | 1037 | ||
1084 | if ((volatile int) xpc_exiting) { | 1038 | if ((volatile int)xpc_exiting) { |
1085 | break; | 1039 | break; |
1086 | } | 1040 | } |
1087 | 1041 | ||
1088 | dev_dbg(xpc_part, "searching region %d\n", region); | 1042 | dev_dbg(xpc_part, "searching region %d\n", region); |
1089 | 1043 | ||
1090 | for (nasid = (region * region_size * 2); | 1044 | for (nasid = (region * region_size * 2); |
1091 | nasid < ((region + 1) * region_size * 2); | 1045 | nasid < ((region + 1) * region_size * 2); nasid += 2) { |
1092 | nasid += 2) { | ||
1093 | 1046 | ||
1094 | if ((volatile int) xpc_exiting) { | 1047 | if ((volatile int)xpc_exiting) { |
1095 | break; | 1048 | break; |
1096 | } | 1049 | } |
1097 | 1050 | ||
1098 | dev_dbg(xpc_part, "checking nasid %d\n", nasid); | 1051 | dev_dbg(xpc_part, "checking nasid %d\n", nasid); |
1099 | 1052 | ||
1100 | |||
1101 | if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) { | 1053 | if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) { |
1102 | dev_dbg(xpc_part, "PROM indicates Nasid %d is " | 1054 | dev_dbg(xpc_part, "PROM indicates Nasid %d is " |
1103 | "part of the local partition; skipping " | 1055 | "part of the local partition; skipping " |
@@ -1119,11 +1071,10 @@ xpc_discovery(void) | |||
1119 | continue; | 1071 | continue; |
1120 | } | 1072 | } |
1121 | 1073 | ||
1122 | |||
1123 | /* pull over the reserved page structure */ | 1074 | /* pull over the reserved page structure */ |
1124 | 1075 | ||
1125 | ret = xpc_get_remote_rp(nasid, discovered_nasids, | 1076 | ret = xpc_get_remote_rp(nasid, discovered_nasids, |
1126 | remote_rp, &remote_rp_pa); | 1077 | remote_rp, &remote_rp_pa); |
1127 | if (ret != xpcSuccess) { | 1078 | if (ret != xpcSuccess) { |
1128 | dev_dbg(xpc_part, "unable to get reserved page " | 1079 | dev_dbg(xpc_part, "unable to get reserved page " |
1129 | "from nasid %d, reason=%d\n", nasid, | 1080 | "from nasid %d, reason=%d\n", nasid, |
@@ -1140,7 +1091,6 @@ xpc_discovery(void) | |||
1140 | partid = remote_rp->partid; | 1091 | partid = remote_rp->partid; |
1141 | part = &xpc_partitions[partid]; | 1092 | part = &xpc_partitions[partid]; |
1142 | 1093 | ||
1143 | |||
1144 | /* pull over the cross partition variables */ | 1094 | /* pull over the cross partition variables */ |
1145 | 1095 | ||
1146 | ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); | 1096 | ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); |
@@ -1171,15 +1121,15 @@ xpc_discovery(void) | |||
1171 | * get the same page for remote_act_amos_pa after | 1121 | * get the same page for remote_act_amos_pa after |
1172 | * module reloads and system reboots. | 1122 | * module reloads and system reboots. |
1173 | */ | 1123 | */ |
1174 | if (sn_register_xp_addr_region( | 1124 | if (sn_register_xp_addr_region |
1175 | remote_vars->amos_page_pa, | 1125 | (remote_vars->amos_page_pa, PAGE_SIZE, 1) < 0) { |
1176 | PAGE_SIZE, 1) < 0) { | 1126 | dev_dbg(xpc_part, |
1177 | dev_dbg(xpc_part, "partition %d failed to " | 1127 | "partition %d failed to " |
1178 | "register xp_addr region 0x%016lx\n", | 1128 | "register xp_addr region 0x%016lx\n", |
1179 | partid, remote_vars->amos_page_pa); | 1129 | partid, remote_vars->amos_page_pa); |
1180 | 1130 | ||
1181 | XPC_SET_REASON(part, xpcPhysAddrRegFailed, | 1131 | XPC_SET_REASON(part, xpcPhysAddrRegFailed, |
1182 | __LINE__); | 1132 | __LINE__); |
1183 | break; | 1133 | break; |
1184 | } | 1134 | } |
1185 | 1135 | ||
@@ -1195,9 +1145,9 @@ xpc_discovery(void) | |||
1195 | remote_vars->act_phys_cpuid); | 1145 | remote_vars->act_phys_cpuid); |
1196 | 1146 | ||
1197 | if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> | 1147 | if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> |
1198 | version)) { | 1148 | version)) { |
1199 | part->remote_amos_page_pa = | 1149 | part->remote_amos_page_pa = |
1200 | remote_vars->amos_page_pa; | 1150 | remote_vars->amos_page_pa; |
1201 | xpc_mark_partition_disengaged(part); | 1151 | xpc_mark_partition_disengaged(part); |
1202 | xpc_cancel_partition_disengage_request(part); | 1152 | xpc_cancel_partition_disengage_request(part); |
1203 | } | 1153 | } |
@@ -1209,7 +1159,6 @@ xpc_discovery(void) | |||
1209 | kfree(remote_rp_base); | 1159 | kfree(remote_rp_base); |
1210 | } | 1160 | } |
1211 | 1161 | ||
1212 | |||
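The discovery loop above visits every region in turn and, within each, probes only even-numbered nasids (the candidate C-bricks), bailing out early if the module is unloading. That iteration pattern, reduced to a runnable sketch with probe_nasid() standing in for the reserved-page and variables pulls:

#include <stdio.h>

static void probe_nasid(int nasid)
{
	printf("checking nasid %d\n", nasid);
}

static void discover(int max_regions, int region_size, const int *exiting)
{
	int region, nasid;

	for (region = 0; region < max_regions; region++) {
		if (*exiting)
			break;
		for (nasid = region * region_size * 2;
		     nasid < (region + 1) * region_size * 2; nasid += 2) {
			if (*exiting)
				break;
			probe_nasid(nasid);
		}
	}
}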
1213 | /* | 1162 | /* |
1214 | * Given a partid, get the nasids owned by that partition from the | 1163 | * Given a partid, get the nasids owned by that partition from the |
1215 | * remote partition's reserved page. | 1164 | * remote partition's reserved page. |
@@ -1221,7 +1170,6 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) | |||
1221 | u64 part_nasid_pa; | 1170 | u64 part_nasid_pa; |
1222 | int bte_res; | 1171 | int bte_res; |
1223 | 1172 | ||
1224 | |||
1225 | part = &xpc_partitions[partid]; | 1173 | part = &xpc_partitions[partid]; |
1226 | if (part->remote_rp_pa == 0) { | 1174 | if (part->remote_rp_pa == 0) { |
1227 | return xpcPartitionDown; | 1175 | return xpcPartitionDown; |
@@ -1229,11 +1177,11 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) | |||
1229 | 1177 | ||
1230 | memset(nasid_mask, 0, XP_NASID_MASK_BYTES); | 1178 | memset(nasid_mask, 0, XP_NASID_MASK_BYTES); |
1231 | 1179 | ||
1232 | part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa); | 1180 | part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa); |
1233 | 1181 | ||
1234 | bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask, | 1182 | bte_res = xp_bte_copy(part_nasid_pa, (u64)nasid_mask, |
1235 | xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL); | 1183 | xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), |
1184 | NULL); | ||
1236 | 1185 | ||
1237 | return xpc_map_bte_errors(bte_res); | 1186 | return xpc_map_bte_errors(bte_res); |
1238 | } | 1187 | } |
1239 | |||
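xpc_initiate_partid_to_nasids() refuses outright when the partition's reserved page is unknown, then clears the caller's mask to its full compile-time size before copying the (possibly shorter) runtime-sized mask, so no stale bits survive. A sketch of that contract with the BTE transfer modeled by memcpy and the mask size a stand-in:

#include <string.h>
#include <stdint.h>

#define NASID_MASK_BYTES 32	/* stand-in for XP_NASID_MASK_BYTES */

/* 0 on success; nonzero maps to an xpc_retval in the real driver */
static int partid_to_nasids(const uint8_t *remote_part_nasids,
			    size_t copy_bytes, void *nasid_mask)
{
	if (remote_part_nasids == NULL)
		return -1;	/* partition down: no reserved page known */

	/* clear the full mask; the copy may cover only part of it */
	memset(nasid_mask, 0, NASID_MASK_BYTES);
	memcpy(nasid_mask, remote_part_nasids, copy_bytes);
	return 0;
}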