Diffstat (limited to 'drivers/misc/sgi-xp/xpc.h')
-rw-r--r--  drivers/misc/sgi-xp/xpc.h  1200
1 file changed, 517 insertions, 683 deletions
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 11ac267ed68f..619208d61862 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -13,18 +13,10 @@
13#ifndef _DRIVERS_MISC_SGIXP_XPC_H 13#ifndef _DRIVERS_MISC_SGIXP_XPC_H
14#define _DRIVERS_MISC_SGIXP_XPC_H 14#define _DRIVERS_MISC_SGIXP_XPC_H
15 15
16#include <linux/interrupt.h> 16#include <linux/wait.h>
17#include <linux/sysctl.h>
18#include <linux/device.h>
19#include <linux/mutex.h>
20#include <linux/completion.h> 17#include <linux/completion.h>
21#include <asm/pgtable.h> 18#include <linux/timer.h>
22#include <asm/processor.h> 19#include <linux/sched.h>
23#include <asm/sn/bte.h>
24#include <asm/sn/clksupport.h>
25#include <asm/sn/addrs.h>
26#include <asm/sn/mspec.h>
27#include <asm/sn/shub_mmr.h>
28#include "xp.h" 20#include "xp.h"
29 21
30/* 22/*
@@ -36,23 +28,7 @@
36#define XPC_VERSION_MAJOR(_v) ((_v) >> 4) 28#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
37#define XPC_VERSION_MINOR(_v) ((_v) & 0xf) 29#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)
38 30
39/* 31/* define the frequency of the heartbeat and how often it's checked */
40 * The next macros define word or bit representations for given
41 * C-brick nasid in either the SAL provided bit array representing
42 * nasids in the partition/machine or the AMO_t array used for
43 * inter-partition initiation communications.
44 *
45 * For SN2 machines, C-Bricks are always even numbered NASIDs. As
46 * such, some space will be saved by insisting that nasid information
47 * passed from SAL always be packed for C-Bricks and the
48 * cross-partition interrupts use the same packing scheme.
49 */
50#define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2)
51#define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1))
52#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \
53 (1UL << XPC_NASID_B_INDEX(_n)))
54#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
55
56#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */ 32#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */
57#define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */ 33#define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */
58 34
@@ -72,11 +48,11 @@
72 * 48 *
73 * reserved page header 49 * reserved page header
74 * 50 *
75 * The first cacheline of the reserved page contains the header 51 * The first two 64-byte cachelines of the reserved page contain the
76 * (struct xpc_rsvd_page). Before SAL initialization has completed, 52 * header (struct xpc_rsvd_page). Before SAL initialization has completed,
77 * SAL has set up the following fields of the reserved page header: 53 * SAL has set up the following fields of the reserved page header:
78 * SAL_signature, SAL_version, partid, and nasids_size. The other 54 * SAL_signature, SAL_version, SAL_partid, and SAL_nasids_size. The
79 * fields are set up by XPC. (xpc_rsvd_page points to the local 55 * other fields are set up by XPC. (xpc_rsvd_page points to the local
80 * partition's reserved page.) 56 * partition's reserved page.)
81 * 57 *
82 * part_nasids mask 58 * part_nasids mask
@@ -87,14 +63,16 @@
87 * the actual nasids in the entire machine (mach_nasids). We're only 63 * the actual nasids in the entire machine (mach_nasids). We're only
88 * interested in the even numbered nasids (which contain the processors 64 * interested in the even numbered nasids (which contain the processors
89 * and/or memory), so we only need half as many bits to represent the 65 * and/or memory), so we only need half as many bits to represent the
90 * nasids. The part_nasids mask is located starting at the first cacheline 66 * nasids. When mapping nasid to bit in a mask (or bit to nasid) be sure
91 * following the reserved page header. The mach_nasids mask follows right 67 * to either divide or multiply by 2. The part_nasids mask is located
92 * after the part_nasids mask. The size in bytes of each mask is reflected 68 * starting at the first cacheline following the reserved page header. The
93 * by the reserved page header field 'nasids_size'. (Local partition's 69 * mach_nasids mask follows right after the part_nasids mask. The size in
94 * mask pointers are xpc_part_nasids and xpc_mach_nasids.) 70 * bytes of each mask is reflected by the reserved page header field
71 * 'SAL_nasids_size'. (Local partition's mask pointers are xpc_part_nasids
72 * and xpc_mach_nasids.)
95 * 73 *
96 * vars 74 * vars (ia64-sn2 only)
97 * vars part 75 * vars part (ia64-sn2 only)
98 * 76 *
99 * Immediately following the mach_nasids mask are the XPC variables 77 * Immediately following the mach_nasids mask are the XPC variables
100 * required by other partitions. First are those that are generic to all 78 * required by other partitions. First are those that are generic to all
@@ -102,43 +80,26 @@
102 * which are partition specific (vars part). These are setup by XPC. 80 * which are partition specific (vars part). These are setup by XPC.
103 * (Local partition's vars pointers are xpc_vars and xpc_vars_part.) 81 * (Local partition's vars pointers are xpc_vars and xpc_vars_part.)
104 * 82 *
105 * Note: Until vars_pa is set, the partition XPC code has not been initialized. 83 * Note: Until 'ts_jiffies' is set non-zero, the partition XPC code has not been
84 * initialized.
106 */ 85 */
107struct xpc_rsvd_page { 86struct xpc_rsvd_page {
108 u64 SAL_signature; /* SAL: unique signature */ 87 u64 SAL_signature; /* SAL: unique signature */
109 u64 SAL_version; /* SAL: version */ 88 u64 SAL_version; /* SAL: version */
110 u8 partid; /* SAL: partition ID */ 89 short SAL_partid; /* SAL: partition ID */
90 short max_npartitions; /* value of XPC_MAX_PARTITIONS */
111 u8 version; 91 u8 version;
112 u8 pad1[6]; /* align to next u64 in cacheline */ 92 u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */
113 u64 vars_pa; /* physical address of struct xpc_vars */ 93 union {
114 struct timespec stamp; /* time when reserved page was setup by XPC */ 94 unsigned long vars_pa; /* phys address of struct xpc_vars */
115 u64 pad2[9]; /* align to last u64 in cacheline */ 95 unsigned long activate_mq_gpa; /* gru phy addr of activate_mq */
116 u64 nasids_size; /* SAL: size of each nasid mask in bytes */ 96 } sn;
97 unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
98 u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */
99 u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */
117}; 100};
118 101
119#define XPC_RP_VERSION _XPC_VERSION(1, 1) /* version 1.1 of the reserved page */ 102#define XPC_RP_VERSION _XPC_VERSION(2, 0) /* version 2.0 of the reserved page */
120
121#define XPC_SUPPORTS_RP_STAMP(_version) \
122 (_version >= _XPC_VERSION(1, 1))
123
124/*
125 * compare stamps - the return value is:
126 *
127 * < 0, if stamp1 < stamp2
128 * = 0, if stamp1 == stamp2
129 * > 0, if stamp1 > stamp2
130 */
131static inline int
132xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
133{
134 int ret;
135
136 ret = stamp1->tv_sec - stamp2->tv_sec;
137 if (ret == 0)
138 ret = stamp1->tv_nsec - stamp2->tv_nsec;
139
140 return ret;
141}
142 103
143/* 104/*
144 * Define the structures by which XPC variables can be exported to other 105 * Define the structures by which XPC variables can be exported to other
@@ -154,85 +115,40 @@ xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
154 * reflected by incrementing either the major or minor version numbers 115 * reflected by incrementing either the major or minor version numbers
155 * of struct xpc_vars. 116 * of struct xpc_vars.
156 */ 117 */
157struct xpc_vars { 118struct xpc_vars_sn2 {
158 u8 version; 119 u8 version;
159 u64 heartbeat; 120 u64 heartbeat;
160 u64 heartbeating_to_mask; 121 DECLARE_BITMAP(heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
161 u64 heartbeat_offline; /* if 0, heartbeat should be changing */ 122 u64 heartbeat_offline; /* if 0, heartbeat should be changing */
162 int act_nasid; 123 int activate_IRQ_nasid;
163 int act_phys_cpuid; 124 int activate_IRQ_phys_cpuid;
164 u64 vars_part_pa; 125 unsigned long vars_part_pa;
165 u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */ 126 unsigned long amos_page_pa;/* paddr of page of amos from MSPEC driver */
166 AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */ 127 struct amo *amos_page; /* vaddr of page of amos from MSPEC driver */
167}; 128};
168 129
169#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */ 130#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */
170 131
171#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
172 (_version >= _XPC_VERSION(3, 1))
173
174static inline int
175xpc_hb_allowed(short partid, struct xpc_vars *vars)
176{
177 return ((vars->heartbeating_to_mask & (1UL << partid)) != 0);
178}
179
180static inline void
181xpc_allow_hb(short partid, struct xpc_vars *vars)
182{
183 u64 old_mask, new_mask;
184
185 do {
186 old_mask = vars->heartbeating_to_mask;
187 new_mask = (old_mask | (1UL << partid));
188 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
189 old_mask);
190}
191
192static inline void
193xpc_disallow_hb(short partid, struct xpc_vars *vars)
194{
195 u64 old_mask, new_mask;
196
197 do {
198 old_mask = vars->heartbeating_to_mask;
199 new_mask = (old_mask & ~(1UL << partid));
200 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
201 old_mask);
202}
203
204/*
205 * The AMOs page consists of a number of AMO variables which are divided into
206 * four groups. The first two groups are used to identify an IRQ's sender.
207 * These two groups consist of 64 and 128 AMO variables respectively. The last
208 * two groups, consisting of just one AMO variable each, are used to identify
209 * the remote partitions that are currently engaged (from the viewpoint of
210 * the XPC running on the remote partition).
211 */
212#define XPC_NOTIFY_IRQ_AMOS 0
213#define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS)
214#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
215#define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1)
216
217/* 132/*
218 * The following structure describes the per partition specific variables. 133 * The following structure describes the per partition specific variables.
219 * 134 *
220 * An array of these structures, one per partition, will be defined. As a 135 * An array of these structures, one per partition, will be defined. As a
221 * partition becomes active XPC will copy the array entry corresponding to 136 * partition becomes active XPC will copy the array entry corresponding to
222 * itself from that partition. It is desirable that the size of this 137 * itself from that partition. It is desirable that the size of this structure
223 * structure evenly divide into a cacheline, such that none of the entries 138 * evenly divides into a 128-byte cacheline, such that none of the entries in
224 * in this array crosses a cacheline boundary. As it is now, each entry 139 * this array crosses a 128-byte cacheline boundary. As it is now, each entry
225 * occupies half a cacheline. 140 * occupies 64-bytes.
226 */ 141 */
227struct xpc_vars_part { 142struct xpc_vars_part_sn2 {
228 u64 magic; 143 u64 magic;
229 144
230 u64 openclose_args_pa; /* physical address of open and close args */ 145 unsigned long openclose_args_pa; /* phys addr of open and close args */
231 u64 GPs_pa; /* physical address of Get/Put values */ 146 unsigned long GPs_pa; /* physical address of Get/Put values */
147
148 unsigned long chctl_amo_pa; /* physical address of chctl flags' amo */
232 149
233 u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */ 150 int notify_IRQ_nasid; /* nasid of where to send notify IRQs */
234 int IPI_nasid; /* nasid of where to send IPIs */ 151 int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
235 int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */
236 152
237 u8 nchannels; /* #of defined channels supported */ 153 u8 nchannels; /* #of defined channels supported */
238 154
@@ -248,20 +164,95 @@ struct xpc_vars_part {
248 * MAGIC2 indicates that this partition has pulled the remote partition's 164 * MAGIC2 indicates that this partition has pulled the remote partition's
249 * per partition variables that pertain to this partition. 165 * per partition variables that pertain to this partition.
250 */ 166 */
251#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ 167#define XPC_VP_MAGIC1_SN2 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
252#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ 168#define XPC_VP_MAGIC2_SN2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
253 169
254/* the reserved page sizes and offsets */ 170/* the reserved page sizes and offsets */
255 171
256#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)) 172#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
257#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars)) 173#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars_sn2))
258 174
259#define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE)) 175#define XPC_RP_PART_NASIDS(_rp) ((unsigned long *)((u8 *)(_rp) + \
260#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words) 176 XPC_RP_HEADER_SIZE))
261#define XPC_RP_VARS(_rp) ((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \ 177#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + \
262 xp_nasid_mask_words)) 178 xpc_nasid_mask_nlongs)
263#define XPC_RP_VARS_PART(_rp) ((struct xpc_vars_part *) \ 179#define XPC_RP_VARS(_rp) ((struct xpc_vars_sn2 *) \
264 ((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE)) 180 (XPC_RP_MACH_NASIDS(_rp) + \
181 xpc_nasid_mask_nlongs))
182
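To make the layout described above concrete, the following is a minimal sketch (illustration only; the helper name is made up and nothing like it exists in the driver) of how the regions of a reserved page are located using the macros just defined. xpc_rsvd_page and xpc_nasid_mask_nlongs are declared later in this header.

static void xpc_example_show_rp_layout(struct xpc_rsvd_page *rp)
{
        unsigned long *part_nasids = XPC_RP_PART_NASIDS(rp);
        unsigned long *mach_nasids = XPC_RP_MACH_NASIDS(rp);
        struct xpc_vars_sn2 *vars = XPC_RP_VARS(rp);    /* meaningful on sn2 only */

        /*
         * part_nasids begins at the first cacheline after the header,
         * mach_nasids follows it, and (on sn2) the vars structure follows
         * mach_nasids; each nasid mask is xpc_nasid_mask_nlongs longs.
         */
        pr_debug("header=%p part_nasids=%p mach_nasids=%p vars=%p\n",
                 rp, part_nasids, mach_nasids, vars);
}
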
183/*
184 * The activate_mq is used to send/receive GRU messages that affect XPC's
185 * heartbeat, partition active state, and channel state. This is UV only.
186 */
187struct xpc_activate_mq_msghdr_uv {
188 short partid; /* sender's partid */
189 u8 act_state; /* sender's act_state at time msg sent */
190 u8 type; /* message's type */
191 unsigned long rp_ts_jiffies; /* timestamp of sender's rp setup by XPC */
192};
193
194/* activate_mq defined message types */
195#define XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV 0
196#define XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV 1
197#define XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV 2
198#define XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV 3
199
200#define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 4
201#define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 5
202
203#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 6
204#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 7
205#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 8
206#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 9
207
208#define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 10
209#define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 11
210
211struct xpc_activate_mq_msg_uv {
212 struct xpc_activate_mq_msghdr_uv hdr;
213};
214
215struct xpc_activate_mq_msg_heartbeat_req_uv {
216 struct xpc_activate_mq_msghdr_uv hdr;
217 u64 heartbeat;
218};
219
220struct xpc_activate_mq_msg_activate_req_uv {
221 struct xpc_activate_mq_msghdr_uv hdr;
222 unsigned long rp_gpa;
223 unsigned long activate_mq_gpa;
224};
225
226struct xpc_activate_mq_msg_deactivate_req_uv {
227 struct xpc_activate_mq_msghdr_uv hdr;
228 enum xp_retval reason;
229};
230
231struct xpc_activate_mq_msg_chctl_closerequest_uv {
232 struct xpc_activate_mq_msghdr_uv hdr;
233 short ch_number;
234 enum xp_retval reason;
235};
236
237struct xpc_activate_mq_msg_chctl_closereply_uv {
238 struct xpc_activate_mq_msghdr_uv hdr;
239 short ch_number;
240};
241
242struct xpc_activate_mq_msg_chctl_openrequest_uv {
243 struct xpc_activate_mq_msghdr_uv hdr;
244 short ch_number;
245 short entry_size; /* size of notify_mq's GRU messages */
246 short local_nentries; /* ??? Is this needed? What is? */
247};
248
249struct xpc_activate_mq_msg_chctl_openreply_uv {
250 struct xpc_activate_mq_msghdr_uv hdr;
251 short ch_number;
252 short remote_nentries; /* ??? Is this needed? What is? */
253 short local_nentries; /* ??? Is this needed? What is? */
254 unsigned long local_notify_mq_gpa;
255};
265 256
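As an illustration of how these message types fit together, here is a sketch of filling in an activate-request message. The helper is hypothetical; the actual message construction and GRU send path live in xpc_uv.c.

static void
xpc_example_fill_activate_req(struct xpc_activate_mq_msg_activate_req_uv *msg,
                              short my_partid, u8 my_act_state,
                              unsigned long my_rp_ts_jiffies,
                              unsigned long rp_gpa, unsigned long mq_gpa)
{
        /* every activate_mq message starts with the common header */
        msg->hdr.partid = my_partid;            /* identifies the sender */
        msg->hdr.act_state = my_act_state;      /* sender's act_state right now */
        msg->hdr.type = XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV;
        msg->hdr.rp_ts_jiffies = my_rp_ts_jiffies; /* ties msg to this rp setup */

        /* type-specific payload */
        msg->rp_gpa = rp_gpa;                   /* gpa of sender's reserved page */
        msg->activate_mq_gpa = mq_gpa;          /* gru gpa of sender's activate_mq */
}
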
266/* 257/*
267 * Functions registered by add_timer() or called by kernel_thread() only 258 * Functions registered by add_timer() or called by kernel_thread() only
@@ -270,22 +261,22 @@ struct xpc_vars_part {
270 * the passed argument. 261 * the passed argument.
271 */ 262 */
272#define XPC_PACK_ARGS(_arg1, _arg2) \ 263#define XPC_PACK_ARGS(_arg1, _arg2) \
273 ((((u64) _arg1) & 0xffffffff) | \ 264 ((((u64)_arg1) & 0xffffffff) | \
274 ((((u64) _arg2) & 0xffffffff) << 32)) 265 ((((u64)_arg2) & 0xffffffff) << 32))
275 266
276#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff) 267#define XPC_UNPACK_ARG1(_args) (((u64)_args) & 0xffffffff)
277#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff) 268#define XPC_UNPACK_ARG2(_args) ((((u64)_args) >> 32) & 0xffffffff)
278 269
279/* 270/*
280 * Define a Get/Put value pair (pointers) used with a message queue. 271 * Define a Get/Put value pair (pointers) used with a message queue.
281 */ 272 */
282struct xpc_gp { 273struct xpc_gp_sn2 {
283 s64 get; /* Get value */ 274 s64 get; /* Get value */
284 s64 put; /* Put value */ 275 s64 put; /* Put value */
285}; 276};
286 277
287#define XPC_GP_SIZE \ 278#define XPC_GP_SIZE \
288 L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS) 279 L1_CACHE_ALIGN(sizeof(struct xpc_gp_sn2) * XPC_MAX_NCHANNELS)
289 280
290/* 281/*
291 * Define a structure that contains arguments associated with opening and 282 * Define a structure that contains arguments associated with opening and
@@ -293,31 +284,89 @@ struct xpc_gp {
293 */ 284 */
294struct xpc_openclose_args { 285struct xpc_openclose_args {
295 u16 reason; /* reason why channel is closing */ 286 u16 reason; /* reason why channel is closing */
296 u16 msg_size; /* sizeof each message entry */ 287 u16 entry_size; /* sizeof each message entry */
297 u16 remote_nentries; /* #of message entries in remote msg queue */ 288 u16 remote_nentries; /* #of message entries in remote msg queue */
298 u16 local_nentries; /* #of message entries in local msg queue */ 289 u16 local_nentries; /* #of message entries in local msg queue */
299 u64 local_msgqueue_pa; /* physical address of local message queue */ 290 unsigned long local_msgqueue_pa; /* phys addr of local message queue */
300}; 291};
301 292
302#define XPC_OPENCLOSE_ARGS_SIZE \ 293#define XPC_OPENCLOSE_ARGS_SIZE \
303 L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS) 294 L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * \
295 XPC_MAX_NCHANNELS)
304 296
305/* struct xpc_msg flags */
306 297
307#define XPC_M_DONE 0x01 /* msg has been received/consumed */ 298/*
308#define XPC_M_READY 0x02 /* msg is ready to be sent */ 299 * Structures to define a fifo singly-linked list.
309#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */ 300 */
310 301
311#define XPC_MSG_ADDRESS(_payload) \ 302struct xpc_fifo_entry_uv {
312 ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET)) 303 struct xpc_fifo_entry_uv *next;
304};
305
306struct xpc_fifo_head_uv {
307 struct xpc_fifo_entry_uv *first;
308 struct xpc_fifo_entry_uv *last;
309 spinlock_t lock;
310 int n_entries;
311};
313 312
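The list is intended to be manipulated under its spinlock. The put/get pair below is a sketch of the expected usage; the names are illustrative and the driver's own helpers presumably live in xpc_uv.c.

static void
xpc_example_fifo_put(struct xpc_fifo_head_uv *head,
                     struct xpc_fifo_entry_uv *entry)
{
        unsigned long irq_flags;

        entry->next = NULL;
        spin_lock_irqsave(&head->lock, irq_flags);
        if (head->last != NULL)
                head->last->next = entry;       /* append to tail */
        else
                head->first = entry;            /* list was empty */
        head->last = entry;
        head->n_entries++;
        spin_unlock_irqrestore(&head->lock, irq_flags);
}

static struct xpc_fifo_entry_uv *
xpc_example_fifo_get(struct xpc_fifo_head_uv *head)
{
        struct xpc_fifo_entry_uv *entry;
        unsigned long irq_flags;

        spin_lock_irqsave(&head->lock, irq_flags);
        entry = head->first;                    /* pop from the front */
        if (entry != NULL) {
                head->first = entry->next;
                if (head->first == NULL)
                        head->last = NULL;
                head->n_entries--;
        }
        spin_unlock_irqrestore(&head->lock, irq_flags);
        return entry;
}
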
314/* 313/*
315 * Defines notify entry. 314 * Define a sn2 styled message.
315 *
316 * A user-defined message resides in the payload area. The max size of the
317 * payload is defined by the user via xpc_connect().
318 *
319 * The size of a message entry (within a message queue) must be a 128-byte
320 * cacheline sized multiple in order to facilitate the BTE transfer of messages
321 * from one message queue to another.
322 */
323struct xpc_msg_sn2 {
324 u8 flags; /* FOR XPC INTERNAL USE ONLY */
325 u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */
326 s64 number; /* FOR XPC INTERNAL USE ONLY */
327
328 u64 payload; /* user defined portion of message */
329};
330
331/* struct xpc_msg_sn2 flags */
332
333#define XPC_M_SN2_DONE 0x01 /* msg has been received/consumed */
334#define XPC_M_SN2_READY 0x02 /* msg is ready to be sent */
335#define XPC_M_SN2_INTERRUPT 0x04 /* send interrupt when msg consumed */
336
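As a concrete reading of the 128-byte requirement above, a message entry's size is the XPC-internal header plus the user's payload, rounded up to a full cacheline. The helper below is only an illustration (ALIGN() and offsetof() come from linux/kernel.h and linux/stddef.h); it is not the driver's own size macro.

static inline u16 xpc_example_msg_entry_size_sn2(u16 payload_size)
{
        /* offsetof(..., payload) is the size of the flags/reserved/number header */
        return ALIGN(offsetof(struct xpc_msg_sn2, payload) + payload_size, 128);
}
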
337/*
338 * The format of a uv XPC notify_mq GRU message is as follows:
339 *
340 * A user-defined message resides in the payload area. The max size of the
341 * payload is defined by the user via xpc_connect().
342 *
343 * The size of a message (payload and header) sent via the GRU must be either 1
344 * or 2 GRU_CACHE_LINE_BYTES in length.
345 */
346
347struct xpc_notify_mq_msghdr_uv {
348 union {
349 unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */
350 struct xpc_fifo_entry_uv next; /* FOR XPC INTERNAL USE ONLY */
351 } u;
352 short partid; /* FOR XPC INTERNAL USE ONLY */
353 u8 ch_number; /* FOR XPC INTERNAL USE ONLY */
354 u8 size; /* FOR XPC INTERNAL USE ONLY */
355 unsigned int msg_slot_number; /* FOR XPC INTERNAL USE ONLY */
356};
357
358struct xpc_notify_mq_msg_uv {
359 struct xpc_notify_mq_msghdr_uv hdr;
360 unsigned long payload;
361};
362
363/*
364 * Define sn2's notify entry.
316 * 365 *
317 * This is used to notify a message's sender that their message was received 366 * This is used to notify a message's sender that their message was received
318 * and consumed by the intended recipient. 367 * and consumed by the intended recipient.
319 */ 368 */
320struct xpc_notify { 369struct xpc_notify_sn2 {
321 u8 type; /* type of notification */ 370 u8 type; /* type of notification */
322 371
323 /* the following two fields are only used if type == XPC_N_CALL */ 372 /* the following two fields are only used if type == XPC_N_CALL */
@@ -325,9 +374,20 @@ struct xpc_notify {
325 void *key; /* pointer to user's key */ 374 void *key; /* pointer to user's key */
326}; 375};
327 376
328/* struct xpc_notify type of notification */ 377/* struct xpc_notify_sn2 type of notification */
378
379#define XPC_N_CALL 0x01 /* notify function provided by user */
329 380
330#define XPC_N_CALL 0x01 /* notify function provided by user */ 381/*
382 * Define uv's version of the notify entry. It additionally is used to allocate
383 * a msg slot on the remote partition into which is copied a sent message.
384 */
385struct xpc_send_msg_slot_uv {
386 struct xpc_fifo_entry_uv next;
387 unsigned int msg_slot_number;
388 xpc_notify_func func; /* user's notify function */
389 void *key; /* pointer to user's key */
390};
331 391
332/* 392/*
333 * Define the structure that manages all the stuff required by a channel. In 393 * Define the structure that manages all the stuff required by a channel. In
@@ -339,8 +399,12 @@ struct xpc_notify {
339 * There is an array of these structures for each remote partition. It is 399 * There is an array of these structures for each remote partition. It is
340 * allocated at the time a partition becomes active. The array contains one 400 * allocated at the time a partition becomes active. The array contains one
341 * of these structures for each potential channel connection to that partition. 401 * of these structures for each potential channel connection to that partition.
402 */
403
404/*
405 * The following is sn2 only.
342 * 406 *
343 * Each of these structures manages two message queues (circular buffers). 407 * Each channel structure manages two message queues (circular buffers).
344 * They are allocated at the time a channel connection is made. One of 408 * They are allocated at the time a channel connection is made. One of
345 * these message queues (local_msgqueue) holds the locally created messages 409 * these message queues (local_msgqueue) holds the locally created messages
346 * that are destined for the remote partition. The other of these message 410 * that are destined for the remote partition. The other of these message
@@ -407,58 +471,72 @@ struct xpc_notify {
407 * new messages, by the clearing of the message flags of the acknowledged 471 * new messages, by the clearing of the message flags of the acknowledged
408 * messages. 472 * messages.
409 */ 473 */
474
475struct xpc_channel_sn2 {
476 struct xpc_openclose_args *local_openclose_args; /* args passed on */
477 /* opening or closing of channel */
478
479 void *local_msgqueue_base; /* base address of kmalloc'd space */
480 struct xpc_msg_sn2 *local_msgqueue; /* local message queue */
481 void *remote_msgqueue_base; /* base address of kmalloc'd space */
482 struct xpc_msg_sn2 *remote_msgqueue; /* cached copy of remote */
483 /* partition's local message queue */
484 unsigned long remote_msgqueue_pa; /* phys addr of remote partition's */
485 /* local message queue */
486
487 struct xpc_notify_sn2 *notify_queue;/* notify queue for messages sent */
488
489 /* various flavors of local and remote Get/Put values */
490
491 struct xpc_gp_sn2 *local_GP; /* local Get/Put values */
492 struct xpc_gp_sn2 remote_GP; /* remote Get/Put values */
493 struct xpc_gp_sn2 w_local_GP; /* working local Get/Put values */
494 struct xpc_gp_sn2 w_remote_GP; /* working remote Get/Put values */
495 s64 next_msg_to_pull; /* Put value of next msg to pull */
496
497 struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
498};
499
500struct xpc_channel_uv {
501 unsigned long remote_notify_mq_gpa; /* gru phys address of remote */
502 /* partition's notify mq */
503
504 struct xpc_send_msg_slot_uv *send_msg_slots;
505 struct xpc_notify_mq_msg_uv *recv_msg_slots;
506
507 struct xpc_fifo_head_uv msg_slot_free_list;
508 struct xpc_fifo_head_uv recv_msg_list; /* deliverable payloads */
509};
510
410struct xpc_channel { 511struct xpc_channel {
411 short partid; /* ID of remote partition connected */ 512 short partid; /* ID of remote partition connected */
412 spinlock_t lock; /* lock for updating this structure */ 513 spinlock_t lock; /* lock for updating this structure */
413 u32 flags; /* general flags */ 514 unsigned int flags; /* general flags */
414 515
415 enum xp_retval reason; /* reason why channel is disconnect'g */ 516 enum xp_retval reason; /* reason why channel is disconnect'g */
416 int reason_line; /* line# disconnect initiated from */ 517 int reason_line; /* line# disconnect initiated from */
417 518
418 u16 number; /* channel # */ 519 u16 number; /* channel # */
419 520
420 u16 msg_size; /* sizeof each msg entry */ 521 u16 entry_size; /* sizeof each msg entry */
421 u16 local_nentries; /* #of msg entries in local msg queue */ 522 u16 local_nentries; /* #of msg entries in local msg queue */
422 u16 remote_nentries; /* #of msg entries in remote msg queue */ 523 u16 remote_nentries; /* #of msg entries in remote msg queue */
423 524
424 void *local_msgqueue_base; /* base address of kmalloc'd space */
425 struct xpc_msg *local_msgqueue; /* local message queue */
426 void *remote_msgqueue_base; /* base address of kmalloc'd space */
427 struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
428 /* local message queue */
429 u64 remote_msgqueue_pa; /* phys addr of remote partition's */
430 /* local message queue */
431
432 atomic_t references; /* #of external references to queues */ 525 atomic_t references; /* #of external references to queues */
433 526
434 atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ 527 atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */
435 wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */ 528 wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
436 529
437 u8 delayed_IPI_flags; /* IPI flags received, but delayed */ 530 u8 delayed_chctl_flags; /* chctl flags received, but delayed */
438 /* action until channel disconnected */ 531 /* action until channel disconnected */
439 532
440 /* queue of msg senders who want to be notified when msg received */
441
442 atomic_t n_to_notify; /* #of msg senders to notify */ 533 atomic_t n_to_notify; /* #of msg senders to notify */
443 struct xpc_notify *notify_queue; /* notify queue for messages sent */
444 534
445 xpc_channel_func func; /* user's channel function */ 535 xpc_channel_func func; /* user's channel function */
446 void *key; /* pointer to user's key */ 536 void *key; /* pointer to user's key */
447 537
448 struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
449 struct completion wdisconnect_wait; /* wait for channel disconnect */ 538 struct completion wdisconnect_wait; /* wait for channel disconnect */
450 539
451 struct xpc_openclose_args *local_openclose_args; /* args passed on */
452 /* opening or closing of channel */
453
454 /* various flavors of local and remote Get/Put values */
455
456 struct xpc_gp *local_GP; /* local Get/Put values */
457 struct xpc_gp remote_GP; /* remote Get/Put values */
458 struct xpc_gp w_local_GP; /* working local Get/Put values */
459 struct xpc_gp w_remote_GP; /* working remote Get/Put values */
460 s64 next_msg_to_pull; /* Put value of next msg to pull */
461
462 /* kthread management related fields */ 540 /* kthread management related fields */
463 541
464 atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ 542 atomic_t kthreads_assigned; /* #of kthreads assigned to channel */
@@ -469,6 +547,11 @@ struct xpc_channel {
469 547
470 wait_queue_head_t idle_wq; /* idle kthread wait queue */ 548 wait_queue_head_t idle_wq; /* idle kthread wait queue */
471 549
550 union {
551 struct xpc_channel_sn2 sn2;
552 struct xpc_channel_uv uv;
553 } sn;
554
472} ____cacheline_aligned; 555} ____cacheline_aligned;
473 556
474/* struct xpc_channel flags */ 557/* struct xpc_channel flags */
@@ -501,33 +584,128 @@ struct xpc_channel {
501#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */ 584#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */
502 585
503/* 586/*
504 * Manages channels on a partition basis. There is one of these structures 587 * The channel control flags (chctl) union consists of a 64-bit variable which
588 * is divided up into eight bytes, ordered from right to left. Byte zero
589 * pertains to channel 0, byte one to channel 1, and so on. Each channel's byte
590 * can have one or more of the chctl flags set in it.
591 */
592
593union xpc_channel_ctl_flags {
594 u64 all_flags;
595 u8 flags[XPC_MAX_NCHANNELS];
596};
597
598/* chctl flags */
599#define XPC_CHCTL_CLOSEREQUEST 0x01
600#define XPC_CHCTL_CLOSEREPLY 0x02
601#define XPC_CHCTL_OPENREQUEST 0x04
602#define XPC_CHCTL_OPENREPLY 0x08
603#define XPC_CHCTL_MSGREQUEST 0x10
604
605#define XPC_OPENCLOSE_CHCTL_FLAGS \
606 (XPC_CHCTL_CLOSEREQUEST | XPC_CHCTL_CLOSEREPLY | \
607 XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY)
608#define XPC_MSG_CHCTL_FLAGS XPC_CHCTL_MSGREQUEST
609
610static inline int
611xpc_any_openclose_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
612{
613 int ch_number;
614
615 for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) {
616 if (chctl->flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS)
617 return 1;
618 }
619 return 0;
620}
621
622static inline int
623xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
624{
625 int ch_number;
626
627 for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) {
628 if (chctl->flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
629 return 1;
630 }
631 return 0;
632}
633
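For illustration, this is roughly how a received channel-control request is expected to be recorded against a partition before the channel manager processes it. The helper is hypothetical; struct xpc_partition, its chctl/chctl_lock fields, and xpc_wakeup_channel_mgr() are all defined further down in this header, and the real recording paths live in xpc_sn2.c and xpc_uv.c.

static void
xpc_example_record_chctl(struct xpc_partition *part, int ch_number,
                         u8 chctl_flag)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&part->chctl_lock, irq_flags);
        part->chctl.flags[ch_number] |= chctl_flag; /* e.g. XPC_CHCTL_OPENREQUEST */
        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

        xpc_wakeup_channel_mgr(part);   /* tell the channel manager to look */
}
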
634/*
635 * Manage channels on a partition basis. There is one of these structures
505 * for each partition (a partition will never utilize the structure that 636 * for each partition (a partition will never utilize the structure that
506 * represents itself). 637 * represents itself).
507 */ 638 */
639
640struct xpc_partition_sn2 {
641 unsigned long remote_amos_page_pa; /* paddr of partition's amos page */
642 int activate_IRQ_nasid; /* active partition's act/deact nasid */
643 int activate_IRQ_phys_cpuid; /* active part's act/deact phys cpuid */
644
645 unsigned long remote_vars_pa; /* phys addr of partition's vars */
646 unsigned long remote_vars_part_pa; /* paddr of partition's vars part */
647 u8 remote_vars_version; /* version# of partition's vars */
648
649 void *local_GPs_base; /* base address of kmalloc'd space */
650 struct xpc_gp_sn2 *local_GPs; /* local Get/Put values */
651 void *remote_GPs_base; /* base address of kmalloc'd space */
652 struct xpc_gp_sn2 *remote_GPs; /* copy of remote partition's local */
653 /* Get/Put values */
654 unsigned long remote_GPs_pa; /* phys addr of remote partition's local */
655 /* Get/Put values */
656
657 void *local_openclose_args_base; /* base address of kmalloc'd space */
658 struct xpc_openclose_args *local_openclose_args; /* local's args */
659 unsigned long remote_openclose_args_pa; /* phys addr of remote's args */
660
661 int notify_IRQ_nasid; /* nasid of where to send notify IRQs */
662 int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
663 char notify_IRQ_owner[8]; /* notify IRQ's owner's name */
664
665 struct amo *remote_chctl_amo_va; /* addr of remote chctl flags' amo */
666 struct amo *local_chctl_amo_va; /* address of chctl flags' amo */
667
668 struct timer_list dropped_notify_IRQ_timer; /* dropped IRQ timer */
669};
670
671struct xpc_partition_uv {
672 unsigned long remote_activate_mq_gpa; /* gru phys address of remote */
673 /* partition's activate mq */
674 spinlock_t flags_lock; /* protect updating of flags */
675 unsigned int flags; /* general flags */
676 u8 remote_act_state; /* remote partition's act_state */
677 u8 act_state_req; /* act_state request from remote partition */
678 enum xp_retval reason; /* reason for deactivate act_state request */
679 u64 heartbeat; /* incremented by remote partition */
680};
681
682/* struct xpc_partition_uv flags */
683
684#define XPC_P_HEARTBEAT_OFFLINE_UV 0x00000001
685#define XPC_P_ENGAGED_UV 0x00000002
686
687/* struct xpc_partition_uv act_state change requests */
688
689#define XPC_P_ASR_ACTIVATE_UV 0x01
690#define XPC_P_ASR_REACTIVATE_UV 0x02
691#define XPC_P_ASR_DEACTIVATE_UV 0x03
692
508struct xpc_partition { 693struct xpc_partition {
509 694
510 /* XPC HB infrastructure */ 695 /* XPC HB infrastructure */
511 696
512 u8 remote_rp_version; /* version# of partition's rsvd pg */ 697 u8 remote_rp_version; /* version# of partition's rsvd pg */
513 struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */ 698 unsigned long remote_rp_ts_jiffies; /* timestamp when rsvd pg setup */
514 u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ 699 unsigned long remote_rp_pa; /* phys addr of partition's rsvd pg */
515 u64 remote_vars_pa; /* phys addr of partition's vars */
516 u64 remote_vars_part_pa; /* phys addr of partition's vars part */
517 u64 last_heartbeat; /* HB at last read */ 700 u64 last_heartbeat; /* HB at last read */
518 u64 remote_amos_page_pa; /* phys addr of partition's amos page */ 701 u32 activate_IRQ_rcvd; /* IRQs since activation */
519 int remote_act_nasid; /* active part's act/deact nasid */
520 int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
521 u32 act_IRQ_rcvd; /* IRQs since activation */
522 spinlock_t act_lock; /* protect updating of act_state */ 702 spinlock_t act_lock; /* protect updating of act_state */
523 u8 act_state; /* from XPC HB viewpoint */ 703 u8 act_state; /* from XPC HB viewpoint */
524 u8 remote_vars_version; /* version# of partition's vars */
525 enum xp_retval reason; /* reason partition is deactivating */ 704 enum xp_retval reason; /* reason partition is deactivating */
526 int reason_line; /* line# deactivation initiated from */ 705 int reason_line; /* line# deactivation initiated from */
527 int reactivate_nasid; /* nasid in partition to reactivate */
528 706
529 unsigned long disengage_request_timeout; /* timeout in jiffies */ 707 unsigned long disengage_timeout; /* timeout in jiffies */
530 struct timer_list disengage_request_timer; 708 struct timer_list disengage_timer;
531 709
532 /* XPC infrastructure referencing and teardown control */ 710 /* XPC infrastructure referencing and teardown control */
533 711
@@ -535,85 +713,63 @@ struct xpc_partition {
535 wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ 713 wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
536 atomic_t references; /* #of references to infrastructure */ 714 atomic_t references; /* #of references to infrastructure */
537 715
538 /*
539 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
540 * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
541 * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE
542 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
543 */
544
545 u8 nchannels; /* #of defined channels supported */ 716 u8 nchannels; /* #of defined channels supported */
546 atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ 717 atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
547 atomic_t nchannels_engaged; /* #of channels engaged with remote part */ 718 atomic_t nchannels_engaged; /* #of channels engaged with remote part */
548 struct xpc_channel *channels; /* array of channel structures */ 719 struct xpc_channel *channels; /* array of channel structures */
549 720
550 void *local_GPs_base; /* base address of kmalloc'd space */ 721 /* fields used for managing channel availability and activity */
551 struct xpc_gp *local_GPs; /* local Get/Put values */
552 void *remote_GPs_base; /* base address of kmalloc'd space */
553 struct xpc_gp *remote_GPs; /* copy of remote partition's local */
554 /* Get/Put values */
555 u64 remote_GPs_pa; /* phys address of remote partition's local */
556 /* Get/Put values */
557 722
558 /* fields used to pass args when opening or closing a channel */ 723 union xpc_channel_ctl_flags chctl; /* chctl flags yet to be processed */
724 spinlock_t chctl_lock; /* chctl flags lock */
559 725
560 void *local_openclose_args_base; /* base address of kmalloc'd space */
561 struct xpc_openclose_args *local_openclose_args; /* local's args */
562 void *remote_openclose_args_base; /* base address of kmalloc'd space */ 726 void *remote_openclose_args_base; /* base address of kmalloc'd space */
563 struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ 727 struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
564 /* args */ 728 /* args */
565 u64 remote_openclose_args_pa; /* phys addr of remote's args */
566
567 /* IPI sending, receiving and handling related fields */
568
569 int remote_IPI_nasid; /* nasid of where to send IPIs */
570 int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
571 AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */
572
573 AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
574 u64 local_IPI_amo; /* IPI amo flags yet to be handled */
575 char IPI_owner[8]; /* IPI owner's name */
576 struct timer_list dropped_IPI_timer; /* dropped IPI timer */
577
578 spinlock_t IPI_lock; /* IPI handler lock */
579 729
580 /* channel manager related fields */ 730 /* channel manager related fields */
581 731
582 atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */ 732 atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
583 wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ 733 wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
584 734
735 union {
736 struct xpc_partition_sn2 sn2;
737 struct xpc_partition_uv uv;
738 } sn;
739
585} ____cacheline_aligned; 740} ____cacheline_aligned;
586 741
587/* struct xpc_partition act_state values (for XPC HB) */ 742/* struct xpc_partition act_state values (for XPC HB) */
588 743
589#define XPC_P_INACTIVE 0x00 /* partition is not active */ 744#define XPC_P_AS_INACTIVE 0x00 /* partition is not active */
590#define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */ 745#define XPC_P_AS_ACTIVATION_REQ 0x01 /* created thread to activate */
591#define XPC_P_ACTIVATING 0x02 /* activation thread started */ 746#define XPC_P_AS_ACTIVATING 0x02 /* activation thread started */
592#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */ 747#define XPC_P_AS_ACTIVE 0x03 /* xpc_partition_up() was called */
593#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */ 748#define XPC_P_AS_DEACTIVATING 0x04 /* partition deactivation initiated */
594 749
595#define XPC_DEACTIVATE_PARTITION(_p, _reason) \ 750#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
596 xpc_deactivate_partition(__LINE__, (_p), (_reason)) 751 xpc_deactivate_partition(__LINE__, (_p), (_reason))
597 752
598/* struct xpc_partition setup_state values */ 753/* struct xpc_partition setup_state values */
599 754
600#define XPC_P_UNSET 0x00 /* infrastructure was never setup */ 755#define XPC_P_SS_UNSET 0x00 /* infrastructure was never setup */
601#define XPC_P_SETUP 0x01 /* infrastructure is setup */ 756#define XPC_P_SS_SETUP 0x01 /* infrastructure is setup */
602#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ 757#define XPC_P_SS_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
603#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */ 758#define XPC_P_SS_TORNDOWN 0x03 /* infrastructure is torndown */
604 759
605/* 760/*
606 * struct xpc_partition IPI_timer #of seconds to wait before checking for 761 * struct xpc_partition_sn2's dropped notify IRQ timer is set to wait the
607 * dropped IPIs. These occur whenever an IPI amo write doesn't complete until 762 * following interval #of seconds before checking for dropped notify IRQs.
608 * after the IPI was received. 763 * These can occur whenever an IRQ's associated amo write doesn't complete
764 * until after the IRQ was received.
609 */ 765 */
610#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) 766#define XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL (0.25 * HZ)
611 767
612/* number of seconds to wait for other partitions to disengage */ 768/* number of seconds to wait for other partitions to disengage */
613#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90 769#define XPC_DISENGAGE_DEFAULT_TIMELIMIT 90
614 770
615/* interval in seconds to print 'waiting disengagement' messages */ 771/* interval in seconds to print 'waiting deactivation' messages */
616#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10 772#define XPC_DEACTIVATE_PRINTMSG_INTERVAL 10
617 773
618#define XPC_PARTID(_p) ((short)((_p) - &xpc_partitions[0])) 774#define XPC_PARTID(_p) ((short)((_p) - &xpc_partitions[0]))
619 775
@@ -623,33 +779,92 @@ extern struct xpc_registration xpc_registrations[];
623/* found in xpc_main.c */ 779/* found in xpc_main.c */
624extern struct device *xpc_part; 780extern struct device *xpc_part;
625extern struct device *xpc_chan; 781extern struct device *xpc_chan;
626extern int xpc_disengage_request_timelimit; 782extern int xpc_disengage_timelimit;
627extern int xpc_disengage_request_timedout; 783extern int xpc_disengage_timedout;
628extern irqreturn_t xpc_notify_IRQ_handler(int, void *); 784extern int xpc_activate_IRQ_rcvd;
629extern void xpc_dropped_IPI_check(struct xpc_partition *); 785extern spinlock_t xpc_activate_IRQ_rcvd_lock;
786extern wait_queue_head_t xpc_activate_IRQ_wq;
787extern void *xpc_heartbeating_to_mask;
788extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **);
630extern void xpc_activate_partition(struct xpc_partition *); 789extern void xpc_activate_partition(struct xpc_partition *);
631extern void xpc_activate_kthreads(struct xpc_channel *, int); 790extern void xpc_activate_kthreads(struct xpc_channel *, int);
632extern void xpc_create_kthreads(struct xpc_channel *, int, int); 791extern void xpc_create_kthreads(struct xpc_channel *, int, int);
633extern void xpc_disconnect_wait(int); 792extern void xpc_disconnect_wait(int);
793extern int (*xpc_setup_partitions_sn) (void);
794extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *,
795 unsigned long *,
796 size_t *);
797extern int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *);
798extern void (*xpc_heartbeat_init) (void);
799extern void (*xpc_heartbeat_exit) (void);
800extern void (*xpc_increment_heartbeat) (void);
801extern void (*xpc_offline_heartbeat) (void);
802extern void (*xpc_online_heartbeat) (void);
803extern enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *);
804extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *);
805extern u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *);
806extern enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *);
807extern void (*xpc_teardown_msg_structures) (struct xpc_channel *);
808extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *);
809extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int);
810extern int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *);
811extern void *(*xpc_get_deliverable_payload) (struct xpc_channel *);
812extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *,
813 unsigned long, int);
814extern void (*xpc_request_partition_reactivation) (struct xpc_partition *);
815extern void (*xpc_request_partition_deactivation) (struct xpc_partition *);
816extern void (*xpc_cancel_partition_deactivation_request) (
817 struct xpc_partition *);
818extern void (*xpc_process_activate_IRQ_rcvd) (void);
819extern enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *);
820extern void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *);
821
822extern void (*xpc_indicate_partition_engaged) (struct xpc_partition *);
823extern int (*xpc_partition_engaged) (short);
824extern int (*xpc_any_partition_engaged) (void);
825extern void (*xpc_indicate_partition_disengaged) (struct xpc_partition *);
826extern void (*xpc_assume_partition_disengaged) (short);
827
828extern void (*xpc_send_chctl_closerequest) (struct xpc_channel *,
829 unsigned long *);
830extern void (*xpc_send_chctl_closereply) (struct xpc_channel *,
831 unsigned long *);
832extern void (*xpc_send_chctl_openrequest) (struct xpc_channel *,
833 unsigned long *);
834extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *);
835
836extern void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *,
837 unsigned long);
838
839extern enum xp_retval (*xpc_send_payload) (struct xpc_channel *, u32, void *,
840 u16, u8, xpc_notify_func, void *);
841extern void (*xpc_received_payload) (struct xpc_channel *, void *);
842
843/* found in xpc_sn2.c */
844extern int xpc_init_sn2(void);
845extern void xpc_exit_sn2(void);
846
847/* found in xpc_uv.c */
848extern int xpc_init_uv(void);
849extern void xpc_exit_uv(void);
634 850
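A plausible sketch of how the two init routines above are meant to be used at module load, assuming the is_shub()/is_uv() predicates provided by xp.h: each init routine is expected to point the xpc_* interface hooks declared earlier at its hardware-specific implementations.

static int xpc_example_init_arch(void)
{
        if (is_shub())
                return xpc_init_sn2();  /* installs the sn2 versions of the hooks */
        if (is_uv())
                return xpc_init_uv();   /* installs the uv versions of the hooks */
        return -ENODEV;                 /* no supported platform detected */
}
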
635/* found in xpc_partition.c */ 851/* found in xpc_partition.c */
636extern int xpc_exiting; 852extern int xpc_exiting;
637extern struct xpc_vars *xpc_vars; 853extern int xpc_nasid_mask_nlongs;
638extern struct xpc_rsvd_page *xpc_rsvd_page; 854extern struct xpc_rsvd_page *xpc_rsvd_page;
639extern struct xpc_vars_part *xpc_vars_part; 855extern unsigned long *xpc_mach_nasids;
640extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; 856extern struct xpc_partition *xpc_partitions;
641extern char *xpc_remote_copy_buffer;
642extern void *xpc_remote_copy_buffer_base;
643extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **); 857extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **);
644extern struct xpc_rsvd_page *xpc_rsvd_page_init(void); 858extern int xpc_setup_rsvd_page(void);
645extern void xpc_allow_IPI_ops(void); 859extern void xpc_teardown_rsvd_page(void);
646extern void xpc_restrict_IPI_ops(void); 860extern int xpc_identify_activate_IRQ_sender(void);
647extern int xpc_identify_act_IRQ_sender(void);
648extern int xpc_partition_disengaged(struct xpc_partition *); 861extern int xpc_partition_disengaged(struct xpc_partition *);
649extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); 862extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
650extern void xpc_mark_partition_inactive(struct xpc_partition *); 863extern void xpc_mark_partition_inactive(struct xpc_partition *);
651extern void xpc_discovery(void); 864extern void xpc_discovery(void);
652extern void xpc_check_remote_hb(void); 865extern enum xp_retval xpc_get_remote_rp(int, unsigned long *,
866 struct xpc_rsvd_page *,
867 unsigned long *);
653extern void xpc_deactivate_partition(const int, struct xpc_partition *, 868extern void xpc_deactivate_partition(const int, struct xpc_partition *,
654 enum xp_retval); 869 enum xp_retval);
655extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); 870extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
@@ -657,21 +872,52 @@ extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
657/* found in xpc_channel.c */ 872/* found in xpc_channel.c */
658extern void xpc_initiate_connect(int); 873extern void xpc_initiate_connect(int);
659extern void xpc_initiate_disconnect(int); 874extern void xpc_initiate_disconnect(int);
660extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **); 875extern enum xp_retval xpc_allocate_msg_wait(struct xpc_channel *);
661extern enum xp_retval xpc_initiate_send(short, int, void *); 876extern enum xp_retval xpc_initiate_send(short, int, u32, void *, u16);
662extern enum xp_retval xpc_initiate_send_notify(short, int, void *, 877extern enum xp_retval xpc_initiate_send_notify(short, int, u32, void *, u16,
663 xpc_notify_func, void *); 878 xpc_notify_func, void *);
664extern void xpc_initiate_received(short, int, void *); 879extern void xpc_initiate_received(short, int, void *);
665extern enum xp_retval xpc_setup_infrastructure(struct xpc_partition *); 880extern void xpc_process_sent_chctl_flags(struct xpc_partition *);
666extern enum xp_retval xpc_pull_remote_vars_part(struct xpc_partition *);
667extern void xpc_process_channel_activity(struct xpc_partition *);
668extern void xpc_connected_callout(struct xpc_channel *); 881extern void xpc_connected_callout(struct xpc_channel *);
669extern void xpc_deliver_msg(struct xpc_channel *); 882extern void xpc_deliver_payload(struct xpc_channel *);
670extern void xpc_disconnect_channel(const int, struct xpc_channel *, 883extern void xpc_disconnect_channel(const int, struct xpc_channel *,
671 enum xp_retval, unsigned long *); 884 enum xp_retval, unsigned long *);
672extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); 885extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval);
673extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); 886extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval);
674extern void xpc_teardown_infrastructure(struct xpc_partition *); 887
888static inline int
889xpc_hb_allowed(short partid, void *heartbeating_to_mask)
890{
891 return test_bit(partid, heartbeating_to_mask);
892}
893
894static inline int
895xpc_any_hbs_allowed(void)
896{
897 DBUG_ON(xpc_heartbeating_to_mask == NULL);
898 return !bitmap_empty(xpc_heartbeating_to_mask, xp_max_npartitions);
899}
900
901static inline void
902xpc_allow_hb(short partid)
903{
904 DBUG_ON(xpc_heartbeating_to_mask == NULL);
905 set_bit(partid, xpc_heartbeating_to_mask);
906}
907
908static inline void
909xpc_disallow_hb(short partid)
910{
911 DBUG_ON(xpc_heartbeating_to_mask == NULL);
912 clear_bit(partid, xpc_heartbeating_to_mask);
913}
914
915static inline void
916xpc_disallow_all_hbs(void)
917{
918 DBUG_ON(xpc_heartbeating_to_mask == NULL);
919 bitmap_zero(xpc_heartbeating_to_mask, xp_max_npartitions);
920}
675 921
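These helpers support a generic heartbeat check along the following lines. This is a sketch only; the real checker would also skip the local partition and any partition that is inactive or already deactivating.

static void xpc_example_check_remote_heartbeats(void)
{
        struct xpc_partition *part;
        enum xp_retval ret;
        short partid;

        for (partid = 0; partid < xp_max_npartitions; partid++) {
                if (!xpc_hb_allowed(partid, xpc_heartbeating_to_mask))
                        continue;       /* not expecting this one to heartbeat */

                part = &xpc_partitions[partid];

                /* hook is expected to compare against part->last_heartbeat */
                ret = xpc_get_remote_heartbeat(part);
                if (ret != xpSuccess)
                        XPC_DEACTIVATE_PARTITION(part, ret);
        }
}
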
676static inline void 922static inline void
677xpc_wakeup_channel_mgr(struct xpc_partition *part) 923xpc_wakeup_channel_mgr(struct xpc_partition *part)
@@ -713,7 +959,7 @@ xpc_part_deref(struct xpc_partition *part)
713 s32 refs = atomic_dec_return(&part->references); 959 s32 refs = atomic_dec_return(&part->references);
714 960
715 DBUG_ON(refs < 0); 961 DBUG_ON(refs < 0);
716 if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) 962 if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN)
717 wake_up(&part->teardown_wq); 963 wake_up(&part->teardown_wq);
718} 964}
719 965
@@ -723,7 +969,7 @@ xpc_part_ref(struct xpc_partition *part)
723 int setup; 969 int setup;
724 970
725 atomic_inc(&part->references); 971 atomic_inc(&part->references);
726 setup = (part->setup_state == XPC_P_SETUP); 972 setup = (part->setup_state == XPC_P_SS_SETUP);
727 if (!setup) 973 if (!setup)
728 xpc_part_deref(part); 974 xpc_part_deref(part);
729 975
@@ -741,416 +987,4 @@ xpc_part_ref(struct xpc_partition *part)
741 (_p)->reason_line = _line; \ 987 (_p)->reason_line = _line; \
742 } 988 }
743 989
744/*
745 * This next set of inlines are used to keep track of when a partition is
746 * potentially engaged in accessing memory belonging to another partition.
747 */
748
749static inline void
750xpc_mark_partition_engaged(struct xpc_partition *part)
751{
752 unsigned long irq_flags;
753 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
754 (XPC_ENGAGED_PARTITIONS_AMO *
755 sizeof(AMO_t)));
756
757 local_irq_save(irq_flags);
758
759 /* set bit corresponding to our partid in remote partition's AMO */
760 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
761 (1UL << sn_partition_id));
762 /*
763 * We must always use the nofault function regardless of whether we
764 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
765 * didn't, we'd never know that the other partition is down and would
766 * keep sending IPIs and AMOs to it until the heartbeat times out.
767 */
768 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
769 variable),
770 xp_nofault_PIOR_target));
771
772 local_irq_restore(irq_flags);
773}
774
775static inline void
776xpc_mark_partition_disengaged(struct xpc_partition *part)
777{
778 unsigned long irq_flags;
779 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
780 (XPC_ENGAGED_PARTITIONS_AMO *
781 sizeof(AMO_t)));
782
783 local_irq_save(irq_flags);
784
785 /* clear bit corresponding to our partid in remote partition's AMO */
786 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
787 ~(1UL << sn_partition_id));
788 /*
789 * We must always use the nofault function regardless of whether we
790 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
791 * didn't, we'd never know that the other partition is down and would
792 * keep sending IPIs and AMOs to it until the heartbeat times out.
793 */
794 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
795 variable),
796 xp_nofault_PIOR_target));
797
798 local_irq_restore(irq_flags);
799}
800
801static inline void
802xpc_request_partition_disengage(struct xpc_partition *part)
803{
804 unsigned long irq_flags;
805 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
806 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
807
808 local_irq_save(irq_flags);
809
810 /* set bit corresponding to our partid in remote partition's AMO */
811 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
812 (1UL << sn_partition_id));
813 /*
814 * We must always use the nofault function regardless of whether we
815 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
816 * didn't, we'd never know that the other partition is down and would
817 * keep sending IPIs and AMOs to it until the heartbeat times out.
818 */
819 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
820 variable),
821 xp_nofault_PIOR_target));
822
823 local_irq_restore(irq_flags);
824}
825
826static inline void
827xpc_cancel_partition_disengage_request(struct xpc_partition *part)
828{
829 unsigned long irq_flags;
830 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
831 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
832
833 local_irq_save(irq_flags);
834
835 /* clear bit corresponding to our partid in remote partition's AMO */
836 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
837 ~(1UL << sn_partition_id));
838 /*
839 * We must always use the nofault function regardless of whether we
840 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
841 * didn't, we'd never know that the other partition is down and would
842 * keep sending IPIs and AMOs to it until the heartbeat times out.
843 */
844 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
845 variable),
846 xp_nofault_PIOR_target));
847
848 local_irq_restore(irq_flags);
849}
850
851static inline u64
852xpc_partition_engaged(u64 partid_mask)
853{
854 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
855
856 /* return our partition's AMO variable ANDed with partid_mask */
857 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
858 partid_mask);
859}
860
861static inline u64
862xpc_partition_disengage_requested(u64 partid_mask)
863{
864 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
865
866 /* return our partition's AMO variable ANDed with partid_mask */
867 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
868 partid_mask);
869}
870
871static inline void
872xpc_clear_partition_engaged(u64 partid_mask)
873{
874 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
875
876 /* clear bit(s) based on partid_mask in our partition's AMO */
877 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
878 ~partid_mask);
879}
880
881static inline void
882xpc_clear_partition_disengage_request(u64 partid_mask)
883{
884 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
885
886 /* clear bit(s) based on partid_mask in our partition's AMO */
887 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
888 ~partid_mask);
889}
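
/*
 * Illustrative sketch only -- this helper is hypothetical and not part of
 * the original header. It shows how the engagement AMOs above fit together
 * when tearing down a partnership: we ask the remote partition to disengage,
 * then report whether its bit is still present in our engaged-partitions
 * AMO. The real teardown flow in XPC is more involved than this.
 */
static inline int
xpc_example_remote_still_engaged(struct xpc_partition *part)
{
	u64 partid_mask = 1UL << XPC_PARTID(part);

	/* set our bit in the remote partition's disengage-request AMO */
	xpc_request_partition_disengage(part);

	/* nonzero while the remote side's bit remains in our engaged AMO */
	return xpc_partition_engaged(partid_mask) != 0;
}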
890
891/*
892 * The following set of macros and inlines are used for the sending and
893 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
894 * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
895 * the other that is associated with channel activity (SGI_XPC_NOTIFY).
896 */
897
898static inline u64
899xpc_IPI_receive(AMO_t *amo)
900{
901 return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
902}
903
904static inline enum xp_retval
905xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
906{
907 int ret = 0;
908 unsigned long irq_flags;
909
910 local_irq_save(irq_flags);
911
912 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
913 sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
914
915 /*
916 * We must always use the nofault function regardless of whether we
917 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
918 * didn't, we'd never know that the other partition is down and would
919 * keep sending IPIs and AMOs to it until the heartbeat times out.
920 */
921 ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
922 xp_nofault_PIOR_target));
923
924 local_irq_restore(irq_flags);
925
926 return ((ret == 0) ? xpSuccess : xpPioReadError);
927}
928
929/*
930 * IPIs associated with SGI_XPC_ACTIVATE IRQ.
931 */
932
933/*
934 * Flag the appropriate AMO variable and send an IPI to the specified node.
935 */
936static inline void
937xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
938 int to_phys_cpuid)
939{
940 int w_index = XPC_NASID_W_INDEX(from_nasid);
941 int b_index = XPC_NASID_B_INDEX(from_nasid);
942 AMO_t *amos = (AMO_t *)__va(amos_page_pa +
943 (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
944
945 (void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
946 to_phys_cpuid, SGI_XPC_ACTIVATE);
947}
948
949static inline void
950xpc_IPI_send_activate(struct xpc_vars *vars)
951{
952 xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
953 vars->act_nasid, vars->act_phys_cpuid);
954}
955
956static inline void
957xpc_IPI_send_activated(struct xpc_partition *part)
958{
959 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
960 part->remote_act_nasid,
961 part->remote_act_phys_cpuid);
962}
963
964static inline void
965xpc_IPI_send_reactivate(struct xpc_partition *part)
966{
967 xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
968 xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
969}
970
971static inline void
972xpc_IPI_send_disengage(struct xpc_partition *part)
973{
974 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
975 part->remote_act_nasid,
976 part->remote_act_phys_cpuid);
977}
978
979/*
980 * IPIs associated with SGI_XPC_NOTIFY IRQ.
981 */
982
983/*
984 * Send an IPI to the remote partition that is associated with the
985 * specified channel.
986 */
987#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \
988 xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f)
989
990static inline void
991xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
992 unsigned long *irq_flags)
993{
994 struct xpc_partition *part = &xpc_partitions[ch->partid];
995 enum xp_retval ret;
996
997 if (likely(part->act_state != XPC_P_DEACTIVATING)) {
998 ret = xpc_IPI_send(part->remote_IPI_amo_va,
999 (u64)ipi_flag << (ch->number * 8),
1000 part->remote_IPI_nasid,
1001 part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY);
1002 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
1003 ipi_flag_string, ch->partid, ch->number, ret);
1004 if (unlikely(ret != xpSuccess)) {
1005 if (irq_flags != NULL)
1006 spin_unlock_irqrestore(&ch->lock, *irq_flags);
1007 XPC_DEACTIVATE_PARTITION(part, ret);
1008 if (irq_flags != NULL)
1009 spin_lock_irqsave(&ch->lock, *irq_flags);
1010 }
1011 }
1012}
1013
1014/*
1015 * Make it look like the remote partition, which is associated with the
1016 * specified channel, sent us an IPI. This faked IPI will be handled
1017 * by xpc_dropped_IPI_check().
1018 */
1019#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \
1020 xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f)
1021
1022static inline void
1023xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
1024 char *ipi_flag_string)
1025{
1026 struct xpc_partition *part = &xpc_partitions[ch->partid];
1027
1028 FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable),
1029 FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8)));
1030 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
1031 ipi_flag_string, ch->partid, ch->number);
1032}
1033
1034/*
1035 * The sending and receiving of IPIs includes the setting of an AMO variable
1036 * to indicate the reason the IPI was sent. The 64-bit variable is divided
1037 * up into eight bytes, ordered from right to left. Byte zero pertains to
1038 * channel 0, byte one to channel 1, and so on. Each byte is described by
1039 * the following IPI flags.
1040 */
1041
1042#define XPC_IPI_CLOSEREQUEST 0x01
1043#define XPC_IPI_CLOSEREPLY 0x02
1044#define XPC_IPI_OPENREQUEST 0x04
1045#define XPC_IPI_OPENREPLY 0x08
1046#define XPC_IPI_MSGREQUEST 0x10
1047
1048/* given an AMO variable and a channel#, get its associated IPI flags */
1049#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
1050#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
1051
1052#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
1053#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010UL)
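
/*
 * Worked example (hypothetical helper, not part of the original interface):
 * flag an OPENREQUEST for channel 2 and note the resulting value, showing
 * how the macros above pack one byte of IPI flags per channel into the
 * 64-bit AMO value.
 */
static inline u64
xpc_example_pack_IPI_flags(void)
{
	u64 amo_flags = 0;

	/* 0x04 shifted into byte 2: amo_flags becomes 0x0000000000040000UL */
	XPC_SET_IPI_FLAGS(amo_flags, 2, XPC_IPI_OPENREQUEST);

	/* XPC_GET_IPI_FLAGS(amo_flags, 2) now yields 0x04 again, */
	/* XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(amo_flags) is nonzero, and */
	/* XPC_ANY_MSG_IPI_FLAGS_SET(amo_flags) is zero */
	return amo_flags;
}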
1054
1055static inline void
1056xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
1057{
1058 struct xpc_openclose_args *args = ch->local_openclose_args;
1059
1060 args->reason = ch->reason;
1061
1062 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
1063}
1064
1065static inline void
1066xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags)
1067{
1068 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags);
1069}
1070
1071static inline void
1072xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
1073{
1074 struct xpc_openclose_args *args = ch->local_openclose_args;
1075
1076 args->msg_size = ch->msg_size;
1077 args->local_nentries = ch->local_nentries;
1078
1079 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags);
1080}
1081
1082static inline void
1083xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
1084{
1085 struct xpc_openclose_args *args = ch->local_openclose_args;
1086
1087 args->remote_nentries = ch->remote_nentries;
1088 args->local_nentries = ch->local_nentries;
1089 args->local_msgqueue_pa = __pa(ch->local_msgqueue);
1090
1091 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags);
1092}
1093
1094static inline void
1095xpc_IPI_send_msgrequest(struct xpc_channel *ch)
1096{
1097 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL);
1098}
1099
1100static inline void
1101xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
1102{
1103 XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
1104}
1105
1106/*
1107 * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
1108 * pages are located in the lowest granule. The lowest granule uses 4k pages
1109 * for cached references and an alternate TLB handler to never provide a
1110 * cacheable mapping for the entire region. This prevents speculative reads
1111 * of cached copies of our lines from being issued, which would cause the
1112 * SHUB to generate a PI FSB Protocol error. For XPC, we need 64 AMO
1113 * variables (based on XP_MAX_PARTITIONS) for message notification, an
1114 * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition
1115 * activation, and 2 AMO variables for partition deactivation.
1116 */
1117static inline AMO_t *
1118xpc_IPI_init(int index)
1119{
1120 AMO_t *amo = xpc_vars->amos_page + index;
1121
1122 (void)xpc_IPI_receive(amo); /* clear AMO variable */
1123 return amo;
1124}
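
/*
 * Hypothetical setup sketch (not part of the original code): clear the two
 * deactivation-related AMOs described above during initialization. Per the
 * comment above, the lowest granule carries 64 + 128 + 2 = 194 AMO variables
 * in total; the two index constants used here are defined earlier in this
 * header.
 */
static inline void
xpc_example_init_deactivation_amos(void)
{
	/* xpc_IPI_init() clears each AMO variable and returns its address */
	(void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
	(void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
}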
1125
1126static inline enum xp_retval
1127xpc_map_bte_errors(bte_result_t error)
1128{
1129 return ((error == BTE_SUCCESS) ? xpSuccess : xpBteCopyError);
1130}
1131
1132/*
1133 * Check to see if there is any channel activity to/from the specified
1134 * partition.
1135 */
1136static inline void
1137xpc_check_for_channel_activity(struct xpc_partition *part)
1138{
1139 u64 IPI_amo;
1140 unsigned long irq_flags;
1141
1142 IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
1143 if (IPI_amo == 0)
1144 return;
1145
1146 spin_lock_irqsave(&part->IPI_lock, irq_flags);
1147 part->local_IPI_amo |= IPI_amo;
1148 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
1149
1150 dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
1151 XPC_PARTID(part), IPI_amo);
1152
1153 xpc_wakeup_channel_mgr(part);
1154}
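
/*
 * Hypothetical polling sketch (not part of the original header): a periodic
 * "dropped IPI" style sweep could call the helper above for every partition
 * that is not already being deactivated. The loop bounds and the activity
 * test are illustrative only; the real check in XPC is driven per partition.
 */
static inline void
xpc_example_poll_all_partitions(void)
{
	short partid;

	for (partid = 0; partid < XP_MAX_PARTITIONS; partid++) {
		struct xpc_partition *part = &xpc_partitions[partid];

		if (part->act_state != XPC_P_DEACTIVATING)
			xpc_check_for_channel_activity(part);
	}
}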
1155
1156#endif /* _DRIVERS_MISC_SGIXP_XPC_H */ 990#endif /* _DRIVERS_MISC_SGIXP_XPC_H */