author     Dean Nelson <dcn@sgi.com>            2008-04-22 15:48:55 -0400
committer  Tony Luck <tony.luck@intel.com>      2008-04-22 18:08:44 -0400
commit     35190506b1a18eda7df24b285fdcd94dec7800ef (patch)
tree       ce0101cd6867738ff8bbe4edf343cbe2517540e9 /drivers/misc/sgi-xp/xpc_main.c
parent     4a3ad2ddc0b920cd3ead84b0c67599be02d689ca (diff)
[IA64] run rest of drivers/misc/sgi-xp through scripts/Lindent
Ran patches through scripts/Lindent (part 2).
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
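
For readers unfamiliar with the tool: scripts/Lindent is the kernel tree's thin wrapper around GNU indent, run here as something like `./scripts/Lindent drivers/misc/sgi-xp/xpc_main.c`. The sketch below (hypothetical struct and function names, not code from this commit) condenses the three mechanical changes that account for most of the churn; attributing the blank-line squeeze to indent's -sob option is an assumption about the wrapper's flags.

```c
/* Illustrative sketch only -- hypothetical names, not from xpc_main.c. */
struct opt {
	const char *procname;
	int mode;
};

static int beep(void)
{
	return 0;
}

/* Before Lindent: doubled blank lines, a space after value casts, and
 * each initializer's closing brace on a line of its own. */
static struct opt opts_before[] = {
	{
	 .procname = "hb_interval",
	 .mode = 0644
	},
	{0}
};


static void style_before(void)
{
	(void) beep();
}

/* After Lindent: runs of blank lines collapse to one (indent's -sob),
 * "(void) beep()" becomes "(void)beep()", and each closing brace is
 * pulled up onto the last initializer line -- compare the ctl_table
 * hunks in the diff below. */
static struct opt opts_after[] = {
	{
	 .procname = "hb_interval",
	 .mode = 0644},
	{0}
};

static void style_after(void)
{
	(void)beep();
}
```

Nothing semantic changes; the 138 insertions and 191 deletions in the diffstat are all of this mechanical kind.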
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_main.c')
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c  329
1 file changed, 138 insertions(+), 191 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index bdb2cf1fcbcc..d81a2dd787ac 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -6,7 +6,6 @@ | |||
6 | * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | |||
10 | /* | 9 | /* |
11 | * Cross Partition Communication (XPC) support - standard version. | 10 | * Cross Partition Communication (XPC) support - standard version. |
12 | * | 11 | * |
@@ -44,7 +43,6 @@ | |||
44 | * | 43 | * |
45 | */ | 44 | */ |
46 | 45 | ||
47 | |||
48 | #include <linux/kernel.h> | 46 | #include <linux/kernel.h> |
49 | #include <linux/module.h> | 47 | #include <linux/module.h> |
50 | #include <linux/init.h> | 48 | #include <linux/init.h> |
@@ -61,7 +59,6 @@ | |||
61 | #include <asm/uaccess.h> | 59 | #include <asm/uaccess.h> |
62 | #include "xpc.h" | 60 | #include "xpc.h" |
63 | 61 | ||
64 | |||
65 | /* define two XPC debug device structures to be used with dev_dbg() et al */ | 62 | /* define two XPC debug device structures to be used with dev_dbg() et al */ |
66 | 63 | ||
67 | struct device_driver xpc_dbg_name = { | 64 | struct device_driver xpc_dbg_name = { |
@@ -81,10 +78,8 @@ struct device xpc_chan_dbg_subname = { | |||
81 | struct device *xpc_part = &xpc_part_dbg_subname; | 78 | struct device *xpc_part = &xpc_part_dbg_subname; |
82 | struct device *xpc_chan = &xpc_chan_dbg_subname; | 79 | struct device *xpc_chan = &xpc_chan_dbg_subname; |
83 | 80 | ||
84 | |||
85 | static int xpc_kdebug_ignore; | 81 | static int xpc_kdebug_ignore; |
86 | 82 | ||
87 | |||
88 | /* systune related variables for /proc/sys directories */ | 83 | /* systune related variables for /proc/sys directories */ |
89 | 84 | ||
90 | static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; | 85 | static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; |
@@ -101,56 +96,51 @@ static int xpc_disengage_request_max_timelimit = 120; | |||
101 | 96 | ||
102 | static ctl_table xpc_sys_xpc_hb_dir[] = { | 97 | static ctl_table xpc_sys_xpc_hb_dir[] = { |
103 | { | 98 | { |
104 | .ctl_name = CTL_UNNUMBERED, | 99 | .ctl_name = CTL_UNNUMBERED, |
105 | .procname = "hb_interval", | 100 | .procname = "hb_interval", |
106 | .data = &xpc_hb_interval, | 101 | .data = &xpc_hb_interval, |
107 | .maxlen = sizeof(int), | 102 | .maxlen = sizeof(int), |
108 | .mode = 0644, | 103 | .mode = 0644, |
109 | .proc_handler = &proc_dointvec_minmax, | 104 | .proc_handler = &proc_dointvec_minmax, |
110 | .strategy = &sysctl_intvec, | 105 | .strategy = &sysctl_intvec, |
111 | .extra1 = &xpc_hb_min_interval, | 106 | .extra1 = &xpc_hb_min_interval, |
112 | .extra2 = &xpc_hb_max_interval | 107 | .extra2 = &xpc_hb_max_interval}, |
113 | }, | ||
114 | { | 108 | { |
115 | .ctl_name = CTL_UNNUMBERED, | 109 | .ctl_name = CTL_UNNUMBERED, |
116 | .procname = "hb_check_interval", | 110 | .procname = "hb_check_interval", |
117 | .data = &xpc_hb_check_interval, | 111 | .data = &xpc_hb_check_interval, |
118 | .maxlen = sizeof(int), | 112 | .maxlen = sizeof(int), |
119 | .mode = 0644, | 113 | .mode = 0644, |
120 | .proc_handler = &proc_dointvec_minmax, | 114 | .proc_handler = &proc_dointvec_minmax, |
121 | .strategy = &sysctl_intvec, | 115 | .strategy = &sysctl_intvec, |
122 | .extra1 = &xpc_hb_check_min_interval, | 116 | .extra1 = &xpc_hb_check_min_interval, |
123 | .extra2 = &xpc_hb_check_max_interval | 117 | .extra2 = &xpc_hb_check_max_interval}, |
124 | }, | ||
125 | {} | 118 | {} |
126 | }; | 119 | }; |
127 | static ctl_table xpc_sys_xpc_dir[] = { | 120 | static ctl_table xpc_sys_xpc_dir[] = { |
128 | { | 121 | { |
129 | .ctl_name = CTL_UNNUMBERED, | 122 | .ctl_name = CTL_UNNUMBERED, |
130 | .procname = "hb", | 123 | .procname = "hb", |
131 | .mode = 0555, | 124 | .mode = 0555, |
132 | .child = xpc_sys_xpc_hb_dir | 125 | .child = xpc_sys_xpc_hb_dir}, |
133 | }, | ||
134 | { | 126 | { |
135 | .ctl_name = CTL_UNNUMBERED, | 127 | .ctl_name = CTL_UNNUMBERED, |
136 | .procname = "disengage_request_timelimit", | 128 | .procname = "disengage_request_timelimit", |
137 | .data = &xpc_disengage_request_timelimit, | 129 | .data = &xpc_disengage_request_timelimit, |
138 | .maxlen = sizeof(int), | 130 | .maxlen = sizeof(int), |
139 | .mode = 0644, | 131 | .mode = 0644, |
140 | .proc_handler = &proc_dointvec_minmax, | 132 | .proc_handler = &proc_dointvec_minmax, |
141 | .strategy = &sysctl_intvec, | 133 | .strategy = &sysctl_intvec, |
142 | .extra1 = &xpc_disengage_request_min_timelimit, | 134 | .extra1 = &xpc_disengage_request_min_timelimit, |
143 | .extra2 = &xpc_disengage_request_max_timelimit | 135 | .extra2 = &xpc_disengage_request_max_timelimit}, |
144 | }, | ||
145 | {} | 136 | {} |
146 | }; | 137 | }; |
147 | static ctl_table xpc_sys_dir[] = { | 138 | static ctl_table xpc_sys_dir[] = { |
148 | { | 139 | { |
149 | .ctl_name = CTL_UNNUMBERED, | 140 | .ctl_name = CTL_UNNUMBERED, |
150 | .procname = "xpc", | 141 | .procname = "xpc", |
151 | .mode = 0555, | 142 | .mode = 0555, |
152 | .child = xpc_sys_xpc_dir | 143 | .child = xpc_sys_xpc_dir}, |
153 | }, | ||
154 | {} | 144 | {} |
155 | }; | 145 | }; |
156 | static struct ctl_table_header *xpc_sysctl; | 146 | static struct ctl_table_header *xpc_sysctl; |
@@ -172,13 +162,10 @@ static DECLARE_COMPLETION(xpc_hb_checker_exited); | |||
172 | /* notification that the xpc_discovery thread has exited */ | 162 | /* notification that the xpc_discovery thread has exited */ |
173 | static DECLARE_COMPLETION(xpc_discovery_exited); | 163 | static DECLARE_COMPLETION(xpc_discovery_exited); |
174 | 164 | ||
175 | |||
176 | static struct timer_list xpc_hb_timer; | 165 | static struct timer_list xpc_hb_timer; |
177 | 166 | ||
178 | |||
179 | static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); | 167 | static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); |
180 | 168 | ||
181 | |||
182 | static int xpc_system_reboot(struct notifier_block *, unsigned long, void *); | 169 | static int xpc_system_reboot(struct notifier_block *, unsigned long, void *); |
183 | static struct notifier_block xpc_reboot_notifier = { | 170 | static struct notifier_block xpc_reboot_notifier = { |
184 | .notifier_call = xpc_system_reboot, | 171 | .notifier_call = xpc_system_reboot, |
@@ -189,25 +176,22 @@ static struct notifier_block xpc_die_notifier = { | |||
189 | .notifier_call = xpc_system_die, | 176 | .notifier_call = xpc_system_die, |
190 | }; | 177 | }; |
191 | 178 | ||
192 | |||
193 | /* | 179 | /* |
194 | * Timer function to enforce the timelimit on the partition disengage request. | 180 | * Timer function to enforce the timelimit on the partition disengage request. |
195 | */ | 181 | */ |
196 | static void | 182 | static void |
197 | xpc_timeout_partition_disengage_request(unsigned long data) | 183 | xpc_timeout_partition_disengage_request(unsigned long data) |
198 | { | 184 | { |
199 | struct xpc_partition *part = (struct xpc_partition *) data; | 185 | struct xpc_partition *part = (struct xpc_partition *)data; |
200 | |||
201 | 186 | ||
202 | DBUG_ON(time_before(jiffies, part->disengage_request_timeout)); | 187 | DBUG_ON(time_before(jiffies, part->disengage_request_timeout)); |
203 | 188 | ||
204 | (void) xpc_partition_disengaged(part); | 189 | (void)xpc_partition_disengaged(part); |
205 | 190 | ||
206 | DBUG_ON(part->disengage_request_timeout != 0); | 191 | DBUG_ON(part->disengage_request_timeout != 0); |
207 | DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0); | 192 | DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0); |
208 | } | 193 | } |
209 | 194 | ||
210 | |||
211 | /* | 195 | /* |
212 | * Notify the heartbeat check thread that an IRQ has been received. | 196 | * Notify the heartbeat check thread that an IRQ has been received. |
213 | */ | 197 | */ |
@@ -219,7 +203,6 @@ xpc_act_IRQ_handler(int irq, void *dev_id) | |||
219 | return IRQ_HANDLED; | 203 | return IRQ_HANDLED; |
220 | } | 204 | } |
221 | 205 | ||
222 | |||
223 | /* | 206 | /* |
224 | * Timer to produce the heartbeat. The timer structures function is | 207 | * Timer to produce the heartbeat. The timer structures function is |
225 | * already set when this is initially called. A tunable is used to | 208 | * already set when this is initially called. A tunable is used to |
@@ -238,7 +221,6 @@ xpc_hb_beater(unsigned long dummy) | |||
238 | add_timer(&xpc_hb_timer); | 221 | add_timer(&xpc_hb_timer); |
239 | } | 222 | } |
240 | 223 | ||
241 | |||
242 | /* | 224 | /* |
243 | * This thread is responsible for nearly all of the partition | 225 | * This thread is responsible for nearly all of the partition |
244 | * activation/deactivation. | 226 | * activation/deactivation. |
@@ -248,8 +230,7 @@ xpc_hb_checker(void *ignore) | |||
248 | { | 230 | { |
249 | int last_IRQ_count = 0; | 231 | int last_IRQ_count = 0; |
250 | int new_IRQ_count; | 232 | int new_IRQ_count; |
251 | int force_IRQ=0; | 233 | int force_IRQ = 0; |
252 | |||
253 | 234 | ||
254 | /* this thread was marked active by xpc_hb_init() */ | 235 | /* this thread was marked active by xpc_hb_init() */ |
255 | 236 | ||
@@ -261,14 +242,13 @@ xpc_hb_checker(void *ignore) | |||
261 | xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); | 242 | xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); |
262 | xpc_hb_beater(0); | 243 | xpc_hb_beater(0); |
263 | 244 | ||
264 | while (!(volatile int) xpc_exiting) { | 245 | while (!(volatile int)xpc_exiting) { |
265 | 246 | ||
266 | dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " | 247 | dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " |
267 | "been received\n", | 248 | "been received\n", |
268 | (int) (xpc_hb_check_timeout - jiffies), | 249 | (int)(xpc_hb_check_timeout - jiffies), |
269 | atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); | 250 | atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); |
270 | 251 | ||
271 | |||
272 | /* checking of remote heartbeats is skewed by IRQ handling */ | 252 | /* checking of remote heartbeats is skewed by IRQ handling */ |
273 | if (time_after_eq(jiffies, xpc_hb_check_timeout)) { | 253 | if (time_after_eq(jiffies, xpc_hb_check_timeout)) { |
274 | dev_dbg(xpc_part, "checking remote heartbeats\n"); | 254 | dev_dbg(xpc_part, "checking remote heartbeats\n"); |
@@ -282,7 +262,6 @@ xpc_hb_checker(void *ignore) | |||
282 | force_IRQ = 1; | 262 | force_IRQ = 1; |
283 | } | 263 | } |
284 | 264 | ||
285 | |||
286 | /* check for outstanding IRQs */ | 265 | /* check for outstanding IRQs */ |
287 | new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); | 266 | new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); |
288 | if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { | 267 | if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { |
@@ -294,30 +273,30 @@ xpc_hb_checker(void *ignore) | |||
294 | last_IRQ_count += xpc_identify_act_IRQ_sender(); | 273 | last_IRQ_count += xpc_identify_act_IRQ_sender(); |
295 | if (last_IRQ_count < new_IRQ_count) { | 274 | if (last_IRQ_count < new_IRQ_count) { |
296 | /* retry once to help avoid missing AMO */ | 275 | /* retry once to help avoid missing AMO */ |
297 | (void) xpc_identify_act_IRQ_sender(); | 276 | (void)xpc_identify_act_IRQ_sender(); |
298 | } | 277 | } |
299 | last_IRQ_count = new_IRQ_count; | 278 | last_IRQ_count = new_IRQ_count; |
300 | 279 | ||
301 | xpc_hb_check_timeout = jiffies + | 280 | xpc_hb_check_timeout = jiffies + |
302 | (xpc_hb_check_interval * HZ); | 281 | (xpc_hb_check_interval * HZ); |
303 | } | 282 | } |
304 | 283 | ||
305 | /* wait for IRQ or timeout */ | 284 | /* wait for IRQ or timeout */ |
306 | (void) wait_event_interruptible(xpc_act_IRQ_wq, | 285 | (void)wait_event_interruptible(xpc_act_IRQ_wq, |
307 | (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) || | 286 | (last_IRQ_count < |
308 | time_after_eq(jiffies, xpc_hb_check_timeout) || | 287 | atomic_read(&xpc_act_IRQ_rcvd) |
309 | (volatile int) xpc_exiting)); | 288 | || time_after_eq(jiffies, |
289 | xpc_hb_check_timeout) || | ||
290 | (volatile int)xpc_exiting)); | ||
310 | } | 291 | } |
311 | 292 | ||
312 | dev_dbg(xpc_part, "heartbeat checker is exiting\n"); | 293 | dev_dbg(xpc_part, "heartbeat checker is exiting\n"); |
313 | 294 | ||
314 | |||
315 | /* mark this thread as having exited */ | 295 | /* mark this thread as having exited */ |
316 | complete(&xpc_hb_checker_exited); | 296 | complete(&xpc_hb_checker_exited); |
317 | return 0; | 297 | return 0; |
318 | } | 298 | } |
319 | 299 | ||
320 | |||
321 | /* | 300 | /* |
322 | * This thread will attempt to discover other partitions to activate | 301 | * This thread will attempt to discover other partitions to activate |
323 | * based on info provided by SAL. This new thread is short lived and | 302 | * based on info provided by SAL. This new thread is short lived and |
@@ -337,7 +316,6 @@ xpc_initiate_discovery(void *ignore) | |||
337 | return 0; | 316 | return 0; |
338 | } | 317 | } |
339 | 318 | ||
340 | |||
341 | /* | 319 | /* |
342 | * Establish first contact with the remote partititon. This involves pulling | 320 | * Establish first contact with the remote partititon. This involves pulling |
343 | * the XPC per partition variables from the remote partition and waiting for | 321 | * the XPC per partition variables from the remote partition and waiting for |
@@ -348,7 +326,6 @@ xpc_make_first_contact(struct xpc_partition *part) | |||
348 | { | 326 | { |
349 | enum xpc_retval ret; | 327 | enum xpc_retval ret; |
350 | 328 | ||
351 | |||
352 | while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { | 329 | while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { |
353 | if (ret != xpcRetry) { | 330 | if (ret != xpcRetry) { |
354 | XPC_DEACTIVATE_PARTITION(part, ret); | 331 | XPC_DEACTIVATE_PARTITION(part, ret); |
@@ -359,7 +336,7 @@ xpc_make_first_contact(struct xpc_partition *part) | |||
359 | "partition %d\n", XPC_PARTID(part)); | 336 | "partition %d\n", XPC_PARTID(part)); |
360 | 337 | ||
361 | /* wait a 1/4 of a second or so */ | 338 | /* wait a 1/4 of a second or so */ |
362 | (void) msleep_interruptible(250); | 339 | (void)msleep_interruptible(250); |
363 | 340 | ||
364 | if (part->act_state == XPC_P_DEACTIVATING) { | 341 | if (part->act_state == XPC_P_DEACTIVATING) { |
365 | return part->reason; | 342 | return part->reason; |
@@ -369,7 +346,6 @@ xpc_make_first_contact(struct xpc_partition *part) | |||
369 | return xpc_mark_partition_active(part); | 346 | return xpc_mark_partition_active(part); |
370 | } | 347 | } |
371 | 348 | ||
372 | |||
373 | /* | 349 | /* |
374 | * The first kthread assigned to a newly activated partition is the one | 350 | * The first kthread assigned to a newly activated partition is the one |
375 | * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to | 351 | * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to |
@@ -386,12 +362,11 @@ static void | |||
386 | xpc_channel_mgr(struct xpc_partition *part) | 362 | xpc_channel_mgr(struct xpc_partition *part) |
387 | { | 363 | { |
388 | while (part->act_state != XPC_P_DEACTIVATING || | 364 | while (part->act_state != XPC_P_DEACTIVATING || |
389 | atomic_read(&part->nchannels_active) > 0 || | 365 | atomic_read(&part->nchannels_active) > 0 || |
390 | !xpc_partition_disengaged(part)) { | 366 | !xpc_partition_disengaged(part)) { |
391 | 367 | ||
392 | xpc_process_channel_activity(part); | 368 | xpc_process_channel_activity(part); |
393 | 369 | ||
394 | |||
395 | /* | 370 | /* |
396 | * Wait until we've been requested to activate kthreads or | 371 | * Wait until we've been requested to activate kthreads or |
397 | * all of the channel's message queues have been torn down or | 372 | * all of the channel's message queues have been torn down or |
@@ -406,13 +381,19 @@ xpc_channel_mgr(struct xpc_partition *part) | |||
406 | * wake him up. | 381 | * wake him up. |
407 | */ | 382 | */ |
408 | atomic_dec(&part->channel_mgr_requests); | 383 | atomic_dec(&part->channel_mgr_requests); |
409 | (void) wait_event_interruptible(part->channel_mgr_wq, | 384 | (void)wait_event_interruptible(part->channel_mgr_wq, |
410 | (atomic_read(&part->channel_mgr_requests) > 0 || | 385 | (atomic_read |
411 | (volatile u64) part->local_IPI_amo != 0 || | 386 | (&part->channel_mgr_requests) > |
412 | ((volatile u8) part->act_state == | 387 | 0 || |
413 | XPC_P_DEACTIVATING && | 388 | (volatile u64)part-> |
414 | atomic_read(&part->nchannels_active) == 0 && | 389 | local_IPI_amo != 0 || |
415 | xpc_partition_disengaged(part)))); | 390 | ((volatile u8)part->act_state == |
391 | XPC_P_DEACTIVATING && | ||
392 | atomic_read(&part-> | ||
393 | nchannels_active) | ||
394 | == 0 && | ||
395 | xpc_partition_disengaged | ||
396 | (part)))); | ||
416 | atomic_set(&part->channel_mgr_requests, 1); | 397 | atomic_set(&part->channel_mgr_requests, 1); |
417 | 398 | ||
418 | // >>> Does it need to wakeup periodically as well? In case we | 399 | // >>> Does it need to wakeup periodically as well? In case we |
@@ -420,7 +401,6 @@ xpc_channel_mgr(struct xpc_partition *part) | |||
420 | } | 401 | } |
421 | } | 402 | } |
422 | 403 | ||
423 | |||
424 | /* | 404 | /* |
425 | * When XPC HB determines that a partition has come up, it will create a new | 405 | * When XPC HB determines that a partition has come up, it will create a new |
426 | * kthread and that kthread will call this function to attempt to set up the | 406 | * kthread and that kthread will call this function to attempt to set up the |
@@ -454,7 +434,7 @@ xpc_partition_up(struct xpc_partition *part) | |||
454 | * has been dismantled. | 434 | * has been dismantled. |
455 | */ | 435 | */ |
456 | 436 | ||
457 | (void) xpc_part_ref(part); /* this will always succeed */ | 437 | (void)xpc_part_ref(part); /* this will always succeed */ |
458 | 438 | ||
459 | if (xpc_make_first_contact(part) == xpcSuccess) { | 439 | if (xpc_make_first_contact(part) == xpcSuccess) { |
460 | xpc_channel_mgr(part); | 440 | xpc_channel_mgr(part); |
@@ -465,17 +445,15 @@ xpc_partition_up(struct xpc_partition *part) | |||
465 | xpc_teardown_infrastructure(part); | 445 | xpc_teardown_infrastructure(part); |
466 | } | 446 | } |
467 | 447 | ||
468 | |||
469 | static int | 448 | static int |
470 | xpc_activating(void *__partid) | 449 | xpc_activating(void *__partid) |
471 | { | 450 | { |
472 | partid_t partid = (u64) __partid; | 451 | partid_t partid = (u64)__partid; |
473 | struct xpc_partition *part = &xpc_partitions[partid]; | 452 | struct xpc_partition *part = &xpc_partitions[partid]; |
474 | unsigned long irq_flags; | 453 | unsigned long irq_flags; |
475 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; | 454 | struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 }; |
476 | int ret; | 455 | int ret; |
477 | 456 | ||
478 | |||
479 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); | 457 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); |
480 | 458 | ||
481 | spin_lock_irqsave(&part->act_lock, irq_flags); | 459 | spin_lock_irqsave(&part->act_lock, irq_flags); |
@@ -505,7 +483,7 @@ xpc_activating(void *__partid) | |||
505 | ret = sched_setscheduler(current, SCHED_FIFO, ¶m); | 483 | ret = sched_setscheduler(current, SCHED_FIFO, ¶m); |
506 | if (ret != 0) { | 484 | if (ret != 0) { |
507 | dev_warn(xpc_part, "unable to set pid %d to a realtime " | 485 | dev_warn(xpc_part, "unable to set pid %d to a realtime " |
508 | "priority, ret=%d\n", current->pid, ret); | 486 | "priority, ret=%d\n", current->pid, ret); |
509 | } | 487 | } |
510 | 488 | ||
511 | /* allow this thread and its children to run on any CPU */ | 489 | /* allow this thread and its children to run on any CPU */ |
@@ -522,9 +500,9 @@ xpc_activating(void *__partid) | |||
522 | * reloads and system reboots. | 500 | * reloads and system reboots. |
523 | */ | 501 | */ |
524 | if (sn_register_xp_addr_region(part->remote_amos_page_pa, | 502 | if (sn_register_xp_addr_region(part->remote_amos_page_pa, |
525 | PAGE_SIZE, 1) < 0) { | 503 | PAGE_SIZE, 1) < 0) { |
526 | dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " | 504 | dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " |
527 | "xp_addr region\n", partid); | 505 | "xp_addr region\n", partid); |
528 | 506 | ||
529 | spin_lock_irqsave(&part->act_lock, irq_flags); | 507 | spin_lock_irqsave(&part->act_lock, irq_flags); |
530 | part->act_state = XPC_P_INACTIVE; | 508 | part->act_state = XPC_P_INACTIVE; |
@@ -537,12 +515,11 @@ xpc_activating(void *__partid) | |||
537 | xpc_allow_hb(partid, xpc_vars); | 515 | xpc_allow_hb(partid, xpc_vars); |
538 | xpc_IPI_send_activated(part); | 516 | xpc_IPI_send_activated(part); |
539 | 517 | ||
540 | |||
541 | /* | 518 | /* |
542 | * xpc_partition_up() holds this thread and marks this partition as | 519 | * xpc_partition_up() holds this thread and marks this partition as |
543 | * XPC_P_ACTIVE by calling xpc_hb_mark_active(). | 520 | * XPC_P_ACTIVE by calling xpc_hb_mark_active(). |
544 | */ | 521 | */ |
545 | (void) xpc_partition_up(part); | 522 | (void)xpc_partition_up(part); |
546 | 523 | ||
547 | xpc_disallow_hb(partid, xpc_vars); | 524 | xpc_disallow_hb(partid, xpc_vars); |
548 | xpc_mark_partition_inactive(part); | 525 | xpc_mark_partition_inactive(part); |
@@ -555,7 +532,6 @@ xpc_activating(void *__partid) | |||
555 | return 0; | 532 | return 0; |
556 | } | 533 | } |
557 | 534 | ||
558 | |||
559 | void | 535 | void |
560 | xpc_activate_partition(struct xpc_partition *part) | 536 | xpc_activate_partition(struct xpc_partition *part) |
561 | { | 537 | { |
@@ -563,7 +539,6 @@ xpc_activate_partition(struct xpc_partition *part) | |||
563 | unsigned long irq_flags; | 539 | unsigned long irq_flags; |
564 | pid_t pid; | 540 | pid_t pid; |
565 | 541 | ||
566 | |||
567 | spin_lock_irqsave(&part->act_lock, irq_flags); | 542 | spin_lock_irqsave(&part->act_lock, irq_flags); |
568 | 543 | ||
569 | DBUG_ON(part->act_state != XPC_P_INACTIVE); | 544 | DBUG_ON(part->act_state != XPC_P_INACTIVE); |
@@ -573,7 +548,7 @@ xpc_activate_partition(struct xpc_partition *part) | |||
573 | 548 | ||
574 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | 549 | spin_unlock_irqrestore(&part->act_lock, irq_flags); |
575 | 550 | ||
576 | pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0); | 551 | pid = kernel_thread(xpc_activating, (void *)((u64)partid), 0); |
577 | 552 | ||
578 | if (unlikely(pid <= 0)) { | 553 | if (unlikely(pid <= 0)) { |
579 | spin_lock_irqsave(&part->act_lock, irq_flags); | 554 | spin_lock_irqsave(&part->act_lock, irq_flags); |
@@ -583,7 +558,6 @@ xpc_activate_partition(struct xpc_partition *part) | |||
583 | } | 558 | } |
584 | } | 559 | } |
585 | 560 | ||
586 | |||
587 | /* | 561 | /* |
588 | * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified | 562 | * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified |
589 | * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more | 563 | * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more |
@@ -603,10 +577,9 @@ xpc_activate_partition(struct xpc_partition *part) | |||
603 | irqreturn_t | 577 | irqreturn_t |
604 | xpc_notify_IRQ_handler(int irq, void *dev_id) | 578 | xpc_notify_IRQ_handler(int irq, void *dev_id) |
605 | { | 579 | { |
606 | partid_t partid = (partid_t) (u64) dev_id; | 580 | partid_t partid = (partid_t) (u64)dev_id; |
607 | struct xpc_partition *part = &xpc_partitions[partid]; | 581 | struct xpc_partition *part = &xpc_partitions[partid]; |
608 | 582 | ||
609 | |||
610 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); | 583 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); |
611 | 584 | ||
612 | if (xpc_part_ref(part)) { | 585 | if (xpc_part_ref(part)) { |
@@ -617,7 +590,6 @@ xpc_notify_IRQ_handler(int irq, void *dev_id) | |||
617 | return IRQ_HANDLED; | 590 | return IRQ_HANDLED; |
618 | } | 591 | } |
619 | 592 | ||
620 | |||
621 | /* | 593 | /* |
622 | * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor | 594 | * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor |
623 | * because the write to their associated IPI amo completed after the IRQ/IPI | 595 | * because the write to their associated IPI amo completed after the IRQ/IPI |
@@ -630,13 +602,12 @@ xpc_dropped_IPI_check(struct xpc_partition *part) | |||
630 | xpc_check_for_channel_activity(part); | 602 | xpc_check_for_channel_activity(part); |
631 | 603 | ||
632 | part->dropped_IPI_timer.expires = jiffies + | 604 | part->dropped_IPI_timer.expires = jiffies + |
633 | XPC_P_DROPPED_IPI_WAIT; | 605 | XPC_P_DROPPED_IPI_WAIT; |
634 | add_timer(&part->dropped_IPI_timer); | 606 | add_timer(&part->dropped_IPI_timer); |
635 | xpc_part_deref(part); | 607 | xpc_part_deref(part); |
636 | } | 608 | } |
637 | } | 609 | } |
638 | 610 | ||
639 | |||
640 | void | 611 | void |
641 | xpc_activate_kthreads(struct xpc_channel *ch, int needed) | 612 | xpc_activate_kthreads(struct xpc_channel *ch, int needed) |
642 | { | 613 | { |
@@ -644,7 +615,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed) | |||
644 | int assigned = atomic_read(&ch->kthreads_assigned); | 615 | int assigned = atomic_read(&ch->kthreads_assigned); |
645 | int wakeup; | 616 | int wakeup; |
646 | 617 | ||
647 | |||
648 | DBUG_ON(needed <= 0); | 618 | DBUG_ON(needed <= 0); |
649 | 619 | ||
650 | if (idle > 0) { | 620 | if (idle > 0) { |
@@ -676,7 +646,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed) | |||
676 | xpc_create_kthreads(ch, needed, 0); | 646 | xpc_create_kthreads(ch, needed, 0); |
677 | } | 647 | } |
678 | 648 | ||
679 | |||
680 | /* | 649 | /* |
681 | * This function is where XPC's kthreads wait for messages to deliver. | 650 | * This function is where XPC's kthreads wait for messages to deliver. |
682 | */ | 651 | */ |
@@ -686,15 +655,14 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) | |||
686 | do { | 655 | do { |
687 | /* deliver messages to their intended recipients */ | 656 | /* deliver messages to their intended recipients */ |
688 | 657 | ||
689 | while ((volatile s64) ch->w_local_GP.get < | 658 | while ((volatile s64)ch->w_local_GP.get < |
690 | (volatile s64) ch->w_remote_GP.put && | 659 | (volatile s64)ch->w_remote_GP.put && |
691 | !((volatile u32) ch->flags & | 660 | !((volatile u32)ch->flags & XPC_C_DISCONNECTING)) { |
692 | XPC_C_DISCONNECTING)) { | ||
693 | xpc_deliver_msg(ch); | 661 | xpc_deliver_msg(ch); |
694 | } | 662 | } |
695 | 663 | ||
696 | if (atomic_inc_return(&ch->kthreads_idle) > | 664 | if (atomic_inc_return(&ch->kthreads_idle) > |
697 | ch->kthreads_idle_limit) { | 665 | ch->kthreads_idle_limit) { |
698 | /* too many idle kthreads on this channel */ | 666 | /* too many idle kthreads on this channel */ |
699 | atomic_dec(&ch->kthreads_idle); | 667 | atomic_dec(&ch->kthreads_idle); |
700 | break; | 668 | break; |
@@ -703,18 +671,20 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) | |||
703 | dev_dbg(xpc_chan, "idle kthread calling " | 671 | dev_dbg(xpc_chan, "idle kthread calling " |
704 | "wait_event_interruptible_exclusive()\n"); | 672 | "wait_event_interruptible_exclusive()\n"); |
705 | 673 | ||
706 | (void) wait_event_interruptible_exclusive(ch->idle_wq, | 674 | (void)wait_event_interruptible_exclusive(ch->idle_wq, |
707 | ((volatile s64) ch->w_local_GP.get < | 675 | ((volatile s64)ch-> |
708 | (volatile s64) ch->w_remote_GP.put || | 676 | w_local_GP.get < |
709 | ((volatile u32) ch->flags & | 677 | (volatile s64)ch-> |
710 | XPC_C_DISCONNECTING))); | 678 | w_remote_GP.put || |
679 | ((volatile u32)ch-> | ||
680 | flags & | ||
681 | XPC_C_DISCONNECTING))); | ||
711 | 682 | ||
712 | atomic_dec(&ch->kthreads_idle); | 683 | atomic_dec(&ch->kthreads_idle); |
713 | 684 | ||
714 | } while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING)); | 685 | } while (!((volatile u32)ch->flags & XPC_C_DISCONNECTING)); |
715 | } | 686 | } |
716 | 687 | ||
717 | |||
718 | static int | 688 | static int |
719 | xpc_daemonize_kthread(void *args) | 689 | xpc_daemonize_kthread(void *args) |
720 | { | 690 | { |
@@ -725,7 +695,6 @@ xpc_daemonize_kthread(void *args) | |||
725 | int n_needed; | 695 | int n_needed; |
726 | unsigned long irq_flags; | 696 | unsigned long irq_flags; |
727 | 697 | ||
728 | |||
729 | daemonize("xpc%02dc%d", partid, ch_number); | 698 | daemonize("xpc%02dc%d", partid, ch_number); |
730 | 699 | ||
731 | dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", | 700 | dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", |
@@ -756,8 +725,7 @@ xpc_daemonize_kthread(void *args) | |||
756 | * need one less than total #of messages to deliver. | 725 | * need one less than total #of messages to deliver. |
757 | */ | 726 | */ |
758 | n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; | 727 | n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; |
759 | if (n_needed > 0 && | 728 | if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) { |
760 | !(ch->flags & XPC_C_DISCONNECTING)) { | ||
761 | xpc_activate_kthreads(ch, n_needed); | 729 | xpc_activate_kthreads(ch, n_needed); |
762 | } | 730 | } |
763 | } else { | 731 | } else { |
@@ -771,7 +739,7 @@ xpc_daemonize_kthread(void *args) | |||
771 | 739 | ||
772 | spin_lock_irqsave(&ch->lock, irq_flags); | 740 | spin_lock_irqsave(&ch->lock, irq_flags); |
773 | if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && | 741 | if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && |
774 | !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { | 742 | !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { |
775 | ch->flags |= XPC_C_DISCONNECTINGCALLOUT; | 743 | ch->flags |= XPC_C_DISCONNECTINGCALLOUT; |
776 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 744 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
777 | 745 | ||
@@ -798,7 +766,6 @@ xpc_daemonize_kthread(void *args) | |||
798 | return 0; | 766 | return 0; |
799 | } | 767 | } |
800 | 768 | ||
801 | |||
802 | /* | 769 | /* |
803 | * For each partition that XPC has established communications with, there is | 770 | * For each partition that XPC has established communications with, there is |
804 | * a minimum of one kernel thread assigned to perform any operation that | 771 | * a minimum of one kernel thread assigned to perform any operation that |
@@ -813,14 +780,13 @@ xpc_daemonize_kthread(void *args) | |||
813 | */ | 780 | */ |
814 | void | 781 | void |
815 | xpc_create_kthreads(struct xpc_channel *ch, int needed, | 782 | xpc_create_kthreads(struct xpc_channel *ch, int needed, |
816 | int ignore_disconnecting) | 783 | int ignore_disconnecting) |
817 | { | 784 | { |
818 | unsigned long irq_flags; | 785 | unsigned long irq_flags; |
819 | pid_t pid; | 786 | pid_t pid; |
820 | u64 args = XPC_PACK_ARGS(ch->partid, ch->number); | 787 | u64 args = XPC_PACK_ARGS(ch->partid, ch->number); |
821 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | 788 | struct xpc_partition *part = &xpc_partitions[ch->partid]; |
822 | 789 | ||
823 | |||
824 | while (needed-- > 0) { | 790 | while (needed-- > 0) { |
825 | 791 | ||
826 | /* | 792 | /* |
@@ -832,7 +798,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, | |||
832 | if (!atomic_inc_not_zero(&ch->kthreads_assigned)) { | 798 | if (!atomic_inc_not_zero(&ch->kthreads_assigned)) { |
833 | /* kthreads assigned had gone to zero */ | 799 | /* kthreads assigned had gone to zero */ |
834 | BUG_ON(!(ch->flags & | 800 | BUG_ON(!(ch->flags & |
835 | XPC_C_DISCONNECTINGCALLOUT_MADE)); | 801 | XPC_C_DISCONNECTINGCALLOUT_MADE)); |
836 | break; | 802 | break; |
837 | } | 803 | } |
838 | 804 | ||
@@ -843,10 +809,10 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, | |||
843 | if (atomic_inc_return(&part->nchannels_engaged) == 1) | 809 | if (atomic_inc_return(&part->nchannels_engaged) == 1) |
844 | xpc_mark_partition_engaged(part); | 810 | xpc_mark_partition_engaged(part); |
845 | } | 811 | } |
846 | (void) xpc_part_ref(part); | 812 | (void)xpc_part_ref(part); |
847 | xpc_msgqueue_ref(ch); | 813 | xpc_msgqueue_ref(ch); |
848 | 814 | ||
849 | pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0); | 815 | pid = kernel_thread(xpc_daemonize_kthread, (void *)args, 0); |
850 | if (pid < 0) { | 816 | if (pid < 0) { |
851 | /* the fork failed */ | 817 | /* the fork failed */ |
852 | 818 | ||
@@ -869,7 +835,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, | |||
869 | xpc_part_deref(part); | 835 | xpc_part_deref(part); |
870 | 836 | ||
871 | if (atomic_read(&ch->kthreads_assigned) < | 837 | if (atomic_read(&ch->kthreads_assigned) < |
872 | ch->kthreads_idle_limit) { | 838 | ch->kthreads_idle_limit) { |
873 | /* | 839 | /* |
874 | * Flag this as an error only if we have an | 840 | * Flag this as an error only if we have an |
875 | * insufficient #of kthreads for the channel | 841 | * insufficient #of kthreads for the channel |
@@ -877,7 +843,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, | |||
877 | */ | 843 | */ |
878 | spin_lock_irqsave(&ch->lock, irq_flags); | 844 | spin_lock_irqsave(&ch->lock, irq_flags); |
879 | XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, | 845 | XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, |
880 | &irq_flags); | 846 | &irq_flags); |
881 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 847 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
882 | } | 848 | } |
883 | break; | 849 | break; |
@@ -887,7 +853,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, | |||
887 | } | 853 | } |
888 | } | 854 | } |
889 | 855 | ||
890 | |||
891 | void | 856 | void |
892 | xpc_disconnect_wait(int ch_number) | 857 | xpc_disconnect_wait(int ch_number) |
893 | { | 858 | { |
@@ -897,7 +862,6 @@ xpc_disconnect_wait(int ch_number) | |||
897 | struct xpc_channel *ch; | 862 | struct xpc_channel *ch; |
898 | int wakeup_channel_mgr; | 863 | int wakeup_channel_mgr; |
899 | 864 | ||
900 | |||
901 | /* now wait for all callouts to the caller's function to cease */ | 865 | /* now wait for all callouts to the caller's function to cease */ |
902 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { | 866 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { |
903 | part = &xpc_partitions[partid]; | 867 | part = &xpc_partitions[partid]; |
@@ -923,7 +887,8 @@ xpc_disconnect_wait(int ch_number) | |||
923 | if (part->act_state != XPC_P_DEACTIVATING) { | 887 | if (part->act_state != XPC_P_DEACTIVATING) { |
924 | spin_lock(&part->IPI_lock); | 888 | spin_lock(&part->IPI_lock); |
925 | XPC_SET_IPI_FLAGS(part->local_IPI_amo, | 889 | XPC_SET_IPI_FLAGS(part->local_IPI_amo, |
926 | ch->number, ch->delayed_IPI_flags); | 890 | ch->number, |
891 | ch->delayed_IPI_flags); | ||
927 | spin_unlock(&part->IPI_lock); | 892 | spin_unlock(&part->IPI_lock); |
928 | wakeup_channel_mgr = 1; | 893 | wakeup_channel_mgr = 1; |
929 | } | 894 | } |
@@ -941,7 +906,6 @@ xpc_disconnect_wait(int ch_number) | |||
941 | } | 906 | } |
942 | } | 907 | } |
943 | 908 | ||
944 | |||
945 | static void | 909 | static void |
946 | xpc_do_exit(enum xpc_retval reason) | 910 | xpc_do_exit(enum xpc_retval reason) |
947 | { | 911 | { |
@@ -950,7 +914,6 @@ xpc_do_exit(enum xpc_retval reason) | |||
950 | struct xpc_partition *part; | 914 | struct xpc_partition *part; |
951 | unsigned long printmsg_time, disengage_request_timeout = 0; | 915 | unsigned long printmsg_time, disengage_request_timeout = 0; |
952 | 916 | ||
953 | |||
954 | /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ | 917 | /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ |
955 | DBUG_ON(xpc_exiting == 1); | 918 | DBUG_ON(xpc_exiting == 1); |
956 | 919 | ||
@@ -971,10 +934,8 @@ xpc_do_exit(enum xpc_retval reason) | |||
971 | /* wait for the heartbeat checker thread to exit */ | 934 | /* wait for the heartbeat checker thread to exit */ |
972 | wait_for_completion(&xpc_hb_checker_exited); | 935 | wait_for_completion(&xpc_hb_checker_exited); |
973 | 936 | ||
974 | |||
975 | /* sleep for a 1/3 of a second or so */ | 937 | /* sleep for a 1/3 of a second or so */ |
976 | (void) msleep_interruptible(300); | 938 | (void)msleep_interruptible(300); |
977 | |||
978 | 939 | ||
979 | /* wait for all partitions to become inactive */ | 940 | /* wait for all partitions to become inactive */ |
980 | 941 | ||
@@ -988,7 +949,7 @@ xpc_do_exit(enum xpc_retval reason) | |||
988 | part = &xpc_partitions[partid]; | 949 | part = &xpc_partitions[partid]; |
989 | 950 | ||
990 | if (xpc_partition_disengaged(part) && | 951 | if (xpc_partition_disengaged(part) && |
991 | part->act_state == XPC_P_INACTIVE) { | 952 | part->act_state == XPC_P_INACTIVE) { |
992 | continue; | 953 | continue; |
993 | } | 954 | } |
994 | 955 | ||
@@ -997,47 +958,46 @@ xpc_do_exit(enum xpc_retval reason) | |||
997 | XPC_DEACTIVATE_PARTITION(part, reason); | 958 | XPC_DEACTIVATE_PARTITION(part, reason); |
998 | 959 | ||
999 | if (part->disengage_request_timeout > | 960 | if (part->disengage_request_timeout > |
1000 | disengage_request_timeout) { | 961 | disengage_request_timeout) { |
1001 | disengage_request_timeout = | 962 | disengage_request_timeout = |
1002 | part->disengage_request_timeout; | 963 | part->disengage_request_timeout; |
1003 | } | 964 | } |
1004 | } | 965 | } |
1005 | 966 | ||
1006 | if (xpc_partition_engaged(-1UL)) { | 967 | if (xpc_partition_engaged(-1UL)) { |
1007 | if (time_after(jiffies, printmsg_time)) { | 968 | if (time_after(jiffies, printmsg_time)) { |
1008 | dev_info(xpc_part, "waiting for remote " | 969 | dev_info(xpc_part, "waiting for remote " |
1009 | "partitions to disengage, timeout in " | 970 | "partitions to disengage, timeout in " |
1010 | "%ld seconds\n", | 971 | "%ld seconds\n", |
1011 | (disengage_request_timeout - jiffies) | 972 | (disengage_request_timeout - jiffies) |
1012 | / HZ); | 973 | / HZ); |
1013 | printmsg_time = jiffies + | 974 | printmsg_time = jiffies + |
1014 | (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); | 975 | (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); |
1015 | printed_waiting_msg = 1; | 976 | printed_waiting_msg = 1; |
1016 | } | 977 | } |
1017 | 978 | ||
1018 | } else if (active_part_count > 0) { | 979 | } else if (active_part_count > 0) { |
1019 | if (printed_waiting_msg) { | 980 | if (printed_waiting_msg) { |
1020 | dev_info(xpc_part, "waiting for local partition" | 981 | dev_info(xpc_part, "waiting for local partition" |
1021 | " to disengage\n"); | 982 | " to disengage\n"); |
1022 | printed_waiting_msg = 0; | 983 | printed_waiting_msg = 0; |
1023 | } | 984 | } |
1024 | 985 | ||
1025 | } else { | 986 | } else { |
1026 | if (!xpc_disengage_request_timedout) { | 987 | if (!xpc_disengage_request_timedout) { |
1027 | dev_info(xpc_part, "all partitions have " | 988 | dev_info(xpc_part, "all partitions have " |
1028 | "disengaged\n"); | 989 | "disengaged\n"); |
1029 | } | 990 | } |
1030 | break; | 991 | break; |
1031 | } | 992 | } |
1032 | 993 | ||
1033 | /* sleep for a 1/3 of a second or so */ | 994 | /* sleep for a 1/3 of a second or so */ |
1034 | (void) msleep_interruptible(300); | 995 | (void)msleep_interruptible(300); |
1035 | 996 | ||
1036 | } while (1); | 997 | } while (1); |
1037 | 998 | ||
1038 | DBUG_ON(xpc_partition_engaged(-1UL)); | 999 | DBUG_ON(xpc_partition_engaged(-1UL)); |
1039 | 1000 | ||
1040 | |||
1041 | /* indicate to others that our reserved page is uninitialized */ | 1001 | /* indicate to others that our reserved page is uninitialized */ |
1042 | xpc_rsvd_page->vars_pa = 0; | 1002 | xpc_rsvd_page->vars_pa = 0; |
1043 | 1003 | ||
@@ -1047,16 +1007,15 @@ xpc_do_exit(enum xpc_retval reason) | |||
1047 | 1007 | ||
1048 | if (reason == xpcUnloading) { | 1008 | if (reason == xpcUnloading) { |
1049 | /* take ourselves off of the reboot_notifier_list */ | 1009 | /* take ourselves off of the reboot_notifier_list */ |
1050 | (void) unregister_reboot_notifier(&xpc_reboot_notifier); | 1010 | (void)unregister_reboot_notifier(&xpc_reboot_notifier); |
1051 | 1011 | ||
1052 | /* take ourselves off of the die_notifier list */ | 1012 | /* take ourselves off of the die_notifier list */ |
1053 | (void) unregister_die_notifier(&xpc_die_notifier); | 1013 | (void)unregister_die_notifier(&xpc_die_notifier); |
1054 | } | 1014 | } |
1055 | 1015 | ||
1056 | /* close down protections for IPI operations */ | 1016 | /* close down protections for IPI operations */ |
1057 | xpc_restrict_IPI_ops(); | 1017 | xpc_restrict_IPI_ops(); |
1058 | 1018 | ||
1059 | |||
1060 | /* clear the interface to XPC's functions */ | 1019 | /* clear the interface to XPC's functions */ |
1061 | xpc_clear_interface(); | 1020 | xpc_clear_interface(); |
1062 | 1021 | ||
@@ -1067,7 +1026,6 @@ xpc_do_exit(enum xpc_retval reason) | |||
1067 | kfree(xpc_remote_copy_buffer_base); | 1026 | kfree(xpc_remote_copy_buffer_base); |
1068 | } | 1027 | } |
1069 | 1028 | ||
1070 | |||
1071 | /* | 1029 | /* |
1072 | * This function is called when the system is being rebooted. | 1030 | * This function is called when the system is being rebooted. |
1073 | */ | 1031 | */ |
@@ -1076,7 +1034,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) | |||
1076 | { | 1034 | { |
1077 | enum xpc_retval reason; | 1035 | enum xpc_retval reason; |
1078 | 1036 | ||
1079 | |||
1080 | switch (event) { | 1037 | switch (event) { |
1081 | case SYS_RESTART: | 1038 | case SYS_RESTART: |
1082 | reason = xpcSystemReboot; | 1039 | reason = xpcSystemReboot; |
@@ -1095,7 +1052,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) | |||
1095 | return NOTIFY_DONE; | 1052 | return NOTIFY_DONE; |
1096 | } | 1053 | } |
1097 | 1054 | ||
1098 | |||
1099 | /* | 1055 | /* |
1100 | * Notify other partitions to disengage from all references to our memory. | 1056 | * Notify other partitions to disengage from all references to our memory. |
1101 | */ | 1057 | */ |
@@ -1107,17 +1063,15 @@ xpc_die_disengage(void) | |||
1107 | unsigned long engaged; | 1063 | unsigned long engaged; |
1108 | long time, printmsg_time, disengage_request_timeout; | 1064 | long time, printmsg_time, disengage_request_timeout; |
1109 | 1065 | ||
1110 | |||
1111 | /* keep xpc_hb_checker thread from doing anything (just in case) */ | 1066 | /* keep xpc_hb_checker thread from doing anything (just in case) */ |
1112 | xpc_exiting = 1; | 1067 | xpc_exiting = 1; |
1113 | 1068 | ||
1114 | xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */ | 1069 | xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */ |
1115 | 1070 | ||
1116 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { | 1071 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { |
1117 | part = &xpc_partitions[partid]; | 1072 | part = &xpc_partitions[partid]; |
1118 | 1073 | ||
1119 | if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part-> | 1074 | if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { |
1120 | remote_vars_version)) { | ||
1121 | 1075 | ||
1122 | /* just in case it was left set by an earlier XPC */ | 1076 | /* just in case it was left set by an earlier XPC */ |
1123 | xpc_clear_partition_engaged(1UL << partid); | 1077 | xpc_clear_partition_engaged(1UL << partid); |
@@ -1125,7 +1079,7 @@ xpc_die_disengage(void) | |||
1125 | } | 1079 | } |
1126 | 1080 | ||
1127 | if (xpc_partition_engaged(1UL << partid) || | 1081 | if (xpc_partition_engaged(1UL << partid) || |
1128 | part->act_state != XPC_P_INACTIVE) { | 1082 | part->act_state != XPC_P_INACTIVE) { |
1129 | xpc_request_partition_disengage(part); | 1083 | xpc_request_partition_disengage(part); |
1130 | xpc_mark_partition_disengaged(part); | 1084 | xpc_mark_partition_disengaged(part); |
1131 | xpc_IPI_send_disengage(part); | 1085 | xpc_IPI_send_disengage(part); |
@@ -1134,9 +1088,9 @@ xpc_die_disengage(void) | |||
1134 | 1088 | ||
1135 | time = rtc_time(); | 1089 | time = rtc_time(); |
1136 | printmsg_time = time + | 1090 | printmsg_time = time + |
1137 | (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second); | 1091 | (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second); |
1138 | disengage_request_timeout = time + | 1092 | disengage_request_timeout = time + |
1139 | (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second); | 1093 | (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second); |
1140 | 1094 | ||
1141 | /* wait for all other partitions to disengage from us */ | 1095 | /* wait for all other partitions to disengage from us */ |
1142 | 1096 | ||
@@ -1152,8 +1106,8 @@ xpc_die_disengage(void) | |||
1152 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { | 1106 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { |
1153 | if (engaged & (1UL << partid)) { | 1107 | if (engaged & (1UL << partid)) { |
1154 | dev_info(xpc_part, "disengage from " | 1108 | dev_info(xpc_part, "disengage from " |
1155 | "remote partition %d timed " | 1109 | "remote partition %d timed " |
1156 | "out\n", partid); | 1110 | "out\n", partid); |
1157 | } | 1111 | } |
1158 | } | 1112 | } |
1159 | break; | 1113 | break; |
@@ -1161,17 +1115,16 @@ xpc_die_disengage(void) | |||
1161 | 1115 | ||
1162 | if (time >= printmsg_time) { | 1116 | if (time >= printmsg_time) { |
1163 | dev_info(xpc_part, "waiting for remote partitions to " | 1117 | dev_info(xpc_part, "waiting for remote partitions to " |
1164 | "disengage, timeout in %ld seconds\n", | 1118 | "disengage, timeout in %ld seconds\n", |
1165 | (disengage_request_timeout - time) / | 1119 | (disengage_request_timeout - time) / |
1166 | sn_rtc_cycles_per_second); | 1120 | sn_rtc_cycles_per_second); |
1167 | printmsg_time = time + | 1121 | printmsg_time = time + |
1168 | (XPC_DISENGAGE_PRINTMSG_INTERVAL * | 1122 | (XPC_DISENGAGE_PRINTMSG_INTERVAL * |
1169 | sn_rtc_cycles_per_second); | 1123 | sn_rtc_cycles_per_second); |
1170 | } | 1124 | } |
1171 | } | 1125 | } |
1172 | } | 1126 | } |
1173 | 1127 | ||
1174 | |||
1175 | /* | 1128 | /* |
1176 | * This function is called when the system is being restarted or halted due | 1129 | * This function is called when the system is being restarted or halted due |
1177 | * to some sort of system failure. If this is the case we need to notify the | 1130 | * to some sort of system failure. If this is the case we need to notify the |
@@ -1217,7 +1170,6 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) | |||
1217 | return NOTIFY_DONE; | 1170 | return NOTIFY_DONE; |
1218 | } | 1171 | } |
1219 | 1172 | ||
1220 | |||
1221 | int __init | 1173 | int __init |
1222 | xpc_init(void) | 1174 | xpc_init(void) |
1223 | { | 1175 | { |
@@ -1227,16 +1179,15 @@ xpc_init(void) | |||
1227 | pid_t pid; | 1179 | pid_t pid; |
1228 | size_t buf_size; | 1180 | size_t buf_size; |
1229 | 1181 | ||
1230 | |||
1231 | if (!ia64_platform_is("sn2")) { | 1182 | if (!ia64_platform_is("sn2")) { |
1232 | return -ENODEV; | 1183 | return -ENODEV; |
1233 | } | 1184 | } |
1234 | 1185 | ||
1235 | |||
1236 | buf_size = max(XPC_RP_VARS_SIZE, | 1186 | buf_size = max(XPC_RP_VARS_SIZE, |
1237 | XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES); | 1187 | XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES); |
1238 | xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size, | 1188 | xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size, |
1239 | GFP_KERNEL, &xpc_remote_copy_buffer_base); | 1189 | GFP_KERNEL, |
1190 | &xpc_remote_copy_buffer_base); | ||
1240 | if (xpc_remote_copy_buffer == NULL) | 1191 | if (xpc_remote_copy_buffer == NULL) |
1241 | return -ENOMEM; | 1192 | return -ENOMEM; |
1242 | 1193 | ||
@@ -1256,7 +1207,7 @@ xpc_init(void) | |||
1256 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { | 1207 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { |
1257 | part = &xpc_partitions[partid]; | 1208 | part = &xpc_partitions[partid]; |
1258 | 1209 | ||
1259 | DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part)); | 1210 | DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part)); |
1260 | 1211 | ||
1261 | part->act_IRQ_rcvd = 0; | 1212 | part->act_IRQ_rcvd = 0; |
1262 | spin_lock_init(&part->act_lock); | 1213 | spin_lock_init(&part->act_lock); |
@@ -1265,8 +1216,8 @@ xpc_init(void) | |||
1265 | 1216 | ||
1266 | init_timer(&part->disengage_request_timer); | 1217 | init_timer(&part->disengage_request_timer); |
1267 | part->disengage_request_timer.function = | 1218 | part->disengage_request_timer.function = |
1268 | xpc_timeout_partition_disengage_request; | 1219 | xpc_timeout_partition_disengage_request; |
1269 | part->disengage_request_timer.data = (unsigned long) part; | 1220 | part->disengage_request_timer.data = (unsigned long)part; |
1270 | 1221 | ||
1271 | part->setup_state = XPC_P_UNSET; | 1222 | part->setup_state = XPC_P_UNSET; |
1272 | init_waitqueue_head(&part->teardown_wq); | 1223 | init_waitqueue_head(&part->teardown_wq); |
@@ -1292,7 +1243,7 @@ xpc_init(void) | |||
1292 | * but rather immediately process the interrupt. | 1243 | * but rather immediately process the interrupt. |
1293 | */ | 1244 | */ |
1294 | ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0, | 1245 | ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0, |
1295 | "xpc hb", NULL); | 1246 | "xpc hb", NULL); |
1296 | if (ret != 0) { | 1247 | if (ret != 0) { |
1297 | dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " | 1248 | dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " |
1298 | "errno=%d\n", -ret); | 1249 | "errno=%d\n", -ret); |
@@ -1327,7 +1278,6 @@ xpc_init(void) | |||
1327 | return -EBUSY; | 1278 | return -EBUSY; |
1328 | } | 1279 | } |
1329 | 1280 | ||
1330 | |||
1331 | /* add ourselves to the reboot_notifier_list */ | 1281 | /* add ourselves to the reboot_notifier_list */ |
1332 | ret = register_reboot_notifier(&xpc_reboot_notifier); | 1282 | ret = register_reboot_notifier(&xpc_reboot_notifier); |
1333 | if (ret != 0) { | 1283 | if (ret != 0) { |
@@ -1355,10 +1305,10 @@ xpc_init(void) | |||
1355 | xpc_rsvd_page->vars_pa = 0; | 1305 | xpc_rsvd_page->vars_pa = 0; |
1356 | 1306 | ||
1357 | /* take ourselves off of the reboot_notifier_list */ | 1307 | /* take ourselves off of the reboot_notifier_list */ |
1358 | (void) unregister_reboot_notifier(&xpc_reboot_notifier); | 1308 | (void)unregister_reboot_notifier(&xpc_reboot_notifier); |
1359 | 1309 | ||
1360 | /* take ourselves off of the die_notifier list */ | 1310 | /* take ourselves off of the die_notifier list */ |
1361 | (void) unregister_die_notifier(&xpc_die_notifier); | 1311 | (void)unregister_die_notifier(&xpc_die_notifier); |
1362 | 1312 | ||
1363 | del_timer_sync(&xpc_hb_timer); | 1313 | del_timer_sync(&xpc_hb_timer); |
1364 | free_irq(SGI_XPC_ACTIVATE, NULL); | 1314 | free_irq(SGI_XPC_ACTIVATE, NULL); |
@@ -1372,7 +1322,6 @@ xpc_init(void) | |||
1372 | return -EBUSY; | 1322 | return -EBUSY; |
1373 | } | 1323 | } |
1374 | 1324 | ||
1375 | |||
1376 | /* | 1325 | /* |
1377 | * Startup a thread that will attempt to discover other partitions to | 1326 | * Startup a thread that will attempt to discover other partitions to |
1378 | * activate based on info provided by SAL. This new thread is short | 1327 | * activate based on info provided by SAL. This new thread is short |
@@ -1389,7 +1338,6 @@ xpc_init(void) | |||
1389 | return -EBUSY; | 1338 | return -EBUSY; |
1390 | } | 1339 | } |
1391 | 1340 | ||
1392 | |||
1393 | /* set the interface to point at XPC's functions */ | 1341 | /* set the interface to point at XPC's functions */ |
1394 | xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, | 1342 | xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, |
1395 | xpc_initiate_allocate, xpc_initiate_send, | 1343 | xpc_initiate_allocate, xpc_initiate_send, |
@@ -1398,16 +1346,16 @@ xpc_init(void) | |||
1398 | 1346 | ||
1399 | return 0; | 1347 | return 0; |
1400 | } | 1348 | } |
1401 | module_init(xpc_init); | ||
1402 | 1349 | ||
1350 | module_init(xpc_init); | ||
1403 | 1351 | ||
1404 | void __exit | 1352 | void __exit |
1405 | xpc_exit(void) | 1353 | xpc_exit(void) |
1406 | { | 1354 | { |
1407 | xpc_do_exit(xpcUnloading); | 1355 | xpc_do_exit(xpcUnloading); |
1408 | } | 1356 | } |
1409 | module_exit(xpc_exit); | ||
1410 | 1357 | ||
1358 | module_exit(xpc_exit); | ||
1411 | 1359 | ||
1412 | MODULE_AUTHOR("Silicon Graphics, Inc."); | 1360 | MODULE_AUTHOR("Silicon Graphics, Inc."); |
1413 | MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); | 1361 | MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); |
@@ -1415,17 +1363,16 @@ MODULE_LICENSE("GPL"); | |||
1415 | 1363 | ||
1416 | module_param(xpc_hb_interval, int, 0); | 1364 | module_param(xpc_hb_interval, int, 0); |
1417 | MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " | 1365 | MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " |
1418 | "heartbeat increments."); | 1366 | "heartbeat increments."); |
1419 | 1367 | ||
1420 | module_param(xpc_hb_check_interval, int, 0); | 1368 | module_param(xpc_hb_check_interval, int, 0); |
1421 | MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " | 1369 | MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " |
1422 | "heartbeat checks."); | 1370 | "heartbeat checks."); |
1423 | 1371 | ||
1424 | module_param(xpc_disengage_request_timelimit, int, 0); | 1372 | module_param(xpc_disengage_request_timelimit, int, 0); |
1425 | MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait " | 1373 | MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait " |
1426 | "for disengage request to complete."); | 1374 | "for disengage request to complete."); |
1427 | 1375 | ||
1428 | module_param(xpc_kdebug_ignore, int, 0); | 1376 | module_param(xpc_kdebug_ignore, int, 0); |
1429 | MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " | 1377 | MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " |
1430 | "other partitions when dropping into kdebug."); | 1378 | "other partitions when dropping into kdebug."); |
1431 | |||
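
Beyond the reformatting, one pattern in this file is worth calling out for readers skimming the hunks: xpc_hb_beater() (old lines 224-239 above) produces the heartbeat with a self-rearming kernel timer whose period is the sysctl-tunable xpc_hb_interval. A minimal sketch of that shape, written against the pre-4.15 timer_list API this code uses (the names mirror the real ones, but the body is a placeholder, not the commit's code):

```c
#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list xpc_hb_timer;	/* .function/.data set once at init */
static int xpc_hb_interval = 5;		/* placeholder default; the real value
					 * comes from XPC_HB_DEFAULT_INTERVAL and
					 * is exposed via the xpc/hb sysctl
					 * directory defined above */

/* Timer callback: do the periodic heartbeat work, then re-arm the same
 * timer one interval ahead. xpc_hb_beater() in the diff follows this
 * shape, and del_timer_sync(&xpc_hb_timer) on the exit path stops it. */
static void xpc_hb_beater(unsigned long dummy)
{
	/* ... bump the heartbeat counter that remote partitions watch ... */

	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}
```

xpc_hb_checker() runs separately from the beater: it sleeps on xpc_act_IRQ_wq until an activate IRQ arrives, the (independently tunable) hb_check_interval timeout expires, or the module exits, which is why the wait_event_interruptible() condition in the diff tests all three.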