Diffstat (limited to 'drivers/misc/sgi-xp/xpc_main.c')
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c | 167 +++++++++++++----------------
1 file changed, 56 insertions(+), 111 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index d81a2dd787ac..f673ba90eb0e 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -46,17 +46,16 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/syscalls.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/completion.h>
 #include <linux/kdebug.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/uaccess.h>
 #include "xpc.h"
 
 /* define two XPC debug device structures to be used with dev_dbg() et al */
@@ -91,7 +90,7 @@ static int xpc_hb_check_min_interval = 10;
 static int xpc_hb_check_max_interval = 120;
 
 int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
-static int xpc_disengage_request_min_timelimit = 0;
+static int xpc_disengage_request_min_timelimit;	/* = 0 */
 static int xpc_disengage_request_max_timelimit = 120;
 
 static ctl_table xpc_sys_xpc_hb_dir[] = {
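
The dropped "= 0" above is a checkpatch cleanup, not a behavior change. A minimal sketch of the convention, with hypothetical variable names:

	/* Uninitialized statics land in .bss and are zeroed at load time,
	 * so an explicit "= 0" is redundant (and on older toolchains could
	 * push the object into .data); checkpatch warns about it.  The
	 * trailing comment preserves the intent without the initializer.
	 */
	static int example_min_timelimit;		/* = 0, implicitly */
	static int example_max_timelimit = 120;	/* nonzero stays explicit */
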
@@ -213,9 +212,8 @@ xpc_hb_beater(unsigned long dummy)
 {
 	xpc_vars->heartbeat++;
 
-	if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
+	if (time_after_eq(jiffies, xpc_hb_check_timeout))
 		wake_up_interruptible(&xpc_act_IRQ_wq);
-	}
 
 	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
 	add_timer(&xpc_hb_timer);
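
The hunk above leaves the self-rearming timer pattern intact: the handler does its periodic work, then re-arms itself. A minimal sketch of that pattern with the init_timer()-era API this file uses (hypothetical names; xpc_init() below does the equivalent setup for xpc_hb_beater):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list example_timer;

	/* timer handlers of this era take an unsigned long cookie */
	static void example_beat(unsigned long dummy)
	{
		/* ... periodic work, e.g. bump a heartbeat counter ... */

		example_timer.expires = jiffies + HZ;	/* fire again in ~1s */
		add_timer(&example_timer);		/* re-arm ourselves */
	}

	static void example_start(void)
	{
		init_timer(&example_timer);
		example_timer.function = example_beat;
		example_beat(0);			/* arm the first tick */
	}
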
@@ -234,15 +232,13 @@ xpc_hb_checker(void *ignore)
 
 	/* this thread was marked active by xpc_hb_init() */
 
-	daemonize(XPC_HB_CHECK_THREAD_NAME);
-
 	set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
 
 	/* set our heartbeating to other partitions into motion */
 	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
 	xpc_hb_beater(0);
 
-	while (!(volatile int)xpc_exiting) {
+	while (!xpc_exiting) {
 
 		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
 			"been received\n",
@@ -287,7 +283,7 @@ xpc_hb_checker(void *ignore)
 				 atomic_read(&xpc_act_IRQ_rcvd)
 				 || time_after_eq(jiffies,
 						  xpc_hb_check_timeout) ||
-				 (volatile int)xpc_exiting));
+				 xpc_exiting));
 	}
 
 	dev_dbg(xpc_part, "heartbeat checker is exiting\n");
@@ -305,8 +301,6 @@ xpc_hb_checker(void *ignore)
 static int
 xpc_initiate_discovery(void *ignore)
 {
-	daemonize(XPC_DISCOVERY_THREAD_NAME);
-
 	xpc_discovery();
 
 	dev_dbg(xpc_part, "discovery thread is exiting\n");
@@ -338,9 +332,8 @@ xpc_make_first_contact(struct xpc_partition *part)
 		/* wait a 1/4 of a second or so */
 		(void)msleep_interruptible(250);
 
-		if (part->act_state == XPC_P_DEACTIVATING) {
+		if (part->act_state == XPC_P_DEACTIVATING)
 			return part->reason;
-		}
 	}
 
 	return xpc_mark_partition_active(part);
@@ -382,22 +375,12 @@ xpc_channel_mgr(struct xpc_partition *part)
 		 */
 		atomic_dec(&part->channel_mgr_requests);
 		(void)wait_event_interruptible(part->channel_mgr_wq,
-					       (atomic_read
-						(&part->channel_mgr_requests) >
-						0 ||
-						(volatile u64)part->
-						local_IPI_amo != 0 ||
-						((volatile u8)part->act_state ==
-						 XPC_P_DEACTIVATING &&
-						 atomic_read(&part->
-							     nchannels_active)
-						 == 0 &&
-						 xpc_partition_disengaged
-						 (part))));
+				(atomic_read(&part->channel_mgr_requests) > 0 ||
+				 part->local_IPI_amo != 0 ||
+				 (part->act_state == XPC_P_DEACTIVATING &&
+				  atomic_read(&part->nchannels_active) == 0 &&
+				  xpc_partition_disengaged(part))));
 		atomic_set(&part->channel_mgr_requests, 1);
-
-		// >>> Does it need to wakeup periodically as well? In case we
-		// >>> miscalculated the #of kthreads to wakeup or create?
 	}
 }
 
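
Besides unwrapping the condition, the hunk above drops the (volatile u64) and (volatile u8) casts. That is safe because wait_event_interruptible() re-evaluates its condition after every wakeup, with the barriers already supplied by the sleep/wake path, so the casts never provided real synchronization. A minimal sketch under hypothetical names:

	#include <linux/wait.h>
	#include <linux/sched.h>
	#include <asm/atomic.h>

	static DECLARE_WAIT_QUEUE_HEAD(example_wq);
	static atomic_t example_requests = ATOMIC_INIT(0);
	static int example_exiting;

	static int example_wait(void)
	{
		/* the condition is re-read on every pass through the wait
		 * loop, so no volatile qualification of its operands is
		 * needed */
		return wait_event_interruptible(example_wq,
					atomic_read(&example_requests) > 0 ||
					example_exiting);
	}
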
@@ -423,9 +406,8 @@ xpc_partition_up(struct xpc_partition *part)
 
 	dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
 
-	if (xpc_setup_infrastructure(part) != xpcSuccess) {
+	if (xpc_setup_infrastructure(part) != xpcSuccess)
 		return;
-	}
 
 	/*
 	 * The kthread that XPC HB called us with will become the
@@ -436,9 +418,8 @@ xpc_partition_up(struct xpc_partition *part)
 
 	(void)xpc_part_ref(part);	/* this will always succeed */
 
-	if (xpc_make_first_contact(part) == xpcSuccess) {
+	if (xpc_make_first_contact(part) == xpcSuccess)
 		xpc_channel_mgr(part);
-	}
 
 	xpc_part_deref(part);
 
@@ -451,8 +432,6 @@ xpc_activating(void *__partid)
 	partid_t partid = (u64)__partid;
 	struct xpc_partition *part = &xpc_partitions[partid];
 	unsigned long irq_flags;
-	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
-	int ret;
 
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
 
@@ -474,21 +453,6 @@ xpc_activating(void *__partid)
 
 	dev_dbg(xpc_part, "bringing partition %d up\n", partid);
 
-	daemonize("xpc%02d", partid);
-
-	/*
-	 * This thread needs to run at a realtime priority to prevent a
-	 * significant performance degradation.
-	 */
-	ret = sched_setscheduler(current, SCHED_FIFO, &param);
-	if (ret != 0) {
-		dev_warn(xpc_part, "unable to set pid %d to a realtime "
-			 "priority, ret=%d\n", current->pid, ret);
-	}
-
-	/* allow this thread and its children to run on any CPU */
-	set_cpus_allowed(current, CPU_MASK_ALL);
-
 	/*
 	 * Register the remote partition's AMOs with SAL so it can handle
 	 * and cleanup errors within that address range should the remote
@@ -537,7 +501,7 @@ xpc_activate_partition(struct xpc_partition *part)
 {
 	partid_t partid = XPC_PARTID(part);
 	unsigned long irq_flags;
-	pid_t pid;
+	struct task_struct *kthread;
 
 	spin_lock_irqsave(&part->act_lock, irq_flags);
 
@@ -548,9 +512,9 @@ xpc_activate_partition(struct xpc_partition *part)
 
 	spin_unlock_irqrestore(&part->act_lock, irq_flags);
 
-	pid = kernel_thread(xpc_activating, (void *)((u64)partid), 0);
-
-	if (unlikely(pid <= 0)) {
+	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
+			      partid);
+	if (IS_ERR(kthread)) {
 		spin_lock_irqsave(&part->act_lock, irq_flags);
 		part->act_state = XPC_P_INACTIVE;
 		XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
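
This is the core conversion the patch applies throughout the file: a kernel_thread() call plus an in-thread daemonize() becomes a single kthread_run(), the printf-style name argument replaces daemonize()'s, and failure comes back as an ERR_PTR() value rather than a negative pid. A sketch under hypothetical names, with the old call shown in a comment:

	#include <linux/kthread.h>
	#include <linux/err.h>

	static int example_fn(void *data)
	{
		/* no daemonize() needed; kthreads start detached from
		 * userspace resources */
		return 0;
	}

	static int example_spawn(int partid)
	{
		struct task_struct *kthread;

		/* old way: pid = kernel_thread(example_fn, NULL, 0);
		 * with pid <= 0 signalling failure */
		kthread = kthread_run(example_fn, NULL, "xpc%02d", partid);
		if (IS_ERR(kthread))
			return PTR_ERR(kthread);	/* creation failed */
		return 0;
	}
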
@@ -562,7 +526,7 @@
  * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
  * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
  * than one partition, we use an AMO_t structure per partition to indicate
- * whether a partition has sent an IPI or not. >>> If it has, then wake up the
+ * whether a partition has sent an IPI or not. If it has, then wake up the
  * associated kthread to handle it.
  *
  * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
@@ -628,16 +592,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
 		wake_up_nr(&ch->idle_wq, wakeup);
 	}
 
-	if (needed <= 0) {
+	if (needed <= 0)
 		return;
-	}
 
 	if (needed + assigned > ch->kthreads_assigned_limit) {
 		needed = ch->kthreads_assigned_limit - assigned;
-		// >>>should never be less than 0
-		if (needed <= 0) {
+		if (needed <= 0)
 			return;
-		}
 	}
 
 	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
@@ -655,9 +616,8 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
 	do {
 		/* deliver messages to their intended recipients */
 
-		while ((volatile s64)ch->w_local_GP.get <
-		       (volatile s64)ch->w_remote_GP.put &&
-		       !((volatile u32)ch->flags & XPC_C_DISCONNECTING)) {
+		while (ch->w_local_GP.get < ch->w_remote_GP.put &&
+		       !(ch->flags & XPC_C_DISCONNECTING)) {
 			xpc_deliver_msg(ch);
 		}
 
@@ -672,21 +632,16 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
 			"wait_event_interruptible_exclusive()\n");
 
 		(void)wait_event_interruptible_exclusive(ch->idle_wq,
-							 ((volatile s64)ch->
-							  w_local_GP.get <
-							  (volatile s64)ch->
-							  w_remote_GP.put ||
-							  ((volatile u32)ch->
-							   flags &
-							   XPC_C_DISCONNECTING)));
+				(ch->w_local_GP.get < ch->w_remote_GP.put ||
+				 (ch->flags & XPC_C_DISCONNECTING)));
 
 		atomic_dec(&ch->kthreads_idle);
 
-	} while (!((volatile u32)ch->flags & XPC_C_DISCONNECTING));
+	} while (!(ch->flags & XPC_C_DISCONNECTING));
 }
 
 static int
-xpc_daemonize_kthread(void *args)
+xpc_kthread_start(void *args)
 {
 	partid_t partid = XPC_UNPACK_ARG1(args);
 	u16 ch_number = XPC_UNPACK_ARG2(args);
@@ -695,8 +650,6 @@ xpc_daemonize_kthread(void *args)
 	int n_needed;
 	unsigned long irq_flags;
 
-	daemonize("xpc%02dc%d", partid, ch_number);
-
 	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
 		partid, ch_number);
 
@@ -725,9 +678,9 @@ xpc_daemonize_kthread(void *args)
 		 * need one less than total #of messages to deliver.
 		 */
 		n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
-		if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) {
+		if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
 			xpc_activate_kthreads(ch, n_needed);
-		}
+
 	} else {
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
 	}
@@ -783,9 +736,9 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 			    int ignore_disconnecting)
 {
 	unsigned long irq_flags;
-	pid_t pid;
 	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
+	struct task_struct *kthread;
 
 	while (needed-- > 0) {
 
@@ -812,8 +765,9 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 		(void)xpc_part_ref(part);
 		xpc_msgqueue_ref(ch);
 
-		pid = kernel_thread(xpc_daemonize_kthread, (void *)args, 0);
-		if (pid < 0) {
+		kthread = kthread_run(xpc_kthread_start, (void *)args,
+				      "xpc%02dc%d", ch->partid, ch->number);
+		if (IS_ERR(kthread)) {
 			/* the fork failed */
 
 			/*
@@ -823,7 +777,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 			 * to this channel are blocked in the channel's
 			 * registerer, because the only thing that will unblock
 			 * them is the xpcDisconnecting callout that this
-			 * failed kernel_thread would have made.
+			 * failed kthread_run() would have made.
 			 */
 
 			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
@@ -848,8 +802,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 			}
 			break;
 		}
-
-		ch->kthreads_created++;	// >>> temporary debug only!!!
 	}
 }
 
@@ -866,9 +818,8 @@ xpc_disconnect_wait(int ch_number)
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
 
-		if (!xpc_part_ref(part)) {
+		if (!xpc_part_ref(part))
 			continue;
-		}
 
 		ch = &part->channels[ch_number];
 
@@ -898,9 +849,8 @@ xpc_disconnect_wait(int ch_number)
 		ch->flags &= ~XPC_C_WDISCONNECT;
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
 
-		if (wakeup_channel_mgr) {
+		if (wakeup_channel_mgr)
 			xpc_wakeup_channel_mgr(part);
-		}
 
 		xpc_part_deref(part);
 	}
@@ -1019,9 +969,8 @@ xpc_do_exit(enum xpc_retval reason)
 	/* clear the interface to XPC's functions */
 	xpc_clear_interface();
 
-	if (xpc_sysctl) {
+	if (xpc_sysctl)
 		unregister_sysctl_table(xpc_sysctl);
-	}
 
 	kfree(xpc_remote_copy_buffer_base);
 }
@@ -1071,7 +1020,8 @@ xpc_die_disengage(void)
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
 
-		if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
+		if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
+		    remote_vars_version)) {
 
 			/* just in case it was left set by an earlier XPC */
 			xpc_clear_partition_engaged(1UL << partid);
@@ -1144,9 +1094,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 
 	case DIE_KDEBUG_ENTER:
 		/* Should lack of heartbeat be ignored by other partitions? */
-		if (!xpc_kdebug_ignore) {
+		if (!xpc_kdebug_ignore)
 			break;
-		}
+
 		/* fall through */
 	case DIE_MCA_MONARCH_ENTER:
 	case DIE_INIT_MONARCH_ENTER:
@@ -1156,9 +1106,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 
 	case DIE_KDEBUG_LEAVE:
 		/* Is lack of heartbeat being ignored by other partitions? */
-		if (!xpc_kdebug_ignore) {
+		if (!xpc_kdebug_ignore)
 			break;
-		}
+
 		/* fall through */
 	case DIE_MCA_MONARCH_LEAVE:
 	case DIE_INIT_MONARCH_LEAVE:
@@ -1176,18 +1126,17 @@ xpc_init(void)
 	int ret;
 	partid_t partid;
 	struct xpc_partition *part;
-	pid_t pid;
+	struct task_struct *kthread;
 	size_t buf_size;
 
-	if (!ia64_platform_is("sn2")) {
+	if (!ia64_platform_is("sn2"))
 		return -ENODEV;
-	}
 
 	buf_size = max(XPC_RP_VARS_SIZE,
 		       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
 	xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
 							       GFP_KERNEL,
 						  &xpc_remote_copy_buffer_base);
 	if (xpc_remote_copy_buffer == NULL)
 		return -ENOMEM;
 
@@ -1250,9 +1199,8 @@ xpc_init(void)
 
 	xpc_restrict_IPI_ops();
 
-	if (xpc_sysctl) {
+	if (xpc_sysctl)
 		unregister_sysctl_table(xpc_sysctl);
-	}
 
 	kfree(xpc_remote_copy_buffer_base);
 	return -EBUSY;
@@ -1270,9 +1218,8 @@ xpc_init(void)
 	free_irq(SGI_XPC_ACTIVATE, NULL);
 	xpc_restrict_IPI_ops();
 
-	if (xpc_sysctl) {
+	if (xpc_sysctl)
 		unregister_sysctl_table(xpc_sysctl);
-	}
 
 	kfree(xpc_remote_copy_buffer_base);
 	return -EBUSY;
@@ -1280,15 +1227,13 @@ xpc_init(void)
 
 	/* add ourselves to the reboot_notifier_list */
 	ret = register_reboot_notifier(&xpc_reboot_notifier);
-	if (ret != 0) {
+	if (ret != 0)
 		dev_warn(xpc_part, "can't register reboot notifier\n");
-	}
 
 	/* add ourselves to the die_notifier list */
 	ret = register_die_notifier(&xpc_die_notifier);
-	if (ret != 0) {
+	if (ret != 0)
 		dev_warn(xpc_part, "can't register die notifier\n");
-	}
 
 	init_timer(&xpc_hb_timer);
 	xpc_hb_timer.function = xpc_hb_beater;
@@ -1297,8 +1242,8 @@ xpc_init(void)
 	 * The real work-horse behind xpc. This processes incoming
 	 * interrupts and monitors remote heartbeats.
 	 */
-	pid = kernel_thread(xpc_hb_checker, NULL, 0);
-	if (pid < 0) {
+	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
+	if (IS_ERR(kthread)) {
 		dev_err(xpc_part, "failed while forking hb check thread\n");
 
 		/* indicate to others that our reserved page is uninitialized */
@@ -1314,9 +1259,8 @@ xpc_init(void)
 		free_irq(SGI_XPC_ACTIVATE, NULL);
 		xpc_restrict_IPI_ops();
 
-		if (xpc_sysctl) {
+		if (xpc_sysctl)
 			unregister_sysctl_table(xpc_sysctl);
-		}
 
 		kfree(xpc_remote_copy_buffer_base);
 		return -EBUSY;
@@ -1327,8 +1271,9 @@ xpc_init(void)
 	 * activate based on info provided by SAL. This new thread is short
 	 * lived and will exit once discovery is complete.
 	 */
-	pid = kernel_thread(xpc_initiate_discovery, NULL, 0);
-	if (pid < 0) {
+	kthread = kthread_run(xpc_initiate_discovery, NULL,
+			      XPC_DISCOVERY_THREAD_NAME);
+	if (IS_ERR(kthread)) {
 		dev_err(xpc_part, "failed while forking discovery thread\n");
 
 		/* mark this new thread as a non-starter */