author     Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>   2009-06-15 05:18:43 -0400
committer  H. Peter Anvin <hpa@zytor.com>                   2009-06-16 19:56:05 -0400
commit     7fb06fc9672b947424e05871243a4c8e19ec3bce (patch)
tree       7674eb386c8719167c8c7846cac2c3a98f8131ff /arch
parent     33edbf02a92771fa2a81e41084a44ba874e3a5a5 (diff)
x86, mce: cleanup mce_start()
Simplify interface of mce_start():
- no_way_out = mce_start(no_way_out, &order);
+ order = mce_start(&no_way_out);
Now the Monarch and the Subjects share the same exit (return) path in the usual case.
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
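
To illustrate the new shape of the rendezvous, here is a minimal user-space sketch (not the kernel code): C11 seq_cst atomics stand in for the kernel's atomic ops and smp_*mb() barriers, the timeout handling is omitted, and global_nwo, mce_callin and mce_executing are hypothetical local stand-ins for the MCE globals. The point is the control flow after this patch: both the Monarch (order == 1) and the Subjects fall through to a single shared exit that caches the global no_way_out state and returns the callin order.

/* Minimal sketch, not kernel code. */
#include <stdatomic.h>

static atomic_int global_nwo;		/* stand-in for global_nwo */
static atomic_int mce_callin;		/* stand-in for mce_callin */
static atomic_int mce_executing;	/* stand-in for mce_executing */

/* Simplified mce_start(): timeouts omitted, busy-waits kept. */
static int mce_start_sketch(int *no_way_out, int cpus)
{
	int order;

	/* Publish our local no_way_out, then take a callin ticket. */
	atomic_fetch_add(&global_nwo, *no_way_out);
	order = atomic_fetch_add(&mce_callin, 1) + 1;

	/* Wait until every CPU has checked in. */
	while (atomic_load(&mce_callin) != cpus)
		;

	if (order == 1) {
		/* Monarch: start executing now, the others wait. */
		atomic_store(&mce_executing, 1);
	} else {
		/* Subject: wait for our turn in the original callin order. */
		while (atomic_load(&mce_executing) < order)
			;
	}

	/* Shared exit: cache the global no_way_out state for the caller. */
	*no_way_out = atomic_load(&global_nwo);
	return order;
}

The caller then gets both results from one call, mirroring the patched do_machine_check(): order = mce_start_sketch(&no_way_out, cpus);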
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce.c | 66
1 file changed, 31 insertions(+), 35 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index dda77215e9e2..739fd7eca0a4 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -691,23 +691,21 @@ static atomic_t global_nwo;
  * in the entry order.
  * TBD double check parallel CPU hotunplug
  */
-static int mce_start(int no_way_out, int *order)
+static int mce_start(int *no_way_out)
 {
-	int nwo;
+	int order;
 	int cpus = num_online_cpus();
 	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;
 
-	if (!timeout) {
-		*order = -1;
-		return no_way_out;
-	}
+	if (!timeout)
+		return -1;
 
-	atomic_add(no_way_out, &global_nwo);
+	atomic_add(*no_way_out, &global_nwo);
 	/*
 	 * global_nwo should be updated before mce_callin
 	 */
 	smp_wmb();
-	*order = atomic_add_return(1, &mce_callin);
+	order = atomic_add_return(1, &mce_callin);
 
 	/*
 	 * Wait for everyone.
@@ -715,8 +713,7 @@ static int mce_start(int no_way_out, int *order)
 	while (atomic_read(&mce_callin) != cpus) {
 		if (mce_timed_out(&timeout)) {
 			atomic_set(&global_nwo, 0);
-			*order = -1;
-			return no_way_out;
+			return -1;
 		}
 		ndelay(SPINUNIT);
 	}
@@ -725,34 +722,34 @@ static int mce_start(int no_way_out, int *order)
 	 * mce_callin should be read before global_nwo
 	 */
 	smp_rmb();
-	/*
-	 * Cache the global no_way_out state.
-	 */
-	nwo = atomic_read(&global_nwo);
 
-	/*
-	 * Monarch starts executing now, the others wait.
-	 */
-	if (*order == 1) {
-		atomic_set(&mce_executing, 1);
-		return nwo;
+	if (order == 1) {
+		/*
+		 * Monarch: Starts executing now, the others wait.
+		 */
+		atomic_set(&mce_executing, 1);
+	} else {
+		/*
+		 * Subject: Now start the scanning loop one by one in
+		 * the original callin order.
+		 * This way when there are any shared banks it will be
+		 * only seen by one CPU before cleared, avoiding duplicates.
+		 */
+		while (atomic_read(&mce_executing) < order) {
+			if (mce_timed_out(&timeout)) {
+				atomic_set(&global_nwo, 0);
+				return -1;
+			}
+			ndelay(SPINUNIT);
+		}
 	}
 
 	/*
-	 * Now start the scanning loop one by one
-	 * in the original callin order.
-	 * This way when there are any shared banks it will
-	 * be only seen by one CPU before cleared, avoiding duplicates.
+	 * Cache the global no_way_out state.
 	 */
-	while (atomic_read(&mce_executing) < *order) {
-		if (mce_timed_out(&timeout)) {
-			atomic_set(&global_nwo, 0);
-			*order = -1;
-			return no_way_out;
-		}
-		ndelay(SPINUNIT);
-	}
-	return nwo;
+	*no_way_out = atomic_read(&global_nwo);
+
+	return order;
 }
 
 /*
@@ -871,8 +868,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 	 * Establish sequential order between the CPUs entering the machine
 	 * check handler.
 	 */
-	int order = -1;
-
+	int order;
 	/*
 	 * If no_way_out gets set, there is no safe way to recover from this
 	 * MCE. If tolerant is cranked up, we'll try anyway.
@@ -917,7 +913,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 	 * This way we don't report duplicated events on shared banks
 	 * because the first one to see it will clear it.
 	 */
-	no_way_out = mce_start(no_way_out, &order);
+	order = mce_start(&no_way_out);
 	for (i = 0; i < banks; i++) {
 		__clear_bit(i, toclear);
 		if (!bank[i])