diff options
author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2008-04-17 01:46:23 -0400 |
---|---|---|
committer | Heiko Carstens <heiko.carstens@de.ibm.com> | 2008-04-17 01:47:05 -0400 |
commit | 43ca5c3a1cefdaa09231d64485b8f676118bf1e0 (patch) | |
tree | 37e71a475b96b811935fc484cb0633701953b5c0 /arch/s390/kernel/process.c | |
parent | e1776856286bef076f400ec062b150b6f3c353cd (diff) |
[S390] Convert monitor calls to function calls.
Remove the program check generating monitor calls and use function
calls instead. There is no real advantage in using monitor calls,
but they do make debugging harder, because of all the program checks
they generate.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel/process.c')
-rw-r--r-- | arch/s390/kernel/process.c | 70 |
1 files changed, 30 insertions, 40 deletions
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index ce203154d8ce..eb768ce88672 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -76,6 +76,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
76 | * Need to know about CPUs going idle? | 76 | * Need to know about CPUs going idle? |
77 | */ | 77 | */ |
78 | static ATOMIC_NOTIFIER_HEAD(idle_chain); | 78 | static ATOMIC_NOTIFIER_HEAD(idle_chain); |
79 | DEFINE_PER_CPU(struct s390_idle_data, s390_idle); | ||
79 | 80 | ||
80 | int register_idle_notifier(struct notifier_block *nb) | 81 | int register_idle_notifier(struct notifier_block *nb) |
81 | { | 82 | { |
@@ -89,9 +90,33 @@ int unregister_idle_notifier(struct notifier_block *nb) | |||
89 | } | 90 | } |
90 | EXPORT_SYMBOL(unregister_idle_notifier); | 91 | EXPORT_SYMBOL(unregister_idle_notifier); |
91 | 92 | ||
92 | void do_monitor_call(struct pt_regs *regs, long interruption_code) | 93 | static int s390_idle_enter(void) |
94 | { | ||
95 | struct s390_idle_data *idle; | ||
96 | int nr_calls = 0; | ||
97 | void *hcpu; | ||
98 | int rc; | ||
99 | |||
100 | hcpu = (void *)(long)smp_processor_id(); | ||
101 | rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1, | ||
102 | &nr_calls); | ||
103 | if (rc == NOTIFY_BAD) { | ||
104 | nr_calls--; | ||
105 | __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | ||
106 | hcpu, nr_calls, NULL); | ||
107 | return rc; | ||
108 | } | ||
109 | idle = &__get_cpu_var(s390_idle); | ||
110 | spin_lock(&idle->lock); | ||
111 | idle->idle_count++; | ||
112 | idle->in_idle = 1; | ||
113 | idle->idle_enter = get_clock(); | ||
114 | spin_unlock(&idle->lock); | ||
115 | return NOTIFY_OK; | ||
116 | } | ||
117 | |||
118 | void s390_idle_leave(void) | ||
93 | { | 119 | { |
94 | #ifdef CONFIG_SMP | ||
95 | struct s390_idle_data *idle; | 120 | struct s390_idle_data *idle; |
96 | 121 | ||
97 | idle = &__get_cpu_var(s390_idle); | 122 | idle = &__get_cpu_var(s390_idle); |
@@ -99,10 +124,6 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code) | |||
99 | idle->idle_time += get_clock() - idle->idle_enter; | 124 | idle->idle_time += get_clock() - idle->idle_enter; |
100 | idle->in_idle = 0; | 125 | idle->in_idle = 0; |
101 | spin_unlock(&idle->lock); | 126 | spin_unlock(&idle->lock); |
102 | #endif | ||
103 | /* disable monitor call class 0 */ | ||
104 | __ctl_clear_bit(8, 15); | ||
105 | |||
106 | atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | 127 | atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, |
107 | (void *)(long) smp_processor_id()); | 128 | (void *)(long) smp_processor_id()); |
108 | } | 129 | } |
@@ -113,61 +134,30 @@ extern void s390_handle_mcck(void); | |||
113 | */ | 134 | */ |
114 | static void default_idle(void) | 135 | static void default_idle(void) |
115 | { | 136 | { |
116 | int cpu, rc; | ||
117 | int nr_calls = 0; | ||
118 | void *hcpu; | ||
119 | #ifdef CONFIG_SMP | ||
120 | struct s390_idle_data *idle; | ||
121 | #endif | ||
122 | |||
123 | /* CPU is going idle. */ | 137 | /* CPU is going idle. */ |
124 | cpu = smp_processor_id(); | ||
125 | hcpu = (void *)(long)cpu; | ||
126 | local_irq_disable(); | 138 | local_irq_disable(); |
127 | if (need_resched()) { | 139 | if (need_resched()) { |
128 | local_irq_enable(); | 140 | local_irq_enable(); |
129 | return; | 141 | return; |
130 | } | 142 | } |
131 | 143 | if (s390_idle_enter() == NOTIFY_BAD) { | |
132 | rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1, | ||
133 | &nr_calls); | ||
134 | if (rc == NOTIFY_BAD) { | ||
135 | nr_calls--; | ||
136 | __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | ||
137 | hcpu, nr_calls, NULL); | ||
138 | local_irq_enable(); | 144 | local_irq_enable(); |
139 | return; | 145 | return; |
140 | } | 146 | } |
141 | |||
142 | /* enable monitor call class 0 */ | ||
143 | __ctl_set_bit(8, 15); | ||
144 | |||
145 | #ifdef CONFIG_HOTPLUG_CPU | 147 | #ifdef CONFIG_HOTPLUG_CPU |
146 | if (cpu_is_offline(cpu)) { | 148 | if (cpu_is_offline(smp_processor_id())) { |
147 | preempt_enable_no_resched(); | 149 | preempt_enable_no_resched(); |
148 | cpu_die(); | 150 | cpu_die(); |
149 | } | 151 | } |
150 | #endif | 152 | #endif |
151 | |||
152 | local_mcck_disable(); | 153 | local_mcck_disable(); |
153 | if (test_thread_flag(TIF_MCCK_PENDING)) { | 154 | if (test_thread_flag(TIF_MCCK_PENDING)) { |
154 | local_mcck_enable(); | 155 | local_mcck_enable(); |
155 | /* disable monitor call class 0 */ | 156 | s390_idle_leave(); |
156 | __ctl_clear_bit(8, 15); | ||
157 | atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | ||
158 | hcpu); | ||
159 | local_irq_enable(); | 157 | local_irq_enable(); |
160 | s390_handle_mcck(); | 158 | s390_handle_mcck(); |
161 | return; | 159 | return; |
162 | } | 160 | } |
163 | #ifdef CONFIG_SMP | ||
164 | idle = &__get_cpu_var(s390_idle); | ||
165 | spin_lock(&idle->lock); | ||
166 | idle->idle_count++; | ||
167 | idle->in_idle = 1; | ||
168 | idle->idle_enter = get_clock(); | ||
169 | spin_unlock(&idle->lock); | ||
170 | #endif | ||
171 | trace_hardirqs_on(); | 161 | trace_hardirqs_on(); |
172 | /* Wait for external, I/O or machine check interrupt. */ | 162 | /* Wait for external, I/O or machine check interrupt. */ |
173 | __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 163 | __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |