-rw-r--r--  arch/s390/kernel/process.c  | 70
-rw-r--r--  arch/s390/kernel/s390_ext.c |  4
-rw-r--r--  arch/s390/kernel/smp.c      |  1
-rw-r--r--  arch/s390/kernel/traps.c    |  2
-rw-r--r--  drivers/s390/cio/cio.c      |  3
-rw-r--r--  include/asm-s390/cpu.h      |  8
6 files changed, 42 insertions(+), 46 deletions(-)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index ce203154d8ce..eb768ce88672 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -76,6 +76,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
  * Need to know about CPUs going idle?
  */
 static ATOMIC_NOTIFIER_HEAD(idle_chain);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 int register_idle_notifier(struct notifier_block *nb)
 {
@@ -89,9 +90,33 @@ int unregister_idle_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_idle_notifier);
 
-void do_monitor_call(struct pt_regs *regs, long interruption_code)
+static int s390_idle_enter(void)
+{
+	struct s390_idle_data *idle;
+	int nr_calls = 0;
+	void *hcpu;
+	int rc;
+
+	hcpu = (void *)(long)smp_processor_id();
+	rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
+					  &nr_calls);
+	if (rc == NOTIFY_BAD) {
+		nr_calls--;
+		__atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
+					     hcpu, nr_calls, NULL);
+		return rc;
+	}
+	idle = &__get_cpu_var(s390_idle);
+	spin_lock(&idle->lock);
+	idle->idle_count++;
+	idle->in_idle = 1;
+	idle->idle_enter = get_clock();
+	spin_unlock(&idle->lock);
+	return NOTIFY_OK;
+}
+
+void s390_idle_leave(void)
 {
-#ifdef CONFIG_SMP
 	struct s390_idle_data *idle;
 
 	idle = &__get_cpu_var(s390_idle);
@@ -99,10 +124,6 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code)
 	idle->idle_time += get_clock() - idle->idle_enter;
 	idle->in_idle = 0;
 	spin_unlock(&idle->lock);
-#endif
-	/* disable monitor call class 0 */
-	__ctl_clear_bit(8, 15);
-
 	atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
 				   (void *)(long) smp_processor_id());
 }
@@ -113,61 +134,30 @@ extern void s390_handle_mcck(void);
  */
 static void default_idle(void)
 {
-	int cpu, rc;
-	int nr_calls = 0;
-	void *hcpu;
-#ifdef CONFIG_SMP
-	struct s390_idle_data *idle;
-#endif
-
 	/* CPU is going idle. */
-	cpu = smp_processor_id();
-	hcpu = (void *)(long)cpu;
 	local_irq_disable();
 	if (need_resched()) {
 		local_irq_enable();
 		return;
 	}
-
-	rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
-					  &nr_calls);
-	if (rc == NOTIFY_BAD) {
-		nr_calls--;
-		__atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-					     hcpu, nr_calls, NULL);
+	if (s390_idle_enter() == NOTIFY_BAD) {
 		local_irq_enable();
 		return;
 	}
-
-	/* enable monitor call class 0 */
-	__ctl_set_bit(8, 15);
-
 #ifdef CONFIG_HOTPLUG_CPU
-	if (cpu_is_offline(cpu)) {
+	if (cpu_is_offline(smp_processor_id())) {
 		preempt_enable_no_resched();
 		cpu_die();
 	}
 #endif
-
 	local_mcck_disable();
 	if (test_thread_flag(TIF_MCCK_PENDING)) {
 		local_mcck_enable();
-		/* disable monitor call class 0 */
-		__ctl_clear_bit(8, 15);
-		atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-					   hcpu);
+		s390_idle_leave();
 		local_irq_enable();
 		s390_handle_mcck();
 		return;
 	}
-#ifdef CONFIG_SMP
-	idle = &__get_cpu_var(s390_idle);
-	spin_lock(&idle->lock);
-	idle->idle_count++;
-	idle->in_idle = 1;
-	idle->idle_enter = get_clock();
-	spin_unlock(&idle->lock);
-#endif
 	trace_hardirqs_on();
 	/* Wait for external, I/O or machine check interrupt. */
 	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
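
For reference, the fields touched by s390_idle_enter() and s390_idle_leave() above imply a per-cpu accounting structure along the following lines. This is a sketch reconstructed from the accessors in this patch, not the authoritative definition; the real struct s390_idle_data lives in include/asm-s390/cpu.h and the exact field types there may differ.

/* Sketch of the per-cpu idle accounting data assumed by this patch.
 * Field widths are an assumption based on how the fields are used;
 * see include/asm-s390/cpu.h for the authoritative definition.
 */
struct s390_idle_data {
	spinlock_t lock;		/* protects all fields below */
	unsigned int in_idle;		/* 1 while the CPU sits in the wait PSW */
	unsigned long long idle_count;	/* number of idle periods entered */
	unsigned long long idle_enter;	/* TOD clock value at idle entry */
	unsigned long long idle_time;	/* accumulated idle time in TOD ticks */
};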
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index acf93dba7727..3a8772d3baea 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -13,7 +13,7 @@
 #include <linux/errno.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-
+#include <asm/cpu.h>
 #include <asm/lowcore.h>
 #include <asm/s390_ext.h>
 #include <asm/irq_regs.h>
@@ -119,7 +119,7 @@ void do_extint(struct pt_regs *regs, unsigned short code)
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-	asm volatile ("mc 0,0");
+	s390_idle_check();
 	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
 		/**
 		 * Make sure that the i/o interrupt did not "overtake"
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index d1e8e8a3fb66..5a445b1b1217 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -73,7 +73,6 @@ static int smp_cpu_state[NR_CPUS];
 static int cpu_management;
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 static void smp_ext_bitcall(int, ec_bit_sig);
 
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 60f728aeaf12..9452a205629b 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -59,7 +59,6 @@ int sysctl_userprocess_debug = 0;
 
 extern pgm_check_handler_t do_protection_exception;
 extern pgm_check_handler_t do_dat_exception;
-extern pgm_check_handler_t do_monitor_call;
 extern pgm_check_handler_t do_asce_exception;
 
 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -739,6 +738,5 @@ void __init trap_init(void)
 	pgm_check_table[0x15] = &operand_exception;
 	pgm_check_table[0x1C] = &space_switch_exception;
 	pgm_check_table[0x1D] = &hfp_sqrt_exception;
-	pgm_check_table[0x40] = &do_monitor_call;
 	pfault_irq_init();
 }
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 60590a12d529..6dbe9488d3f9 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -24,6 +24,7 @@
 #include <asm/ipl.h>
 #include <asm/chpid.h>
 #include <asm/airq.h>
+#include <asm/cpu.h>
 #include "cio.h"
 #include "css.h"
 #include "chsc.h"
@@ -649,7 +650,7 @@ do_IRQ (struct pt_regs *regs)
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-	asm volatile ("mc 0,0");
+	s390_idle_check();
 	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
 		/**
 		 * Make sure that the i/o interrupt did not "overtake"
diff --git a/include/asm-s390/cpu.h b/include/asm-s390/cpu.h
index 352dde194f3c..e5a6a9ba3adf 100644
--- a/include/asm-s390/cpu.h
+++ b/include/asm-s390/cpu.h
@@ -22,4 +22,12 @@ struct s390_idle_data {
 
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
 
+void s390_idle_leave(void);
+
+static inline void s390_idle_check(void)
+{
+	if ((&__get_cpu_var(s390_idle))->in_idle)
+		s390_idle_leave();
+}
+
 #endif /* _ASM_S390_CPU_H_ */
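
A consumer of these per-cpu counters (for example a sysfs show routine) would be expected to take the per-cpu lock and, when in_idle is set, fold in the still-running idle period using idle_enter, roughly as below. This is an illustrative sketch only, not code from this patch; the helper name and signature are hypothetical.

/* Illustrative only: how a reader of the per-cpu counters could compute
 * the total idle time of a CPU, including an idle period that has not
 * ended yet. Helper name and signature are hypothetical.
 */
static unsigned long long get_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long time;

	spin_lock_irq(&idle->lock);
	time = idle->idle_time;
	if (idle->in_idle)
		/* idle period still in progress: add the elapsed part */
		time += get_clock() - idle->idle_enter;
	spin_unlock_irq(&idle->lock);
	return time;
}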