Diffstat (limited to 'arch/s390/kernel/process.c')
-rw-r--r--	arch/s390/kernel/process.c	64
1 file changed, 3 insertions(+), 61 deletions(-)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 1e06436f07c2..b6110bdf8dc2 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -46,7 +46,6 @@
 #include <asm/processor.h>
 #include <asm/irq.h>
 #include <asm/timer.h>
-#include <asm/cpu.h>
 #include "entry.h"
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -76,35 +75,12 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 	return sf->gprs[8];
 }
 
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
-	.lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
-};
-
-void s390_idle_leave(void)
-{
-	struct s390_idle_data *idle;
-	unsigned long long idle_time;
-
-	idle = &__get_cpu_var(s390_idle);
-	idle_time = S390_lowcore.int_clock - idle->idle_enter;
-	spin_lock(&idle->lock);
-	idle->idle_time += idle_time;
-	idle->idle_enter = 0ULL;
-	idle->idle_count++;
-	spin_unlock(&idle->lock);
-	vtime_start_cpu_timer();
-}
-
 extern void s390_handle_mcck(void);
 /*
  * The idle loop on a S390...
  */
 static void default_idle(void)
 {
-	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
-	unsigned long addr;
-	psw_t psw;
-
 	/* CPU is going idle. */
 	local_irq_disable();
 	if (need_resched()) {
@@ -120,7 +96,6 @@ static void default_idle(void)
 	local_mcck_disable();
 	if (test_thread_flag(TIF_MCCK_PENDING)) {
 		local_mcck_enable();
-		s390_idle_leave();
 		local_irq_enable();
 		s390_handle_mcck();
 		return;
@@ -128,42 +103,9 @@ static void default_idle(void)
 	trace_hardirqs_on();
 	/* Don't trace preempt off for idle. */
 	stop_critical_timings();
-	vtime_stop_cpu_timer();
-
-	/*
-	 * The inline assembly is equivalent to
-	 *	idle->idle_enter = get_clock();
-	 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
-	 *			PSW_MASK_IO | PSW_MASK_EXT);
-	 * The difference is that the inline assembly makes sure that
-	 * the stck instruction is right before the lpsw instruction.
-	 * This is done to increase the precision.
-	 */
-
-	/* Wait for external, I/O or machine check interrupt. */
-	psw.mask = psw_kernel_bits|PSW_MASK_WAIT|PSW_MASK_IO|PSW_MASK_EXT;
-#ifndef __s390x__
-	asm volatile(
-		"	basr	%0,0\n"
-		"0:	ahi	%0,1f-0b\n"
-		"	st	%0,4(%2)\n"
-		"	stck	0(%3)\n"
-		"	lpsw	0(%2)\n"
-		"1:"
-		: "=&d" (addr), "=m" (idle->idle_enter)
-		: "a" (&psw), "a" (&idle->idle_enter), "m" (psw)
-		: "memory", "cc");
-#else /* __s390x__ */
-	asm volatile(
-		"	larl	%0,1f\n"
-		"	stg	%0,8(%2)\n"
-		"	stck	0(%3)\n"
-		"	lpswe	0(%2)\n"
-		"1:"
-		: "=&d" (addr), "=m" (idle->idle_enter)
-		: "a" (&psw), "a" (&idle->idle_enter), "m" (psw)
-		: "memory", "cc");
-#endif /* __s390x__ */
+	/* Stop virtual timer and halt the cpu. */
+	vtime_stop_cpu();
+	/* Reenable preemption tracer. */
 	start_critical_timings();
 }
 
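The comment deleted in the final hunk already gives the C equivalent of the removed stck/lpsw assembly. As a minimal sketch (every identifier is taken from the removed lines; only the function name is made up), the old idle-entry sequence amounted to:

	/* Hypothetical condensation of the removed idle-entry path; the
	 * real code used inline assembly instead of these two statements. */
	static void idle_enter_sketch(struct s390_idle_data *idle)
	{
		/* Timestamp idle entry from the TOD clock ... */
		idle->idle_enter = get_clock();
		/* ... then load an enabled-wait PSW: the CPU stops until an
		 * external, I/O or machine check interrupt arrives. */
		__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
				PSW_MASK_IO | PSW_MASK_EXT);
	}

The removed comment also explains why the code was not written this way: only hand-written assembly guarantees that stck executes immediately before lpsw (31-bit) or lpswe (64-bit), so the timestamp is taken as close as possible to the moment the CPU actually enters the wait state. With this patch that precision trick moves, together with the virtual timer handling, behind vtime_stop_cpu().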
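On the wakeup side, the deleted s390_idle_leave() shows the accounting half that is being relocated: the interrupt entry path stores the TOD clock of the waking interrupt in S390_lowcore.int_clock, and the idle period is simply the delta to the timestamp taken at idle entry. Re-annotated (all names come from the removed lines; the note about who contends for the lock is an assumption):

	/* Hypothetical re-annotation of the deleted s390_idle_leave() body. */
	static void idle_leave_sketch(struct s390_idle_data *idle)
	{
		unsigned long long idle_time;

		/* Time asleep = TOD clock of the waking interrupt minus the
		 * TOD clock stored just before the wait PSW was loaded. */
		idle_time = S390_lowcore.int_clock - idle->idle_enter;

		spin_lock(&idle->lock);		/* presumably vs. readers of the
						 * per-cpu idle statistics */
		idle->idle_time += idle_time;	/* accumulated wait time */
		idle->idle_enter = 0ULL;	/* zero marks "not idle" */
		idle->idle_count++;		/* completed idle periods */
		spin_unlock(&idle->lock);
	}

After this patch neither half lives in process.c any more; both the precise timestamping and the idle time accounting are assumed to be folded into vtime_stop_cpu() and the vtime code around it.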