Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c          |   1
-rw-r--r--  arch/powerpc/kernel/cputable.c             |   9
-rw-r--r--  arch/powerpc/kernel/crash.c                |   4
-rw-r--r--  arch/powerpc/kernel/entry_32.S             |  95
-rw-r--r--  arch/powerpc/kernel/entry_64.S             |  94
-rw-r--r--  arch/powerpc/kernel/head_32.S              |   1
-rw-r--r--  arch/powerpc/kernel/head_64.S              | 114
-rw-r--r--  arch/powerpc/kernel/lparcfg.c              |   6
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c     |  10
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c            |  16
-rw-r--r--  arch/powerpc/kernel/process.c              |  32
-rw-r--r--  arch/powerpc/kernel/prom.c                 |  54
-rw-r--r--  arch/powerpc/kernel/prom_init.c            |  40
-rw-r--r--  arch/powerpc/kernel/ptrace.c               |   5
-rw-r--r--  arch/powerpc/kernel/setup_64.c             |   4
-rw-r--r--  arch/powerpc/kernel/signal_32.c            |  19
-rw-r--r--  arch/powerpc/kernel/signal_64.c            |   9
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c            |   4
-rw-r--r--  arch/powerpc/kernel/systbl.S               |   2
-rw-r--r--  arch/powerpc/kernel/time.c                 | 282
-rw-r--r--  arch/powerpc/kernel/traps.c                |   2
-rw-r--r--  arch/powerpc/kernel/vdso.c                 |   4
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S  |   4
23 files changed, 256 insertions(+), 555 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 840aad43a98b..c9a660e4c2db 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -92,7 +92,6 @@ int main(void) | |||
92 | 92 | ||
93 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); | 93 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); |
94 | DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); | 94 | DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); |
95 | DEFINE(TI_SIGFRAME, offsetof(struct thread_info, nvgprs_frame)); | ||
96 | DEFINE(TI_TASK, offsetof(struct thread_info, task)); | 95 | DEFINE(TI_TASK, offsetof(struct thread_info, task)); |
97 | #ifdef CONFIG_PPC32 | 96 | #ifdef CONFIG_PPC32 |
98 | DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); | 97 | DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 10696456a4c6..e4e81374cb9a 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -53,8 +53,10 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); | |||
53 | PPC_FEATURE_HAS_MMU) | 53 | PPC_FEATURE_HAS_MMU) |
54 | #define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64) | 54 | #define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64) |
55 | #define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4) | 55 | #define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4) |
56 | #define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5) | 56 | #define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5 |\ |
57 | #define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS) | 57 | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) |
58 | #define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\ | ||
59 | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) | ||
58 | #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ | 60 | #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ |
59 | PPC_FEATURE_BOOKE) | 61 | PPC_FEATURE_BOOKE) |
60 | 62 | ||
@@ -267,7 +269,8 @@ struct cpu_spec cpu_specs[] = { | |||
267 | .cpu_name = "Cell Broadband Engine", | 269 | .cpu_name = "Cell Broadband Engine", |
268 | .cpu_features = CPU_FTRS_CELL, | 270 | .cpu_features = CPU_FTRS_CELL, |
269 | .cpu_user_features = COMMON_USER_PPC64 | | 271 | .cpu_user_features = COMMON_USER_PPC64 | |
270 | PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP, | 272 | PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | |
273 | PPC_FEATURE_SMT, | ||
271 | .icache_bsize = 128, | 274 | .icache_bsize = 128, |
272 | .dcache_bsize = 128, | 275 | .dcache_bsize = 128, |
273 | .cpu_setup = __setup_cpu_be, | 276 | .cpu_setup = __setup_cpu_be, |
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 8c21d378f5d2..778f22fd85d2 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -134,8 +134,10 @@ static void crash_kexec_prepare_cpus(void) | |||
134 | * the crash CPU will send an IPI and wait for other CPUs to | 134 | * the crash CPU will send an IPI and wait for other CPUs to |
135 | * respond. If not, proceed the kexec boot even though we failed to | 135 | * respond. If not, proceed the kexec boot even though we failed to |
136 | * capture other CPU states. | 136 | * capture other CPU states. |
137 | * Delay of at least 10 seconds. | ||
137 | */ | 138 | */ |
138 | msecs = 1000000; | 139 | printk(KERN_ALERT "Sending IPI to other cpus...\n"); |
140 | msecs = 10000; | ||
139 | while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) { | 141 | while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) { |
140 | barrier(); | 142 | barrier(); |
141 | mdelay(1); | 143 | mdelay(1); |
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index f20a67261ec7..4827ca1ec89b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -227,7 +227,7 @@ ret_from_syscall: | |||
227 | MTMSRD(r10) | 227 | MTMSRD(r10) |
228 | lwz r9,TI_FLAGS(r12) | 228 | lwz r9,TI_FLAGS(r12) |
229 | li r8,-_LAST_ERRNO | 229 | li r8,-_LAST_ERRNO |
230 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK) | 230 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) |
231 | bne- syscall_exit_work | 231 | bne- syscall_exit_work |
232 | cmplw 0,r3,r8 | 232 | cmplw 0,r3,r8 |
233 | blt+ syscall_exit_cont | 233 | blt+ syscall_exit_cont |
@@ -287,8 +287,10 @@ syscall_dotrace: | |||
287 | 287 | ||
288 | syscall_exit_work: | 288 | syscall_exit_work: |
289 | andi. r0,r9,_TIF_RESTOREALL | 289 | andi. r0,r9,_TIF_RESTOREALL |
290 | bne- 2f | 290 | beq+ 0f |
291 | cmplw 0,r3,r8 | 291 | REST_NVGPRS(r1) |
292 | b 2f | ||
293 | 0: cmplw 0,r3,r8 | ||
292 | blt+ 1f | 294 | blt+ 1f |
293 | andi. r0,r9,_TIF_NOERROR | 295 | andi. r0,r9,_TIF_NOERROR |
294 | bne- 1f | 296 | bne- 1f |
@@ -302,9 +304,7 @@ syscall_exit_work: | |||
302 | 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) | 304 | 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) |
303 | beq 4f | 305 | beq 4f |
304 | 306 | ||
305 | /* Clear per-syscall TIF flags if any are set, but _leave_ | 307 | /* Clear per-syscall TIF flags if any are set. */ |
306 | _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that | ||
307 | yet. */ | ||
308 | 308 | ||
309 | li r11,_TIF_PERSYSCALL_MASK | 309 | li r11,_TIF_PERSYSCALL_MASK |
310 | addi r12,r12,TI_FLAGS | 310 | addi r12,r12,TI_FLAGS |
@@ -318,8 +318,13 @@ syscall_exit_work: | |||
318 | subi r12,r12,TI_FLAGS | 318 | subi r12,r12,TI_FLAGS |
319 | 319 | ||
320 | 4: /* Anything which requires enabling interrupts? */ | 320 | 4: /* Anything which requires enabling interrupts? */ |
321 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS) | 321 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) |
322 | beq 7f | 322 | beq ret_from_except |
323 | |||
324 | /* Re-enable interrupts */ | ||
325 | ori r10,r10,MSR_EE | ||
326 | SYNC | ||
327 | MTMSRD(r10) | ||
323 | 328 | ||
324 | /* Save NVGPRS if they're not saved already */ | 329 | /* Save NVGPRS if they're not saved already */ |
325 | lwz r4,_TRAP(r1) | 330 | lwz r4,_TRAP(r1) |
@@ -328,71 +333,11 @@ syscall_exit_work: | |||
328 | SAVE_NVGPRS(r1) | 333 | SAVE_NVGPRS(r1) |
329 | li r4,0xc00 | 334 | li r4,0xc00 |
330 | stw r4,_TRAP(r1) | 335 | stw r4,_TRAP(r1) |
331 | 336 | 5: | |
332 | /* Re-enable interrupts */ | ||
333 | 5: ori r10,r10,MSR_EE | ||
334 | SYNC | ||
335 | MTMSRD(r10) | ||
336 | |||
337 | andi. r0,r9,_TIF_SAVE_NVGPRS | ||
338 | bne save_user_nvgprs | ||
339 | |||
340 | save_user_nvgprs_cont: | ||
341 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) | ||
342 | beq 7f | ||
343 | |||
344 | addi r3,r1,STACK_FRAME_OVERHEAD | 337 | addi r3,r1,STACK_FRAME_OVERHEAD |
345 | bl do_syscall_trace_leave | 338 | bl do_syscall_trace_leave |
346 | REST_NVGPRS(r1) | 339 | b ret_from_except_full |
347 | |||
348 | 6: lwz r3,GPR3(r1) | ||
349 | LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ | ||
350 | SYNC | ||
351 | MTMSRD(r10) /* disable interrupts again */ | ||
352 | rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ | ||
353 | lwz r9,TI_FLAGS(r12) | ||
354 | 7: | ||
355 | andi. r0,r9,_TIF_NEED_RESCHED | ||
356 | bne 8f | ||
357 | lwz r5,_MSR(r1) | ||
358 | andi. r5,r5,MSR_PR | ||
359 | beq ret_from_except | ||
360 | andi. r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK | ||
361 | beq ret_from_except | ||
362 | b do_user_signal | ||
363 | 8: | ||
364 | ori r10,r10,MSR_EE | ||
365 | SYNC | ||
366 | MTMSRD(r10) /* re-enable interrupts */ | ||
367 | bl schedule | ||
368 | b 6b | ||
369 | |||
370 | save_user_nvgprs: | ||
371 | lwz r8,TI_SIGFRAME(r12) | ||
372 | |||
373 | .macro savewords start, end | ||
374 | 1: stw \start,4*(\start)(r8) | ||
375 | .section __ex_table,"a" | ||
376 | .align 2 | ||
377 | .long 1b,save_user_nvgprs_fault | ||
378 | .previous | ||
379 | .if \end - \start | ||
380 | savewords "(\start+1)",\end | ||
381 | .endif | ||
382 | .endm | ||
383 | savewords 14,31 | ||
384 | b save_user_nvgprs_cont | ||
385 | |||
386 | |||
387 | save_user_nvgprs_fault: | ||
388 | li r3,11 /* SIGSEGV */ | ||
389 | lwz r4,TI_TASK(r12) | ||
390 | bl force_sigsegv | ||
391 | 340 | ||
392 | rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ | ||
393 | lwz r9,TI_FLAGS(r12) | ||
394 | b save_user_nvgprs_cont | ||
395 | |||
396 | #ifdef SHOW_SYSCALLS | 341 | #ifdef SHOW_SYSCALLS |
397 | do_show_syscall: | 342 | do_show_syscall: |
398 | #ifdef SHOW_SYSCALLS_TASK | 343 | #ifdef SHOW_SYSCALLS_TASK |
@@ -490,6 +435,14 @@ ppc_clone: | |||
490 | stw r0,_TRAP(r1) /* register set saved */ | 435 | stw r0,_TRAP(r1) /* register set saved */ |
491 | b sys_clone | 436 | b sys_clone |
492 | 437 | ||
438 | .globl ppc_swapcontext | ||
439 | ppc_swapcontext: | ||
440 | SAVE_NVGPRS(r1) | ||
441 | lwz r0,_TRAP(r1) | ||
442 | rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ | ||
443 | stw r0,_TRAP(r1) /* register set saved */ | ||
444 | b sys_swapcontext | ||
445 | |||
493 | /* | 446 | /* |
494 | * Top-level page fault handling. | 447 | * Top-level page fault handling. |
495 | * This is in assembler because if do_page_fault tells us that | 448 | * This is in assembler because if do_page_fault tells us that |
@@ -683,7 +636,7 @@ user_exc_return: /* r10 contains MSR_KERNEL here */ | |||
683 | /* Check current_thread_info()->flags */ | 636 | /* Check current_thread_info()->flags */ |
684 | rlwinm r9,r1,0,0,(31-THREAD_SHIFT) | 637 | rlwinm r9,r1,0,0,(31-THREAD_SHIFT) |
685 | lwz r9,TI_FLAGS(r9) | 638 | lwz r9,TI_FLAGS(r9) |
686 | andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK) | 639 | andi. r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED) |
687 | bne do_work | 640 | bne do_work |
688 | 641 | ||
689 | restore_user: | 642 | restore_user: |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 388f861b8ed1..24be0cf86d7f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -160,7 +160,7 @@ syscall_exit: | |||
160 | mtmsrd r10,1 | 160 | mtmsrd r10,1 |
161 | ld r9,TI_FLAGS(r12) | 161 | ld r9,TI_FLAGS(r12) |
162 | li r11,-_LAST_ERRNO | 162 | li r11,-_LAST_ERRNO |
163 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR|_TIF_RESTORE_SIGMASK) | 163 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) |
164 | bne- syscall_exit_work | 164 | bne- syscall_exit_work |
165 | cmpld r3,r11 | 165 | cmpld r3,r11 |
166 | ld r5,_CCR(r1) | 166 | ld r5,_CCR(r1) |
@@ -216,8 +216,10 @@ syscall_exit_work: | |||
216 | If TIF_NOERROR is set, just save r3 as it is. */ | 216 | If TIF_NOERROR is set, just save r3 as it is. */ |
217 | 217 | ||
218 | andi. r0,r9,_TIF_RESTOREALL | 218 | andi. r0,r9,_TIF_RESTOREALL |
219 | bne- 2f | 219 | beq+ 0f |
220 | cmpld r3,r11 /* r10 is -LAST_ERRNO */ | 220 | REST_NVGPRS(r1) |
221 | b 2f | ||
222 | 0: cmpld r3,r11 /* r10 is -LAST_ERRNO */ | ||
221 | blt+ 1f | 223 | blt+ 1f |
222 | andi. r0,r9,_TIF_NOERROR | 224 | andi. r0,r9,_TIF_NOERROR |
223 | bne- 1f | 225 | bne- 1f |
@@ -229,9 +231,7 @@ syscall_exit_work: | |||
229 | 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) | 231 | 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) |
230 | beq 4f | 232 | beq 4f |
231 | 233 | ||
232 | /* Clear per-syscall TIF flags if any are set, but _leave_ | 234 | /* Clear per-syscall TIF flags if any are set. */ |
233 | _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that | ||
234 | yet. */ | ||
235 | 235 | ||
236 | li r11,_TIF_PERSYSCALL_MASK | 236 | li r11,_TIF_PERSYSCALL_MASK |
237 | addi r12,r12,TI_FLAGS | 237 | addi r12,r12,TI_FLAGS |
@@ -240,10 +240,9 @@ syscall_exit_work: | |||
240 | stdcx. r10,0,r12 | 240 | stdcx. r10,0,r12 |
241 | bne- 3b | 241 | bne- 3b |
242 | subi r12,r12,TI_FLAGS | 242 | subi r12,r12,TI_FLAGS |
243 | 243 | ||
244 | 4: bl .save_nvgprs | 244 | 4: /* Anything else left to do? */ |
245 | /* Anything else left to do? */ | 245 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) |
246 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS) | ||
247 | beq .ret_from_except_lite | 246 | beq .ret_from_except_lite |
248 | 247 | ||
249 | /* Re-enable interrupts */ | 248 | /* Re-enable interrupts */ |
@@ -251,26 +250,10 @@ syscall_exit_work: | |||
251 | ori r10,r10,MSR_EE | 250 | ori r10,r10,MSR_EE |
252 | mtmsrd r10,1 | 251 | mtmsrd r10,1 |
253 | 252 | ||
254 | andi. r0,r9,_TIF_SAVE_NVGPRS | 253 | bl .save_nvgprs |
255 | bne save_user_nvgprs | ||
256 | |||
257 | /* If tracing, re-enable interrupts and do it */ | ||
258 | save_user_nvgprs_cont: | ||
259 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) | ||
260 | beq 5f | ||
261 | |||
262 | addi r3,r1,STACK_FRAME_OVERHEAD | 254 | addi r3,r1,STACK_FRAME_OVERHEAD |
263 | bl .do_syscall_trace_leave | 255 | bl .do_syscall_trace_leave |
264 | REST_NVGPRS(r1) | 256 | b .ret_from_except |
265 | clrrdi r12,r1,THREAD_SHIFT | ||
266 | |||
267 | /* Disable interrupts again and handle other work if any */ | ||
268 | 5: mfmsr r10 | ||
269 | rldicl r10,r10,48,1 | ||
270 | rotldi r10,r10,16 | ||
271 | mtmsrd r10,1 | ||
272 | |||
273 | b .ret_from_except_lite | ||
274 | 257 | ||
275 | /* Save non-volatile GPRs, if not already saved. */ | 258 | /* Save non-volatile GPRs, if not already saved. */ |
276 | _GLOBAL(save_nvgprs) | 259 | _GLOBAL(save_nvgprs) |
@@ -282,51 +265,6 @@ _GLOBAL(save_nvgprs) | |||
282 | std r0,_TRAP(r1) | 265 | std r0,_TRAP(r1) |
283 | blr | 266 | blr |
284 | 267 | ||
285 | |||
286 | save_user_nvgprs: | ||
287 | ld r10,TI_SIGFRAME(r12) | ||
288 | andi. r0,r9,_TIF_32BIT | ||
289 | beq- save_user_nvgprs_64 | ||
290 | |||
291 | /* 32-bit save to userspace */ | ||
292 | |||
293 | .macro savewords start, end | ||
294 | 1: stw \start,4*(\start)(r10) | ||
295 | .section __ex_table,"a" | ||
296 | .align 3 | ||
297 | .llong 1b,save_user_nvgprs_fault | ||
298 | .previous | ||
299 | .if \end - \start | ||
300 | savewords "(\start+1)",\end | ||
301 | .endif | ||
302 | .endm | ||
303 | savewords 14,31 | ||
304 | b save_user_nvgprs_cont | ||
305 | |||
306 | save_user_nvgprs_64: | ||
307 | /* 64-bit save to userspace */ | ||
308 | |||
309 | .macro savelongs start, end | ||
310 | 1: std \start,8*(\start)(r10) | ||
311 | .section __ex_table,"a" | ||
312 | .align 3 | ||
313 | .llong 1b,save_user_nvgprs_fault | ||
314 | .previous | ||
315 | .if \end - \start | ||
316 | savelongs "(\start+1)",\end | ||
317 | .endif | ||
318 | .endm | ||
319 | savelongs 14,31 | ||
320 | b save_user_nvgprs_cont | ||
321 | |||
322 | save_user_nvgprs_fault: | ||
323 | li r3,11 /* SIGSEGV */ | ||
324 | ld r4,TI_TASK(r12) | ||
325 | bl .force_sigsegv | ||
326 | |||
327 | clrrdi r12,r1,THREAD_SHIFT | ||
328 | ld r9,TI_FLAGS(r12) | ||
329 | b save_user_nvgprs_cont | ||
330 | 268 | ||
331 | /* | 269 | /* |
332 | * The sigsuspend and rt_sigsuspend system calls can call do_signal | 270 | * The sigsuspend and rt_sigsuspend system calls can call do_signal |
@@ -352,6 +290,16 @@ _GLOBAL(ppc_clone) | |||
352 | bl .sys_clone | 290 | bl .sys_clone |
353 | b syscall_exit | 291 | b syscall_exit |
354 | 292 | ||
293 | _GLOBAL(ppc32_swapcontext) | ||
294 | bl .save_nvgprs | ||
295 | bl .compat_sys_swapcontext | ||
296 | b syscall_exit | ||
297 | |||
298 | _GLOBAL(ppc64_swapcontext) | ||
299 | bl .save_nvgprs | ||
300 | bl .sys_swapcontext | ||
301 | b syscall_exit | ||
302 | |||
355 | _GLOBAL(ret_from_fork) | 303 | _GLOBAL(ret_from_fork) |
356 | bl .schedule_tail | 304 | bl .schedule_tail |
357 | REST_NVGPRS(r1) | 305 | REST_NVGPRS(r1) |
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 03b25f9359f8..a0579e859b21 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -714,6 +714,7 @@ AltiVecUnavailable: | |||
714 | #ifdef CONFIG_ALTIVEC | 714 | #ifdef CONFIG_ALTIVEC |
715 | bne load_up_altivec /* if from user, just load it up */ | 715 | bne load_up_altivec /* if from user, just load it up */ |
716 | #endif /* CONFIG_ALTIVEC */ | 716 | #endif /* CONFIG_ALTIVEC */ |
717 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
717 | EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception) | 718 | EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception) |
718 | 719 | ||
719 | PerformanceMonitor: | 720 | PerformanceMonitor: |
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 415659629394..9b65029dd2a3 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -139,7 +139,7 @@ _GLOBAL(__secondary_hold) | |||
139 | ori r24,r24,MSR_RI | 139 | ori r24,r24,MSR_RI |
140 | mtmsrd r24 /* RI on */ | 140 | mtmsrd r24 /* RI on */ |
141 | 141 | ||
142 | /* Grab our linux cpu number */ | 142 | /* Grab our physical cpu number */ |
143 | mr r24,r3 | 143 | mr r24,r3 |
144 | 144 | ||
145 | /* Tell the master cpu we're here */ | 145 | /* Tell the master cpu we're here */ |
@@ -153,12 +153,7 @@ _GLOBAL(__secondary_hold) | |||
153 | cmpdi 0,r4,1 | 153 | cmpdi 0,r4,1 |
154 | bne 100b | 154 | bne 100b |
155 | 155 | ||
156 | #ifdef CONFIG_HMT | 156 | #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) |
157 | SET_REG_IMMEDIATE(r4, .hmt_init) | ||
158 | mtctr r4 | ||
159 | bctr | ||
160 | #else | ||
161 | #ifdef CONFIG_SMP | ||
162 | LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init) | 157 | LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init) |
163 | mtctr r4 | 158 | mtctr r4 |
164 | mr r3,r24 | 159 | mr r3,r24 |
@@ -166,7 +161,6 @@ _GLOBAL(__secondary_hold) | |||
166 | #else | 161 | #else |
167 | BUG_OPCODE | 162 | BUG_OPCODE |
168 | #endif | 163 | #endif |
169 | #endif | ||
170 | 164 | ||
171 | /* This value is used to mark exception frames on the stack. */ | 165 | /* This value is used to mark exception frames on the stack. */ |
172 | .section ".toc","aw" | 166 | .section ".toc","aw" |
@@ -321,7 +315,6 @@ exception_marker: | |||
321 | label##_pSeries: \ | 315 | label##_pSeries: \ |
322 | HMT_MEDIUM; \ | 316 | HMT_MEDIUM; \ |
323 | mtspr SPRN_SPRG1,r13; /* save r13 */ \ | 317 | mtspr SPRN_SPRG1,r13; /* save r13 */ \ |
324 | RUNLATCH_ON(r13); \ | ||
325 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) | 318 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) |
326 | 319 | ||
327 | #define STD_EXCEPTION_ISERIES(n, label, area) \ | 320 | #define STD_EXCEPTION_ISERIES(n, label, area) \ |
@@ -329,7 +322,6 @@ label##_pSeries: \ | |||
329 | label##_iSeries: \ | 322 | label##_iSeries: \ |
330 | HMT_MEDIUM; \ | 323 | HMT_MEDIUM; \ |
331 | mtspr SPRN_SPRG1,r13; /* save r13 */ \ | 324 | mtspr SPRN_SPRG1,r13; /* save r13 */ \ |
332 | RUNLATCH_ON(r13); \ | ||
333 | EXCEPTION_PROLOG_ISERIES_1(area); \ | 325 | EXCEPTION_PROLOG_ISERIES_1(area); \ |
334 | EXCEPTION_PROLOG_ISERIES_2; \ | 326 | EXCEPTION_PROLOG_ISERIES_2; \ |
335 | b label##_common | 327 | b label##_common |
@@ -339,7 +331,6 @@ label##_iSeries: \ | |||
339 | label##_iSeries: \ | 331 | label##_iSeries: \ |
340 | HMT_MEDIUM; \ | 332 | HMT_MEDIUM; \ |
341 | mtspr SPRN_SPRG1,r13; /* save r13 */ \ | 333 | mtspr SPRN_SPRG1,r13; /* save r13 */ \ |
342 | RUNLATCH_ON(r13); \ | ||
343 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ | 334 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ |
344 | lbz r10,PACAPROCENABLED(r13); \ | 335 | lbz r10,PACAPROCENABLED(r13); \ |
345 | cmpwi 0,r10,0; \ | 336 | cmpwi 0,r10,0; \ |
@@ -392,6 +383,7 @@ label##_common: \ | |||
392 | label##_common: \ | 383 | label##_common: \ |
393 | EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ | 384 | EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ |
394 | DISABLE_INTS; \ | 385 | DISABLE_INTS; \ |
386 | bl .ppc64_runlatch_on; \ | ||
395 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | 387 | addi r3,r1,STACK_FRAME_OVERHEAD; \ |
396 | bl hdlr; \ | 388 | bl hdlr; \ |
397 | b .ret_from_except_lite | 389 | b .ret_from_except_lite |
@@ -409,7 +401,6 @@ __start_interrupts: | |||
409 | _machine_check_pSeries: | 401 | _machine_check_pSeries: |
410 | HMT_MEDIUM | 402 | HMT_MEDIUM |
411 | mtspr SPRN_SPRG1,r13 /* save r13 */ | 403 | mtspr SPRN_SPRG1,r13 /* save r13 */ |
412 | RUNLATCH_ON(r13) | ||
413 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | 404 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) |
414 | 405 | ||
415 | . = 0x300 | 406 | . = 0x300 |
@@ -436,7 +427,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | |||
436 | data_access_slb_pSeries: | 427 | data_access_slb_pSeries: |
437 | HMT_MEDIUM | 428 | HMT_MEDIUM |
438 | mtspr SPRN_SPRG1,r13 | 429 | mtspr SPRN_SPRG1,r13 |
439 | RUNLATCH_ON(r13) | ||
440 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ | 430 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ |
441 | std r3,PACA_EXSLB+EX_R3(r13) | 431 | std r3,PACA_EXSLB+EX_R3(r13) |
442 | mfspr r3,SPRN_DAR | 432 | mfspr r3,SPRN_DAR |
@@ -462,7 +452,6 @@ data_access_slb_pSeries: | |||
462 | instruction_access_slb_pSeries: | 452 | instruction_access_slb_pSeries: |
463 | HMT_MEDIUM | 453 | HMT_MEDIUM |
464 | mtspr SPRN_SPRG1,r13 | 454 | mtspr SPRN_SPRG1,r13 |
465 | RUNLATCH_ON(r13) | ||
466 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ | 455 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ |
467 | std r3,PACA_EXSLB+EX_R3(r13) | 456 | std r3,PACA_EXSLB+EX_R3(r13) |
468 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ | 457 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ |
@@ -493,7 +482,6 @@ instruction_access_slb_pSeries: | |||
493 | .globl system_call_pSeries | 482 | .globl system_call_pSeries |
494 | system_call_pSeries: | 483 | system_call_pSeries: |
495 | HMT_MEDIUM | 484 | HMT_MEDIUM |
496 | RUNLATCH_ON(r9) | ||
497 | mr r9,r13 | 485 | mr r9,r13 |
498 | mfmsr r10 | 486 | mfmsr r10 |
499 | mfspr r13,SPRN_SPRG3 | 487 | mfspr r13,SPRN_SPRG3 |
@@ -577,7 +565,6 @@ slb_miss_user_pseries: | |||
577 | system_reset_fwnmi: | 565 | system_reset_fwnmi: |
578 | HMT_MEDIUM | 566 | HMT_MEDIUM |
579 | mtspr SPRN_SPRG1,r13 /* save r13 */ | 567 | mtspr SPRN_SPRG1,r13 /* save r13 */ |
580 | RUNLATCH_ON(r13) | ||
581 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) | 568 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) |
582 | 569 | ||
583 | .globl machine_check_fwnmi | 570 | .globl machine_check_fwnmi |
@@ -585,7 +572,6 @@ system_reset_fwnmi: | |||
585 | machine_check_fwnmi: | 572 | machine_check_fwnmi: |
586 | HMT_MEDIUM | 573 | HMT_MEDIUM |
587 | mtspr SPRN_SPRG1,r13 /* save r13 */ | 574 | mtspr SPRN_SPRG1,r13 /* save r13 */ |
588 | RUNLATCH_ON(r13) | ||
589 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | 575 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) |
590 | 576 | ||
591 | #ifdef CONFIG_PPC_ISERIES | 577 | #ifdef CONFIG_PPC_ISERIES |
@@ -896,7 +882,6 @@ unrecov_fer: | |||
896 | .align 7 | 882 | .align 7 |
897 | .globl data_access_common | 883 | .globl data_access_common |
898 | data_access_common: | 884 | data_access_common: |
899 | RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */ | ||
900 | mfspr r10,SPRN_DAR | 885 | mfspr r10,SPRN_DAR |
901 | std r10,PACA_EXGEN+EX_DAR(r13) | 886 | std r10,PACA_EXGEN+EX_DAR(r13) |
902 | mfspr r10,SPRN_DSISR | 887 | mfspr r10,SPRN_DSISR |
@@ -1044,6 +1029,7 @@ hardware_interrupt_common: | |||
1044 | EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) | 1029 | EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) |
1045 | hardware_interrupt_entry: | 1030 | hardware_interrupt_entry: |
1046 | DISABLE_INTS | 1031 | DISABLE_INTS |
1032 | bl .ppc64_runlatch_on | ||
1047 | addi r3,r1,STACK_FRAME_OVERHEAD | 1033 | addi r3,r1,STACK_FRAME_OVERHEAD |
1048 | bl .do_IRQ | 1034 | bl .do_IRQ |
1049 | b .ret_from_except_lite | 1035 | b .ret_from_except_lite |
@@ -1551,6 +1537,9 @@ _STATIC(__boot_from_prom) | |||
1551 | mr r28,r6 | 1537 | mr r28,r6 |
1552 | mr r27,r7 | 1538 | mr r27,r7 |
1553 | 1539 | ||
1540 | /* Align the stack to 16-byte boundary for broken yaboot */ | ||
1541 | rldicr r1,r1,0,59 | ||
1542 | |||
1554 | /* Make sure we are running in 64 bits mode */ | 1543 | /* Make sure we are running in 64 bits mode */ |
1555 | bl .enable_64b_mode | 1544 | bl .enable_64b_mode |
1556 | 1545 | ||
@@ -1818,22 +1807,6 @@ _STATIC(start_here_multiplatform) | |||
1818 | ori r6,r6,MSR_RI | 1807 | ori r6,r6,MSR_RI |
1819 | mtmsrd r6 /* RI on */ | 1808 | mtmsrd r6 /* RI on */ |
1820 | 1809 | ||
1821 | #ifdef CONFIG_HMT | ||
1822 | /* Start up the second thread on cpu 0 */ | ||
1823 | mfspr r3,SPRN_PVR | ||
1824 | srwi r3,r3,16 | ||
1825 | cmpwi r3,0x34 /* Pulsar */ | ||
1826 | beq 90f | ||
1827 | cmpwi r3,0x36 /* Icestar */ | ||
1828 | beq 90f | ||
1829 | cmpwi r3,0x37 /* SStar */ | ||
1830 | beq 90f | ||
1831 | b 91f /* HMT not supported */ | ||
1832 | 90: li r3,0 | ||
1833 | bl .hmt_start_secondary | ||
1834 | 91: | ||
1835 | #endif | ||
1836 | |||
1837 | /* The following gets the stack and TOC set up with the regs */ | 1810 | /* The following gets the stack and TOC set up with the regs */ |
1838 | /* pointing to the real addr of the kernel stack. This is */ | 1811 | /* pointing to the real addr of the kernel stack. This is */ |
1839 | /* all done to support the C function call below which sets */ | 1812 | /* all done to support the C function call below which sets */ |
@@ -1947,77 +1920,8 @@ _STATIC(start_here_common) | |||
1947 | 1920 | ||
1948 | bl .start_kernel | 1921 | bl .start_kernel |
1949 | 1922 | ||
1950 | _GLOBAL(hmt_init) | 1923 | /* Not reached */ |
1951 | #ifdef CONFIG_HMT | 1924 | BUG_OPCODE |
1952 | LOAD_REG_IMMEDIATE(r5, hmt_thread_data) | ||
1953 | mfspr r7,SPRN_PVR | ||
1954 | srwi r7,r7,16 | ||
1955 | cmpwi r7,0x34 /* Pulsar */ | ||
1956 | beq 90f | ||
1957 | cmpwi r7,0x36 /* Icestar */ | ||
1958 | beq 91f | ||
1959 | cmpwi r7,0x37 /* SStar */ | ||
1960 | beq 91f | ||
1961 | b 101f | ||
1962 | 90: mfspr r6,SPRN_PIR | ||
1963 | andi. r6,r6,0x1f | ||
1964 | b 92f | ||
1965 | 91: mfspr r6,SPRN_PIR | ||
1966 | andi. r6,r6,0x3ff | ||
1967 | 92: sldi r4,r24,3 | ||
1968 | stwx r6,r5,r4 | ||
1969 | bl .hmt_start_secondary | ||
1970 | b 101f | ||
1971 | |||
1972 | __hmt_secondary_hold: | ||
1973 | LOAD_REG_IMMEDIATE(r5, hmt_thread_data) | ||
1974 | clrldi r5,r5,4 | ||
1975 | li r7,0 | ||
1976 | mfspr r6,SPRN_PIR | ||
1977 | mfspr r8,SPRN_PVR | ||
1978 | srwi r8,r8,16 | ||
1979 | cmpwi r8,0x34 | ||
1980 | bne 93f | ||
1981 | andi. r6,r6,0x1f | ||
1982 | b 103f | ||
1983 | 93: andi. r6,r6,0x3f | ||
1984 | |||
1985 | 103: lwzx r8,r5,r7 | ||
1986 | cmpw r8,r6 | ||
1987 | beq 104f | ||
1988 | addi r7,r7,8 | ||
1989 | b 103b | ||
1990 | |||
1991 | 104: addi r7,r7,4 | ||
1992 | lwzx r9,r5,r7 | ||
1993 | mr r24,r9 | ||
1994 | 101: | ||
1995 | #endif | ||
1996 | mr r3,r24 | ||
1997 | b .pSeries_secondary_smp_init | ||
1998 | |||
1999 | #ifdef CONFIG_HMT | ||
2000 | _GLOBAL(hmt_start_secondary) | ||
2001 | LOAD_REG_IMMEDIATE(r4,__hmt_secondary_hold) | ||
2002 | clrldi r4,r4,4 | ||
2003 | mtspr SPRN_NIADORM, r4 | ||
2004 | mfspr r4, SPRN_MSRDORM | ||
2005 | li r5, -65 | ||
2006 | and r4, r4, r5 | ||
2007 | mtspr SPRN_MSRDORM, r4 | ||
2008 | lis r4,0xffef | ||
2009 | ori r4,r4,0x7403 | ||
2010 | mtspr SPRN_TSC, r4 | ||
2011 | li r4,0x1f4 | ||
2012 | mtspr SPRN_TST, r4 | ||
2013 | mfspr r4, SPRN_HID0 | ||
2014 | ori r4, r4, 0x1 | ||
2015 | mtspr SPRN_HID0, r4 | ||
2016 | mfspr r4, SPRN_CTRLF | ||
2017 | oris r4, r4, 0x40 | ||
2018 | mtspr SPRN_CTRLT, r4 | ||
2019 | blr | ||
2020 | #endif | ||
2021 | 1925 | ||
2022 | /* | 1926 | /* |
2023 | * We put a few things here that have to be page-aligned. | 1927 | * We put a few things here that have to be page-aligned. |
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 1ae96a8ed7e2..e789fef4eb8a 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -341,7 +341,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | |||
341 | const char *system_id = ""; | 341 | const char *system_id = ""; |
342 | unsigned int *lp_index_ptr, lp_index = 0; | 342 | unsigned int *lp_index_ptr, lp_index = 0; |
343 | struct device_node *rtas_node; | 343 | struct device_node *rtas_node; |
344 | int *lrdrp; | 344 | int *lrdrp = NULL; |
345 | 345 | ||
346 | rootdn = find_path_device("/"); | 346 | rootdn = find_path_device("/"); |
347 | if (rootdn) { | 347 | if (rootdn) { |
@@ -362,7 +362,9 @@ static int lparcfg_data(struct seq_file *m, void *v) | |||
362 | seq_printf(m, "partition_id=%d\n", (int)lp_index); | 362 | seq_printf(m, "partition_id=%d\n", (int)lp_index); |
363 | 363 | ||
364 | rtas_node = find_path_device("/rtas"); | 364 | rtas_node = find_path_device("/rtas"); |
365 | lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL); | 365 | if (rtas_node) |
366 | lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", | ||
367 | NULL); | ||
366 | 368 | ||
367 | if (lrdrp == NULL) { | 369 | if (lrdrp == NULL) { |
368 | partition_potential_processors = vdso_data->processorCount; | 370 | partition_potential_processors = vdso_data->processorCount; |
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index d6431440c54f..ee166c586642 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -26,8 +26,6 @@ | |||
26 | #include <asm/prom.h> | 26 | #include <asm/prom.h> |
27 | #include <asm/smp.h> | 27 | #include <asm/smp.h> |
28 | 28 | ||
29 | #define HASH_GROUP_SIZE 0x80 /* size of each hash group, asm/mmu.h */ | ||
30 | |||
31 | int default_machine_kexec_prepare(struct kimage *image) | 29 | int default_machine_kexec_prepare(struct kimage *image) |
32 | { | 30 | { |
33 | int i; | 31 | int i; |
@@ -61,7 +59,7 @@ int default_machine_kexec_prepare(struct kimage *image) | |||
61 | */ | 59 | */ |
62 | if (htab_address) { | 60 | if (htab_address) { |
63 | low = __pa(htab_address); | 61 | low = __pa(htab_address); |
64 | high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE; | 62 | high = low + htab_size_bytes; |
65 | 63 | ||
66 | for (i = 0; i < image->nr_segments; i++) { | 64 | for (i = 0; i < image->nr_segments; i++) { |
67 | begin = image->segment[i].mem; | 65 | begin = image->segment[i].mem; |
@@ -294,7 +292,7 @@ void default_machine_kexec(struct kimage *image) | |||
294 | } | 292 | } |
295 | 293 | ||
296 | /* Values we need to export to the second kernel via the device tree. */ | 294 | /* Values we need to export to the second kernel via the device tree. */ |
297 | static unsigned long htab_base, htab_size, kernel_end; | 295 | static unsigned long htab_base, kernel_end; |
298 | 296 | ||
299 | static struct property htab_base_prop = { | 297 | static struct property htab_base_prop = { |
300 | .name = "linux,htab-base", | 298 | .name = "linux,htab-base", |
@@ -305,7 +303,7 @@ static struct property htab_base_prop = { | |||
305 | static struct property htab_size_prop = { | 303 | static struct property htab_size_prop = { |
306 | .name = "linux,htab-size", | 304 | .name = "linux,htab-size", |
307 | .length = sizeof(unsigned long), | 305 | .length = sizeof(unsigned long), |
308 | .value = (unsigned char *)&htab_size, | 306 | .value = (unsigned char *)&htab_size_bytes, |
309 | }; | 307 | }; |
310 | 308 | ||
311 | static struct property kernel_end_prop = { | 309 | static struct property kernel_end_prop = { |
@@ -331,8 +329,6 @@ static void __init export_htab_values(void) | |||
331 | 329 | ||
332 | htab_base = __pa(htab_address); | 330 | htab_base = __pa(htab_address); |
333 | prom_add_property(node, &htab_base_prop); | 331 | prom_add_property(node, &htab_base_prop); |
334 | |||
335 | htab_size = 1UL << ppc64_pft_size; | ||
336 | prom_add_property(node, &htab_size_prop); | 332 | prom_add_property(node, &htab_size_prop); |
337 | 333 | ||
338 | out: | 334 | out: |
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index d9a459c144d8..8a731ea877b7 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -79,15 +79,8 @@ EXPORT_SYMBOL(sys_sigreturn); | |||
79 | EXPORT_SYMBOL(strcpy); | 79 | EXPORT_SYMBOL(strcpy); |
80 | EXPORT_SYMBOL(strncpy); | 80 | EXPORT_SYMBOL(strncpy); |
81 | EXPORT_SYMBOL(strcat); | 81 | EXPORT_SYMBOL(strcat); |
82 | EXPORT_SYMBOL(strncat); | ||
83 | EXPORT_SYMBOL(strchr); | ||
84 | EXPORT_SYMBOL(strrchr); | ||
85 | EXPORT_SYMBOL(strpbrk); | ||
86 | EXPORT_SYMBOL(strstr); | ||
87 | EXPORT_SYMBOL(strlen); | 82 | EXPORT_SYMBOL(strlen); |
88 | EXPORT_SYMBOL(strnlen); | ||
89 | EXPORT_SYMBOL(strcmp); | 83 | EXPORT_SYMBOL(strcmp); |
90 | EXPORT_SYMBOL(strncmp); | ||
91 | EXPORT_SYMBOL(strcasecmp); | 84 | EXPORT_SYMBOL(strcasecmp); |
92 | 85 | ||
93 | EXPORT_SYMBOL(csum_partial); | 86 | EXPORT_SYMBOL(csum_partial); |
@@ -185,9 +178,6 @@ EXPORT_SYMBOL(adb_try_handler_change); | |||
185 | EXPORT_SYMBOL(cuda_request); | 178 | EXPORT_SYMBOL(cuda_request); |
186 | EXPORT_SYMBOL(cuda_poll); | 179 | EXPORT_SYMBOL(cuda_poll); |
187 | #endif /* CONFIG_ADB_CUDA */ | 180 | #endif /* CONFIG_ADB_CUDA */ |
188 | #ifdef CONFIG_PPC_PMAC | ||
189 | EXPORT_SYMBOL(sys_ctrler); | ||
190 | #endif | ||
191 | #ifdef CONFIG_VT | 181 | #ifdef CONFIG_VT |
192 | EXPORT_SYMBOL(kd_mksound); | 182 | EXPORT_SYMBOL(kd_mksound); |
193 | #endif | 183 | #endif |
@@ -205,7 +195,6 @@ EXPORT_SYMBOL(__lshrdi3); | |||
205 | EXPORT_SYMBOL(memcpy); | 195 | EXPORT_SYMBOL(memcpy); |
206 | EXPORT_SYMBOL(memset); | 196 | EXPORT_SYMBOL(memset); |
207 | EXPORT_SYMBOL(memmove); | 197 | EXPORT_SYMBOL(memmove); |
208 | EXPORT_SYMBOL(memscan); | ||
209 | EXPORT_SYMBOL(memcmp); | 198 | EXPORT_SYMBOL(memcmp); |
210 | EXPORT_SYMBOL(memchr); | 199 | EXPORT_SYMBOL(memchr); |
211 | 200 | ||
@@ -214,7 +203,6 @@ EXPORT_SYMBOL(screen_info); | |||
214 | #endif | 203 | #endif |
215 | 204 | ||
216 | #ifdef CONFIG_PPC32 | 205 | #ifdef CONFIG_PPC32 |
217 | EXPORT_SYMBOL(__delay); | ||
218 | EXPORT_SYMBOL(timer_interrupt); | 206 | EXPORT_SYMBOL(timer_interrupt); |
219 | EXPORT_SYMBOL(irq_desc); | 207 | EXPORT_SYMBOL(irq_desc); |
220 | EXPORT_SYMBOL(tb_ticks_per_jiffy); | 208 | EXPORT_SYMBOL(tb_ticks_per_jiffy); |
@@ -222,10 +210,6 @@ EXPORT_SYMBOL(console_drivers); | |||
222 | EXPORT_SYMBOL(cacheable_memcpy); | 210 | EXPORT_SYMBOL(cacheable_memcpy); |
223 | #endif | 211 | #endif |
224 | 212 | ||
225 | EXPORT_SYMBOL(__up); | ||
226 | EXPORT_SYMBOL(__down); | ||
227 | EXPORT_SYMBOL(__down_interruptible); | ||
228 | |||
229 | #ifdef CONFIG_8xx | 213 | #ifdef CONFIG_8xx |
230 | EXPORT_SYMBOL(cpm_install_handler); | 214 | EXPORT_SYMBOL(cpm_install_handler); |
231 | EXPORT_SYMBOL(cpm_free_handler); | 215 | EXPORT_SYMBOL(cpm_free_handler); |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 57703994a063..c225cf154bfe 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -888,3 +888,35 @@ void dump_stack(void) | |||
888 | show_stack(current, NULL); | 888 | show_stack(current, NULL); |
889 | } | 889 | } |
890 | EXPORT_SYMBOL(dump_stack); | 890 | EXPORT_SYMBOL(dump_stack); |
891 | |||
892 | #ifdef CONFIG_PPC64 | ||
893 | void ppc64_runlatch_on(void) | ||
894 | { | ||
895 | unsigned long ctrl; | ||
896 | |||
897 | if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) { | ||
898 | HMT_medium(); | ||
899 | |||
900 | ctrl = mfspr(SPRN_CTRLF); | ||
901 | ctrl |= CTRL_RUNLATCH; | ||
902 | mtspr(SPRN_CTRLT, ctrl); | ||
903 | |||
904 | set_thread_flag(TIF_RUNLATCH); | ||
905 | } | ||
906 | } | ||
907 | |||
908 | void ppc64_runlatch_off(void) | ||
909 | { | ||
910 | unsigned long ctrl; | ||
911 | |||
912 | if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) { | ||
913 | HMT_medium(); | ||
914 | |||
915 | clear_thread_flag(TIF_RUNLATCH); | ||
916 | |||
917 | ctrl = mfspr(SPRN_CTRLF); | ||
918 | ctrl &= ~CTRL_RUNLATCH; | ||
919 | mtspr(SPRN_CTRLT, ctrl); | ||
920 | } | ||
921 | } | ||
922 | #endif | ||
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 294832a7e0a6..6dbd21726770 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -816,8 +816,6 @@ void __init unflatten_device_tree(void) | |||
816 | { | 816 | { |
817 | unsigned long start, mem, size; | 817 | unsigned long start, mem, size; |
818 | struct device_node **allnextp = &allnodes; | 818 | struct device_node **allnextp = &allnodes; |
819 | char *p = NULL; | ||
820 | int l = 0; | ||
821 | 819 | ||
822 | DBG(" -> unflatten_device_tree()\n"); | 820 | DBG(" -> unflatten_device_tree()\n"); |
823 | 821 | ||
@@ -857,19 +855,6 @@ void __init unflatten_device_tree(void) | |||
857 | if (of_chosen == NULL) | 855 | if (of_chosen == NULL) |
858 | of_chosen = of_find_node_by_path("/chosen@0"); | 856 | of_chosen = of_find_node_by_path("/chosen@0"); |
859 | 857 | ||
860 | /* Retreive command line */ | ||
861 | if (of_chosen != NULL) { | ||
862 | p = (char *)get_property(of_chosen, "bootargs", &l); | ||
863 | if (p != NULL && l > 0) | ||
864 | strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE)); | ||
865 | } | ||
866 | #ifdef CONFIG_CMDLINE | ||
867 | if (l == 0 || (l == 1 && (*p) == 0)) | ||
868 | strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); | ||
869 | #endif /* CONFIG_CMDLINE */ | ||
870 | |||
871 | DBG("Command line is: %s\n", cmd_line); | ||
872 | |||
873 | DBG(" <- unflatten_device_tree()\n"); | 858 | DBG(" <- unflatten_device_tree()\n"); |
874 | } | 859 | } |
875 | 860 | ||
@@ -940,6 +925,8 @@ static int __init early_init_dt_scan_chosen(unsigned long node, | |||
940 | { | 925 | { |
941 | u32 *prop; | 926 | u32 *prop; |
942 | unsigned long *lprop; | 927 | unsigned long *lprop; |
928 | unsigned long l; | ||
929 | char *p; | ||
943 | 930 | ||
944 | DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); | 931 | DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); |
945 | 932 | ||
@@ -1004,6 +991,41 @@ static int __init early_init_dt_scan_chosen(unsigned long node, | |||
1004 | crashk_res.end = crashk_res.start + *lprop - 1; | 991 | crashk_res.end = crashk_res.start + *lprop - 1; |
1005 | #endif | 992 | #endif |
1006 | 993 | ||
994 | /* Retreive command line */ | ||
995 | p = of_get_flat_dt_prop(node, "bootargs", &l); | ||
996 | if (p != NULL && l > 0) | ||
997 | strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); | ||
998 | |||
999 | #ifdef CONFIG_CMDLINE | ||
1000 | if (l == 0 || (l == 1 && (*p) == 0)) | ||
1001 | strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); | ||
1002 | #endif /* CONFIG_CMDLINE */ | ||
1003 | |||
1004 | DBG("Command line is: %s\n", cmd_line); | ||
1005 | |||
1006 | if (strstr(cmd_line, "mem=")) { | ||
1007 | char *p, *q; | ||
1008 | unsigned long maxmem = 0; | ||
1009 | |||
1010 | for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) { | ||
1011 | q = p + 4; | ||
1012 | if (p > cmd_line && p[-1] != ' ') | ||
1013 | continue; | ||
1014 | maxmem = simple_strtoul(q, &q, 0); | ||
1015 | if (*q == 'k' || *q == 'K') { | ||
1016 | maxmem <<= 10; | ||
1017 | ++q; | ||
1018 | } else if (*q == 'm' || *q == 'M') { | ||
1019 | maxmem <<= 20; | ||
1020 | ++q; | ||
1021 | } else if (*q == 'g' || *q == 'G') { | ||
1022 | maxmem <<= 30; | ||
1023 | ++q; | ||
1024 | } | ||
1025 | } | ||
1026 | memory_limit = maxmem; | ||
1027 | } | ||
1028 | |||
1007 | /* break now */ | 1029 | /* break now */ |
1008 | return 1; | 1030 | return 1; |
1009 | } | 1031 | } |
@@ -1124,7 +1146,7 @@ static void __init early_reserve_mem(void) | |||
1124 | size_32 = *(reserve_map_32++); | 1146 | size_32 = *(reserve_map_32++); |
1125 | if (size_32 == 0) | 1147 | if (size_32 == 0) |
1126 | break; | 1148 | break; |
1127 | DBG("reserving: %lx -> %lx\n", base_32, size_32); | 1149 | DBG("reserving: %x -> %x\n", base_32, size_32); |
1128 | lmb_reserve(base_32, size_32); | 1150 | lmb_reserve(base_32, size_32); |
1129 | } | 1151 | } |
1130 | return; | 1152 | return; |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ec7153f4d47c..813c2cd194c2 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -205,14 +205,6 @@ static cell_t __initdata regbuf[1024]; | |||
205 | 205 | ||
206 | #define MAX_CPU_THREADS 2 | 206 | #define MAX_CPU_THREADS 2 |
207 | 207 | ||
208 | /* TO GO */ | ||
209 | #ifdef CONFIG_HMT | ||
210 | struct { | ||
211 | unsigned int pir; | ||
212 | unsigned int threadid; | ||
213 | } hmt_thread_data[NR_CPUS]; | ||
214 | #endif /* CONFIG_HMT */ | ||
215 | |||
216 | /* | 208 | /* |
217 | * Error results ... some OF calls will return "-1" on error, some | 209 | * Error results ... some OF calls will return "-1" on error, some |
218 | * will return 0, some will return either. To simplify, here are | 210 | * will return 0, some will return either. To simplify, here are |
@@ -986,7 +978,7 @@ static void __init prom_init_mem(void) | |||
986 | if (size == 0) | 978 | if (size == 0) |
987 | continue; | 979 | continue; |
988 | prom_debug(" %x %x\n", base, size); | 980 | prom_debug(" %x %x\n", base, size); |
989 | if (base == 0) | 981 | if (base == 0 && (RELOC(of_platform) & PLATFORM_LPAR)) |
990 | RELOC(rmo_top) = size; | 982 | RELOC(rmo_top) = size; |
991 | if ((base + size) > RELOC(ram_top)) | 983 | if ((base + size) > RELOC(ram_top)) |
992 | RELOC(ram_top) = base + size; | 984 | RELOC(ram_top) = base + size; |
@@ -1319,10 +1311,6 @@ static void __init prom_hold_cpus(void) | |||
1319 | */ | 1311 | */ |
1320 | *spinloop = 0; | 1312 | *spinloop = 0; |
1321 | 1313 | ||
1322 | #ifdef CONFIG_HMT | ||
1323 | for (i = 0; i < NR_CPUS; i++) | ||
1324 | RELOC(hmt_thread_data)[i].pir = 0xdeadbeef; | ||
1325 | #endif | ||
1326 | /* look for cpus */ | 1314 | /* look for cpus */ |
1327 | for (node = 0; prom_next_node(&node); ) { | 1315 | for (node = 0; prom_next_node(&node); ) { |
1328 | type[0] = 0; | 1316 | type[0] = 0; |
@@ -1389,32 +1377,6 @@ static void __init prom_hold_cpus(void) | |||
1389 | /* Reserve cpu #s for secondary threads. They start later. */ | 1377 | /* Reserve cpu #s for secondary threads. They start later. */ |
1390 | cpuid += cpu_threads; | 1378 | cpuid += cpu_threads; |
1391 | } | 1379 | } |
1392 | #ifdef CONFIG_HMT | ||
1393 | /* Only enable HMT on processors that provide support. */ | ||
1394 | if (__is_processor(PV_PULSAR) || | ||
1395 | __is_processor(PV_ICESTAR) || | ||
1396 | __is_processor(PV_SSTAR)) { | ||
1397 | prom_printf(" starting secondary threads\n"); | ||
1398 | |||
1399 | for (i = 0; i < NR_CPUS; i += 2) { | ||
1400 | if (!cpu_online(i)) | ||
1401 | continue; | ||
1402 | |||
1403 | if (i == 0) { | ||
1404 | unsigned long pir = mfspr(SPRN_PIR); | ||
1405 | if (__is_processor(PV_PULSAR)) { | ||
1406 | RELOC(hmt_thread_data)[i].pir = | ||
1407 | pir & 0x1f; | ||
1408 | } else { | ||
1409 | RELOC(hmt_thread_data)[i].pir = | ||
1410 | pir & 0x3ff; | ||
1411 | } | ||
1412 | } | ||
1413 | } | ||
1414 | } else { | ||
1415 | prom_printf("Processor is not HMT capable\n"); | ||
1416 | } | ||
1417 | #endif | ||
1418 | 1380 | ||
1419 | if (cpuid > NR_CPUS) | 1381 | if (cpuid > NR_CPUS) |
1420 | prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS) | 1382 | prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS) |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 400793c71304..bcb83574335b 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -561,10 +561,7 @@ void do_syscall_trace_leave(struct pt_regs *regs) | |||
561 | regs->result); | 561 | regs->result); |
562 | 562 | ||
563 | if ((test_thread_flag(TIF_SYSCALL_TRACE) | 563 | if ((test_thread_flag(TIF_SYSCALL_TRACE) |
564 | #ifdef CONFIG_PPC64 | 564 | || test_thread_flag(TIF_SINGLESTEP)) |
565 | || test_thread_flag(TIF_SINGLESTEP) | ||
566 | #endif | ||
567 | ) | ||
568 | && (current->ptrace & PT_PTRACED)) | 565 | && (current->ptrace & PT_PTRACED)) |
569 | do_syscall_trace(); | 566 | do_syscall_trace(); |
570 | } | 567 | } |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a717dff695ef..f96c49b03ba0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -311,8 +311,6 @@ void smp_release_cpus(void) | |||
311 | 311 | ||
312 | DBG(" <- smp_release_cpus()\n"); | 312 | DBG(" <- smp_release_cpus()\n"); |
313 | } | 313 | } |
314 | #else | ||
315 | #define smp_release_cpus() | ||
316 | #endif /* CONFIG_SMP || CONFIG_KEXEC */ | 314 | #endif /* CONFIG_SMP || CONFIG_KEXEC */ |
317 | 315 | ||
318 | /* | 316 | /* |
@@ -473,10 +471,12 @@ void __init setup_system(void) | |||
473 | check_smt_enabled(); | 471 | check_smt_enabled(); |
474 | smp_setup_cpu_maps(); | 472 | smp_setup_cpu_maps(); |
475 | 473 | ||
474 | #ifdef CONFIG_SMP | ||
476 | /* Release secondary cpus out of their spinloops at 0x60 now that | 475 | /* Release secondary cpus out of their spinloops at 0x60 now that |
477 | * we can map physical -> logical CPU ids | 476 | * we can map physical -> logical CPU ids |
478 | */ | 477 | */ |
479 | smp_release_cpus(); | 478 | smp_release_cpus(); |
479 | #endif | ||
480 | 480 | ||
481 | printk("Starting Linux PPC64 %s\n", system_utsname.version); | 481 | printk("Starting Linux PPC64 %s\n", system_utsname.version); |
482 | 482 | ||
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index bd837b5dbf06..d7a4e814974d 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -151,10 +151,7 @@ static inline int save_general_regs(struct pt_regs *regs, | |||
151 | elf_greg_t64 *gregs = (elf_greg_t64 *)regs; | 151 | elf_greg_t64 *gregs = (elf_greg_t64 *)regs; |
152 | int i; | 152 | int i; |
153 | 153 | ||
154 | if (!FULL_REGS(regs)) { | 154 | WARN_ON(!FULL_REGS(regs)); |
155 | set_thread_flag(TIF_SAVE_NVGPRS); | ||
156 | current_thread_info()->nvgprs_frame = frame->mc_gregs; | ||
157 | } | ||
158 | 155 | ||
159 | for (i = 0; i <= PT_RESULT; i ++) { | 156 | for (i = 0; i <= PT_RESULT; i ++) { |
160 | if (i == 14 && !FULL_REGS(regs)) | 157 | if (i == 14 && !FULL_REGS(regs)) |
@@ -215,15 +212,7 @@ static inline int get_old_sigaction(struct k_sigaction *new_ka, | |||
215 | static inline int save_general_regs(struct pt_regs *regs, | 212 | static inline int save_general_regs(struct pt_regs *regs, |
216 | struct mcontext __user *frame) | 213 | struct mcontext __user *frame) |
217 | { | 214 | { |
218 | if (!FULL_REGS(regs)) { | 215 | WARN_ON(!FULL_REGS(regs)); |
219 | /* Zero out the unsaved GPRs to avoid information | ||
220 | leak, and set TIF_SAVE_NVGPRS to ensure that the | ||
221 | registers do actually get saved later. */ | ||
222 | memset(®s->gpr[14], 0, 18 * sizeof(unsigned long)); | ||
223 | current_thread_info()->nvgprs_frame = &frame->mc_gregs; | ||
224 | set_thread_flag(TIF_SAVE_NVGPRS); | ||
225 | } | ||
226 | |||
227 | return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE); | 216 | return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE); |
228 | } | 217 | } |
229 | 218 | ||
@@ -826,8 +815,8 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int | |||
826 | } | 815 | } |
827 | 816 | ||
828 | long sys_swapcontext(struct ucontext __user *old_ctx, | 817 | long sys_swapcontext(struct ucontext __user *old_ctx, |
829 | struct ucontext __user *new_ctx, | 818 | struct ucontext __user *new_ctx, |
830 | int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) | 819 | int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) |
831 | { | 820 | { |
832 | unsigned char tmp; | 821 | unsigned char tmp; |
833 | 822 | ||
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 497a5d3df359..4324f8a8ba24 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -118,14 +118,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | |||
118 | err |= __put_user(0, &sc->v_regs); | 118 | err |= __put_user(0, &sc->v_regs); |
119 | #endif /* CONFIG_ALTIVEC */ | 119 | #endif /* CONFIG_ALTIVEC */ |
120 | err |= __put_user(&sc->gp_regs, &sc->regs); | 120 | err |= __put_user(&sc->gp_regs, &sc->regs); |
121 | if (!FULL_REGS(regs)) { | 121 | WARN_ON(!FULL_REGS(regs)); |
122 | /* Zero out the unsaved GPRs to avoid information | ||
123 | leak, and set TIF_SAVE_NVGPRS to ensure that the | ||
124 | registers do actually get saved later. */ | ||
125 | memset(®s->gpr[14], 0, 18 * sizeof(unsigned long)); | ||
126 | set_thread_flag(TIF_SAVE_NVGPRS); | ||
127 | current_thread_info()->nvgprs_frame = &sc->gp_regs; | ||
128 | } | ||
129 | err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE); | 122 | err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE); |
130 | err |= __copy_to_user(&sc->fp_regs, ¤t->thread.fpr, FP_REGS_SIZE); | 123 | err |= __copy_to_user(&sc->fp_regs, ¤t->thread.fpr, FP_REGS_SIZE); |
131 | err |= __put_user(signr, &sc->signal); | 124 | err |= __put_user(signr, &sc->signal); |
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 475249dc2350..cd75ab2908fa 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -176,7 +176,6 @@ struct timex32 { | |||
176 | }; | 176 | }; |
177 | 177 | ||
178 | extern int do_adjtimex(struct timex *); | 178 | extern int do_adjtimex(struct timex *); |
179 | extern void ppc_adjtimex(void); | ||
180 | 179 | ||
181 | asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) | 180 | asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) |
182 | { | 181 | { |
@@ -209,9 +208,6 @@ asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) | |||
209 | 208 | ||
210 | ret = do_adjtimex(&txc); | 209 | ret = do_adjtimex(&txc); |
211 | 210 | ||
212 | /* adjust the conversion of TB to time of day to track adjtimex */ | ||
213 | ppc_adjtimex(); | ||
214 | |||
215 | if(put_user(txc.modes, &utp->modes) || | 211 | if(put_user(txc.modes, &utp->modes) || |
216 | __put_user(txc.offset, &utp->offset) || | 212 | __put_user(txc.offset, &utp->offset) || |
217 | __put_user(txc.freq, &utp->freq) || | 213 | __put_user(txc.freq, &utp->freq) || |
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 8a9f994ed917..1ad55f0466fd 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -288,7 +288,7 @@ COMPAT_SYS(clock_settime) | |||
288 | COMPAT_SYS(clock_gettime) | 288 | COMPAT_SYS(clock_gettime) |
289 | COMPAT_SYS(clock_getres) | 289 | COMPAT_SYS(clock_getres) |
290 | COMPAT_SYS(clock_nanosleep) | 290 | COMPAT_SYS(clock_nanosleep) |
291 | COMPAT_SYS(swapcontext) | 291 | SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext) |
292 | COMPAT_SYS(tgkill) | 292 | COMPAT_SYS(tgkill) |
293 | COMPAT_SYS(utimes) | 293 | COMPAT_SYS(utimes) |
294 | COMPAT_SYS(statfs64) | 294 | COMPAT_SYS(statfs64) |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1886045a2fd8..2a7ddc579379 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/security.h> | 50 | #include <linux/security.h> |
51 | #include <linux/percpu.h> | 51 | #include <linux/percpu.h> |
52 | #include <linux/rtc.h> | 52 | #include <linux/rtc.h> |
53 | #include <linux/jiffies.h> | ||
53 | 54 | ||
54 | #include <asm/io.h> | 55 | #include <asm/io.h> |
55 | #include <asm/processor.h> | 56 | #include <asm/processor.h> |
@@ -99,7 +100,15 @@ EXPORT_SYMBOL(tb_ticks_per_usec); | |||
99 | unsigned long tb_ticks_per_sec; | 100 | unsigned long tb_ticks_per_sec; |
100 | u64 tb_to_xs; | 101 | u64 tb_to_xs; |
101 | unsigned tb_to_us; | 102 | unsigned tb_to_us; |
102 | unsigned long processor_freq; | 103 | |
104 | #define TICKLEN_SCALE (SHIFT_SCALE - 10) | ||
105 | u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */ | ||
106 | u64 ticklen_to_xs; /* 0.64 fraction */ | ||
107 | |||
108 | /* If last_tick_len corresponds to about 1/HZ seconds, then | ||
109 | last_tick_len << TICKLEN_SHIFT will be about 2^63. */ | ||
110 | #define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ) | ||
111 | |||
103 | DEFINE_SPINLOCK(rtc_lock); | 112 | DEFINE_SPINLOCK(rtc_lock); |
104 | EXPORT_SYMBOL_GPL(rtc_lock); | 113 | EXPORT_SYMBOL_GPL(rtc_lock); |
105 | 114 | ||
@@ -113,10 +122,6 @@ extern unsigned long wall_jiffies; | |||
113 | extern struct timezone sys_tz; | 122 | extern struct timezone sys_tz; |
114 | static long timezone_offset; | 123 | static long timezone_offset; |
115 | 124 | ||
116 | void ppc_adjtimex(void); | ||
117 | |||
118 | static unsigned adjusting_time = 0; | ||
119 | |||
120 | unsigned long ppc_proc_freq; | 125 | unsigned long ppc_proc_freq; |
121 | unsigned long ppc_tb_freq; | 126 | unsigned long ppc_tb_freq; |
122 | 127 | ||
@@ -178,8 +183,7 @@ static __inline__ void timer_check_rtc(void) | |||
178 | */ | 183 | */ |
179 | if (ppc_md.set_rtc_time && ntp_synced() && | 184 | if (ppc_md.set_rtc_time && ntp_synced() && |
180 | xtime.tv_sec - last_rtc_update >= 659 && | 185 | xtime.tv_sec - last_rtc_update >= 659 && |
181 | abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && | 186 | abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) { |
182 | jiffies - wall_jiffies == 1) { | ||
183 | struct rtc_time tm; | 187 | struct rtc_time tm; |
184 | to_tm(xtime.tv_sec + 1 + timezone_offset, &tm); | 188 | to_tm(xtime.tv_sec + 1 + timezone_offset, &tm); |
185 | tm.tm_year -= 1900; | 189 | tm.tm_year -= 1900; |
@@ -226,15 +230,14 @@ void do_gettimeofday(struct timeval *tv) | |||
226 | if (__USE_RTC()) { | 230 | if (__USE_RTC()) { |
227 | /* do this the old way */ | 231 | /* do this the old way */ |
228 | unsigned long flags, seq; | 232 | unsigned long flags, seq; |
229 | unsigned int sec, nsec, usec, lost; | 233 | unsigned int sec, nsec, usec; |
230 | 234 | ||
231 | do { | 235 | do { |
232 | seq = read_seqbegin_irqsave(&xtime_lock, flags); | 236 | seq = read_seqbegin_irqsave(&xtime_lock, flags); |
233 | sec = xtime.tv_sec; | 237 | sec = xtime.tv_sec; |
234 | nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp); | 238 | nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp); |
235 | lost = jiffies - wall_jiffies; | ||
236 | } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); | 239 | } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); |
237 | usec = nsec / 1000 + lost * (1000000 / HZ); | 240 | usec = nsec / 1000; |
238 | while (usec >= 1000000) { | 241 | while (usec >= 1000000) { |
239 | usec -= 1000000; | 242 | usec -= 1000000; |
240 | ++sec; | 243 | ++sec; |
@@ -248,23 +251,6 @@ void do_gettimeofday(struct timeval *tv) | |||
248 | 251 | ||
249 | EXPORT_SYMBOL(do_gettimeofday); | 252 | EXPORT_SYMBOL(do_gettimeofday); |
250 | 253 | ||
251 | /* Synchronize xtime with do_gettimeofday */ | ||
252 | |||
253 | static inline void timer_sync_xtime(unsigned long cur_tb) | ||
254 | { | ||
255 | #ifdef CONFIG_PPC64 | ||
256 | /* why do we do this? */ | ||
257 | struct timeval my_tv; | ||
258 | |||
259 | __do_gettimeofday(&my_tv, cur_tb); | ||
260 | |||
261 | if (xtime.tv_sec <= my_tv.tv_sec) { | ||
262 | xtime.tv_sec = my_tv.tv_sec; | ||
263 | xtime.tv_nsec = my_tv.tv_usec * 1000; | ||
264 | } | ||
265 | #endif | ||
266 | } | ||
267 | |||
268 | /* | 254 | /* |
269 | * There are two copies of tb_to_xs and stamp_xsec so that no | 255 | * There are two copies of tb_to_xs and stamp_xsec so that no |
270 | * lock is needed to access and use these values in | 256 | * lock is needed to access and use these values in |
@@ -323,15 +309,30 @@ static __inline__ void timer_recalc_offset(u64 cur_tb) | |||
323 | { | 309 | { |
324 | unsigned long offset; | 310 | unsigned long offset; |
325 | u64 new_stamp_xsec; | 311 | u64 new_stamp_xsec; |
312 | u64 tlen, t2x; | ||
326 | 313 | ||
327 | if (__USE_RTC()) | 314 | if (__USE_RTC()) |
328 | return; | 315 | return; |
316 | tlen = current_tick_length(); | ||
329 | offset = cur_tb - do_gtod.varp->tb_orig_stamp; | 317 | offset = cur_tb - do_gtod.varp->tb_orig_stamp; |
330 | if ((offset & 0x80000000u) == 0) | 318 | if (tlen == last_tick_len && offset < 0x80000000u) { |
331 | return; | 319 | /* check that we're still in sync; if not, resync */ |
332 | new_stamp_xsec = do_gtod.varp->stamp_xsec | 320 | struct timeval tv; |
333 | + mulhdu(offset, do_gtod.varp->tb_to_xs); | 321 | __do_gettimeofday(&tv, cur_tb); |
334 | update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs); | 322 | if (tv.tv_sec <= xtime.tv_sec && |
323 | (tv.tv_sec < xtime.tv_sec || | ||
324 | tv.tv_usec * 1000 <= xtime.tv_nsec)) | ||
325 | return; | ||
326 | } | ||
327 | if (tlen != last_tick_len) { | ||
328 | t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs); | ||
329 | last_tick_len = tlen; | ||
330 | } else | ||
331 | t2x = do_gtod.varp->tb_to_xs; | ||
332 | new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; | ||
333 | do_div(new_stamp_xsec, 1000000000); | ||
334 | new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; | ||
335 | update_gtod(cur_tb, new_stamp_xsec, t2x); | ||
335 | } | 336 | } |
336 | 337 | ||
337 | #ifdef CONFIG_SMP | 338 | #ifdef CONFIG_SMP |
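Editorial note on the hunk above: the rewritten timer_recalc_offset() picks up NTP's current tick length and, whenever it has changed, derives a fresh tb_to_xs conversion factor from it; otherwise the existing tb_to_xs is reused and stamp_xsec is simply re-anchored to xtime. A minimal userspace sketch of that fixed-point step (not part of the patch) is below; mulhdu64() stands in for the kernel's mulhdu() (high 64 bits of a 64x64-bit product), and the ticklen_to_xs and TICKLEN_SHIFT inputs are assumed to be the ones computed in time_init().

    #include <stdint.h>

    /* High 64 bits of a 64x64-bit product, i.e. the kernel's mulhdu(). */
    static uint64_t mulhdu64(uint64_t a, uint64_t b)
    {
            return (uint64_t)(((__uint128_t)a * b) >> 64);
    }

    /*
     * tlen: tick length in ns scaled by 2^TICKLEN_SCALE, as returned by
     *       current_tick_length().
     * ticklen_to_xs: 0.64 fixed-point factor precomputed in time_init().
     * Returns tb_to_xs: xsec (1/2^20 s) per timebase tick, 0.64 fixed point.
     */
    static uint64_t tick_len_to_tb_to_xs(uint64_t tlen, uint64_t ticklen_to_xs,
                                         unsigned int ticklen_shift)
    {
            return mulhdu64(tlen << ticklen_shift, ticklen_to_xs);
    }
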
@@ -462,13 +463,10 @@ void timer_interrupt(struct pt_regs * regs) | |||
462 | write_seqlock(&xtime_lock); | 463 | write_seqlock(&xtime_lock); |
463 | tb_last_jiffy += tb_ticks_per_jiffy; | 464 | tb_last_jiffy += tb_ticks_per_jiffy; |
464 | tb_last_stamp = per_cpu(last_jiffy, cpu); | 465 | tb_last_stamp = per_cpu(last_jiffy, cpu); |
465 | timer_recalc_offset(tb_last_jiffy); | ||
466 | do_timer(regs); | 466 | do_timer(regs); |
467 | timer_sync_xtime(tb_last_jiffy); | 467 | timer_recalc_offset(tb_last_jiffy); |
468 | timer_check_rtc(); | 468 | timer_check_rtc(); |
469 | write_sequnlock(&xtime_lock); | 469 | write_sequnlock(&xtime_lock); |
470 | if (adjusting_time && (time_adjust == 0)) | ||
471 | ppc_adjtimex(); | ||
472 | } | 470 | } |
473 | 471 | ||
474 | next_dec = tb_ticks_per_jiffy - ticks; | 472 | next_dec = tb_ticks_per_jiffy - ticks; |
@@ -492,16 +490,18 @@ void timer_interrupt(struct pt_regs * regs) | |||
492 | 490 | ||
493 | void wakeup_decrementer(void) | 491 | void wakeup_decrementer(void) |
494 | { | 492 | { |
495 | int i; | 493 | unsigned long ticks; |
496 | 494 | ||
497 | set_dec(tb_ticks_per_jiffy); | ||
498 | /* | 495 | /* |
499 | * We don't expect this to be called on a machine with a 601, | 496 | * The timebase gets saved on sleep and restored on wakeup, |
500 | * so using get_tbl is fine. | 497 | * so all we need to do is to reset the decrementer. |
501 | */ | 498 | */ |
502 | tb_last_stamp = tb_last_jiffy = get_tb(); | 499 | ticks = tb_ticks_since(__get_cpu_var(last_jiffy)); |
503 | for_each_cpu(i) | 500 | if (ticks < tb_ticks_per_jiffy) |
504 | per_cpu(last_jiffy, i) = tb_last_stamp; | 501 | ticks = tb_ticks_per_jiffy - ticks; |
502 | else | ||
503 | ticks = 1; | ||
504 | set_dec(ticks); | ||
505 | } | 505 | } |
506 | 506 | ||
507 | #ifdef CONFIG_SMP | 507 | #ifdef CONFIG_SMP |
@@ -541,8 +541,8 @@ int do_settimeofday(struct timespec *tv) | |||
541 | time_t wtm_sec, new_sec = tv->tv_sec; | 541 | time_t wtm_sec, new_sec = tv->tv_sec; |
542 | long wtm_nsec, new_nsec = tv->tv_nsec; | 542 | long wtm_nsec, new_nsec = tv->tv_nsec; |
543 | unsigned long flags; | 543 | unsigned long flags; |
544 | long int tb_delta; | 544 | u64 new_xsec; |
545 | u64 new_xsec, tb_delta_xs; | 545 | unsigned long tb_delta; |
546 | 546 | ||
547 | if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) | 547 | if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) |
548 | return -EINVAL; | 548 | return -EINVAL; |
@@ -563,9 +563,19 @@ int do_settimeofday(struct timespec *tv) | |||
563 | first_settimeofday = 0; | 563 | first_settimeofday = 0; |
564 | } | 564 | } |
565 | #endif | 565 | #endif |
566 | |||
567 | /* | ||
568 | * Subtract off the number of nanoseconds since the | ||
569 | * beginning of the last tick. | ||
570 | * Note that since we don't increment jiffies_64 anywhere other | ||
571 | * than in do_timer (since we don't have a lost tick problem), | ||
572 | * wall_jiffies will always be the same as jiffies, | ||
573 | * and therefore the (jiffies - wall_jiffies) computation | ||
574 | * has been removed. | ||
575 | */ | ||
566 | tb_delta = tb_ticks_since(tb_last_stamp); | 576 | tb_delta = tb_ticks_since(tb_last_stamp); |
567 | tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; | 577 | tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */ |
568 | tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); | 578 | new_nsec -= SCALE_XSEC(tb_delta, 1000000000); |
569 | 579 | ||
570 | wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); | 580 | wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); |
571 | wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); | 581 | wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); |
@@ -580,12 +590,12 @@ int do_settimeofday(struct timespec *tv) | |||
580 | 590 | ||
581 | ntp_clear(); | 591 | ntp_clear(); |
582 | 592 | ||
583 | new_xsec = 0; | 593 | new_xsec = xtime.tv_nsec; |
584 | if (new_nsec != 0) { | 594 | if (new_xsec != 0) { |
585 | new_xsec = (u64)new_nsec * XSEC_PER_SEC; | 595 | new_xsec *= XSEC_PER_SEC; |
586 | do_div(new_xsec, NSEC_PER_SEC); | 596 | do_div(new_xsec, NSEC_PER_SEC); |
587 | } | 597 | } |
588 | new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs; | 598 | new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC; |
589 | update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs); | 599 | update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs); |
590 | 600 | ||
591 | vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; | 601 | vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; |
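Editorial note on the do_settimeofday() hunk above: the elapsed timebase ticks since the last tick are now converted once, first into xsec with mulhdu() and then into nanoseconds with SCALE_XSEC(), instead of carrying both a tick delta and an xsec delta. A rough userspace rendering follows; it assumes SCALE_XSEC(xsec, n) scales an xsec count so that a full second maps to n (roughly xsec * n / 2^20), and it takes XSEC_PER_SEC as 2^20, consistent with the 1024*1024 factor in the old tb_to_xs computation.

    #include <stdint.h>

    #define XSEC_PER_SEC    (1024 * 1024)   /* 2^20 xsec per second (assumed) */

    static uint64_t mulhdu64(uint64_t a, uint64_t b)
    {
            return (uint64_t)(((__uint128_t)a * b) >> 64);
    }

    /*
     * tb_delta: timebase ticks since tb_last_stamp (well under one second).
     * tb_to_xs: 0.64 fixed-point xsec per tick.
     * Returns the delta in nanoseconds, mirroring mulhdu() + SCALE_XSEC().
     */
    static uint64_t tb_delta_to_ns(uint64_t tb_delta, uint64_t tb_to_xs)
    {
            uint64_t xsec = mulhdu64(tb_delta, tb_to_xs);       /* ticks -> xsec */
            return (xsec * 1000000000ULL) / XSEC_PER_SEC;       /* xsec -> ns    */
    }

The later new_xsec computation in the same hunk then re-derives stamp_xsec directly from the just-updated xtime, in the same xsec units.
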
@@ -671,7 +681,7 @@ void __init time_init(void) | |||
671 | unsigned long flags; | 681 | unsigned long flags; |
672 | unsigned long tm = 0; | 682 | unsigned long tm = 0; |
673 | struct div_result res; | 683 | struct div_result res; |
674 | u64 scale; | 684 | u64 scale, x; |
675 | unsigned shift; | 685 | unsigned shift; |
676 | 686 | ||
677 | if (ppc_md.time_init != NULL) | 687 | if (ppc_md.time_init != NULL) |
@@ -693,11 +703,36 @@ void __init time_init(void) | |||
693 | } | 703 | } |
694 | 704 | ||
695 | tb_ticks_per_jiffy = ppc_tb_freq / HZ; | 705 | tb_ticks_per_jiffy = ppc_tb_freq / HZ; |
696 | tb_ticks_per_sec = tb_ticks_per_jiffy * HZ; | 706 | tb_ticks_per_sec = ppc_tb_freq; |
697 | tb_ticks_per_usec = ppc_tb_freq / 1000000; | 707 | tb_ticks_per_usec = ppc_tb_freq / 1000000; |
698 | tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); | 708 | tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); |
699 | div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res); | 709 | |
700 | tb_to_xs = res.result_low; | 710 | /* |
711 | * Calculate the length of each tick in ns. It will not be | ||
712 | * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ. | ||
713 | * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq, | ||
714 | * rounded up. | ||
715 | */ | ||
716 | x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1; | ||
717 | do_div(x, ppc_tb_freq); | ||
718 | tick_nsec = x; | ||
719 | last_tick_len = x << TICKLEN_SCALE; | ||
720 | |||
721 | /* | ||
722 | * Compute ticklen_to_xs, the factor that gets multiplied | ||
723 | * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value. | ||
724 | * It is computed as: | ||
725 | * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9) | ||
726 | * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT | ||
727 | * so as to give the result as a 0.64 fixed-point fraction. | ||
728 | */ | ||
729 | div128_by_32(1ULL << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT), 0, | ||
730 | tb_ticks_per_jiffy, &res); | ||
731 | div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res); | ||
732 | ticklen_to_xs = res.result_low; | ||
733 | |||
734 | /* Compute tb_to_xs from tick_nsec */ | ||
735 | tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs); | ||
701 | 736 | ||
702 | /* | 737 | /* |
703 | * Compute scale factor for sched_clock. | 738 | * Compute scale factor for sched_clock. |
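Editorial note on the time_init() hunk above: tick_nsec, last_tick_len, ticklen_to_xs and tb_to_xs are defined in terms of each other, and the small userspace program below re-derives them for a hypothetical board with ppc_tb_freq = 512 MHz and HZ = 250. SHIFT_HZ is taken as 8 and TICKLEN_SCALE as 32; both are assumptions, since their real definitions sit earlier in the file and are not shown in this hunk. With a nominal tick length, the resulting tb_to_xs comes out essentially equal (up to rounding) to the old 2^20-over-tb_ticks_per_sec value.

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC    1000000000ULL
    #define HZ              250
    #define SHIFT_HZ        8       /* value assumed for HZ = 250 */
    #define TICKLEN_SCALE   32      /* assumed; real value defined earlier in the file */
    #define TICKLEN_SHIFT   (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

    int main(void)
    {
            uint64_t ppc_tb_freq = 512000000ULL;            /* hypothetical 512 MHz */
            uint64_t tb_ticks_per_jiffy = ppc_tb_freq / HZ;

            /* Tick length in ns, rounded up, as in the patch. */
            uint64_t tick_nsec = (NSEC_PER_SEC * tb_ticks_per_jiffy
                                  + ppc_tb_freq - 1) / ppc_tb_freq;
            uint64_t last_tick_len = tick_nsec << TICKLEN_SCALE;

            /*
             * ticklen_to_xs = 2^(64+20-TICKLEN_SCALE-TICKLEN_SHIFT)
             *                 / (tb_ticks_per_jiffy * 1e9)
             * as a 0.64 fixed-point fraction; __uint128_t arithmetic replaces
             * the kernel's div128_by_32() helper.
             */
            __uint128_t num = ((__uint128_t)1
                               << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT)) << 64;
            uint64_t ticklen_to_xs = (uint64_t)(num / tb_ticks_per_jiffy
                                                / NSEC_PER_SEC);

            /* tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs) */
            __uint128_t prod = (__uint128_t)(last_tick_len << TICKLEN_SHIFT)
                               * ticklen_to_xs;
            uint64_t tb_to_xs = (uint64_t)(prod >> 64);

            /* For comparison: the old 2^20 << 64 / tb_ticks_per_sec result. */
            uint64_t old_tb_to_xs = (uint64_t)(((__uint128_t)1 << 84) / ppc_tb_freq);

            printf("tick_nsec=%llu tb_to_xs=%#llx old=%#llx\n",
                   (unsigned long long)tick_nsec,
                   (unsigned long long)tb_to_xs,
                   (unsigned long long)old_tb_to_xs);
            return 0;
    }
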
@@ -724,6 +759,14 @@ void __init time_init(void) | |||
724 | tm = get_boot_time(); | 759 | tm = get_boot_time(); |
725 | 760 | ||
726 | write_seqlock_irqsave(&xtime_lock, flags); | 761 | write_seqlock_irqsave(&xtime_lock, flags); |
762 | |||
763 | /* If platform provided a timezone (pmac), we correct the time */ | ||
764 | if (timezone_offset) { | ||
765 | sys_tz.tz_minuteswest = -timezone_offset / 60; | ||
766 | sys_tz.tz_dsttime = 0; | ||
767 | tm -= timezone_offset; | ||
768 | } | ||
769 | |||
727 | xtime.tv_sec = tm; | 770 | xtime.tv_sec = tm; |
728 | xtime.tv_nsec = 0; | 771 | xtime.tv_nsec = 0; |
729 | do_gtod.varp = &do_gtod.vars[0]; | 772 | do_gtod.varp = &do_gtod.vars[0]; |
@@ -738,18 +781,11 @@ void __init time_init(void) | |||
738 | vdso_data->tb_orig_stamp = tb_last_jiffy; | 781 | vdso_data->tb_orig_stamp = tb_last_jiffy; |
739 | vdso_data->tb_update_count = 0; | 782 | vdso_data->tb_update_count = 0; |
740 | vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; | 783 | vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; |
741 | vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; | 784 | vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; |
742 | vdso_data->tb_to_xs = tb_to_xs; | 785 | vdso_data->tb_to_xs = tb_to_xs; |
743 | 786 | ||
744 | time_freq = 0; | 787 | time_freq = 0; |
745 | 788 | ||
746 | /* If platform provided a timezone (pmac), we correct the time */ | ||
747 | if (timezone_offset) { | ||
748 | sys_tz.tz_minuteswest = -timezone_offset / 60; | ||
749 | sys_tz.tz_dsttime = 0; | ||
750 | xtime.tv_sec -= timezone_offset; | ||
751 | } | ||
752 | |||
753 | last_rtc_update = xtime.tv_sec; | 789 | last_rtc_update = xtime.tv_sec; |
754 | set_normalized_timespec(&wall_to_monotonic, | 790 | set_normalized_timespec(&wall_to_monotonic, |
755 | -xtime.tv_sec, -xtime.tv_nsec); | 791 | -xtime.tv_sec, -xtime.tv_nsec); |
@@ -759,126 +795,6 @@ void __init time_init(void) | |||
759 | set_dec(tb_ticks_per_jiffy); | 795 | set_dec(tb_ticks_per_jiffy); |
760 | } | 796 | } |
761 | 797 | ||
762 | /* | ||
763 | * After adjtimex is called, adjust the conversion of tb ticks | ||
764 | * to microseconds to keep do_gettimeofday synchronized | ||
765 | * with ntpd. | ||
766 | * | ||
767 | * Use the time_adjust, time_freq and time_offset computed by adjtimex to | ||
768 | * adjust the frequency. | ||
769 | */ | ||
770 | |||
771 | /* #define DEBUG_PPC_ADJTIMEX 1 */ | ||
772 | |||
773 | void ppc_adjtimex(void) | ||
774 | { | ||
775 | #ifdef CONFIG_PPC64 | ||
776 | unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, | ||
777 | new_tb_to_xs, new_xsec, new_stamp_xsec; | ||
778 | unsigned long tb_ticks_per_sec_delta; | ||
779 | long delta_freq, ltemp; | ||
780 | struct div_result divres; | ||
781 | unsigned long flags; | ||
782 | long singleshot_ppm = 0; | ||
783 | |||
784 | /* | ||
785 | * Compute parts per million frequency adjustment to | ||
786 | * accomplish the time adjustment implied by time_offset to be | ||
787 | * applied over the elapsed time indicated by time_constant. | ||
788 | * Use SHIFT_USEC to get it into the same units as | ||
789 | * time_freq. | ||
790 | */ | ||
791 | if ( time_offset < 0 ) { | ||
792 | ltemp = -time_offset; | ||
793 | ltemp <<= SHIFT_USEC - SHIFT_UPDATE; | ||
794 | ltemp >>= SHIFT_KG + time_constant; | ||
795 | ltemp = -ltemp; | ||
796 | } else { | ||
797 | ltemp = time_offset; | ||
798 | ltemp <<= SHIFT_USEC - SHIFT_UPDATE; | ||
799 | ltemp >>= SHIFT_KG + time_constant; | ||
800 | } | ||
801 | |||
802 | /* If there is a single shot time adjustment in progress */ | ||
803 | if ( time_adjust ) { | ||
804 | #ifdef DEBUG_PPC_ADJTIMEX | ||
805 | printk("ppc_adjtimex: "); | ||
806 | if ( adjusting_time == 0 ) | ||
807 | printk("starting "); | ||
808 | printk("single shot time_adjust = %ld\n", time_adjust); | ||
809 | #endif | ||
810 | |||
811 | adjusting_time = 1; | ||
812 | |||
813 | /* | ||
814 | * Compute parts per million frequency adjustment | ||
815 | * to match time_adjust | ||
816 | */ | ||
817 | singleshot_ppm = tickadj * HZ; | ||
818 | /* | ||
819 | * The adjustment should be tickadj*HZ to match the code in | ||
820 | * linux/kernel/timer.c, but experiments show that this is too | ||
821 | * large. 3/4 of tickadj*HZ seems about right | ||
822 | */ | ||
823 | singleshot_ppm -= singleshot_ppm / 4; | ||
824 | /* Use SHIFT_USEC to get it into the same units as time_freq */ | ||
825 | singleshot_ppm <<= SHIFT_USEC; | ||
826 | if ( time_adjust < 0 ) | ||
827 | singleshot_ppm = -singleshot_ppm; | ||
828 | } | ||
829 | else { | ||
830 | #ifdef DEBUG_PPC_ADJTIMEX | ||
831 | if ( adjusting_time ) | ||
832 | printk("ppc_adjtimex: ending single shot time_adjust\n"); | ||
833 | #endif | ||
834 | adjusting_time = 0; | ||
835 | } | ||
836 | |||
837 | /* Add up all of the frequency adjustments */ | ||
838 | delta_freq = time_freq + ltemp + singleshot_ppm; | ||
839 | |||
840 | /* | ||
841 | * Compute a new value for tb_ticks_per_sec based on | ||
842 | * the frequency adjustment | ||
843 | */ | ||
844 | den = 1000000 * (1 << (SHIFT_USEC - 8)); | ||
845 | if ( delta_freq < 0 ) { | ||
846 | tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den; | ||
847 | new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta; | ||
848 | } | ||
849 | else { | ||
850 | tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den; | ||
851 | new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta; | ||
852 | } | ||
853 | |||
854 | #ifdef DEBUG_PPC_ADJTIMEX | ||
855 | printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm); | ||
856 | printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec); | ||
857 | #endif | ||
858 | |||
859 | /* | ||
860 | * Compute a new value of tb_to_xs (used to convert tb to | ||
861 | * microseconds) and a new value of stamp_xsec which is the | ||
862 | * time (in 1/2^20 second units) corresponding to | ||
863 | * tb_orig_stamp. This new value of stamp_xsec compensates | ||
864 | * for the change in frequency (implied by the new tb_to_xs) | ||
865 | * which guarantees that the current time remains the same. | ||
866 | */ | ||
867 | write_seqlock_irqsave( &xtime_lock, flags ); | ||
868 | tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp; | ||
869 | div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres); | ||
870 | new_tb_to_xs = divres.result_low; | ||
871 | new_xsec = mulhdu(tb_ticks, new_tb_to_xs); | ||
872 | |||
873 | old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs); | ||
874 | new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec; | ||
875 | |||
876 | update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs); | ||
877 | |||
878 | write_sequnlock_irqrestore( &xtime_lock, flags ); | ||
879 | #endif /* CONFIG_PPC64 */ | ||
880 | } | ||
881 | |||
882 | 798 | ||
883 | #define FEBRUARY 2 | 799 | #define FEBRUARY 2 |
884 | #define STARTOFTIME 1970 | 800 | #define STARTOFTIME 1970 |
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 7509aa6474f2..98660aedeeb7 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -814,6 +814,8 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
814 | return; | 814 | return; |
815 | } | 815 | } |
816 | 816 | ||
817 | local_irq_enable(); | ||
818 | |||
817 | /* Try to emulate it if we should. */ | 819 | /* Try to emulate it if we should. */ |
818 | if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { | 820 | if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { |
819 | switch (emulate_instruction(regs)) { | 821 | switch (emulate_instruction(regs)) { |
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index f0c47dab0903..04f7df39ffbb 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c | |||
@@ -182,8 +182,8 @@ static struct page * vdso_vma_nopage(struct vm_area_struct * vma, | |||
182 | unsigned long offset = address - vma->vm_start; | 182 | unsigned long offset = address - vma->vm_start; |
183 | struct page *pg; | 183 | struct page *pg; |
184 | #ifdef CONFIG_PPC64 | 184 | #ifdef CONFIG_PPC64 |
185 | void *vbase = test_thread_flag(TIF_32BIT) ? | 185 | void *vbase = (vma->vm_mm->task_size > TASK_SIZE_USER32) ? |
186 | vdso32_kbase : vdso64_kbase; | 186 | vdso64_kbase : vdso32_kbase; |
187 | #else | 187 | #else |
188 | void *vbase = vdso32_kbase; | 188 | void *vbase = vdso32_kbase; |
189 | #endif | 189 | #endif |
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S index ccaeda5136d1..4ee871f1cadb 100644 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S | |||
@@ -225,9 +225,9 @@ V_FUNCTION_BEGIN(__do_get_xsec) | |||
225 | .cfi_startproc | 225 | .cfi_startproc |
226 | /* check for update count & load values */ | 226 | /* check for update count & load values */ |
227 | 1: ld r8,CFG_TB_UPDATE_COUNT(r3) | 227 | 1: ld r8,CFG_TB_UPDATE_COUNT(r3) |
228 | andi. r0,r4,1 /* pending update ? loop */ | 228 | andi. r0,r8,1 /* pending update ? loop */ |
229 | bne- 1b | 229 | bne- 1b |
230 | xor r0,r4,r4 /* create dependency */ | 230 | xor r0,r8,r8 /* create dependency */ |
231 | add r3,r3,r0 | 231 | add r3,r3,r0 |
232 | 232 | ||
233 | /* Get TB & offset it */ | 233 | /* Get TB & offset it */ |
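Editorial note on the gettimeofday.S fix above: the loop must test the update count it has just loaded into r8, not a stale value in r4, and the xor/add pair that manufactures a data dependency on r8 (used here in place of a read barrier) must likewise involve r8. In C terms the reader side of this protocol looks roughly like the sketch below; the structure and field names are illustrative rather than the actual vdso data-page layout, the closing re-check of the count is the usual pattern rather than something visible in this hunk, and barrier details are omitted.

    #include <stdint.h>

    /* Illustrative layout; the real vdso data page differs. */
    struct gtod_vars {
            volatile uint64_t tb_update_count;  /* odd while an update is in progress */
            volatile uint64_t tb_orig_stamp;
            volatile uint64_t tb_to_xs;         /* 0.64 fixed-point xsec per tick */
            volatile uint64_t stamp_xsec;
    };

    static uint64_t read_xsec(const struct gtod_vars *v, uint64_t (*get_tb)(void))
    {
            uint64_t count, xsec;

            do {
                    /* Wait out a pending update, as the "andi./bne- 1b" loop does. */
                    do {
                            count = v->tb_update_count;
                    } while (count & 1);

                    uint64_t ticks = get_tb() - v->tb_orig_stamp;
                    xsec = v->stamp_xsec
                           + (uint64_t)(((__uint128_t)ticks * v->tb_to_xs) >> 64);

                    /* Retry if the kernel republished the values meanwhile. */
            } while (v->tb_update_count != count);

            return xsec;
    }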