aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--arch/powerpc/kernel/Makefile43
-rw-r--r--arch/powerpc/kernel/align.c307
-rw-r--r--arch/powerpc/kernel/asm-offsets.c12
-rw-r--r--arch/powerpc/kernel/btext.c1
-rw-r--r--arch/powerpc/kernel/clock.c82
-rw-r--r--arch/powerpc/kernel/cpu_setup_44x.S56
-rw-r--r--arch/powerpc/kernel/cputable.c119
-rw-r--r--arch/powerpc/kernel/crash.c1
-rw-r--r--arch/powerpc/kernel/crash_dump.c4
-rw-r--r--arch/powerpc/kernel/entry_32.S4
-rw-r--r--arch/powerpc/kernel/entry_64.S24
-rw-r--r--arch/powerpc/kernel/head_32.S71
-rw-r--r--arch/powerpc/kernel/head_40x.S (renamed from arch/powerpc/kernel/head_4xx.S)28
-rw-r--r--arch/powerpc/kernel/head_44x.S30
-rw-r--r--arch/powerpc/kernel/head_64.S604
-rw-r--r--arch/powerpc/kernel/head_8xx.S27
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S99
-rw-r--r--arch/powerpc/kernel/ibmebus.c7
-rw-r--r--arch/powerpc/kernel/idle.c3
-rw-r--r--arch/powerpc/kernel/iomap.c4
-rw-r--r--arch/powerpc/kernel/iommu.c1
-rw-r--r--arch/powerpc/kernel/irq.c97
-rw-r--r--arch/powerpc/kernel/legacy_serial.c5
-rw-r--r--arch/powerpc/kernel/lparcfg.c2
-rw-r--r--arch/powerpc/kernel/lparmap.c32
-rw-r--r--arch/powerpc/kernel/nvram_64.c23
-rw-r--r--arch/powerpc/kernel/of_platform.c17
-rw-r--r--arch/powerpc/kernel/pci-common.c7
-rw-r--r--arch/powerpc/kernel/pci_32.c4
-rw-r--r--arch/powerpc/kernel/pci_64.c2
-rw-r--r--arch/powerpc/kernel/pci_dn.c7
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c8
-rw-r--r--arch/powerpc/kernel/process.c32
-rw-r--r--arch/powerpc/kernel/prom.c24
-rw-r--r--arch/powerpc/kernel/prom_init.c23
-rw-r--r--arch/powerpc/kernel/ptrace.c10
-rw-r--r--arch/powerpc/kernel/ptrace32.c8
-rw-r--r--arch/powerpc/kernel/rtas_pci.c4
-rw-r--r--arch/powerpc/kernel/setup-common.c2
-rw-r--r--arch/powerpc/kernel/setup_32.c10
-rw-r--r--arch/powerpc/kernel/setup_64.c9
-rw-r--r--arch/powerpc/kernel/signal.c6
-rw-r--r--arch/powerpc/kernel/signal_32.c38
-rw-r--r--arch/powerpc/kernel/signal_64.c15
-rw-r--r--arch/powerpc/kernel/smp.c29
-rw-r--r--arch/powerpc/kernel/softemu8xx.c202
-rw-r--r--arch/powerpc/kernel/sysfs.c64
-rw-r--r--arch/powerpc/kernel/systbl.S2
-rw-r--r--arch/powerpc/kernel/time.c503
-rw-r--r--arch/powerpc/kernel/traps.c97
-rw-r--r--arch/powerpc/kernel/udbg.c2
-rw-r--r--arch/powerpc/kernel/udbg_16550.c11
-rw-r--r--arch/powerpc/kernel/vdso.c2
-rw-r--r--arch/powerpc/kernel/vdso32/.gitignore1
-rw-r--r--arch/powerpc/kernel/vdso32/Makefile20
-rw-r--r--arch/powerpc/kernel/vdso64/.gitignore1
-rw-r--r--arch/powerpc/kernel/vdso64/Makefile19
-rw-r--r--arch/powerpc/kernel/vio.c104
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S2
59 files changed, 1614 insertions, 1357 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b0cb2e662c25..ca51f0cf27ab 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_PPC64) += vdso64/
24obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o 24obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
25obj-$(CONFIG_PPC_970_NAP) += idle_power4.o 25obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
26obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o 26obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o
27obj-$(CONFIG_PPC_CLOCK) += clock.o
27procfs-$(CONFIG_PPC64) := proc_ppc64.o 28procfs-$(CONFIG_PPC64) := proc_ppc64.o
28obj-$(CONFIG_PROC_FS) += $(procfs-y) 29obj-$(CONFIG_PROC_FS) += $(procfs-y)
29rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o 30rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
@@ -37,25 +38,27 @@ obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
37obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 38obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
38obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o 39obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
39obj-$(CONFIG_TAU) += tau_6xx.o 40obj-$(CONFIG_TAU) += tau_6xx.o
40obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o 41obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o \
41obj32-$(CONFIG_HIBERNATION) += swsusp_32.o 42 swsusp_$(CONFIG_WORD_SIZE).o
42obj64-$(CONFIG_HIBERNATION) += swsusp_64.o swsusp_asm64.o 43obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o
43obj32-$(CONFIG_MODULES) += module_32.o 44obj-$(CONFIG_MODULES) += module_$(CONFIG_WORD_SIZE).o
45obj-$(CONFIG_44x) += cpu_setup_44x.o
44 46
45ifeq ($(CONFIG_PPC_MERGE),y) 47ifeq ($(CONFIG_PPC_MERGE),y)
46 48
47extra-$(CONFIG_PPC_STD_MMU) := head_32.o 49extra-$(CONFIG_PPC_STD_MMU) := head_32.o
48extra-$(CONFIG_PPC64) := head_64.o 50extra-$(CONFIG_PPC64) := head_64.o
49extra-$(CONFIG_40x) := head_4xx.o 51extra-$(CONFIG_40x) := head_40x.o
50extra-$(CONFIG_44x) := head_44x.o 52extra-$(CONFIG_44x) := head_44x.o
51extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o 53extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
52extra-$(CONFIG_8xx) := head_8xx.o 54extra-$(CONFIG_8xx) := head_8xx.o
53extra-y += vmlinux.lds 55extra-y += vmlinux.lds
54 56
55obj-y += time.o prom.o traps.o setup-common.o \ 57obj-y += time.o prom.o traps.o setup-common.o \
56 udbg.o misc.o io.o 58 udbg.o misc.o io.o \
57obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o 59 misc_$(CONFIG_WORD_SIZE).o
58obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o 60obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
61obj-$(CONFIG_PPC64) += dma_64.o iommu.o
59obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o 62obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
60obj-$(CONFIG_MODULES) += ppc_ksyms.o 63obj-$(CONFIG_MODULES) += ppc_ksyms.o
61obj-$(CONFIG_BOOTX_TEXT) += btext.o 64obj-$(CONFIG_BOOTX_TEXT) += btext.o
@@ -63,37 +66,27 @@ obj-$(CONFIG_SMP) += smp.o
63obj-$(CONFIG_KPROBES) += kprobes.o 66obj-$(CONFIG_KPROBES) += kprobes.o
64obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o 67obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
65 68
66module-$(CONFIG_PPC64) += module_64.o 69pci64-$(CONFIG_PPC64) += pci_dn.o isa-bridge.o
67obj-$(CONFIG_MODULES) += $(module-y) 70obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \
68 71 pci-common.o
69pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o isa-bridge.o
70pci32-$(CONFIG_PPC32) := pci_32.o
71obj-$(CONFIG_PCI) += $(pci64-y) $(pci32-y) pci-common.o
72obj-$(CONFIG_PCI_MSI) += msi.o 72obj-$(CONFIG_PCI_MSI) += msi.o
73kexec-$(CONFIG_PPC64) := machine_kexec_64.o 73obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \
74kexec-$(CONFIG_PPC32) := machine_kexec_32.o 74 machine_kexec_$(CONFIG_WORD_SIZE).o
75obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o $(kexec-y)
76obj-$(CONFIG_AUDIT) += audit.o 75obj-$(CONFIG_AUDIT) += audit.o
77obj64-$(CONFIG_AUDIT) += compat_audit.o 76obj64-$(CONFIG_AUDIT) += compat_audit.o
78 77
78obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
79
79ifneq ($(CONFIG_PPC_INDIRECT_IO),y) 80ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
80obj-y += iomap.o 81obj-y += iomap.o
81endif 82endif
82 83
83ifeq ($(CONFIG_PPC_ISERIES),y)
84CFLAGS_lparmap.s += -g0
85extra-y += lparmap.s
86$(obj)/head_64.o: $(obj)/lparmap.s
87AFLAGS_head_64.o += -I$(obj)
88endif
89
90else 84else
91# stuff used from here for ARCH=ppc 85# stuff used from here for ARCH=ppc
92smpobj-$(CONFIG_SMP) += smp.o 86smpobj-$(CONFIG_SMP) += smp.o
93 87
94endif 88endif
95 89
96obj-$(CONFIG_PPC32) += $(obj32-y)
97obj-$(CONFIG_PPC64) += $(obj64-y) 90obj-$(CONFIG_PPC64) += $(obj64-y)
98 91
99extra-$(CONFIG_PPC_FPU) += fpu.o 92extra-$(CONFIG_PPC_FPU) += fpu.o
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 5c9ff7f5c44e..e06f75daeba3 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -38,7 +38,7 @@ struct aligninfo {
38/* Bits in the flags field */ 38/* Bits in the flags field */
39#define LD 0 /* load */ 39#define LD 0 /* load */
40#define ST 1 /* store */ 40#define ST 1 /* store */
41#define SE 2 /* sign-extend value */ 41#define SE 2 /* sign-extend value, or FP ld/st as word */
42#define F 4 /* to/from fp regs */ 42#define F 4 /* to/from fp regs */
43#define U 8 /* update index register */ 43#define U 8 /* update index register */
44#define M 0x10 /* multiple load/store */ 44#define M 0x10 /* multiple load/store */
@@ -46,6 +46,8 @@ struct aligninfo {
46#define S 0x40 /* single-precision fp or... */ 46#define S 0x40 /* single-precision fp or... */
47#define SX 0x40 /* ... byte count in XER */ 47#define SX 0x40 /* ... byte count in XER */
48#define HARD 0x80 /* string, stwcx. */ 48#define HARD 0x80 /* string, stwcx. */
49#define E4 0x40 /* SPE endianness is word */
50#define E8 0x80 /* SPE endianness is double word */
49 51
50/* DSISR bits reported for a DCBZ instruction: */ 52/* DSISR bits reported for a DCBZ instruction: */
51#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ 53#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */
@@ -87,9 +89,9 @@ static struct aligninfo aligninfo[128] = {
87 { 8, LD+F+U }, /* 00 1 1001: lfdu */ 89 { 8, LD+F+U }, /* 00 1 1001: lfdu */
88 { 4, ST+F+S+U }, /* 00 1 1010: stfsu */ 90 { 4, ST+F+S+U }, /* 00 1 1010: stfsu */
89 { 8, ST+F+U }, /* 00 1 1011: stfdu */ 91 { 8, ST+F+U }, /* 00 1 1011: stfdu */
90 INVALID, /* 00 1 1100 */ 92 { 16, LD+F }, /* 00 1 1100: lfdp */
91 INVALID, /* 00 1 1101 */ 93 INVALID, /* 00 1 1101 */
92 INVALID, /* 00 1 1110 */ 94 { 16, ST+F }, /* 00 1 1110: stfdp */
93 INVALID, /* 00 1 1111 */ 95 INVALID, /* 00 1 1111 */
94 { 8, LD }, /* 01 0 0000: ldx */ 96 { 8, LD }, /* 01 0 0000: ldx */
95 INVALID, /* 01 0 0001 */ 97 INVALID, /* 01 0 0001 */
@@ -167,10 +169,10 @@ static struct aligninfo aligninfo[128] = {
167 { 8, LD+F }, /* 11 0 1001: lfdx */ 169 { 8, LD+F }, /* 11 0 1001: lfdx */
168 { 4, ST+F+S }, /* 11 0 1010: stfsx */ 170 { 4, ST+F+S }, /* 11 0 1010: stfsx */
169 { 8, ST+F }, /* 11 0 1011: stfdx */ 171 { 8, ST+F }, /* 11 0 1011: stfdx */
170 INVALID, /* 11 0 1100 */ 172 { 16, LD+F }, /* 11 0 1100: lfdpx */
171 { 8, LD+M }, /* 11 0 1101: lmd */ 173 { 4, LD+F+SE }, /* 11 0 1101: lfiwax */
172 INVALID, /* 11 0 1110 */ 174 { 16, ST+F }, /* 11 0 1110: stfdpx */
173 { 8, ST+M }, /* 11 0 1111: stmd */ 175 { 4, ST+F }, /* 11 0 1111: stfiwx */
174 { 4, LD+U }, /* 11 1 0000: lwzux */ 176 { 4, LD+U }, /* 11 1 0000: lwzux */
175 INVALID, /* 11 1 0001 */ 177 INVALID, /* 11 1 0001 */
176 { 4, ST+U }, /* 11 1 0010: stwux */ 178 { 4, ST+U }, /* 11 1 0010: stwux */
@@ -356,6 +358,284 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
356 return 1; 358 return 1;
357} 359}
358 360
361/*
362 * Emulate floating-point pair loads and stores.
363 * Only POWER6 has these instructions, and it does true little-endian,
364 * so we don't need the address swizzling.
365 */
366static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
367 unsigned int reg, unsigned int flags)
368{
369 char *ptr = (char *) &current->thread.fpr[reg];
370 int i, ret;
371
372 if (!(flags & F))
373 return 0;
374 if (reg & 1)
375 return 0; /* invalid form: FRS/FRT must be even */
376 if (!(flags & SW)) {
377 /* not byte-swapped - easy */
378 if (!(flags & ST))
379 ret = __copy_from_user(ptr, addr, 16);
380 else
381 ret = __copy_to_user(addr, ptr, 16);
382 } else {
383 /* each FPR value is byte-swapped separately */
384 ret = 0;
385 for (i = 0; i < 16; ++i) {
386 if (!(flags & ST))
387 ret |= __get_user(ptr[i^7], addr + i);
388 else
389 ret |= __put_user(ptr[i^7], addr + i);
390 }
391 }
392 if (ret)
393 return -EFAULT;
394 return 1; /* exception handled and fixed up */
395}
396
397#ifdef CONFIG_SPE
398
399static struct aligninfo spe_aligninfo[32] = {
400 { 8, LD+E8 }, /* 0 00 00: evldd[x] */
401 { 8, LD+E4 }, /* 0 00 01: evldw[x] */
402 { 8, LD }, /* 0 00 10: evldh[x] */
403 INVALID, /* 0 00 11 */
404 { 2, LD }, /* 0 01 00: evlhhesplat[x] */
405 INVALID, /* 0 01 01 */
406 { 2, LD }, /* 0 01 10: evlhhousplat[x] */
407 { 2, LD+SE }, /* 0 01 11: evlhhossplat[x] */
408 { 4, LD }, /* 0 10 00: evlwhe[x] */
409 INVALID, /* 0 10 01 */
410 { 4, LD }, /* 0 10 10: evlwhou[x] */
411 { 4, LD+SE }, /* 0 10 11: evlwhos[x] */
412 { 4, LD+E4 }, /* 0 11 00: evlwwsplat[x] */
413 INVALID, /* 0 11 01 */
414 { 4, LD }, /* 0 11 10: evlwhsplat[x] */
415 INVALID, /* 0 11 11 */
416
417 { 8, ST+E8 }, /* 1 00 00: evstdd[x] */
418 { 8, ST+E4 }, /* 1 00 01: evstdw[x] */
419 { 8, ST }, /* 1 00 10: evstdh[x] */
420 INVALID, /* 1 00 11 */
421 INVALID, /* 1 01 00 */
422 INVALID, /* 1 01 01 */
423 INVALID, /* 1 01 10 */
424 INVALID, /* 1 01 11 */
425 { 4, ST }, /* 1 10 00: evstwhe[x] */
426 INVALID, /* 1 10 01 */
427 { 4, ST }, /* 1 10 10: evstwho[x] */
428 INVALID, /* 1 10 11 */
429 { 4, ST+E4 }, /* 1 11 00: evstwwe[x] */
430 INVALID, /* 1 11 01 */
431 { 4, ST+E4 }, /* 1 11 10: evstwwo[x] */
432 INVALID, /* 1 11 11 */
433};
434
435#define EVLDD 0x00
436#define EVLDW 0x01
437#define EVLDH 0x02
438#define EVLHHESPLAT 0x04
439#define EVLHHOUSPLAT 0x06
440#define EVLHHOSSPLAT 0x07
441#define EVLWHE 0x08
442#define EVLWHOU 0x0A
443#define EVLWHOS 0x0B
444#define EVLWWSPLAT 0x0C
445#define EVLWHSPLAT 0x0E
446#define EVSTDD 0x10
447#define EVSTDW 0x11
448#define EVSTDH 0x12
449#define EVSTWHE 0x18
450#define EVSTWHO 0x1A
451#define EVSTWWE 0x1C
452#define EVSTWWO 0x1E
453
454/*
455 * Emulate SPE loads and stores.
456 * Only Book-E has these instructions, and it does true little-endian,
457 * so we don't need the address swizzling.
458 */
459static int emulate_spe(struct pt_regs *regs, unsigned int reg,
460 unsigned int instr)
461{
462 int t, ret;
463 union {
464 u64 ll;
465 u32 w[2];
466 u16 h[4];
467 u8 v[8];
468 } data, temp;
469 unsigned char __user *p, *addr;
470 unsigned long *evr = &current->thread.evr[reg];
471 unsigned int nb, flags;
472
473 instr = (instr >> 1) & 0x1f;
474
475 /* DAR has the operand effective address */
476 addr = (unsigned char __user *)regs->dar;
477
478 nb = spe_aligninfo[instr].len;
479 flags = spe_aligninfo[instr].flags;
480
481 /* Verify the address of the operand */
482 if (unlikely(user_mode(regs) &&
483 !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
484 addr, nb)))
485 return -EFAULT;
486
487 /* userland only */
488 if (unlikely(!user_mode(regs)))
489 return 0;
490
491 flush_spe_to_thread(current);
492
493 /* If we are loading, get the data from user space, else
494 * get it from register values
495 */
496 if (flags & ST) {
497 data.ll = 0;
498 switch (instr) {
499 case EVSTDD:
500 case EVSTDW:
501 case EVSTDH:
502 data.w[0] = *evr;
503 data.w[1] = regs->gpr[reg];
504 break;
505 case EVSTWHE:
506 data.h[2] = *evr >> 16;
507 data.h[3] = regs->gpr[reg] >> 16;
508 break;
509 case EVSTWHO:
510 data.h[2] = *evr & 0xffff;
511 data.h[3] = regs->gpr[reg] & 0xffff;
512 break;
513 case EVSTWWE:
514 data.w[1] = *evr;
515 break;
516 case EVSTWWO:
517 data.w[1] = regs->gpr[reg];
518 break;
519 default:
520 return -EINVAL;
521 }
522 } else {
523 temp.ll = data.ll = 0;
524 ret = 0;
525 p = addr;
526
527 switch (nb) {
528 case 8:
529 ret |= __get_user_inatomic(temp.v[0], p++);
530 ret |= __get_user_inatomic(temp.v[1], p++);
531 ret |= __get_user_inatomic(temp.v[2], p++);
532 ret |= __get_user_inatomic(temp.v[3], p++);
533 case 4:
534 ret |= __get_user_inatomic(temp.v[4], p++);
535 ret |= __get_user_inatomic(temp.v[5], p++);
536 case 2:
537 ret |= __get_user_inatomic(temp.v[6], p++);
538 ret |= __get_user_inatomic(temp.v[7], p++);
539 if (unlikely(ret))
540 return -EFAULT;
541 }
542
543 switch (instr) {
544 case EVLDD:
545 case EVLDW:
546 case EVLDH:
547 data.ll = temp.ll;
548 break;
549 case EVLHHESPLAT:
550 data.h[0] = temp.h[3];
551 data.h[2] = temp.h[3];
552 break;
553 case EVLHHOUSPLAT:
554 case EVLHHOSSPLAT:
555 data.h[1] = temp.h[3];
556 data.h[3] = temp.h[3];
557 break;
558 case EVLWHE:
559 data.h[0] = temp.h[2];
560 data.h[2] = temp.h[3];
561 break;
562 case EVLWHOU:
563 case EVLWHOS:
564 data.h[1] = temp.h[2];
565 data.h[3] = temp.h[3];
566 break;
567 case EVLWWSPLAT:
568 data.w[0] = temp.w[1];
569 data.w[1] = temp.w[1];
570 break;
571 case EVLWHSPLAT:
572 data.h[0] = temp.h[2];
573 data.h[1] = temp.h[2];
574 data.h[2] = temp.h[3];
575 data.h[3] = temp.h[3];
576 break;
577 default:
578 return -EINVAL;
579 }
580 }
581
582 if (flags & SW) {
583 switch (flags & 0xf0) {
584 case E8:
585 SWAP(data.v[0], data.v[7]);
586 SWAP(data.v[1], data.v[6]);
587 SWAP(data.v[2], data.v[5]);
588 SWAP(data.v[3], data.v[4]);
589 break;
590 case E4:
591
592 SWAP(data.v[0], data.v[3]);
593 SWAP(data.v[1], data.v[2]);
594 SWAP(data.v[4], data.v[7]);
595 SWAP(data.v[5], data.v[6]);
596 break;
597 /* Its half word endian */
598 default:
599 SWAP(data.v[0], data.v[1]);
600 SWAP(data.v[2], data.v[3]);
601 SWAP(data.v[4], data.v[5]);
602 SWAP(data.v[6], data.v[7]);
603 break;
604 }
605 }
606
607 if (flags & SE) {
608 data.w[0] = (s16)data.h[1];
609 data.w[1] = (s16)data.h[3];
610 }
611
612 /* Store result to memory or update registers */
613 if (flags & ST) {
614 ret = 0;
615 p = addr;
616 switch (nb) {
617 case 8:
618 ret |= __put_user_inatomic(data.v[0], p++);
619 ret |= __put_user_inatomic(data.v[1], p++);
620 ret |= __put_user_inatomic(data.v[2], p++);
621 ret |= __put_user_inatomic(data.v[3], p++);
622 case 4:
623 ret |= __put_user_inatomic(data.v[4], p++);
624 ret |= __put_user_inatomic(data.v[5], p++);
625 case 2:
626 ret |= __put_user_inatomic(data.v[6], p++);
627 ret |= __put_user_inatomic(data.v[7], p++);
628 }
629 if (unlikely(ret))
630 return -EFAULT;
631 } else {
632 *evr = data.w[0];
633 regs->gpr[reg] = data.w[1];
634 }
635
636 return 1;
637}
638#endif /* CONFIG_SPE */
359 639
360/* 640/*
361 * Called on alignment exception. Attempts to fixup 641 * Called on alignment exception. Attempts to fixup
@@ -414,6 +694,12 @@ int fix_alignment(struct pt_regs *regs)
414 /* extract the operation and registers from the dsisr */ 694 /* extract the operation and registers from the dsisr */
415 reg = (dsisr >> 5) & 0x1f; /* source/dest register */ 695 reg = (dsisr >> 5) & 0x1f; /* source/dest register */
416 areg = dsisr & 0x1f; /* register to update */ 696 areg = dsisr & 0x1f; /* register to update */
697
698#ifdef CONFIG_SPE
699 if ((instr >> 26) == 0x4)
700 return emulate_spe(regs, reg, instr);
701#endif
702
417 instr = (dsisr >> 10) & 0x7f; 703 instr = (dsisr >> 10) & 0x7f;
418 instr |= (dsisr >> 13) & 0x60; 704 instr |= (dsisr >> 13) & 0x60;
419 705
@@ -471,6 +757,10 @@ int fix_alignment(struct pt_regs *regs)
471 flush_fp_to_thread(current); 757 flush_fp_to_thread(current);
472 } 758 }
473 759
760 /* Special case for 16-byte FP loads and stores */
761 if (nb == 16)
762 return emulate_fp_pair(regs, addr, reg, flags);
763
474 /* If we are loading, get the data from user space, else 764 /* If we are loading, get the data from user space, else
475 * get it from register values 765 * get it from register values
476 */ 766 */
@@ -531,7 +821,8 @@ int fix_alignment(struct pt_regs *regs)
531 * or floating point single precision conversion 821 * or floating point single precision conversion
532 */ 822 */
533 switch (flags & ~(U|SW)) { 823 switch (flags & ~(U|SW)) {
534 case LD+SE: /* sign extend */ 824 case LD+SE: /* sign extending integer loads */
825 case LD+F+SE: /* sign extend for lfiwax */
535 if ( nb == 2 ) 826 if ( nb == 2 )
536 data.ll = data.x16.low16; 827 data.ll = data.x16.low16;
537 else /* nb must be 4 */ 828 else /* nb must be 4 */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 2cb1d9487796..0ae5d57b9368 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -312,5 +312,17 @@ int main(void)
312#ifdef CONFIG_BUG 312#ifdef CONFIG_BUG
313 DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); 313 DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
314#endif 314#endif
315
316#ifdef CONFIG_PPC_ISERIES
317 /* the assembler miscalculates the VSID values */
318 DEFINE(PAGE_OFFSET_ESID, GET_ESID(PAGE_OFFSET));
319 DEFINE(PAGE_OFFSET_VSID, KERNEL_VSID(PAGE_OFFSET));
320 DEFINE(VMALLOC_START_ESID, GET_ESID(VMALLOC_START));
321 DEFINE(VMALLOC_START_VSID, KERNEL_VSID(VMALLOC_START));
322#endif
323
324#ifdef CONFIG_PPC64
325 DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
326#endif
315 return 0; 327 return 0;
316} 328}
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index e7b684689e04..3ef51fb6f107 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -11,7 +11,6 @@
11#include <asm/sections.h> 11#include <asm/sections.h>
12#include <asm/prom.h> 12#include <asm/prom.h>
13#include <asm/btext.h> 13#include <asm/btext.h>
14#include <asm/prom.h>
15#include <asm/page.h> 14#include <asm/page.h>
16#include <asm/mmu.h> 15#include <asm/mmu.h>
17#include <asm/pgtable.h> 16#include <asm/pgtable.h>
diff --git a/arch/powerpc/kernel/clock.c b/arch/powerpc/kernel/clock.c
new file mode 100644
index 000000000000..ce668f545758
--- /dev/null
+++ b/arch/powerpc/kernel/clock.c
@@ -0,0 +1,82 @@
1/*
2 * Dummy clk implementations for powerpc.
3 * These need to be overridden in platform code.
4 */
5
6#include <linux/clk.h>
7#include <linux/err.h>
8#include <linux/errno.h>
9#include <linux/module.h>
10#include <asm/clk_interface.h>
11
12struct clk_interface clk_functions;
13
14struct clk *clk_get(struct device *dev, const char *id)
15{
16 if (clk_functions.clk_get)
17 return clk_functions.clk_get(dev, id);
18 return ERR_PTR(-ENOSYS);
19}
20EXPORT_SYMBOL(clk_get);
21
22void clk_put(struct clk *clk)
23{
24 if (clk_functions.clk_put)
25 clk_functions.clk_put(clk);
26}
27EXPORT_SYMBOL(clk_put);
28
29int clk_enable(struct clk *clk)
30{
31 if (clk_functions.clk_enable)
32 return clk_functions.clk_enable(clk);
33 return -ENOSYS;
34}
35EXPORT_SYMBOL(clk_enable);
36
37void clk_disable(struct clk *clk)
38{
39 if (clk_functions.clk_disable)
40 clk_functions.clk_disable(clk);
41}
42EXPORT_SYMBOL(clk_disable);
43
44unsigned long clk_get_rate(struct clk *clk)
45{
46 if (clk_functions.clk_get_rate)
47 return clk_functions.clk_get_rate(clk);
48 return 0;
49}
50EXPORT_SYMBOL(clk_get_rate);
51
52long clk_round_rate(struct clk *clk, unsigned long rate)
53{
54 if (clk_functions.clk_round_rate)
55 return clk_functions.clk_round_rate(clk, rate);
56 return -ENOSYS;
57}
58EXPORT_SYMBOL(clk_round_rate);
59
60int clk_set_rate(struct clk *clk, unsigned long rate)
61{
62 if (clk_functions.clk_set_rate)
63 return clk_functions.clk_set_rate(clk, rate);
64 return -ENOSYS;
65}
66EXPORT_SYMBOL(clk_set_rate);
67
68struct clk *clk_get_parent(struct clk *clk)
69{
70 if (clk_functions.clk_get_parent)
71 return clk_functions.clk_get_parent(clk);
72 return ERR_PTR(-ENOSYS);
73}
74EXPORT_SYMBOL(clk_get_parent);
75
76int clk_set_parent(struct clk *clk, struct clk *parent)
77{
78 if (clk_functions.clk_set_parent)
79 return clk_functions.clk_set_parent(clk, parent);
80 return -ENOSYS;
81}
82EXPORT_SYMBOL(clk_set_parent);
diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S
new file mode 100644
index 000000000000..8e1812e2f3ee
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_44x.S
@@ -0,0 +1,56 @@
1/*
2 * This file contains low level CPU setup functions.
3 * Valentine Barshak <vbarshak@ru.mvista.com>
4 * MontaVista Software, Inc (c) 2007
5 *
6 * Based on cpu_setup_6xx code by
7 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 */
15
16#include <asm/processor.h>
17#include <asm/cputable.h>
18#include <asm/ppc_asm.h>
19
20_GLOBAL(__setup_cpu_440ep)
21 b __init_fpu_44x
22_GLOBAL(__setup_cpu_440epx)
23 mflr r4
24 bl __init_fpu_44x
25 bl __plb_disable_wrp
26 mtlr r4
27 blr
28_GLOBAL(__setup_cpu_440grx)
29 b __plb_disable_wrp
30
31
32/* enable APU between CPU and FPU */
33_GLOBAL(__init_fpu_44x)
34 mfspr r3,SPRN_CCR0
35 /* Clear DAPUIB flag in CCR0 */
36 rlwinm r3,r3,0,12,10
37 mtspr SPRN_CCR0,r3
38 isync
39 blr
40
41/*
42 * Workaround for the incorrect write to DDR SDRAM errata.
43 * The write address can be corrupted during writes to
44 * DDR SDRAM when write pipelining is enabled on PLB0.
45 * Disable write pipelining here.
46 */
47#define DCRN_PLB4A0_ACR 0x81
48
49_GLOBAL(__plb_disable_wrp)
50 mfdcr r3,DCRN_PLB4A0_ACR
51 /* clear WRP bit in PLB4A0_ACR */
52 rlwinm r3,r3,0,8,6
53 mtdcr DCRN_PLB4A0_ACR,r3
54 isync
55 blr
56
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index b1f8000952f3..d3fb7d0c6c1c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -31,6 +31,9 @@ EXPORT_SYMBOL(cur_cpu_spec);
31 * and ppc64 31 * and ppc64
32 */ 32 */
33#ifdef CONFIG_PPC32 33#ifdef CONFIG_PPC32
34extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec);
35extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec);
36extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec);
34extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); 37extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
35extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); 38extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
36extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); 39extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
@@ -68,16 +71,7 @@ extern void __restore_cpu_ppc970(void);
68#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ 71#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
69 PPC_FEATURE_BOOKE) 72 PPC_FEATURE_BOOKE)
70 73
71/* We only set the spe features if the kernel was compiled with 74static struct cpu_spec __initdata cpu_specs[] = {
72 * spe support
73 */
74#ifdef CONFIG_SPE
75#define PPC_FEATURE_SPE_COMP PPC_FEATURE_HAS_SPE
76#else
77#define PPC_FEATURE_SPE_COMP 0
78#endif
79
80static struct cpu_spec cpu_specs[] = {
81#ifdef CONFIG_PPC64 75#ifdef CONFIG_PPC64
82 { /* Power3 */ 76 { /* Power3 */
83 .pvr_mask = 0xffff0000, 77 .pvr_mask = 0xffff0000,
@@ -333,14 +327,6 @@ static struct cpu_spec cpu_specs[] = {
333 .cpu_user_features = COMMON_USER_POWER5_PLUS, 327 .cpu_user_features = COMMON_USER_POWER5_PLUS,
334 .icache_bsize = 128, 328 .icache_bsize = 128,
335 .dcache_bsize = 128, 329 .dcache_bsize = 128,
336 .num_pmcs = 6,
337 .pmc_type = PPC_PMC_IBM,
338 .oprofile_cpu_type = "ppc64/power6",
339 .oprofile_type = PPC_OPROFILE_POWER4,
340 .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
341 .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
342 .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
343 POWER6_MMCRA_OTHER,
344 .platform = "power5+", 330 .platform = "power5+",
345 }, 331 },
346 { /* Power6 */ 332 { /* Power6 */
@@ -370,14 +356,6 @@ static struct cpu_spec cpu_specs[] = {
370 .cpu_user_features = COMMON_USER_POWER6, 356 .cpu_user_features = COMMON_USER_POWER6,
371 .icache_bsize = 128, 357 .icache_bsize = 128,
372 .dcache_bsize = 128, 358 .dcache_bsize = 128,
373 .num_pmcs = 6,
374 .pmc_type = PPC_PMC_IBM,
375 .oprofile_cpu_type = "ppc64/power6",
376 .oprofile_type = PPC_OPROFILE_POWER4,
377 .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
378 .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
379 .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
380 POWER6_MMCRA_OTHER,
381 .platform = "power6", 359 .platform = "power6",
382 }, 360 },
383 { /* Cell Broadband Engine */ 361 { /* Cell Broadband Engine */
@@ -1109,6 +1087,17 @@ static struct cpu_spec cpu_specs[] = {
1109 .dcache_bsize = 32, 1087 .dcache_bsize = 32,
1110 .platform = "ppc405", 1088 .platform = "ppc405",
1111 }, 1089 },
1090 { /* 405EX */
1091 .pvr_mask = 0xffff0000,
1092 .pvr_value = 0x12910000,
1093 .cpu_name = "405EX",
1094 .cpu_features = CPU_FTRS_40X,
1095 .cpu_user_features = PPC_FEATURE_32 |
1096 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1097 .icache_bsize = 32,
1098 .dcache_bsize = 32,
1099 .platform = "ppc405",
1100 },
1112 1101
1113#endif /* CONFIG_40x */ 1102#endif /* CONFIG_40x */
1114#ifdef CONFIG_44x 1103#ifdef CONFIG_44x
@@ -1120,6 +1109,7 @@ static struct cpu_spec cpu_specs[] = {
1120 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1109 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1121 .icache_bsize = 32, 1110 .icache_bsize = 32,
1122 .dcache_bsize = 32, 1111 .dcache_bsize = 32,
1112 .cpu_setup = __setup_cpu_440ep,
1123 .platform = "ppc440", 1113 .platform = "ppc440",
1124 }, 1114 },
1125 { 1115 {
@@ -1130,6 +1120,29 @@ static struct cpu_spec cpu_specs[] = {
1130 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1120 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1131 .icache_bsize = 32, 1121 .icache_bsize = 32,
1132 .dcache_bsize = 32, 1122 .dcache_bsize = 32,
1123 .cpu_setup = __setup_cpu_440ep,
1124 .platform = "ppc440",
1125 },
1126 { /* 440EPX */
1127 .pvr_mask = 0xf0000ffb,
1128 .pvr_value = 0x200008D0,
1129 .cpu_name = "440EPX",
1130 .cpu_features = CPU_FTRS_44X,
1131 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1132 .icache_bsize = 32,
1133 .dcache_bsize = 32,
1134 .cpu_setup = __setup_cpu_440epx,
1135 .platform = "ppc440",
1136 },
1137 { /* 440GRX */
1138 .pvr_mask = 0xf0000ffb,
1139 .pvr_value = 0x200008D8,
1140 .cpu_name = "440GRX",
1141 .cpu_features = CPU_FTRS_44X,
1142 .cpu_user_features = COMMON_USER_BOOKE,
1143 .icache_bsize = 32,
1144 .dcache_bsize = 32,
1145 .cpu_setup = __setup_cpu_440grx,
1133 .platform = "ppc440", 1146 .platform = "ppc440",
1134 }, 1147 },
1135 { /* 440GP Rev. B */ 1148 { /* 440GP Rev. B */
@@ -1243,8 +1256,8 @@ static struct cpu_spec cpu_specs[] = {
1243 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 1256 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1244 .cpu_features = CPU_FTRS_E200, 1257 .cpu_features = CPU_FTRS_E200,
1245 .cpu_user_features = COMMON_USER_BOOKE | 1258 .cpu_user_features = COMMON_USER_BOOKE |
1246 PPC_FEATURE_SPE_COMP | 1259 PPC_FEATURE_HAS_SPE_COMP |
1247 PPC_FEATURE_HAS_EFP_SINGLE | 1260 PPC_FEATURE_HAS_EFP_SINGLE_COMP |
1248 PPC_FEATURE_UNIFIED_CACHE, 1261 PPC_FEATURE_UNIFIED_CACHE,
1249 .dcache_bsize = 32, 1262 .dcache_bsize = 32,
1250 .platform = "ppc5554", 1263 .platform = "ppc5554",
@@ -1256,8 +1269,8 @@ static struct cpu_spec cpu_specs[] = {
1256 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 1269 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1257 .cpu_features = CPU_FTRS_E500, 1270 .cpu_features = CPU_FTRS_E500,
1258 .cpu_user_features = COMMON_USER_BOOKE | 1271 .cpu_user_features = COMMON_USER_BOOKE |
1259 PPC_FEATURE_SPE_COMP | 1272 PPC_FEATURE_HAS_SPE_COMP |
1260 PPC_FEATURE_HAS_EFP_SINGLE, 1273 PPC_FEATURE_HAS_EFP_SINGLE_COMP,
1261 .icache_bsize = 32, 1274 .icache_bsize = 32,
1262 .dcache_bsize = 32, 1275 .dcache_bsize = 32,
1263 .num_pmcs = 4, 1276 .num_pmcs = 4,
@@ -1272,9 +1285,9 @@ static struct cpu_spec cpu_specs[] = {
1272 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 1285 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1273 .cpu_features = CPU_FTRS_E500_2, 1286 .cpu_features = CPU_FTRS_E500_2,
1274 .cpu_user_features = COMMON_USER_BOOKE | 1287 .cpu_user_features = COMMON_USER_BOOKE |
1275 PPC_FEATURE_SPE_COMP | 1288 PPC_FEATURE_HAS_SPE_COMP |
1276 PPC_FEATURE_HAS_EFP_SINGLE | 1289 PPC_FEATURE_HAS_EFP_SINGLE_COMP |
1277 PPC_FEATURE_HAS_EFP_DOUBLE, 1290 PPC_FEATURE_HAS_EFP_DOUBLE_COMP,
1278 .icache_bsize = 32, 1291 .icache_bsize = 32,
1279 .dcache_bsize = 32, 1292 .dcache_bsize = 32,
1280 .num_pmcs = 4, 1293 .num_pmcs = 4,
@@ -1298,29 +1311,49 @@ static struct cpu_spec cpu_specs[] = {
1298#endif /* CONFIG_PPC32 */ 1311#endif /* CONFIG_PPC32 */
1299}; 1312};
1300 1313
1301struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr) 1314static struct cpu_spec the_cpu_spec;
1315
1316struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
1302{ 1317{
1303 struct cpu_spec *s = cpu_specs; 1318 struct cpu_spec *s = cpu_specs;
1304 struct cpu_spec **cur = &cur_cpu_spec; 1319 struct cpu_spec *t = &the_cpu_spec;
1305 int i; 1320 int i;
1306 1321
1307 s = PTRRELOC(s); 1322 s = PTRRELOC(s);
1308 cur = PTRRELOC(cur); 1323 t = PTRRELOC(t);
1309 1324
1310 for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) 1325 for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++)
1311 if ((pvr & s->pvr_mask) == s->pvr_value) { 1326 if ((pvr & s->pvr_mask) == s->pvr_value) {
1312 *cur = cpu_specs + i; 1327 /*
1313#ifdef CONFIG_PPC64 1328 * If we are overriding a previous value derived
1314 /* ppc64 expects identify_cpu to also call setup_cpu 1329 * from the real PVR with a new value obtained
1315 * for that processor. I will consolidate that at a 1330 * using a logical PVR value, don't modify the
1316 * later time, for now, just use our friend #ifdef. 1331 * performance monitor fields.
1332 */
1333 if (t->num_pmcs && !s->num_pmcs) {
1334 t->cpu_name = s->cpu_name;
1335 t->cpu_features = s->cpu_features;
1336 t->cpu_user_features = s->cpu_user_features;
1337 t->icache_bsize = s->icache_bsize;
1338 t->dcache_bsize = s->dcache_bsize;
1339 t->cpu_setup = s->cpu_setup;
1340 t->cpu_restore = s->cpu_restore;
1341 t->platform = s->platform;
1342 } else
1343 *t = *s;
1344 *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
1345#if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE)
1346 /* ppc64 and booke expect identify_cpu to also call
1347 * setup_cpu for that processor. I will consolidate
1348 * that at a later time, for now, just use #ifdef.
1317 * we also don't need to PTRRELOC the function pointer 1349 * we also don't need to PTRRELOC the function pointer
1318 * on ppc64 as we are running at 0 in real mode. 1350 * on ppc64 and booke as we are running at 0 in real
1351 * mode on ppc64 and reloc_offset is always 0 on booke.
1319 */ 1352 */
1320 if (s->cpu_setup) { 1353 if (s->cpu_setup) {
1321 s->cpu_setup(offset, s); 1354 s->cpu_setup(offset, s);
1322 } 1355 }
1323#endif /* CONFIG_PPC64 */ 1356#endif /* CONFIG_PPC64 || CONFIG_BOOKE */
1324 return s; 1357 return s;
1325 } 1358 }
1326 BUG(); 1359 BUG();
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 37658ea417fa..77c749a13378 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -24,7 +24,6 @@
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/irq.h>
28 27
29#include <asm/processor.h> 28#include <asm/processor.h>
30#include <asm/machdep.h> 29#include <asm/machdep.h>
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 2f6f5a7bc69e..29ff77c468ac 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -25,7 +25,7 @@
25#define DBG(fmt...) 25#define DBG(fmt...)
26#endif 26#endif
27 27
28void reserve_kdump_trampoline(void) 28void __init reserve_kdump_trampoline(void)
29{ 29{
30 lmb_reserve(0, KDUMP_RESERVE_LIMIT); 30 lmb_reserve(0, KDUMP_RESERVE_LIMIT);
31} 31}
@@ -54,8 +54,10 @@ void __init setup_kdump_trampoline(void)
54 create_trampoline(i); 54 create_trampoline(i);
55 } 55 }
56 56
57#ifdef CONFIG_PPC_PSERIES
57 create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START); 58 create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
58 create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START); 59 create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
60#endif /* CONFIG_PPC_PSERIES */
59 61
60 DBG(" <- setup_kdump_trampoline()\n"); 62 DBG(" <- setup_kdump_trampoline()\n");
61} 63}
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4074c0b31453..21d889e63e87 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -504,9 +504,11 @@ BEGIN_FTR_SECTION
504END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 504END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
505#endif /* CONFIG_ALTIVEC */ 505#endif /* CONFIG_ALTIVEC */
506#ifdef CONFIG_SPE 506#ifdef CONFIG_SPE
507BEGIN_FTR_SECTION
507 oris r0,r0,MSR_SPE@h /* Disable SPE */ 508 oris r0,r0,MSR_SPE@h /* Disable SPE */
508 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */ 509 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
509 stw r12,THREAD+THREAD_SPEFSCR(r2) 510 stw r12,THREAD+THREAD_SPEFSCR(r2)
511END_FTR_SECTION_IFSET(CPU_FTR_SPE)
510#endif /* CONFIG_SPE */ 512#endif /* CONFIG_SPE */
511 and. r0,r0,r11 /* FP or altivec or SPE enabled? */ 513 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
512 beq+ 1f 514 beq+ 1f
@@ -542,8 +544,10 @@ BEGIN_FTR_SECTION
542END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 544END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
543#endif /* CONFIG_ALTIVEC */ 545#endif /* CONFIG_ALTIVEC */
544#ifdef CONFIG_SPE 546#ifdef CONFIG_SPE
547BEGIN_FTR_SECTION
545 lwz r0,THREAD+THREAD_SPEFSCR(r2) 548 lwz r0,THREAD+THREAD_SPEFSCR(r2)
546 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */ 549 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
550END_FTR_SECTION_IFSET(CPU_FTR_SPE)
547#endif /* CONFIG_SPE */ 551#endif /* CONFIG_SPE */
548 552
549 lwz r0,_CCR(r1) 553 lwz r0,_CCR(r1)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 952eba6701f4..0ec134034899 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -373,8 +373,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
373 373
374 ld r8,KSP(r4) /* new stack pointer */ 374 ld r8,KSP(r4) /* new stack pointer */
375BEGIN_FTR_SECTION 375BEGIN_FTR_SECTION
376 b 2f
377END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
378BEGIN_FTR_SECTION
376 clrrdi r6,r8,28 /* get its ESID */ 379 clrrdi r6,r8,28 /* get its ESID */
377 clrrdi r9,r1,28 /* get current sp ESID */ 380 clrrdi r9,r1,28 /* get current sp ESID */
381END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
382BEGIN_FTR_SECTION
383 clrrdi r6,r8,40 /* get its 1T ESID */
384 clrrdi r9,r1,40 /* get current sp 1T ESID */
385END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
378 clrldi. r0,r6,2 /* is new ESID c00000000? */ 386 clrldi. r0,r6,2 /* is new ESID c00000000? */
379 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ 387 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
380 cror eq,4*cr1+eq,eq 388 cror eq,4*cr1+eq,eq
@@ -384,16 +392,21 @@ BEGIN_FTR_SECTION
384 ld r7,KSP_VSID(r4) /* Get new stack's VSID */ 392 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
385 oris r0,r6,(SLB_ESID_V)@h 393 oris r0,r6,(SLB_ESID_V)@h
386 ori r0,r0,(SLB_NUM_BOLTED-1)@l 394 ori r0,r0,(SLB_NUM_BOLTED-1)@l
387 395BEGIN_FTR_SECTION
388 /* Update the last bolted SLB */ 396 li r9,MMU_SEGSIZE_1T /* insert B field */
397 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
398 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
399END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
400
401 /* Update the last bolted SLB. No write barriers are needed
402 * here, provided we only update the current CPU's SLB shadow
403 * buffer.
404 */
389 ld r9,PACA_SLBSHADOWPTR(r13) 405 ld r9,PACA_SLBSHADOWPTR(r13)
390 li r12,0 406 li r12,0
391 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */ 407 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
392 eieio
393 std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ 408 std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
394 eieio
395 std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ 409 std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
396 eieio
397 410
398 slbie r6 411 slbie r6
399 slbie r6 /* Workaround POWER5 < DD2.1 issue */ 412 slbie r6 /* Workaround POWER5 < DD2.1 issue */
@@ -401,7 +414,6 @@ BEGIN_FTR_SECTION
401 isync 414 isync
402 415
4032: 4162:
404END_FTR_SECTION_IFSET(CPU_FTR_SLB)
405 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */ 417 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
406 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE 418 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
407 because we don't need to leave the 288-byte ABI gap at the 419 because we don't need to leave the 288-byte ABI gap at the
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 7d73a13450b0..a5b13ae7fd20 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -48,20 +48,17 @@
48 mtspr SPRN_DBAT##n##L,RB; \ 48 mtspr SPRN_DBAT##n##L,RB; \
491: 491:
50 50
51 .text 51 .section .text.head, "ax"
52 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f 52 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
53 .stabs "head_32.S",N_SO,0,0,0f 53 .stabs "head_32.S",N_SO,0,0,0f
540: 540:
55 .globl _stext 55_ENTRY(_stext);
56_stext:
57 56
58/* 57/*
59 * _start is defined this way because the XCOFF loader in the OpenFirmware 58 * _start is defined this way because the XCOFF loader in the OpenFirmware
60 * on the powermac expects the entry point to be a procedure descriptor. 59 * on the powermac expects the entry point to be a procedure descriptor.
61 */ 60 */
62 .text 61_ENTRY(_start);
63 .globl _start
64_start:
65 /* 62 /*
66 * These are here for legacy reasons, the kernel used to 63 * These are here for legacy reasons, the kernel used to
67 * need to look like a coff function entry for the pmac 64 * need to look like a coff function entry for the pmac
@@ -152,6 +149,9 @@ __after_mmu_off:
152#if defined(CONFIG_BOOTX_TEXT) 149#if defined(CONFIG_BOOTX_TEXT)
153 bl setup_disp_bat 150 bl setup_disp_bat
154#endif 151#endif
152#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
153 bl setup_cpm_bat
154#endif
155 155
156/* 156/*
157 * Call setup_cpu for CPU 0 and initialize 6xx Idle 157 * Call setup_cpu for CPU 0 and initialize 6xx Idle
@@ -469,16 +469,16 @@ InstructionTLBMiss:
469 mfctr r0 469 mfctr r0
470 /* Get PTE (linux-style) and check access */ 470 /* Get PTE (linux-style) and check access */
471 mfspr r3,SPRN_IMISS 471 mfspr r3,SPRN_IMISS
472 lis r1,KERNELBASE@h /* check if kernel address */ 472 lis r1,PAGE_OFFSET@h /* check if kernel address */
473 cmplw 0,r3,r1 473 cmplw 0,r1,r3
474 mfspr r2,SPRN_SPRG3 474 mfspr r2,SPRN_SPRG3
475 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ 475 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
476 lwz r2,PGDIR(r2) 476 lwz r2,PGDIR(r2)
477 blt+ 112f 477 bge- 112f
478 mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
479 rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
478 lis r2,swapper_pg_dir@ha /* if kernel address, use */ 480 lis r2,swapper_pg_dir@ha /* if kernel address, use */
479 addi r2,r2,swapper_pg_dir@l /* kernel page table */ 481 addi r2,r2,swapper_pg_dir@l /* kernel page table */
480 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
481 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
482112: tophys(r2,r2) 482112: tophys(r2,r2)
483 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ 483 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
484 lwz r2,0(r2) /* get pmd entry */ 484 lwz r2,0(r2) /* get pmd entry */
@@ -543,16 +543,16 @@ DataLoadTLBMiss:
543 mfctr r0 543 mfctr r0
544 /* Get PTE (linux-style) and check access */ 544 /* Get PTE (linux-style) and check access */
545 mfspr r3,SPRN_DMISS 545 mfspr r3,SPRN_DMISS
546 lis r1,KERNELBASE@h /* check if kernel address */ 546 lis r1,PAGE_OFFSET@h /* check if kernel address */
547 cmplw 0,r3,r1 547 cmplw 0,r1,r3
548 mfspr r2,SPRN_SPRG3 548 mfspr r2,SPRN_SPRG3
549 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ 549 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
550 lwz r2,PGDIR(r2) 550 lwz r2,PGDIR(r2)
551 blt+ 112f 551 bge- 112f
552 mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
553 rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
552 lis r2,swapper_pg_dir@ha /* if kernel address, use */ 554 lis r2,swapper_pg_dir@ha /* if kernel address, use */
553 addi r2,r2,swapper_pg_dir@l /* kernel page table */ 555 addi r2,r2,swapper_pg_dir@l /* kernel page table */
554 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
555 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
556112: tophys(r2,r2) 556112: tophys(r2,r2)
557 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ 557 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
558 lwz r2,0(r2) /* get pmd entry */ 558 lwz r2,0(r2) /* get pmd entry */
@@ -615,16 +615,16 @@ DataStoreTLBMiss:
615 mfctr r0 615 mfctr r0
616 /* Get PTE (linux-style) and check access */ 616 /* Get PTE (linux-style) and check access */
617 mfspr r3,SPRN_DMISS 617 mfspr r3,SPRN_DMISS
618 lis r1,KERNELBASE@h /* check if kernel address */ 618 lis r1,PAGE_OFFSET@h /* check if kernel address */
619 cmplw 0,r3,r1 619 cmplw 0,r1,r3
620 mfspr r2,SPRN_SPRG3 620 mfspr r2,SPRN_SPRG3
621 li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ 621 li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
622 lwz r2,PGDIR(r2) 622 lwz r2,PGDIR(r2)
623 blt+ 112f 623 bge- 112f
624 mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
625 rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
624 lis r2,swapper_pg_dir@ha /* if kernel address, use */ 626 lis r2,swapper_pg_dir@ha /* if kernel address, use */
625 addi r2,r2,swapper_pg_dir@l /* kernel page table */ 627 addi r2,r2,swapper_pg_dir@l /* kernel page table */
626 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
627 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
628112: tophys(r2,r2) 628112: tophys(r2,r2)
629 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ 629 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
630 lwz r2,0(r2) /* get pmd entry */ 630 lwz r2,0(r2) /* get pmd entry */
@@ -841,7 +841,7 @@ relocate_kernel:
841 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset 841 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
842 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. 842 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
843 */ 843 */
844_GLOBAL(copy_and_flush) 844_ENTRY(copy_and_flush)
845 addi r5,r5,-4 845 addi r5,r5,-4
846 addi r6,r6,-4 846 addi r6,r6,-4
8474: li r0,L1_CACHE_BYTES/4 8474: li r0,L1_CACHE_BYTES/4
@@ -954,9 +954,9 @@ __secondary_start:
954 * included in CONFIG_6xx 954 * included in CONFIG_6xx
955 */ 955 */
956#if !defined(CONFIG_6xx) 956#if !defined(CONFIG_6xx)
957_GLOBAL(__save_cpu_setup) 957_ENTRY(__save_cpu_setup)
958 blr 958 blr
959_GLOBAL(__restore_cpu_setup) 959_ENTRY(__restore_cpu_setup)
960 blr 960 blr
961#endif /* !defined(CONFIG_6xx) */ 961#endif /* !defined(CONFIG_6xx) */
962 962
@@ -1080,7 +1080,7 @@ start_here:
1080/* 1080/*
1081 * Set up the segment registers for a new context. 1081 * Set up the segment registers for a new context.
1082 */ 1082 */
1083_GLOBAL(set_context) 1083_ENTRY(set_context)
1084 mulli r3,r3,897 /* multiply context by skew factor */ 1084 mulli r3,r3,897 /* multiply context by skew factor */
1085 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */ 1085 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
1086 addis r3,r3,0x6000 /* Set Ks, Ku bits */ 1086 addis r3,r3,0x6000 /* Set Ks, Ku bits */
@@ -1248,6 +1248,19 @@ setup_disp_bat:
1248 blr 1248 blr
1249#endif /* CONFIG_BOOTX_TEXT */ 1249#endif /* CONFIG_BOOTX_TEXT */
1250 1250
1251#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
1252setup_cpm_bat:
1253 lis r8, 0xf000
1254 ori r8, r8, 0x002a
1255 mtspr SPRN_DBAT1L, r8
1256
1257 lis r11, 0xf000
1258 ori r11, r11, (BL_1M << 2) | 2
1259 mtspr SPRN_DBAT1U, r11
1260
1261 blr
1262#endif
1263
1251#ifdef CONFIG_8260 1264#ifdef CONFIG_8260
1252/* Jump into the system reset for the rom. 1265/* Jump into the system reset for the rom.
1253 * We first disable the MMU, and then jump to the ROM reset address. 1266 * We first disable the MMU, and then jump to the ROM reset address.
@@ -1300,14 +1313,6 @@ empty_zero_page:
1300swapper_pg_dir: 1313swapper_pg_dir:
1301 .space 4096 1314 .space 4096
1302 1315
1303/*
1304 * This space gets a copy of optional info passed to us by the bootstrap
1305 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
1306 */
1307 .globl cmd_line
1308cmd_line:
1309 .space 512
1310
1311 .globl intercept_table 1316 .globl intercept_table
1312intercept_table: 1317intercept_table:
1313 .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700 1318 .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
diff --git a/arch/powerpc/kernel/head_4xx.S b/arch/powerpc/kernel/head_40x.S
index adc7f8097cd4..cfefc2df8f2a 100644
--- a/arch/powerpc/kernel/head_4xx.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -35,7 +35,6 @@
35#include <asm/page.h> 35#include <asm/page.h>
36#include <asm/mmu.h> 36#include <asm/mmu.h>
37#include <asm/pgtable.h> 37#include <asm/pgtable.h>
38#include <asm/ibm4xx.h>
39#include <asm/cputable.h> 38#include <asm/cputable.h>
40#include <asm/thread_info.h> 39#include <asm/thread_info.h>
41#include <asm/ppc_asm.h> 40#include <asm/ppc_asm.h>
@@ -53,9 +52,9 @@
53 * 52 *
54 * This is all going to change RSN when we add bi_recs....... -- Dan 53 * This is all going to change RSN when we add bi_recs....... -- Dan
55 */ 54 */
56 .text 55 .section .text.head, "ax"
57_GLOBAL(_stext) 56_ENTRY(_stext);
58_GLOBAL(_start) 57_ENTRY(_start);
59 58
60 /* Save parameters we are passed. 59 /* Save parameters we are passed.
61 */ 60 */
@@ -90,9 +89,9 @@ turn_on_mmu:
90 */ 89 */
91 . = 0xc0 90 . = 0xc0
92crit_save: 91crit_save:
93_GLOBAL(crit_r10) 92_ENTRY(crit_r10)
94 .space 4 93 .space 4
95_GLOBAL(crit_r11) 94_ENTRY(crit_r11)
96 .space 4 95 .space 4
97 96
98/* 97/*
@@ -290,7 +289,7 @@ label:
290 /* If we are faulting a kernel address, we have to use the 289 /* If we are faulting a kernel address, we have to use the
291 * kernel page tables. 290 * kernel page tables.
292 */ 291 */
293 lis r11, TASK_SIZE@h 292 lis r11, PAGE_OFFSET@h
294 cmplw r10, r11 293 cmplw r10, r11
295 blt+ 3f 294 blt+ 3f
296 lis r11, swapper_pg_dir@h 295 lis r11, swapper_pg_dir@h
@@ -482,7 +481,7 @@ label:
482 /* If we are faulting a kernel address, we have to use the 481 /* If we are faulting a kernel address, we have to use the
483 * kernel page tables. 482 * kernel page tables.
484 */ 483 */
485 lis r11, TASK_SIZE@h 484 lis r11, PAGE_OFFSET@h
486 cmplw r10, r11 485 cmplw r10, r11
487 blt+ 3f 486 blt+ 3f
488 lis r11, swapper_pg_dir@h 487 lis r11, swapper_pg_dir@h
@@ -582,7 +581,7 @@ label:
582 /* If we are faulting a kernel address, we have to use the 581 /* If we are faulting a kernel address, we have to use the
583 * kernel page tables. 582 * kernel page tables.
584 */ 583 */
585 lis r11, TASK_SIZE@h 584 lis r11, PAGE_OFFSET@h
586 cmplw r10, r11 585 cmplw r10, r11
587 blt+ 3f 586 blt+ 3f
588 lis r11, swapper_pg_dir@h 587 lis r11, swapper_pg_dir@h
@@ -772,7 +771,7 @@ finish_tlb_load:
772 */ 771 */
773 lwz r9, tlb_4xx_index@l(0) 772 lwz r9, tlb_4xx_index@l(0)
774 addi r9, r9, 1 773 addi r9, r9, 1
775 andi. r9, r9, (PPC4XX_TLB_SIZE-1) 774 andi. r9, r9, (PPC40X_TLB_SIZE-1)
776 stw r9, tlb_4xx_index@l(0) 775 stw r9, tlb_4xx_index@l(0)
777 776
7786: 7776:
@@ -815,7 +814,7 @@ finish_tlb_load:
815 * The PowerPC 4xx family of processors do not have an FPU, so this just 814 * The PowerPC 4xx family of processors do not have an FPU, so this just
816 * returns. 815 * returns.
817 */ 816 */
818_GLOBAL(giveup_fpu) 817_ENTRY(giveup_fpu)
819 blr 818 blr
820 819
821/* This is where the main kernel code starts. 820/* This is where the main kernel code starts.
@@ -1007,13 +1006,6 @@ critical_stack_top:
1007 .globl exception_stack_top 1006 .globl exception_stack_top
1008exception_stack_top: 1007exception_stack_top:
1009 1008
1010/* This space gets a copy of optional info passed to us by the bootstrap
1011 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1012 */
1013 .globl cmd_line
1014cmd_line:
1015 .space 512
1016
1017/* Room for two PTE pointers, usually the kernel and current user pointers 1009/* Room for two PTE pointers, usually the kernel and current user pointers
1018 * to their respective root page table. 1010 * to their respective root page table.
1019 */ 1011 */
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 88695963f587..409db6123924 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -50,9 +50,9 @@
50 * r7 - End of kernel command line string 50 * r7 - End of kernel command line string
51 * 51 *
52 */ 52 */
53 .text 53 .section .text.head, "ax"
54_GLOBAL(_stext) 54_ENTRY(_stext);
55_GLOBAL(_start) 55_ENTRY(_start);
56 /* 56 /*
57 * Reserve a word at a fixed location to store the address 57 * Reserve a word at a fixed location to store the address
58 * of abatron_pteptrs 58 * of abatron_pteptrs
@@ -217,16 +217,6 @@ skpinv: addi r4,r4,1 /* Increment */
217 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ 217 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
218 mtspr SPRN_IVPR,r4 218 mtspr SPRN_IVPR,r4
219 219
220#ifdef CONFIG_440EP
221 /* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */
222 mfspr r2,SPRN_CCR0
223 lis r3,0xffef
224 ori r3,r3,0xffff
225 and r2,r2,r3
226 mtspr SPRN_CCR0,r2
227 isync
228#endif
229
230 /* 220 /*
231 * This is where the main kernel code starts. 221 * This is where the main kernel code starts.
232 */ 222 */
@@ -329,7 +319,7 @@ interrupt_base:
329 /* If we are faulting a kernel address, we have to use the 319 /* If we are faulting a kernel address, we have to use the
330 * kernel page tables. 320 * kernel page tables.
331 */ 321 */
332 lis r11, TASK_SIZE@h 322 lis r11, PAGE_OFFSET@h
333 cmplw r10, r11 323 cmplw r10, r11
334 blt+ 3f 324 blt+ 3f
335 lis r11, swapper_pg_dir@h 325 lis r11, swapper_pg_dir@h
@@ -468,7 +458,7 @@ interrupt_base:
468 /* If we are faulting a kernel address, we have to use the 458 /* If we are faulting a kernel address, we have to use the
469 * kernel page tables. 459 * kernel page tables.
470 */ 460 */
471 lis r11, TASK_SIZE@h 461 lis r11, PAGE_OFFSET@h
472 cmplw r10, r11 462 cmplw r10, r11
473 blt+ 3f 463 blt+ 3f
474 lis r11, swapper_pg_dir@h 464 lis r11, swapper_pg_dir@h
@@ -538,7 +528,7 @@ interrupt_base:
538 /* If we are faulting a kernel address, we have to use the 528 /* If we are faulting a kernel address, we have to use the
539 * kernel page tables. 529 * kernel page tables.
540 */ 530 */
541 lis r11, TASK_SIZE@h 531 lis r11, PAGE_OFFSET@h
542 cmplw r10, r11 532 cmplw r10, r11
543 blt+ 3f 533 blt+ 3f
544 lis r11, swapper_pg_dir@h 534 lis r11, swapper_pg_dir@h
@@ -744,14 +734,6 @@ exception_stack_bottom:
744exception_stack_top: 734exception_stack_top:
745 735
746/* 736/*
747 * This space gets a copy of optional info passed to us by the bootstrap
748 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
749 */
750 .globl cmd_line
751cmd_line:
752 .space 512
753
754/*
755 * Room for two PTE pointers, usually the kernel and current user pointers 737 * Room for two PTE pointers, usually the kernel and current user pointers
756 * to their respective root page table. 738 * to their respective root page table.
757 */ 739 */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 171800002ede..97c5857faf00 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -34,6 +34,8 @@
34#include <asm/iseries/lpar_map.h> 34#include <asm/iseries/lpar_map.h>
35#include <asm/thread_info.h> 35#include <asm/thread_info.h>
36#include <asm/firmware.h> 36#include <asm/firmware.h>
37#include <asm/page_64.h>
38#include <asm/exception.h>
37 39
38#define DO_SOFT_DISABLE 40#define DO_SOFT_DISABLE
39 41
@@ -144,344 +146,9 @@ exception_marker:
144 .text 146 .text
145 147
146/* 148/*
147 * The following macros define the code that appears as
148 * the prologue to each of the exception handlers. They
149 * are split into two parts to allow a single kernel binary
150 * to be used for pSeries and iSeries.
151 * LOL. One day... - paulus
152 */
153
154/*
155 * We make as much of the exception code common between native
156 * exception handlers (including pSeries LPAR) and iSeries LPAR
157 * implementations as possible.
158 */
159
160/*
161 * This is the start of the interrupt handlers for pSeries 149 * This is the start of the interrupt handlers for pSeries
162 * This code runs with relocation off. 150 * This code runs with relocation off.
163 */ 151 */
164#define EX_R9 0
165#define EX_R10 8
166#define EX_R11 16
167#define EX_R12 24
168#define EX_R13 32
169#define EX_SRR0 40
170#define EX_DAR 48
171#define EX_DSISR 56
172#define EX_CCR 60
173#define EX_R3 64
174#define EX_LR 72
175
176/*
177 * We're short on space and time in the exception prolog, so we can't
178 * use the normal SET_REG_IMMEDIATE macro. Normally we just need the
179 * low halfword of the address, but for Kdump we need the whole low
180 * word.
181 */
182#ifdef CONFIG_CRASH_DUMP
183#define LOAD_HANDLER(reg, label) \
184 oris reg,reg,(label)@h; /* virt addr of handler ... */ \
185 ori reg,reg,(label)@l; /* .. and the rest */
186#else
187#define LOAD_HANDLER(reg, label) \
188 ori reg,reg,(label)@l; /* virt addr of handler ... */
189#endif
190
191/*
192 * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode.
193 * The firmware calls the registered system_reset_fwnmi and
194 * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run
195 * a 32bit application at the time of the event.
196 * This firmware bug is present on POWER4 and JS20.
197 */
198#define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label) \
199 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
200 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
201 std r10,area+EX_R10(r13); \
202 std r11,area+EX_R11(r13); \
203 std r12,area+EX_R12(r13); \
204 mfspr r9,SPRN_SPRG1; \
205 std r9,area+EX_R13(r13); \
206 mfcr r9; \
207 clrrdi r12,r13,32; /* get high part of &label */ \
208 mfmsr r10; \
209 /* force 64bit mode */ \
210 li r11,5; /* MSR_SF_LG|MSR_ISF_LG */ \
211 rldimi r10,r11,61,0; /* insert into top 3 bits */ \
212 /* done 64bit mode */ \
213 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
214 LOAD_HANDLER(r12,label) \
215 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
216 mtspr SPRN_SRR0,r12; \
217 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
218 mtspr SPRN_SRR1,r10; \
219 rfid; \
220 b . /* prevent speculative execution */
221
222#define EXCEPTION_PROLOG_PSERIES(area, label) \
223 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
224 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
225 std r10,area+EX_R10(r13); \
226 std r11,area+EX_R11(r13); \
227 std r12,area+EX_R12(r13); \
228 mfspr r9,SPRN_SPRG1; \
229 std r9,area+EX_R13(r13); \
230 mfcr r9; \
231 clrrdi r12,r13,32; /* get high part of &label */ \
232 mfmsr r10; \
233 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
234 LOAD_HANDLER(r12,label) \
235 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
236 mtspr SPRN_SRR0,r12; \
237 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
238 mtspr SPRN_SRR1,r10; \
239 rfid; \
240 b . /* prevent speculative execution */
241
242/*
243 * This is the start of the interrupt handlers for iSeries
244 * This code runs with relocation on.
245 */
246#define EXCEPTION_PROLOG_ISERIES_1(area) \
247 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
248 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
249 std r10,area+EX_R10(r13); \
250 std r11,area+EX_R11(r13); \
251 std r12,area+EX_R12(r13); \
252 mfspr r9,SPRN_SPRG1; \
253 std r9,area+EX_R13(r13); \
254 mfcr r9
255
256#define EXCEPTION_PROLOG_ISERIES_2 \
257 mfmsr r10; \
258 ld r12,PACALPPACAPTR(r13); \
259 ld r11,LPPACASRR0(r12); \
260 ld r12,LPPACASRR1(r12); \
261 ori r10,r10,MSR_RI; \
262 mtmsrd r10,1
263
264/*
265 * The common exception prolog is used for all except a few exceptions
266 * such as a segment miss on a kernel address. We have to be prepared
267 * to take another exception from the point where we first touch the
268 * kernel stack onwards.
269 *
270 * On entry r13 points to the paca, r9-r13 are saved in the paca,
271 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
272 * SRR1, and relocation is on.
273 */
274#define EXCEPTION_PROLOG_COMMON(n, area) \
275 andi. r10,r12,MSR_PR; /* See if coming from user */ \
276 mr r10,r1; /* Save r1 */ \
277 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
278 beq- 1f; \
279 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
2801: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
281 bge- cr1,2f; /* abort if it is */ \
282 b 3f; \
2832: li r1,(n); /* will be reloaded later */ \
284 sth r1,PACA_TRAP_SAVE(r13); \
285 b bad_stack; \
2863: std r9,_CCR(r1); /* save CR in stackframe */ \
287 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
288 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
289 std r10,0(r1); /* make stack chain pointer */ \
290 std r0,GPR0(r1); /* save r0 in stackframe */ \
291 std r10,GPR1(r1); /* save r1 in stackframe */ \
292 ACCOUNT_CPU_USER_ENTRY(r9, r10); \
293 std r2,GPR2(r1); /* save r2 in stackframe */ \
294 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
295 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
296 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
297 ld r10,area+EX_R10(r13); \
298 std r9,GPR9(r1); \
299 std r10,GPR10(r1); \
300 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
301 ld r10,area+EX_R12(r13); \
302 ld r11,area+EX_R13(r13); \
303 std r9,GPR11(r1); \
304 std r10,GPR12(r1); \
305 std r11,GPR13(r1); \
306 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
307 mflr r9; /* save LR in stackframe */ \
308 std r9,_LINK(r1); \
309 mfctr r10; /* save CTR in stackframe */ \
310 std r10,_CTR(r1); \
311 lbz r10,PACASOFTIRQEN(r13); \
312 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
313 std r10,SOFTE(r1); \
314 std r11,_XER(r1); \
315 li r9,(n)+1; \
316 std r9,_TRAP(r1); /* set trap number */ \
317 li r10,0; \
318 ld r11,exception_marker@toc(r2); \
319 std r10,RESULT(r1); /* clear regs->result */ \
320 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
321
322/*
323 * Exception vectors.
324 */
325#define STD_EXCEPTION_PSERIES(n, label) \
326 . = n; \
327 .globl label##_pSeries; \
328label##_pSeries: \
329 HMT_MEDIUM; \
330 mtspr SPRN_SPRG1,r13; /* save r13 */ \
331 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
332
333#define HSTD_EXCEPTION_PSERIES(n, label) \
334 . = n; \
335 .globl label##_pSeries; \
336label##_pSeries: \
337 HMT_MEDIUM; \
338 mtspr SPRN_SPRG1,r20; /* save r20 */ \
339 mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \
340 mtspr SPRN_SRR0,r20; \
341 mfspr r20,SPRN_HSRR1; /* copy HSRR0 to SRR0 */ \
342 mtspr SPRN_SRR1,r20; \
343 mfspr r20,SPRN_SPRG1; /* restore r20 */ \
344 mtspr SPRN_SPRG1,r13; /* save r13 */ \
345 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
346
347
348#define MASKABLE_EXCEPTION_PSERIES(n, label) \
349 . = n; \
350 .globl label##_pSeries; \
351label##_pSeries: \
352 HMT_MEDIUM; \
353 mtspr SPRN_SPRG1,r13; /* save r13 */ \
354 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
355 std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
356 std r10,PACA_EXGEN+EX_R10(r13); \
357 lbz r10,PACASOFTIRQEN(r13); \
358 mfcr r9; \
359 cmpwi r10,0; \
360 beq masked_interrupt; \
361 mfspr r10,SPRN_SPRG1; \
362 std r10,PACA_EXGEN+EX_R13(r13); \
363 std r11,PACA_EXGEN+EX_R11(r13); \
364 std r12,PACA_EXGEN+EX_R12(r13); \
365 clrrdi r12,r13,32; /* get high part of &label */ \
366 mfmsr r10; \
367 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
368 LOAD_HANDLER(r12,label##_common) \
369 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
370 mtspr SPRN_SRR0,r12; \
371 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
372 mtspr SPRN_SRR1,r10; \
373 rfid; \
374 b . /* prevent speculative execution */
375
376#define STD_EXCEPTION_ISERIES(n, label, area) \
377 .globl label##_iSeries; \
378label##_iSeries: \
379 HMT_MEDIUM; \
380 mtspr SPRN_SPRG1,r13; /* save r13 */ \
381 EXCEPTION_PROLOG_ISERIES_1(area); \
382 EXCEPTION_PROLOG_ISERIES_2; \
383 b label##_common
384
385#define MASKABLE_EXCEPTION_ISERIES(n, label) \
386 .globl label##_iSeries; \
387label##_iSeries: \
388 HMT_MEDIUM; \
389 mtspr SPRN_SPRG1,r13; /* save r13 */ \
390 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
391 lbz r10,PACASOFTIRQEN(r13); \
392 cmpwi 0,r10,0; \
393 beq- label##_iSeries_masked; \
394 EXCEPTION_PROLOG_ISERIES_2; \
395 b label##_common; \
396
397#ifdef CONFIG_PPC_ISERIES
398#define DISABLE_INTS \
399 li r11,0; \
400 stb r11,PACASOFTIRQEN(r13); \
401BEGIN_FW_FTR_SECTION; \
402 stb r11,PACAHARDIRQEN(r13); \
403END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
404BEGIN_FW_FTR_SECTION; \
405 mfmsr r10; \
406 ori r10,r10,MSR_EE; \
407 mtmsrd r10,1; \
408END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
409
410#else
411#define DISABLE_INTS \
412 li r11,0; \
413 stb r11,PACASOFTIRQEN(r13); \
414 stb r11,PACAHARDIRQEN(r13)
415
416#endif /* CONFIG_PPC_ISERIES */
417
418#define ENABLE_INTS \
419 ld r12,_MSR(r1); \
420 mfmsr r11; \
421 rlwimi r11,r12,0,MSR_EE; \
422 mtmsrd r11,1
423
424#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
425 .align 7; \
426 .globl label##_common; \
427label##_common: \
428 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
429 DISABLE_INTS; \
430 bl .save_nvgprs; \
431 addi r3,r1,STACK_FRAME_OVERHEAD; \
432 bl hdlr; \
433 b .ret_from_except
434
435/*
436 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
437 * in the idle task and therefore need the special idle handling.
438 */
439#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \
440 .align 7; \
441 .globl label##_common; \
442label##_common: \
443 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
444 FINISH_NAP; \
445 DISABLE_INTS; \
446 bl .save_nvgprs; \
447 addi r3,r1,STACK_FRAME_OVERHEAD; \
448 bl hdlr; \
449 b .ret_from_except
450
451#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
452 .align 7; \
453 .globl label##_common; \
454label##_common: \
455 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
456 FINISH_NAP; \
457 DISABLE_INTS; \
458 bl .ppc64_runlatch_on; \
459 addi r3,r1,STACK_FRAME_OVERHEAD; \
460 bl hdlr; \
461 b .ret_from_except_lite
462
463/*
464 * When the idle code in power4_idle puts the CPU into NAP mode,
465 * it has to do so in a loop, and relies on the external interrupt
466 * and decrementer interrupt entry code to get it out of the loop.
467 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
468 * to signal that it is in the loop and needs help to get out.
469 */
470#ifdef CONFIG_PPC_970_NAP
471#define FINISH_NAP \
472BEGIN_FTR_SECTION \
473 clrrdi r11,r1,THREAD_SHIFT; \
474 ld r9,TI_LOCAL_FLAGS(r11); \
475 andi. r10,r9,_TLF_NAPPING; \
476 bnel power4_fixup_nap; \
477END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
478#else
479#define FINISH_NAP
480#endif
481
482/*
483 * Start of pSeries system interrupt routines
484 */
485 . = 0x100 152 . = 0x100
486 .globl __start_interrupts 153 .globl __start_interrupts
487__start_interrupts: 154__start_interrupts:
@@ -674,6 +341,7 @@ slb_miss_user_pseries:
674 b . /* prevent spec. execution */ 341 b . /* prevent spec. execution */
675#endif /* __DISABLED__ */ 342#endif /* __DISABLED__ */
676 343
344#ifdef CONFIG_PPC_PSERIES
677/* 345/*
678 * Vectors for the FWNMI option. Share common code. 346 * Vectors for the FWNMI option. Share common code.
679 */ 347 */
@@ -691,191 +359,7 @@ machine_check_fwnmi:
691 mtspr SPRN_SPRG1,r13 /* save r13 */ 359 mtspr SPRN_SPRG1,r13 /* save r13 */
692 EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common) 360 EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common)
693 361
694#ifdef CONFIG_PPC_ISERIES 362#endif /* CONFIG_PPC_PSERIES */
695/*** ISeries-LPAR interrupt handlers ***/
696
697 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
698
699 .globl data_access_iSeries
700data_access_iSeries:
701 mtspr SPRN_SPRG1,r13
702BEGIN_FTR_SECTION
703 mtspr SPRN_SPRG2,r12
704 mfspr r13,SPRN_DAR
705 mfspr r12,SPRN_DSISR
706 srdi r13,r13,60
707 rlwimi r13,r12,16,0x20
708 mfcr r12
709 cmpwi r13,0x2c
710 beq .do_stab_bolted_iSeries
711 mtcrf 0x80,r12
712 mfspr r12,SPRN_SPRG2
713END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
714 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
715 EXCEPTION_PROLOG_ISERIES_2
716 b data_access_common
717
718.do_stab_bolted_iSeries:
719 mtcrf 0x80,r12
720 mfspr r12,SPRN_SPRG2
721 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
722 EXCEPTION_PROLOG_ISERIES_2
723 b .do_stab_bolted
724
725 .globl data_access_slb_iSeries
726data_access_slb_iSeries:
727 mtspr SPRN_SPRG1,r13 /* save r13 */
728 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
729 std r3,PACA_EXSLB+EX_R3(r13)
730 mfspr r3,SPRN_DAR
731 std r9,PACA_EXSLB+EX_R9(r13)
732 mfcr r9
733#ifdef __DISABLED__
734 cmpdi r3,0
735 bge slb_miss_user_iseries
736#endif
737 std r10,PACA_EXSLB+EX_R10(r13)
738 std r11,PACA_EXSLB+EX_R11(r13)
739 std r12,PACA_EXSLB+EX_R12(r13)
740 mfspr r10,SPRN_SPRG1
741 std r10,PACA_EXSLB+EX_R13(r13)
742 ld r12,PACALPPACAPTR(r13)
743 ld r12,LPPACASRR1(r12)
744 b .slb_miss_realmode
745
746 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
747
748 .globl instruction_access_slb_iSeries
749instruction_access_slb_iSeries:
750 mtspr SPRN_SPRG1,r13 /* save r13 */
751 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
752 std r3,PACA_EXSLB+EX_R3(r13)
753 ld r3,PACALPPACAPTR(r13)
754 ld r3,LPPACASRR0(r3) /* get SRR0 value */
755 std r9,PACA_EXSLB+EX_R9(r13)
756 mfcr r9
757#ifdef __DISABLED__
758 cmpdi r3,0
759 bge .slb_miss_user_iseries
760#endif
761 std r10,PACA_EXSLB+EX_R10(r13)
762 std r11,PACA_EXSLB+EX_R11(r13)
763 std r12,PACA_EXSLB+EX_R12(r13)
764 mfspr r10,SPRN_SPRG1
765 std r10,PACA_EXSLB+EX_R13(r13)
766 ld r12,PACALPPACAPTR(r13)
767 ld r12,LPPACASRR1(r12)
768 b .slb_miss_realmode
769
770#ifdef __DISABLED__
771slb_miss_user_iseries:
772 std r10,PACA_EXGEN+EX_R10(r13)
773 std r11,PACA_EXGEN+EX_R11(r13)
774 std r12,PACA_EXGEN+EX_R12(r13)
775 mfspr r10,SPRG1
776 ld r11,PACA_EXSLB+EX_R9(r13)
777 ld r12,PACA_EXSLB+EX_R3(r13)
778 std r10,PACA_EXGEN+EX_R13(r13)
779 std r11,PACA_EXGEN+EX_R9(r13)
780 std r12,PACA_EXGEN+EX_R3(r13)
781 EXCEPTION_PROLOG_ISERIES_2
782 b slb_miss_user_common
783#endif
784
785 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
786 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
787 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
788 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
789 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
790 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
791 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
792
793 .globl system_call_iSeries
794system_call_iSeries:
795 mr r9,r13
796 mfspr r13,SPRN_SPRG3
797 EXCEPTION_PROLOG_ISERIES_2
798 b system_call_common
799
800 STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
801 STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
802 STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
803
804 .globl system_reset_iSeries
805system_reset_iSeries:
806 mfspr r13,SPRN_SPRG3 /* Get paca address */
807 mfmsr r24
808 ori r24,r24,MSR_RI
809 mtmsrd r24 /* RI on */
810 lhz r24,PACAPACAINDEX(r13) /* Get processor # */
811 cmpwi 0,r24,0 /* Are we processor 0? */
812 bne 1f
813 b .__start_initialization_iSeries /* Start up the first processor */
8141: mfspr r4,SPRN_CTRLF
815 li r5,CTRL_RUNLATCH /* Turn off the run light */
816 andc r4,r4,r5
817 mtspr SPRN_CTRLT,r4
818
8191:
820 HMT_LOW
821#ifdef CONFIG_SMP
822 lbz r23,PACAPROCSTART(r13) /* Test if this processor
823 * should start */
824 sync
825 LOAD_REG_IMMEDIATE(r3,current_set)
826 sldi r28,r24,3 /* get current_set[cpu#] */
827 ldx r3,r3,r28
828 addi r1,r3,THREAD_SIZE
829 subi r1,r1,STACK_FRAME_OVERHEAD
830
831 cmpwi 0,r23,0
832 beq iSeries_secondary_smp_loop /* Loop until told to go */
833 bne __secondary_start /* Loop until told to go */
834iSeries_secondary_smp_loop:
835 /* Let the Hypervisor know we are alive */
836 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
837 lis r3,0x8002
838 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
839#else /* CONFIG_SMP */
840 /* Yield the processor. This is required for non-SMP kernels
841 which are running on multi-threaded machines. */
842 lis r3,0x8000
843 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
844 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
845 li r4,0 /* "yield timed" */
846 li r5,-1 /* "yield forever" */
847#endif /* CONFIG_SMP */
848 li r0,-1 /* r0=-1 indicates a Hypervisor call */
849 sc /* Invoke the hypervisor via a system call */
850 mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
851 b 1b /* If SMP not configured, secondaries
852 * loop forever */
853
854decrementer_iSeries_masked:
855 /* We may not have a valid TOC pointer in here. */
856 li r11,1
857 ld r12,PACALPPACAPTR(r13)
858 stb r11,LPPACADECRINT(r12)
859 LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy)
860 lwz r12,0(r12)
861 mtspr SPRN_DEC,r12
862 /* fall through */
863
864hardware_interrupt_iSeries_masked:
865 mtcrf 0x80,r9 /* Restore regs */
866 ld r12,PACALPPACAPTR(r13)
867 ld r11,LPPACASRR0(r12)
868 ld r12,LPPACASRR1(r12)
869 mtspr SPRN_SRR0,r11
870 mtspr SPRN_SRR1,r12
871 ld r9,PACA_EXGEN+EX_R9(r13)
872 ld r10,PACA_EXGEN+EX_R10(r13)
873 ld r11,PACA_EXGEN+EX_R11(r13)
874 ld r12,PACA_EXGEN+EX_R12(r13)
875 ld r13,PACA_EXGEN+EX_R13(r13)
876 rfid
877 b . /* prevent speculative execution */
878#endif /* CONFIG_PPC_ISERIES */
879 363
880/*** Common interrupt handlers ***/ 364/*** Common interrupt handlers ***/
881 365
@@ -1175,7 +659,9 @@ hardware_interrupt_common:
1175 FINISH_NAP 659 FINISH_NAP
1176hardware_interrupt_entry: 660hardware_interrupt_entry:
1177 DISABLE_INTS 661 DISABLE_INTS
662BEGIN_FTR_SECTION
1178 bl .ppc64_runlatch_on 663 bl .ppc64_runlatch_on
664END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
1179 addi r3,r1,STACK_FRAME_OVERHEAD 665 addi r3,r1,STACK_FRAME_OVERHEAD
1180 bl .do_IRQ 666 bl .do_IRQ
1181 b .ret_from_except_lite 667 b .ret_from_except_lite
@@ -1449,7 +935,7 @@ _GLOBAL(do_stab_bolted)
1449 935
1450 /* Calculate VSID */ 936 /* Calculate VSID */
1451 /* This is a kernel address, so protovsid = ESID */ 937 /* This is a kernel address, so protovsid = ESID */
1452 ASM_VSID_SCRAMBLE(r11, r9) 938 ASM_VSID_SCRAMBLE(r11, r9, 256M)
1453 rldic r9,r11,12,16 /* r9 = vsid << 12 */ 939 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1454 940
1455 /* Search the primary group for a free entry */ 941 /* Search the primary group for a free entry */
@@ -1519,8 +1005,8 @@ _GLOBAL(do_stab_bolted)
1519 * Space for CPU0's segment table. 1005 * Space for CPU0's segment table.
1520 * 1006 *
1521 * On iSeries, the hypervisor must fill in at least one entry before 1007 * On iSeries, the hypervisor must fill in at least one entry before
1522 * we get control (with relocate on). The address is give to the hv 1008 * we get control (with relocate on). The address is given to the hv
1523 * as a page number (see xLparMap in lpardata.c), so this must be at a 1009 * as a page number (see xLparMap below), so this must be at a
1524 * fixed address (the linker can't compute (u64)&initial_stab >> 1010 * fixed address (the linker can't compute (u64)&initial_stab >>
1525 * PAGE_SHIFT). 1011 * PAGE_SHIFT).
1526 */ 1012 */
@@ -1529,6 +1015,7 @@ _GLOBAL(do_stab_bolted)
1529initial_stab: 1015initial_stab:
1530 .space 4096 1016 .space 4096
1531 1017
1018#ifdef CONFIG_PPC_PSERIES
1532/* 1019/*
1533 * Data area reserved for FWNMI option. 1020 * Data area reserved for FWNMI option.
1534 * This address (0x7000) is fixed by the RPA. 1021 * This address (0x7000) is fixed by the RPA.
@@ -1536,21 +1023,34 @@ initial_stab:
1536 .= 0x7000 1023 .= 0x7000
1537 .globl fwnmi_data_area 1024 .globl fwnmi_data_area
1538fwnmi_data_area: 1025fwnmi_data_area:
1026#endif /* CONFIG_PPC_PSERIES */
1539 1027
1540 /* iSeries does not use the FWNMI stuff, so it is safe to put 1028 /* iSeries does not use the FWNMI stuff, so it is safe to put
1541 * this here, even if we later allow kernels that will boot on 1029 * this here, even if we later allow kernels that will boot on
1542 * both pSeries and iSeries */ 1030 * both pSeries and iSeries */
1543#ifdef CONFIG_PPC_ISERIES 1031#ifdef CONFIG_PPC_ISERIES
1544 . = LPARMAP_PHYS 1032 . = LPARMAP_PHYS
1545#include "lparmap.s" 1033 .globl xLparMap
1546/* 1034xLparMap:
1547 * This ".text" is here for old compilers that generate a trailing 1035 .quad HvEsidsToMap /* xNumberEsids */
1548 * .note section when compiling .c files to .s 1036 .quad HvRangesToMap /* xNumberRanges */
1549 */ 1037 .quad STAB0_PAGE /* xSegmentTableOffs */
1550 .text 1038 .zero 40 /* xRsvd */
1039 /* xEsids (HvEsidsToMap entries of 2 quads) */
1040 .quad PAGE_OFFSET_ESID /* xKernelEsid */
1041 .quad PAGE_OFFSET_VSID /* xKernelVsid */
1042 .quad VMALLOC_START_ESID /* xKernelEsid */
1043 .quad VMALLOC_START_VSID /* xKernelVsid */
1044 /* xRanges (HvRangesToMap entries of 3 quads) */
1045 .quad HvPagesToMap /* xPages */
1046 .quad 0 /* xOffset */
1047 .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */
1048
1551#endif /* CONFIG_PPC_ISERIES */ 1049#endif /* CONFIG_PPC_ISERIES */
1552 1050
1051#ifdef CONFIG_PPC_PSERIES
1553 . = 0x8000 1052 . = 0x8000
1053#endif /* CONFIG_PPC_PSERIES */
1554 1054
1555/* 1055/*
1556 * On pSeries and most other platforms, secondary processors spin 1056 * On pSeries and most other platforms, secondary processors spin
@@ -1611,39 +1111,6 @@ _GLOBAL(generic_secondary_smp_init)
1611 b __secondary_start 1111 b __secondary_start
1612#endif 1112#endif
1613 1113
1614#ifdef CONFIG_PPC_ISERIES
1615_INIT_STATIC(__start_initialization_iSeries)
1616 /* Clear out the BSS */
1617 LOAD_REG_IMMEDIATE(r11,__bss_stop)
1618 LOAD_REG_IMMEDIATE(r8,__bss_start)
1619 sub r11,r11,r8 /* bss size */
1620 addi r11,r11,7 /* round up to an even double word */
1621 rldicl. r11,r11,61,3 /* shift right by 3 */
1622 beq 4f
1623 addi r8,r8,-8
1624 li r0,0
1625 mtctr r11 /* zero this many doublewords */
16263: stdu r0,8(r8)
1627 bdnz 3b
16284:
1629 LOAD_REG_IMMEDIATE(r1,init_thread_union)
1630 addi r1,r1,THREAD_SIZE
1631 li r0,0
1632 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1633
1634 LOAD_REG_IMMEDIATE(r2,__toc_start)
1635 addi r2,r2,0x4000
1636 addi r2,r2,0x4000
1637
1638 bl .iSeries_early_setup
1639 bl .early_setup
1640
1641 /* relocation is on at this point */
1642
1643 b .start_here_common
1644#endif /* CONFIG_PPC_ISERIES */
1645
1646
1647_STATIC(__mmu_off) 1114_STATIC(__mmu_off)
1648 mfmsr r3 1115 mfmsr r3
1649 andi. r0,r3,MSR_IR|MSR_DR 1116 andi. r0,r3,MSR_IR|MSR_DR
@@ -1891,6 +1358,7 @@ _GLOBAL(pmac_secondary_start)
1891 * r13 = paca virtual address 1358 * r13 = paca virtual address
1892 * SPRG3 = paca virtual address 1359 * SPRG3 = paca virtual address
1893 */ 1360 */
1361 .globl __secondary_start
1894__secondary_start: 1362__secondary_start:
1895 /* Set thread priority to MEDIUM */ 1363 /* Set thread priority to MEDIUM */
1896 HMT_MEDIUM 1364 HMT_MEDIUM
@@ -2021,7 +1489,7 @@ _INIT_STATIC(start_here_multiplatform)
2021 b . /* prevent speculative execution */ 1489 b . /* prevent speculative execution */
2022 1490
2023 /* This is where all platforms converge execution */ 1491 /* This is where all platforms converge execution */
2024_INIT_STATIC(start_here_common) 1492_INIT_GLOBAL(start_here_common)
2025 /* relocation is on at this point */ 1493 /* relocation is on at this point */
2026 1494
2027 /* The following code sets up the SP and TOC now that we are */ 1495 /* The following code sets up the SP and TOC now that we are */
@@ -2078,12 +1546,4 @@ empty_zero_page:
2078 1546
2079 .globl swapper_pg_dir 1547 .globl swapper_pg_dir
2080swapper_pg_dir: 1548swapper_pg_dir:
2081 .space PAGE_SIZE 1549 .space PGD_TABLE_SIZE
2082
2083/*
2084 * This space gets a copy of optional info passed to us by the bootstrap
2085 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
2086 */
2087 .globl cmd_line
2088cmd_line:
2089 .space COMMAND_LINE_SIZE
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 901be47a02a9..f7458396cd7c 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -38,12 +38,9 @@
38#else 38#else
39#define DO_8xx_CPU6(val, reg) 39#define DO_8xx_CPU6(val, reg)
40#endif 40#endif
41 .text 41 .section .text.head, "ax"
42 .globl _stext 42_ENTRY(_stext);
43_stext: 43_ENTRY(_start);
44 .text
45 .globl _start
46_start:
47 44
48/* MPC8xx 45/* MPC8xx
49 * This port was done on an MBX board with an 860. Right now I only 46 * This port was done on an MBX board with an 860. Right now I only
@@ -301,6 +298,12 @@ InstructionTLBMiss:
301 stw r10, 0(r0) 298 stw r10, 0(r0)
302 stw r11, 4(r0) 299 stw r11, 4(r0)
303 mfspr r10, SPRN_SRR0 /* Get effective address of fault */ 300 mfspr r10, SPRN_SRR0 /* Get effective address of fault */
301#ifdef CONFIG_8xx_CPU15
302 addi r11, r10, 0x1000
303 tlbie r11
304 addi r11, r10, -0x1000
305 tlbie r11
306#endif
304 DO_8xx_CPU6(0x3780, r3) 307 DO_8xx_CPU6(0x3780, r3)
305 mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */ 308 mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */
306 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ 309 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
@@ -730,13 +733,13 @@ initial_mmu:
730 mtspr SPRN_MD_TWC, r9 733 mtspr SPRN_MD_TWC, r9
731 li r11, MI_BOOTINIT /* Create RPN for address 0 */ 734 li r11, MI_BOOTINIT /* Create RPN for address 0 */
732 addis r11, r11, 0x0080 /* Add 8M */ 735 addis r11, r11, 0x0080 /* Add 8M */
733 mtspr SPRN_MD_RPN, r8 736 mtspr SPRN_MD_RPN, r11
734 737
735 addis r8, r8, 0x0080 /* Add 8M */ 738 addis r8, r8, 0x0080 /* Add 8M */
736 mtspr SPRN_MD_EPN, r8 739 mtspr SPRN_MD_EPN, r8
737 mtspr SPRN_MD_TWC, r9 740 mtspr SPRN_MD_TWC, r9
738 addis r11, r11, 0x0080 /* Add 8M */ 741 addis r11, r11, 0x0080 /* Add 8M */
739 mtspr SPRN_MD_RPN, r8 742 mtspr SPRN_MD_RPN, r11
740#endif 743#endif
741 744
742 /* Since the cache is enabled according to the information we 745 /* Since the cache is enabled according to the information we
@@ -835,14 +838,6 @@ empty_zero_page:
835swapper_pg_dir: 838swapper_pg_dir:
836 .space 4096 839 .space 4096
837 840
838/*
839 * This space gets a copy of optional info passed to us by the bootstrap
840 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
841 */
842 .globl cmd_line
843cmd_line:
844 .space 512
845
846/* Room for two PTE table poiners, usually the kernel and current user 841/* Room for two PTE table poiners, usually the kernel and current user
847 * pointer to their respective root page table (pgdir). 842 * pointer to their respective root page table (pgdir).
848 */ 843 */
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 1f155d399d57..4b9822728aea 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -2,27 +2,27 @@
2 * Kernel execution entry point code. 2 * Kernel execution entry point code.
3 * 3 *
4 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> 4 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
5 * Initial PowerPC version. 5 * Initial PowerPC version.
6 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> 6 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Rewritten for PReP 7 * Rewritten for PReP
8 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> 8 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
9 * Low-level exception handers, MMU support, and rewrite. 9 * Low-level exception handers, MMU support, and rewrite.
10 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> 10 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
11 * PowerPC 8xx modifications. 11 * PowerPC 8xx modifications.
12 * Copyright (c) 1998-1999 TiVo, Inc. 12 * Copyright (c) 1998-1999 TiVo, Inc.
13 * PowerPC 403GCX modifications. 13 * PowerPC 403GCX modifications.
14 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> 14 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
15 * PowerPC 403GCX/405GP modifications. 15 * PowerPC 403GCX/405GP modifications.
16 * Copyright 2000 MontaVista Software Inc. 16 * Copyright 2000 MontaVista Software Inc.
17 * PPC405 modifications 17 * PPC405 modifications
18 * PowerPC 403GCX/405GP modifications. 18 * PowerPC 403GCX/405GP modifications.
19 * Author: MontaVista Software, Inc. 19 * Author: MontaVista Software, Inc.
20 * frank_rowand@mvista.com or source@mvista.com 20 * frank_rowand@mvista.com or source@mvista.com
21 * debbie_chu@mvista.com 21 * debbie_chu@mvista.com
22 * Copyright 2002-2004 MontaVista Software, Inc. 22 * Copyright 2002-2004 MontaVista Software, Inc.
23 * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> 23 * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
24 * Copyright 2004 Freescale Semiconductor, Inc 24 * Copyright 2004 Freescale Semiconductor, Inc
25 * PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org> 25 * PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
26 * 26 *
27 * This program is free software; you can redistribute it and/or modify it 27 * This program is free software; you can redistribute it and/or modify it
28 * under the terms of the GNU General Public License as published by the 28 * under the terms of the GNU General Public License as published by the
@@ -52,9 +52,9 @@
52 * r7 - End of kernel command line string 52 * r7 - End of kernel command line string
53 * 53 *
54 */ 54 */
55 .text 55 .section .text.head, "ax"
56_GLOBAL(_stext) 56_ENTRY(_stext);
57_GLOBAL(_start) 57_ENTRY(_start);
58 /* 58 /*
59 * Reserve a word at a fixed location to store the address 59 * Reserve a word at a fixed location to store the address
60 * of abatron_pteptrs 60 * of abatron_pteptrs
@@ -146,13 +146,13 @@ skpinv: addi r6,r6,1 /* Increment */
146 bne 1b /* If not, repeat */ 146 bne 1b /* If not, repeat */
147 147
148 /* Invalidate TLB0 */ 148 /* Invalidate TLB0 */
149 li r6,0x04 149 li r6,0x04
150 tlbivax 0,r6 150 tlbivax 0,r6
151#ifdef CONFIG_SMP 151#ifdef CONFIG_SMP
152 tlbsync 152 tlbsync
153#endif 153#endif
154 /* Invalidate TLB1 */ 154 /* Invalidate TLB1 */
155 li r6,0x0c 155 li r6,0x0c
156 tlbivax 0,r6 156 tlbivax 0,r6
157#ifdef CONFIG_SMP 157#ifdef CONFIG_SMP
158 tlbsync 158 tlbsync
@@ -211,7 +211,7 @@ skpinv: addi r6,r6,1 /* Increment */
211 mtspr SPRN_MAS1,r6 211 mtspr SPRN_MAS1,r6
212 tlbwe 212 tlbwe
213 /* Invalidate TLB1 */ 213 /* Invalidate TLB1 */
214 li r9,0x0c 214 li r9,0x0c
215 tlbivax 0,r9 215 tlbivax 0,r9
216#ifdef CONFIG_SMP 216#ifdef CONFIG_SMP
217 tlbsync 217 tlbsync
@@ -254,7 +254,7 @@ skpinv: addi r6,r6,1 /* Increment */
254 mtspr SPRN_MAS1,r8 254 mtspr SPRN_MAS1,r8
255 tlbwe 255 tlbwe
256 /* Invalidate TLB1 */ 256 /* Invalidate TLB1 */
257 li r9,0x0c 257 li r9,0x0c
258 tlbivax 0,r9 258 tlbivax 0,r9
259#ifdef CONFIG_SMP 259#ifdef CONFIG_SMP
260 tlbsync 260 tlbsync
@@ -294,7 +294,7 @@ skpinv: addi r6,r6,1 /* Increment */
294#ifdef CONFIG_E200 294#ifdef CONFIG_E200
295 oris r2,r2,MAS4_TLBSELD(1)@h 295 oris r2,r2,MAS4_TLBSELD(1)@h
296#endif 296#endif
297 mtspr SPRN_MAS4, r2 297 mtspr SPRN_MAS4, r2
298 298
299#if 0 299#if 0
300 /* Enable DOZE */ 300 /* Enable DOZE */
@@ -305,7 +305,7 @@ skpinv: addi r6,r6,1 /* Increment */
305#ifdef CONFIG_E200 305#ifdef CONFIG_E200
306 /* enable dedicated debug exception handling resources (Debug APU) */ 306 /* enable dedicated debug exception handling resources (Debug APU) */
307 mfspr r2,SPRN_HID0 307 mfspr r2,SPRN_HID0
308 ori r2,r2,HID0_DAPUEN@l 308 ori r2,r2,HID0_DAPUEN@l
309 mtspr SPRN_HID0,r2 309 mtspr SPRN_HID0,r2
310#endif 310#endif
311 311
@@ -391,7 +391,7 @@ skpinv: addi r6,r6,1 /* Increment */
391#ifdef CONFIG_PTE_64BIT 391#ifdef CONFIG_PTE_64BIT
392#define PTE_FLAGS_OFFSET 4 392#define PTE_FLAGS_OFFSET 4
393#define FIND_PTE \ 393#define FIND_PTE \
394 rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ 394 rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \
395 lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ 395 lwzx r11, r12, r11; /* Get pgd/pmd entry */ \
396 rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ 396 rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \
397 beq 2f; /* Bail if no table */ \ 397 beq 2f; /* Bail if no table */ \
@@ -461,8 +461,7 @@ interrupt_base:
461 /* If we are faulting a kernel address, we have to use the 461 /* If we are faulting a kernel address, we have to use the
462 * kernel page tables. 462 * kernel page tables.
463 */ 463 */
464 lis r11, TASK_SIZE@h 464 lis r11, PAGE_OFFSET@h
465 ori r11, r11, TASK_SIZE@l
466 cmplw 0, r10, r11 465 cmplw 0, r10, r11
467 bge 2f 466 bge 2f
468 467
@@ -487,7 +486,7 @@ interrupt_base:
487 */ 486 */
488 andi. r11, r11, _PAGE_HWEXEC 487 andi. r11, r11, _PAGE_HWEXEC
489 rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */ 488 rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */
490 ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ 489 ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
491 490
492 /* update search PID in MAS6, AS = 0 */ 491 /* update search PID in MAS6, AS = 0 */
493 mfspr r12, SPRN_PID0 492 mfspr r12, SPRN_PID0
@@ -584,8 +583,7 @@ interrupt_base:
584 /* If we are faulting a kernel address, we have to use the 583 /* If we are faulting a kernel address, we have to use the
585 * kernel page tables. 584 * kernel page tables.
586 */ 585 */
587 lis r11, TASK_SIZE@h 586 lis r11, PAGE_OFFSET@h
588 ori r11, r11, TASK_SIZE@l
589 cmplw 5, r10, r11 587 cmplw 5, r10, r11
590 blt 5, 3f 588 blt 5, 3f
591 lis r11, swapper_pg_dir@h 589 lis r11, swapper_pg_dir@h
@@ -645,8 +643,7 @@ interrupt_base:
645 /* If we are faulting a kernel address, we have to use the 643 /* If we are faulting a kernel address, we have to use the
646 * kernel page tables. 644 * kernel page tables.
647 */ 645 */
648 lis r11, TASK_SIZE@h 646 lis r11, PAGE_OFFSET@h
649 ori r11, r11, TASK_SIZE@l
650 cmplw 5, r10, r11 647 cmplw 5, r10, r11
651 blt 5, 3f 648 blt 5, 3f
652 lis r11, swapper_pg_dir@h 649 lis r11, swapper_pg_dir@h
@@ -694,7 +691,7 @@ interrupt_base:
694 START_EXCEPTION(SPEUnavailable) 691 START_EXCEPTION(SPEUnavailable)
695 NORMAL_EXCEPTION_PROLOG 692 NORMAL_EXCEPTION_PROLOG
696 bne load_up_spe 693 bne load_up_spe
697 addi r3,r1,STACK_FRAME_OVERHEAD 694 addi r3,r1,STACK_FRAME_OVERHEAD
698 EXC_XFER_EE_LITE(0x2010, KernelSPE) 695 EXC_XFER_EE_LITE(0x2010, KernelSPE)
699#else 696#else
700 EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE) 697 EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
@@ -741,10 +738,10 @@ data_access:
741 738
742 * Both the instruction and data TLB miss get to this 739 * Both the instruction and data TLB miss get to this
743 * point to load the TLB. 740 * point to load the TLB.
744 * r10 - EA of fault 741 * r10 - EA of fault
745 * r11 - TLB (info from Linux PTE) 742 * r11 - TLB (info from Linux PTE)
746 * r12, r13 - available to use 743 * r12, r13 - available to use
747 * CR5 - results of addr < TASK_SIZE 744 * CR5 - results of addr >= PAGE_OFFSET
748 * MAS0, MAS1 - loaded with proper value when we get here 745 * MAS0, MAS1 - loaded with proper value when we get here
749 * MAS2, MAS3 - will need additional info from Linux PTE 746 * MAS2, MAS3 - will need additional info from Linux PTE
750 * Upon exit, we reload everything and RFI. 747 * Upon exit, we reload everything and RFI.
@@ -813,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
813 lwz r13, tlbcam_index@l(r13) 810 lwz r13, tlbcam_index@l(r13)
814 rlwimi r12, r13, 0, 20, 31 811 rlwimi r12, r13, 0, 20, 31
8157: 8127:
816 mtspr SPRN_MAS0,r12 813 mtspr SPRN_MAS0,r12
817#endif /* CONFIG_E200 */ 814#endif /* CONFIG_E200 */
818 815
819 tlbwe 816 tlbwe
@@ -855,17 +852,17 @@ load_up_spe:
855 beq 1f 852 beq 1f
856 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ 853 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
857 SAVE_32EVRS(0,r10,r4) 854 SAVE_32EVRS(0,r10,r4)
858 evxor evr10, evr10, evr10 /* clear out evr10 */ 855 evxor evr10, evr10, evr10 /* clear out evr10 */
859 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ 856 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
860 li r5,THREAD_ACC 857 li r5,THREAD_ACC
861 evstddx evr10, r4, r5 /* save off accumulator */ 858 evstddx evr10, r4, r5 /* save off accumulator */
862 lwz r5,PT_REGS(r4) 859 lwz r5,PT_REGS(r4)
863 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 860 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
864 lis r10,MSR_SPE@h 861 lis r10,MSR_SPE@h
865 andc r4,r4,r10 /* disable SPE for previous task */ 862 andc r4,r4,r10 /* disable SPE for previous task */
866 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 863 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8671: 8641:
868#endif /* CONFIG_SMP */ 865#endif /* !CONFIG_SMP */
869 /* enable use of SPE after return */ 866 /* enable use of SPE after return */
870 oris r9,r9,MSR_SPE@h 867 oris r9,r9,MSR_SPE@h
871 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ 868 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
@@ -878,7 +875,7 @@ load_up_spe:
878#ifndef CONFIG_SMP 875#ifndef CONFIG_SMP
879 subi r4,r5,THREAD 876 subi r4,r5,THREAD
880 stw r4,last_task_used_spe@l(r3) 877 stw r4,last_task_used_spe@l(r3)
881#endif /* CONFIG_SMP */ 878#endif /* !CONFIG_SMP */
882 /* restore registers and return */ 879 /* restore registers and return */
8832: REST_4GPRS(3, r11) 8802: REST_4GPRS(3, r11)
884 lwz r10,_CCR(r11) 881 lwz r10,_CCR(r11)
@@ -963,10 +960,10 @@ _GLOBAL(giveup_spe)
963 lwz r5,PT_REGS(r3) 960 lwz r5,PT_REGS(r3)
964 cmpi 0,r5,0 961 cmpi 0,r5,0
965 SAVE_32EVRS(0, r4, r3) 962 SAVE_32EVRS(0, r4, r3)
966 evxor evr6, evr6, evr6 /* clear out evr6 */ 963 evxor evr6, evr6, evr6 /* clear out evr6 */
967 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ 964 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
968 li r4,THREAD_ACC 965 li r4,THREAD_ACC
969 evstddx evr6, r4, r3 /* save off accumulator */ 966 evstddx evr6, r4, r3 /* save off accumulator */
970 mfspr r6,SPRN_SPEFSCR 967 mfspr r6,SPRN_SPEFSCR
971 stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */ 968 stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
972 beq 1f 969 beq 1f
@@ -979,7 +976,7 @@ _GLOBAL(giveup_spe)
979 li r5,0 976 li r5,0
980 lis r4,last_task_used_spe@ha 977 lis r4,last_task_used_spe@ha
981 stw r5,last_task_used_spe@l(r4) 978 stw r5,last_task_used_spe@l(r4)
982#endif /* CONFIG_SMP */ 979#endif /* !CONFIG_SMP */
983 blr 980 blr
984#endif /* CONFIG_SPE */ 981#endif /* CONFIG_SPE */
985 982
@@ -1000,15 +997,15 @@ _GLOBAL(giveup_fpu)
1000 */ 997 */
1001_GLOBAL(abort) 998_GLOBAL(abort)
1002 li r13,0 999 li r13,0
1003 mtspr SPRN_DBCR0,r13 /* disable all debug events */ 1000 mtspr SPRN_DBCR0,r13 /* disable all debug events */
1004 isync 1001 isync
1005 mfmsr r13 1002 mfmsr r13
1006 ori r13,r13,MSR_DE@l /* Enable Debug Events */ 1003 ori r13,r13,MSR_DE@l /* Enable Debug Events */
1007 mtmsr r13 1004 mtmsr r13
1008 isync 1005 isync
1009 mfspr r13,SPRN_DBCR0 1006 mfspr r13,SPRN_DBCR0
1010 lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h 1007 lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
1011 mtspr SPRN_DBCR0,r13 1008 mtspr SPRN_DBCR0,r13
1012 isync 1009 isync
1013 1010
1014_GLOBAL(set_context) 1011_GLOBAL(set_context)
@@ -1043,21 +1040,13 @@ swapper_pg_dir:
1043/* Reserved 4k for the critical exception stack & 4k for the machine 1040/* Reserved 4k for the critical exception stack & 4k for the machine
1044 * check stack per CPU for kernel mode exceptions */ 1041 * check stack per CPU for kernel mode exceptions */
1045 .section .bss 1042 .section .bss
1046 .align 12 1043 .align 12
1047exception_stack_bottom: 1044exception_stack_bottom:
1048 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS 1045 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
1049 .globl exception_stack_top 1046 .globl exception_stack_top
1050exception_stack_top: 1047exception_stack_top:
1051 1048
1052/* 1049/*
1053 * This space gets a copy of optional info passed to us by the bootstrap
1054 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1055 */
1056 .globl cmd_line
1057cmd_line:
1058 .space 512
1059
1060/*
1061 * Room for two PTE pointers, usually the kernel and current user pointers 1050 * Room for two PTE pointers, usually the kernel and current user pointers
1062 * to their respective root page table. 1051 * to their respective root page table.
1063 */ 1052 */
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index d6a38cd5018e..53bf64623bd8 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -371,7 +371,8 @@ static int ibmebus_match_path(struct device *dev, void *data)
371 371
372static char *ibmebus_chomp(const char *in, size_t count) 372static char *ibmebus_chomp(const char *in, size_t count)
373{ 373{
374 char *out = (char*)kmalloc(count + 1, GFP_KERNEL); 374 char *out = kmalloc(count + 1, GFP_KERNEL);
375
375 if (!out) 376 if (!out)
376 return NULL; 377 return NULL;
377 378
@@ -396,10 +397,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
396 return -ENOMEM; 397 return -ENOMEM;
397 398
398 if (bus_find_device(&ibmebus_bus_type, NULL, path, 399 if (bus_find_device(&ibmebus_bus_type, NULL, path,
399 ibmebus_match_path)) { 400 ibmebus_match_path)) {
400 printk(KERN_WARNING "%s: %s has already been probed\n", 401 printk(KERN_WARNING "%s: %s has already been probed\n",
401 __FUNCTION__, path); 402 __FUNCTION__, path);
402 rc = -EINVAL; 403 rc = -EEXIST;
403 goto out; 404 goto out;
404 } 405 }
405 406
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index a9e9cbd32975..abd2957fe537 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -24,6 +24,7 @@
24#include <linux/smp.h> 24#include <linux/smp.h>
25#include <linux/cpu.h> 25#include <linux/cpu.h>
26#include <linux/sysctl.h> 26#include <linux/sysctl.h>
27#include <linux/tick.h>
27 28
28#include <asm/system.h> 29#include <asm/system.h>
29#include <asm/processor.h> 30#include <asm/processor.h>
@@ -59,6 +60,7 @@ void cpu_idle(void)
59 60
60 set_thread_flag(TIF_POLLING_NRFLAG); 61 set_thread_flag(TIF_POLLING_NRFLAG);
61 while (1) { 62 while (1) {
63 tick_nohz_stop_sched_tick();
62 while (!need_resched() && !cpu_should_die()) { 64 while (!need_resched() && !cpu_should_die()) {
63 ppc64_runlatch_off(); 65 ppc64_runlatch_off();
64 66
@@ -90,6 +92,7 @@ void cpu_idle(void)
90 92
91 HMT_medium(); 93 HMT_medium();
92 ppc64_runlatch_on(); 94 ppc64_runlatch_on();
95 tick_nohz_restart_sched_tick();
93 if (cpu_should_die()) 96 if (cpu_should_die())
94 cpu_die(); 97 cpu_die();
95 preempt_enable_no_resched(); 98 preempt_enable_no_resched();
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index 2a5cf8680370..1577434f4088 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -119,8 +119,8 @@ EXPORT_SYMBOL(ioport_unmap);
119 119
120void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) 120void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
121{ 121{
122 unsigned long start = pci_resource_start(dev, bar); 122 resource_size_t start = pci_resource_start(dev, bar);
123 unsigned long len = pci_resource_len(dev, bar); 123 resource_size_t len = pci_resource_len(dev, bar);
124 unsigned long flags = pci_resource_flags(dev, bar); 124 unsigned long flags = pci_resource_flags(dev, bar);
125 125
126 if (!len) 126 if (!len)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index c08ceca6277d..e4ec6eee81a8 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -30,7 +30,6 @@
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
33#include <linux/init.h>
34#include <linux/bitops.h> 33#include <linux/bitops.h>
35#include <asm/io.h> 34#include <asm/io.h>
36#include <asm/prom.h> 35#include <asm/prom.h>
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9bf63d5256db..2250f9e6c5ca 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -52,6 +52,7 @@
52#include <linux/mutex.h> 52#include <linux/mutex.h>
53#include <linux/bootmem.h> 53#include <linux/bootmem.h>
54#include <linux/pci.h> 54#include <linux/pci.h>
55#include <linux/debugfs.h>
55 56
56#include <asm/uaccess.h> 57#include <asm/uaccess.h>
57#include <asm/system.h> 58#include <asm/system.h>
@@ -272,7 +273,7 @@ void do_IRQ(struct pt_regs *regs)
272 struct thread_info *curtp, *irqtp; 273 struct thread_info *curtp, *irqtp;
273#endif 274#endif
274 275
275 irq_enter(); 276 irq_enter();
276 277
277#ifdef CONFIG_DEBUG_STACKOVERFLOW 278#ifdef CONFIG_DEBUG_STACKOVERFLOW
278 /* Debugging check for stack overflow: is there less than 2KB free? */ 279 /* Debugging check for stack overflow: is there less than 2KB free? */
@@ -321,7 +322,7 @@ void do_IRQ(struct pt_regs *regs)
321 /* That's not SMP safe ... but who cares ? */ 322 /* That's not SMP safe ... but who cares ? */
322 ppc_spurious_interrupts++; 323 ppc_spurious_interrupts++;
323 324
324 irq_exit(); 325 irq_exit();
325 set_irq_regs(old_regs); 326 set_irq_regs(old_regs);
326 327
327#ifdef CONFIG_PPC_ISERIES 328#ifdef CONFIG_PPC_ISERIES
@@ -417,10 +418,16 @@ irq_hw_number_t virq_to_hw(unsigned int virq)
417} 418}
418EXPORT_SYMBOL_GPL(virq_to_hw); 419EXPORT_SYMBOL_GPL(virq_to_hw);
419 420
420__init_refok struct irq_host *irq_alloc_host(unsigned int revmap_type, 421static int default_irq_host_match(struct irq_host *h, struct device_node *np)
421 unsigned int revmap_arg, 422{
422 struct irq_host_ops *ops, 423 return h->of_node != NULL && h->of_node == np;
423 irq_hw_number_t inval_irq) 424}
425
426struct irq_host *irq_alloc_host(struct device_node *of_node,
427 unsigned int revmap_type,
428 unsigned int revmap_arg,
429 struct irq_host_ops *ops,
430 irq_hw_number_t inval_irq)
424{ 431{
425 struct irq_host *host; 432 struct irq_host *host;
426 unsigned int size = sizeof(struct irq_host); 433 unsigned int size = sizeof(struct irq_host);
@@ -431,13 +438,7 @@ __init_refok struct irq_host *irq_alloc_host(unsigned int revmap_type,
431 /* Allocate structure and revmap table if using linear mapping */ 438 /* Allocate structure and revmap table if using linear mapping */
432 if (revmap_type == IRQ_HOST_MAP_LINEAR) 439 if (revmap_type == IRQ_HOST_MAP_LINEAR)
433 size += revmap_arg * sizeof(unsigned int); 440 size += revmap_arg * sizeof(unsigned int);
434 if (mem_init_done) 441 host = zalloc_maybe_bootmem(size, GFP_KERNEL);
435 host = kzalloc(size, GFP_KERNEL);
436 else {
437 host = alloc_bootmem(size);
438 if (host)
439 memset(host, 0, size);
440 }
441 if (host == NULL) 442 if (host == NULL)
442 return NULL; 443 return NULL;
443 444
@@ -445,6 +446,10 @@ __init_refok struct irq_host *irq_alloc_host(unsigned int revmap_type,
445 host->revmap_type = revmap_type; 446 host->revmap_type = revmap_type;
446 host->inval_irq = inval_irq; 447 host->inval_irq = inval_irq;
447 host->ops = ops; 448 host->ops = ops;
449 host->of_node = of_node;
450
451 if (host->ops->match == NULL)
452 host->ops->match = default_irq_host_match;
448 453
449 spin_lock_irqsave(&irq_big_lock, flags); 454 spin_lock_irqsave(&irq_big_lock, flags);
450 455
@@ -476,7 +481,7 @@ __init_refok struct irq_host *irq_alloc_host(unsigned int revmap_type,
476 host->inval_irq = 0; 481 host->inval_irq = 0;
477 /* setup us as the host for all legacy interrupts */ 482 /* setup us as the host for all legacy interrupts */
478 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { 483 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
479 irq_map[i].hwirq = 0; 484 irq_map[i].hwirq = i;
480 smp_wmb(); 485 smp_wmb();
481 irq_map[i].host = host; 486 irq_map[i].host = host;
482 smp_wmb(); 487 smp_wmb();
@@ -520,7 +525,7 @@ struct irq_host *irq_find_host(struct device_node *node)
520 */ 525 */
521 spin_lock_irqsave(&irq_big_lock, flags); 526 spin_lock_irqsave(&irq_big_lock, flags);
522 list_for_each_entry(h, &irq_hosts, link) 527 list_for_each_entry(h, &irq_hosts, link)
523 if (h->ops->match == NULL || h->ops->match(h, node)) { 528 if (h->ops->match(h, node)) {
524 found = h; 529 found = h;
525 break; 530 break;
526 } 531 }
@@ -995,6 +1000,68 @@ static int irq_late_init(void)
995} 1000}
996arch_initcall(irq_late_init); 1001arch_initcall(irq_late_init);
997 1002
1003#ifdef CONFIG_VIRQ_DEBUG
1004static int virq_debug_show(struct seq_file *m, void *private)
1005{
1006 unsigned long flags;
1007 irq_desc_t *desc;
1008 const char *p;
1009 char none[] = "none";
1010 int i;
1011
1012 seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
1013 "chip name", "host name");
1014
1015 for (i = 1; i < NR_IRQS; i++) {
1016 desc = get_irq_desc(i);
1017 spin_lock_irqsave(&desc->lock, flags);
1018
1019 if (desc->action && desc->action->handler) {
1020 seq_printf(m, "%5d ", i);
1021 seq_printf(m, "0x%05lx ", virq_to_hw(i));
1022
1023 if (desc->chip && desc->chip->typename)
1024 p = desc->chip->typename;
1025 else
1026 p = none;
1027 seq_printf(m, "%-15s ", p);
1028
1029 if (irq_map[i].host && irq_map[i].host->of_node)
1030 p = irq_map[i].host->of_node->full_name;
1031 else
1032 p = none;
1033 seq_printf(m, "%s\n", p);
1034 }
1035
1036 spin_unlock_irqrestore(&desc->lock, flags);
1037 }
1038
1039 return 0;
1040}
1041
1042static int virq_debug_open(struct inode *inode, struct file *file)
1043{
1044 return single_open(file, virq_debug_show, inode->i_private);
1045}
1046
1047static const struct file_operations virq_debug_fops = {
1048 .open = virq_debug_open,
1049 .read = seq_read,
1050 .llseek = seq_lseek,
1051 .release = single_release,
1052};
1053
1054static int __init irq_debugfs_init(void)
1055{
1056 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
1057 NULL, &virq_debug_fops))
1058 return -ENOMEM;
1059
1060 return 0;
1061}
1062__initcall(irq_debugfs_init);
1063#endif /* CONFIG_VIRQ_DEBUG */
1064
998#endif /* CONFIG_PPC_MERGE */ 1065#endif /* CONFIG_PPC_MERGE */
999 1066
1000#ifdef CONFIG_PPC64 1067#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 90fa11c72e1c..4ed58875ee17 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -340,9 +340,10 @@ void __init find_legacy_serial_ports(void)
340 } 340 }
341 341
342 /* First fill our array with opb bus ports */ 342 /* First fill our array with opb bus ports */
343 for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16750")) != NULL;) { 343 for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) {
344 struct device_node *opb = of_get_parent(np); 344 struct device_node *opb = of_get_parent(np);
345 if (opb && !strcmp(opb->type, "opb")) { 345 if (opb && (!strcmp(opb->type, "opb") ||
346 of_device_is_compatible(opb, "ibm,opb"))) {
346 index = add_legacy_soc_port(np, np); 347 index = add_legacy_soc_port(np, np);
347 if (index >= 0 && np == stdout) 348 if (index >= 0 && np == stdout)
348 legacy_serial_console = index; 349 legacy_serial_console = index;
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 6444eaa30a2f..ff781b2eddec 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -77,7 +77,7 @@ static int iseries_lparcfg_data(struct seq_file *m, void *v)
77 int processors, max_processors; 77 int processors, max_processors;
78 unsigned long purr = get_purr(); 78 unsigned long purr = get_purr();
79 79
80 shared = (int)(get_lppaca()->shared_proc); 80 shared = (int)(local_paca->lppaca_ptr->shared_proc);
81 81
82 seq_printf(m, "system_active_processors=%d\n", 82 seq_printf(m, "system_active_processors=%d\n",
83 (int)HvLpConfig_getSystemPhysicalProcessors()); 83 (int)HvLpConfig_getSystemPhysicalProcessors());
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
deleted file mode 100644
index af11285ffbd1..000000000000
--- a/arch/powerpc/kernel/lparmap.c
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright (C) 2005 Stephen Rothwell IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <asm/mmu.h>
10#include <asm/pgtable.h>
11#include <asm/iseries/lpar_map.h>
12
13/* The # is to stop gcc trying to make .text nonexecutable */
14const struct LparMap __attribute__((__section__(".text #"))) xLparMap = {
15 .xNumberEsids = HvEsidsToMap,
16 .xNumberRanges = HvRangesToMap,
17 .xSegmentTableOffs = STAB0_PAGE,
18
19 .xEsids = {
20 { .xKernelEsid = GET_ESID(PAGE_OFFSET),
21 .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
22 { .xKernelEsid = GET_ESID(VMALLOC_START),
23 .xKernelVsid = KERNEL_VSID(VMALLOC_START), },
24 },
25
26 .xRanges = {
27 { .xPages = HvPagesToMap,
28 .xOffset = 0,
29 .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT),
30 },
31 },
32};
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index f9676f52c6d8..0ed31f220482 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -34,21 +34,10 @@
34 34
35#undef DEBUG_NVRAM 35#undef DEBUG_NVRAM
36 36
37static int nvram_scan_partitions(void);
38static int nvram_setup_partition(void);
39static int nvram_create_os_partition(void);
40static int nvram_remove_os_partition(void);
41
42static struct nvram_partition * nvram_part; 37static struct nvram_partition * nvram_part;
43static long nvram_error_log_index = -1; 38static long nvram_error_log_index = -1;
44static long nvram_error_log_size = 0; 39static long nvram_error_log_size = 0;
45 40
46int no_logging = 1; /* Until we initialize everything,
47 * make sure we don't try logging
48 * anything */
49
50extern volatile int error_log_cnt;
51
52struct err_log_info { 41struct err_log_info {
53 int error_type; 42 int error_type;
54 unsigned int seq_num; 43 unsigned int seq_num;
@@ -636,16 +625,13 @@ void __exit nvram_cleanup(void)
636 * sequence #: The unique sequence # for each event. (until it wraps) 625 * sequence #: The unique sequence # for each event. (until it wraps)
637 * error log: The error log from event_scan 626 * error log: The error log from event_scan
638 */ 627 */
639int nvram_write_error_log(char * buff, int length, unsigned int err_type) 628int nvram_write_error_log(char * buff, int length,
629 unsigned int err_type, unsigned int error_log_cnt)
640{ 630{
641 int rc; 631 int rc;
642 loff_t tmp_index; 632 loff_t tmp_index;
643 struct err_log_info info; 633 struct err_log_info info;
644 634
645 if (no_logging) {
646 return -EPERM;
647 }
648
649 if (nvram_error_log_index == -1) { 635 if (nvram_error_log_index == -1) {
650 return -ESPIPE; 636 return -ESPIPE;
651 } 637 }
@@ -678,7 +664,8 @@ int nvram_write_error_log(char * buff, int length, unsigned int err_type)
678 * 664 *
679 * Reads nvram for error log for at most 'length' 665 * Reads nvram for error log for at most 'length'
680 */ 666 */
681int nvram_read_error_log(char * buff, int length, unsigned int * err_type) 667int nvram_read_error_log(char * buff, int length,
668 unsigned int * err_type, unsigned int * error_log_cnt)
682{ 669{
683 int rc; 670 int rc;
684 loff_t tmp_index; 671 loff_t tmp_index;
@@ -704,7 +691,7 @@ int nvram_read_error_log(char * buff, int length, unsigned int * err_type)
704 return rc; 691 return rc;
705 } 692 }
706 693
707 error_log_cnt = info.seq_num; 694 *error_log_cnt = info.seq_num;
708 *err_type = info.error_type; 695 *err_type = info.error_type;
709 696
710 return 0; 697 return 0;
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index f70e787d556f..eca8ccc3fa12 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -19,11 +19,11 @@
19#include <linux/mod_devicetable.h> 19#include <linux/mod_devicetable.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/of_device.h>
23#include <linux/of_platform.h>
22 24
23#include <asm/errno.h> 25#include <asm/errno.h>
24#include <asm/dcr.h> 26#include <asm/dcr.h>
25#include <asm/of_device.h>
26#include <asm/of_platform.h>
27#include <asm/topology.h> 27#include <asm/topology.h>
28#include <asm/pci-bridge.h> 28#include <asm/pci-bridge.h>
29#include <asm/ppc-pci.h> 29#include <asm/ppc-pci.h>
@@ -70,7 +70,10 @@ postcore_initcall(of_bus_driver_init);
70int of_register_platform_driver(struct of_platform_driver *drv) 70int of_register_platform_driver(struct of_platform_driver *drv)
71{ 71{
72 /* initialize common driver fields */ 72 /* initialize common driver fields */
73 drv->driver.name = drv->name; 73 if (!drv->driver.name)
74 drv->driver.name = drv->name;
75 if (!drv->driver.owner)
76 drv->driver.owner = drv->owner;
74 drv->driver.bus = &of_platform_bus_type; 77 drv->driver.bus = &of_platform_bus_type;
75 78
76 /* register with core */ 79 /* register with core */
@@ -385,9 +388,11 @@ static struct of_device_id of_pci_phb_ids[] = {
385}; 388};
386 389
387static struct of_platform_driver of_pci_phb_driver = { 390static struct of_platform_driver of_pci_phb_driver = {
388 .name = "of-pci", 391 .match_table = of_pci_phb_ids,
389 .match_table = of_pci_phb_ids, 392 .probe = of_pci_phb_probe,
390 .probe = of_pci_phb_probe, 393 .driver = {
394 .name = "of-pci",
395 },
391}; 396};
392 397
393static __init int of_pci_phb_init(void) 398static __init int of_pci_phb_init(void)
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 083cfbdbe0b2..2ae3b6f778a3 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -65,14 +65,11 @@ static void __devinit pci_setup_pci_controller(struct pci_controller *hose)
65 spin_unlock(&hose_spinlock); 65 spin_unlock(&hose_spinlock);
66} 66}
67 67
68__init_refok struct pci_controller * pcibios_alloc_controller(struct device_node *dev) 68struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
69{ 69{
70 struct pci_controller *phb; 70 struct pci_controller *phb;
71 71
72 if (mem_init_done) 72 phb = alloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
73 phb = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
74 else
75 phb = alloc_bootmem(sizeof (struct pci_controller));
76 if (phb == NULL) 73 if (phb == NULL)
77 return NULL; 74 return NULL;
78 pci_setup_pci_controller(phb); 75 pci_setup_pci_controller(phb);
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 04a3109ae3c6..0e2bee46304c 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -1457,8 +1457,8 @@ null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1457 1457
1458static struct pci_ops null_pci_ops = 1458static struct pci_ops null_pci_ops =
1459{ 1459{
1460 null_read_config, 1460 .read = null_read_config,
1461 null_write_config 1461 .write = null_write_config,
1462}; 1462};
1463 1463
1464/* 1464/*
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 291ffbc360c9..9f63bdcb0bdf 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -588,7 +588,7 @@ int pci_proc_domain(struct pci_bus *bus)
588 return 0; 588 return 0;
589 else { 589 else {
590 struct pci_controller *hose = pci_bus_to_host(bus); 590 struct pci_controller *hose = pci_bus_to_host(bus);
591 return hose->buid; 591 return hose->buid != 0;
592 } 592 }
593} 593}
594 594
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index d7d36df9c053..b4839038613d 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -23,8 +23,6 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/bootmem.h>
28 26
29#include <asm/io.h> 27#include <asm/io.h>
30#include <asm/prom.h> 28#include <asm/prom.h>
@@ -45,10 +43,7 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
45 const u32 *regs; 43 const u32 *regs;
46 struct pci_dn *pdn; 44 struct pci_dn *pdn;
47 45
48 if (mem_init_done) 46 pdn = alloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
49 pdn = kmalloc(sizeof(*pdn), GFP_KERNEL);
50 else
51 pdn = alloc_bootmem(sizeof(*pdn));
52 if (pdn == NULL) 47 if (pdn == NULL)
53 return NULL; 48 return NULL;
54 memset(pdn, 0, sizeof(*pdn)); 49 memset(pdn, 0, sizeof(*pdn));
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index a20f1951a5ce..c6b1aa3efbb9 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -12,12 +12,12 @@
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/ide.h>
16#include <linux/bitops.h> 15#include <linux/bitops.h>
17 16
18#include <asm/page.h> 17#include <asm/page.h>
19#include <asm/semaphore.h> 18#include <asm/semaphore.h>
20#include <asm/processor.h> 19#include <asm/processor.h>
20#include <asm/cacheflush.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/atomic.h> 23#include <asm/atomic.h>
@@ -95,10 +95,6 @@ EXPORT_SYMBOL(__strnlen_user);
95EXPORT_SYMBOL(copy_4K_page); 95EXPORT_SYMBOL(copy_4K_page);
96#endif 96#endif
97 97
98#if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE))
99EXPORT_SYMBOL(ppc_ide_md);
100#endif
101
102#if defined(CONFIG_PCI) && defined(CONFIG_PPC32) 98#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
103EXPORT_SYMBOL(isa_io_base); 99EXPORT_SYMBOL(isa_io_base);
104EXPORT_SYMBOL(isa_mem_base); 100EXPORT_SYMBOL(isa_mem_base);
@@ -180,7 +176,7 @@ EXPORT_SYMBOL(cacheable_memcpy);
180EXPORT_SYMBOL(cpm_install_handler); 176EXPORT_SYMBOL(cpm_install_handler);
181EXPORT_SYMBOL(cpm_free_handler); 177EXPORT_SYMBOL(cpm_free_handler);
182#endif /* CONFIG_8xx */ 178#endif /* CONFIG_8xx */
183#if defined(CONFIG_8xx) || defined(CONFIG_40x) 179#if defined(CONFIG_8xx)
184EXPORT_SYMBOL(__res); 180EXPORT_SYMBOL(__res);
185#endif 181#endif
186 182
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8a1b001d0b11..7949c203cb89 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -354,6 +354,14 @@ static void show_instructions(struct pt_regs *regs)
354 if (!(i % 8)) 354 if (!(i % 8))
355 printk("\n"); 355 printk("\n");
356 356
357#if !defined(CONFIG_BOOKE)
358 /* If executing with the IMMU off, adjust pc rather
359 * than print XXXXXXXX.
360 */
361 if (!(regs->msr & MSR_IR))
362 pc = (unsigned long)phys_to_virt(pc);
363#endif
364
357 /* We use __get_user here *only* to avoid an OOPS on a 365 /* We use __get_user here *only* to avoid an OOPS on a
358 * bad address because the pc *should* only be a 366 * bad address because the pc *should* only be a
359 * kernel address. 367 * kernel address.
@@ -556,10 +564,15 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
556 564
557#ifdef CONFIG_PPC64 565#ifdef CONFIG_PPC64
558 if (cpu_has_feature(CPU_FTR_SLB)) { 566 if (cpu_has_feature(CPU_FTR_SLB)) {
559 unsigned long sp_vsid = get_kernel_vsid(sp); 567 unsigned long sp_vsid;
560 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; 568 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
561 569
562 sp_vsid <<= SLB_VSID_SHIFT; 570 if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
571 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
572 << SLB_VSID_SHIFT_1T;
573 else
574 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
575 << SLB_VSID_SHIFT;
563 sp_vsid |= SLB_VSID_KERNEL | llp; 576 sp_vsid |= SLB_VSID_KERNEL | llp;
564 p->thread.ksp_vsid = sp_vsid; 577 p->thread.ksp_vsid = sp_vsid;
565 } 578 }
@@ -676,9 +689,13 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
676 * mode (asyn, precise, disabled) for 'Classic' FP. */ 689 * mode (asyn, precise, disabled) for 'Classic' FP. */
677 if (val & PR_FP_EXC_SW_ENABLE) { 690 if (val & PR_FP_EXC_SW_ENABLE) {
678#ifdef CONFIG_SPE 691#ifdef CONFIG_SPE
679 tsk->thread.fpexc_mode = val & 692 if (cpu_has_feature(CPU_FTR_SPE)) {
680 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); 693 tsk->thread.fpexc_mode = val &
681 return 0; 694 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
695 return 0;
696 } else {
697 return -EINVAL;
698 }
682#else 699#else
683 return -EINVAL; 700 return -EINVAL;
684#endif 701#endif
@@ -704,7 +721,10 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
704 721
705 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) 722 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
706#ifdef CONFIG_SPE 723#ifdef CONFIG_SPE
707 val = tsk->thread.fpexc_mode; 724 if (cpu_has_feature(CPU_FTR_SPE))
725 val = tsk->thread.fpexc_mode;
726 else
727 return -EINVAL;
708#else 728#else
709 return -EINVAL; 729 return -EINVAL;
710#endif 730#endif
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index a38197b12d3e..9f329a8928ea 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -52,7 +52,6 @@
52#include <asm/pSeries_reconfig.h> 52#include <asm/pSeries_reconfig.h>
53#include <asm/pci-bridge.h> 53#include <asm/pci-bridge.h>
54#include <asm/kexec.h> 54#include <asm/kexec.h>
55#include <asm/system.h>
56 55
57#ifdef DEBUG 56#ifdef DEBUG
58#define DBG(fmt...) printk(KERN_ERR fmt) 57#define DBG(fmt...) printk(KERN_ERR fmt)
@@ -431,9 +430,11 @@ static int __init early_parse_mem(char *p)
431} 430}
432early_param("mem", early_parse_mem); 431early_param("mem", early_parse_mem);
433 432
434/* 433/**
435 * The device tree may be allocated below our memory limit, or inside the 434 * move_device_tree - move tree to an unused area, if needed.
436 * crash kernel region for kdump. If so, move it out now. 435 *
436 * The device tree may be allocated beyond our memory limit, or inside the
437 * crash kernel region for kdump. If so, move it out of the way.
437 */ 438 */
438static void move_device_tree(void) 439static void move_device_tree(void)
439{ 440{
@@ -530,10 +531,7 @@ static struct ibm_pa_feature {
530 {CPU_FTR_CTRL, 0, 0, 3, 0}, 531 {CPU_FTR_CTRL, 0, 0, 3, 0},
531 {CPU_FTR_NOEXECUTE, 0, 0, 6, 0}, 532 {CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
532 {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1}, 533 {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
533#if 0
534 /* put this back once we know how to test if firmware does 64k IO */
535 {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, 534 {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
536#endif
537 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, 535 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
538}; 536};
539 537
@@ -780,13 +778,13 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
780#endif 778#endif
781 779
782#ifdef CONFIG_KEXEC 780#ifdef CONFIG_KEXEC
783 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); 781 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
784 if (lprop) 782 if (lprop)
785 crashk_res.start = *lprop; 783 crashk_res.start = *lprop;
786 784
787 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL); 785 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
788 if (lprop) 786 if (lprop)
789 crashk_res.end = crashk_res.start + *lprop - 1; 787 crashk_res.end = crashk_res.start + *lprop - 1;
790#endif 788#endif
791 789
792 early_init_dt_check_for_initrd(node); 790 early_init_dt_check_for_initrd(node);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index a1d582e38627..1db10f70ae69 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1199,7 +1199,7 @@ static void __init prom_initialize_tce_table(void)
1199 if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL)) 1199 if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
1200 continue; 1200 continue;
1201 1201
1202 /* Keep the old logic in tack to avoid regression. */ 1202 /* Keep the old logic intact to avoid regression. */
1203 if (compatible[0] != 0) { 1203 if (compatible[0] != 0) {
1204 if ((strstr(compatible, RELOC("python")) == NULL) && 1204 if ((strstr(compatible, RELOC("python")) == NULL) &&
1205 (strstr(compatible, RELOC("Speedwagon")) == NULL) && 1205 (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
@@ -2046,6 +2046,7 @@ static void __init fixup_device_tree_maple(void)
2046/* 2046/*
2047 * Pegasos and BriQ lacks the "ranges" property in the isa node 2047 * Pegasos and BriQ lacks the "ranges" property in the isa node
2048 * Pegasos needs decimal IRQ 14/15, not hexadecimal 2048 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2049 * Pegasos has the IDE configured in legacy mode, but advertised as native
2049 */ 2050 */
2050static void __init fixup_device_tree_chrp(void) 2051static void __init fixup_device_tree_chrp(void)
2051{ 2052{
@@ -2083,9 +2084,13 @@ static void __init fixup_device_tree_chrp(void)
2083 prom_printf("Fixing up IDE interrupt on Pegasos...\n"); 2084 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2084 prop[0] = 14; 2085 prop[0] = 14;
2085 prop[1] = 0x0; 2086 prop[1] = 0x0;
2086 prop[2] = 15; 2087 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2087 prop[3] = 0x0; 2088 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2088 prom_setprop(ph, name, "interrupts", prop, 4*sizeof(u32)); 2089 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2090 if (rc == sizeof(u32)) {
2091 prop[0] &= ~0x5;
2092 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2093 }
2089 } 2094 }
2090} 2095}
2091#else 2096#else
@@ -2226,7 +2231,7 @@ static void __init fixup_device_tree(void)
2226 2231
2227static void __init prom_find_boot_cpu(void) 2232static void __init prom_find_boot_cpu(void)
2228{ 2233{
2229 struct prom_t *_prom = &RELOC(prom); 2234 struct prom_t *_prom = &RELOC(prom);
2230 u32 getprop_rval; 2235 u32 getprop_rval;
2231 ihandle prom_cpu; 2236 ihandle prom_cpu;
2232 phandle cpu_pkg; 2237 phandle cpu_pkg;
@@ -2246,7 +2251,7 @@ static void __init prom_find_boot_cpu(void)
2246static void __init prom_check_initrd(unsigned long r3, unsigned long r4) 2251static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
2247{ 2252{
2248#ifdef CONFIG_BLK_DEV_INITRD 2253#ifdef CONFIG_BLK_DEV_INITRD
2249 struct prom_t *_prom = &RELOC(prom); 2254 struct prom_t *_prom = &RELOC(prom);
2250 2255
2251 if (r3 && r4 && r4 != 0xdeadbeef) { 2256 if (r3 && r4 && r4 != 0xdeadbeef) {
2252 unsigned long val; 2257 unsigned long val;
@@ -2279,7 +2284,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2279 unsigned long pp, 2284 unsigned long pp,
2280 unsigned long r6, unsigned long r7) 2285 unsigned long r6, unsigned long r7)
2281{ 2286{
2282 struct prom_t *_prom; 2287 struct prom_t *_prom;
2283 unsigned long hdr; 2288 unsigned long hdr;
2284 unsigned long offset = reloc_offset(); 2289 unsigned long offset = reloc_offset();
2285 2290
@@ -2338,8 +2343,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2338 /* 2343 /*
2339 * Copy the CPU hold code 2344 * Copy the CPU hold code
2340 */ 2345 */
2341 if (RELOC(of_platform) != PLATFORM_POWERMAC) 2346 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2342 copy_and_flush(0, KERNELBASE + offset, 0x100, 0); 2347 copy_and_flush(0, KERNELBASE + offset, 0x100, 0);
2343 2348
2344 /* 2349 /*
2345 * Do early parsing of command line 2350 * Do early parsing of command line
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 8a177bd9eab4..cf7732cdd6c7 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -331,6 +331,7 @@ static long arch_ptrace_old(struct task_struct *child, long request, long addr,
331 unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; 331 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
332 unsigned long __user *tmp = (unsigned long __user *)addr; 332 unsigned long __user *tmp = (unsigned long __user *)addr;
333 333
334 CHECK_FULL_REGS(child->thread.regs);
334 for (i = 0; i < 32; i++) { 335 for (i = 0; i < 32; i++) {
335 ret = put_user(*reg, tmp); 336 ret = put_user(*reg, tmp);
336 if (ret) 337 if (ret)
@@ -346,6 +347,7 @@ static long arch_ptrace_old(struct task_struct *child, long request, long addr,
346 unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; 347 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
347 unsigned long __user *tmp = (unsigned long __user *)addr; 348 unsigned long __user *tmp = (unsigned long __user *)addr;
348 349
350 CHECK_FULL_REGS(child->thread.regs);
349 for (i = 0; i < 32; i++) { 351 for (i = 0; i < 32; i++) {
350 ret = get_user(*reg, tmp); 352 ret = get_user(*reg, tmp);
351 if (ret) 353 if (ret)
@@ -517,6 +519,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
517 ret = -EIO; 519 ret = -EIO;
518 break; 520 break;
519 } 521 }
522 CHECK_FULL_REGS(child->thread.regs);
520 ret = 0; 523 ret = 0;
521 for (ui = 0; ui < PT_REGS_COUNT; ui ++) { 524 for (ui = 0; ui < PT_REGS_COUNT; ui ++) {
522 ret |= __put_user(ptrace_get_reg(child, ui), 525 ret |= __put_user(ptrace_get_reg(child, ui),
@@ -537,6 +540,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
537 ret = -EIO; 540 ret = -EIO;
538 break; 541 break;
539 } 542 }
543 CHECK_FULL_REGS(child->thread.regs);
540 ret = 0; 544 ret = 0;
541 for (ui = 0; ui < PT_REGS_COUNT; ui ++) { 545 for (ui = 0; ui < PT_REGS_COUNT; ui ++) {
542 ret = __get_user(tmp, (unsigned long __user *) data); 546 ret = __get_user(tmp, (unsigned long __user *) data);
@@ -576,8 +580,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
576#ifdef CONFIG_SPE 580#ifdef CONFIG_SPE
577 case PTRACE_GETEVRREGS: 581 case PTRACE_GETEVRREGS:
578 /* Get the child spe register state. */ 582 /* Get the child spe register state. */
579 if (child->thread.regs->msr & MSR_SPE) 583 flush_spe_to_thread(child);
580 giveup_spe(child);
581 ret = get_evrregs((unsigned long __user *)data, child); 584 ret = get_evrregs((unsigned long __user *)data, child);
582 break; 585 break;
583 586
@@ -585,8 +588,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
585 /* Set the child spe register state. */ 588 /* Set the child spe register state. */
586 /* this is to clear the MSR_SPE bit to force a reload 589 /* this is to clear the MSR_SPE bit to force a reload
587 * of register state from memory */ 590 * of register state from memory */
588 if (child->thread.regs->msr & MSR_SPE) 591 flush_spe_to_thread(child);
589 giveup_spe(child);
590 ret = set_evrregs(child, (unsigned long __user *)data); 592 ret = set_evrregs(child, (unsigned long __user *)data);
591 break; 593 break;
592#endif 594#endif
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 9e6baeac0fb1..fea6206ff90f 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -53,6 +53,7 @@ static long compat_ptrace_old(struct task_struct *child, long request,
53 unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; 53 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
54 unsigned int __user *tmp = (unsigned int __user *)addr; 54 unsigned int __user *tmp = (unsigned int __user *)addr;
55 55
56 CHECK_FULL_REGS(child->thread.regs);
56 for (i = 0; i < 32; i++) { 57 for (i = 0; i < 32; i++) {
57 ret = put_user(*reg, tmp); 58 ret = put_user(*reg, tmp);
58 if (ret) 59 if (ret)
@@ -68,6 +69,7 @@ static long compat_ptrace_old(struct task_struct *child, long request,
68 unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; 69 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
69 unsigned int __user *tmp = (unsigned int __user *)addr; 70 unsigned int __user *tmp = (unsigned int __user *)addr;
70 71
72 CHECK_FULL_REGS(child->thread.regs);
71 for (i = 0; i < 32; i++) { 73 for (i = 0; i < 32; i++) {
72 ret = get_user(*reg, tmp); 74 ret = get_user(*reg, tmp);
73 if (ret) 75 if (ret)
@@ -164,6 +166,7 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr,
164 if ((addr & 3) || (index > PT_FPSCR32)) 166 if ((addr & 3) || (index > PT_FPSCR32))
165 break; 167 break;
166 168
169 CHECK_FULL_REGS(child->thread.regs);
167 if (index < PT_FPR0) { 170 if (index < PT_FPR0) {
168 tmp = ptrace_get_reg(child, index); 171 tmp = ptrace_get_reg(child, index);
169 } else { 172 } else {
@@ -210,6 +213,7 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr,
210 if ((addr & 3) || numReg > PT_FPSCR) 213 if ((addr & 3) || numReg > PT_FPSCR)
211 break; 214 break;
212 215
216 CHECK_FULL_REGS(child->thread.regs);
213 if (numReg >= PT_FPR0) { 217 if (numReg >= PT_FPR0) {
214 flush_fp_to_thread(child); 218 flush_fp_to_thread(child);
215 tmp = ((unsigned long int *)child->thread.fpr)[numReg - PT_FPR0]; 219 tmp = ((unsigned long int *)child->thread.fpr)[numReg - PT_FPR0];
@@ -270,6 +274,7 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr,
270 if ((addr & 3) || (index > PT_FPSCR32)) 274 if ((addr & 3) || (index > PT_FPSCR32))
271 break; 275 break;
272 276
277 CHECK_FULL_REGS(child->thread.regs);
273 if (index < PT_FPR0) { 278 if (index < PT_FPR0) {
274 ret = ptrace_put_reg(child, index, data); 279 ret = ptrace_put_reg(child, index, data);
275 } else { 280 } else {
@@ -307,6 +312,7 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr,
307 */ 312 */
308 if ((addr & 3) || (numReg > PT_FPSCR)) 313 if ((addr & 3) || (numReg > PT_FPSCR))
309 break; 314 break;
315 CHECK_FULL_REGS(child->thread.regs);
310 if (numReg < PT_FPR0) { 316 if (numReg < PT_FPR0) {
311 unsigned long freg = ptrace_get_reg(child, numReg); 317 unsigned long freg = ptrace_get_reg(child, numReg);
312 if (index % 2) 318 if (index % 2)
@@ -342,6 +348,7 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr,
342 ret = -EIO; 348 ret = -EIO;
343 break; 349 break;
344 } 350 }
351 CHECK_FULL_REGS(child->thread.regs);
345 ret = 0; 352 ret = 0;
346 for (ui = 0; ui < PT_REGS_COUNT; ui ++) { 353 for (ui = 0; ui < PT_REGS_COUNT; ui ++) {
347 ret |= __put_user(ptrace_get_reg(child, ui), 354 ret |= __put_user(ptrace_get_reg(child, ui),
@@ -359,6 +366,7 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr,
359 ret = -EIO; 366 ret = -EIO;
360 break; 367 break;
361 } 368 }
369 CHECK_FULL_REGS(child->thread.regs);
362 ret = 0; 370 ret = 0;
363 for (ui = 0; ui < PT_REGS_COUNT; ui ++) { 371 for (ui = 0; ui < PT_REGS_COUNT; ui ++) {
364 ret = __get_user(tmp, (unsigned int __user *) data); 372 ret = __get_user(tmp, (unsigned int __user *) data);
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index a5de6211b97a..21f14e57d1f3 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -171,8 +171,8 @@ static int rtas_pci_write_config(struct pci_bus *bus,
171} 171}
172 172
173struct pci_ops rtas_pci_ops = { 173struct pci_ops rtas_pci_ops = {
174 rtas_pci_read_config, 174 .read = rtas_pci_read_config,
175 rtas_pci_write_config 175 .write = rtas_pci_write_config,
176}; 176};
177 177
178int is_python(struct device_node *dev) 178int is_python(struct device_node *dev)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 50ef38cffdbf..36c90ba2d312 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -76,6 +76,8 @@ EXPORT_SYMBOL(machine_id);
76 76
77unsigned long klimit = (unsigned long) _end; 77unsigned long klimit = (unsigned long) _end;
78 78
79char cmd_line[COMMAND_LINE_SIZE];
80
79/* 81/*
80 * This still seems to be needed... -- paulus 82 * This still seems to be needed... -- paulus
81 */ 83 */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 7ec6ba56d83d..cd870a823d18 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -10,7 +10,9 @@
10#include <linux/reboot.h> 10#include <linux/reboot.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/initrd.h> 12#include <linux/initrd.h>
13#if defined(CONFIG_IDE) || defined(CONFIG_IDE_MODULE)
13#include <linux/ide.h> 14#include <linux/ide.h>
15#endif
14#include <linux/tty.h> 16#include <linux/tty.h>
15#include <linux/bootmem.h> 17#include <linux/bootmem.h>
16#include <linux/seq_file.h> 18#include <linux/seq_file.h>
@@ -18,13 +20,11 @@
18#include <linux/cpu.h> 20#include <linux/cpu.h>
19#include <linux/console.h> 21#include <linux/console.h>
20 22
21#include <asm/residual.h>
22#include <asm/io.h> 23#include <asm/io.h>
23#include <asm/prom.h> 24#include <asm/prom.h>
24#include <asm/processor.h> 25#include <asm/processor.h>
25#include <asm/pgtable.h> 26#include <asm/pgtable.h>
26#include <asm/setup.h> 27#include <asm/setup.h>
27#include <asm/amigappc.h>
28#include <asm/smp.h> 28#include <asm/smp.h>
29#include <asm/elf.h> 29#include <asm/elf.h>
30#include <asm/cputable.h> 30#include <asm/cputable.h>
@@ -51,7 +51,10 @@
51 51
52extern void bootx_init(unsigned long r4, unsigned long phys); 52extern void bootx_init(unsigned long r4, unsigned long phys);
53 53
54#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
54struct ide_machdep_calls ppc_ide_md; 55struct ide_machdep_calls ppc_ide_md;
56EXPORT_SYMBOL(ppc_ide_md);
57#endif
55 58
56int boot_cpuid; 59int boot_cpuid;
57EXPORT_SYMBOL_GPL(boot_cpuid); 60EXPORT_SYMBOL_GPL(boot_cpuid);
@@ -287,7 +290,8 @@ void __init setup_arch(char **cmdline_p)
287 conswitchp = &dummy_con; 290 conswitchp = &dummy_con;
288#endif 291#endif
289 292
290 ppc_md.setup_arch(); 293 if (ppc_md.setup_arch)
294 ppc_md.setup_arch();
291 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); 295 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
292 296
293 paging_init(); 297 paging_init();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 6018178708a5..008ab6823b02 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -181,9 +181,9 @@ void __init early_setup(unsigned long dt_ptr)
181 DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr); 181 DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
182 182
183 /* 183 /*
184 * Do early initializations using the flattened device 184 * Do early initialization using the flattened device
185 * tree, like retreiving the physical memory map or 185 * tree, such as retrieving the physical memory map or
186 * calculating/retreiving the hash table size 186 * calculating/retrieving the hash table size.
187 */ 187 */
188 early_init_devtree(__va(dt_ptr)); 188 early_init_devtree(__va(dt_ptr));
189 189
@@ -530,7 +530,8 @@ void __init setup_arch(char **cmdline_p)
530 conswitchp = &dummy_con; 530 conswitchp = &dummy_con;
531#endif 531#endif
532 532
533 ppc_md.setup_arch(); 533 if (ppc_md.setup_arch)
534 ppc_md.setup_arch();
534 535
535 paging_init(); 536 paging_init();
536 ppc64_boot_msg(0x15, "Setup Done"); 537 ppc64_boot_msg(0x15, "Setup Done");
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index c434d6c4e4e6..a65a44fbe523 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -16,6 +16,12 @@
16 16
17#include "signal.h" 17#include "signal.h"
18 18
19/* Log an error when sending an unhandled signal to a process. Controlled
20 * through debug.exception-trace sysctl.
21 */
22
23int show_unhandled_signals = 0;
24
19/* 25/*
20 * Allocate space for the signal frame 26 * Allocate space for the signal frame
21 */ 27 */
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 590057e9e987..6126bca8b70a 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -705,11 +705,13 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
705{ 705{
706 struct rt_sigframe __user *rt_sf; 706 struct rt_sigframe __user *rt_sf;
707 struct mcontext __user *frame; 707 struct mcontext __user *frame;
708 void __user *addr;
708 unsigned long newsp = 0; 709 unsigned long newsp = 0;
709 710
710 /* Set up Signal Frame */ 711 /* Set up Signal Frame */
711 /* Put a Real Time Context onto stack */ 712 /* Put a Real Time Context onto stack */
712 rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf)); 713 rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf));
714 addr = rt_sf;
713 if (unlikely(rt_sf == NULL)) 715 if (unlikely(rt_sf == NULL))
714 goto badframe; 716 goto badframe;
715 717
@@ -728,6 +730,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
728 730
729 /* Save user registers on the stack */ 731 /* Save user registers on the stack */
730 frame = &rt_sf->uc.uc_mcontext; 732 frame = &rt_sf->uc.uc_mcontext;
733 addr = frame;
731 if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { 734 if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
732 if (save_user_regs(regs, frame, 0)) 735 if (save_user_regs(regs, frame, 0))
733 goto badframe; 736 goto badframe;
@@ -742,6 +745,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
742 745
743 /* create a stack frame for the caller of the handler */ 746 /* create a stack frame for the caller of the handler */
744 newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16); 747 newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
748 addr = (void __user *)regs->gpr[1];
745 if (put_user(regs->gpr[1], (u32 __user *)newsp)) 749 if (put_user(regs->gpr[1], (u32 __user *)newsp))
746 goto badframe; 750 goto badframe;
747 751
@@ -762,6 +766,12 @@ badframe:
762 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n", 766 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
763 regs, frame, newsp); 767 regs, frame, newsp);
764#endif 768#endif
769 if (show_unhandled_signals && printk_ratelimit())
770 printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: "
771 "%p nip %08lx lr %08lx\n",
772 current->comm, current->pid,
773 addr, regs->nip, regs->link);
774
765 force_sigsegv(sig, current); 775 force_sigsegv(sig, current);
766 return 0; 776 return 0;
767} 777}
@@ -886,6 +896,12 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
886 return 0; 896 return 0;
887 897
888 bad: 898 bad:
899 if (show_unhandled_signals && printk_ratelimit())
900 printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: "
901 "%p nip %08lx lr %08lx\n",
902 current->comm, current->pid,
903 rt_sf, regs->nip, regs->link);
904
889 force_sig(SIGSEGV, current); 905 force_sig(SIGSEGV, current);
890 return 0; 906 return 0;
891} 907}
@@ -967,6 +983,13 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
967 * We kill the task with a SIGSEGV in this situation. 983 * We kill the task with a SIGSEGV in this situation.
968 */ 984 */
969 if (do_setcontext(ctx, regs, 1)) { 985 if (do_setcontext(ctx, regs, 1)) {
986 if (show_unhandled_signals && printk_ratelimit())
987 printk(KERN_INFO "%s[%d]: bad frame in "
988 "sys_debug_setcontext: %p nip %08lx "
989 "lr %08lx\n",
990 current->comm, current->pid,
991 ctx, regs->nip, regs->link);
992
970 force_sig(SIGSEGV, current); 993 force_sig(SIGSEGV, current);
971 goto out; 994 goto out;
972 } 995 }
@@ -1048,6 +1071,12 @@ badframe:
1048 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n", 1071 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
1049 regs, frame, newsp); 1072 regs, frame, newsp);
1050#endif 1073#endif
1074 if (show_unhandled_signals && printk_ratelimit())
1075 printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: "
1076 "%p nip %08lx lr %08lx\n",
1077 current->comm, current->pid,
1078 frame, regs->nip, regs->link);
1079
1051 force_sigsegv(sig, current); 1080 force_sigsegv(sig, current);
1052 return 0; 1081 return 0;
1053} 1082}
@@ -1061,12 +1090,14 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1061 struct sigcontext __user *sc; 1090 struct sigcontext __user *sc;
1062 struct sigcontext sigctx; 1091 struct sigcontext sigctx;
1063 struct mcontext __user *sr; 1092 struct mcontext __user *sr;
1093 void __user *addr;
1064 sigset_t set; 1094 sigset_t set;
1065 1095
1066 /* Always make any pending restarted system calls return -EINTR */ 1096 /* Always make any pending restarted system calls return -EINTR */
1067 current_thread_info()->restart_block.fn = do_no_restart_syscall; 1097 current_thread_info()->restart_block.fn = do_no_restart_syscall;
1068 1098
1069 sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE); 1099 sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1100 addr = sc;
1070 if (copy_from_user(&sigctx, sc, sizeof(sigctx))) 1101 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1071 goto badframe; 1102 goto badframe;
1072 1103
@@ -1083,6 +1114,7 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1083 restore_sigmask(&set); 1114 restore_sigmask(&set);
1084 1115
1085 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs); 1116 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1117 addr = sr;
1086 if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) 1118 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1087 || restore_user_regs(regs, sr, 1)) 1119 || restore_user_regs(regs, sr, 1))
1088 goto badframe; 1120 goto badframe;
@@ -1091,6 +1123,12 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1091 return 0; 1123 return 0;
1092 1124
1093badframe: 1125badframe:
1126 if (show_unhandled_signals && printk_ratelimit())
1127 printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: "
1128 "%p nip %08lx lr %08lx\n",
1129 current->comm, current->pid,
1130 addr, regs->nip, regs->link);
1131
1094 force_sig(SIGSEGV, current); 1132 force_sig(SIGSEGV, current);
1095 return 0; 1133 return 0;
1096} 1134}
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index de895e6d8c62..faeb8f207ea4 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -64,6 +64,11 @@ struct rt_sigframe {
64 char abigap[288]; 64 char abigap[288];
65} __attribute__ ((aligned (16))); 65} __attribute__ ((aligned (16)));
66 66
67static const char fmt32[] = KERN_INFO \
68 "%s[%d]: bad frame in %s: %08lx nip %08lx lr %08lx\n";
69static const char fmt64[] = KERN_INFO \
70 "%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n";
71
67/* 72/*
68 * Set up the sigcontext for the signal frame. 73 * Set up the sigcontext for the signal frame.
69 */ 74 */
@@ -315,6 +320,11 @@ badframe:
315 printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n", 320 printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
316 regs, uc, &uc->uc_mcontext); 321 regs, uc, &uc->uc_mcontext);
317#endif 322#endif
323 if (show_unhandled_signals && printk_ratelimit())
324 printk(regs->msr & MSR_SF ? fmt64 : fmt32,
325 current->comm, current->pid, "rt_sigreturn",
326 (long)uc, regs->nip, regs->link);
327
318 force_sig(SIGSEGV, current); 328 force_sig(SIGSEGV, current);
319 return 0; 329 return 0;
320} 330}
@@ -398,6 +408,11 @@ badframe:
398 printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n", 408 printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
399 regs, frame, newsp); 409 regs, frame, newsp);
400#endif 410#endif
411 if (show_unhandled_signals && printk_ratelimit())
412 printk(regs->msr & MSR_SF ? fmt64 : fmt32,
413 current->comm, current->pid, "setup_rt_frame",
414 (long)frame, regs->nip, regs->link);
415
401 force_sigsegv(signr, current); 416 force_sigsegv(signr, current);
402 return 0; 417 return 0;
403} 418}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 1ea43160f543..d30f08fa0297 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -152,11 +152,6 @@ static void stop_this_cpu(void *dummy)
152 ; 152 ;
153} 153}
154 154
155void smp_send_stop(void)
156{
157 smp_call_function(stop_this_cpu, NULL, 1, 0);
158}
159
160/* 155/*
161 * Structure and data for smp_call_function(). This is designed to minimise 156 * Structure and data for smp_call_function(). This is designed to minimise
162 * static memory requirements. It also looks cleaner. 157 * static memory requirements. It also looks cleaner.
@@ -198,9 +193,6 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
198 int cpu; 193 int cpu;
199 u64 timeout; 194 u64 timeout;
200 195
201 /* Can deadlock when called with interrupts disabled */
202 WARN_ON(irqs_disabled());
203
204 if (unlikely(smp_ops == NULL)) 196 if (unlikely(smp_ops == NULL))
205 return ret; 197 return ret;
206 198
@@ -270,10 +262,19 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
270 return ret; 262 return ret;
271} 263}
272 264
265static int __smp_call_function(void (*func)(void *info), void *info,
266 int nonatomic, int wait)
267{
268 return smp_call_function_map(func,info,nonatomic,wait,cpu_online_map);
269}
270
273int smp_call_function(void (*func) (void *info), void *info, int nonatomic, 271int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
274 int wait) 272 int wait)
275{ 273{
276 return smp_call_function_map(func,info,nonatomic,wait,cpu_online_map); 274 /* Can deadlock when called with interrupts disabled */
275 WARN_ON(irqs_disabled());
276
277 return __smp_call_function(func, info, nonatomic, wait);
277} 278}
278EXPORT_SYMBOL(smp_call_function); 279EXPORT_SYMBOL(smp_call_function);
279 280
@@ -283,6 +284,9 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int
283 cpumask_t map = CPU_MASK_NONE; 284 cpumask_t map = CPU_MASK_NONE;
284 int ret = 0; 285 int ret = 0;
285 286
287 /* Can deadlock when called with interrupts disabled */
288 WARN_ON(irqs_disabled());
289
286 if (!cpu_online(cpu)) 290 if (!cpu_online(cpu))
287 return -EINVAL; 291 return -EINVAL;
288 292
@@ -299,6 +303,11 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int
299} 303}
300EXPORT_SYMBOL(smp_call_function_single); 304EXPORT_SYMBOL(smp_call_function_single);
301 305
306void smp_send_stop(void)
307{
308 __smp_call_function(stop_this_cpu, NULL, 1, 0);
309}
310
302void smp_call_function_interrupt(void) 311void smp_call_function_interrupt(void)
303{ 312{
304 void (*func) (void *info); 313 void (*func) (void *info);
@@ -560,6 +569,8 @@ int __devinit start_secondary(void *unused)
560 if (system_state > SYSTEM_BOOTING) 569 if (system_state > SYSTEM_BOOTING)
561 snapshot_timebase(); 570 snapshot_timebase();
562 571
572 secondary_cpu_time_init();
573
563 spin_lock(&call_lock); 574 spin_lock(&call_lock);
564 cpu_set(cpu, cpu_online_map); 575 cpu_set(cpu, cpu_online_map);
565 spin_unlock(&call_lock); 576 spin_unlock(&call_lock);
diff --git a/arch/powerpc/kernel/softemu8xx.c b/arch/powerpc/kernel/softemu8xx.c
new file mode 100644
index 000000000000..67d6f6890edc
--- /dev/null
+++ b/arch/powerpc/kernel/softemu8xx.c
@@ -0,0 +1,202 @@
1/*
2 * Software emulation of some PPC instructions for the 8xx core.
3 *
4 * Copyright (C) 1998 Dan Malek (dmalek@jlc.net)
5 *
 6 * Software floating point emulation for the MPC8xx processor. I did this mostly
7 * because it was easier than trying to get the libraries compiled for
8 * software floating point. The goal is still to get the libraries done,
9 * but I lost patience and needed some hacks to at least get init and
10 * shells running. The first problem is the setjmp/longjmp that save
11 * and restore the floating point registers.
12 *
13 * For this emulation, our working registers are found on the register
14 * save area.
15 */
16
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/mm.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/ptrace.h>
24#include <linux/slab.h>
25#include <linux/user.h>
26#include <linux/a.out.h>
27#include <linux/interrupt.h>
28
29#include <asm/pgtable.h>
30#include <asm/uaccess.h>
31#include <asm/system.h>
32#include <asm/io.h>
33
34/* Eventually we may need a look-up table, but this works for now.
35*/
36#define LFS 48
37#define LFD 50
38#define LFDU 51
39#define STFD 54
40#define STFDU 55
41#define FMR 63
42
43void print_8xx_pte(struct mm_struct *mm, unsigned long addr)
44{
45 pgd_t *pgd;
46 pmd_t *pmd;
47 pte_t *pte;
48
49 printk(" pte @ 0x%8lx: ", addr);
50 pgd = pgd_offset(mm, addr & PAGE_MASK);
51 if (pgd) {
52 pmd = pmd_offset(pud_offset(pgd, addr & PAGE_MASK),
53 addr & PAGE_MASK);
54 if (pmd && pmd_present(*pmd)) {
55 pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
56 if (pte) {
57 printk(" (0x%08lx)->(0x%08lx)->0x%08lx\n",
58 (long)pgd, (long)pte, (long)pte_val(*pte));
59#define pp ((long)pte_val(*pte))
60 printk(" RPN: %05lx PP: %lx SPS: %lx SH: %lx "
61 "CI: %lx v: %lx\n",
62 pp>>12, /* rpn */
63 (pp>>10)&3, /* pp */
64 (pp>>3)&1, /* small */
65 (pp>>2)&1, /* shared */
66 (pp>>1)&1, /* cache inhibit */
67 pp&1 /* valid */
68 );
69#undef pp
70 }
71 else {
72 printk("no pte\n");
73 }
74 }
75 else {
76 printk("no pmd\n");
77 }
78 }
79 else {
80 printk("no pgd\n");
81 }
82}
83
84int get_8xx_pte(struct mm_struct *mm, unsigned long addr)
85{
86 pgd_t *pgd;
87 pmd_t *pmd;
88 pte_t *pte;
89 int retval = 0;
90
91 pgd = pgd_offset(mm, addr & PAGE_MASK);
92 if (pgd) {
93 pmd = pmd_offset(pud_offset(pgd, addr & PAGE_MASK),
94 addr & PAGE_MASK);
95 if (pmd && pmd_present(*pmd)) {
96 pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
97 if (pte) {
98 retval = (int)pte_val(*pte);
99 }
100 }
101 }
102 return retval;
103}
104
105/*
106 * We return 0 on success, 1 on unimplemented instruction, and EFAULT
107 * if a load/store faulted.
108 */
109int Soft_emulate_8xx(struct pt_regs *regs)
110{
111 u32 inst, instword;
112 u32 flreg, idxreg, disp;
113 int retval;
114 s16 sdisp;
115 u32 *ea, *ip;
116
117 retval = 0;
118
119 instword = *((u32 *)regs->nip);
120 inst = instword >> 26;
121
122 flreg = (instword >> 21) & 0x1f;
123 idxreg = (instword >> 16) & 0x1f;
124 disp = instword & 0xffff;
125
126 ea = (u32 *)(regs->gpr[idxreg] + disp);
127 ip = (u32 *)&current->thread.fpr[flreg];
128
129 switch ( inst )
130 {
131 case LFD:
132 /* this is a 16 bit quantity that is sign extended
133 * so use a signed short here -- Cort
134 */
135 sdisp = (instword & 0xffff);
136 ea = (u32 *)(regs->gpr[idxreg] + sdisp);
137 if (copy_from_user(ip, ea, sizeof(double)))
138 retval = -EFAULT;
139 break;
140
141 case LFDU:
142 if (copy_from_user(ip, ea, sizeof(double)))
143 retval = -EFAULT;
144 else
145 regs->gpr[idxreg] = (u32)ea;
146 break;
147 case LFS:
148 sdisp = (instword & 0xffff);
149 ea = (u32 *)(regs->gpr[idxreg] + sdisp);
150 if (copy_from_user(ip, ea, sizeof(float)))
151 retval = -EFAULT;
152 break;
153 case STFD:
154 /* this is a 16 bit quantity that is sign extended
155 * so use a signed short here -- Cort
156 */
157 sdisp = (instword & 0xffff);
158 ea = (u32 *)(regs->gpr[idxreg] + sdisp);
159 if (copy_to_user(ea, ip, sizeof(double)))
160 retval = -EFAULT;
161 break;
162
163 case STFDU:
164 if (copy_to_user(ea, ip, sizeof(double)))
165 retval = -EFAULT;
166 else
167 regs->gpr[idxreg] = (u32)ea;
168 break;
169 case FMR:
170 /* assume this is a fp move -- Cort */
171 memcpy(ip, &current->thread.fpr[(instword>>11)&0x1f],
172 sizeof(double));
173 break;
174 default:
175 retval = 1;
176 printk("Bad emulation %s/%d\n"
177 " NIP: %08lx instruction: %08x opcode: %x "
178 "A: %x B: %x C: %x code: %x rc: %x\n",
179 current->comm,current->pid,
180 regs->nip,
181 instword,inst,
182 (instword>>16)&0x1f,
183 (instword>>11)&0x1f,
184 (instword>>6)&0x1f,
185 (instword>>1)&0x3ff,
186 instword&1);
187 {
188 int pa;
189 print_8xx_pte(current->mm,regs->nip);
190 pa = get_8xx_pte(current->mm,regs->nip) & PAGE_MASK;
191 pa |= (regs->nip & ~PAGE_MASK);
192 pa = (unsigned long)__va(pa);
193 printk("Kernel VA for NIP %x ", pa);
194 print_8xx_pte(current->mm,pa);
195 }
196 }
197
198 if (retval == 0)
199 regs->nip += 4;
200
201 return retval;
202}
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 55d29ed4b7a0..25d9a96484dd 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -197,6 +197,36 @@ SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
197SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4); 197SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
198SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5); 198SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
199 199
200#ifdef CONFIG_DEBUG_KERNEL
201SYSFS_PMCSETUP(hid0, SPRN_HID0);
202SYSFS_PMCSETUP(hid1, SPRN_HID1);
203SYSFS_PMCSETUP(hid4, SPRN_HID4);
204SYSFS_PMCSETUP(hid5, SPRN_HID5);
205SYSFS_PMCSETUP(ima0, SPRN_PA6T_IMA0);
206SYSFS_PMCSETUP(ima1, SPRN_PA6T_IMA1);
207SYSFS_PMCSETUP(ima2, SPRN_PA6T_IMA2);
208SYSFS_PMCSETUP(ima3, SPRN_PA6T_IMA3);
209SYSFS_PMCSETUP(ima4, SPRN_PA6T_IMA4);
210SYSFS_PMCSETUP(ima5, SPRN_PA6T_IMA5);
211SYSFS_PMCSETUP(ima6, SPRN_PA6T_IMA6);
212SYSFS_PMCSETUP(ima7, SPRN_PA6T_IMA7);
213SYSFS_PMCSETUP(ima8, SPRN_PA6T_IMA8);
214SYSFS_PMCSETUP(ima9, SPRN_PA6T_IMA9);
215SYSFS_PMCSETUP(imaat, SPRN_PA6T_IMAAT);
216SYSFS_PMCSETUP(btcr, SPRN_PA6T_BTCR);
217SYSFS_PMCSETUP(pccr, SPRN_PA6T_PCCR);
218SYSFS_PMCSETUP(rpccr, SPRN_PA6T_RPCCR);
219SYSFS_PMCSETUP(der, SPRN_PA6T_DER);
220SYSFS_PMCSETUP(mer, SPRN_PA6T_MER);
221SYSFS_PMCSETUP(ber, SPRN_PA6T_BER);
222SYSFS_PMCSETUP(ier, SPRN_PA6T_IER);
223SYSFS_PMCSETUP(sier, SPRN_PA6T_SIER);
224SYSFS_PMCSETUP(siar, SPRN_PA6T_SIAR);
225SYSFS_PMCSETUP(tsr0, SPRN_PA6T_TSR0);
226SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1);
227SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2);
228SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3);
229#endif /* CONFIG_DEBUG_KERNEL */
200 230
201static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra); 231static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
202static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL); 232static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
@@ -228,6 +258,36 @@ static struct sysdev_attribute pa6t_attrs[] = {
228 _SYSDEV_ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3), 258 _SYSDEV_ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
229 _SYSDEV_ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4), 259 _SYSDEV_ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
230 _SYSDEV_ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5), 260 _SYSDEV_ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
261#ifdef CONFIG_DEBUG_KERNEL
262 _SYSDEV_ATTR(hid0, 0600, show_hid0, store_hid0),
263 _SYSDEV_ATTR(hid1, 0600, show_hid1, store_hid1),
264 _SYSDEV_ATTR(hid4, 0600, show_hid4, store_hid4),
265 _SYSDEV_ATTR(hid5, 0600, show_hid5, store_hid5),
266 _SYSDEV_ATTR(ima0, 0600, show_ima0, store_ima0),
267 _SYSDEV_ATTR(ima1, 0600, show_ima1, store_ima1),
268 _SYSDEV_ATTR(ima2, 0600, show_ima2, store_ima2),
269 _SYSDEV_ATTR(ima3, 0600, show_ima3, store_ima3),
270 _SYSDEV_ATTR(ima4, 0600, show_ima4, store_ima4),
271 _SYSDEV_ATTR(ima5, 0600, show_ima5, store_ima5),
272 _SYSDEV_ATTR(ima6, 0600, show_ima6, store_ima6),
273 _SYSDEV_ATTR(ima7, 0600, show_ima7, store_ima7),
274 _SYSDEV_ATTR(ima8, 0600, show_ima8, store_ima8),
275 _SYSDEV_ATTR(ima9, 0600, show_ima9, store_ima9),
276 _SYSDEV_ATTR(imaat, 0600, show_imaat, store_imaat),
277 _SYSDEV_ATTR(btcr, 0600, show_btcr, store_btcr),
278 _SYSDEV_ATTR(pccr, 0600, show_pccr, store_pccr),
279 _SYSDEV_ATTR(rpccr, 0600, show_rpccr, store_rpccr),
280 _SYSDEV_ATTR(der, 0600, show_der, store_der),
281 _SYSDEV_ATTR(mer, 0600, show_mer, store_mer),
282 _SYSDEV_ATTR(ber, 0600, show_ber, store_ber),
283 _SYSDEV_ATTR(ier, 0600, show_ier, store_ier),
284 _SYSDEV_ATTR(sier, 0600, show_sier, store_sier),
285 _SYSDEV_ATTR(siar, 0600, show_siar, store_siar),
286 _SYSDEV_ATTR(tsr0, 0600, show_tsr0, store_tsr0),
287 _SYSDEV_ATTR(tsr1, 0600, show_tsr1, store_tsr1),
288 _SYSDEV_ATTR(tsr2, 0600, show_tsr2, store_tsr2),
289 _SYSDEV_ATTR(tsr3, 0600, show_tsr3, store_tsr3),
290#endif /* CONFIG_DEBUG_KERNEL */
231}; 291};
232 292
233 293
@@ -380,12 +440,14 @@ int cpu_add_sysdev_attr_group(struct attribute_group *attrs)
380{ 440{
381 int cpu; 441 int cpu;
382 struct sys_device *sysdev; 442 struct sys_device *sysdev;
443 int ret;
383 444
384 mutex_lock(&cpu_mutex); 445 mutex_lock(&cpu_mutex);
385 446
386 for_each_possible_cpu(cpu) { 447 for_each_possible_cpu(cpu) {
387 sysdev = get_cpu_sysdev(cpu); 448 sysdev = get_cpu_sysdev(cpu);
388 sysfs_create_group(&sysdev->kobj, attrs); 449 ret = sysfs_create_group(&sysdev->kobj, attrs);
450 WARN_ON(ret != 0);
389 } 451 }
390 452
391 mutex_unlock(&cpu_mutex); 453 mutex_unlock(&cpu_mutex);
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 579de70e0b4d..93219c34af32 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -39,6 +39,8 @@
39#ifdef CONFIG_PPC64 39#ifdef CONFIG_PPC64
40#define sys_sigpending sys_ni_syscall 40#define sys_sigpending sys_ni_syscall
41#define sys_old_getrlimit sys_ni_syscall 41#define sys_old_getrlimit sys_ni_syscall
42
43 .p2align 3
42#endif 44#endif
43 45
44_GLOBAL(sys_call_table) 46_GLOBAL(sys_call_table)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index c627cf86d1e3..9368da371f36 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -65,24 +65,68 @@
65#include <asm/div64.h> 65#include <asm/div64.h>
66#include <asm/smp.h> 66#include <asm/smp.h>
67#include <asm/vdso_datapage.h> 67#include <asm/vdso_datapage.h>
68#ifdef CONFIG_PPC64
69#include <asm/firmware.h> 68#include <asm/firmware.h>
70#endif
71#ifdef CONFIG_PPC_ISERIES 69#ifdef CONFIG_PPC_ISERIES
72#include <asm/iseries/it_lp_queue.h> 70#include <asm/iseries/it_lp_queue.h>
73#include <asm/iseries/hv_call_xm.h> 71#include <asm/iseries/hv_call_xm.h>
74#endif 72#endif
75#include <asm/smp.h>
76 73
77/* keep track of when we need to update the rtc */ 74/* powerpc clocksource/clockevent code */
78time_t last_rtc_update; 75
76#include <linux/clockchips.h>
77#include <linux/clocksource.h>
78
79static cycle_t rtc_read(void);
80static struct clocksource clocksource_rtc = {
81 .name = "rtc",
82 .rating = 400,
83 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
84 .mask = CLOCKSOURCE_MASK(64),
85 .shift = 22,
86 .mult = 0, /* To be filled in */
87 .read = rtc_read,
88};
89
90static cycle_t timebase_read(void);
91static struct clocksource clocksource_timebase = {
92 .name = "timebase",
93 .rating = 400,
94 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
95 .mask = CLOCKSOURCE_MASK(64),
96 .shift = 22,
97 .mult = 0, /* To be filled in */
98 .read = timebase_read,
99};
100
101#define DECREMENTER_MAX 0x7fffffff
102
103static int decrementer_set_next_event(unsigned long evt,
104 struct clock_event_device *dev);
105static void decrementer_set_mode(enum clock_event_mode mode,
106 struct clock_event_device *dev);
107
108static struct clock_event_device decrementer_clockevent = {
109 .name = "decrementer",
110 .rating = 200,
111 .shift = 16,
112 .mult = 0, /* To be filled in */
113 .irq = 0,
114 .set_next_event = decrementer_set_next_event,
115 .set_mode = decrementer_set_mode,
116 .features = CLOCK_EVT_FEAT_ONESHOT,
117};
118
119static DEFINE_PER_CPU(struct clock_event_device, decrementers);
120void init_decrementer_clockevent(void);
121static DEFINE_PER_CPU(u64, decrementer_next_tb);
122
79#ifdef CONFIG_PPC_ISERIES 123#ifdef CONFIG_PPC_ISERIES
80static unsigned long __initdata iSeries_recal_titan; 124static unsigned long __initdata iSeries_recal_titan;
81static signed long __initdata iSeries_recal_tb; 125static signed long __initdata iSeries_recal_tb;
82#endif
83 126
 84/* The decrementer counts down by 128 every 128ns on a 601. */ 127 /* Forward declaration is only needed for iSeries compiles */
85#define DECREMENTER_COUNT_601 (1000000000 / HZ) 128void __init clocksource_init(void);
129#endif
86 130
87#define XSEC_PER_SEC (1024*1024) 131#define XSEC_PER_SEC (1024*1024)
88 132
@@ -349,98 +393,6 @@ void udelay(unsigned long usecs)
349} 393}
350EXPORT_SYMBOL(udelay); 394EXPORT_SYMBOL(udelay);
351 395
352static __inline__ void timer_check_rtc(void)
353{
354 /*
355 * update the rtc when needed, this should be performed on the
356 * right fraction of a second. Half or full second ?
357 * Full second works on mk48t59 clocks, others need testing.
358 * Note that this update is basically only used through
359 * the adjtimex system calls. Setting the HW clock in
360 * any other way is a /dev/rtc and userland business.
361 * This is still wrong by -0.5/+1.5 jiffies because of the
362 * timer interrupt resolution and possible delay, but here we
363 * hit a quantization limit which can only be solved by higher
364 * resolution timers and decoupling time management from timer
365 * interrupts. This is also wrong on the clocks
366 * which require being written at the half second boundary.
367 * We should have an rtc call that only sets the minutes and
368 * seconds like on Intel to avoid problems with non UTC clocks.
369 */
370 if (ppc_md.set_rtc_time && ntp_synced() &&
371 xtime.tv_sec - last_rtc_update >= 659 &&
372 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
373 struct rtc_time tm;
374 to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
375 tm.tm_year -= 1900;
376 tm.tm_mon -= 1;
377 if (ppc_md.set_rtc_time(&tm) == 0)
378 last_rtc_update = xtime.tv_sec + 1;
379 else
380 /* Try again one minute later */
381 last_rtc_update += 60;
382 }
383}
384
385/*
386 * This version of gettimeofday has microsecond resolution.
387 */
388static inline void __do_gettimeofday(struct timeval *tv)
389{
390 unsigned long sec, usec;
391 u64 tb_ticks, xsec;
392 struct gettimeofday_vars *temp_varp;
393 u64 temp_tb_to_xs, temp_stamp_xsec;
394
395 /*
396 * These calculations are faster (gets rid of divides)
397 * if done in units of 1/2^20 rather than microseconds.
398 * The conversion to microseconds at the end is done
399 * without a divide (and in fact, without a multiply)
400 */
401 temp_varp = do_gtod.varp;
402
403 /* Sampling the time base must be done after loading
404 * do_gtod.varp in order to avoid racing with update_gtod.
405 */
406 data_barrier(temp_varp);
407 tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
408 temp_tb_to_xs = temp_varp->tb_to_xs;
409 temp_stamp_xsec = temp_varp->stamp_xsec;
410 xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
411 sec = xsec / XSEC_PER_SEC;
412 usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
413 usec = SCALE_XSEC(usec, 1000000);
414
415 tv->tv_sec = sec;
416 tv->tv_usec = usec;
417}
418
419void do_gettimeofday(struct timeval *tv)
420{
421 if (__USE_RTC()) {
422 /* do this the old way */
423 unsigned long flags, seq;
424 unsigned int sec, nsec, usec;
425
426 do {
427 seq = read_seqbegin_irqsave(&xtime_lock, flags);
428 sec = xtime.tv_sec;
429 nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
430 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
431 usec = nsec / 1000;
432 while (usec >= 1000000) {
433 usec -= 1000000;
434 ++sec;
435 }
436 tv->tv_sec = sec;
437 tv->tv_usec = usec;
438 return;
439 }
440 __do_gettimeofday(tv);
441}
442
443EXPORT_SYMBOL(do_gettimeofday);
444 396
445/* 397/*
446 * There are two copies of tb_to_xs and stamp_xsec so that no 398 * There are two copies of tb_to_xs and stamp_xsec so that no
@@ -486,56 +438,6 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
486 ++(vdso_data->tb_update_count); 438 ++(vdso_data->tb_update_count);
487} 439}
488 440
489/*
490 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
491 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
492 * difference tb - tb_orig_stamp small enough to always fit inside a
493 * 32 bits number. This is a requirement of our fast 32 bits userland
494 * implementation in the vdso. If we "miss" a call to this function
495 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
496 * with a too big difference, then the vdso will fallback to calling
497 * the syscall
498 */
499static __inline__ void timer_recalc_offset(u64 cur_tb)
500{
501 unsigned long offset;
502 u64 new_stamp_xsec;
503 u64 tlen, t2x;
504 u64 tb, xsec_old, xsec_new;
505 struct gettimeofday_vars *varp;
506
507 if (__USE_RTC())
508 return;
509 tlen = current_tick_length();
510 offset = cur_tb - do_gtod.varp->tb_orig_stamp;
511 if (tlen == last_tick_len && offset < 0x80000000u)
512 return;
513 if (tlen != last_tick_len) {
514 t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
515 last_tick_len = tlen;
516 } else
517 t2x = do_gtod.varp->tb_to_xs;
518 new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
519 do_div(new_stamp_xsec, 1000000000);
520 new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
521
522 ++vdso_data->tb_update_count;
523 smp_mb();
524
525 /*
526 * Make sure time doesn't go backwards for userspace gettimeofday.
527 */
528 tb = get_tb();
529 varp = do_gtod.varp;
530 xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
531 + varp->stamp_xsec;
532 xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
533 if (xsec_new < xsec_old)
534 new_stamp_xsec += xsec_old - xsec_new;
535
536 update_gtod(cur_tb, new_stamp_xsec, t2x);
537}
538
539#ifdef CONFIG_SMP 441#ifdef CONFIG_SMP
540unsigned long profile_pc(struct pt_regs *regs) 442unsigned long profile_pc(struct pt_regs *regs)
541{ 443{
@@ -607,6 +509,8 @@ static int __init iSeries_tb_recal(void)
607 iSeries_recal_titan = titan; 509 iSeries_recal_titan = titan;
608 iSeries_recal_tb = tb; 510 iSeries_recal_tb = tb;
609 511
512 /* Called here as now we know accurate values for the timebase */
513 clocksource_init();
610 return 0; 514 return 0;
611} 515}
612late_initcall(iSeries_tb_recal); 516late_initcall(iSeries_tb_recal);
@@ -636,20 +540,30 @@ void __init iSeries_time_init_early(void)
636void timer_interrupt(struct pt_regs * regs) 540void timer_interrupt(struct pt_regs * regs)
637{ 541{
638 struct pt_regs *old_regs; 542 struct pt_regs *old_regs;
639 int next_dec;
640 int cpu = smp_processor_id(); 543 int cpu = smp_processor_id();
641 unsigned long ticks; 544 struct clock_event_device *evt = &per_cpu(decrementers, cpu);
642 u64 tb_next_jiffy; 545 u64 now;
546
547 /* Ensure a positive value is written to the decrementer, or else
548 * some CPUs will continuue to take decrementer exceptions */
549 set_dec(DECREMENTER_MAX);
643 550
644#ifdef CONFIG_PPC32 551#ifdef CONFIG_PPC32
645 if (atomic_read(&ppc_n_lost_interrupts) != 0) 552 if (atomic_read(&ppc_n_lost_interrupts) != 0)
646 do_IRQ(regs); 553 do_IRQ(regs);
647#endif 554#endif
648 555
556 now = get_tb_or_rtc();
557 if (now < per_cpu(decrementer_next_tb, cpu)) {
558 /* not time for this event yet */
559 now = per_cpu(decrementer_next_tb, cpu) - now;
560 if (now <= DECREMENTER_MAX)
561 set_dec((unsigned int)now - 1);
562 return;
563 }
649 old_regs = set_irq_regs(regs); 564 old_regs = set_irq_regs(regs);
650 irq_enter(); 565 irq_enter();
651 566
652 profile_tick(CPU_PROFILING);
653 calculate_steal_time(); 567 calculate_steal_time();
654 568
655#ifdef CONFIG_PPC_ISERIES 569#ifdef CONFIG_PPC_ISERIES
@@ -657,46 +571,20 @@ void timer_interrupt(struct pt_regs * regs)
657 get_lppaca()->int_dword.fields.decr_int = 0; 571 get_lppaca()->int_dword.fields.decr_int = 0;
658#endif 572#endif
659 573
660 while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu))) 574 /*
661 >= tb_ticks_per_jiffy) { 575 * We cannot disable the decrementer, so in the period
662 /* Update last_jiffy */ 576 * between this cpu's being marked offline in cpu_online_map
663 per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy; 577 * and calling stop-self, it is taking timer interrupts.
664 /* Handle RTCL overflow on 601 */ 578 * Avoid calling into the scheduler rebalancing code if this
665 if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000) 579 * is the case.
666 per_cpu(last_jiffy, cpu) -= 1000000000; 580 */
667 581 if (!cpu_is_offline(cpu))
668 /* 582 account_process_time(regs);
669 * We cannot disable the decrementer, so in the period
670 * between this cpu's being marked offline in cpu_online_map
671 * and calling stop-self, it is taking timer interrupts.
672 * Avoid calling into the scheduler rebalancing code if this
673 * is the case.
674 */
675 if (!cpu_is_offline(cpu))
676 account_process_time(regs);
677
678 /*
679 * No need to check whether cpu is offline here; boot_cpuid
680 * should have been fixed up by now.
681 */
682 if (cpu != boot_cpuid)
683 continue;
684 583
685 write_seqlock(&xtime_lock); 584 if (evt->event_handler)
686 tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy; 585 evt->event_handler(evt);
687 if (__USE_RTC() && tb_next_jiffy >= 1000000000) 586 else
688 tb_next_jiffy -= 1000000000; 587 evt->set_next_event(DECREMENTER_MAX, evt);
689 if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
690 tb_last_jiffy = tb_next_jiffy;
691 do_timer(1);
692 timer_recalc_offset(tb_last_jiffy);
693 timer_check_rtc();
694 }
695 write_sequnlock(&xtime_lock);
696 }
697
698 next_dec = tb_ticks_per_jiffy - ticks;
699 set_dec(next_dec);
700 588
701#ifdef CONFIG_PPC_ISERIES 589#ifdef CONFIG_PPC_ISERIES
702 if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) 590 if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
@@ -762,71 +650,6 @@ unsigned long long sched_clock(void)
762 return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; 650 return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
763} 651}
764 652
765int do_settimeofday(struct timespec *tv)
766{
767 time_t wtm_sec, new_sec = tv->tv_sec;
768 long wtm_nsec, new_nsec = tv->tv_nsec;
769 unsigned long flags;
770 u64 new_xsec;
771 unsigned long tb_delta;
772
773 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
774 return -EINVAL;
775
776 write_seqlock_irqsave(&xtime_lock, flags);
777
778 /*
779 * Updating the RTC is not the job of this code. If the time is
780 * stepped under NTP, the RTC will be updated after STA_UNSYNC
781 * is cleared. Tools like clock/hwclock either copy the RTC
782 * to the system time, in which case there is no point in writing
783 * to the RTC again, or write to the RTC but then they don't call
784 * settimeofday to perform this operation.
785 */
786
787 /* Make userspace gettimeofday spin until we're done. */
788 ++vdso_data->tb_update_count;
789 smp_mb();
790
791 /*
792 * Subtract off the number of nanoseconds since the
793 * beginning of the last tick.
794 */
795 tb_delta = tb_ticks_since(tb_last_jiffy);
796 tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
797 new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
798
799 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
800 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
801
802 set_normalized_timespec(&xtime, new_sec, new_nsec);
803 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
804
805 /* In case of a large backwards jump in time with NTP, we want the
806 * clock to be updated as soon as the PLL is again in lock.
807 */
808 last_rtc_update = new_sec - 658;
809
810 ntp_clear();
811
812 new_xsec = xtime.tv_nsec;
813 if (new_xsec != 0) {
814 new_xsec *= XSEC_PER_SEC;
815 do_div(new_xsec, NSEC_PER_SEC);
816 }
817 new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
818 update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
819
820 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
821 vdso_data->tz_dsttime = sys_tz.tz_dsttime;
822
823 write_sequnlock_irqrestore(&xtime_lock, flags);
824 clock_was_set();
825 return 0;
826}
827
828EXPORT_SYMBOL(do_settimeofday);
829
830static int __init get_freq(char *name, int cells, unsigned long *val) 653static int __init get_freq(char *name, int cells, unsigned long *val)
831{ 654{
832 struct device_node *cpu; 655 struct device_node *cpu;
@@ -869,7 +692,7 @@ void __init generic_calibrate_decr(void)
869 "(not found)\n"); 692 "(not found)\n");
870 } 693 }
871 694
872#ifdef CONFIG_BOOKE 695#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
873 /* Set the time base to zero */ 696 /* Set the time base to zero */
874 mtspr(SPRN_TBWL, 0); 697 mtspr(SPRN_TBWL, 0);
875 mtspr(SPRN_TBWU, 0); 698 mtspr(SPRN_TBWU, 0);
@@ -882,12 +705,35 @@ void __init generic_calibrate_decr(void)
882#endif 705#endif
883} 706}
884 707
885unsigned long get_boot_time(void) 708int update_persistent_clock(struct timespec now)
886{ 709{
887 struct rtc_time tm; 710 struct rtc_time tm;
888 711
889 if (ppc_md.get_boot_time) 712 if (!ppc_md.set_rtc_time)
890 return ppc_md.get_boot_time(); 713 return 0;
714
715 to_tm(now.tv_sec + 1 + timezone_offset, &tm);
716 tm.tm_year -= 1900;
717 tm.tm_mon -= 1;
718
719 return ppc_md.set_rtc_time(&tm);
720}
721
722unsigned long read_persistent_clock(void)
723{
724 struct rtc_time tm;
725 static int first = 1;
726
727 /* XXX this is a litle fragile but will work okay in the short term */
728 if (first) {
729 first = 0;
730 if (ppc_md.time_init)
731 timezone_offset = ppc_md.time_init();
732
733 /* get_boot_time() isn't guaranteed to be safe to call late */
734 if (ppc_md.get_boot_time)
735 return ppc_md.get_boot_time() -timezone_offset;
736 }
891 if (!ppc_md.get_rtc_time) 737 if (!ppc_md.get_rtc_time)
892 return 0; 738 return 0;
893 ppc_md.get_rtc_time(&tm); 739 ppc_md.get_rtc_time(&tm);
@@ -895,18 +741,128 @@ unsigned long get_boot_time(void)
895 tm.tm_hour, tm.tm_min, tm.tm_sec); 741 tm.tm_hour, tm.tm_min, tm.tm_sec);
896} 742}
897 743
744/* clocksource code */
745static cycle_t rtc_read(void)
746{
747 return (cycle_t)get_rtc();
748}
749
750static cycle_t timebase_read(void)
751{
752 return (cycle_t)get_tb();
753}
754
755void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
756{
757 u64 t2x, stamp_xsec;
758
759 if (clock != &clocksource_timebase)
760 return;
761
762 /* Make userspace gettimeofday spin until we're done. */
763 ++vdso_data->tb_update_count;
764 smp_mb();
765
766 /* XXX this assumes clock->shift == 22 */
767 /* 4611686018 ~= 2^(20+64-22) / 1e9 */
768 t2x = (u64) clock->mult * 4611686018ULL;
769 stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
770 do_div(stamp_xsec, 1000000000);
771 stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
772 update_gtod(clock->cycle_last, stamp_xsec, t2x);
773}
774
775void update_vsyscall_tz(void)
776{
777 /* Make userspace gettimeofday spin until we're done. */
778 ++vdso_data->tb_update_count;
779 smp_mb();
780 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
781 vdso_data->tz_dsttime = sys_tz.tz_dsttime;
782 smp_mb();
783 ++vdso_data->tb_update_count;
784}
785
786void __init clocksource_init(void)
787{
788 struct clocksource *clock;
789
790 if (__USE_RTC())
791 clock = &clocksource_rtc;
792 else
793 clock = &clocksource_timebase;
794
795 clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);
796
797 if (clocksource_register(clock)) {
798 printk(KERN_ERR "clocksource: %s is already registered\n",
799 clock->name);
800 return;
801 }
802
803 printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
804 clock->name, clock->mult, clock->shift);
805}
806
807static int decrementer_set_next_event(unsigned long evt,
808 struct clock_event_device *dev)
809{
810 __get_cpu_var(decrementer_next_tb) = get_tb_or_rtc() + evt;
811 /* The decrementer interrupts on the 0 -> -1 transition */
812 if (evt)
813 --evt;
814 set_dec(evt);
815 return 0;
816}
817
818static void decrementer_set_mode(enum clock_event_mode mode,
819 struct clock_event_device *dev)
820{
821 if (mode != CLOCK_EVT_MODE_ONESHOT)
822 decrementer_set_next_event(DECREMENTER_MAX, dev);
823}
824
825static void register_decrementer_clockevent(int cpu)
826{
827 struct clock_event_device *dec = &per_cpu(decrementers, cpu);
828
829 *dec = decrementer_clockevent;
830 dec->cpumask = cpumask_of_cpu(cpu);
831
832 printk(KERN_ERR "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
833 dec->name, dec->mult, dec->shift, cpu);
834
835 clockevents_register_device(dec);
836}
837
838void init_decrementer_clockevent(void)
839{
840 int cpu = smp_processor_id();
841
842 decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC,
843 decrementer_clockevent.shift);
844 decrementer_clockevent.max_delta_ns =
845 clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
846 decrementer_clockevent.min_delta_ns = 1000;
847
848 register_decrementer_clockevent(cpu);
849}
850
851void secondary_cpu_time_init(void)
852{
853 /* FIME: Should make unrelatred change to move snapshot_timebase
854 * call here ! */
855 register_decrementer_clockevent(smp_processor_id());
856}
857
898/* This function is only called on the boot processor */ 858/* This function is only called on the boot processor */
899void __init time_init(void) 859void __init time_init(void)
900{ 860{
901 unsigned long flags; 861 unsigned long flags;
902 unsigned long tm = 0;
903 struct div_result res; 862 struct div_result res;
904 u64 scale, x; 863 u64 scale, x;
905 unsigned shift; 864 unsigned shift;
906 865
907 if (ppc_md.time_init != NULL)
908 timezone_offset = ppc_md.time_init();
909
910 if (__USE_RTC()) { 866 if (__USE_RTC()) {
911 /* 601 processor: dec counts down by 128 every 128ns */ 867 /* 601 processor: dec counts down by 128 every 128ns */
912 ppc_tb_freq = 1000000000; 868 ppc_tb_freq = 1000000000;
@@ -981,19 +937,14 @@ void __init time_init(void)
981 /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ 937 /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
982 boot_tb = get_tb_or_rtc(); 938 boot_tb = get_tb_or_rtc();
983 939
984 tm = get_boot_time();
985
986 write_seqlock_irqsave(&xtime_lock, flags); 940 write_seqlock_irqsave(&xtime_lock, flags);
987 941
988 /* If platform provided a timezone (pmac), we correct the time */ 942 /* If platform provided a timezone (pmac), we correct the time */
989 if (timezone_offset) { 943 if (timezone_offset) {
990 sys_tz.tz_minuteswest = -timezone_offset / 60; 944 sys_tz.tz_minuteswest = -timezone_offset / 60;
991 sys_tz.tz_dsttime = 0; 945 sys_tz.tz_dsttime = 0;
992 tm -= timezone_offset;
993 } 946 }
994 947
995 xtime.tv_sec = tm;
996 xtime.tv_nsec = 0;
997 do_gtod.varp = &do_gtod.vars[0]; 948 do_gtod.varp = &do_gtod.vars[0];
998 do_gtod.var_idx = 0; 949 do_gtod.var_idx = 0;
999 do_gtod.varp->tb_orig_stamp = tb_last_jiffy; 950 do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
@@ -1011,13 +962,13 @@ void __init time_init(void)
1011 962
1012 time_freq = 0; 963 time_freq = 0;
1013 964
1014 last_rtc_update = xtime.tv_sec;
1015 set_normalized_timespec(&wall_to_monotonic,
1016 -xtime.tv_sec, -xtime.tv_nsec);
1017 write_sequnlock_irqrestore(&xtime_lock, flags); 965 write_sequnlock_irqrestore(&xtime_lock, flags);
1018 966
1019 /* Not exact, but the timer interrupt takes care of this */ 967 /* Register the clocksource, if we're not running on iSeries */
1020 set_dec(tb_ticks_per_jiffy); 968 if (!firmware_has_feature(FW_FEATURE_ISERIES))
969 clocksource_init();
970
971 init_decrementer_clockevent();
1021} 972}
1022 973
1023 974
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index d8502e377518..bf9e39c6e296 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -172,11 +172,21 @@ int die(const char *str, struct pt_regs *regs, long err)
172void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 172void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
173{ 173{
174 siginfo_t info; 174 siginfo_t info;
175 const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
176 "at %08lx nip %08lx lr %08lx code %x\n";
177 const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
178 "at %016lx nip %016lx lr %016lx code %x\n";
175 179
176 if (!user_mode(regs)) { 180 if (!user_mode(regs)) {
177 if (die("Exception in kernel mode", regs, signr)) 181 if (die("Exception in kernel mode", regs, signr))
178 return; 182 return;
179 } 183 } else if (show_unhandled_signals &&
184 unhandled_signal(current, signr) &&
185 printk_ratelimit()) {
186 printk(regs->msr & MSR_SF ? fmt64 : fmt32,
187 current->comm, current->pid, signr,
188 addr, regs->nip, regs->link, code);
189 }
180 190
181 memset(&info, 0, sizeof(info)); 191 memset(&info, 0, sizeof(info));
182 info.si_signo = signr; 192 info.si_signo = signr;
@@ -324,47 +334,10 @@ static inline int check_io_access(struct pt_regs *regs)
324#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) 334#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
325#endif 335#endif
326 336
327/* 337static int generic_machine_check_exception(struct pt_regs *regs)
328 * This is "fall-back" implementation for configurations
329 * which don't provide platform-specific machine check info
330 */
331void __attribute__ ((weak))
332platform_machine_check(struct pt_regs *regs)
333{
334}
335
336void machine_check_exception(struct pt_regs *regs)
337{ 338{
338 int recover = 0;
339 unsigned long reason = get_mc_reason(regs); 339 unsigned long reason = get_mc_reason(regs);
340 340
341 /* See if any machine dependent calls */
342 if (ppc_md.machine_check_exception)
343 recover = ppc_md.machine_check_exception(regs);
344
345 if (recover)
346 return;
347
348 if (user_mode(regs)) {
349 regs->msr |= MSR_RI;
350 _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
351 return;
352 }
353
354#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
355 /* the qspan pci read routines can cause machine checks -- Cort */
356 bad_page_fault(regs, regs->dar, SIGBUS);
357 return;
358#endif
359
360 if (debugger_fault_handler(regs)) {
361 regs->msr |= MSR_RI;
362 return;
363 }
364
365 if (check_io_access(regs))
366 return;
367
368#if defined(CONFIG_4xx) && !defined(CONFIG_440A) 341#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
369 if (reason & ESR_IMCP) { 342 if (reason & ESR_IMCP) {
370 printk("Instruction"); 343 printk("Instruction");
@@ -480,11 +453,41 @@ void machine_check_exception(struct pt_regs *regs)
480 } 453 }
481#endif /* CONFIG_4xx */ 454#endif /* CONFIG_4xx */
482 455
483 /* 456 return 0;
484 * Optional platform-provided routine to print out 457}
485 * additional info, e.g. bus error registers. 458
486 */ 459void machine_check_exception(struct pt_regs *regs)
487 platform_machine_check(regs); 460{
461 int recover = 0;
462
463 /* See if any machine dependent calls */
464 if (ppc_md.machine_check_exception)
465 recover = ppc_md.machine_check_exception(regs);
466 else
467 recover = generic_machine_check_exception(regs);
468
469 if (recover)
470 return;
471
472 if (user_mode(regs)) {
473 regs->msr |= MSR_RI;
474 _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
475 return;
476 }
477
478#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
479 /* the qspan pci read routines can cause machine checks -- Cort */
480 bad_page_fault(regs, regs->dar, SIGBUS);
481 return;
482#endif
483
484 if (debugger_fault_handler(regs)) {
485 regs->msr |= MSR_RI;
486 return;
487 }
488
489 if (check_io_access(regs))
490 return;
488 491
489 if (debugger_fault_handler(regs)) 492 if (debugger_fault_handler(regs))
490 return; 493 return;
@@ -913,7 +916,9 @@ void SoftwareEmulation(struct pt_regs *regs)
913{ 916{
914 extern int do_mathemu(struct pt_regs *); 917 extern int do_mathemu(struct pt_regs *);
915 extern int Soft_emulate_8xx(struct pt_regs *); 918 extern int Soft_emulate_8xx(struct pt_regs *);
919#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
916 int errcode; 920 int errcode;
921#endif
917 922
918 CHECK_FULL_REGS(regs); 923 CHECK_FULL_REGS(regs);
919 924
@@ -943,7 +948,7 @@ void SoftwareEmulation(struct pt_regs *regs)
943 return; 948 return;
944 } 949 }
945 950
946#else 951#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
947 errcode = Soft_emulate_8xx(regs); 952 errcode = Soft_emulate_8xx(regs);
948 switch (errcode) { 953 switch (errcode) {
949 case 0: 954 case 0:
@@ -956,6 +961,8 @@ void SoftwareEmulation(struct pt_regs *regs)
956 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 961 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
957 return; 962 return;
958 } 963 }
964#else
965 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
959#endif 966#endif
960} 967}
961#endif /* CONFIG_8xx */ 968#endif /* CONFIG_8xx */
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 0f9b4eadfbcb..d723070c9a33 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -54,6 +54,8 @@ void __init udbg_early_init(void)
54#elif defined(CONFIG_PPC_EARLY_DEBUG_44x) 54#elif defined(CONFIG_PPC_EARLY_DEBUG_44x)
55 /* PPC44x debug */ 55 /* PPC44x debug */
56 udbg_init_44x_as1(); 56 udbg_init_44x_as1();
57#elif defined(CONFIG_PPC_EARLY_DEBUG_CPM)
58 udbg_init_cpm();
57#endif 59#endif
58} 60}
59 61
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 7afab5bcd61a..833a3d0bcfa7 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -206,11 +206,22 @@ static void udbg_44x_as1_putc(char c)
206 } 206 }
207} 207}
208 208
209static int udbg_44x_as1_getc(void)
210{
211 if (udbg_comport) {
212 while ((as1_readb(&udbg_comport->lsr) & LSR_DR) == 0)
213 ; /* wait for char */
214 return as1_readb(&udbg_comport->rbr);
215 }
216 return -1;
217}
218
209void __init udbg_init_44x_as1(void) 219void __init udbg_init_44x_as1(void)
210{ 220{
211 udbg_comport = 221 udbg_comport =
212 (volatile struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR; 222 (volatile struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR;
213 223
214 udbg_putc = udbg_44x_as1_putc; 224 udbg_putc = udbg_44x_as1_putc;
225 udbg_getc = udbg_44x_as1_getc;
215} 226}
216#endif /* CONFIG_PPC_EARLY_DEBUG_44x */ 227#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 213fa31ac537..2322ba5cce4c 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -766,7 +766,9 @@ static int __init vdso_init(void)
766 766
767 return 0; 767 return 0;
768} 768}
769#ifdef CONFIG_PPC_MERGE
769arch_initcall(vdso_init); 770arch_initcall(vdso_init);
771#endif
770 772
771int in_gate_area_no_task(unsigned long addr) 773int in_gate_area_no_task(unsigned long addr)
772{ 774{
diff --git a/arch/powerpc/kernel/vdso32/.gitignore b/arch/powerpc/kernel/vdso32/.gitignore
index e45fba9d0ced..fea5809857a5 100644
--- a/arch/powerpc/kernel/vdso32/.gitignore
+++ b/arch/powerpc/kernel/vdso32/.gitignore
@@ -1 +1,2 @@
1vdso32.lds 1vdso32.lds
2vdso32.so.dbg
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 3726358faae8..c3d57bd01a88 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -9,11 +9,11 @@ ifeq ($(CONFIG_PPC32),y)
9CROSS32CC := $(CC) 9CROSS32CC := $(CC)
10endif 10endif
11 11
12targets := $(obj-vdso32) vdso32.so 12targets := $(obj-vdso32) vdso32.so vdso32.so.dbg
13obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) 13obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
14 14
15 15
16EXTRA_CFLAGS := -shared -s -fno-common -fno-builtin 16EXTRA_CFLAGS := -shared -fno-common -fno-builtin
17EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ 17EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
18 $(call ld-option, -Wl$(comma)--hash-style=sysv) 18 $(call ld-option, -Wl$(comma)--hash-style=sysv)
19EXTRA_AFLAGS := -D__VDSO32__ -s 19EXTRA_AFLAGS := -D__VDSO32__ -s
@@ -26,9 +26,14 @@ CPPFLAGS_vdso32.lds += -P -C -Upowerpc
26$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so 26$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
27 27
28# link rule for the .so file, .lds has to be first 28# link rule for the .so file, .lds has to be first
29$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) 29$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
30 $(call if_changed,vdso32ld) 30 $(call if_changed,vdso32ld)
31 31
32# strip rule for the .so file
33$(obj)/%.so: OBJCOPYFLAGS := -S
34$(obj)/%.so: $(obj)/%.so.dbg FORCE
35 $(call if_changed,objcopy)
36
32# assembly rules for the .S files 37# assembly rules for the .S files
33$(obj-vdso32): %.o: %.S 38$(obj-vdso32): %.o: %.S
34 $(call if_changed_dep,vdso32as) 39 $(call if_changed_dep,vdso32as)
@@ -39,3 +44,12 @@ quiet_cmd_vdso32ld = VDSO32L $@
39quiet_cmd_vdso32as = VDSO32A $@ 44quiet_cmd_vdso32as = VDSO32A $@
40 cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< 45 cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
41 46
47# install commands for the unstripped file
48quiet_cmd_vdso_install = INSTALL $@
49 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
50
51vdso32.so: $(obj)/vdso32.so.dbg
52 @mkdir -p $(MODLIB)/vdso
53 $(call cmd,vdso_install)
54
55vdso_install: vdso32.so
diff --git a/arch/powerpc/kernel/vdso64/.gitignore b/arch/powerpc/kernel/vdso64/.gitignore
index 3fd18cf9fec2..77a0b423642c 100644
--- a/arch/powerpc/kernel/vdso64/.gitignore
+++ b/arch/powerpc/kernel/vdso64/.gitignore
@@ -1 +1,2 @@
1vdso64.lds 1vdso64.lds
2vdso64.so.dbg
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index 43af9b2a6f3b..fa7f1b8f3e50 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -4,10 +4,10 @@ obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o
4 4
5# Build rules 5# Build rules
6 6
7targets := $(obj-vdso64) vdso64.so 7targets := $(obj-vdso64) vdso64.so vdso64.so.dbg
8obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) 8obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
9 9
10EXTRA_CFLAGS := -shared -s -fno-common -fno-builtin 10EXTRA_CFLAGS := -shared -fno-common -fno-builtin
11EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ 11EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
12 $(call ld-option, -Wl$(comma)--hash-style=sysv) 12 $(call ld-option, -Wl$(comma)--hash-style=sysv)
13EXTRA_AFLAGS := -D__VDSO64__ -s 13EXTRA_AFLAGS := -D__VDSO64__ -s
@@ -20,9 +20,14 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
20$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so 20$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
21 21
22# link rule for the .so file, .lds has to be first 22# link rule for the .so file, .lds has to be first
23$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) 23$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
24 $(call if_changed,vdso64ld) 24 $(call if_changed,vdso64ld)
25 25
26# strip rule for the .so file
27$(obj)/%.so: OBJCOPYFLAGS := -S
28$(obj)/%.so: $(obj)/%.so.dbg FORCE
29 $(call if_changed,objcopy)
30
26# assembly rules for the .S files 31# assembly rules for the .S files
27$(obj-vdso64): %.o: %.S 32$(obj-vdso64): %.o: %.S
28 $(call if_changed_dep,vdso64as) 33 $(call if_changed_dep,vdso64as)
@@ -33,4 +38,12 @@ quiet_cmd_vdso64ld = VDSO64L $@
33quiet_cmd_vdso64as = VDSO64A $@ 38quiet_cmd_vdso64as = VDSO64A $@
34 cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< 39 cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
35 40
41# install commands for the unstripped file
42quiet_cmd_vdso_install = INSTALL $@
43 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
44
45vdso64.so: $(obj)/vdso64.so.dbg
46 @mkdir -p $(MODLIB)/vdso
47 $(call cmd,vdso_install)
36 48
49vdso_install: vdso64.so
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 62c1bc12ea39..cb22a3557c4e 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -39,6 +39,8 @@
39 39
40extern struct kset devices_subsys; /* needed for vio_find_name() */ 40extern struct kset devices_subsys; /* needed for vio_find_name() */
41 41
42static struct bus_type vio_bus_type;
43
42static struct vio_dev vio_bus_device = { /* fake "parent" device */ 44static struct vio_dev vio_bus_device = { /* fake "parent" device */
43 .name = vio_bus_device.dev.bus_id, 45 .name = vio_bus_device.dev.bus_id,
44 .type = "", 46 .type = "",
@@ -46,60 +48,33 @@ static struct vio_dev vio_bus_device = { /* fake "parent" device */
46 .dev.bus = &vio_bus_type, 48 .dev.bus = &vio_bus_type,
47}; 49};
48 50
49#ifdef CONFIG_PPC_ISERIES 51static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
50struct device *iSeries_vio_dev = &vio_bus_device.dev; 52{
51EXPORT_SYMBOL(iSeries_vio_dev); 53 const unsigned char *dma_window;
54 struct iommu_table *tbl;
55 unsigned long offset, size;
52 56
53static struct iommu_table veth_iommu_table; 57 if (firmware_has_feature(FW_FEATURE_ISERIES))
54static struct iommu_table vio_iommu_table; 58 return vio_build_iommu_table_iseries(dev);
55 59
56static void __init iommu_vio_init(void) 60 dma_window = of_get_property(dev->dev.archdata.of_node,
57{ 61 "ibm,my-dma-window", NULL);
58 iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table); 62 if (!dma_window)
59 veth_iommu_table.it_size /= 2; 63 return NULL;
60 vio_iommu_table = veth_iommu_table;
61 vio_iommu_table.it_offset += veth_iommu_table.it_size;
62
63 if (!iommu_init_table(&veth_iommu_table, -1))
64 printk("Virtual Bus VETH TCE table failed.\n");
65 if (!iommu_init_table(&vio_iommu_table, -1))
66 printk("Virtual Bus VIO TCE table failed.\n");
67}
68#endif
69 64
70static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) 65 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
71{ 66
72#ifdef CONFIG_PPC_ISERIES 67 of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
73 if (firmware_has_feature(FW_FEATURE_ISERIES)) { 68 &tbl->it_index, &offset, &size);
74 if (strcmp(dev->type, "network") == 0) 69
75 return &veth_iommu_table; 70 /* TCE table size - measured in tce entries */
76 return &vio_iommu_table; 71 tbl->it_size = size >> IOMMU_PAGE_SHIFT;
77 } else 72 /* offset for VIO should always be 0 */
78#endif 73 tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
79 { 74 tbl->it_busno = 0;
80 const unsigned char *dma_window; 75 tbl->it_type = TCE_VB;
81 struct iommu_table *tbl; 76
82 unsigned long offset, size; 77 return iommu_init_table(tbl, -1);
83
84 dma_window = of_get_property(dev->dev.archdata.of_node,
85 "ibm,my-dma-window", NULL);
86 if (!dma_window)
87 return NULL;
88
89 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
90
91 of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
92 &tbl->it_index, &offset, &size);
93
94 /* TCE table size - measured in tce entries */
95 tbl->it_size = size >> IOMMU_PAGE_SHIFT;
96 /* offset for VIO should always be 0 */
97 tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
98 tbl->it_busno = 0;
99 tbl->it_type = TCE_VB;
100
101 return iommu_init_table(tbl, -1);
102 }
103} 78}
104 79
105/** 80/**
@@ -160,16 +135,6 @@ static int vio_bus_remove(struct device *dev)
160 return 1; 135 return 1;
161} 136}
162 137
163/* convert from struct device to struct vio_dev and pass to driver. */
164static void vio_bus_shutdown(struct device *dev)
165{
166 struct vio_dev *viodev = to_vio_dev(dev);
167 struct vio_driver *viodrv = to_vio_driver(dev->driver);
168
169 if (dev->driver && viodrv->shutdown)
170 viodrv->shutdown(viodev);
171}
172
173/** 138/**
174 * vio_register_driver: - Register a new vio driver 139 * vio_register_driver: - Register a new vio driver
175 * @drv: The vio_driver structure to be registered. 140 * @drv: The vio_driver structure to be registered.
@@ -282,15 +247,6 @@ static int __init vio_bus_init(void)
282 int err; 247 int err;
283 struct device_node *node_vroot; 248 struct device_node *node_vroot;
284 249
285#ifdef CONFIG_PPC_ISERIES
286 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
287 iommu_vio_init();
288 vio_bus_device.dev.archdata.dma_ops = &dma_iommu_ops;
289 vio_bus_device.dev.archdata.dma_data = &vio_iommu_table;
290 iSeries_vio_dev = &vio_bus_device.dev;
291 }
292#endif /* CONFIG_PPC_ISERIES */
293
294 err = bus_register(&vio_bus_type); 250 err = bus_register(&vio_bus_type);
295 if (err) { 251 if (err) {
296 printk(KERN_ERR "failed to register VIO bus\n"); 252 printk(KERN_ERR "failed to register VIO bus\n");
@@ -317,11 +273,8 @@ static int __init vio_bus_init(void)
317 * the device tree. Drivers will associate with them later. 273 * the device tree. Drivers will associate with them later.
318 */ 274 */
319 for (of_node = node_vroot->child; of_node != NULL; 275 for (of_node = node_vroot->child; of_node != NULL;
320 of_node = of_node->sibling) { 276 of_node = of_node->sibling)
321 printk(KERN_DEBUG "%s: processing %p\n",
322 __FUNCTION__, of_node);
323 vio_register_device_node(of_node); 277 vio_register_device_node(of_node);
324 }
325 of_node_put(node_vroot); 278 of_node_put(node_vroot);
326 } 279 }
327 280
@@ -391,14 +344,13 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp,
391 return 0; 344 return 0;
392} 345}
393 346
394struct bus_type vio_bus_type = { 347static struct bus_type vio_bus_type = {
395 .name = "vio", 348 .name = "vio",
396 .dev_attrs = vio_dev_attrs, 349 .dev_attrs = vio_dev_attrs,
397 .uevent = vio_hotplug, 350 .uevent = vio_hotplug,
398 .match = vio_bus_match, 351 .match = vio_bus_match,
399 .probe = vio_bus_probe, 352 .probe = vio_bus_probe,
400 .remove = vio_bus_remove, 353 .remove = vio_bus_remove,
401 .shutdown = vio_bus_shutdown,
402}; 354};
403 355
404/** 356/**
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 0c458556399f..823a8cbd60b5 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -34,6 +34,8 @@ SECTIONS
34 34
35 /* Text and gots */ 35 /* Text and gots */
36 .text : { 36 .text : {
37 ALIGN_FUNCTION();
38 *(.text.head)
37 _text = .; 39 _text = .;
38 TEXT_TEXT 40 TEXT_TEXT
39 SCHED_TEXT 41 SCHED_TEXT