aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--arch/powerpc/kernel/Makefile9
-rw-r--r--arch/powerpc/kernel/asm-offsets.c5
-rw-r--r--arch/powerpc/kernel/cputable.c14
-rw-r--r--arch/powerpc/kernel/head_64.S320
-rw-r--r--arch/powerpc/kernel/lparmap.c4
-rw-r--r--arch/powerpc/kernel/ppc32.h138
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c9
-rw-r--r--arch/powerpc/kernel/process.c28
-rw-r--r--arch/powerpc/kernel/prom.c78
-rw-r--r--arch/powerpc/kernel/prom_init.c2
-rw-r--r--arch/powerpc/kernel/ptrace.c43
-rw-r--r--arch/powerpc/kernel/rtas-proc.c808
-rw-r--r--arch/powerpc/kernel/rtas.c19
-rw-r--r--arch/powerpc/kernel/rtas_flash.c834
-rw-r--r--arch/powerpc/kernel/setup-common.c123
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c199
-rw-r--r--arch/powerpc/kernel/signal_32.c2
-rw-r--r--arch/powerpc/kernel/signal_64.c581
-rw-r--r--arch/powerpc/kernel/smp-tbsync.c171
-rw-r--r--arch/powerpc/kernel/smp.c565
-rw-r--r--arch/powerpc/kernel/time.c5
-rw-r--r--arch/powerpc/kernel/traps.c12
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S29
24 files changed, 3590 insertions, 410 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 572d4f5eaacb..b3ae2993efb8 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -13,12 +13,15 @@ endif
13obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ 13obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
14 signal_32.o pmc.o 14 signal_32.o pmc.o
15obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ 15obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
16 ptrace32.o systbl.o 16 signal_64.o ptrace32.o systbl.o
17obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o 17obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
18obj-$(CONFIG_POWER4) += idle_power4.o 18obj-$(CONFIG_POWER4) += idle_power4.o
19obj-$(CONFIG_PPC_OF) += of_device.o 19obj-$(CONFIG_PPC_OF) += of_device.o
20obj-$(CONFIG_PPC_RTAS) += rtas.o 20obj-$(CONFIG_PPC_RTAS) += rtas.o
21obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
22obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
21obj-$(CONFIG_IBMVIO) += vio.o 23obj-$(CONFIG_IBMVIO) += vio.o
24obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
22 25
23ifeq ($(CONFIG_PPC_MERGE),y) 26ifeq ($(CONFIG_PPC_MERGE),y)
24 27
@@ -38,6 +41,7 @@ obj-$(CONFIG_PPC_OF) += prom_init.o
38obj-$(CONFIG_MODULES) += ppc_ksyms.o 41obj-$(CONFIG_MODULES) += ppc_ksyms.o
39obj-$(CONFIG_BOOTX_TEXT) += btext.o 42obj-$(CONFIG_BOOTX_TEXT) += btext.o
40obj-$(CONFIG_6xx) += idle_6xx.o 43obj-$(CONFIG_6xx) += idle_6xx.o
44obj-$(CONFIG_SMP) += smp.o
41 45
42ifeq ($(CONFIG_PPC_ISERIES),y) 46ifeq ($(CONFIG_PPC_ISERIES),y)
43$(obj)/head_64.o: $(obj)/lparmap.s 47$(obj)/head_64.o: $(obj)/lparmap.s
@@ -46,8 +50,9 @@ endif
46 50
47else 51else
48# stuff used from here for ARCH=ppc or ARCH=ppc64 52# stuff used from here for ARCH=ppc or ARCH=ppc64
53smpobj-$(CONFIG_SMP) += smp.o
49obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \ 54obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
50 setup-common.o 55 setup-common.o $(smpobj-y)
51 56
52 57
53endif 58endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 330cd783206f..b75757251994 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -40,7 +40,7 @@
40#ifdef CONFIG_PPC64 40#ifdef CONFIG_PPC64
41#include <asm/paca.h> 41#include <asm/paca.h>
42#include <asm/lppaca.h> 42#include <asm/lppaca.h>
43#include <asm/iSeries/HvLpEvent.h> 43#include <asm/iseries/hv_lp_event.h>
44#include <asm/cache.h> 44#include <asm/cache.h>
45#include <asm/systemcfg.h> 45#include <asm/systemcfg.h>
46#include <asm/compat.h> 46#include <asm/compat.h>
@@ -125,6 +125,9 @@ int main(void)
125 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); 125 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
126 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); 126 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
127 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); 127 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
128#ifdef CONFIG_PPC_64K_PAGES
129 DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
130#endif
128#ifdef CONFIG_HUGETLB_PAGE 131#ifdef CONFIG_HUGETLB_PAGE
129 DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas)); 132 DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
130 DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas)); 133 DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index b91345fa0805..cc4e9eb1c13f 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -240,7 +240,7 @@ struct cpu_spec cpu_specs[] = {
240 .oprofile_model = &op_model_power4, 240 .oprofile_model = &op_model_power4,
241#endif 241#endif
242 }, 242 },
243 { /* Power5 */ 243 { /* Power5 GR */
244 .pvr_mask = 0xffff0000, 244 .pvr_mask = 0xffff0000,
245 .pvr_value = 0x003a0000, 245 .pvr_value = 0x003a0000,
246 .cpu_name = "POWER5 (gr)", 246 .cpu_name = "POWER5 (gr)",
@@ -255,7 +255,7 @@ struct cpu_spec cpu_specs[] = {
255 .oprofile_model = &op_model_power4, 255 .oprofile_model = &op_model_power4,
256#endif 256#endif
257 }, 257 },
258 { /* Power5 */ 258 { /* Power5 GS */
259 .pvr_mask = 0xffff0000, 259 .pvr_mask = 0xffff0000,
260 .pvr_value = 0x003b0000, 260 .pvr_value = 0x003b0000,
261 .cpu_name = "POWER5 (gs)", 261 .cpu_name = "POWER5 (gs)",
@@ -929,6 +929,16 @@ struct cpu_spec cpu_specs[] = {
929 .icache_bsize = 32, 929 .icache_bsize = 32,
930 .dcache_bsize = 32, 930 .dcache_bsize = 32,
931 }, 931 },
932 { /* 440SPe Rev. A */
933 .pvr_mask = 0xff000fff,
934 .pvr_value = 0x53000890,
935 .cpu_name = "440SPe Rev. A",
936 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
937 CPU_FTR_USE_TB,
938 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
939 .icache_bsize = 32,
940 .dcache_bsize = 32,
941 },
932#endif /* CONFIG_44x */ 942#endif /* CONFIG_44x */
933#ifdef CONFIG_FSL_BOOKE 943#ifdef CONFIG_FSL_BOOKE
934 { /* e200z5 */ 944 { /* e200z5 */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 147215a0d6c0..16ab40daa738 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -35,7 +35,7 @@
35#include <asm/cputable.h> 35#include <asm/cputable.h>
36#include <asm/setup.h> 36#include <asm/setup.h>
37#include <asm/hvcall.h> 37#include <asm/hvcall.h>
38#include <asm/iSeries/LparMap.h> 38#include <asm/iseries/lpar_map.h>
39#include <asm/thread_info.h> 39#include <asm/thread_info.h>
40 40
41#ifdef CONFIG_PPC_ISERIES 41#ifdef CONFIG_PPC_ISERIES
@@ -195,11 +195,11 @@ exception_marker:
195#define EX_R12 24 195#define EX_R12 24
196#define EX_R13 32 196#define EX_R13 32
197#define EX_SRR0 40 197#define EX_SRR0 40
198#define EX_R3 40 /* SLB miss saves R3, but not SRR0 */
199#define EX_DAR 48 198#define EX_DAR 48
200#define EX_LR 48 /* SLB miss saves LR, but not DAR */
201#define EX_DSISR 56 199#define EX_DSISR 56
202#define EX_CCR 60 200#define EX_CCR 60
201#define EX_R3 64
202#define EX_LR 72
203 203
204#define EXCEPTION_PROLOG_PSERIES(area, label) \ 204#define EXCEPTION_PROLOG_PSERIES(area, label) \
205 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ 205 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
@@ -419,17 +419,22 @@ data_access_slb_pSeries:
419 mtspr SPRN_SPRG1,r13 419 mtspr SPRN_SPRG1,r13
420 RUNLATCH_ON(r13) 420 RUNLATCH_ON(r13)
421 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ 421 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
422 std r3,PACA_EXSLB+EX_R3(r13)
423 mfspr r3,SPRN_DAR
422 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 424 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
425 mfcr r9
426#ifdef __DISABLED__
427 /* Keep that around for when we re-implement dynamic VSIDs */
428 cmpdi r3,0
429 bge slb_miss_user_pseries
430#endif /* __DISABLED__ */
423 std r10,PACA_EXSLB+EX_R10(r13) 431 std r10,PACA_EXSLB+EX_R10(r13)
424 std r11,PACA_EXSLB+EX_R11(r13) 432 std r11,PACA_EXSLB+EX_R11(r13)
425 std r12,PACA_EXSLB+EX_R12(r13) 433 std r12,PACA_EXSLB+EX_R12(r13)
426 std r3,PACA_EXSLB+EX_R3(r13) 434 mfspr r10,SPRN_SPRG1
427 mfspr r9,SPRN_SPRG1 435 std r10,PACA_EXSLB+EX_R13(r13)
428 std r9,PACA_EXSLB+EX_R13(r13)
429 mfcr r9
430 mfspr r12,SPRN_SRR1 /* and SRR1 */ 436 mfspr r12,SPRN_SRR1 /* and SRR1 */
431 mfspr r3,SPRN_DAR 437 b .slb_miss_realmode /* Rel. branch works in real mode */
432 b .do_slb_miss /* Rel. branch works in real mode */
433 438
434 STD_EXCEPTION_PSERIES(0x400, instruction_access) 439 STD_EXCEPTION_PSERIES(0x400, instruction_access)
435 440
@@ -440,17 +445,22 @@ instruction_access_slb_pSeries:
440 mtspr SPRN_SPRG1,r13 445 mtspr SPRN_SPRG1,r13
441 RUNLATCH_ON(r13) 446 RUNLATCH_ON(r13)
442 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ 447 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
448 std r3,PACA_EXSLB+EX_R3(r13)
449 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
443 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 450 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
451 mfcr r9
452#ifdef __DISABLED__
453 /* Keep that around for when we re-implement dynamic VSIDs */
454 cmpdi r3,0
455 bge slb_miss_user_pseries
456#endif /* __DISABLED__ */
444 std r10,PACA_EXSLB+EX_R10(r13) 457 std r10,PACA_EXSLB+EX_R10(r13)
445 std r11,PACA_EXSLB+EX_R11(r13) 458 std r11,PACA_EXSLB+EX_R11(r13)
446 std r12,PACA_EXSLB+EX_R12(r13) 459 std r12,PACA_EXSLB+EX_R12(r13)
447 std r3,PACA_EXSLB+EX_R3(r13) 460 mfspr r10,SPRN_SPRG1
448 mfspr r9,SPRN_SPRG1 461 std r10,PACA_EXSLB+EX_R13(r13)
449 std r9,PACA_EXSLB+EX_R13(r13)
450 mfcr r9
451 mfspr r12,SPRN_SRR1 /* and SRR1 */ 462 mfspr r12,SPRN_SRR1 /* and SRR1 */
452 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ 463 b .slb_miss_realmode /* Rel. branch works in real mode */
453 b .do_slb_miss /* Rel. branch works in real mode */
454 464
455 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) 465 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
456 STD_EXCEPTION_PSERIES(0x600, alignment) 466 STD_EXCEPTION_PSERIES(0x600, alignment)
@@ -509,6 +519,38 @@ _GLOBAL(do_stab_bolted_pSeries)
509 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) 519 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
510 520
511/* 521/*
522 * We have some room here we use that to put
523 * the peries slb miss user trampoline code so it's reasonably
524 * away from slb_miss_user_common to avoid problems with rfid
525 *
526 * This is used for when the SLB miss handler has to go virtual,
527 * which doesn't happen for now anymore but will once we re-implement
528 * dynamic VSIDs for shared page tables
529 */
530#ifdef __DISABLED__
531slb_miss_user_pseries:
532 std r10,PACA_EXGEN+EX_R10(r13)
533 std r11,PACA_EXGEN+EX_R11(r13)
534 std r12,PACA_EXGEN+EX_R12(r13)
535 mfspr r10,SPRG1
536 ld r11,PACA_EXSLB+EX_R9(r13)
537 ld r12,PACA_EXSLB+EX_R3(r13)
538 std r10,PACA_EXGEN+EX_R13(r13)
539 std r11,PACA_EXGEN+EX_R9(r13)
540 std r12,PACA_EXGEN+EX_R3(r13)
541 clrrdi r12,r13,32
542 mfmsr r10
543 mfspr r11,SRR0 /* save SRR0 */
544 ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
545 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
546 mtspr SRR0,r12
547 mfspr r12,SRR1 /* and SRR1 */
548 mtspr SRR1,r10
549 rfid
550 b . /* prevent spec. execution */
551#endif /* __DISABLED__ */
552
553/*
512 * Vectors for the FWNMI option. Share common code. 554 * Vectors for the FWNMI option. Share common code.
513 */ 555 */
514 .globl system_reset_fwnmi 556 .globl system_reset_fwnmi
@@ -559,22 +601,59 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
559 .globl data_access_slb_iSeries 601 .globl data_access_slb_iSeries
560data_access_slb_iSeries: 602data_access_slb_iSeries:
561 mtspr SPRN_SPRG1,r13 /* save r13 */ 603 mtspr SPRN_SPRG1,r13 /* save r13 */
562 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 604 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
563 std r3,PACA_EXSLB+EX_R3(r13) 605 std r3,PACA_EXSLB+EX_R3(r13)
564 ld r12,PACALPPACA+LPPACASRR1(r13)
565 mfspr r3,SPRN_DAR 606 mfspr r3,SPRN_DAR
566 b .do_slb_miss 607 std r9,PACA_EXSLB+EX_R9(r13)
608 mfcr r9
609#ifdef __DISABLED__
610 cmpdi r3,0
611 bge slb_miss_user_iseries
612#endif
613 std r10,PACA_EXSLB+EX_R10(r13)
614 std r11,PACA_EXSLB+EX_R11(r13)
615 std r12,PACA_EXSLB+EX_R12(r13)
616 mfspr r10,SPRN_SPRG1
617 std r10,PACA_EXSLB+EX_R13(r13)
618 ld r12,PACALPPACA+LPPACASRR1(r13);
619 b .slb_miss_realmode
567 620
568 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN) 621 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
569 622
570 .globl instruction_access_slb_iSeries 623 .globl instruction_access_slb_iSeries
571instruction_access_slb_iSeries: 624instruction_access_slb_iSeries:
572 mtspr SPRN_SPRG1,r13 /* save r13 */ 625 mtspr SPRN_SPRG1,r13 /* save r13 */
573 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 626 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
574 std r3,PACA_EXSLB+EX_R3(r13) 627 std r3,PACA_EXSLB+EX_R3(r13)
575 ld r12,PACALPPACA+LPPACASRR1(r13) 628 ld r3,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
576 ld r3,PACALPPACA+LPPACASRR0(r13) 629 std r9,PACA_EXSLB+EX_R9(r13)
577 b .do_slb_miss 630 mfcr r9
631#ifdef __DISABLED__
632 cmpdi r3,0
633 bge .slb_miss_user_iseries
634#endif
635 std r10,PACA_EXSLB+EX_R10(r13)
636 std r11,PACA_EXSLB+EX_R11(r13)
637 std r12,PACA_EXSLB+EX_R12(r13)
638 mfspr r10,SPRN_SPRG1
639 std r10,PACA_EXSLB+EX_R13(r13)
640 ld r12,PACALPPACA+LPPACASRR1(r13);
641 b .slb_miss_realmode
642
643#ifdef __DISABLED__
644slb_miss_user_iseries:
645 std r10,PACA_EXGEN+EX_R10(r13)
646 std r11,PACA_EXGEN+EX_R11(r13)
647 std r12,PACA_EXGEN+EX_R12(r13)
648 mfspr r10,SPRG1
649 ld r11,PACA_EXSLB+EX_R9(r13)
650 ld r12,PACA_EXSLB+EX_R3(r13)
651 std r10,PACA_EXGEN+EX_R13(r13)
652 std r11,PACA_EXGEN+EX_R9(r13)
653 std r12,PACA_EXGEN+EX_R3(r13)
654 EXCEPTION_PROLOG_ISERIES_2
655 b slb_miss_user_common
656#endif
578 657
579 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt) 658 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
580 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN) 659 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
@@ -809,6 +888,126 @@ instruction_access_common:
809 li r5,0x400 888 li r5,0x400
810 b .do_hash_page /* Try to handle as hpte fault */ 889 b .do_hash_page /* Try to handle as hpte fault */
811 890
891/*
892 * Here is the common SLB miss user that is used when going to virtual
893 * mode for SLB misses, that is currently not used
894 */
895#ifdef __DISABLED__
896 .align 7
897 .globl slb_miss_user_common
898slb_miss_user_common:
899 mflr r10
900 std r3,PACA_EXGEN+EX_DAR(r13)
901 stw r9,PACA_EXGEN+EX_CCR(r13)
902 std r10,PACA_EXGEN+EX_LR(r13)
903 std r11,PACA_EXGEN+EX_SRR0(r13)
904 bl .slb_allocate_user
905
906 ld r10,PACA_EXGEN+EX_LR(r13)
907 ld r3,PACA_EXGEN+EX_R3(r13)
908 lwz r9,PACA_EXGEN+EX_CCR(r13)
909 ld r11,PACA_EXGEN+EX_SRR0(r13)
910 mtlr r10
911 beq- slb_miss_fault
912
913 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
914 beq- unrecov_user_slb
915 mfmsr r10
916
917.machine push
918.machine "power4"
919 mtcrf 0x80,r9
920.machine pop
921
922 clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
923 mtmsrd r10,1
924
925 mtspr SRR0,r11
926 mtspr SRR1,r12
927
928 ld r9,PACA_EXGEN+EX_R9(r13)
929 ld r10,PACA_EXGEN+EX_R10(r13)
930 ld r11,PACA_EXGEN+EX_R11(r13)
931 ld r12,PACA_EXGEN+EX_R12(r13)
932 ld r13,PACA_EXGEN+EX_R13(r13)
933 rfid
934 b .
935
936slb_miss_fault:
937 EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
938 ld r4,PACA_EXGEN+EX_DAR(r13)
939 li r5,0
940 std r4,_DAR(r1)
941 std r5,_DSISR(r1)
942 b .handle_page_fault
943
944unrecov_user_slb:
945 EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
946 DISABLE_INTS
947 bl .save_nvgprs
9481: addi r3,r1,STACK_FRAME_OVERHEAD
949 bl .unrecoverable_exception
950 b 1b
951
952#endif /* __DISABLED__ */
953
954
955/*
956 * r13 points to the PACA, r9 contains the saved CR,
957 * r12 contain the saved SRR1, SRR0 is still ready for return
958 * r3 has the faulting address
959 * r9 - r13 are saved in paca->exslb.
960 * r3 is saved in paca->slb_r3
961 * We assume we aren't going to take any exceptions during this procedure.
962 */
963_GLOBAL(slb_miss_realmode)
964 mflr r10
965
966 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
967 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
968
969 bl .slb_allocate_realmode
970
971 /* All done -- return from exception. */
972
973 ld r10,PACA_EXSLB+EX_LR(r13)
974 ld r3,PACA_EXSLB+EX_R3(r13)
975 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
976#ifdef CONFIG_PPC_ISERIES
977 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
978#endif /* CONFIG_PPC_ISERIES */
979
980 mtlr r10
981
982 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
983 beq- unrecov_slb
984
985.machine push
986.machine "power4"
987 mtcrf 0x80,r9
988 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
989.machine pop
990
991#ifdef CONFIG_PPC_ISERIES
992 mtspr SPRN_SRR0,r11
993 mtspr SPRN_SRR1,r12
994#endif /* CONFIG_PPC_ISERIES */
995 ld r9,PACA_EXSLB+EX_R9(r13)
996 ld r10,PACA_EXSLB+EX_R10(r13)
997 ld r11,PACA_EXSLB+EX_R11(r13)
998 ld r12,PACA_EXSLB+EX_R12(r13)
999 ld r13,PACA_EXSLB+EX_R13(r13)
1000 rfid
1001 b . /* prevent speculative execution */
1002
1003unrecov_slb:
1004 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1005 DISABLE_INTS
1006 bl .save_nvgprs
10071: addi r3,r1,STACK_FRAME_OVERHEAD
1008 bl .unrecoverable_exception
1009 b 1b
1010
812 .align 7 1011 .align 7
813 .globl hardware_interrupt_common 1012 .globl hardware_interrupt_common
814 .globl hardware_interrupt_entry 1013 .globl hardware_interrupt_entry
@@ -1139,62 +1338,6 @@ _GLOBAL(do_stab_bolted)
1139 b . /* prevent speculative execution */ 1338 b . /* prevent speculative execution */
1140 1339
1141/* 1340/*
1142 * r13 points to the PACA, r9 contains the saved CR,
1143 * r11 and r12 contain the saved SRR0 and SRR1.
1144 * r3 has the faulting address
1145 * r9 - r13 are saved in paca->exslb.
1146 * r3 is saved in paca->slb_r3
1147 * We assume we aren't going to take any exceptions during this procedure.
1148 */
1149_GLOBAL(do_slb_miss)
1150 mflr r10
1151
1152 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1153 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1154
1155 bl .slb_allocate /* handle it */
1156
1157 /* All done -- return from exception. */
1158
1159 ld r10,PACA_EXSLB+EX_LR(r13)
1160 ld r3,PACA_EXSLB+EX_R3(r13)
1161 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1162#ifdef CONFIG_PPC_ISERIES
1163 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
1164#endif /* CONFIG_PPC_ISERIES */
1165
1166 mtlr r10
1167
1168 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1169 beq- unrecov_slb
1170
1171.machine push
1172.machine "power4"
1173 mtcrf 0x80,r9
1174 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1175.machine pop
1176
1177#ifdef CONFIG_PPC_ISERIES
1178 mtspr SPRN_SRR0,r11
1179 mtspr SPRN_SRR1,r12
1180#endif /* CONFIG_PPC_ISERIES */
1181 ld r9,PACA_EXSLB+EX_R9(r13)
1182 ld r10,PACA_EXSLB+EX_R10(r13)
1183 ld r11,PACA_EXSLB+EX_R11(r13)
1184 ld r12,PACA_EXSLB+EX_R12(r13)
1185 ld r13,PACA_EXSLB+EX_R13(r13)
1186 rfid
1187 b . /* prevent speculative execution */
1188
1189unrecov_slb:
1190 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1191 DISABLE_INTS
1192 bl .save_nvgprs
11931: addi r3,r1,STACK_FRAME_OVERHEAD
1194 bl .unrecoverable_exception
1195 b 1b
1196
1197/*
1198 * Space for CPU0's segment table. 1341 * Space for CPU0's segment table.
1199 * 1342 *
1200 * On iSeries, the hypervisor must fill in at least one entry before 1343 * On iSeries, the hypervisor must fill in at least one entry before
@@ -1569,7 +1712,10 @@ _GLOBAL(__secondary_start)
1569#endif 1712#endif
1570 /* Initialize the first segment table (or SLB) entry */ 1713 /* Initialize the first segment table (or SLB) entry */
1571 ld r3,PACASTABVIRT(r13) /* get addr of segment table */ 1714 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
1715BEGIN_FTR_SECTION
1572 bl .stab_initialize 1716 bl .stab_initialize
1717END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1718 bl .slb_initialize
1573 1719
1574 /* Initialize the kernel stack. Just a repeat for iSeries. */ 1720 /* Initialize the kernel stack. Just a repeat for iSeries. */
1575 LOADADDR(r3,current_set) 1721 LOADADDR(r3,current_set)
@@ -1914,24 +2060,6 @@ _GLOBAL(hmt_start_secondary)
1914 blr 2060 blr
1915#endif 2061#endif
1916 2062
1917#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
1918_GLOBAL(smp_release_cpus)
1919 /* All secondary cpus are spinning on a common
1920 * spinloop, release them all now so they can start
1921 * to spin on their individual paca spinloops.
1922 * For non SMP kernels, the secondary cpus never
1923 * get out of the common spinloop.
1924 * XXX This does nothing useful on iSeries, secondaries are
1925 * already waiting on their paca.
1926 */
1927 li r3,1
1928 LOADADDR(r5,__secondary_hold_spinloop)
1929 std r3,0(r5)
1930 sync
1931 blr
1932#endif /* CONFIG_SMP */
1933
1934
1935/* 2063/*
1936 * We put a few things here that have to be page-aligned. 2064 * We put a few things here that have to be page-aligned.
1937 * This stuff goes at the beginning of the bss, which is page-aligned. 2065 * This stuff goes at the beginning of the bss, which is page-aligned.
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index b81de286df5e..5a05a797485f 100644
--- a/arch/powerpc/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
@@ -8,7 +8,7 @@
8 */ 8 */
9#include <asm/mmu.h> 9#include <asm/mmu.h>
10#include <asm/page.h> 10#include <asm/page.h>
11#include <asm/iSeries/LparMap.h> 11#include <asm/iseries/lpar_map.h>
12 12
13const struct LparMap __attribute__((__section__(".text"))) xLparMap = { 13const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
14 .xNumberEsids = HvEsidsToMap, 14 .xNumberEsids = HvEsidsToMap,
@@ -25,7 +25,7 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
25 .xRanges = { 25 .xRanges = {
26 { .xPages = HvPagesToMap, 26 { .xPages = HvPagesToMap,
27 .xOffset = 0, 27 .xOffset = 0,
28 .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT), 28 .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT),
29 }, 29 },
30 }, 30 },
31}; 31};
diff --git a/arch/powerpc/kernel/ppc32.h b/arch/powerpc/kernel/ppc32.h
new file mode 100644
index 000000000000..90e562771791
--- /dev/null
+++ b/arch/powerpc/kernel/ppc32.h
@@ -0,0 +1,138 @@
1#ifndef _PPC64_PPC32_H
2#define _PPC64_PPC32_H
3
4#include <linux/compat.h>
5#include <asm/siginfo.h>
6#include <asm/signal.h>
7
8/*
9 * Data types and macros for providing 32b PowerPC support.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17/* These are here to support 32-bit syscalls on a 64-bit kernel. */
18
19typedef struct compat_siginfo {
20 int si_signo;
21 int si_errno;
22 int si_code;
23
24 union {
25 int _pad[SI_PAD_SIZE32];
26
27 /* kill() */
28 struct {
29 compat_pid_t _pid; /* sender's pid */
30 compat_uid_t _uid; /* sender's uid */
31 } _kill;
32
33 /* POSIX.1b timers */
34 struct {
35 compat_timer_t _tid; /* timer id */
36 int _overrun; /* overrun count */
37 compat_sigval_t _sigval; /* same as below */
38 int _sys_private; /* not to be passed to user */
39 } _timer;
40
41 /* POSIX.1b signals */
42 struct {
43 compat_pid_t _pid; /* sender's pid */
44 compat_uid_t _uid; /* sender's uid */
45 compat_sigval_t _sigval;
46 } _rt;
47
48 /* SIGCHLD */
49 struct {
50 compat_pid_t _pid; /* which child */
51 compat_uid_t _uid; /* sender's uid */
52 int _status; /* exit code */
53 compat_clock_t _utime;
54 compat_clock_t _stime;
55 } _sigchld;
56
57 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
58 struct {
59 unsigned int _addr; /* faulting insn/memory ref. */
60 } _sigfault;
61
62 /* SIGPOLL */
63 struct {
64 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
65 int _fd;
66 } _sigpoll;
67 } _sifields;
68} compat_siginfo_t;
69
70#define __old_sigaction32 old_sigaction32
71
72struct __old_sigaction32 {
73 compat_uptr_t sa_handler;
74 compat_old_sigset_t sa_mask;
75 unsigned int sa_flags;
76 compat_uptr_t sa_restorer; /* not used by Linux/SPARC yet */
77};
78
79
80
81struct sigaction32 {
82 compat_uptr_t sa_handler; /* Really a pointer, but need to deal with 32 bits */
83 unsigned int sa_flags;
84 compat_uptr_t sa_restorer; /* Another 32 bit pointer */
85 compat_sigset_t sa_mask; /* A 32 bit mask */
86};
87
88typedef struct sigaltstack_32 {
89 unsigned int ss_sp;
90 int ss_flags;
91 compat_size_t ss_size;
92} stack_32_t;
93
94struct pt_regs32 {
95 unsigned int gpr[32];
96 unsigned int nip;
97 unsigned int msr;
98 unsigned int orig_gpr3; /* Used for restarting system calls */
99 unsigned int ctr;
100 unsigned int link;
101 unsigned int xer;
102 unsigned int ccr;
103 unsigned int mq; /* 601 only (not used at present) */
104 unsigned int trap; /* Reason for being here */
105 unsigned int dar; /* Fault registers */
106 unsigned int dsisr;
107 unsigned int result; /* Result of a system call */
108};
109
110struct sigcontext32 {
111 unsigned int _unused[4];
112 int signal;
113 compat_uptr_t handler;
114 unsigned int oldmask;
115 compat_uptr_t regs; /* 4 byte pointer to the pt_regs32 structure. */
116};
117
118struct mcontext32 {
119 elf_gregset_t32 mc_gregs;
120 elf_fpregset_t mc_fregs;
121 unsigned int mc_pad[2];
122 elf_vrregset_t32 mc_vregs __attribute__((__aligned__(16)));
123};
124
125struct ucontext32 {
126 unsigned int uc_flags;
127 unsigned int uc_link;
128 stack_32_t uc_stack;
129 int uc_pad[7];
130 compat_uptr_t uc_regs; /* points to uc_mcontext field */
131 compat_sigset_t uc_sigmask; /* mask last for extensibility */
132 /* glibc has 1024-bit signal masks, ours are 64-bit */
133 int uc_maskext[30];
134 int uc_pad2[3];
135 struct mcontext32 uc_mcontext;
136};
137
138#endif /* _PPC64_PPC32_H */
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 8bc540337ba0..47d6f7e2ea9f 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -81,15 +81,6 @@ EXPORT_SYMBOL(_prep_type);
81EXPORT_SYMBOL(ucSystemType); 81EXPORT_SYMBOL(ucSystemType);
82#endif 82#endif
83 83
84#if !defined(__INLINE_BITOPS)
85EXPORT_SYMBOL(set_bit);
86EXPORT_SYMBOL(clear_bit);
87EXPORT_SYMBOL(change_bit);
88EXPORT_SYMBOL(test_and_set_bit);
89EXPORT_SYMBOL(test_and_clear_bit);
90EXPORT_SYMBOL(test_and_change_bit);
91#endif /* __INLINE_BITOPS */
92
93EXPORT_SYMBOL(strcpy); 84EXPORT_SYMBOL(strcpy);
94EXPORT_SYMBOL(strncpy); 85EXPORT_SYMBOL(strncpy);
95EXPORT_SYMBOL(strcat); 86EXPORT_SYMBOL(strcat);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8f85dabe4df3..7f64f0464d44 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -48,8 +48,8 @@
48#include <asm/prom.h> 48#include <asm/prom.h>
49#ifdef CONFIG_PPC64 49#ifdef CONFIG_PPC64
50#include <asm/firmware.h> 50#include <asm/firmware.h>
51#include <asm/plpar_wrappers.h>
52#include <asm/time.h> 51#include <asm/time.h>
52#include <asm/machdep.h>
53#endif 53#endif
54 54
55extern unsigned long _get_SP(void); 55extern unsigned long _get_SP(void);
@@ -201,27 +201,15 @@ int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
201} 201}
202#endif /* CONFIG_SPE */ 202#endif /* CONFIG_SPE */
203 203
204static void set_dabr_spr(unsigned long val)
205{
206 mtspr(SPRN_DABR, val);
207}
208
209int set_dabr(unsigned long dabr) 204int set_dabr(unsigned long dabr)
210{ 205{
211 int ret = 0;
212
213#ifdef CONFIG_PPC64 206#ifdef CONFIG_PPC64
214 if (firmware_has_feature(FW_FEATURE_XDABR)) { 207 if (ppc_md.set_dabr)
215 /* We want to catch accesses from kernel and userspace */ 208 return ppc_md.set_dabr(dabr);
216 unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
217 ret = plpar_set_xdabr(dabr, flags);
218 } else if (firmware_has_feature(FW_FEATURE_DABR)) {
219 ret = plpar_set_dabr(dabr);
220 } else
221#endif 209#endif
222 set_dabr_spr(dabr);
223 210
224 return ret; 211 mtspr(SPRN_DABR, dabr);
212 return 0;
225} 213}
226 214
227#ifdef CONFIG_PPC64 215#ifdef CONFIG_PPC64
@@ -566,12 +554,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
566#ifdef CONFIG_PPC64 554#ifdef CONFIG_PPC64
567 if (cpu_has_feature(CPU_FTR_SLB)) { 555 if (cpu_has_feature(CPU_FTR_SLB)) {
568 unsigned long sp_vsid = get_kernel_vsid(sp); 556 unsigned long sp_vsid = get_kernel_vsid(sp);
557 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
569 558
570 sp_vsid <<= SLB_VSID_SHIFT; 559 sp_vsid <<= SLB_VSID_SHIFT;
571 sp_vsid |= SLB_VSID_KERNEL; 560 sp_vsid |= SLB_VSID_KERNEL | llp;
572 if (cpu_has_feature(CPU_FTR_16M_PAGE))
573 sp_vsid |= SLB_VSID_L;
574
575 p->thread.ksp_vsid = sp_vsid; 561 p->thread.ksp_vsid = sp_vsid;
576 } 562 }
577 563
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 2eccd0e159e3..3675ef4bac90 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -724,10 +724,10 @@ static inline char *find_flat_dt_string(u32 offset)
724 * used to extract the memory informations at boot before we can 724 * used to extract the memory informations at boot before we can
725 * unflatten the tree 725 * unflatten the tree
726 */ 726 */
727static int __init scan_flat_dt(int (*it)(unsigned long node, 727int __init of_scan_flat_dt(int (*it)(unsigned long node,
728 const char *uname, int depth, 728 const char *uname, int depth,
729 void *data), 729 void *data),
730 void *data) 730 void *data)
731{ 731{
732 unsigned long p = ((unsigned long)initial_boot_params) + 732 unsigned long p = ((unsigned long)initial_boot_params) +
733 initial_boot_params->off_dt_struct; 733 initial_boot_params->off_dt_struct;
@@ -784,8 +784,8 @@ static int __init scan_flat_dt(int (*it)(unsigned long node,
784 * This function can be used within scan_flattened_dt callback to get 784 * This function can be used within scan_flattened_dt callback to get
785 * access to properties 785 * access to properties
786 */ 786 */
787static void* __init get_flat_dt_prop(unsigned long node, const char *name, 787void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
788 unsigned long *size) 788 unsigned long *size)
789{ 789{
790 unsigned long p = node; 790 unsigned long p = node;
791 791
@@ -1087,7 +1087,7 @@ void __init unflatten_device_tree(void)
1087static int __init early_init_dt_scan_cpus(unsigned long node, 1087static int __init early_init_dt_scan_cpus(unsigned long node,
1088 const char *uname, int depth, void *data) 1088 const char *uname, int depth, void *data)
1089{ 1089{
1090 char *type = get_flat_dt_prop(node, "device_type", NULL); 1090 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1091 u32 *prop; 1091 u32 *prop;
1092 unsigned long size = 0; 1092 unsigned long size = 0;
1093 1093
@@ -1095,19 +1095,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
1095 if (type == NULL || strcmp(type, "cpu") != 0) 1095 if (type == NULL || strcmp(type, "cpu") != 0)
1096 return 0; 1096 return 0;
1097 1097
1098#ifdef CONFIG_PPC_PSERIES
1099 /* On LPAR, look for the first ibm,pft-size property for the hash table size
1100 */
1101 if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
1102 u32 *pft_size;
1103 pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
1104 if (pft_size != NULL) {
1105 /* pft_size[0] is the NUMA CEC cookie */
1106 ppc64_pft_size = pft_size[1];
1107 }
1108 }
1109#endif
1110
1111 boot_cpuid = 0; 1098 boot_cpuid = 0;
1112 boot_cpuid_phys = 0; 1099 boot_cpuid_phys = 0;
1113 if (initial_boot_params && initial_boot_params->version >= 2) { 1100 if (initial_boot_params && initial_boot_params->version >= 2) {
@@ -1117,8 +1104,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
1117 boot_cpuid_phys = initial_boot_params->boot_cpuid_phys; 1104 boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1118 } else { 1105 } else {
1119 /* Check if it's the boot-cpu, set it's hw index now */ 1106 /* Check if it's the boot-cpu, set it's hw index now */
1120 if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) { 1107 if (of_get_flat_dt_prop(node,
1121 prop = get_flat_dt_prop(node, "reg", NULL); 1108 "linux,boot-cpu", NULL) != NULL) {
1109 prop = of_get_flat_dt_prop(node, "reg", NULL);
1122 if (prop != NULL) 1110 if (prop != NULL)
1123 boot_cpuid_phys = *prop; 1111 boot_cpuid_phys = *prop;
1124 } 1112 }
@@ -1127,14 +1115,14 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
1127 1115
1128#ifdef CONFIG_ALTIVEC 1116#ifdef CONFIG_ALTIVEC
1129 /* Check if we have a VMX and eventually update CPU features */ 1117 /* Check if we have a VMX and eventually update CPU features */
1130 prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size); 1118 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", &size);
1131 if (prop && (*prop) > 0) { 1119 if (prop && (*prop) > 0) {
1132 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; 1120 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1133 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; 1121 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1134 } 1122 }
1135 1123
1136 /* Same goes for Apple's "altivec" property */ 1124 /* Same goes for Apple's "altivec" property */
1137 prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL); 1125 prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
1138 if (prop) { 1126 if (prop) {
1139 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; 1127 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1140 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; 1128 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
@@ -1147,7 +1135,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
1147 * this by looking at the size of the ibm,ppc-interrupt-server#s 1135 * this by looking at the size of the ibm,ppc-interrupt-server#s
1148 * property 1136 * property
1149 */ 1137 */
1150 prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", 1138 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1151 &size); 1139 &size);
1152 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; 1140 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1153 if (prop && ((size / sizeof(u32)) > 1)) 1141 if (prop && ((size / sizeof(u32)) > 1))
@@ -1170,7 +1158,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
1170 return 0; 1158 return 0;
1171 1159
1172 /* get platform type */ 1160 /* get platform type */
1173 prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL); 1161 prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
1174 if (prop == NULL) 1162 if (prop == NULL)
1175 return 0; 1163 return 0;
1176#ifdef CONFIG_PPC64 1164#ifdef CONFIG_PPC64
@@ -1183,21 +1171,21 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
1183 1171
1184#ifdef CONFIG_PPC64 1172#ifdef CONFIG_PPC64
1185 /* check if iommu is forced on or off */ 1173 /* check if iommu is forced on or off */
1186 if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) 1174 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1187 iommu_is_off = 1; 1175 iommu_is_off = 1;
1188 if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL) 1176 if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1189 iommu_force_on = 1; 1177 iommu_force_on = 1;
1190#endif 1178#endif
1191 1179
1192 lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL); 1180 lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
1193 if (lprop) 1181 if (lprop)
1194 memory_limit = *lprop; 1182 memory_limit = *lprop;
1195 1183
1196#ifdef CONFIG_PPC64 1184#ifdef CONFIG_PPC64
1197 lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL); 1185 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1198 if (lprop) 1186 if (lprop)
1199 tce_alloc_start = *lprop; 1187 tce_alloc_start = *lprop;
1200 lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); 1188 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1201 if (lprop) 1189 if (lprop)
1202 tce_alloc_end = *lprop; 1190 tce_alloc_end = *lprop;
1203#endif 1191#endif
@@ -1209,9 +1197,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
1209 { 1197 {
1210 u64 *basep, *entryp; 1198 u64 *basep, *entryp;
1211 1199
1212 basep = get_flat_dt_prop(node, "linux,rtas-base", NULL); 1200 basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
1213 entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL); 1201 entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1214 prop = get_flat_dt_prop(node, "linux,rtas-size", NULL); 1202 prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
1215 if (basep && entryp && prop) { 1203 if (basep && entryp && prop) {
1216 rtas.base = *basep; 1204 rtas.base = *basep;
1217 rtas.entry = *entryp; 1205 rtas.entry = *entryp;
@@ -1232,11 +1220,11 @@ static int __init early_init_dt_scan_root(unsigned long node,
1232 if (depth != 0) 1220 if (depth != 0)
1233 return 0; 1221 return 0;
1234 1222
1235 prop = get_flat_dt_prop(node, "#size-cells", NULL); 1223 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
1236 dt_root_size_cells = (prop == NULL) ? 1 : *prop; 1224 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1237 DBG("dt_root_size_cells = %x\n", dt_root_size_cells); 1225 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1238 1226
1239 prop = get_flat_dt_prop(node, "#address-cells", NULL); 1227 prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
1240 dt_root_addr_cells = (prop == NULL) ? 2 : *prop; 1228 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1241 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells); 1229 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1242 1230
@@ -1271,7 +1259,7 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1271static int __init early_init_dt_scan_memory(unsigned long node, 1259static int __init early_init_dt_scan_memory(unsigned long node,
1272 const char *uname, int depth, void *data) 1260 const char *uname, int depth, void *data)
1273{ 1261{
1274 char *type = get_flat_dt_prop(node, "device_type", NULL); 1262 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1275 cell_t *reg, *endp; 1263 cell_t *reg, *endp;
1276 unsigned long l; 1264 unsigned long l;
1277 1265
@@ -1279,13 +1267,13 @@ static int __init early_init_dt_scan_memory(unsigned long node,
1279 if (type == NULL || strcmp(type, "memory") != 0) 1267 if (type == NULL || strcmp(type, "memory") != 0)
1280 return 0; 1268 return 0;
1281 1269
1282 reg = (cell_t *)get_flat_dt_prop(node, "reg", &l); 1270 reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
1283 if (reg == NULL) 1271 if (reg == NULL)
1284 return 0; 1272 return 0;
1285 1273
1286 endp = reg + (l / sizeof(cell_t)); 1274 endp = reg + (l / sizeof(cell_t));
1287 1275
1288 DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n", 1276 DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
1289 uname, l, reg[0], reg[1], reg[2], reg[3]); 1277 uname, l, reg[0], reg[1], reg[2], reg[3]);
1290 1278
1291 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { 1279 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
@@ -1343,12 +1331,12 @@ void __init early_init_devtree(void *params)
1343 * device-tree, including the platform type, initrd location and 1331 * device-tree, including the platform type, initrd location and
1344 * size, TCE reserve, and more ... 1332 * size, TCE reserve, and more ...
1345 */ 1333 */
1346 scan_flat_dt(early_init_dt_scan_chosen, NULL); 1334 of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
1347 1335
1348 /* Scan memory nodes and rebuild LMBs */ 1336 /* Scan memory nodes and rebuild LMBs */
1349 lmb_init(); 1337 lmb_init();
1350 scan_flat_dt(early_init_dt_scan_root, NULL); 1338 of_scan_flat_dt(early_init_dt_scan_root, NULL);
1351 scan_flat_dt(early_init_dt_scan_memory, NULL); 1339 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1352 lmb_enforce_memory_limit(memory_limit); 1340 lmb_enforce_memory_limit(memory_limit);
1353 lmb_analyze(); 1341 lmb_analyze();
1354#ifdef CONFIG_PPC64 1342#ifdef CONFIG_PPC64
@@ -1363,10 +1351,10 @@ void __init early_init_devtree(void *params)
1363 1351
1364 DBG("Scanning CPUs ...\n"); 1352 DBG("Scanning CPUs ...\n");
1365 1353
1366 /* Retreive hash table size from flattened tree plus other 1354 /* Retreive CPU related informations from the flat tree
1367 * CPU related informations (altivec support, boot CPU ID, ...) 1355 * (altivec support, boot CPU ID, ...)
1368 */ 1356 */
1369 scan_flat_dt(early_init_dt_scan_cpus, NULL); 1357 of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
1370 1358
1371 DBG(" <- early_init_devtree()\n"); 1359 DBG(" <- early_init_devtree()\n");
1372} 1360}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 9750b3cd8ecd..c758b6624d7b 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2000,7 +2000,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2000#endif 2000#endif
2001 2001
2002 /* 2002 /*
2003 * On pSeries and BPA, copy the CPU hold code 2003 * Copy the CPU hold code
2004 */ 2004 */
2005 if (RELOC(of_platform) != PLATFORM_POWERMAC) 2005 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2006 copy_and_flush(0, KERNELBASE + offset, 0x100, 0); 2006 copy_and_flush(0, KERNELBASE + offset, 0x100, 0);
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 568ea335d616..3d2abd95c7ae 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -248,46 +248,10 @@ void ptrace_disable(struct task_struct *child)
248 clear_single_step(child); 248 clear_single_step(child);
249} 249}
250 250
251long sys_ptrace(long request, long pid, long addr, long data) 251long arch_ptrace(struct task_struct *child, long request, long addr, long data)
252{ 252{
253 struct task_struct *child;
254 int ret = -EPERM; 253 int ret = -EPERM;
255 254
256 lock_kernel();
257 if (request == PTRACE_TRACEME) {
258 /* are we already being traced? */
259 if (current->ptrace & PT_PTRACED)
260 goto out;
261 ret = security_ptrace(current->parent, current);
262 if (ret)
263 goto out;
264 /* set the ptrace bit in the process flags. */
265 current->ptrace |= PT_PTRACED;
266 ret = 0;
267 goto out;
268 }
269 ret = -ESRCH;
270 read_lock(&tasklist_lock);
271 child = find_task_by_pid(pid);
272 if (child)
273 get_task_struct(child);
274 read_unlock(&tasklist_lock);
275 if (!child)
276 goto out;
277
278 ret = -EPERM;
279 if (pid == 1) /* you may not mess with init */
280 goto out_tsk;
281
282 if (request == PTRACE_ATTACH) {
283 ret = ptrace_attach(child);
284 goto out_tsk;
285 }
286
287 ret = ptrace_check_attach(child, request == PTRACE_KILL);
288 if (ret < 0)
289 goto out_tsk;
290
291 switch (request) { 255 switch (request) {
292 /* when I and D space are separate, these will need to be fixed. */ 256 /* when I and D space are separate, these will need to be fixed. */
293 case PTRACE_PEEKTEXT: /* read word at location addr. */ 257 case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -540,10 +504,7 @@ long sys_ptrace(long request, long pid, long addr, long data)
540 ret = ptrace_request(child, request, addr, data); 504 ret = ptrace_request(child, request, addr, data);
541 break; 505 break;
542 } 506 }
543out_tsk: 507
544 put_task_struct(child);
545out:
546 unlock_kernel();
547 return ret; 508 return ret;
548} 509}
549 510
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
new file mode 100644
index 000000000000..5bdd5b079d96
--- /dev/null
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -0,0 +1,808 @@
1/*
2 * arch/ppc64/kernel/rtas-proc.c
3 * Copyright (C) 2000 Tilmann Bitterberg
4 * (tilmann@bitterberg.de)
5 *
6 * RTAS (Runtime Abstraction Services) stuff
7 * Intention is to provide a clean user interface
8 * to use the RTAS.
9 *
10 * TODO:
11 * Split off a header file and maybe move it to a different
12 * location. Write Documentation on what the /proc/rtas/ entries
13 * actually do.
14 */
15
16#include <linux/errno.h>
17#include <linux/sched.h>
18#include <linux/proc_fs.h>
19#include <linux/stat.h>
20#include <linux/ctype.h>
21#include <linux/time.h>
22#include <linux/string.h>
23#include <linux/init.h>
24#include <linux/seq_file.h>
25#include <linux/bitops.h>
26#include <linux/rtc.h>
27
28#include <asm/uaccess.h>
29#include <asm/processor.h>
30#include <asm/io.h>
31#include <asm/prom.h>
32#include <asm/rtas.h>
33#include <asm/machdep.h> /* for ppc_md */
34#include <asm/time.h>
35#include <asm/systemcfg.h>
36
37/* Token for Sensors */
38#define KEY_SWITCH 0x0001
39#define ENCLOSURE_SWITCH 0x0002
40#define THERMAL_SENSOR 0x0003
41#define LID_STATUS 0x0004
42#define POWER_SOURCE 0x0005
43#define BATTERY_VOLTAGE 0x0006
44#define BATTERY_REMAINING 0x0007
45#define BATTERY_PERCENTAGE 0x0008
46#define EPOW_SENSOR 0x0009
47#define BATTERY_CYCLESTATE 0x000a
48#define BATTERY_CHARGING 0x000b
49
50/* IBM specific sensors */
51#define IBM_SURVEILLANCE 0x2328 /* 9000 */
52#define IBM_FANRPM 0x2329 /* 9001 */
53#define IBM_VOLTAGE 0x232a /* 9002 */
54#define IBM_DRCONNECTOR 0x232b /* 9003 */
55#define IBM_POWERSUPPLY 0x232c /* 9004 */
56
57/* Status return values */
58#define SENSOR_CRITICAL_HIGH 13
59#define SENSOR_WARNING_HIGH 12
60#define SENSOR_NORMAL 11
61#define SENSOR_WARNING_LOW 10
62#define SENSOR_CRITICAL_LOW 9
63#define SENSOR_SUCCESS 0
64#define SENSOR_HW_ERROR -1
65#define SENSOR_BUSY -2
66#define SENSOR_NOT_EXIST -3
67#define SENSOR_DR_ENTITY -9000
68
69/* Location Codes */
70#define LOC_SCSI_DEV_ADDR 'A'
71#define LOC_SCSI_DEV_LOC 'B'
72#define LOC_CPU 'C'
73#define LOC_DISKETTE 'D'
74#define LOC_ETHERNET 'E'
75#define LOC_FAN 'F'
76#define LOC_GRAPHICS 'G'
77/* reserved / not used 'H' */
78#define LOC_IO_ADAPTER 'I'
79/* reserved / not used 'J' */
80#define LOC_KEYBOARD 'K'
81#define LOC_LCD 'L'
82#define LOC_MEMORY 'M'
83#define LOC_NV_MEMORY 'N'
84#define LOC_MOUSE 'O'
85#define LOC_PLANAR 'P'
86#define LOC_OTHER_IO 'Q'
87#define LOC_PARALLEL 'R'
88#define LOC_SERIAL 'S'
89#define LOC_DEAD_RING 'T'
90#define LOC_RACKMOUNTED 'U' /* for _u_nit is rack mounted */
91#define LOC_VOLTAGE 'V'
92#define LOC_SWITCH_ADAPTER 'W'
93#define LOC_OTHER 'X'
94#define LOC_FIRMWARE 'Y'
95#define LOC_SCSI 'Z'
96
97/* Tokens for indicators */
98#define TONE_FREQUENCY 0x0001 /* 0 - 1000 (HZ)*/
99#define TONE_VOLUME 0x0002 /* 0 - 100 (%) */
100#define SYSTEM_POWER_STATE 0x0003
101#define WARNING_LIGHT 0x0004
102#define DISK_ACTIVITY_LIGHT 0x0005
103#define HEX_DISPLAY_UNIT 0x0006
104#define BATTERY_WARNING_TIME 0x0007
105#define CONDITION_CYCLE_REQUEST 0x0008
106#define SURVEILLANCE_INDICATOR 0x2328 /* 9000 */
107#define DR_ACTION 0x2329 /* 9001 */
108#define DR_INDICATOR 0x232a /* 9002 */
109/* 9003 - 9004: Vendor specific */
110/* 9006 - 9999: Vendor specific */
111
112/* other */
113#define MAX_SENSORS 17 /* I only know of 17 sensors */
114#define MAX_LINELENGTH 256
115#define SENSOR_PREFIX "ibm,sensor-"
116#define cel_to_fahr(x) ((x*9/5)+32)
117
118
119/* Globals */
120static struct rtas_sensors sensors;
121static struct device_node *rtas_node = NULL;
122static unsigned long power_on_time = 0; /* Save the time the user set */
123static char progress_led[MAX_LINELENGTH];
124
125static unsigned long rtas_tone_frequency = 1000;
126static unsigned long rtas_tone_volume = 0;
127
128/* ****************STRUCTS******************************************* */
129struct individual_sensor {
130 unsigned int token;
131 unsigned int quant;
132};
133
134struct rtas_sensors {
135 struct individual_sensor sensor[MAX_SENSORS];
136 unsigned int quant;
137};
138
139/* ****************************************************************** */
140/* Declarations */
141static int ppc_rtas_sensors_show(struct seq_file *m, void *v);
142static int ppc_rtas_clock_show(struct seq_file *m, void *v);
143static ssize_t ppc_rtas_clock_write(struct file *file,
144 const char __user *buf, size_t count, loff_t *ppos);
145static int ppc_rtas_progress_show(struct seq_file *m, void *v);
146static ssize_t ppc_rtas_progress_write(struct file *file,
147 const char __user *buf, size_t count, loff_t *ppos);
148static int ppc_rtas_poweron_show(struct seq_file *m, void *v);
149static ssize_t ppc_rtas_poweron_write(struct file *file,
150 const char __user *buf, size_t count, loff_t *ppos);
151
152static ssize_t ppc_rtas_tone_freq_write(struct file *file,
153 const char __user *buf, size_t count, loff_t *ppos);
154static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v);
155static ssize_t ppc_rtas_tone_volume_write(struct file *file,
156 const char __user *buf, size_t count, loff_t *ppos);
157static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v);
158static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v);
159
160static int sensors_open(struct inode *inode, struct file *file)
161{
162 return single_open(file, ppc_rtas_sensors_show, NULL);
163}
164
165struct file_operations ppc_rtas_sensors_operations = {
166 .open = sensors_open,
167 .read = seq_read,
168 .llseek = seq_lseek,
169 .release = single_release,
170};
171
172static int poweron_open(struct inode *inode, struct file *file)
173{
174 return single_open(file, ppc_rtas_poweron_show, NULL);
175}
176
177struct file_operations ppc_rtas_poweron_operations = {
178 .open = poweron_open,
179 .read = seq_read,
180 .llseek = seq_lseek,
181 .write = ppc_rtas_poweron_write,
182 .release = single_release,
183};
184
185static int progress_open(struct inode *inode, struct file *file)
186{
187 return single_open(file, ppc_rtas_progress_show, NULL);
188}
189
190struct file_operations ppc_rtas_progress_operations = {
191 .open = progress_open,
192 .read = seq_read,
193 .llseek = seq_lseek,
194 .write = ppc_rtas_progress_write,
195 .release = single_release,
196};
197
198static int clock_open(struct inode *inode, struct file *file)
199{
200 return single_open(file, ppc_rtas_clock_show, NULL);
201}
202
203struct file_operations ppc_rtas_clock_operations = {
204 .open = clock_open,
205 .read = seq_read,
206 .llseek = seq_lseek,
207 .write = ppc_rtas_clock_write,
208 .release = single_release,
209};
210
211static int tone_freq_open(struct inode *inode, struct file *file)
212{
213 return single_open(file, ppc_rtas_tone_freq_show, NULL);
214}
215
216struct file_operations ppc_rtas_tone_freq_operations = {
217 .open = tone_freq_open,
218 .read = seq_read,
219 .llseek = seq_lseek,
220 .write = ppc_rtas_tone_freq_write,
221 .release = single_release,
222};
223
224static int tone_volume_open(struct inode *inode, struct file *file)
225{
226 return single_open(file, ppc_rtas_tone_volume_show, NULL);
227}
228
229struct file_operations ppc_rtas_tone_volume_operations = {
230 .open = tone_volume_open,
231 .read = seq_read,
232 .llseek = seq_lseek,
233 .write = ppc_rtas_tone_volume_write,
234 .release = single_release,
235};
236
237static int rmo_buf_open(struct inode *inode, struct file *file)
238{
239 return single_open(file, ppc_rtas_rmo_buf_show, NULL);
240}
241
242struct file_operations ppc_rtas_rmo_buf_ops = {
243 .open = rmo_buf_open,
244 .read = seq_read,
245 .llseek = seq_lseek,
246 .release = single_release,
247};
248
249static int ppc_rtas_find_all_sensors(void);
250static void ppc_rtas_process_sensor(struct seq_file *m,
251 struct individual_sensor *s, int state, int error, char *loc);
252static char *ppc_rtas_process_error(int error);
253static void get_location_code(struct seq_file *m,
254 struct individual_sensor *s, char *loc);
255static void check_location_string(struct seq_file *m, char *c);
256static void check_location(struct seq_file *m, char *c);
257
258static int __init proc_rtas_init(void)
259{
260 struct proc_dir_entry *entry;
261
262 if (!(systemcfg->platform & PLATFORM_PSERIES))
263 return 1;
264
265 rtas_node = of_find_node_by_name(NULL, "rtas");
266 if (rtas_node == NULL)
267 return 1;
268
269 entry = create_proc_entry("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL);
270 if (entry)
271 entry->proc_fops = &ppc_rtas_progress_operations;
272
273 entry = create_proc_entry("ppc64/rtas/clock", S_IRUGO|S_IWUSR, NULL);
274 if (entry)
275 entry->proc_fops = &ppc_rtas_clock_operations;
276
277 entry = create_proc_entry("ppc64/rtas/poweron", S_IWUSR|S_IRUGO, NULL);
278 if (entry)
279 entry->proc_fops = &ppc_rtas_poweron_operations;
280
281 entry = create_proc_entry("ppc64/rtas/sensors", S_IRUGO, NULL);
282 if (entry)
283 entry->proc_fops = &ppc_rtas_sensors_operations;
284
285 entry = create_proc_entry("ppc64/rtas/frequency", S_IWUSR|S_IRUGO,
286 NULL);
287 if (entry)
288 entry->proc_fops = &ppc_rtas_tone_freq_operations;
289
290 entry = create_proc_entry("ppc64/rtas/volume", S_IWUSR|S_IRUGO, NULL);
291 if (entry)
292 entry->proc_fops = &ppc_rtas_tone_volume_operations;
293
294 entry = create_proc_entry("ppc64/rtas/rmo_buffer", S_IRUSR, NULL);
295 if (entry)
296 entry->proc_fops = &ppc_rtas_rmo_buf_ops;
297
298 return 0;
299}
300
301__initcall(proc_rtas_init);
302
303static int parse_number(const char __user *p, size_t count, unsigned long *val)
304{
305 char buf[40];
306 char *end;
307
308 if (count > 39)
309 return -EINVAL;
310
311 if (copy_from_user(buf, p, count))
312 return -EFAULT;
313
314 buf[count] = 0;
315
316 *val = simple_strtoul(buf, &end, 10);
317 if (*end && *end != '\n')
318 return -EINVAL;
319
320 return 0;
321}
322
323/* ****************************************************************** */
324/* POWER-ON-TIME */
325/* ****************************************************************** */
326static ssize_t ppc_rtas_poweron_write(struct file *file,
327 const char __user *buf, size_t count, loff_t *ppos)
328{
329 struct rtc_time tm;
330 unsigned long nowtime;
331 int error = parse_number(buf, count, &nowtime);
332 if (error)
333 return error;
334
335 power_on_time = nowtime; /* save the time */
336
337 to_tm(nowtime, &tm);
338
339 error = rtas_call(rtas_token("set-time-for-power-on"), 7, 1, NULL,
340 tm.tm_year, tm.tm_mon, tm.tm_mday,
341 tm.tm_hour, tm.tm_min, tm.tm_sec, 0 /* nano */);
342 if (error)
343 printk(KERN_WARNING "error: setting poweron time returned: %s\n",
344 ppc_rtas_process_error(error));
345 return count;
346}
347/* ****************************************************************** */
348static int ppc_rtas_poweron_show(struct seq_file *m, void *v)
349{
350 if (power_on_time == 0)
351 seq_printf(m, "Power on time not set\n");
352 else
353 seq_printf(m, "%lu\n",power_on_time);
354 return 0;
355}
356
357/* ****************************************************************** */
358/* PROGRESS */
359/* ****************************************************************** */
360static ssize_t ppc_rtas_progress_write(struct file *file,
361 const char __user *buf, size_t count, loff_t *ppos)
362{
363 unsigned long hex;
364
365 if (count >= MAX_LINELENGTH)
366 count = MAX_LINELENGTH -1;
367 if (copy_from_user(progress_led, buf, count)) { /* save the string */
368 return -EFAULT;
369 }
370 progress_led[count] = 0;
371
372 /* Lets see if the user passed hexdigits */
373 hex = simple_strtoul(progress_led, NULL, 10);
374
375 rtas_progress ((char *)progress_led, hex);
376 return count;
377
378 /* clear the line */
379 /* rtas_progress(" ", 0xffff);*/
380}
381/* ****************************************************************** */
382static int ppc_rtas_progress_show(struct seq_file *m, void *v)
383{
384 if (progress_led)
385 seq_printf(m, "%s\n", progress_led);
386 return 0;
387}
388
389/* ****************************************************************** */
390/* CLOCK */
391/* ****************************************************************** */
392static ssize_t ppc_rtas_clock_write(struct file *file,
393 const char __user *buf, size_t count, loff_t *ppos)
394{
395 struct rtc_time tm;
396 unsigned long nowtime;
397 int error = parse_number(buf, count, &nowtime);
398 if (error)
399 return error;
400
401 to_tm(nowtime, &tm);
402 error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
403 tm.tm_year, tm.tm_mon, tm.tm_mday,
404 tm.tm_hour, tm.tm_min, tm.tm_sec, 0);
405 if (error)
406 printk(KERN_WARNING "error: setting the clock returned: %s\n",
407 ppc_rtas_process_error(error));
408 return count;
409}
410/* ****************************************************************** */
411static int ppc_rtas_clock_show(struct seq_file *m, void *v)
412{
413 int ret[8];
414 int error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
415
416 if (error) {
417 printk(KERN_WARNING "error: reading the clock returned: %s\n",
418 ppc_rtas_process_error(error));
419 seq_printf(m, "0");
420 } else {
421 unsigned int year, mon, day, hour, min, sec;
422 year = ret[0]; mon = ret[1]; day = ret[2];
423 hour = ret[3]; min = ret[4]; sec = ret[5];
424 seq_printf(m, "%lu\n",
425 mktime(year, mon, day, hour, min, sec));
426 }
427 return 0;
428}
429
430/* ****************************************************************** */
431/* SENSOR STUFF */
432/* ****************************************************************** */
433static int ppc_rtas_sensors_show(struct seq_file *m, void *v)
434{
435 int i,j;
436 int state, error;
437 int get_sensor_state = rtas_token("get-sensor-state");
438
439 seq_printf(m, "RTAS (RunTime Abstraction Services) Sensor Information\n");
440 seq_printf(m, "Sensor\t\tValue\t\tCondition\tLocation\n");
441 seq_printf(m, "********************************************************\n");
442
443 if (ppc_rtas_find_all_sensors() != 0) {
444 seq_printf(m, "\nNo sensors are available\n");
445 return 0;
446 }
447
448 for (i=0; i<sensors.quant; i++) {
449 struct individual_sensor *p = &sensors.sensor[i];
450 char rstr[64];
451 char *loc;
452 int llen, offs;
453
454 sprintf (rstr, SENSOR_PREFIX"%04d", p->token);
455 loc = (char *) get_property(rtas_node, rstr, &llen);
456
457 /* A sensor may have multiple instances */
458 for (j = 0, offs = 0; j <= p->quant; j++) {
459 error = rtas_call(get_sensor_state, 2, 2, &state,
460 p->token, j);
461
462 ppc_rtas_process_sensor(m, p, state, error, loc);
463 seq_putc(m, '\n');
464 if (loc) {
465 offs += strlen(loc) + 1;
466 loc += strlen(loc) + 1;
467 if (offs >= llen)
468 loc = NULL;
469 }
470 }
471 }
472 return 0;
473}
474
475/* ****************************************************************** */
476
477static int ppc_rtas_find_all_sensors(void)
478{
479 unsigned int *utmp;
480 int len, i;
481
482 utmp = (unsigned int *) get_property(rtas_node, "rtas-sensors", &len);
483 if (utmp == NULL) {
484 printk (KERN_ERR "error: could not get rtas-sensors\n");
485 return 1;
486 }
487
488 sensors.quant = len / 8; /* int + int */
489
490 for (i=0; i<sensors.quant; i++) {
491 sensors.sensor[i].token = *utmp++;
492 sensors.sensor[i].quant = *utmp++;
493 }
494 return 0;
495}
496
497/* ****************************************************************** */
498/*
499 * Builds a string of what rtas returned
500 */
501static char *ppc_rtas_process_error(int error)
502{
503 switch (error) {
504 case SENSOR_CRITICAL_HIGH:
505 return "(critical high)";
506 case SENSOR_WARNING_HIGH:
507 return "(warning high)";
508 case SENSOR_NORMAL:
509 return "(normal)";
510 case SENSOR_WARNING_LOW:
511 return "(warning low)";
512 case SENSOR_CRITICAL_LOW:
513 return "(critical low)";
514 case SENSOR_SUCCESS:
515 return "(read ok)";
516 case SENSOR_HW_ERROR:
517 return "(hardware error)";
518 case SENSOR_BUSY:
519 return "(busy)";
520 case SENSOR_NOT_EXIST:
521 return "(non existent)";
522 case SENSOR_DR_ENTITY:
523 return "(dr entity removed)";
524 default:
525 return "(UNKNOWN)";
526 }
527}
528
529/* ****************************************************************** */
530/*
531 * Builds a string out of what the sensor said
532 */
533
534static void ppc_rtas_process_sensor(struct seq_file *m,
535 struct individual_sensor *s, int state, int error, char *loc)
536{
537 /* Defined return vales */
538 const char * key_switch[] = { "Off\t", "Normal\t", "Secure\t",
539 "Maintenance" };
540 const char * enclosure_switch[] = { "Closed", "Open" };
541 const char * lid_status[] = { " ", "Open", "Closed" };
542 const char * power_source[] = { "AC\t", "Battery",
543 "AC & Battery" };
544 const char * battery_remaining[] = { "Very Low", "Low", "Mid", "High" };
545 const char * epow_sensor[] = {
546 "EPOW Reset", "Cooling warning", "Power warning",
547 "System shutdown", "System halt", "EPOW main enclosure",
548 "EPOW power off" };
549 const char * battery_cyclestate[] = { "None", "In progress",
550 "Requested" };
551 const char * battery_charging[] = { "Charging", "Discharching",
552 "No current flow" };
553 const char * ibm_drconnector[] = { "Empty", "Present", "Unusable",
554 "Exchange" };
555
556 int have_strings = 0;
557 int num_states = 0;
558 int temperature = 0;
559 int unknown = 0;
560
561 /* What kind of sensor do we have here? */
562
563 switch (s->token) {
564 case KEY_SWITCH:
565 seq_printf(m, "Key switch:\t");
566 num_states = sizeof(key_switch) / sizeof(char *);
567 if (state < num_states) {
568 seq_printf(m, "%s\t", key_switch[state]);
569 have_strings = 1;
570 }
571 break;
572 case ENCLOSURE_SWITCH:
573 seq_printf(m, "Enclosure switch:\t");
574 num_states = sizeof(enclosure_switch) / sizeof(char *);
575 if (state < num_states) {
576 seq_printf(m, "%s\t",
577 enclosure_switch[state]);
578 have_strings = 1;
579 }
580 break;
581 case THERMAL_SENSOR:
582 seq_printf(m, "Temp. (C/F):\t");
583 temperature = 1;
584 break;
585 case LID_STATUS:
586 seq_printf(m, "Lid status:\t");
587 num_states = sizeof(lid_status) / sizeof(char *);
588 if (state < num_states) {
589 seq_printf(m, "%s\t", lid_status[state]);
590 have_strings = 1;
591 }
592 break;
593 case POWER_SOURCE:
594 seq_printf(m, "Power source:\t");
595 num_states = sizeof(power_source) / sizeof(char *);
596 if (state < num_states) {
597 seq_printf(m, "%s\t",
598 power_source[state]);
599 have_strings = 1;
600 }
601 break;
602 case BATTERY_VOLTAGE:
603 seq_printf(m, "Battery voltage:\t");
604 break;
605 case BATTERY_REMAINING:
606 seq_printf(m, "Battery remaining:\t");
607 num_states = sizeof(battery_remaining) / sizeof(char *);
608 if (state < num_states)
609 {
610 seq_printf(m, "%s\t",
611 battery_remaining[state]);
612 have_strings = 1;
613 }
614 break;
615 case BATTERY_PERCENTAGE:
616 seq_printf(m, "Battery percentage:\t");
617 break;
618 case EPOW_SENSOR:
619 seq_printf(m, "EPOW Sensor:\t");
620 num_states = sizeof(epow_sensor) / sizeof(char *);
621 if (state < num_states) {
622 seq_printf(m, "%s\t", epow_sensor[state]);
623 have_strings = 1;
624 }
625 break;
626 case BATTERY_CYCLESTATE:
627 seq_printf(m, "Battery cyclestate:\t");
628 num_states = sizeof(battery_cyclestate) /
629 sizeof(char *);
630 if (state < num_states) {
631 seq_printf(m, "%s\t",
632 battery_cyclestate[state]);
633 have_strings = 1;
634 }
635 break;
636 case BATTERY_CHARGING:
637 seq_printf(m, "Battery Charging:\t");
638 num_states = sizeof(battery_charging) / sizeof(char *);
639 if (state < num_states) {
640 seq_printf(m, "%s\t",
641 battery_charging[state]);
642 have_strings = 1;
643 }
644 break;
645 case IBM_SURVEILLANCE:
646 seq_printf(m, "Surveillance:\t");
647 break;
648 case IBM_FANRPM:
649 seq_printf(m, "Fan (rpm):\t");
650 break;
651 case IBM_VOLTAGE:
652 seq_printf(m, "Voltage (mv):\t");
653 break;
654 case IBM_DRCONNECTOR:
655 seq_printf(m, "DR connector:\t");
656 num_states = sizeof(ibm_drconnector) / sizeof(char *);
657 if (state < num_states) {
658 seq_printf(m, "%s\t",
659 ibm_drconnector[state]);
660 have_strings = 1;
661 }
662 break;
663 case IBM_POWERSUPPLY:
664 seq_printf(m, "Powersupply:\t");
665 break;
666 default:
667 seq_printf(m, "Unknown sensor (type %d), ignoring it\n",
668 s->token);
669 unknown = 1;
670 have_strings = 1;
671 break;
672 }
673 if (have_strings == 0) {
674 if (temperature) {
675 seq_printf(m, "%4d /%4d\t", state, cel_to_fahr(state));
676 } else
677 seq_printf(m, "%10d\t", state);
678 }
679 if (unknown == 0) {
680 seq_printf(m, "%s\t", ppc_rtas_process_error(error));
681 get_location_code(m, s, loc);
682 }
683}
684
685/* ****************************************************************** */
686
687static void check_location(struct seq_file *m, char *c)
688{
689 switch (c[0]) {
690 case LOC_PLANAR:
691 seq_printf(m, "Planar #%c", c[1]);
692 break;
693 case LOC_CPU:
694 seq_printf(m, "CPU #%c", c[1]);
695 break;
696 case LOC_FAN:
697 seq_printf(m, "Fan #%c", c[1]);
698 break;
699 case LOC_RACKMOUNTED:
700 seq_printf(m, "Rack #%c", c[1]);
701 break;
702 case LOC_VOLTAGE:
703 seq_printf(m, "Voltage #%c", c[1]);
704 break;
705 case LOC_LCD:
706 seq_printf(m, "LCD #%c", c[1]);
707 break;
708 case '.':
709 seq_printf(m, "- %c", c[1]);
710 break;
711 default:
712 seq_printf(m, "Unknown location");
713 break;
714 }
715}
716
717
718/* ****************************************************************** */
719/*
720 * Format:
721 * ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
 722 * the '.' may be an abbreviation
723 */
724static void check_location_string(struct seq_file *m, char *c)
725{
726 while (*c) {
727 if (isalpha(*c) || *c == '.')
728 check_location(m, c);
729 else if (*c == '/' || *c == '-')
730 seq_printf(m, " at ");
731 c++;
732 }
733}
734
735
736/* ****************************************************************** */
737
738static void get_location_code(struct seq_file *m, struct individual_sensor *s, char *loc)
739{
740 if (!loc || !*loc) {
741 seq_printf(m, "---");/* does not have a location */
742 } else {
743 check_location_string(m, loc);
744 }
745 seq_putc(m, ' ');
746}
747/* ****************************************************************** */
748/* INDICATORS - Tone Frequency */
749/* ****************************************************************** */
750static ssize_t ppc_rtas_tone_freq_write(struct file *file,
751 const char __user *buf, size_t count, loff_t *ppos)
752{
753 unsigned long freq;
754 int error = parse_number(buf, count, &freq);
755 if (error)
756 return error;
757
758 rtas_tone_frequency = freq; /* save it for later */
759 error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL,
760 TONE_FREQUENCY, 0, freq);
761 if (error)
762 printk(KERN_WARNING "error: setting tone frequency returned: %s\n",
763 ppc_rtas_process_error(error));
764 return count;
765}
766/* ****************************************************************** */
767static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v)
768{
769 seq_printf(m, "%lu\n", rtas_tone_frequency);
770 return 0;
771}
772/* ****************************************************************** */
773/* INDICATORS - Tone Volume */
774/* ****************************************************************** */
775static ssize_t ppc_rtas_tone_volume_write(struct file *file,
776 const char __user *buf, size_t count, loff_t *ppos)
777{
778 unsigned long volume;
779 int error = parse_number(buf, count, &volume);
780 if (error)
781 return error;
782
783 if (volume > 100)
784 volume = 100;
785
786 rtas_tone_volume = volume; /* save it for later */
787 error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL,
788 TONE_VOLUME, 0, volume);
789 if (error)
790 printk(KERN_WARNING "error: setting tone volume returned: %s\n",
791 ppc_rtas_process_error(error));
792 return count;
793}
794/* ****************************************************************** */
795static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v)
796{
797 seq_printf(m, "%lu\n", rtas_tone_volume);
798 return 0;
799}
800
801#define RMO_READ_BUF_MAX 30
802
803/* RTAS Userspace access */
804static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v)
805{
806 seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_RMOBUF_MAX);
807 return 0;
808}
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 4d22eeeeb91d..b7fc2d884950 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -43,6 +43,13 @@ char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
43unsigned long rtas_rmo_buf; 43unsigned long rtas_rmo_buf;
44 44
45/* 45/*
46 * If non-NULL, this gets called when the kernel terminates.
47 * This is done like this so rtas_flash can be a module.
48 */
49void (*rtas_flash_term_hook)(int);
50EXPORT_SYMBOL(rtas_flash_term_hook);
51
52/*
46 * call_rtas_display_status and call_rtas_display_status_delay 53 * call_rtas_display_status and call_rtas_display_status_delay
47 * are designed only for very early low-level debugging, which 54 * are designed only for very early low-level debugging, which
48 * is why the token is hard-coded to 10. 55 * is why the token is hard-coded to 10.
@@ -206,6 +213,7 @@ void rtas_progress(char *s, unsigned short hex)
206 213
207 spin_unlock(&progress_lock); 214 spin_unlock(&progress_lock);
208} 215}
216EXPORT_SYMBOL(rtas_progress); /* needed by rtas_flash module */
209 217
210int rtas_token(const char *service) 218int rtas_token(const char *service)
211{ 219{
@@ -492,6 +500,8 @@ int rtas_set_indicator(int indicator, int index, int new_value)
492 500
493void rtas_restart(char *cmd) 501void rtas_restart(char *cmd)
494{ 502{
503 if (rtas_flash_term_hook)
504 rtas_flash_term_hook(SYS_RESTART);
495 printk("RTAS system-reboot returned %d\n", 505 printk("RTAS system-reboot returned %d\n",
496 rtas_call(rtas_token("system-reboot"), 0, 1, NULL)); 506 rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
497 for (;;); 507 for (;;);
@@ -499,6 +509,8 @@ void rtas_restart(char *cmd)
499 509
500void rtas_power_off(void) 510void rtas_power_off(void)
501{ 511{
512 if (rtas_flash_term_hook)
513 rtas_flash_term_hook(SYS_POWER_OFF);
502 /* allow power on only with power button press */ 514 /* allow power on only with power button press */
503 printk("RTAS power-off returned %d\n", 515 printk("RTAS power-off returned %d\n",
504 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1)); 516 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
@@ -507,7 +519,12 @@ void rtas_power_off(void)
507 519
508void rtas_halt(void) 520void rtas_halt(void)
509{ 521{
510 rtas_power_off(); 522 if (rtas_flash_term_hook)
523 rtas_flash_term_hook(SYS_HALT);
524 /* allow power on only with power button press */
525 printk("RTAS power-off returned %d\n",
526 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
527 for (;;);
511} 528}
512 529
513/* Must be in the RMO region, so we place it here */ 530/* Must be in the RMO region, so we place it here */
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
new file mode 100644
index 000000000000..50500093c97f
--- /dev/null
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -0,0 +1,834 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * /proc/ppc64/rtas/firmware_flash interface
10 *
11 * This file implements a firmware_flash interface to pump a firmware
12 * image into the kernel. At reboot time rtas_restart() will see the
13 * firmware image and flash it as it reboots (see rtas.c).
14 */
15
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/proc_fs.h>
19#include <asm/delay.h>
20#include <asm/uaccess.h>
21#include <asm/rtas.h>
22#include <asm/abs_addr.h>
23
24#define MODULE_VERS "1.0"
25#define MODULE_NAME "rtas_flash"
26
27#define FIRMWARE_FLASH_NAME "firmware_flash"
28#define FIRMWARE_UPDATE_NAME "firmware_update"
29#define MANAGE_FLASH_NAME "manage_flash"
30#define VALIDATE_FLASH_NAME "validate_flash"
31
32/* General RTAS Status Codes */
33#define RTAS_RC_SUCCESS 0
34#define RTAS_RC_HW_ERR -1
35#define RTAS_RC_BUSY -2
36
37/* Flash image status values */
38#define FLASH_AUTH -9002 /* RTAS Not Service Authority Partition */
39#define FLASH_NO_OP -1099 /* No operation initiated by user */
40#define FLASH_IMG_SHORT -1005 /* Flash image shorter than expected */
41#define FLASH_IMG_BAD_LEN -1004 /* Bad length value in flash list block */
42#define FLASH_IMG_NULL_DATA -1003 /* Bad data value in flash list block */
43#define FLASH_IMG_READY 0 /* Firmware img ready for flash on reboot */
44
45/* Manage image status values */
46#define MANAGE_AUTH -9002 /* RTAS Not Service Authority Partition */
47#define MANAGE_ACTIVE_ERR -9001 /* RTAS Cannot Overwrite Active Img */
48#define MANAGE_NO_OP -1099 /* No operation initiated by user */
49#define MANAGE_PARAM_ERR -3 /* RTAS Parameter Error */
50#define MANAGE_HW_ERR -1 /* RTAS Hardware Error */
51
52/* Validate image status values */
53#define VALIDATE_AUTH -9002 /* RTAS Not Service Authority Partition */
54#define VALIDATE_NO_OP -1099 /* No operation initiated by the user */
55#define VALIDATE_INCOMPLETE -1002 /* User copied < VALIDATE_BUF_SIZE */
56#define VALIDATE_READY -1001 /* Firmware image ready for validation */
57#define VALIDATE_PARAM_ERR -3 /* RTAS Parameter Error */
58#define VALIDATE_HW_ERR -1 /* RTAS Hardware Error */
59#define VALIDATE_TMP_UPDATE 0 /* Validate Return Status */
60#define VALIDATE_FLASH_AUTH 1 /* Validate Return Status */
61#define VALIDATE_INVALID_IMG 2 /* Validate Return Status */
62#define VALIDATE_CUR_UNKNOWN 3 /* Validate Return Status */
63#define VALIDATE_TMP_COMMIT_DL 4 /* Validate Return Status */
64#define VALIDATE_TMP_COMMIT 5 /* Validate Return Status */
65#define VALIDATE_TMP_UPDATE_DL 6 /* Validate Return Status */
66
67/* ibm,manage-flash-image operation tokens */
68#define RTAS_REJECT_TMP_IMG 0
69#define RTAS_COMMIT_TMP_IMG 1
70
71/* Array sizes */
72#define VALIDATE_BUF_SIZE 4096
73#define RTAS_MSG_MAXLEN 64
74
75struct flash_block {
76 char *data;
77 unsigned long length;
78};
79
80/* This struct is very similar but not identical to
81 * that needed by the rtas flash update.
82 * All we need to do for rtas is rewrite num_blocks
83 * into a version/length and translate the pointers
84 * to absolute.
85 */
86#define FLASH_BLOCKS_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct flash_block))
87struct flash_block_list {
88 unsigned long num_blocks;
89 struct flash_block_list *next;
90 struct flash_block blocks[FLASH_BLOCKS_PER_NODE];
91};
92struct flash_block_list_header { /* just the header of flash_block_list */
93 unsigned long num_blocks;
94 struct flash_block_list *next;
95};
96
97static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
98
99#define FLASH_BLOCK_LIST_VERSION (1UL)
100
101/* Local copy of the flash block list.
102 * We only allow one open of the flash proc file and create this
103 * list as we go. This list will be put in the
104 * rtas_firmware_flash_list var once it is fully read.
105 *
106 * For convenience as we build the list we use virtual addrs,
107 * we do not fill in the version number, and the length field
108 * is treated as the number of entries currently in the block
109 * (i.e. not a byte count). This is all fixed on release.
110 */
111
112/* Status int must be first member of struct */
113struct rtas_update_flash_t
114{
115 int status; /* Flash update status */
116 struct flash_block_list *flist; /* Local copy of flash block list */
117};
118
119/* Status int must be first member of struct */
120struct rtas_manage_flash_t
121{
122 int status; /* Returned status */
123 unsigned int op; /* Reject or commit image */
124};
125
126/* Status int must be first member of struct */
127struct rtas_validate_flash_t
128{
129 int status; /* Returned status */
130 char buf[VALIDATE_BUF_SIZE]; /* Candidate image buffer */
131 unsigned int buf_size; /* Size of image buf */
132 unsigned int update_results; /* Update results token */
133};
134
135static DEFINE_SPINLOCK(flash_file_open_lock);
136static struct proc_dir_entry *firmware_flash_pde;
137static struct proc_dir_entry *firmware_update_pde;
138static struct proc_dir_entry *validate_pde;
139static struct proc_dir_entry *manage_pde;
140
141/* Do simple sanity checks on the flash image. */
142static int flash_list_valid(struct flash_block_list *flist)
143{
144 struct flash_block_list *f;
145 int i;
146 unsigned long block_size, image_size;
147
148 /* Paranoid self test here. We also collect the image size. */
149 image_size = 0;
150 for (f = flist; f; f = f->next) {
151 for (i = 0; i < f->num_blocks; i++) {
152 if (f->blocks[i].data == NULL) {
153 return FLASH_IMG_NULL_DATA;
154 }
155 block_size = f->blocks[i].length;
156 if (block_size <= 0 || block_size > PAGE_SIZE) {
157 return FLASH_IMG_BAD_LEN;
158 }
159 image_size += block_size;
160 }
161 }
162
163 if (image_size < (256 << 10)) {
164 if (image_size < 2)
165 return FLASH_NO_OP;
166 }
167
168 printk(KERN_INFO "FLASH: flash image with %ld bytes stored for hardware flash on reboot\n", image_size);
169
170 return FLASH_IMG_READY;
171}
172
173static void free_flash_list(struct flash_block_list *f)
174{
175 struct flash_block_list *next;
176 int i;
177
178 while (f) {
179 for (i = 0; i < f->num_blocks; i++)
180 free_page((unsigned long)(f->blocks[i].data));
181 next = f->next;
182 free_page((unsigned long)f);
183 f = next;
184 }
185}
186
187static int rtas_flash_release(struct inode *inode, struct file *file)
188{
189 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
190 struct rtas_update_flash_t *uf;
191
192 uf = (struct rtas_update_flash_t *) dp->data;
193 if (uf->flist) {
194 /* File was opened in write mode for a new flash attempt */
195 /* Clear saved list */
196 if (rtas_firmware_flash_list.next) {
197 free_flash_list(rtas_firmware_flash_list.next);
198 rtas_firmware_flash_list.next = NULL;
199 }
200
201 if (uf->status != FLASH_AUTH)
202 uf->status = flash_list_valid(uf->flist);
203
204 if (uf->status == FLASH_IMG_READY)
205 rtas_firmware_flash_list.next = uf->flist;
206 else
207 free_flash_list(uf->flist);
208
209 uf->flist = NULL;
210 }
211
212 atomic_dec(&dp->count);
213 return 0;
214}
215
216static void get_flash_status_msg(int status, char *buf)
217{
218 char *msg;
219
220 switch (status) {
221 case FLASH_AUTH:
222 msg = "error: this partition does not have service authority\n";
223 break;
224 case FLASH_NO_OP:
225 msg = "info: no firmware image for flash\n";
226 break;
227 case FLASH_IMG_SHORT:
228 msg = "error: flash image short\n";
229 break;
230 case FLASH_IMG_BAD_LEN:
231 msg = "error: internal error bad length\n";
232 break;
233 case FLASH_IMG_NULL_DATA:
234 msg = "error: internal error null data\n";
235 break;
236 case FLASH_IMG_READY:
237 msg = "ready: firmware image ready for flash on reboot\n";
238 break;
239 default:
240 sprintf(buf, "error: unexpected status value %d\n", status);
241 return;
242 }
243
244 strcpy(buf, msg);
245}
246
247/* Reading the proc file will show status (not the firmware contents) */
248static ssize_t rtas_flash_read(struct file *file, char __user *buf,
249 size_t count, loff_t *ppos)
250{
251 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
252 struct rtas_update_flash_t *uf;
253 char msg[RTAS_MSG_MAXLEN];
254 int msglen;
255
256 uf = (struct rtas_update_flash_t *) dp->data;
257
258 if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) {
259 get_flash_status_msg(uf->status, msg);
260 } else { /* FIRMWARE_UPDATE_NAME */
261 sprintf(msg, "%d\n", uf->status);
262 }
263 msglen = strlen(msg);
264 if (msglen > count)
265 msglen = count;
266
267 if (ppos && *ppos != 0)
268 return 0; /* be cheap */
269
270 if (!access_ok(VERIFY_WRITE, buf, msglen))
271 return -EINVAL;
272
273 if (copy_to_user(buf, msg, msglen))
274 return -EFAULT;
275
276 if (ppos)
277 *ppos = msglen;
278 return msglen;
279}
280
281/* We could be much more efficient here. But to keep this function
282 * simple we allocate a page to the block list no matter how small the
283 * count is. If the system is low on memory it will be just as well
284 * that we fail....
285 */
286static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
287 size_t count, loff_t *off)
288{
289 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
290 struct rtas_update_flash_t *uf;
291 char *p;
292 int next_free;
293 struct flash_block_list *fl;
294
295 uf = (struct rtas_update_flash_t *) dp->data;
296
297 if (uf->status == FLASH_AUTH || count == 0)
298 return count; /* discard data */
299
300 /* In the case that the image is not ready for flashing, the memory
301 * allocated for the block list will be freed upon the release of the
302 * proc file
303 */
304 if (uf->flist == NULL) {
305 uf->flist = (struct flash_block_list *) get_zeroed_page(GFP_KERNEL);
306 if (!uf->flist)
307 return -ENOMEM;
308 }
309
310 fl = uf->flist;
311 while (fl->next)
312 fl = fl->next; /* seek to last block_list for append */
313 next_free = fl->num_blocks;
314 if (next_free == FLASH_BLOCKS_PER_NODE) {
315 /* Need to allocate another block_list */
316 fl->next = (struct flash_block_list *)get_zeroed_page(GFP_KERNEL);
317 if (!fl->next)
318 return -ENOMEM;
319 fl = fl->next;
320 next_free = 0;
321 }
322
323 if (count > PAGE_SIZE)
324 count = PAGE_SIZE;
325 p = (char *)get_zeroed_page(GFP_KERNEL);
326 if (!p)
327 return -ENOMEM;
328
329 if(copy_from_user(p, buffer, count)) {
330 free_page((unsigned long)p);
331 return -EFAULT;
332 }
333 fl->blocks[next_free].data = p;
334 fl->blocks[next_free].length = count;
335 fl->num_blocks++;
336
337 return count;
338}
339
340static int rtas_excl_open(struct inode *inode, struct file *file)
341{
342 struct proc_dir_entry *dp = PDE(inode);
343
344 /* Enforce exclusive open with use count of PDE */
345 spin_lock(&flash_file_open_lock);
346 if (atomic_read(&dp->count) > 1) {
347 spin_unlock(&flash_file_open_lock);
348 return -EBUSY;
349 }
350
351 atomic_inc(&dp->count);
352 spin_unlock(&flash_file_open_lock);
353
354 return 0;
355}
356
357static int rtas_excl_release(struct inode *inode, struct file *file)
358{
359 struct proc_dir_entry *dp = PDE(inode);
360
361 atomic_dec(&dp->count);
362
363 return 0;
364}
365
366static void manage_flash(struct rtas_manage_flash_t *args_buf)
367{
368 unsigned int wait_time;
369 s32 rc;
370
371 while (1) {
372 rc = rtas_call(rtas_token("ibm,manage-flash-image"), 1,
373 1, NULL, args_buf->op);
374 if (rc == RTAS_RC_BUSY)
375 udelay(1);
376 else if (rtas_is_extended_busy(rc)) {
377 wait_time = rtas_extended_busy_delay_time(rc);
378 udelay(wait_time * 1000);
379 } else
380 break;
381 }
382
383 args_buf->status = rc;
384}
385
386static ssize_t manage_flash_read(struct file *file, char __user *buf,
387 size_t count, loff_t *ppos)
388{
389 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
390 struct rtas_manage_flash_t *args_buf;
391 char msg[RTAS_MSG_MAXLEN];
392 int msglen;
393
394 args_buf = (struct rtas_manage_flash_t *) dp->data;
395 if (args_buf == NULL)
396 return 0;
397
398 msglen = sprintf(msg, "%d\n", args_buf->status);
399 if (msglen > count)
400 msglen = count;
401
402 if (ppos && *ppos != 0)
403 return 0; /* be cheap */
404
405 if (!access_ok(VERIFY_WRITE, buf, msglen))
406 return -EINVAL;
407
408 if (copy_to_user(buf, msg, msglen))
409 return -EFAULT;
410
411 if (ppos)
412 *ppos = msglen;
413 return msglen;
414}
415
416static ssize_t manage_flash_write(struct file *file, const char __user *buf,
417 size_t count, loff_t *off)
418{
419 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
420 struct rtas_manage_flash_t *args_buf;
421 const char reject_str[] = "0";
422 const char commit_str[] = "1";
423 char stkbuf[10];
424 int op;
425
426 args_buf = (struct rtas_manage_flash_t *) dp->data;
427 if ((args_buf->status == MANAGE_AUTH) || (count == 0))
428 return count;
429
430 op = -1;
431 if (buf) {
432 if (count > 9) count = 9;
433 if (copy_from_user (stkbuf, buf, count)) {
434 return -EFAULT;
435 }
436 if (strncmp(stkbuf, reject_str, strlen(reject_str)) == 0)
437 op = RTAS_REJECT_TMP_IMG;
438 else if (strncmp(stkbuf, commit_str, strlen(commit_str)) == 0)
439 op = RTAS_COMMIT_TMP_IMG;
440 }
441
442 if (op == -1) /* buf is empty, or contains invalid string */
443 return -EINVAL;
444
445 args_buf->op = op;
446 manage_flash(args_buf);
447
448 return count;
449}
450
451static void validate_flash(struct rtas_validate_flash_t *args_buf)
452{
453 int token = rtas_token("ibm,validate-flash-image");
454 unsigned int wait_time;
455 int update_results;
456 s32 rc;
457
458 rc = 0;
459 while(1) {
460 spin_lock(&rtas_data_buf_lock);
461 memcpy(rtas_data_buf, args_buf->buf, VALIDATE_BUF_SIZE);
462 rc = rtas_call(token, 2, 2, &update_results,
463 (u32) __pa(rtas_data_buf), args_buf->buf_size);
464 memcpy(args_buf->buf, rtas_data_buf, VALIDATE_BUF_SIZE);
465 spin_unlock(&rtas_data_buf_lock);
466
467 if (rc == RTAS_RC_BUSY)
468 udelay(1);
469 else if (rtas_is_extended_busy(rc)) {
470 wait_time = rtas_extended_busy_delay_time(rc);
471 udelay(wait_time * 1000);
472 } else
473 break;
474 }
475
476 args_buf->status = rc;
477 args_buf->update_results = update_results;
478}
479
480static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
481 char *msg)
482{
483 int n;
484
485 if (args_buf->status >= VALIDATE_TMP_UPDATE) {
486 n = sprintf(msg, "%d\n", args_buf->update_results);
487 if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) ||
488 (args_buf->update_results == VALIDATE_TMP_UPDATE))
489 n += sprintf(msg + n, "%s\n", args_buf->buf);
490 } else {
491 n = sprintf(msg, "%d\n", args_buf->status);
492 }
493 return n;
494}
495
496static ssize_t validate_flash_read(struct file *file, char __user *buf,
497 size_t count, loff_t *ppos)
498{
499 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
500 struct rtas_validate_flash_t *args_buf;
501 char msg[RTAS_MSG_MAXLEN];
502 int msglen;
503
504 args_buf = (struct rtas_validate_flash_t *) dp->data;
505
506 if (ppos && *ppos != 0)
507 return 0; /* be cheap */
508
509 msglen = get_validate_flash_msg(args_buf, msg);
510 if (msglen > count)
511 msglen = count;
512
513 if (!access_ok(VERIFY_WRITE, buf, msglen))
514 return -EINVAL;
515
516 if (copy_to_user(buf, msg, msglen))
517 return -EFAULT;
518
519 if (ppos)
520 *ppos = msglen;
521 return msglen;
522}
523
524static ssize_t validate_flash_write(struct file *file, const char __user *buf,
525 size_t count, loff_t *off)
526{
527 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
528 struct rtas_validate_flash_t *args_buf;
529 int rc;
530
531 args_buf = (struct rtas_validate_flash_t *) dp->data;
532
533 if (dp->data == NULL) {
534 dp->data = kmalloc(sizeof(struct rtas_validate_flash_t),
535 GFP_KERNEL);
536 if (dp->data == NULL)
537 return -ENOMEM;
538 }
539
540 /* We are only interested in the first 4K of the
541 * candidate image */
542 if ((*off >= VALIDATE_BUF_SIZE) ||
543 (args_buf->status == VALIDATE_AUTH)) {
544 *off += count;
545 return count;
546 }
547
548 if (*off + count >= VALIDATE_BUF_SIZE) {
549 count = VALIDATE_BUF_SIZE - *off;
550 args_buf->status = VALIDATE_READY;
551 } else {
552 args_buf->status = VALIDATE_INCOMPLETE;
553 }
554
555 if (!access_ok(VERIFY_READ, buf, count)) {
556 rc = -EFAULT;
557 goto done;
558 }
559 if (copy_from_user(args_buf->buf + *off, buf, count)) {
560 rc = -EFAULT;
561 goto done;
562 }
563
564 *off += count;
565 rc = count;
566done:
567 if (rc < 0) {
568 kfree(dp->data);
569 dp->data = NULL;
570 }
571 return rc;
572}
573
574static int validate_flash_release(struct inode *inode, struct file *file)
575{
576 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
577 struct rtas_validate_flash_t *args_buf;
578
579 args_buf = (struct rtas_validate_flash_t *) dp->data;
580
581 if (args_buf->status == VALIDATE_READY) {
582 args_buf->buf_size = VALIDATE_BUF_SIZE;
583 validate_flash(args_buf);
584 }
585
586 /* The matching atomic_inc was in rtas_excl_open() */
587 atomic_dec(&dp->count);
588
589 return 0;
590}
591
592static void rtas_flash_firmware(int reboot_type)
593{
594 unsigned long image_size;
595 struct flash_block_list *f, *next, *flist;
596 unsigned long rtas_block_list;
597 int i, status, update_token;
598
599 if (rtas_firmware_flash_list.next == NULL)
600 return; /* nothing to do */
601
602 if (reboot_type != SYS_RESTART) {
603 printk(KERN_ALERT "FLASH: firmware flash requires a reboot\n");
604 printk(KERN_ALERT "FLASH: the firmware image will NOT be flashed\n");
605 return;
606 }
607
608 update_token = rtas_token("ibm,update-flash-64-and-reboot");
609 if (update_token == RTAS_UNKNOWN_SERVICE) {
610 printk(KERN_ALERT "FLASH: ibm,update-flash-64-and-reboot "
611 "is not available -- not a service partition?\n");
612 printk(KERN_ALERT "FLASH: firmware will not be flashed\n");
613 return;
614 }
615
616 /* NOTE: the "first" block list is a global var with no data
617 * blocks in the kernel data segment. We do this because
618 * we want to ensure this block_list addr is under 4GB.
619 */
620 rtas_firmware_flash_list.num_blocks = 0;
621 flist = (struct flash_block_list *)&rtas_firmware_flash_list;
622 rtas_block_list = virt_to_abs(flist);
623 if (rtas_block_list >= 4UL*1024*1024*1024) {
624 printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
625 return;
626 }
627
628 printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
629 /* Update the block_list in place. */
630 image_size = 0;
631 for (f = flist; f; f = next) {
632 /* Translate data addrs to absolute */
633 for (i = 0; i < f->num_blocks; i++) {
634 f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data);
635 image_size += f->blocks[i].length;
636 }
637 next = f->next;
638 /* Don't translate NULL pointer for last entry */
639 if (f->next)
640 f->next = (struct flash_block_list *)virt_to_abs(f->next);
641 else
642 f->next = NULL;
643 /* make num_blocks into the version/length field */
644 f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
645 }
646
647 printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
648 printk(KERN_ALERT "FLASH: performing flash and reboot\n");
649 rtas_progress("Flashing \n", 0x0);
650 rtas_progress("Please Wait... ", 0x0);
651 printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n");
652 status = rtas_call(update_token, 1, 1, NULL, rtas_block_list);
653 switch (status) { /* should only get "bad" status */
654 case 0:
655 printk(KERN_ALERT "FLASH: success\n");
656 break;
657 case -1:
 658 printk(KERN_ALERT "FLASH: hardware error. Firmware may not be flashed\n");
659 break;
660 case -3:
661 printk(KERN_ALERT "FLASH: image is corrupt or not correct for this platform. Firmware not flashed\n");
662 break;
663 case -4:
664 printk(KERN_ALERT "FLASH: flash failed when partially complete. System may not reboot\n");
665 break;
666 default:
667 printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
668 break;
669 }
670}
671
672static void remove_flash_pde(struct proc_dir_entry *dp)
673{
674 if (dp) {
675 if (dp->data != NULL)
676 kfree(dp->data);
677 dp->owner = NULL;
678 remove_proc_entry(dp->name, dp->parent);
679 }
680}
681
682static int initialize_flash_pde_data(const char *rtas_call_name,
683 size_t buf_size,
684 struct proc_dir_entry *dp)
685{
686 int *status;
687 int token;
688
689 dp->data = kmalloc(buf_size, GFP_KERNEL);
690 if (dp->data == NULL) {
691 remove_flash_pde(dp);
692 return -ENOMEM;
693 }
694
695 memset(dp->data, 0, buf_size);
696
697 /*
698 * This code assumes that the status int is the first member of the
699 * struct
700 */
701 status = (int *) dp->data;
702 token = rtas_token(rtas_call_name);
703 if (token == RTAS_UNKNOWN_SERVICE)
704 *status = FLASH_AUTH;
705 else
706 *status = FLASH_NO_OP;
707
708 return 0;
709}
710
711static struct proc_dir_entry *create_flash_pde(const char *filename,
712 struct file_operations *fops)
713{
714 struct proc_dir_entry *ent = NULL;
715
716 ent = create_proc_entry(filename, S_IRUSR | S_IWUSR, NULL);
717 if (ent != NULL) {
718 ent->nlink = 1;
719 ent->proc_fops = fops;
720 ent->owner = THIS_MODULE;
721 }
722
723 return ent;
724}
725
726static struct file_operations rtas_flash_operations = {
727 .read = rtas_flash_read,
728 .write = rtas_flash_write,
729 .open = rtas_excl_open,
730 .release = rtas_flash_release,
731};
732
733static struct file_operations manage_flash_operations = {
734 .read = manage_flash_read,
735 .write = manage_flash_write,
736 .open = rtas_excl_open,
737 .release = rtas_excl_release,
738};
739
740static struct file_operations validate_flash_operations = {
741 .read = validate_flash_read,
742 .write = validate_flash_write,
743 .open = rtas_excl_open,
744 .release = validate_flash_release,
745};
746
747int __init rtas_flash_init(void)
748{
749 int rc;
750
751 if (rtas_token("ibm,update-flash-64-and-reboot") ==
752 RTAS_UNKNOWN_SERVICE) {
753 printk(KERN_ERR "rtas_flash: no firmware flash support\n");
754 return 1;
755 }
756
757 firmware_flash_pde = create_flash_pde("ppc64/rtas/"
758 FIRMWARE_FLASH_NAME,
759 &rtas_flash_operations);
760 if (firmware_flash_pde == NULL) {
761 rc = -ENOMEM;
762 goto cleanup;
763 }
764
765 rc = initialize_flash_pde_data("ibm,update-flash-64-and-reboot",
766 sizeof(struct rtas_update_flash_t),
767 firmware_flash_pde);
768 if (rc != 0)
769 goto cleanup;
770
771 firmware_update_pde = create_flash_pde("ppc64/rtas/"
772 FIRMWARE_UPDATE_NAME,
773 &rtas_flash_operations);
774 if (firmware_update_pde == NULL) {
775 rc = -ENOMEM;
776 goto cleanup;
777 }
778
779 rc = initialize_flash_pde_data("ibm,update-flash-64-and-reboot",
780 sizeof(struct rtas_update_flash_t),
781 firmware_update_pde);
782 if (rc != 0)
783 goto cleanup;
784
785 validate_pde = create_flash_pde("ppc64/rtas/" VALIDATE_FLASH_NAME,
786 &validate_flash_operations);
787 if (validate_pde == NULL) {
788 rc = -ENOMEM;
789 goto cleanup;
790 }
791
792 rc = initialize_flash_pde_data("ibm,validate-flash-image",
793 sizeof(struct rtas_validate_flash_t),
794 validate_pde);
795 if (rc != 0)
796 goto cleanup;
797
798 manage_pde = create_flash_pde("ppc64/rtas/" MANAGE_FLASH_NAME,
799 &manage_flash_operations);
800 if (manage_pde == NULL) {
801 rc = -ENOMEM;
802 goto cleanup;
803 }
804
805 rc = initialize_flash_pde_data("ibm,manage-flash-image",
806 sizeof(struct rtas_manage_flash_t),
807 manage_pde);
808 if (rc != 0)
809 goto cleanup;
810
811 rtas_flash_term_hook = rtas_flash_firmware;
812 return 0;
813
814cleanup:
815 remove_flash_pde(firmware_flash_pde);
816 remove_flash_pde(firmware_update_pde);
817 remove_flash_pde(validate_pde);
818 remove_flash_pde(manage_pde);
819
820 return rc;
821}
822
823void __exit rtas_flash_cleanup(void)
824{
825 rtas_flash_term_hook = NULL;
826 remove_flash_pde(firmware_flash_pde);
827 remove_flash_pde(firmware_update_pde);
828 remove_flash_pde(validate_pde);
829 remove_flash_pde(manage_pde);
830}
831
832module_init(rtas_flash_init);
833module_exit(rtas_flash_cleanup);
834MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 1292460fcde2..d43fa8c0e5ac 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -170,12 +170,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
170 } 170 }
171 171
172#ifdef CONFIG_SMP 172#ifdef CONFIG_SMP
173#ifdef CONFIG_PPC64 /* XXX for now */
174 pvr = per_cpu(pvr, cpu_id); 173 pvr = per_cpu(pvr, cpu_id);
175#else 174#else
176 pvr = cpu_data[cpu_id].pvr;
177#endif
178#else
179 pvr = mfspr(SPRN_PVR); 175 pvr = mfspr(SPRN_PVR);
180#endif 176#endif
181 maj = (pvr >> 8) & 0xFF; 177 maj = (pvr >> 8) & 0xFF;
@@ -201,11 +197,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
201#ifdef CONFIG_TAU_AVERAGE 197#ifdef CONFIG_TAU_AVERAGE
202 /* more straightforward, but potentially misleading */ 198 /* more straightforward, but potentially misleading */
203 seq_printf(m, "temperature \t: %u C (uncalibrated)\n", 199 seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
204 cpu_temp(i)); 200 cpu_temp(cpu_id));
205#else 201#else
206 /* show the actual temp sensor range */ 202 /* show the actual temp sensor range */
207 u32 temp; 203 u32 temp;
208 temp = cpu_temp_both(i); 204 temp = cpu_temp_both(cpu_id);
209 seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n", 205 seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
210 temp & 0xff, temp >> 16); 206 temp & 0xff, temp >> 16);
211#endif 207#endif
@@ -408,3 +404,118 @@ static int __init set_preferred_console(void)
408} 404}
409console_initcall(set_preferred_console); 405console_initcall(set_preferred_console);
410#endif /* CONFIG_PPC_MULTIPLATFORM */ 406#endif /* CONFIG_PPC_MULTIPLATFORM */
407
408#ifdef CONFIG_SMP
409
410/**
411 * setup_cpu_maps - initialize the following cpu maps:
412 * cpu_possible_map
413 * cpu_present_map
414 * cpu_sibling_map
415 *
416 * Having the possible map set up early allows us to restrict allocations
417 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
418 *
419 * We do not initialize the online map here; cpus set their own bits in
420 * cpu_online_map as they come up.
421 *
422 * This function is valid only for Open Firmware systems. finish_device_tree
423 * must be called before using this.
424 *
425 * While we're here, we may as well set the "physical" cpu ids in the paca.
426 */
427void __init smp_setup_cpu_maps(void)
428{
429 struct device_node *dn = NULL;
430 int cpu = 0;
431 int swap_cpuid = 0;
432
433 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
434 int *intserv;
435 int j, len = sizeof(u32), nthreads = 1;
436
437 intserv = (int *)get_property(dn, "ibm,ppc-interrupt-server#s",
438 &len);
439 if (intserv)
440 nthreads = len / sizeof(int);
441 else {
442 intserv = (int *) get_property(dn, "reg", NULL);
443 if (!intserv)
444 intserv = &cpu; /* assume logical == phys */
445 }
446
447 for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
448 cpu_set(cpu, cpu_present_map);
449 set_hard_smp_processor_id(cpu, intserv[j]);
450
451 if (intserv[j] == boot_cpuid_phys)
452 swap_cpuid = cpu;
453 cpu_set(cpu, cpu_possible_map);
454 cpu++;
455 }
456 }
457
458 /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
459 * boot cpu is logical 0.
460 */
461 if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
462 u32 tmp;
463 tmp = get_hard_smp_processor_id(0);
464 set_hard_smp_processor_id(0, boot_cpuid_phys);
465 set_hard_smp_processor_id(swap_cpuid, tmp);
466 }
467
468#ifdef CONFIG_PPC64
469 /*
470 * On pSeries LPAR, we need to know how many cpus
471 * could possibly be added to this partition.
472 */
473 if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
474 (dn = of_find_node_by_path("/rtas"))) {
475 int num_addr_cell, num_size_cell, maxcpus;
476 unsigned int *ireg;
477
478 num_addr_cell = prom_n_addr_cells(dn);
479 num_size_cell = prom_n_size_cells(dn);
480
481 ireg = (unsigned int *)
482 get_property(dn, "ibm,lrdr-capacity", NULL);
483
484 if (!ireg)
485 goto out;
486
487 maxcpus = ireg[num_addr_cell + num_size_cell];
488
489 /* Double maxcpus for processors which have SMT capability */
490 if (cpu_has_feature(CPU_FTR_SMT))
491 maxcpus *= 2;
492
493 if (maxcpus > NR_CPUS) {
494 printk(KERN_WARNING
495 "Partition configured for %d cpus, "
496 "operating system maximum is %d.\n",
497 maxcpus, NR_CPUS);
498 maxcpus = NR_CPUS;
499 } else
500 printk(KERN_INFO "Partition configured for %d cpus.\n",
501 maxcpus);
502
503 for (cpu = 0; cpu < maxcpus; cpu++)
504 cpu_set(cpu, cpu_possible_map);
505 out:
506 of_node_put(dn);
507 }
508
509 /*
510 * Do the sibling map; assume only two threads per processor.
511 */
512 for_each_cpu(cpu) {
513 cpu_set(cpu, cpu_sibling_map[cpu]);
514 if (cpu_has_feature(CPU_FTR_SMT))
515 cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
516 }
517
518 systemcfg->processorCount = num_present_cpus();
519#endif /* CONFIG_PPC64 */
520}
521#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9680ae99b084..b45eedbb4b3a 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -288,6 +288,8 @@ void __init setup_arch(char **cmdline_p)
288 unflatten_device_tree(); 288 unflatten_device_tree();
289 finish_device_tree(); 289 finish_device_tree();
290 290
291 smp_setup_cpu_maps();
292
291#ifdef CONFIG_BOOTX_TEXT 293#ifdef CONFIG_BOOTX_TEXT
292 init_boot_display(); 294 init_boot_display();
293#endif 295#endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 40c48100bf1b..b0994050024f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -56,7 +56,7 @@
56#include <asm/page.h> 56#include <asm/page.h>
57#include <asm/mmu.h> 57#include <asm/mmu.h>
58#include <asm/lmb.h> 58#include <asm/lmb.h>
59#include <asm/iSeries/ItLpNaca.h> 59#include <asm/iseries/it_lp_naca.h>
60#include <asm/firmware.h> 60#include <asm/firmware.h>
61#include <asm/systemcfg.h> 61#include <asm/systemcfg.h>
62#include <asm/xmon.h> 62#include <asm/xmon.h>
@@ -103,8 +103,6 @@ extern void htab_initialize(void);
103extern void early_init_devtree(void *flat_dt); 103extern void early_init_devtree(void *flat_dt);
104extern void unflatten_device_tree(void); 104extern void unflatten_device_tree(void);
105 105
106extern void smp_release_cpus(void);
107
108int have_of = 1; 106int have_of = 1;
109int boot_cpuid = 0; 107int boot_cpuid = 0;
110int boot_cpuid_phys = 0; 108int boot_cpuid_phys = 0;
@@ -183,120 +181,14 @@ static int __init early_smt_enabled(char *p)
183} 181}
184early_param("smt-enabled", early_smt_enabled); 182early_param("smt-enabled", early_smt_enabled);
185 183
186/** 184#else
187 * setup_cpu_maps - initialize the following cpu maps: 185#define check_smt_enabled()
188 * cpu_possible_map
189 * cpu_present_map
190 * cpu_sibling_map
191 *
192 * Having the possible map set up early allows us to restrict allocations
193 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
194 *
195 * We do not initialize the online map here; cpus set their own bits in
196 * cpu_online_map as they come up.
197 *
198 * This function is valid only for Open Firmware systems. finish_device_tree
199 * must be called before using this.
200 *
201 * While we're here, we may as well set the "physical" cpu ids in the paca.
202 */
203static void __init setup_cpu_maps(void)
204{
205 struct device_node *dn = NULL;
206 int cpu = 0;
207 int swap_cpuid = 0;
208
209 check_smt_enabled();
210
211 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
212 u32 *intserv;
213 int j, len = sizeof(u32), nthreads;
214
215 intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
216 &len);
217 if (!intserv)
218 intserv = (u32 *)get_property(dn, "reg", NULL);
219
220 nthreads = len / sizeof(u32);
221
222 for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
223 cpu_set(cpu, cpu_present_map);
224 set_hard_smp_processor_id(cpu, intserv[j]);
225
226 if (intserv[j] == boot_cpuid_phys)
227 swap_cpuid = cpu;
228 cpu_set(cpu, cpu_possible_map);
229 cpu++;
230 }
231 }
232
233 /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
234 * boot cpu is logical 0.
235 */
236 if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
237 u32 tmp;
238 tmp = get_hard_smp_processor_id(0);
239 set_hard_smp_processor_id(0, boot_cpuid_phys);
240 set_hard_smp_processor_id(swap_cpuid, tmp);
241 }
242
243 /*
244 * On pSeries LPAR, we need to know how many cpus
245 * could possibly be added to this partition.
246 */
247 if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
248 (dn = of_find_node_by_path("/rtas"))) {
249 int num_addr_cell, num_size_cell, maxcpus;
250 unsigned int *ireg;
251
252 num_addr_cell = prom_n_addr_cells(dn);
253 num_size_cell = prom_n_size_cells(dn);
254
255 ireg = (unsigned int *)
256 get_property(dn, "ibm,lrdr-capacity", NULL);
257
258 if (!ireg)
259 goto out;
260
261 maxcpus = ireg[num_addr_cell + num_size_cell];
262
263 /* Double maxcpus for processors which have SMT capability */
264 if (cpu_has_feature(CPU_FTR_SMT))
265 maxcpus *= 2;
266
267 if (maxcpus > NR_CPUS) {
268 printk(KERN_WARNING
269 "Partition configured for %d cpus, "
270 "operating system maximum is %d.\n",
271 maxcpus, NR_CPUS);
272 maxcpus = NR_CPUS;
273 } else
274 printk(KERN_INFO "Partition configured for %d cpus.\n",
275 maxcpus);
276
277 for (cpu = 0; cpu < maxcpus; cpu++)
278 cpu_set(cpu, cpu_possible_map);
279 out:
280 of_node_put(dn);
281 }
282
283 /*
284 * Do the sibling map; assume only two threads per processor.
285 */
286 for_each_cpu(cpu) {
287 cpu_set(cpu, cpu_sibling_map[cpu]);
288 if (cpu_has_feature(CPU_FTR_SMT))
289 cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
290 }
291
292 systemcfg->processorCount = num_present_cpus();
293}
294#endif /* CONFIG_SMP */ 186#endif /* CONFIG_SMP */
295 187
296extern struct machdep_calls pSeries_md; 188extern struct machdep_calls pSeries_md;
297extern struct machdep_calls pmac_md; 189extern struct machdep_calls pmac_md;
298extern struct machdep_calls maple_md; 190extern struct machdep_calls maple_md;
299extern struct machdep_calls bpa_md; 191extern struct machdep_calls cell_md;
300extern struct machdep_calls iseries_md; 192extern struct machdep_calls iseries_md;
301 193
302/* Ultimately, stuff them in an elf section like initcalls... */ 194/* Ultimately, stuff them in an elf section like initcalls... */
@@ -310,8 +202,8 @@ static struct machdep_calls __initdata *machines[] = {
310#ifdef CONFIG_PPC_MAPLE 202#ifdef CONFIG_PPC_MAPLE
311 &maple_md, 203 &maple_md,
312#endif /* CONFIG_PPC_MAPLE */ 204#endif /* CONFIG_PPC_MAPLE */
313#ifdef CONFIG_PPC_BPA 205#ifdef CONFIG_PPC_CELL
314 &bpa_md, 206 &cell_md,
315#endif 207#endif
316#ifdef CONFIG_PPC_ISERIES 208#ifdef CONFIG_PPC_ISERIES
317 &iseries_md, 209 &iseries_md,
@@ -385,21 +277,49 @@ void __init early_setup(unsigned long dt_ptr)
385 DBG("Found, Initializing memory management...\n"); 277 DBG("Found, Initializing memory management...\n");
386 278
387 /* 279 /*
388 * Initialize stab / SLB management 280 * Initialize the MMU Hash table and create the linear mapping
281 * of memory. Has to be done before stab/slb initialization as
282 * this is currently where the page size encoding is obtained
389 */ 283 */
390 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 284 htab_initialize();
391 stab_initialize(lpaca->stab_real);
392 285
393 /* 286 /*
394 * Initialize the MMU Hash table and create the linear mapping 287 * Initialize stab / SLB management except on iSeries
395 * of memory
396 */ 288 */
397 htab_initialize(); 289 if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
290 if (cpu_has_feature(CPU_FTR_SLB))
291 slb_initialize();
292 else
293 stab_initialize(lpaca->stab_real);
294 }
398 295
399 DBG(" <- early_setup()\n"); 296 DBG(" <- early_setup()\n");
400} 297}
401 298
402 299
300#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
301void smp_release_cpus(void)
302{
303 extern unsigned long __secondary_hold_spinloop;
304
305 DBG(" -> smp_release_cpus()\n");
306
307 /* All secondary cpus are spinning on a common spinloop, release them
308 * all now so they can start to spin on their individual paca
309 * spinloops. For non SMP kernels, the secondary cpus never get out
310 * of the common spinloop.
311 * This is useless but harmless on iSeries, secondaries are already
312 * waiting on their paca spinloops. */
313
314 __secondary_hold_spinloop = 1;
315 mb();
316
317 DBG(" <- smp_release_cpus()\n");
318}
319#else
320#define smp_release_cpus()
321#endif /* CONFIG_SMP || CONFIG_KEXEC */
322
403/* 323/*
404 * Initialize some remaining members of the ppc64_caches and systemcfg structures 324 * Initialize some remaining members of the ppc64_caches and systemcfg structures
405 * (at least until we get rid of them completely). This is mostly some 325 * (at least until we get rid of them completely). This is mostly some
@@ -589,17 +509,13 @@ void __init setup_system(void)
589 509
590 parse_early_param(); 510 parse_early_param();
591 511
592#ifdef CONFIG_SMP 512 check_smt_enabled();
593 /* 513 smp_setup_cpu_maps();
594 * iSeries has already initialized the cpu maps at this point.
595 */
596 setup_cpu_maps();
597 514
598 /* Release secondary cpus out of their spinloops at 0x60 now that 515 /* Release secondary cpus out of their spinloops at 0x60 now that
599 * we can map physical -> logical CPU ids 516 * we can map physical -> logical CPU ids
600 */ 517 */
601 smp_release_cpus(); 518 smp_release_cpus();
602#endif
603 519
604 printk("Starting Linux PPC64 %s\n", system_utsname.version); 520 printk("Starting Linux PPC64 %s\n", system_utsname.version);
605 521
@@ -631,23 +547,6 @@ static int ppc64_panic_event(struct notifier_block *this,
631 return NOTIFY_DONE; 547 return NOTIFY_DONE;
632} 548}
633 549
634#ifdef CONFIG_PPC_ISERIES
635/*
636 * On iSeries we just parse the mem=X option from the command line.
637 * On pSeries it's a bit more complicated, see prom_init_mem()
638 */
639static int __init early_parsemem(char *p)
640{
641 if (!p)
642 return 0;
643
644 memory_limit = ALIGN(memparse(p, &p), PAGE_SIZE);
645
646 return 0;
647}
648early_param("mem", early_parsemem);
649#endif /* CONFIG_PPC_ISERIES */
650
651#ifdef CONFIG_IRQSTACKS 550#ifdef CONFIG_IRQSTACKS
652static void __init irqstack_early_init(void) 551static void __init irqstack_early_init(void)
653{ 552{
@@ -658,10 +557,12 @@ static void __init irqstack_early_init(void)
658 * SLB misses on them. 557 * SLB misses on them.
659 */ 558 */
660 for_each_cpu(i) { 559 for_each_cpu(i) {
661 softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE, 560 softirq_ctx[i] = (struct thread_info *)
662 THREAD_SIZE, 0x10000000)); 561 __va(lmb_alloc_base(THREAD_SIZE,
663 hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE, 562 THREAD_SIZE, 0x10000000));
664 THREAD_SIZE, 0x10000000)); 563 hardirq_ctx[i] = (struct thread_info *)
564 __va(lmb_alloc_base(THREAD_SIZE,
565 THREAD_SIZE, 0x10000000));
665 } 566 }
666} 567}
667#else 568#else
@@ -689,8 +590,8 @@ static void __init emergency_stack_init(void)
689 limit = min(0x10000000UL, lmb.rmo_size); 590 limit = min(0x10000000UL, lmb.rmo_size);
690 591
691 for_each_cpu(i) 592 for_each_cpu(i)
692 paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128, 593 paca[i].emergency_sp =
693 limit)) + PAGE_SIZE; 594 __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
694} 595}
695 596
696/* 597/*
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 444c3e81884c..876c57c11365 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -43,7 +43,7 @@
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44#include <asm/cacheflush.h> 44#include <asm/cacheflush.h>
45#ifdef CONFIG_PPC64 45#ifdef CONFIG_PPC64
46#include <asm/ppc32.h> 46#include "ppc32.h"
47#include <asm/ppcdebug.h> 47#include <asm/ppcdebug.h>
48#include <asm/unistd.h> 48#include <asm/unistd.h>
49#include <asm/vdso.h> 49#include <asm/vdso.h>
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
new file mode 100644
index 000000000000..ec9d0984b6a0
--- /dev/null
+++ b/arch/powerpc/kernel/signal_64.c
@@ -0,0 +1,581 @@
1/*
2 * linux/arch/ppc64/kernel/signal.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/i386/kernel/signal.c"
8 * Copyright (C) 1991, 1992 Linus Torvalds
9 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/config.h>
18#include <linux/sched.h>
19#include <linux/mm.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/kernel.h>
23#include <linux/signal.h>
24#include <linux/errno.h>
25#include <linux/wait.h>
26#include <linux/unistd.h>
27#include <linux/stddef.h>
28#include <linux/elf.h>
29#include <linux/ptrace.h>
30#include <linux/module.h>
31
32#include <asm/sigcontext.h>
33#include <asm/ucontext.h>
34#include <asm/uaccess.h>
35#include <asm/pgtable.h>
36#include <asm/ppcdebug.h>
37#include <asm/unistd.h>
38#include <asm/cacheflush.h>
39#include <asm/vdso.h>
40
41#define DEBUG_SIG 0
42
43#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
44
45#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
46#define FP_REGS_SIZE sizeof(elf_fpregset_t)
47
48#define TRAMP_TRACEBACK 3
49#define TRAMP_SIZE 6
50
51/*
52 * When we have signals to deliver, we set up on the user stack,
53 * going down from the original stack pointer:
54 * 1) a rt_sigframe struct which contains the ucontext
55 * 2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller
56 * frame for the signal handler.
57 */
58
59struct rt_sigframe {
60 /* sys_rt_sigreturn requires the ucontext be the first field */
61 struct ucontext uc;
62 unsigned long _unused[2];
63 unsigned int tramp[TRAMP_SIZE];
64 struct siginfo *pinfo;
65 void *puc;
66 struct siginfo info;
67 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
68 char abigap[288];
69} __attribute__ ((aligned (16)));
70
71
72/*
73 * Atomically swap in the new signal mask, and wait for a signal.
74 */
75long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int p4,
76 int p6, int p7, struct pt_regs *regs)
77{
78 sigset_t saveset, newset;
79
80 /* XXX: Don't preclude handling different sized sigset_t's. */
81 if (sigsetsize != sizeof(sigset_t))
82 return -EINVAL;
83
84 if (copy_from_user(&newset, unewset, sizeof(newset)))
85 return -EFAULT;
86 sigdelsetmask(&newset, ~_BLOCKABLE);
87
88 spin_lock_irq(&current->sighand->siglock);
89 saveset = current->blocked;
90 current->blocked = newset;
91 recalc_sigpending();
92 spin_unlock_irq(&current->sighand->siglock);
93
94 regs->result = -EINTR;
95 regs->gpr[3] = EINTR;
96 regs->ccr |= 0x10000000;
97 while (1) {
98 current->state = TASK_INTERRUPTIBLE;
99 schedule();
100 if (do_signal(&saveset, regs))
101 return 0;
102 }
103}
104
105long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long r5,
106 unsigned long r6, unsigned long r7, unsigned long r8,
107 struct pt_regs *regs)
108{
109 return do_sigaltstack(uss, uoss, regs->gpr[1]);
110}
111
112
113/*
114 * Set up the sigcontext for the signal frame.
115 */
116
117static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
118 int signr, sigset_t *set, unsigned long handler)
119{
120 /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
121 * process never used altivec yet (MSR_VEC is zero in pt_regs of
122 * the context). This is very important because we must ensure we
123 * don't lose the VRSAVE content that may have been set prior to
124 * the process doing its first vector operation
125 * Userland shall check AT_HWCAP to know wether it can rely on the
126 * v_regs pointer or not
127 */
128#ifdef CONFIG_ALTIVEC
129 elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
130#endif
131 long err = 0;
132
133 flush_fp_to_thread(current);
134
135 /* Make sure signal doesn't get spurrious FP exceptions */
136 current->thread.fpscr.val = 0;
137
138#ifdef CONFIG_ALTIVEC
139 err |= __put_user(v_regs, &sc->v_regs);
140
141 /* save altivec registers */
142 if (current->thread.used_vr) {
143 flush_altivec_to_thread(current);
144 /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
145 err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128));
146 /* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
147 * contains valid data.
148 */
149 regs->msr |= MSR_VEC;
150 }
151 /* We always copy to/from vrsave, it's 0 if we don't have or don't
152 * use altivec.
153 */
154 err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
155#else /* CONFIG_ALTIVEC */
156 err |= __put_user(0, &sc->v_regs);
157#endif /* CONFIG_ALTIVEC */
158 err |= __put_user(&sc->gp_regs, &sc->regs);
159 err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
160 err |= __copy_to_user(&sc->fp_regs, &current->thread.fpr, FP_REGS_SIZE);
161 err |= __put_user(signr, &sc->signal);
162 err |= __put_user(handler, &sc->handler);
163 if (set != NULL)
164 err |= __put_user(set->sig[0], &sc->oldmask);
165
166 return err;
167}
168
169/*
170 * Restore the sigcontext from the signal frame.
171 */
172
173static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
174 struct sigcontext __user *sc)
175{
176#ifdef CONFIG_ALTIVEC
177 elf_vrreg_t __user *v_regs;
178#endif
179 unsigned long err = 0;
180 unsigned long save_r13 = 0;
181 elf_greg_t *gregs = (elf_greg_t *)regs;
182#ifdef CONFIG_ALTIVEC
183 unsigned long msr;
184#endif
185 int i;
186
187 /* If this is not a signal return, we preserve the TLS in r13 */
188 if (!sig)
189 save_r13 = regs->gpr[13];
190
191 /* copy everything before MSR */
192 err |= __copy_from_user(regs, &sc->gp_regs,
193 PT_MSR*sizeof(unsigned long));
194
195 /* skip MSR and SOFTE */
196 for (i = PT_MSR+1; i <= PT_RESULT; i++) {
197 if (i == PT_SOFTE)
198 continue;
199 err |= __get_user(gregs[i], &sc->gp_regs[i]);
200 }
201
202 if (!sig)
203 regs->gpr[13] = save_r13;
204 err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
205 if (set != NULL)
206 err |= __get_user(set->sig[0], &sc->oldmask);
207
208#ifdef CONFIG_ALTIVEC
209 err |= __get_user(v_regs, &sc->v_regs);
210 err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
211 if (err)
212 return err;
213 /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
214 if (v_regs != 0 && (msr & MSR_VEC) != 0)
215 err |= __copy_from_user(current->thread.vr, v_regs,
216 33 * sizeof(vector128));
217 else if (current->thread.used_vr)
218 memset(current->thread.vr, 0, 33 * sizeof(vector128));
219 /* Always get VRSAVE back */
220 if (v_regs != 0)
221 err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
222 else
223 current->thread.vrsave = 0;
224#endif /* CONFIG_ALTIVEC */
225
226#ifndef CONFIG_SMP
227 preempt_disable();
228 if (last_task_used_math == current)
229 last_task_used_math = NULL;
230 if (last_task_used_altivec == current)
231 last_task_used_altivec = NULL;
232 preempt_enable();
233#endif
234 /* Force reload of FP/VEC */
235 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC);
236
237 return err;
238}
239
240/*
241 * Allocate space for the signal frame
242 */
243static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
244 size_t frame_size)
245{
246 unsigned long newsp;
247
248 /* Default to using normal stack */
249 newsp = regs->gpr[1];
250
251 if (ka->sa.sa_flags & SA_ONSTACK) {
252 if (! on_sig_stack(regs->gpr[1]))
253 newsp = (current->sas_ss_sp + current->sas_ss_size);
254 }
255
256 return (void __user *)((newsp - frame_size) & -16ul);
257}
258
259/*
260 * Setup the trampoline code on the stack
261 */
262static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
263{
264 int i;
265 long err = 0;
266
267 /* addi r1, r1, __SIGNAL_FRAMESIZE # Pop the dummy stackframe */
268 err |= __put_user(0x38210000UL | (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]);
269 /* li r0, __NR_[rt_]sigreturn| */
270 err |= __put_user(0x38000000UL | (syscall & 0xffff), &tramp[1]);
271 /* sc */
272 err |= __put_user(0x44000002UL, &tramp[2]);
273
274 /* Minimal traceback info */
275 for (i=TRAMP_TRACEBACK; i < TRAMP_SIZE ;i++)
276 err |= __put_user(0, &tramp[i]);
277
278 if (!err)
279 flush_icache_range((unsigned long) &tramp[0],
280 (unsigned long) &tramp[TRAMP_SIZE]);
281
282 return err;
283}
284
285/*
286 * Restore the user process's signal mask (also used by signal32.c)
287 */
288void restore_sigmask(sigset_t *set)
289{
290 sigdelsetmask(set, ~_BLOCKABLE);
291 spin_lock_irq(&current->sighand->siglock);
292 current->blocked = *set;
293 recalc_sigpending();
294 spin_unlock_irq(&current->sighand->siglock);
295}
296
297
298/*
299 * Handle {get,set,swap}_context operations
300 */
301int sys_swapcontext(struct ucontext __user *old_ctx,
302 struct ucontext __user *new_ctx,
303 long ctx_size, long r6, long r7, long r8, struct pt_regs *regs)
304{
305 unsigned char tmp;
306 sigset_t set;
307
308 /* Context size is for future use. Right now, we only make sure
309 * we are passed something we understand
310 */
311 if (ctx_size < sizeof(struct ucontext))
312 return -EINVAL;
313
314 if (old_ctx != NULL) {
315 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
316 || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0)
317 || __copy_to_user(&old_ctx->uc_sigmask,
318 &current->blocked, sizeof(sigset_t)))
319 return -EFAULT;
320 }
321 if (new_ctx == NULL)
322 return 0;
323 if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
324 || __get_user(tmp, (u8 __user *) new_ctx)
325 || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
326 return -EFAULT;
327
328 /*
329 * If we get a fault copying the context into the kernel's
330 * image of the user's registers, we can't just return -EFAULT
331 * because the user's registers will be corrupted. For instance
332 * the NIP value may have been updated but not some of the
333 * other registers. Given that we have done the access_ok
334 * and successfully read the first and last bytes of the region
335 * above, this should only happen in an out-of-memory situation
336 * or if another thread unmaps the region containing the context.
337 * We kill the task with a SIGSEGV in this situation.
338 */
339
340 if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
341 do_exit(SIGSEGV);
342 restore_sigmask(&set);
343 if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
344 do_exit(SIGSEGV);
345
346 /* This returns like rt_sigreturn */
347 return 0;
348}
349
350
351/*
352 * Do a signal return; undo the signal stack.
353 */
354
355int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
356 unsigned long r6, unsigned long r7, unsigned long r8,
357 struct pt_regs *regs)
358{
359 struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
360 sigset_t set;
361
362 /* Always make any pending restarted system calls return -EINTR */
363 current_thread_info()->restart_block.fn = do_no_restart_syscall;
364
365 if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
366 goto badframe;
367
368 if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
369 goto badframe;
370 restore_sigmask(&set);
371 if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
372 goto badframe;
373
374 /* do_sigaltstack expects a __user pointer and won't modify
375 * what's in there anyway
376 */
377 do_sigaltstack(&uc->uc_stack, NULL, regs->gpr[1]);
378
379 return regs->result;
380
381badframe:
382#if DEBUG_SIG
383 printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
384 regs, uc, &uc->uc_mcontext);
385#endif
386 force_sig(SIGSEGV, current);
387 return 0;
388}
389
390static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
391 sigset_t *set, struct pt_regs *regs)
392{
393 /* Handler is *really* a pointer to the function descriptor for
394 * the signal routine. The first entry in the function
395 * descriptor is the entry address of signal and the second
396 * entry is the TOC value we need to use.
397 */
398 func_descr_t __user *funct_desc_ptr;
399 struct rt_sigframe __user *frame;
400 unsigned long newsp = 0;
401 long err = 0;
402
403 frame = get_sigframe(ka, regs, sizeof(*frame));
404
405 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
406 goto badframe;
407
408 err |= __put_user(&frame->info, &frame->pinfo);
409 err |= __put_user(&frame->uc, &frame->puc);
410 err |= copy_siginfo_to_user(&frame->info, info);
411 if (err)
412 goto badframe;
413
414 /* Create the ucontext. */
415 err |= __put_user(0, &frame->uc.uc_flags);
416 err |= __put_user(0, &frame->uc.uc_link);
417 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
418 err |= __put_user(sas_ss_flags(regs->gpr[1]),
419 &frame->uc.uc_stack.ss_flags);
420 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
421 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr, NULL,
422 (unsigned long)ka->sa.sa_handler);
423 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
424 if (err)
425 goto badframe;
426
427 /* Set up to return from userspace. */
428 if (vdso64_rt_sigtramp && current->thread.vdso_base) {
429 regs->link = current->thread.vdso_base + vdso64_rt_sigtramp;
430 } else {
431 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
432 if (err)
433 goto badframe;
434 regs->link = (unsigned long) &frame->tramp[0];
435 }
436 funct_desc_ptr = (func_descr_t __user *) ka->sa.sa_handler;
437
438 /* Allocate a dummy caller frame for the signal handler. */
439 newsp = (unsigned long)frame - __SIGNAL_FRAMESIZE;
440 err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);
441
442 /* Set up "regs" so we "return" to the signal handler. */
443 err |= get_user(regs->nip, &funct_desc_ptr->entry);
444 regs->gpr[1] = newsp;
445 err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
446 regs->gpr[3] = signr;
447 regs->result = 0;
448 if (ka->sa.sa_flags & SA_SIGINFO) {
449 err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
450 err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
451 regs->gpr[6] = (unsigned long) frame;
452 } else {
453 regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
454 }
455 if (err)
456 goto badframe;
457
458 if (test_thread_flag(TIF_SINGLESTEP))
459 ptrace_notify(SIGTRAP);
460
461 return 1;
462
463badframe:
464#if DEBUG_SIG
465 printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
466 regs, frame, newsp);
467#endif
468 force_sigsegv(signr, current);
469 return 0;
470}
471
472
473/*
474 * OK, we're invoking a handler
475 */
476static int handle_signal(unsigned long sig, struct k_sigaction *ka,
477 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
478{
479 int ret;
480
481 /* Set up Signal Frame */
482 ret = setup_rt_frame(sig, ka, info, oldset, regs);
483
484 if (ret) {
485 spin_lock_irq(&current->sighand->siglock);
486 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
487 if (!(ka->sa.sa_flags & SA_NODEFER))
488 sigaddset(&current->blocked,sig);
489 recalc_sigpending();
490 spin_unlock_irq(&current->sighand->siglock);
491 }
492
493 return ret;
494}
495
496static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
497{
498 switch ((int)regs->result) {
499 case -ERESTART_RESTARTBLOCK:
500 case -ERESTARTNOHAND:
501 /* ERESTARTNOHAND means that the syscall should only be
502 * restarted if there was no handler for the signal, and since
503 * we only get here if there is a handler, we dont restart.
504 */
505 regs->result = -EINTR;
506 break;
507 case -ERESTARTSYS:
508 /* ERESTARTSYS means to restart the syscall if there is no
509 * handler or the handler was registered with SA_RESTART
510 */
511 if (!(ka->sa.sa_flags & SA_RESTART)) {
512 regs->result = -EINTR;
513 break;
514 }
515 /* fallthrough */
516 case -ERESTARTNOINTR:
517 /* ERESTARTNOINTR means that the syscall should be
518 * called again after the signal handler returns.
519 */
520 regs->gpr[3] = regs->orig_gpr3;
521 regs->nip -= 4;
522 regs->result = 0;
523 break;
524 }
525}
526
527/*
528 * Note that 'init' is a special process: it doesn't get signals it doesn't
529 * want to handle. Thus you cannot kill init even with a SIGKILL even by
530 * mistake.
531 */
532int do_signal(sigset_t *oldset, struct pt_regs *regs)
533{
534 siginfo_t info;
535 int signr;
536 struct k_sigaction ka;
537
538 /*
539 * If the current thread is 32 bit - invoke the
540 * 32 bit signal handling code
541 */
542 if (test_thread_flag(TIF_32BIT))
543 return do_signal32(oldset, regs);
544
545 if (!oldset)
546 oldset = &current->blocked;
547
548 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
549 if (signr > 0) {
550 /* Whee! Actually deliver the signal. */
551 if (TRAP(regs) == 0x0C00)
552 syscall_restart(regs, &ka);
553
554 /*
555 * Reenable the DABR before delivering the signal to
556 * user space. The DABR will have been cleared if it
557 * triggered inside the kernel.
558 */
559 if (current->thread.dabr)
560 set_dabr(current->thread.dabr);
561
562 return handle_signal(signr, &ka, &info, oldset, regs);
563 }
564
565 if (TRAP(regs) == 0x0C00) { /* System Call! */
566 if ((int)regs->result == -ERESTARTNOHAND ||
567 (int)regs->result == -ERESTARTSYS ||
568 (int)regs->result == -ERESTARTNOINTR) {
569 regs->gpr[3] = regs->orig_gpr3;
570 regs->nip -= 4; /* Back up & retry system call */
571 regs->result = 0;
572 } else if ((int)regs->result == -ERESTART_RESTARTBLOCK) {
573 regs->gpr[0] = __NR_restart_syscall;
574 regs->nip -= 4;
575 regs->result = 0;
576 }
577 }
578
579 return 0;
580}
581EXPORT_SYMBOL(do_signal);
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
new file mode 100644
index 000000000000..9adef3bddad4
--- /dev/null
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -0,0 +1,171 @@
1/*
2 * Smp timebase synchronization for ppc.
3 *
4 * Copyright (C) 2003 Samuel Rydh (samuel@ibrium.se)
5 *
6 */
7
8#include <linux/config.h>
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/smp.h>
12#include <linux/unistd.h>
13#include <linux/init.h>
14#include <asm/atomic.h>
15#include <asm/smp.h>
16#include <asm/time.h>
17
18#define NUM_ITER 300
19
20enum {
21 kExit=0, kSetAndTest, kTest
22};
23
24static struct {
25 volatile u64 tb;
26 volatile u64 mark;
27 volatile int cmd;
28 volatile int handshake;
29 int filler[2];
30
31 volatile int ack;
32 int filler2[7];
33
34 volatile int race_result;
35} *tbsync;
36
37static volatile int running;
38
39static void __devinit enter_contest(u64 mark, long add)
40{
41 while (get_tb() < mark)
42 tbsync->race_result = add;
43}
44
45void __devinit smp_generic_take_timebase(void)
46{
47 int cmd;
48 u64 tb;
49
50 local_irq_disable();
51 while (!running)
52 barrier();
53 rmb();
54
55 for (;;) {
56 tbsync->ack = 1;
57 while (!tbsync->handshake)
58 barrier();
59 rmb();
60
61 cmd = tbsync->cmd;
62 tb = tbsync->tb;
63 mb();
64 tbsync->ack = 0;
65 if (cmd == kExit)
66 break;
67
68 while (tbsync->handshake)
69 barrier();
70 if (cmd == kSetAndTest)
71 set_tb(tb >> 32, tb & 0xfffffffful);
72 enter_contest(tbsync->mark, -1);
73 }
74 local_irq_enable();
75}
76
77static int __devinit start_contest(int cmd, long offset, int num)
78{
79 int i, score=0;
80 u64 tb;
81 long mark;
82
83 tbsync->cmd = cmd;
84
85 local_irq_disable();
86 for (i = -3; i < num; ) {
87 tb = get_tb() + 400;
88 tbsync->tb = tb + offset;
89 tbsync->mark = mark = tb + 400;
90
91 wmb();
92
93 tbsync->handshake = 1;
94 while (tbsync->ack)
95 barrier();
96
97 while (get_tb() <= tb)
98 barrier();
99 tbsync->handshake = 0;
100 enter_contest(mark, 1);
101
102 while (!tbsync->ack)
103 barrier();
104
105 if (i++ > 0)
106 score += tbsync->race_result;
107 }
108 local_irq_enable();
109 return score;
110}
111
112void __devinit smp_generic_give_timebase(void)
113{
114 int i, score, score2, old, min=0, max=5000, offset=1000;
115
116 printk("Synchronizing timebase\n");
117
118 /* if this fails then this kernel won't work anyway... */
119 tbsync = kmalloc( sizeof(*tbsync), GFP_KERNEL );
120 memset( tbsync, 0, sizeof(*tbsync) );
121 mb();
122 running = 1;
123
124 while (!tbsync->ack)
125 barrier();
126
127 printk("Got ack\n");
128
129 /* binary search */
130 for (old = -1; old != offset ; offset = (min+max) / 2) {
131 score = start_contest(kSetAndTest, offset, NUM_ITER);
132
133 printk("score %d, offset %d\n", score, offset );
134
135 if( score > 0 )
136 max = offset;
137 else
138 min = offset;
139 old = offset;
140 }
141 score = start_contest(kSetAndTest, min, NUM_ITER);
142 score2 = start_contest(kSetAndTest, max, NUM_ITER);
143
144 printk("Min %d (score %d), Max %d (score %d)\n",
145 min, score, max, score2);
146 score = abs(score);
147 score2 = abs(score2);
148 offset = (score < score2) ? min : max;
149
150 /* guard against inaccurate mttb */
151 for (i = 0; i < 10; i++) {
152 start_contest(kSetAndTest, offset, NUM_ITER/10);
153
154 if ((score2 = start_contest(kTest, offset, NUM_ITER)) < 0)
155 score2 = -score2;
156 if (score2 <= score || score2 < 20)
157 break;
158 }
159 printk("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER );
160
161 /* exiting */
162 tbsync->cmd = kExit;
163 wmb();
164 tbsync->handshake = 1;
165 while (tbsync->ack)
166 barrier();
167 tbsync->handshake = 0;
168 kfree(tbsync);
169 tbsync = NULL;
170 running = 0;
171}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
new file mode 100644
index 000000000000..1794a694a928
--- /dev/null
+++ b/arch/powerpc/kernel/smp.c
@@ -0,0 +1,565 @@
1/*
2 * SMP support for ppc.
3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
5 * deal of code from the sparc and intel versions.
6 *
7 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
8 *
9 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
10 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18#undef DEBUG
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/sched.h>
24#include <linux/smp.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/spinlock.h>
29#include <linux/cache.h>
30#include <linux/err.h>
31#include <linux/sysdev.h>
32#include <linux/cpu.h>
33#include <linux/notifier.h>
34
35#include <asm/ptrace.h>
36#include <asm/atomic.h>
37#include <asm/irq.h>
38#include <asm/page.h>
39#include <asm/pgtable.h>
40#include <asm/prom.h>
41#include <asm/smp.h>
42#include <asm/time.h>
43#include <asm/xmon.h>
44#include <asm/machdep.h>
45#include <asm/cputable.h>
46#include <asm/system.h>
47#include <asm/mpic.h>
48#ifdef CONFIG_PPC64
49#include <asm/paca.h>
50#endif
51
52int smp_hw_index[NR_CPUS];
53struct thread_info *secondary_ti;
54
55#ifdef DEBUG
56#define DBG(fmt...) udbg_printf(fmt)
57#else
58#define DBG(fmt...)
59#endif
60
61cpumask_t cpu_possible_map = CPU_MASK_NONE;
62cpumask_t cpu_online_map = CPU_MASK_NONE;
63cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
64
65EXPORT_SYMBOL(cpu_online_map);
66EXPORT_SYMBOL(cpu_possible_map);
67
68/* SMP operations for this machine */
69struct smp_ops_t *smp_ops;
70
71static volatile unsigned int cpu_callin_map[NR_CPUS];
72
73void smp_call_function_interrupt(void);
74
75int smt_enabled_at_boot = 1;
76
77#ifdef CONFIG_MPIC
78int __init smp_mpic_probe(void)
79{
80 int nr_cpus;
81
82 DBG("smp_mpic_probe()...\n");
83
84 nr_cpus = cpus_weight(cpu_possible_map);
85
86 DBG("nr_cpus: %d\n", nr_cpus);
87
88 if (nr_cpus > 1)
89 mpic_request_ipis();
90
91 return nr_cpus;
92}
93
94void __devinit smp_mpic_setup_cpu(int cpu)
95{
96 mpic_setup_this_cpu();
97}
98#endif /* CONFIG_MPIC */
99
100#ifdef CONFIG_PPC64
101void __devinit smp_generic_kick_cpu(int nr)
102{
103 BUG_ON(nr < 0 || nr >= NR_CPUS);
104
105 /*
106 * The processor is currently spinning, waiting for the
107 * cpu_start field to become non-zero After we set cpu_start,
108 * the processor will continue on to secondary_start
109 */
110 paca[nr].cpu_start = 1;
111 smp_mb();
112}
113#endif
114
115void smp_message_recv(int msg, struct pt_regs *regs)
116{
117 switch(msg) {
118 case PPC_MSG_CALL_FUNCTION:
119 smp_call_function_interrupt();
120 break;
121 case PPC_MSG_RESCHEDULE:
122 /* XXX Do we have to do this? */
123 set_need_resched();
124 break;
125#ifdef CONFIG_DEBUGGER
126 case PPC_MSG_DEBUGGER_BREAK:
127 debugger_ipi(regs);
128 break;
129#endif
130 default:
131 printk("SMP %d: smp_message_recv(): unknown msg %d\n",
132 smp_processor_id(), msg);
133 break;
134 }
135}
136
137void smp_send_reschedule(int cpu)
138{
139 smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
140}
141
142#ifdef CONFIG_DEBUGGER
143void smp_send_debugger_break(int cpu)
144{
145 smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
146}
147#endif
148
149static void stop_this_cpu(void *dummy)
150{
151 local_irq_disable();
152 while (1)
153 ;
154}
155
156void smp_send_stop(void)
157{
158 smp_call_function(stop_this_cpu, NULL, 1, 0);
159}
160
161/*
162 * Structure and data for smp_call_function(). This is designed to minimise
163 * static memory requirements. It also looks cleaner.
164 * Stolen from the i386 version.
165 */
166static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
167
168static struct call_data_struct {
169 void (*func) (void *info);
170 void *info;
171 atomic_t started;
172 atomic_t finished;
173 int wait;
174} *call_data;
175
176/* delay of at least 8 seconds */
177#define SMP_CALL_TIMEOUT 8
178
179/*
180 * This function sends a 'generic call function' IPI to all other CPUs
181 * in the system.
182 *
183 * [SUMMARY] Run a function on all other CPUs.
184 * <func> The function to run. This must be fast and non-blocking.
185 * <info> An arbitrary pointer to pass to the function.
186 * <nonatomic> currently unused.
187 * <wait> If true, wait (atomically) until function has completed on other CPUs.
188 * [RETURNS] 0 on success, else a negative status code. Does not return until
189 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
190 *
191 * You must not call this function with disabled interrupts or from a
192 * hardware interrupt handler or from a bottom half handler.
193 */
194int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
195 int wait)
196{
197 struct call_data_struct data;
198 int ret = -1, cpus;
199 u64 timeout;
200
201 /* Can deadlock when called with interrupts disabled */
202 WARN_ON(irqs_disabled());
203
204 data.func = func;
205 data.info = info;
206 atomic_set(&data.started, 0);
207 data.wait = wait;
208 if (wait)
209 atomic_set(&data.finished, 0);
210
211 spin_lock(&call_lock);
212 /* Must grab online cpu count with preempt disabled, otherwise
213 * it can change. */
214 cpus = num_online_cpus() - 1;
215 if (!cpus) {
216 ret = 0;
217 goto out;
218 }
219
220 call_data = &data;
221 smp_wmb();
222 /* Send a message to all other CPUs and wait for them to respond */
223 smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);
224
225 timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
226
227 /* Wait for response */
228 while (atomic_read(&data.started) != cpus) {
229 HMT_low();
230 if (get_tb() >= timeout) {
231 printk("smp_call_function on cpu %d: other cpus not "
232 "responding (%d)\n", smp_processor_id(),
233 atomic_read(&data.started));
234 debugger(NULL);
235 goto out;
236 }
237 }
238
239 if (wait) {
240 while (atomic_read(&data.finished) != cpus) {
241 HMT_low();
242 if (get_tb() >= timeout) {
243 printk("smp_call_function on cpu %d: other "
244 "cpus not finishing (%d/%d)\n",
245 smp_processor_id(),
246 atomic_read(&data.finished),
247 atomic_read(&data.started));
248 debugger(NULL);
249 goto out;
250 }
251 }
252 }
253
254 ret = 0;
255
256 out:
257 call_data = NULL;
258 HMT_medium();
259 spin_unlock(&call_lock);
260 return ret;
261}
262
263EXPORT_SYMBOL(smp_call_function);
264
265void smp_call_function_interrupt(void)
266{
267 void (*func) (void *info);
268 void *info;
269 int wait;
270
271 /* call_data will be NULL if the sender timed out while
272 * waiting on us to receive the call.
273 */
274 if (!call_data)
275 return;
276
277 func = call_data->func;
278 info = call_data->info;
279 wait = call_data->wait;
280
281 if (!wait)
282 smp_mb__before_atomic_inc();
283
284 /*
285 * Notify initiating CPU that I've grabbed the data and am
286 * about to execute the function
287 */
288 atomic_inc(&call_data->started);
289 /*
290 * At this point the info structure may be out of scope unless wait==1
291 */
292 (*func)(info);
293 if (wait) {
294 smp_mb__before_atomic_inc();
295 atomic_inc(&call_data->finished);
296 }
297}
298
299extern struct gettimeofday_struct do_gtod;
300
301struct thread_info *current_set[NR_CPUS];
302
303DECLARE_PER_CPU(unsigned int, pvr);
304
305static void __devinit smp_store_cpu_info(int id)
306{
307 per_cpu(pvr, id) = mfspr(SPRN_PVR);
308}
309
310static void __init smp_create_idle(unsigned int cpu)
311{
312 struct task_struct *p;
313
314 /* create a process for the processor */
315 p = fork_idle(cpu);
316 if (IS_ERR(p))
317 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
318#ifdef CONFIG_PPC64
319 paca[cpu].__current = p;
320#endif
321 current_set[cpu] = p->thread_info;
322 p->thread_info->cpu = cpu;
323}
324
325void __init smp_prepare_cpus(unsigned int max_cpus)
326{
327 unsigned int cpu;
328
329 DBG("smp_prepare_cpus\n");
330
331 /*
332 * setup_cpu may need to be called on the boot cpu. We havent
333 * spun any cpus up but lets be paranoid.
334 */
335 BUG_ON(boot_cpuid != smp_processor_id());
336
337 /* Fixup boot cpu */
338 smp_store_cpu_info(boot_cpuid);
339 cpu_callin_map[boot_cpuid] = 1;
340
341 max_cpus = smp_ops->probe();
342
343 smp_space_timers(max_cpus);
344
345 for_each_cpu(cpu)
346 if (cpu != boot_cpuid)
347 smp_create_idle(cpu);
348}
349
350void __devinit smp_prepare_boot_cpu(void)
351{
352 BUG_ON(smp_processor_id() != boot_cpuid);
353
354 cpu_set(boot_cpuid, cpu_online_map);
355#ifdef CONFIG_PPC64
356 paca[boot_cpuid].__current = current;
357#endif
358 current_set[boot_cpuid] = current->thread_info;
359}
360
361#ifdef CONFIG_HOTPLUG_CPU
362/* State of each CPU during hotplug phases */
363DEFINE_PER_CPU(int, cpu_state) = { 0 };
364
365int generic_cpu_disable(void)
366{
367 unsigned int cpu = smp_processor_id();
368
369 if (cpu == boot_cpuid)
370 return -EBUSY;
371
372 systemcfg->processorCount--;
373 cpu_clear(cpu, cpu_online_map);
374 fixup_irqs(cpu_online_map);
375 return 0;
376}
377
378int generic_cpu_enable(unsigned int cpu)
379{
380 /* Do the normal bootup if we haven't
381 * already bootstrapped. */
382 if (system_state != SYSTEM_RUNNING)
383 return -ENOSYS;
384
385 /* get the target out of it's holding state */
386 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
387 smp_wmb();
388
389 while (!cpu_online(cpu))
390 cpu_relax();
391
392 fixup_irqs(cpu_online_map);
393 /* counter the irq disable in fixup_irqs */
394 local_irq_enable();
395 return 0;
396}
397
398void generic_cpu_die(unsigned int cpu)
399{
400 int i;
401
402 for (i = 0; i < 100; i++) {
403 smp_rmb();
404 if (per_cpu(cpu_state, cpu) == CPU_DEAD)
405 return;
406 msleep(100);
407 }
408 printk(KERN_ERR "CPU%d didn't die...\n", cpu);
409}
410
411void generic_mach_cpu_die(void)
412{
413 unsigned int cpu;
414
415 local_irq_disable();
416 cpu = smp_processor_id();
417 printk(KERN_DEBUG "CPU%d offline\n", cpu);
418 __get_cpu_var(cpu_state) = CPU_DEAD;
419 smp_wmb();
420 while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
421 cpu_relax();
422
423 flush_tlb_pending();
424 cpu_set(cpu, cpu_online_map);
425 local_irq_enable();
426}
427#endif
428
429static int __devinit cpu_enable(unsigned int cpu)
430{
431 if (smp_ops->cpu_enable)
432 return smp_ops->cpu_enable(cpu);
433
434 return -ENOSYS;
435}
436
437int __devinit __cpu_up(unsigned int cpu)
438{
439 int c;
440
441 secondary_ti = current_set[cpu];
442 if (!cpu_enable(cpu))
443 return 0;
444
445 if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
446 return -EINVAL;
447
448#ifdef CONFIG_PPC64
449 paca[cpu].default_decr = tb_ticks_per_jiffy;
450#endif
451
452 /* Make sure callin-map entry is 0 (can be leftover a CPU
453 * hotplug
454 */
455 cpu_callin_map[cpu] = 0;
456
457 /* The information for processor bringup must
458 * be written out to main store before we release
459 * the processor.
460 */
461 smp_mb();
462
463 /* wake up cpus */
464 DBG("smp: kicking cpu %d\n", cpu);
465 smp_ops->kick_cpu(cpu);
466
467 /*
468 * wait to see if the cpu made a callin (is actually up).
469 * use this value that I found through experimentation.
470 * -- Cort
471 */
472 if (system_state < SYSTEM_RUNNING)
473 for (c = 5000; c && !cpu_callin_map[cpu]; c--)
474 udelay(100);
475#ifdef CONFIG_HOTPLUG_CPU
476 else
477 /*
478 * CPUs can take much longer to come up in the
479 * hotplug case. Wait five seconds.
480 */
481 for (c = 25; c && !cpu_callin_map[cpu]; c--) {
482 msleep(200);
483 }
484#endif
485
486 if (!cpu_callin_map[cpu]) {
487 printk("Processor %u is stuck.\n", cpu);
488 return -ENOENT;
489 }
490
491 printk("Processor %u found.\n", cpu);
492
493 if (smp_ops->give_timebase)
494 smp_ops->give_timebase();
495
496 /* Wait until cpu puts itself in the online map */
497 while (!cpu_online(cpu))
498 cpu_relax();
499
500 return 0;
501}
502
503
504/* Activate a secondary processor. */
505int __devinit start_secondary(void *unused)
506{
507 unsigned int cpu = smp_processor_id();
508
509 atomic_inc(&init_mm.mm_count);
510 current->active_mm = &init_mm;
511
512 smp_store_cpu_info(cpu);
513 set_dec(tb_ticks_per_jiffy);
514 cpu_callin_map[cpu] = 1;
515
516 smp_ops->setup_cpu(cpu);
517 if (smp_ops->take_timebase)
518 smp_ops->take_timebase();
519
520 spin_lock(&call_lock);
521 cpu_set(cpu, cpu_online_map);
522 spin_unlock(&call_lock);
523
524 local_irq_enable();
525
526 cpu_idle();
527 return 0;
528}
529
530int setup_profiling_timer(unsigned int multiplier)
531{
532 return 0;
533}
534
535void __init smp_cpus_done(unsigned int max_cpus)
536{
537 cpumask_t old_mask;
538
539 /* We want the setup_cpu() here to be called from CPU 0, but our
540 * init thread may have been "borrowed" by another CPU in the meantime
541 * se we pin us down to CPU 0 for a short while
542 */
543 old_mask = current->cpus_allowed;
544 set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
545
546 smp_ops->setup_cpu(boot_cpuid);
547
548 set_cpus_allowed(current, old_mask);
549}
550
551#ifdef CONFIG_HOTPLUG_CPU
552int __cpu_disable(void)
553{
554 if (smp_ops->cpu_disable)
555 return smp_ops->cpu_disable();
556
557 return -ENOSYS;
558}
559
560void __cpu_die(unsigned int cpu)
561{
562 if (smp_ops->cpu_die)
563 smp_ops->cpu_die(cpu);
564}
565#endif
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 23436b6c1881..b1c89bc4bf90 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -66,9 +66,10 @@
66#include <asm/firmware.h> 66#include <asm/firmware.h>
67#endif 67#endif
68#ifdef CONFIG_PPC_ISERIES 68#ifdef CONFIG_PPC_ISERIES
69#include <asm/iSeries/ItLpQueue.h> 69#include <asm/iseries/it_lp_queue.h>
70#include <asm/iSeries/HvCallXm.h> 70#include <asm/iseries/hv_call_xm.h>
71#endif 71#endif
72#include <asm/smp.h>
72 73
73/* keep track of when we need to update the rtc */ 74/* keep track of when we need to update the rtc */
74time_t last_rtc_update; 75time_t last_rtc_update;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 5d638ecddbd0..07e5ee40b870 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -147,8 +147,8 @@ int die(const char *str, struct pt_regs *regs, long err)
147 printk("POWERMAC "); 147 printk("POWERMAC ");
148 nl = 1; 148 nl = 1;
149 break; 149 break;
150 case PLATFORM_BPA: 150 case PLATFORM_CELL:
151 printk("BPA "); 151 printk("CELL ");
152 nl = 1; 152 nl = 1;
153 break; 153 break;
154 } 154 }
@@ -749,22 +749,22 @@ static int check_bug_trap(struct pt_regs *regs)
749 if (bug->line & BUG_WARNING_TRAP) { 749 if (bug->line & BUG_WARNING_TRAP) {
750 /* this is a WARN_ON rather than BUG/BUG_ON */ 750 /* this is a WARN_ON rather than BUG/BUG_ON */
751#ifdef CONFIG_XMON 751#ifdef CONFIG_XMON
752 xmon_printf(KERN_ERR "Badness in %s at %s:%d\n", 752 xmon_printf(KERN_ERR "Badness in %s at %s:%ld\n",
753 bug->function, bug->file, 753 bug->function, bug->file,
754 bug->line & ~BUG_WARNING_TRAP); 754 bug->line & ~BUG_WARNING_TRAP);
755#endif /* CONFIG_XMON */ 755#endif /* CONFIG_XMON */
756 printk(KERN_ERR "Badness in %s at %s:%d\n", 756 printk(KERN_ERR "Badness in %s at %s:%ld\n",
757 bug->function, bug->file, 757 bug->function, bug->file,
758 bug->line & ~BUG_WARNING_TRAP); 758 bug->line & ~BUG_WARNING_TRAP);
759 dump_stack(); 759 dump_stack();
760 return 1; 760 return 1;
761 } 761 }
762#ifdef CONFIG_XMON 762#ifdef CONFIG_XMON
763 xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n", 763 xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
764 bug->function, bug->file, bug->line); 764 bug->function, bug->file, bug->line);
765 xmon(regs); 765 xmon(regs);
766#endif /* CONFIG_XMON */ 766#endif /* CONFIG_XMON */
767 printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n", 767 printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
768 bug->function, bug->file, bug->line); 768 bug->function, bug->file, bug->line);
769 769
770 return 0; 770 return 0;
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index d4dfcfbce272..7fa7b15fd8e6 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -3,9 +3,12 @@
3#include <asm/page.h> 3#include <asm/page.h>
4#else 4#else
5#define PAGE_SIZE 4096 5#define PAGE_SIZE 4096
6#define KERNELBASE CONFIG_KERNEL_START
6#endif 7#endif
7#include <asm-generic/vmlinux.lds.h> 8#include <asm-generic/vmlinux.lds.h>
8 9
10ENTRY(_stext)
11
9#ifdef CONFIG_PPC64 12#ifdef CONFIG_PPC64
10OUTPUT_ARCH(powerpc:common64) 13OUTPUT_ARCH(powerpc:common64)
11jiffies = jiffies_64; 14jiffies = jiffies_64;
@@ -21,33 +24,9 @@ SECTIONS
21 *(.exit.data) 24 *(.exit.data)
22 } 25 }
23 26
27 . = KERNELBASE;
24 28
25 /* Read-only sections, merged into text segment: */ 29 /* Read-only sections, merged into text segment: */
26#ifdef CONFIG_PPC32
27 . = + SIZEOF_HEADERS;
28 .interp : { *(.interp) }
29 .hash : { *(.hash) }
30 .dynsym : { *(.dynsym) }
31 .dynstr : { *(.dynstr) }
32 .rel.text : { *(.rel.text) }
33 .rela.text : { *(.rela.text) }
34 .rel.data : { *(.rel.data) }
35 .rela.data : { *(.rela.data) }
36 .rel.rodata : { *(.rel.rodata) }
37 .rela.rodata : { *(.rela.rodata) }
38 .rel.got : { *(.rel.got) }
39 .rela.got : { *(.rela.got) }
40 .rel.ctors : { *(.rel.ctors) }
41 .rela.ctors : { *(.rela.ctors) }
42 .rel.dtors : { *(.rel.dtors) }
43 .rela.dtors : { *(.rela.dtors) }
44 .rel.bss : { *(.rel.bss) }
45 .rela.bss : { *(.rela.bss) }
46 .rel.plt : { *(.rel.plt) }
47 .rela.plt : { *(.rela.plt) }
48/* .init : { *(.init) } =0*/
49 .plt : { *(.plt) }
50#endif
51 .text : { 30 .text : {
52 *(.text .text.*) 31 *(.text .text.*)
53 SCHED_TEXT 32 SCHED_TEXT